#include "builtin.h"
#include "config.h"
#include "parse-options.h"
#include "fsmonitor.h"
#include "fsmonitor-ipc.h"
#include "compat/fsmonitor/fsm-listen.h"
#include "fsmonitor--daemon.h"
#include "simple-ipc.h"
#include "khash.h"
#include "pkt-line.h"

static const char * const builtin_fsmonitor__daemon_usage[] = {
	N_("git fsmonitor--daemon start [<options>]"),
	N_("git fsmonitor--daemon run [<options>]"),
	N_("git fsmonitor--daemon stop"),
	N_("git fsmonitor--daemon status"),
	NULL
};

#ifdef HAVE_FSMONITOR_DAEMON_BACKEND
/*
 * Global state loaded from config.
 */
#define FSMONITOR__IPC_THREADS "fsmonitor.ipcthreads"
static int fsmonitor__ipc_threads = 8;

#define FSMONITOR__START_TIMEOUT "fsmonitor.starttimeout"
static int fsmonitor__start_timeout_sec = 60;

#define FSMONITOR__ANNOUNCE_STARTUP "fsmonitor.announcestartup"
static int fsmonitor__announce_startup = 0;
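/*
 * These map to config settings such as (illustrative values only):
 *
 *     [fsmonitor]
 *             ipcThreads = 16
 *             startTimeout = 30
 *             announceStartup = true
 */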

static int fsmonitor_config(const char *var, const char *value, void *cb)
{
	if (!strcmp(var, FSMONITOR__IPC_THREADS)) {
		int i = git_config_int(var, value);
		if (i < 1)
			return error(_("value of '%s' out of range: %d"),
				     FSMONITOR__IPC_THREADS, i);
		fsmonitor__ipc_threads = i;
		return 0;
	}

	if (!strcmp(var, FSMONITOR__START_TIMEOUT)) {
		int i = git_config_int(var, value);
		if (i < 0)
			return error(_("value of '%s' out of range: %d"),
				     FSMONITOR__START_TIMEOUT, i);
		fsmonitor__start_timeout_sec = i;
		return 0;
	}

	if (!strcmp(var, FSMONITOR__ANNOUNCE_STARTUP)) {
		int is_bool;
		int i = git_config_bool_or_int(var, value, &is_bool);
		if (i < 0)
			return error(_("value of '%s' not bool or int: %d"),
				     var, i);
		fsmonitor__announce_startup = i;
		return 0;
	}

	return git_default_config(var, value, cb);
}

/*
 * Acting as a CLIENT.
 *
 * Send a "quit" command to the `git-fsmonitor--daemon` (if running)
 * and wait for it to shutdown.
 */
static int do_as_client__send_stop(void)
{
	struct strbuf answer = STRBUF_INIT;
	int ret;

	ret = fsmonitor_ipc__send_command("quit", &answer);

	/* The quit command does not return any response data. */
	strbuf_release(&answer);

	if (ret)
		return ret;

	trace2_region_enter("fsm_client", "polling-for-daemon-exit", NULL);
	while (fsmonitor_ipc__get_state() == IPC_STATE__LISTENING)
		sleep_millisec(50);
	trace2_region_leave("fsm_client", "polling-for-daemon-exit", NULL);

	return 0;
}

static int do_as_client__status(void)
{
	enum ipc_active_state state = fsmonitor_ipc__get_state();

	switch (state) {
	case IPC_STATE__LISTENING:
		printf(_("fsmonitor-daemon is watching '%s'\n"),
		       the_repository->worktree);
		return 0;

	default:
		printf(_("fsmonitor-daemon is not watching '%s'\n"),
		       the_repository->worktree);
		return 1;
	}
}

enum fsmonitor_cookie_item_result {
	FCIR_ERROR = -1, /* could not create cookie file ? */
	FCIR_INIT,
	FCIR_SEEN,
	FCIR_ABORT,
};

struct fsmonitor_cookie_item {
	struct hashmap_entry entry;
	char *name;
	enum fsmonitor_cookie_item_result result;
};

static int cookies_cmp(const void *data, const struct hashmap_entry *he1,
		       const struct hashmap_entry *he2, const void *keydata)
{
	const struct fsmonitor_cookie_item *a =
		container_of(he1, const struct fsmonitor_cookie_item, entry);
	const struct fsmonitor_cookie_item *b =
		container_of(he2, const struct fsmonitor_cookie_item, entry);

	return strcmp(a->name, keydata ? keydata : b->name);
}

static enum fsmonitor_cookie_item_result with_lock__wait_for_cookie(
	struct fsmonitor_daemon_state *state)
{
	/* assert current thread holding state->main_lock */

	int fd;
	struct fsmonitor_cookie_item *cookie;
	struct strbuf cookie_pathname = STRBUF_INIT;
	struct strbuf cookie_filename = STRBUF_INIT;
	enum fsmonitor_cookie_item_result result;
	int my_cookie_seq;

	CALLOC_ARRAY(cookie, 1);

	my_cookie_seq = state->cookie_seq++;

	strbuf_addf(&cookie_filename, "%i-%i", getpid(), my_cookie_seq);

	strbuf_addbuf(&cookie_pathname, &state->path_cookie_prefix);
	strbuf_addbuf(&cookie_pathname, &cookie_filename);
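	/*
	 * For example (illustrative), the resulting cookie path might be
	 * "<gitdir>/fsmonitor--daemon/cookies/12345-42".
	 */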

	cookie->name = strbuf_detach(&cookie_filename, NULL);
	cookie->result = FCIR_INIT;
	hashmap_entry_init(&cookie->entry, strhash(cookie->name));

	hashmap_add(&state->cookies, &cookie->entry);

	trace_printf_key(&trace_fsmonitor, "cookie-wait: '%s' '%s'",
			 cookie->name, cookie_pathname.buf);

	/*
	 * Create the cookie file on disk and then wait for a notification
	 * that the listener thread has seen it.
	 */
	fd = open(cookie_pathname.buf, O_WRONLY | O_CREAT | O_EXCL, 0600);
	if (fd < 0) {
		error_errno(_("could not create fsmonitor cookie '%s'"),
			    cookie->name);

		cookie->result = FCIR_ERROR;
		goto done;
	}

	/*
	 * Technically, close() and unlink() can fail, but we don't
	 * care here. We only created the file to trigger a watch
	 * event from the FS so that we know when we're up to date.
	 */
	close(fd);
	unlink(cookie_pathname.buf);

	/*
	 * Technically, this is an infinite wait (well, unless another
	 * thread sends us an abort). I'd like to change this to
	 * use `pthread_cond_timedwait()` and return an error/timeout
	 * and let the caller do the trivial response thing, but we
	 * don't have that routine in our thread-utils.
	 *
	 * After extensive beta testing I'm not really worried about
	 * this. Also note that the above open() and unlink() calls
	 * will cause at least two FS events on that path, so the odds
	 * of getting stuck are pretty slim.
	 */
	while (cookie->result == FCIR_INIT)
		pthread_cond_wait(&state->cookies_cond,
				  &state->main_lock);

done:
	hashmap_remove(&state->cookies, &cookie->entry, NULL);

	result = cookie->result;

	free(cookie->name);
	free(cookie);
	strbuf_release(&cookie_pathname);

	return result;
}

/*
 * Mark these cookies as _SEEN and wake up the corresponding client threads.
 */
static void with_lock__mark_cookies_seen(struct fsmonitor_daemon_state *state,
					 const struct string_list *cookie_names)
{
	/* assert current thread holding state->main_lock */

	int k;
	int nr_seen = 0;

	for (k = 0; k < cookie_names->nr; k++) {
		struct fsmonitor_cookie_item key;
		struct fsmonitor_cookie_item *cookie;

		key.name = cookie_names->items[k].string;
		hashmap_entry_init(&key.entry, strhash(key.name));

		cookie = hashmap_get_entry(&state->cookies, &key, entry, NULL);
		if (cookie) {
			trace_printf_key(&trace_fsmonitor, "cookie-seen: '%s'",
					 cookie->name);
			cookie->result = FCIR_SEEN;
			nr_seen++;
		}
	}

	if (nr_seen)
		pthread_cond_broadcast(&state->cookies_cond);
}

/*
 * Set _ABORT on all pending cookies and wake up all client threads.
 */
static void with_lock__abort_all_cookies(struct fsmonitor_daemon_state *state)
{
	/* assert current thread holding state->main_lock */

	struct hashmap_iter iter;
	struct fsmonitor_cookie_item *cookie;
	int nr_aborted = 0;

	hashmap_for_each_entry(&state->cookies, &iter, cookie, entry) {
		trace_printf_key(&trace_fsmonitor, "cookie-abort: '%s'",
				 cookie->name);
		cookie->result = FCIR_ABORT;
		nr_aborted++;
	}

	if (nr_aborted)
		pthread_cond_broadcast(&state->cookies_cond);
}

/*
 * Requests to and from a FSMonitor Protocol V2 provider use an opaque
 * "token" as a virtual timestamp. Clients can request a summary of all
 * created/deleted/modified files relative to a token. In the response,
 * clients receive a new token for the next (relative) request.
 *
 *
 * Token Format
 * ============
 *
 * The contents of the token are private and provider-specific.
 *
 * For the built-in fsmonitor--daemon, we define a token as follows:
 *
 *     "builtin" ":" <token_id> ":" <sequence_nr>
 *
 * The "builtin" prefix is used as a namespace to avoid conflicts
 * with other providers (such as Watchman).
 *
 * The <token_id> is an arbitrary OPAQUE string, such as a GUID,
 * UUID, or {timestamp,pid}. It is used to group all filesystem
 * events that happened while the daemon was monitoring (and in-sync
 * with the filesystem).
 *
 * Unlike FSMonitor Protocol V1, it is not defined as a timestamp
 * and does not define less-than/greater-than relationships.
 * (There are too many race conditions to rely on file system
 * event timestamps.)
 *
 * The <sequence_nr> is a simple integer incremented whenever the
 * daemon needs to make its state public. For example, if 1000 file
 * system events come in, but no clients have requested the data,
 * the daemon can continue to accumulate file changes in the same
 * bin and does not need to advance the sequence number. However,
 * as soon as a client does arrive, the daemon needs to start a new
 * bin and increment the sequence number.
 *
 * The sequence number serves as the boundary between 2 sets
 * of bins -- the older ones that the client has already seen
 * and the newer ones that it hasn't.
 *
 * When a new <token_id> is created, the <sequence_nr> is reset to
 * zero.
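 *
 * For example (illustrative values only), a complete token might look
 * like "builtin:0.12345.20220101T010203.000000Z:3", where the middle
 * portion is the <token_id> and "3" is the <sequence_nr>.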
 *
 *
 * About Token Ids
 * ===============
 *
 * A new token_id is created:
 *
 * [1] each time the daemon is started.
 *
 * [2] any time that the daemon must re-sync with the filesystem
 *     (such as when the kernel drops or we miss events on a very
 *     active volume).
 *
 * [3] in response to a client "flush" command (for dropped event
 *     testing).
 *
 * When a new token_id is created, the daemon is free to discard all
 * cached filesystem events associated with any previous token_ids.
 * Events associated with a non-current token_id will never be sent
 * to a client. A token_id change implicitly means that the daemon
 * has a gap in its event history.
 *
 * Therefore, clients that present a token with a stale (non-current)
 * token_id will always be given a trivial response.
 */
struct fsmonitor_token_data {
	struct strbuf token_id;
	struct fsmonitor_batch *batch_head;
	struct fsmonitor_batch *batch_tail;
	uint64_t client_ref_count;
};

struct fsmonitor_batch {
	struct fsmonitor_batch *next;
	uint64_t batch_seq_nr;
	const char **interned_paths;
	size_t nr, alloc;
	time_t pinned_time;
};

static struct fsmonitor_token_data *fsmonitor_new_token_data(void)
{
	static int test_env_value = -1;
	static uint64_t flush_count = 0;
	struct fsmonitor_token_data *token;
	struct fsmonitor_batch *batch;

	CALLOC_ARRAY(token, 1);
	batch = fsmonitor_batch__new();

	strbuf_init(&token->token_id, 0);
	token->batch_head = batch;
	token->batch_tail = batch;
	token->client_ref_count = 0;

	if (test_env_value < 0)
		test_env_value = git_env_bool("GIT_TEST_FSMONITOR_TOKEN", 0);

	if (!test_env_value) {
		struct timeval tv;
		struct tm tm;
		time_t secs;

		gettimeofday(&tv, NULL);
		secs = tv.tv_sec;
		gmtime_r(&secs, &tm);

		strbuf_addf(&token->token_id,
			    "%"PRIu64".%d.%4d%02d%02dT%02d%02d%02d.%06ldZ",
			    flush_count++,
			    getpid(),
			    tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
			    tm.tm_hour, tm.tm_min, tm.tm_sec,
			    (long)tv.tv_usec);
	} else {
		strbuf_addf(&token->token_id, "test_%08x", test_env_value++);
	}

	/*
	 * We created a new <token_id> and are starting a new series
	 * of tokens with a zero <seq_nr>.
	 *
	 * Since clients cannot guess our new (non test) <token_id>
	 * they will always receive a trivial response (because of the
	 * mismatch on the <token_id>). The trivial response will
	 * tell them our new <token_id> so that subsequent requests
	 * will be relative to our new series. (And when sending that
	 * response, we pin the current head of the batch list.)
	 *
	 * Even if the client correctly guesses the <token_id>, their
	 * request of "builtin:<token_id>:0" asks for all changes MORE
	 * RECENT than batch/bin 0.
	 *
	 * This implies that it is a waste to accumulate paths in the
	 * initial batch/bin (because they will never be transmitted).
	 *
	 * So the daemon could be running for days and watching the
	 * file system, but doesn't need to actually accumulate any
	 * paths UNTIL we need to set a reference point for a later
	 * relative request.
	 *
	 * However, it is very useful for testing to always have a
	 * reference point set. Pin batch 0 to force early file system
	 * events to accumulate.
	 */
	if (test_env_value)
		batch->pinned_time = time(NULL);

	return token;
}

struct fsmonitor_batch *fsmonitor_batch__new(void)
{
	struct fsmonitor_batch *batch;

	CALLOC_ARRAY(batch, 1);

	return batch;
}

void fsmonitor_batch__free_list(struct fsmonitor_batch *batch)
{
	while (batch) {
		struct fsmonitor_batch *next = batch->next;

		/*
		 * The actual strings within the array of this batch
		 * are interned, so we don't own them. We only own
		 * the array.
		 */
		free(batch->interned_paths);
		free(batch);

		batch = next;
	}
}

void fsmonitor_batch__add_path(struct fsmonitor_batch *batch,
			       const char *path)
{
	const char *interned_path = strintern(path);

	trace_printf_key(&trace_fsmonitor, "event: %s", interned_path);

	ALLOC_GROW(batch->interned_paths, batch->nr + 1, batch->alloc);
	batch->interned_paths[batch->nr++] = interned_path;
}

static void fsmonitor_batch__combine(struct fsmonitor_batch *batch_dest,
				     const struct fsmonitor_batch *batch_src)
{
	size_t k;

	ALLOC_GROW(batch_dest->interned_paths,
		   batch_dest->nr + batch_src->nr + 1,
		   batch_dest->alloc);

	for (k = 0; k < batch_src->nr; k++)
		batch_dest->interned_paths[batch_dest->nr++] =
			batch_src->interned_paths[k];
}

/*
 * To keep the batch list from growing unbounded in response to filesystem
 * activity, we try to truncate old batches from the end of the list as
 * they become irrelevant.
 *
 * We assume that the .git/index will be updated with the most recent token
 * any time the index is updated. And future commands will only ask for
 * recent changes *since* that new token. So as tokens advance into the
 * future, older batch items will never be requested/needed. So we can
 * truncate them without loss of functionality.
 *
 * However, multiple commands may be talking to the daemon concurrently
 * or performing a slow command, so a little "token skew" is possible.
 * Therefore, we want this to be a little bit lazy and have a generous
 * delay.
 *
 * The current reader thread walked backwards in time from `token->batch_head`
 * back to `batch_marker` somewhere in the middle of the batch list.
 *
 * Let's walk backwards in time from that marker an arbitrary delay
 * and truncate the list there. Note that these timestamps are completely
 * artificial (based on when we pinned the batch item) and not on any
 * filesystem activity.
 *
 * Return the obsolete portion of the list after we have removed it from
 * the official list so that the caller can free it after leaving the lock.
 */
#define MY_TIME_DELAY_SECONDS (5 * 60) /* seconds */

static struct fsmonitor_batch *with_lock__truncate_old_batches(
	struct fsmonitor_daemon_state *state,
	const struct fsmonitor_batch *batch_marker)
{
	/* assert current thread holding state->main_lock */

	const struct fsmonitor_batch *batch;
	struct fsmonitor_batch *remainder;

	if (!batch_marker)
		return NULL;

	trace_printf_key(&trace_fsmonitor, "Truncate: mark (%"PRIu64",%"PRIu64")",
			 batch_marker->batch_seq_nr,
			 (uint64_t)batch_marker->pinned_time);

	for (batch = batch_marker; batch; batch = batch->next) {
		time_t t;

		if (!batch->pinned_time) /* an overflow batch */
			continue;

		t = batch->pinned_time + MY_TIME_DELAY_SECONDS;
		if (t > batch_marker->pinned_time) /* too close to marker */
			continue;

		goto truncate_past_here;
	}

	return NULL;

truncate_past_here:
	state->current_token_data->batch_tail = (struct fsmonitor_batch *)batch;

	remainder = ((struct fsmonitor_batch *)batch)->next;
	((struct fsmonitor_batch *)batch)->next = NULL;

	return remainder;
}

static void fsmonitor_free_token_data(struct fsmonitor_token_data *token)
{
	if (!token)
		return;

	assert(token->client_ref_count == 0);

	strbuf_release(&token->token_id);

	fsmonitor_batch__free_list(token->batch_head);

	free(token);
}

/*
 * Flush all of our cached data about the filesystem. Call this if we
 * lose sync with the filesystem and miss some notification events.
 *
 * [1] If we are missing events, then we no longer have a complete
 *     history of the directory (relative to our current start token).
 *     We should create a new token and start fresh (as if we just
 *     booted up).
 *
 * [2] Some of those lost events may have been for cookie files. We
 *     should assume the worst and abort them rather than letting
 *     them starve.
 *
 * If there are no concurrent threads reading the current token data
 * series, we can free it now. Otherwise, let the last reader free
 * it.
 *
 * Either way, the old token data series is no longer associated with
 * our state data.
 */
static void with_lock__do_force_resync(struct fsmonitor_daemon_state *state)
{
	/* assert current thread holding state->main_lock */

	struct fsmonitor_token_data *free_me = NULL;
	struct fsmonitor_token_data *new_one = NULL;

	new_one = fsmonitor_new_token_data();

	if (state->current_token_data->client_ref_count == 0)
		free_me = state->current_token_data;
	state->current_token_data = new_one;

	fsmonitor_free_token_data(free_me);

	with_lock__abort_all_cookies(state);
}

void fsmonitor_force_resync(struct fsmonitor_daemon_state *state)
{
	pthread_mutex_lock(&state->main_lock);
	with_lock__do_force_resync(state);
	pthread_mutex_unlock(&state->main_lock);
}

/*
 * Format an opaque token string to send to the client.
 */
static void with_lock__format_response_token(
	struct strbuf *response_token,
	const struct strbuf *response_token_id,
	const struct fsmonitor_batch *batch)
{
	/* assert current thread holding state->main_lock */

	strbuf_reset(response_token);
	strbuf_addf(response_token, "builtin:%s:%"PRIu64,
		    response_token_id->buf, batch->batch_seq_nr);
}

/*
 * Parse an opaque token from the client.
 * Returns -1 on error.
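 *
 * For example (illustrative), parsing "builtin:abc:25" sets
 * <requested_token_id> to "abc" and <seq_nr> to 25.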
 */
static int fsmonitor_parse_client_token(const char *buf_token,
					struct strbuf *requested_token_id,
					uint64_t *seq_nr)
{
	const char *p;
	char *p_end;

	strbuf_reset(requested_token_id);
	*seq_nr = 0;

	if (!skip_prefix(buf_token, "builtin:", &p))
		return -1;

	while (*p && *p != ':')
		strbuf_addch(requested_token_id, *p++);
	if (!*p++)
		return -1;

	*seq_nr = (uint64_t)strtoumax(p, &p_end, 10);
	if (*p_end)
		return -1;

	return 0;
}

KHASH_INIT(str, const char *, int, 0, kh_str_hash_func, kh_str_hash_equal)

static int do_handle_client(struct fsmonitor_daemon_state *state,
			    const char *command,
			    ipc_server_reply_cb *reply,
			    struct ipc_server_reply_data *reply_data)
{
	struct fsmonitor_token_data *token_data = NULL;
	struct strbuf response_token = STRBUF_INIT;
	struct strbuf requested_token_id = STRBUF_INIT;
	struct strbuf payload = STRBUF_INIT;
	uint64_t requested_oldest_seq_nr = 0;
	uint64_t total_response_len = 0;
	const char *p;
	const struct fsmonitor_batch *batch_head;
	const struct fsmonitor_batch *batch;
	struct fsmonitor_batch *remainder = NULL;
	intmax_t count = 0, duplicates = 0;
	kh_str_t *shown;
	int hash_ret;
	int do_trivial = 0;
	int do_flush = 0;
	int do_cookie = 0;
	enum fsmonitor_cookie_item_result cookie_result;

	/*
	 * We expect `command` to be of the form:
	 *
	 * <command> := quit NUL
	 *            | flush NUL
	 *            | <V1-time-since-epoch-ns> NUL
	 *            | <V2-opaque-fsmonitor-token> NUL
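	 *
	 * For example (illustrative): "quit", "flush",
	 * "1643754603123456789" (a V1 nanosecond timestamp), or
	 * "builtin:0.12345.20220101T010203.000000Z:3" (a V2 token).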
	 */

	if (!strcmp(command, "quit")) {
		/*
		 * A client has requested over the socket/pipe that the
		 * daemon shutdown.
		 *
		 * Tell the IPC thread pool to shutdown (which completes
		 * the await in the main thread (which can stop the
		 * fsmonitor listener thread)).
		 *
		 * There is no reply to the client.
		 */
		return SIMPLE_IPC_QUIT;

	} else if (!strcmp(command, "flush")) {
		/*
		 * Flush all of our cached data and generate a new token
		 * just like if we lost sync with the filesystem.
		 *
		 * Then send a trivial response using the new token.
		 */
		do_flush = 1;
		do_trivial = 1;

	} else if (!skip_prefix(command, "builtin:", &p)) {
		/* assume V1 timestamp or garbage */

		char *p_end;

		strtoumax(command, &p_end, 10);
		trace_printf_key(&trace_fsmonitor,
				 ((*p_end) ?
				  "fsmonitor: invalid command line '%s'" :
				  "fsmonitor: unsupported V1 protocol '%s'"),
				 command);
		do_trivial = 1;

	} else {
		/* We have "builtin:*" */
		if (fsmonitor_parse_client_token(command, &requested_token_id,
						 &requested_oldest_seq_nr)) {
			trace_printf_key(&trace_fsmonitor,
					 "fsmonitor: invalid V2 protocol token '%s'",
					 command);
			do_trivial = 1;

		} else {
			/*
			 * We have a V2 valid token:
			 *     "builtin:<token_id>:<seq_nr>"
			 */
			do_cookie = 1;
		}
	}

	pthread_mutex_lock(&state->main_lock);

	if (!state->current_token_data)
		BUG("fsmonitor state does not have a current token");

	/*
	 * Write a cookie file inside the directory being watched in
	 * an effort to flush out existing filesystem events that we
	 * actually care about. Suspend this client thread until we
	 * see the filesystem events for this cookie file.
	 *
	 * Creating the cookie lets us guarantee that our FS listener
	 * thread has drained the kernel queue and we are caught up
	 * with the kernel.
	 *
	 * If we cannot create the cookie (or otherwise guarantee that
	 * we are caught up), we send a trivial response. We have to
	 * assume that there might be some very, very recent activity
	 * on the FS still in flight.
	 */
	if (do_cookie) {
		cookie_result = with_lock__wait_for_cookie(state);
		if (cookie_result != FCIR_SEEN) {
			error(_("fsmonitor: cookie_result '%d' != SEEN"),
			      cookie_result);
			do_trivial = 1;
		}
	}

	if (do_flush)
		with_lock__do_force_resync(state);

	/*
	 * We mark the current head of the batch list as "pinned" so
	 * that the listener thread will treat this item as read-only
	 * (and prevent any more paths from being added to it) from
	 * now on.
	 */
	token_data = state->current_token_data;
	batch_head = token_data->batch_head;
	((struct fsmonitor_batch *)batch_head)->pinned_time = time(NULL);

	/*
	 * FSMonitor Protocol V2 requires that we send a response header
	 * with a "new current token" and then all of the paths that changed
	 * since the "requested token". We send the seq_nr of the just-pinned
	 * head batch so that future requests from a client will be relative
	 * to it.
	 */
	with_lock__format_response_token(&response_token,
					 &token_data->token_id, batch_head);

	reply(reply_data, response_token.buf, response_token.len + 1);
	total_response_len += response_token.len + 1;

	trace2_data_string("fsmonitor", the_repository, "response/token",
			   response_token.buf);
	trace_printf_key(&trace_fsmonitor, "response token: %s",
			 response_token.buf);

	if (!do_trivial) {
		if (strcmp(requested_token_id.buf, token_data->token_id.buf)) {
			/*
			 * The client last spoke to a different daemon
			 * instance -OR- the daemon had to resync with
			 * the filesystem (and lost events), so reject.
			 */
			trace2_data_string("fsmonitor", the_repository,
					   "response/token", "different");
			do_trivial = 1;

		} else if (requested_oldest_seq_nr <
			   token_data->batch_tail->batch_seq_nr) {
			/*
			 * The client wants older events than we have for
			 * this token_id. This means that the end of our
			 * batch list was truncated and we cannot give the
			 * client a complete snapshot relative to their
			 * request.
			 */
			trace_printf_key(&trace_fsmonitor,
					 "client requested truncated data");
			do_trivial = 1;
		}
	}

	if (do_trivial) {
		pthread_mutex_unlock(&state->main_lock);

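		/*
		 * The trivial response body is a single "/" entry,
		 * which tells the client to treat everything as
		 * changed (i.e. do a full rescan).
		 */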
		reply(reply_data, "/", 2);

		trace2_data_intmax("fsmonitor", the_repository,
				   "response/trivial", 1);

		goto cleanup;
	}

	/*
	 * We're going to hold onto a pointer to the current
	 * token-data while we walk the list of batches of files.
	 * During this time, we will NOT be under the lock.
	 * So we ref-count it.
	 *
	 * This allows the listener thread to continue prepending
	 * new batches of items to the token-data (which we'll ignore).
	 *
	 * AND it allows the listener thread to do a token-reset
	 * (and install a new `current_token_data`).
	 */
	token_data->client_ref_count++;

	pthread_mutex_unlock(&state->main_lock);

	/*
	 * The client request is relative to the token that they sent,
	 * so walk the batch list backwards from the current head back
	 * to the batch (sequence number) they named.
	 *
	 * We use khash to de-dup the list of pathnames.
	 *
	 * NEEDSWORK: each batch contains a list of interned strings,
	 * so we only need to do pointer comparisons here to build the
	 * hash table. Currently, we're still comparing the string
	 * values.
	 */
	shown = kh_init_str();
	for (batch = batch_head;
	     batch && batch->batch_seq_nr > requested_oldest_seq_nr;
	     batch = batch->next) {
		size_t k;

		for (k = 0; k < batch->nr; k++) {
			const char *s = batch->interned_paths[k];
			size_t s_len;

			if (kh_get_str(shown, s) != kh_end(shown))
				duplicates++;
			else {
				kh_put_str(shown, s, &hash_ret);

				trace_printf_key(&trace_fsmonitor,
						 "send[%"PRIuMAX"]: %s",
						 count, s);

				/* Each path gets written with a trailing NUL */
				s_len = strlen(s) + 1;

				if (payload.len + s_len >=
				    LARGE_PACKET_DATA_MAX) {
					reply(reply_data, payload.buf,
					      payload.len);
					total_response_len += payload.len;
					strbuf_reset(&payload);
				}

				strbuf_add(&payload, s, s_len);
				count++;
			}
		}
	}

	if (payload.len) {
		reply(reply_data, payload.buf, payload.len);
		total_response_len += payload.len;
	}

	kh_release_str(shown);

	pthread_mutex_lock(&state->main_lock);

	if (token_data->client_ref_count > 0)
		token_data->client_ref_count--;

	if (token_data->client_ref_count == 0) {
		if (token_data != state->current_token_data) {
			/*
			 * The listener thread did a token-reset while we were
			 * walking the batch list. Therefore, this token is
			 * stale and can be discarded completely. If we are
			 * the last reader thread using this token, we own
			 * that work.
			 */
			fsmonitor_free_token_data(token_data);
		} else if (batch) {
			/*
			 * We are holding the lock and are the only
			 * reader of the ref-counted portion of the
			 * list, so we get the honor of seeing if the
			 * list can be truncated to save memory.
			 *
			 * The main loop did not walk to the end of the
			 * list, so this batch is the first item in the
			 * batch-list that is older than the requested
			 * end-point sequence number. See if the tail
			 * end of the list is obsolete.
			 */
			remainder = with_lock__truncate_old_batches(state,
								    batch);
		}
	}

	pthread_mutex_unlock(&state->main_lock);

	if (remainder)
		fsmonitor_batch__free_list(remainder);

	trace2_data_intmax("fsmonitor", the_repository, "response/length", total_response_len);
	trace2_data_intmax("fsmonitor", the_repository, "response/count/files", count);
	trace2_data_intmax("fsmonitor", the_repository, "response/count/duplicates", duplicates);

cleanup:
	strbuf_release(&response_token);
	strbuf_release(&requested_token_id);
	strbuf_release(&payload);

	return 0;
}

static ipc_server_application_cb handle_client;

static int handle_client(void *data,
			 const char *command, size_t command_len,
			 ipc_server_reply_cb *reply,
			 struct ipc_server_reply_data *reply_data)
{
	struct fsmonitor_daemon_state *state = data;
	int result;

	/*
	 * The Simple IPC API now supports {char*, len} arguments, but
	 * FSMonitor always uses proper null-terminated strings, so
	 * we can ignore the command_len argument. (Trust, but verify.)
	 */
	if (command_len != strlen(command))
		BUG("FSMonitor assumes text messages");

	trace_printf_key(&trace_fsmonitor, "requested token: %s", command);

	trace2_region_enter("fsmonitor", "handle_client", the_repository);
	trace2_data_string("fsmonitor", the_repository, "request", command);

	result = do_handle_client(state, command, reply, reply_data);

	trace2_region_leave("fsmonitor", "handle_client", the_repository);

	return result;
}

#define FSMONITOR_DIR "fsmonitor--daemon"
#define FSMONITOR_COOKIE_DIR "cookies"
#define FSMONITOR_COOKIE_PREFIX (FSMONITOR_DIR "/" FSMONITOR_COOKIE_DIR "/")

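/*
 * Classify a pathname relative to the root of the working directory,
 * for example (illustrative):
 *
 *     "src/foo.c"                            -> IS_WORKDIR_PATH
 *     ".gitignore"                           -> IS_WORKDIR_PATH
 *     ".git"                                 -> IS_DOT_GIT
 *     ".git/config"                          -> IS_INSIDE_DOT_GIT
 *     ".git/fsmonitor--daemon/cookies/123-0" -> IS_INSIDE_DOT_GIT_WITH_COOKIE_PREFIX
 */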
enum fsmonitor_path_type fsmonitor_classify_path_workdir_relative(
	const char *rel)
{
	if (fspathncmp(rel, ".git", 4))
		return IS_WORKDIR_PATH;
	rel += 4;

	if (!*rel)
		return IS_DOT_GIT;
	if (*rel != '/')
		return IS_WORKDIR_PATH; /* e.g. .gitignore */
	rel++;

	if (!fspathncmp(rel, FSMONITOR_COOKIE_PREFIX,
			strlen(FSMONITOR_COOKIE_PREFIX)))
		return IS_INSIDE_DOT_GIT_WITH_COOKIE_PREFIX;

	return IS_INSIDE_DOT_GIT;
}

enum fsmonitor_path_type fsmonitor_classify_path_gitdir_relative(
	const char *rel)
{
	if (!fspathncmp(rel, FSMONITOR_COOKIE_PREFIX,
			strlen(FSMONITOR_COOKIE_PREFIX)))
		return IS_INSIDE_GITDIR_WITH_COOKIE_PREFIX;

	return IS_INSIDE_GITDIR;
}

static enum fsmonitor_path_type try_classify_workdir_abs_path(
	struct fsmonitor_daemon_state *state,
	const char *path)
{
	const char *rel;

	if (fspathncmp(path, state->path_worktree_watch.buf,
		       state->path_worktree_watch.len))
		return IS_OUTSIDE_CONE;

	rel = path + state->path_worktree_watch.len;

	if (!*rel)
		return IS_WORKDIR_PATH; /* it is the root dir exactly */
	if (*rel != '/')
		return IS_OUTSIDE_CONE;
	rel++;

	return fsmonitor_classify_path_workdir_relative(rel);
}

enum fsmonitor_path_type fsmonitor_classify_path_absolute(
	struct fsmonitor_daemon_state *state,
	const char *path)
{
	const char *rel;
	enum fsmonitor_path_type t;

	t = try_classify_workdir_abs_path(state, path);
	if (state->nr_paths_watching == 1)
		return t;
	if (t != IS_OUTSIDE_CONE)
		return t;

	if (fspathncmp(path, state->path_gitdir_watch.buf,
		       state->path_gitdir_watch.len))
		return IS_OUTSIDE_CONE;

	rel = path + state->path_gitdir_watch.len;

	if (!*rel)
		return IS_GITDIR; /* it is the <gitdir> exactly */
	if (*rel != '/')
		return IS_OUTSIDE_CONE;
	rel++;

	return fsmonitor_classify_path_gitdir_relative(rel);
}

/*
 * We try to combine small batches at the front of the batch-list to avoid
 * having a long list. This hopefully makes it a little easier when we want
 * to truncate and maintain the list. However, we don't want the paths array
 * to just keep growing and growing with realloc, so we insert an arbitrary
 * limit.
 */
#define MY_COMBINE_LIMIT (1024)

void fsmonitor_publish(struct fsmonitor_daemon_state *state,
		       struct fsmonitor_batch *batch,
		       const struct string_list *cookie_names)
{
	if (!batch && !cookie_names->nr)
		return;

	pthread_mutex_lock(&state->main_lock);

	if (batch) {
		struct fsmonitor_batch *head;

		head = state->current_token_data->batch_head;
		if (!head) {
			BUG("token does not have batch");
		} else if (head->pinned_time) {
			/*
			 * We cannot alter the current batch list
			 * because:
			 *
			 * [a] it is being transmitted to at least one
			 * client and the handle_client() thread has a
			 * ref-count, but not a lock on the batch list
			 * starting with this item.
			 *
			 * [b] it has been transmitted in the past to
			 * at least one client such that future
			 * requests are relative to this head batch.
			 *
			 * So, we can only prepend a new batch onto
			 * the front of the list.
			 */
			batch->batch_seq_nr = head->batch_seq_nr + 1;
			batch->next = head;
			state->current_token_data->batch_head = batch;
		} else if (!head->batch_seq_nr) {
			/*
			 * Batch 0 is unpinned. See the note in
			 * `fsmonitor_new_token_data()` about why we
			 * don't need to accumulate these paths.
			 */
			fsmonitor_batch__free_list(batch);
		} else if (head->nr + batch->nr > MY_COMBINE_LIMIT) {
			/*
			 * The head batch in the list has never been
			 * transmitted to a client, but folding the
			 * contents of the new batch onto it would
			 * exceed our arbitrary limit, so just prepend
			 * the new batch onto the list.
			 */
			batch->batch_seq_nr = head->batch_seq_nr + 1;
			batch->next = head;
			state->current_token_data->batch_head = batch;
		} else {
			/*
			 * We are free to add the paths in the given
			 * batch onto the end of the current head batch.
			 */
			fsmonitor_batch__combine(head, batch);
			fsmonitor_batch__free_list(batch);
		}
	}

	if (cookie_names->nr)
		with_lock__mark_cookies_seen(state, cookie_names);

	pthread_mutex_unlock(&state->main_lock);
}

static void *fsm_listen__thread_proc(void *_state)
{
	struct fsmonitor_daemon_state *state = _state;

	trace2_thread_start("fsm-listen");

	trace_printf_key(&trace_fsmonitor, "Watching: worktree '%s'",
			 state->path_worktree_watch.buf);
	if (state->nr_paths_watching > 1)
		trace_printf_key(&trace_fsmonitor, "Watching: gitdir '%s'",
				 state->path_gitdir_watch.buf);

	fsm_listen__loop(state);

	pthread_mutex_lock(&state->main_lock);
	if (state->current_token_data &&
	    state->current_token_data->client_ref_count == 0)
		fsmonitor_free_token_data(state->current_token_data);
	state->current_token_data = NULL;
	pthread_mutex_unlock(&state->main_lock);

	trace2_thread_exit();
	return NULL;
}

static int fsmonitor_run_daemon_1(struct fsmonitor_daemon_state *state)
{
	struct ipc_server_opts ipc_opts = {
		.nr_threads = fsmonitor__ipc_threads,

		/*
		 * We know that there are no other active threads yet,
		 * so we can let the IPC layer temporarily chdir() if
		 * it needs to when creating the server side of the
		 * Unix domain socket.
		 */
		.uds_disallow_chdir = 0
	};

	/*
	 * Start the IPC thread pool before we start the file
	 * system event listener thread so that we have the IPC handle
	 * before we need it.
	 */
	if (ipc_server_run_async(&state->ipc_server_data,
				 state->path_ipc.buf, &ipc_opts,
				 handle_client, state))
		return error_errno(
			_("could not start IPC thread pool on '%s'"),
			state->path_ipc.buf);

	/*
	 * Start the fsmonitor listener thread to collect filesystem
	 * events.
	 */
	if (pthread_create(&state->listener_thread, NULL,
			   fsm_listen__thread_proc, state) < 0) {
		ipc_server_stop_async(state->ipc_server_data);
		ipc_server_await(state->ipc_server_data);

		return error(_("could not start fsmonitor listener thread"));
	}

	/*
	 * The daemon is now fully functional in background threads.
	 * Wait for the IPC thread pool to shutdown (whether by client
	 * request or from filesystem activity).
	 */
	ipc_server_await(state->ipc_server_data);

	/*
	 * The fsmonitor listener thread may have received a shutdown
	 * event from the IPC thread pool, but it doesn't hurt to tell
	 * it again. And wait for it to shutdown.
	 */
	fsm_listen__stop_async(state);
	pthread_join(state->listener_thread, NULL);

	return state->error_code;
}

static int fsmonitor_run_daemon(void)
{
	struct fsmonitor_daemon_state state;
	const char *home;
	int err;

	memset(&state, 0, sizeof(state));

	hashmap_init(&state.cookies, cookies_cmp, NULL, 0);
	pthread_mutex_init(&state.main_lock, NULL);
	pthread_cond_init(&state.cookies_cond, NULL);
	state.error_code = 0;
	state.current_token_data = fsmonitor_new_token_data();

	/* Prepare to (recursively) watch the <worktree-root> directory. */
	strbuf_init(&state.path_worktree_watch, 0);
	strbuf_addstr(&state.path_worktree_watch, absolute_path(get_git_work_tree()));
	state.nr_paths_watching = 1;

	/*
	 * We create and delete cookie files somewhere inside the .git
	 * directory to help us keep sync with the file system. If
	 * ".git" is not a directory, then <gitdir> is not inside the
	 * cone of <worktree-root>, so set up a second watch to watch
	 * the <gitdir> so that we get events for the cookie files.
	 */
	strbuf_init(&state.path_gitdir_watch, 0);
	strbuf_addbuf(&state.path_gitdir_watch, &state.path_worktree_watch);
	strbuf_addstr(&state.path_gitdir_watch, "/.git");
	if (!is_directory(state.path_gitdir_watch.buf)) {
		strbuf_reset(&state.path_gitdir_watch);
		strbuf_addstr(&state.path_gitdir_watch, absolute_path(get_git_dir()));
		state.nr_paths_watching = 2;
	}

	/*
	 * We will write filesystem syncing cookie files into
	 * <gitdir>/<fsmonitor-dir>/<cookie-dir>/<pid>-<seq>.
	 *
	 * The extra layers of subdirectories here keep us from
	 * changing the mtime on ".git/" or ".git/foo/" when we create
	 * or delete cookie files.
	 *
	 * There have been problems with some IDEs that do a
	 * non-recursive watch of the ".git/" directory and run a
	 * series of commands any time something happens.
	 *
	 * For example, if we place our cookie files directly in
	 * ".git/" or ".git/foo/" then a `git status` (or similar
	 * command) from the IDE will cause a cookie file to be
	 * created in one of those dirs. This causes the mtime of
	 * those dirs to change. This triggers the IDE's watch
	 * notification. This triggers the IDE to run those commands
	 * again. And the process repeats and the machine never goes
	 * idle.
	 *
	 * Adding the extra layers of subdirectories prevents the
	 * mtime of ".git/" and ".git/foo" from changing when a
	 * cookie file is created.
	 */
	strbuf_init(&state.path_cookie_prefix, 0);
	strbuf_addbuf(&state.path_cookie_prefix, &state.path_gitdir_watch);

	strbuf_addch(&state.path_cookie_prefix, '/');
	strbuf_addstr(&state.path_cookie_prefix, FSMONITOR_DIR);
	mkdir(state.path_cookie_prefix.buf, 0777);

	strbuf_addch(&state.path_cookie_prefix, '/');
	strbuf_addstr(&state.path_cookie_prefix, FSMONITOR_COOKIE_DIR);
	mkdir(state.path_cookie_prefix.buf, 0777);

	strbuf_addch(&state.path_cookie_prefix, '/');

	/*
	 * We create a named-pipe or unix domain socket inside of the
	 * ".git" directory. (Well, on Windows, we base our named
	 * pipe in the NPFS on the absolute path of the git
	 * directory.)
	 */
	strbuf_init(&state.path_ipc, 0);
	strbuf_addstr(&state.path_ipc, absolute_path(fsmonitor_ipc__get_path()));

	/*
	 * Confirm that we can create platform-specific resources for the
	 * filesystem listener before we bother starting all the threads.
	 */
	if (fsm_listen__ctor(&state)) {
		err = error(_("could not initialize listener thread"));
		goto done;
	}

	/*
	 * CD out of the worktree root directory.
	 *
	 * The common Git startup mechanism causes our CWD to be the
	 * root of the worktree. On Windows, this causes our process
	 * to hold a locked handle on the CWD. This prevents the
	 * worktree from being moved or deleted while the daemon is
	 * running.
	 *
	 * We assume that our FS and IPC listener threads have either
	 * opened all of the handles that they need or will do
	 * everything using absolute paths.
	 */
	home = getenv("HOME");
	if (home && *home && chdir(home))
		die_errno(_("could not cd home '%s'"), home);

	err = fsmonitor_run_daemon_1(&state);

done:
	pthread_cond_destroy(&state.cookies_cond);
	pthread_mutex_destroy(&state.main_lock);
	fsm_listen__dtor(&state);

	ipc_server_free(state.ipc_server_data);

	strbuf_release(&state.path_worktree_watch);
	strbuf_release(&state.path_gitdir_watch);
	strbuf_release(&state.path_cookie_prefix);
	strbuf_release(&state.path_ipc);

	return err;
}

static int try_to_run_foreground_daemon(int detach_console)
{
	/*
	 * Technically, we don't need to probe for an existing daemon
	 * process, since we could just call `fsmonitor_run_daemon()`
	 * and let it fail if the pipe/socket is busy.
	 *
	 * However, this method gives us a nicer error message for a
	 * common error case.
	 */
	if (fsmonitor_ipc__get_state() == IPC_STATE__LISTENING)
		die(_("fsmonitor--daemon is already running '%s'"),
		    the_repository->worktree);

	if (fsmonitor__announce_startup) {
		fprintf(stderr, _("running fsmonitor-daemon in '%s'\n"),
			the_repository->worktree);
		fflush(stderr);
	}

#ifdef GIT_WINDOWS_NATIVE
	if (detach_console)
		FreeConsole();
#endif

	return !!fsmonitor_run_daemon();
}

static start_bg_wait_cb bg_wait_cb;

static int bg_wait_cb(const struct child_process *cp, void *cb_data)
{
	enum ipc_active_state s = fsmonitor_ipc__get_state();

	switch (s) {
	case IPC_STATE__LISTENING:
		/* child is "ready" */
		return 0;

	case IPC_STATE__NOT_LISTENING:
	case IPC_STATE__PATH_NOT_FOUND:
		/* give child more time */
		return 1;

	default:
	case IPC_STATE__INVALID_PATH:
	case IPC_STATE__OTHER_ERROR:
		/* all the time in the world won't help */
		return -1;
	}
}

static int try_to_start_background_daemon(void)
{
	struct child_process cp = CHILD_PROCESS_INIT;
	enum start_bg_result sbgr;

	/*
	 * Before we try to create a background daemon process, see
	 * if a daemon process is already listening. This makes it
	 * easier for us to report an already-listening error to the
	 * console, since our spawn/daemon can only report the success
	 * of creating the background process (and not whether it
	 * immediately exited).
	 */
	if (fsmonitor_ipc__get_state() == IPC_STATE__LISTENING)
		die(_("fsmonitor--daemon is already running '%s'"),
		    the_repository->worktree);

	if (fsmonitor__announce_startup) {
		fprintf(stderr, _("starting fsmonitor-daemon in '%s'\n"),
			the_repository->worktree);
		fflush(stderr);
	}

	cp.git_cmd = 1;

	strvec_push(&cp.args, "fsmonitor--daemon");
	strvec_push(&cp.args, "run");
	strvec_push(&cp.args, "--detach");
	strvec_pushf(&cp.args, "--ipc-threads=%d", fsmonitor__ipc_threads);
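	/*
	 * For example, with the default settings this spawns:
	 * "git fsmonitor--daemon run --detach --ipc-threads=8".
	 */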

	cp.no_stdin = 1;
	cp.no_stdout = 1;
	cp.no_stderr = 1;

	sbgr = start_bg_command(&cp, bg_wait_cb, NULL,
				fsmonitor__start_timeout_sec);

	switch (sbgr) {
	case SBGR_READY:
		return 0;

	default:
	case SBGR_ERROR:
	case SBGR_CB_ERROR:
		return error(_("daemon failed to start"));

	case SBGR_TIMEOUT:
		return error(_("daemon not online yet"));

	case SBGR_DIED:
		return error(_("daemon terminated"));
	}
}

int cmd_fsmonitor__daemon(int argc, const char **argv, const char *prefix)
{
	const char *subcmd;
	enum fsmonitor_reason reason;
	int detach_console = 0;

	struct option options[] = {
		OPT_BOOL(0, "detach", &detach_console, N_("detach from console")),
		OPT_INTEGER(0, "ipc-threads",
			    &fsmonitor__ipc_threads,
			    N_("use <n> ipc worker threads")),
		OPT_INTEGER(0, "start-timeout",
			    &fsmonitor__start_timeout_sec,
			    N_("max seconds to wait for background daemon startup")),

		OPT_END()
	};

	git_config(fsmonitor_config, NULL);

	argc = parse_options(argc, argv, prefix, options,
			     builtin_fsmonitor__daemon_usage, 0);
	if (argc != 1)
		usage_with_options(builtin_fsmonitor__daemon_usage, options);
	subcmd = argv[0];

	if (fsmonitor__ipc_threads < 1)
		die(_("invalid 'ipc-threads' value (%d)"),
		    fsmonitor__ipc_threads);

	prepare_repo_settings(the_repository);
	/*
	 * If the repo is fsmonitor-compatible, explicitly set IPC-mode
	 * (without bothering to load the `core.fsmonitor` config settings).
	 *
	 * If the repo is not compatible, the repo-settings will be set to
	 * incompatible rather than IPC, so we can use one of the __get
	 * routines to detect the discrepancy.
	 */
	fsm_settings__set_ipc(the_repository);

	reason = fsm_settings__get_reason(the_repository);
	if (reason > FSMONITOR_REASON_OK)
		die("%s",
		    fsm_settings__get_incompatible_msg(the_repository,
						       reason));

	if (!strcmp(subcmd, "start"))
		return !!try_to_start_background_daemon();

	if (!strcmp(subcmd, "run"))
		return !!try_to_run_foreground_daemon(detach_console);

	if (!strcmp(subcmd, "stop"))
		return !!do_as_client__send_stop();

	if (!strcmp(subcmd, "status"))
		return !!do_as_client__status();

	die(_("Unhandled subcommand '%s'"), subcmd);
}

#else
int cmd_fsmonitor__daemon(int argc, const char **argv, const char *prefix)
{
	struct option options[] = {
		OPT_END()
	};

	if (argc == 2 && !strcmp(argv[1], "-h"))
		usage_with_options(builtin_fsmonitor__daemon_usage, options);

	die(_("fsmonitor--daemon not supported on this platform"));
}
#endif