manager: use sd_notify() to notify parent systemd that we have finished startup
[thirdparty/systemd.git] / src / manager.c
1 /*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/
2
3 /***
4 This file is part of systemd.
5
6 Copyright 2010 Lennart Poettering
7
8 systemd is free software; you can redistribute it and/or modify it
9 under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2 of the License, or
11 (at your option) any later version.
12
13 systemd is distributed in the hope that it will be useful, but
14 WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with systemd; If not, see <http://www.gnu.org/licenses/>.
20 ***/
21
22 #include <assert.h>
23 #include <errno.h>
24 #include <string.h>
25 #include <sys/epoll.h>
26 #include <signal.h>
27 #include <sys/signalfd.h>
28 #include <sys/wait.h>
29 #include <unistd.h>
30 #include <sys/poll.h>
31 #include <sys/reboot.h>
32 #include <sys/ioctl.h>
33 #include <linux/kd.h>
34 #include <termios.h>
35 #include <fcntl.h>
36 #include <sys/types.h>
37 #include <sys/stat.h>
38 #include <dirent.h>
39
40 #ifdef HAVE_AUDIT
41 #include <libaudit.h>
42 #endif
43
44 #include "manager.h"
45 #include "hashmap.h"
46 #include "macro.h"
47 #include "strv.h"
48 #include "log.h"
49 #include "util.h"
50 #include "ratelimit.h"
51 #include "cgroup.h"
52 #include "mount-setup.h"
53 #include "unit-name.h"
54 #include "dbus-unit.h"
55 #include "dbus-job.h"
56 #include "missing.h"
57 #include "path-lookup.h"
58 #include "special.h"
59 #include "bus-errors.h"
60 #include "exit-status.h"
61 #include "sd-daemon.h"
62
63 /* As soon as 16 units are in our GC queue, make sure to run a gc sweep */
64 #define GC_QUEUE_ENTRIES_MAX 16
65
66 /* As soon as 10s have passed since a unit was added to our GC queue, make sure to run a gc sweep */
67 #define GC_QUEUE_USEC_MAX (10*USEC_PER_SEC)
68
69 /* Where clients shall send notification messages to */
70 #define NOTIFY_SOCKET_SYSTEM "/run/systemd/notify"
71 #define NOTIFY_SOCKET_USER "@/org/freedesktop/systemd1/notify"
72
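/* manager_setup_notify() binds the datagram socket on which units report
 * their state back to the manager. PID 1 listens on NOTIFY_SOCKET_SYSTEM;
 * a user instance uses a random abstract-namespace socket instead. Clients
 * normally talk to it via sd_notify() from sd-daemon; as an illustrative
 * sketch (not the actual sd-daemon implementation), the client side boils
 * down to:
 *
 *     int fd = socket(AF_UNIX, SOCK_DGRAM|SOCK_CLOEXEC, 0);
 *     // connect fd to the address in $NOTIFY_SOCKET, then:
 *     const char *msg = "READY=1\nSTATUS=Startup finished.";
 *     sendto(fd, msg, strlen(msg), MSG_NOSIGNAL, ...);
 *
 * manager_process_notify_fd() below reads these datagrams and routes them
 * to the sending unit. */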
73 static int manager_setup_notify(Manager *m) {
74 union {
75 struct sockaddr sa;
76 struct sockaddr_un un;
77 } sa;
78 struct epoll_event ev;
79 int one = 1;
80
81 assert(m);
82
83 m->notify_watch.type = WATCH_NOTIFY;
84 if ((m->notify_watch.fd = socket(AF_UNIX, SOCK_DGRAM|SOCK_CLOEXEC|SOCK_NONBLOCK, 0)) < 0) {
85 log_error("Failed to allocate notification socket: %m");
86 return -errno;
87 }
88
89 zero(sa);
90 sa.sa.sa_family = AF_UNIX;
91
92 if (getpid() != 1)
93 snprintf(sa.un.sun_path, sizeof(sa.un.sun_path), NOTIFY_SOCKET_USER "/%llu", random_ull());
94 else {
95 unlink(NOTIFY_SOCKET_SYSTEM);
96 strncpy(sa.un.sun_path, NOTIFY_SOCKET_SYSTEM, sizeof(sa.un.sun_path));
97 }
98
99 if (sa.un.sun_path[0] == '@')
100 sa.un.sun_path[0] = 0;
101
102 if (bind(m->notify_watch.fd, &sa.sa, offsetof(struct sockaddr_un, sun_path) + 1 + strlen(sa.un.sun_path+1)) < 0) {
103 log_error("bind() failed: %m");
104 return -errno;
105 }
106
107 if (setsockopt(m->notify_watch.fd, SOL_SOCKET, SO_PASSCRED, &one, sizeof(one)) < 0) {
108 log_error("SO_PASSCRED failed: %m");
109 return -errno;
110 }
111
112 zero(ev);
113 ev.events = EPOLLIN;
114 ev.data.ptr = &m->notify_watch;
115
116 if (epoll_ctl(m->epoll_fd, EPOLL_CTL_ADD, m->notify_watch.fd, &ev) < 0)
117 return -errno;
118
119 if (sa.un.sun_path[0] == 0)
120 sa.un.sun_path[0] = '@';
121
122 if (!(m->notify_socket = strdup(sa.un.sun_path)))
123 return -ENOMEM;
124
125 log_debug("Using notification socket %s", m->notify_socket);
126
127 return 0;
128 }
129
130 static int enable_special_signals(Manager *m) {
131 int fd;
132
133 assert(m);
134
135 /* Make sure we get SIGINT on Ctrl-Alt-Del */
136 if (reboot(RB_DISABLE_CAD) < 0)
137 log_warning("Failed to enable ctrl-alt-del handling: %m");
138
139 if ((fd = open_terminal("/dev/tty0", O_RDWR|O_NOCTTY)) < 0)
140 log_warning("Failed to open /dev/tty0: %m");
141 else {
142 /* Make sure we get SIGWINCH on kbrequest (Alt-ArrowUp) */
143 if (ioctl(fd, KDSIGACCEPT, SIGWINCH) < 0)
144 log_warning("Failed to enable kbrequest handling: %s", strerror(errno));
145
146 close_nointr_nofail(fd);
147 }
148
149 return 0;
150 }
151
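/* manager_setup_signals() routes all signal handling through a signalfd:
 * the signals listed below are blocked for ordinary delivery and instead
 * show up as readable events on m->signal_watch, which the main epoll
 * loop hands to manager_process_signal_fd(). */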
152 static int manager_setup_signals(Manager *m) {
153 sigset_t mask;
154 struct epoll_event ev;
155 struct sigaction sa;
156
157 assert(m);
158
159 /* We are not interested in SIGCHLD for stopped children (SIGSTOP and friends), hence SA_NOCLDSTOP. */
160 zero(sa);
161 sa.sa_handler = SIG_DFL;
162 sa.sa_flags = SA_NOCLDSTOP|SA_RESTART;
163 assert_se(sigaction(SIGCHLD, &sa, NULL) == 0);
164
165 assert_se(sigemptyset(&mask) == 0);
166
167 sigset_add_many(&mask,
168 SIGCHLD, /* Child died */
169 SIGTERM, /* Reexecute daemon */
170 SIGHUP, /* Reload configuration */
171 SIGUSR1, /* systemd/upstart: reconnect to D-Bus */
172 SIGUSR2, /* systemd: dump status */
173 SIGINT, /* Kernel sends us this on control-alt-del */
174 SIGWINCH, /* Kernel sends us this on kbrequest (alt-arrowup) */
175 SIGPWR, /* Some kernel drivers and upsd send us this on power failure */
176 SIGRTMIN+0, /* systemd: start default.target */
177 SIGRTMIN+1, /* systemd: isolate rescue.target */
178 SIGRTMIN+2, /* systemd: isolate emergency.target */
179 SIGRTMIN+3, /* systemd: start halt.target */
180 SIGRTMIN+4, /* systemd: start poweroff.target */
181 SIGRTMIN+5, /* systemd: start reboot.target */
182 SIGRTMIN+6, /* systemd: start kexec.target */
183 SIGRTMIN+13, /* systemd: Immediate halt */
184 SIGRTMIN+14, /* systemd: Immediate poweroff */
185 SIGRTMIN+15, /* systemd: Immediate reboot */
186 SIGRTMIN+16, /* systemd: Immediate kexec */
187 SIGRTMIN+20, /* systemd: enable status messages */
188 SIGRTMIN+21, /* systemd: disable status messages */
189 -1);
190 assert_se(sigprocmask(SIG_SETMASK, &mask, NULL) == 0);
191
192 m->signal_watch.type = WATCH_SIGNAL;
193 if ((m->signal_watch.fd = signalfd(-1, &mask, SFD_NONBLOCK|SFD_CLOEXEC)) < 0)
194 return -errno;
195
196 zero(ev);
197 ev.events = EPOLLIN;
198 ev.data.ptr = &m->signal_watch;
199
200 if (epoll_ctl(m->epoll_fd, EPOLL_CTL_ADD, m->signal_watch.fd, &ev) < 0)
201 return -errno;
202
203 if (m->running_as == MANAGER_SYSTEM)
204 return enable_special_signals(m);
205
206 return 0;
207 }
208
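/* Allocates and initializes a Manager: epoll instance, signal handling,
 * cgroup hierarchy, notification socket and (if possible) the D-Bus
 * connections. On any failure the half-initialized object is torn down
 * again via manager_free(). */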
209 int manager_new(ManagerRunningAs running_as, Manager **_m) {
210 Manager *m;
211 int r = -ENOMEM;
212
213 assert(_m);
214 assert(running_as >= 0);
215 assert(running_as < _MANAGER_RUNNING_AS_MAX);
216
217 if (!(m = new0(Manager, 1)))
218 return -ENOMEM;
219
220 dual_timestamp_get(&m->startup_timestamp);
221
222 m->running_as = running_as;
223 m->name_data_slot = m->subscribed_data_slot = -1;
224 m->exit_code = _MANAGER_EXIT_CODE_INVALID;
225 m->pin_cgroupfs_fd = -1;
226
227 #ifdef HAVE_AUDIT
228 m->audit_fd = -1;
229 #endif
230
231 m->signal_watch.fd = m->mount_watch.fd = m->udev_watch.fd = m->epoll_fd = m->dev_autofs_fd = m->swap_watch.fd = -1;
232 m->current_job_id = 1; /* start as id #1, so that we can leave #0 around as "null-like" value */
233
234 if (!(m->environment = strv_copy(environ)))
235 goto fail;
236
237 if (!(m->default_controllers = strv_new("cpu", NULL)))
238 goto fail;
239
240 if (!(m->units = hashmap_new(string_hash_func, string_compare_func)))
241 goto fail;
242
243 if (!(m->jobs = hashmap_new(trivial_hash_func, trivial_compare_func)))
244 goto fail;
245
246 if (!(m->transaction_jobs = hashmap_new(trivial_hash_func, trivial_compare_func)))
247 goto fail;
248
249 if (!(m->watch_pids = hashmap_new(trivial_hash_func, trivial_compare_func)))
250 goto fail;
251
252 if (!(m->cgroup_bondings = hashmap_new(string_hash_func, string_compare_func)))
253 goto fail;
254
255 if (!(m->watch_bus = hashmap_new(string_hash_func, string_compare_func)))
256 goto fail;
257
258 if ((m->epoll_fd = epoll_create1(EPOLL_CLOEXEC)) < 0)
259 goto fail;
260
261 if ((r = lookup_paths_init(&m->lookup_paths, m->running_as)) < 0)
262 goto fail;
263
264 if ((r = manager_setup_signals(m)) < 0)
265 goto fail;
266
267 if ((r = manager_setup_cgroup(m)) < 0)
268 goto fail;
269
270 if ((r = manager_setup_notify(m)) < 0)
271 goto fail;
272
273 /* Try to connect to the busses, if possible. */
274 if ((r = bus_init(m, running_as != MANAGER_SYSTEM)) < 0)
275 goto fail;
276
277 #ifdef HAVE_AUDIT
278 if ((m->audit_fd = audit_open()) < 0)
279 log_error("Failed to connect to audit log: %m");
280 #endif
281
282 m->taint_usr = dir_is_empty("/usr") > 0;
283
284 *_m = m;
285 return 0;
286
287 fail:
288 manager_free(m);
289 return r;
290 }
291
292 static unsigned manager_dispatch_cleanup_queue(Manager *m) {
293 Meta *meta;
294 unsigned n = 0;
295
296 assert(m);
297
298 while ((meta = m->cleanup_queue)) {
299 assert(meta->in_cleanup_queue);
300
301 unit_free((Unit*) meta);
302 n++;
303 }
304
305 return n;
306 }
307
308 enum {
309 GC_OFFSET_IN_PATH, /* This one is on the path we were traveling */
310 GC_OFFSET_UNSURE, /* No clue */
311 GC_OFFSET_GOOD, /* We still need this unit */
312 GC_OFFSET_BAD, /* We don't need this unit anymore */
313 _GC_OFFSET_MAX
314 };
315
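/* unit_gc_sweep() implements a small mark-and-sweep pass over the units
 * in the GC queue. Each sweep uses a fresh marker base (m->gc_marker),
 * and a unit's verdict is stored as base + one of the GC_OFFSET_* values
 * above, so markers left over from earlier sweeps can never be mistaken
 * for current results. A unit is kept if unit_check_gc() says so or if
 * anything referencing it is kept; if everything referencing it is bad it
 * goes to the cleanup queue, and otherwise it is requeued as "unsure". */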
316 static void unit_gc_sweep(Unit *u, unsigned gc_marker) {
317 Iterator i;
318 Unit *other;
319 bool is_bad;
320
321 assert(u);
322
323 if (u->meta.gc_marker == gc_marker + GC_OFFSET_GOOD ||
324 u->meta.gc_marker == gc_marker + GC_OFFSET_BAD ||
325 u->meta.gc_marker == gc_marker + GC_OFFSET_IN_PATH)
326 return;
327
328 if (u->meta.in_cleanup_queue)
329 goto bad;
330
331 if (unit_check_gc(u))
332 goto good;
333
334 u->meta.gc_marker = gc_marker + GC_OFFSET_IN_PATH;
335
336 is_bad = true;
337
338 SET_FOREACH(other, u->meta.dependencies[UNIT_REFERENCED_BY], i) {
339 unit_gc_sweep(other, gc_marker);
340
341 if (other->meta.gc_marker == gc_marker + GC_OFFSET_GOOD)
342 goto good;
343
344 if (other->meta.gc_marker != gc_marker + GC_OFFSET_BAD)
345 is_bad = false;
346 }
347
348 if (is_bad)
349 goto bad;
350
351 /* We were unable to find anything out about this entry, so
352 * let's investigate it later */
353 u->meta.gc_marker = gc_marker + GC_OFFSET_UNSURE;
354 unit_add_to_gc_queue(u);
355 return;
356
357 bad:
358 /* We definitely know that this one is not useful anymore, so
359 * let's mark it for deletion */
360 u->meta.gc_marker = gc_marker + GC_OFFSET_BAD;
361 unit_add_to_cleanup_queue(u);
362 return;
363
364 good:
365 u->meta.gc_marker = gc_marker + GC_OFFSET_GOOD;
366 }
367
368 static unsigned manager_dispatch_gc_queue(Manager *m) {
369 Meta *meta;
370 unsigned n = 0;
371 unsigned gc_marker;
372
373 assert(m);
374
375 if ((m->n_in_gc_queue < GC_QUEUE_ENTRIES_MAX) &&
376 (m->gc_queue_timestamp <= 0 ||
377 (m->gc_queue_timestamp + GC_QUEUE_USEC_MAX) > now(CLOCK_MONOTONIC)))
378 return 0;
379
380 log_debug("Running GC...");
381
382 m->gc_marker += _GC_OFFSET_MAX;
383 if (m->gc_marker + _GC_OFFSET_MAX <= _GC_OFFSET_MAX)
384 m->gc_marker = 1;
385
386 gc_marker = m->gc_marker;
387
388 while ((meta = m->gc_queue)) {
389 assert(meta->in_gc_queue);
390
391 unit_gc_sweep((Unit*) meta, gc_marker);
392
393 LIST_REMOVE(Meta, gc_queue, m->gc_queue, meta);
394 meta->in_gc_queue = false;
395
396 n++;
397
398 if (meta->gc_marker == gc_marker + GC_OFFSET_BAD ||
399 meta->gc_marker == gc_marker + GC_OFFSET_UNSURE) {
400 log_debug("Collecting %s", meta->id);
401 meta->gc_marker = gc_marker + GC_OFFSET_BAD;
402 unit_add_to_cleanup_queue((Unit*) meta);
403 }
404 }
405
406 m->n_in_gc_queue = 0;
407 m->gc_queue_timestamp = 0;
408
409 return n;
410 }
411
412 static void manager_clear_jobs_and_units(Manager *m) {
413 Job *j;
414 Unit *u;
415
416 assert(m);
417
418 while ((j = hashmap_first(m->transaction_jobs)))
419 job_free(j);
420
421 while ((u = hashmap_first(m->units)))
422 unit_free(u);
423
424 manager_dispatch_cleanup_queue(m);
425
426 assert(!m->load_queue);
427 assert(!m->run_queue);
428 assert(!m->dbus_unit_queue);
429 assert(!m->dbus_job_queue);
430 assert(!m->cleanup_queue);
431 assert(!m->gc_queue);
432
433 assert(hashmap_isempty(m->transaction_jobs));
434 assert(hashmap_isempty(m->jobs));
435 assert(hashmap_isempty(m->units));
436 }
437
438 void manager_free(Manager *m) {
439 UnitType c;
440
441 assert(m);
442
443 manager_clear_jobs_and_units(m);
444
445 for (c = 0; c < _UNIT_TYPE_MAX; c++)
446 if (unit_vtable[c]->shutdown)
447 unit_vtable[c]->shutdown(m);
448
449 /* If we reexecute ourselves, we keep the root cgroup
450 * around */
451 manager_shutdown_cgroup(m, m->exit_code != MANAGER_REEXECUTE);
452
453 manager_undo_generators(m);
454
455 bus_done(m);
456
457 hashmap_free(m->units);
458 hashmap_free(m->jobs);
459 hashmap_free(m->transaction_jobs);
460 hashmap_free(m->watch_pids);
461 hashmap_free(m->watch_bus);
462
463 if (m->epoll_fd >= 0)
464 close_nointr_nofail(m->epoll_fd);
465 if (m->signal_watch.fd >= 0)
466 close_nointr_nofail(m->signal_watch.fd);
467 if (m->notify_watch.fd >= 0)
468 close_nointr_nofail(m->notify_watch.fd);
469
470 #ifdef HAVE_AUDIT
471 if (m->audit_fd >= 0)
472 audit_close(m->audit_fd);
473 #endif
474
475 free(m->notify_socket);
476
477 lookup_paths_free(&m->lookup_paths);
478 strv_free(m->environment);
479
480 strv_free(m->default_controllers);
481
482 hashmap_free(m->cgroup_bondings);
483 set_free_free(m->unit_path_cache);
484
485 free(m);
486 }
487
488 int manager_enumerate(Manager *m) {
489 int r = 0, q;
490 UnitType c;
491
492 assert(m);
493
494 /* Let's ask every type to load all units from disk/kernel
495 * that it might know */
496 for (c = 0; c < _UNIT_TYPE_MAX; c++)
497 if (unit_vtable[c]->enumerate)
498 if ((q = unit_vtable[c]->enumerate(m)) < 0)
499 r = q;
500
501 manager_dispatch_load_queue(m);
502 return r;
503 }
504
505 int manager_coldplug(Manager *m) {
506 int r = 0, q;
507 Iterator i;
508 Unit *u;
509 char *k;
510
511 assert(m);
512
513 /* Then, let's set up their initial state. */
514 HASHMAP_FOREACH_KEY(u, k, m->units, i) {
515
516 /* ignore aliases */
517 if (u->meta.id != k)
518 continue;
519
520 if ((q = unit_coldplug(u)) < 0)
521 r = q;
522 }
523
524 return r;
525 }
526
527 static void manager_build_unit_path_cache(Manager *m) {
528 char **i;
529 DIR *d = NULL;
530 int r;
531
532 assert(m);
533
534 set_free_free(m->unit_path_cache);
535
536 if (!(m->unit_path_cache = set_new(string_hash_func, string_compare_func))) {
537 log_error("Failed to allocate unit path cache.");
538 return;
539 }
540
541 /* This simply builds a list of files we know exist, so that
542 * we don't always have to go to disk */
543
544 STRV_FOREACH(i, m->lookup_paths.unit_path) {
545 struct dirent *de;
546
547 if (!(d = opendir(*i))) {
548 log_error("Failed to open directory: %m");
549 continue;
550 }
551
552 while ((de = readdir(d))) {
553 char *p;
554
555 if (ignore_file(de->d_name))
556 continue;
557
558 if (asprintf(&p, "%s/%s", streq(*i, "/") ? "" : *i, de->d_name) < 0) {
559 r = -ENOMEM;
560 goto fail;
561 }
562
563 if ((r = set_put(m->unit_path_cache, p)) < 0) {
564 free(p);
565 goto fail;
566 }
567 }
568
569 closedir(d);
570 d = NULL;
571 }
572
573 return;
574
575 fail:
576 log_error("Failed to build unit path cache: %s", strerror(-r));
577
578 set_free_free(m->unit_path_cache);
579 m->unit_path_cache = NULL;
580
581 if (d)
582 closedir(d);
583 }
584
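/* manager_startup() drives the boot/reload sequence: run the generators,
 * build the unit path cache, enumerate units from disk and kernel,
 * deserialize any state handed over from a previous manager (reload or
 * reexec), and finally coldplug all units into their initial state. */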
585 int manager_startup(Manager *m, FILE *serialization, FDSet *fds) {
586 int r, q;
587
588 assert(m);
589
590 manager_run_generators(m);
591
592 manager_build_unit_path_cache(m);
593
594 /* If we are going to deserialize, make sure this is already
595 * known during enumeration, so we increase the counter here
596 * already */
597 if (serialization)
598 m->n_deserializing ++;
599
600 /* First, enumerate what we can from all config files */
601 r = manager_enumerate(m);
602
603 /* Second, deserialize if there is something to deserialize */
604 if (serialization)
605 if ((q = manager_deserialize(m, serialization, fds)) < 0)
606 r = q;
607
608 /* Third, fire things up! */
609 if ((q = manager_coldplug(m)) < 0)
610 r = q;
611
612 if (serialization) {
613 assert(m->n_deserializing > 0);
614 m->n_deserializing --;
615 }
616
617 return r;
618 }
619
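/* The transaction_*() helpers below build up a prospective transaction in
 * m->transaction_jobs, a hashmap keyed by unit where each entry is a list
 * of candidate jobs chained via transaction_next/transaction_prev.
 * transaction_activate() then minimizes impact, drops redundant entries,
 * breaks ordering cycles, merges the remaining jobs per unit and finally
 * installs them into m->jobs. */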
620 static void transaction_delete_job(Manager *m, Job *j, bool delete_dependencies) {
621 assert(m);
622 assert(j);
623
624 /* Deletes one job from the transaction */
625
626 manager_transaction_unlink_job(m, j, delete_dependencies);
627
628 if (!j->installed)
629 job_free(j);
630 }
631
632 static void transaction_delete_unit(Manager *m, Unit *u) {
633 Job *j;
634
635 /* Deletes all jobs associated with a certain unit from the
636 * transaction */
637
638 while ((j = hashmap_get(m->transaction_jobs, u)))
639 transaction_delete_job(m, j, true);
640 }
641
642 static void transaction_clean_dependencies(Manager *m) {
643 Iterator i;
644 Job *j;
645
646 assert(m);
647
648 /* Drops all dependencies of all installed jobs */
649
650 HASHMAP_FOREACH(j, m->jobs, i) {
651 while (j->subject_list)
652 job_dependency_free(j->subject_list);
653 while (j->object_list)
654 job_dependency_free(j->object_list);
655 }
656
657 assert(!m->transaction_anchor);
658 }
659
660 static void transaction_abort(Manager *m) {
661 Job *j;
662
663 assert(m);
664
665 while ((j = hashmap_first(m->transaction_jobs)))
666 if (j->installed)
667 transaction_delete_job(m, j, true);
668 else
669 job_free(j);
670
671 assert(hashmap_isempty(m->transaction_jobs));
672
673 transaction_clean_dependencies(m);
674 }
675
676 static void transaction_find_jobs_that_matter_to_anchor(Manager *m, Job *j, unsigned generation) {
677 JobDependency *l;
678
679 assert(m);
680
681 /* A recursive sweep through the graph that marks all units
682 * that matter to the anchor job, i.e. are directly or
683 * indirectly a dependency of the anchor job via paths that
684 * are fully marked as mattering. */
685
686 if (j)
687 l = j->subject_list;
688 else
689 l = m->transaction_anchor;
690
691 LIST_FOREACH(subject, l, l) {
692
693 /* This link does not matter */
694 if (!l->matters)
695 continue;
696
697 /* This unit has already been marked */
698 if (l->object->generation == generation)
699 continue;
700
701 l->object->matters_to_anchor = true;
702 l->object->generation = generation;
703
704 transaction_find_jobs_that_matter_to_anchor(m, l->object, generation);
705 }
706 }
707
708 static void transaction_merge_and_delete_job(Manager *m, Job *j, Job *other, JobType t) {
709 JobDependency *l, *last;
710
711 assert(j);
712 assert(other);
713 assert(j->unit == other->unit);
714 assert(!j->installed);
715
716 /* Merges 'other' into 'j' and then deletes 'other'. */
717
718 j->type = t;
719 j->state = JOB_WAITING;
720 j->override = j->override || other->override;
721
722 j->matters_to_anchor = j->matters_to_anchor || other->matters_to_anchor;
723
724 /* Patch us in as new owner of the JobDependency objects */
725 last = NULL;
726 LIST_FOREACH(subject, l, other->subject_list) {
727 assert(l->subject == other);
728 l->subject = j;
729 last = l;
730 }
731
732 /* Merge both lists */
733 if (last) {
734 last->subject_next = j->subject_list;
735 if (j->subject_list)
736 j->subject_list->subject_prev = last;
737 j->subject_list = other->subject_list;
738 }
739
740 /* Patch us in as new owner of the JobDependency objects */
741 last = NULL;
742 LIST_FOREACH(object, l, other->object_list) {
743 assert(l->object == other);
744 l->object = j;
745 last = l;
746 }
747
748 /* Merge both lists */
749 if (last) {
750 last->object_next = j->object_list;
751 if (j->object_list)
752 j->object_list->object_prev = last;
753 j->object_list = other->object_list;
754 }
755
756 /* Kill the other job */
757 other->subject_list = NULL;
758 other->object_list = NULL;
759 transaction_delete_job(m, other, true);
760 }
761 static bool job_is_conflicted_by(Job *j) {
762 JobDependency *l;
763
764 assert(j);
765
766 /* Returns true if this job is pulled in by at least one
767 * ConflictedBy dependency. */
768
769 LIST_FOREACH(object, l, j->object_list)
770 if (l->conflicts)
771 return true;
772
773 return false;
774 }
775
776 static int delete_one_unmergeable_job(Manager *m, Job *j) {
777 Job *k;
778
779 assert(j);
780
781 /* Tries to delete one item in the linked list
782 * j->transaction_next->transaction_next->... that conflicts
783 * with another one, in an attempt to make an inconsistent
784 * transaction work. */
785
786 /* We rely here on the fact that if a merged with b does not
787 * merge with c, then either a or b does not merge with c either */
788 LIST_FOREACH(transaction, j, j)
789 LIST_FOREACH(transaction, k, j->transaction_next) {
790 Job *d;
791
792 /* Is this one mergeable? Then skip it */
793 if (job_type_is_mergeable(j->type, k->type))
794 continue;
795
796 /* Ok, we found two that conflict, let's see if we can
797 * drop one of them */
798 if (!j->matters_to_anchor && !k->matters_to_anchor) {
799
800 /* Both jobs don't matter, so let's
801 * find the one that is smarter to
802 * remove. Let's think positive and
803 * rather remove stops than starts --
804 * except if something is being
805 * stopped because it is conflicted by
806 * another unit in which case we
807 * rather remove the start. */
808
809 log_debug("Looking at job %s/%s conflicted_by=%s", j->unit->meta.id, job_type_to_string(j->type), yes_no(j->type == JOB_STOP && job_is_conflicted_by(j)));
810 log_debug("Looking at job %s/%s conflicted_by=%s", k->unit->meta.id, job_type_to_string(k->type), yes_no(k->type == JOB_STOP && job_is_conflicted_by(k)));
811
812 if (j->type == JOB_STOP) {
813
814 if (job_is_conflicted_by(j))
815 d = k;
816 else
817 d = j;
818
819 } else if (k->type == JOB_STOP) {
820
821 if (job_is_conflicted_by(k))
822 d = j;
823 else
824 d = k;
825 } else
826 d = j;
827
828 } else if (!j->matters_to_anchor)
829 d = j;
830 else if (!k->matters_to_anchor)
831 d = k;
832 else
833 return -ENOEXEC;
834
835 /* Ok, we can drop one, so let's do so. */
836 log_debug("Fixing conflicting jobs by deleting job %s/%s", d->unit->meta.id, job_type_to_string(d->type));
837 transaction_delete_job(m, d, true);
838 return 0;
839 }
840
841 return -EINVAL;
842 }
843
844 static int transaction_merge_jobs(Manager *m, DBusError *e) {
845 Job *j;
846 Iterator i;
847 int r;
848
849 assert(m);
850
851 /* First step, check whether any of the jobs for one specific
852 * task conflict. If so, try to drop one of them. */
853 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
854 JobType t;
855 Job *k;
856
857 t = j->type;
858 LIST_FOREACH(transaction, k, j->transaction_next) {
859 if (job_type_merge(&t, k->type) >= 0)
860 continue;
861
862 /* OK, we could not merge all jobs for this
863 * action. Let's see if we can get rid of one
864 * of them */
865
866 if ((r = delete_one_unmergeable_job(m, j)) >= 0)
867 /* Ok, we managed to drop one, now
868 * let's ask our callers to call us
869 * again after garbage collecting */
870 return -EAGAIN;
871
872 /* We couldn't merge anything. Failure */
873 dbus_set_error(e, BUS_ERROR_TRANSACTION_JOBS_CONFLICTING, "Transaction contains conflicting jobs '%s' and '%s' for %s. Probably contradicting requirement dependencies configured.",
874 job_type_to_string(t), job_type_to_string(k->type), k->unit->meta.id);
875 return r;
876 }
877 }
878
879 /* Second step, merge the jobs. */
880 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
881 JobType t = j->type;
882 Job *k;
883
884 /* Merge all transactions */
885 LIST_FOREACH(transaction, k, j->transaction_next)
886 assert_se(job_type_merge(&t, k->type) == 0);
887
888 /* If an active job is mergeable, merge it too */
889 if (j->unit->meta.job)
890 job_type_merge(&t, j->unit->meta.job->type); /* Might fail. Which is OK */
891
892 while ((k = j->transaction_next)) {
893 if (j->installed) {
894 transaction_merge_and_delete_job(m, k, j, t);
895 j = k;
896 } else
897 transaction_merge_and_delete_job(m, j, k, t);
898 }
899
900 assert(!j->transaction_next);
901 assert(!j->transaction_prev);
902 }
903
904 return 0;
905 }
906
907 static void transaction_drop_redundant(Manager *m) {
908 bool again;
909
910 assert(m);
911
912 /* Goes through the transaction and removes all jobs that are
913 * a noop */
914
915 do {
916 Job *j;
917 Iterator i;
918
919 again = false;
920
921 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
922 bool changes_something = false;
923 Job *k;
924
925 LIST_FOREACH(transaction, k, j) {
926
927 if (!job_is_anchor(k) &&
928 (k->installed || job_type_is_redundant(k->type, unit_active_state(k->unit))) &&
929 (!k->unit->meta.job || !job_type_is_conflicting(k->type, k->unit->meta.job->type)))
930 continue;
931
932 changes_something = true;
933 break;
934 }
935
936 if (changes_something)
937 continue;
938
939 /* log_debug("Found redundant job %s/%s, dropping.", j->unit->meta.id, job_type_to_string(j->type)); */
940 transaction_delete_job(m, j, false);
941 again = true;
942 break;
943 }
944
945 } while (again);
946 }
947
948 static bool unit_matters_to_anchor(Unit *u, Job *j) {
949 assert(u);
950 assert(!j->transaction_prev);
951
952 /* Checks whether at least one of the jobs for this unit
953 * matters to the anchor. */
954
955 LIST_FOREACH(transaction, j, j)
956 if (j->matters_to_anchor)
957 return true;
958
959 return false;
960 }
961
962 static int transaction_verify_order_one(Manager *m, Job *j, Job *from, unsigned generation, DBusError *e) {
963 Iterator i;
964 Unit *u;
965 int r;
966
967 assert(m);
968 assert(j);
969 assert(!j->transaction_prev);
970
971 /* Does a recursive sweep through the ordering graph, looking
972 * for a cycle. If we find a cycle we try to break it. */
973
974 /* Have we seen this before? */
975 if (j->generation == generation) {
976 Job *k, *delete;
977
978 /* If the marker is NULL we have been here already and
979 * decided the job was loop-free from here. Hence
980 * shortcut things and return right-away. */
981 if (!j->marker)
982 return 0;
983
984 /* So, the marker is not NULL and we already have been
985 * here. We have a cycle. Let's try to break it. We go
986 * backwards in our path and try to find a suitable
987 * job to remove. We use the marker to find our way
988 * back, since, smart as we are, we stored our way back
989 * in there. */
990 log_warning("Found ordering cycle on %s/%s", j->unit->meta.id, job_type_to_string(j->type));
991
992 delete = NULL;
993 for (k = from; k; k = ((k->generation == generation && k->marker != k) ? k->marker : NULL)) {
994
995 log_info("Walked on cycle path to %s/%s", k->unit->meta.id, job_type_to_string(k->type));
996
997 if (!delete &&
998 !k->installed &&
999 !unit_matters_to_anchor(k->unit, k)) {
1000 /* Ok, we can drop this one, so let's
1001 * do so. */
1002 delete = k;
1003 }
1004
1005 /* Check if this in fact was the beginning of
1006 * the cycle */
1007 if (k == j)
1008 break;
1009 }
1010
1011
1012 if (delete) {
1013 log_warning("Breaking ordering cycle by deleting job %s/%s", delete->unit->meta.id, job_type_to_string(delete->type));
1014 transaction_delete_unit(m, delete->unit);
1015 return -EAGAIN;
1016 }
1017
1018 log_error("Unable to break cycle");
1019
1020 dbus_set_error(e, BUS_ERROR_TRANSACTION_ORDER_IS_CYCLIC, "Transaction order is cyclic. See system logs for details.");
1021 return -ENOEXEC;
1022 }
1023
1024 /* Make the marker point to where we come from, so that we can
1025 * find our way backwards if we want to break a cycle. We use
1026 * a special marker for the beginning: we point to
1027 * ourselves. */
1028 j->marker = from ? from : j;
1029 j->generation = generation;
1030
1031 /* We assume that the dependencies are bidirectional, and
1032 * hence can ignore UNIT_AFTER */
1033 SET_FOREACH(u, j->unit->meta.dependencies[UNIT_BEFORE], i) {
1034 Job *o;
1035
1036 /* Is there a job for this unit? */
1037 if (!(o = hashmap_get(m->transaction_jobs, u)))
1038
1039 /* Ok, there is no job for this in the
1040 * transaction, but maybe there is already one
1041 * running? */
1042 if (!(o = u->meta.job))
1043 continue;
1044
1045 if ((r = transaction_verify_order_one(m, o, j, generation, e)) < 0)
1046 return r;
1047 }
1048
1049 /* Ok, let's backtrack, and remember that this entry is not on
1050 * our path anymore. */
1051 j->marker = NULL;
1052
1053 return 0;
1054 }
1055
1056 static int transaction_verify_order(Manager *m, unsigned *generation, DBusError *e) {
1057 Job *j;
1058 int r;
1059 Iterator i;
1060 unsigned g;
1061
1062 assert(m);
1063 assert(generation);
1064
1065 /* Check if the ordering graph is cyclic. If it is, try to fix
1066 * that up by dropping one of the jobs. */
1067
1068 g = (*generation)++;
1069
1070 HASHMAP_FOREACH(j, m->transaction_jobs, i)
1071 if ((r = transaction_verify_order_one(m, j, NULL, g, e)) < 0)
1072 return r;
1073
1074 return 0;
1075 }
1076
1077 static void transaction_collect_garbage(Manager *m) {
1078 bool again;
1079
1080 assert(m);
1081
1082 /* Drop jobs that are not required by any other job */
1083
1084 do {
1085 Iterator i;
1086 Job *j;
1087
1088 again = false;
1089
1090 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
1091 if (j->object_list) {
1092 /* log_debug("Keeping job %s/%s because of %s/%s", */
1093 /* j->unit->meta.id, job_type_to_string(j->type), */
1094 /* j->object_list->subject ? j->object_list->subject->unit->meta.id : "root", */
1095 /* j->object_list->subject ? job_type_to_string(j->object_list->subject->type) : "root"); */
1096 continue;
1097 }
1098
1099 /* log_debug("Garbage collecting job %s/%s", j->unit->meta.id, job_type_to_string(j->type)); */
1100 transaction_delete_job(m, j, true);
1101 again = true;
1102 break;
1103 }
1104
1105 } while (again);
1106 }
1107
1108 static int transaction_is_destructive(Manager *m, DBusError *e) {
1109 Iterator i;
1110 Job *j;
1111
1112 assert(m);
1113
1114 /* Checks whether applying this transaction means that
1115 * existing jobs would be replaced */
1116
1117 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
1118
1119 /* Assume merged */
1120 assert(!j->transaction_prev);
1121 assert(!j->transaction_next);
1122
1123 if (j->unit->meta.job &&
1124 j->unit->meta.job != j &&
1125 !job_type_is_superset(j->type, j->unit->meta.job->type)) {
1126
1127 dbus_set_error(e, BUS_ERROR_TRANSACTION_IS_DESTRUCTIVE, "Transaction is destructive.");
1128 return -EEXIST;
1129 }
1130 }
1131
1132 return 0;
1133 }
1134
1135 static void transaction_minimize_impact(Manager *m) {
1136 bool again;
1137 assert(m);
1138
1139 /* Drops all unnecessary jobs that reverse already active jobs
1140 * or that stop a running service. */
1141
1142 do {
1143 Job *j;
1144 Iterator i;
1145
1146 again = false;
1147
1148 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
1149 LIST_FOREACH(transaction, j, j) {
1150 bool stops_running_service, changes_existing_job;
1151
1152 /* If it matters, we shouldn't drop it */
1153 if (j->matters_to_anchor)
1154 continue;
1155
1156 /* Would this stop a running service?
1157 * Would this change an existing job?
1158 * If so, let's drop this entry */
1159
1160 stops_running_service =
1161 j->type == JOB_STOP && UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(j->unit));
1162
1163 changes_existing_job =
1164 j->unit->meta.job &&
1165 job_type_is_conflicting(j->type, j->unit->meta.job->type);
1166
1167 if (!stops_running_service && !changes_existing_job)
1168 continue;
1169
1170 if (stops_running_service)
1171 log_debug("%s/%s would stop a running service.", j->unit->meta.id, job_type_to_string(j->type));
1172
1173 if (changes_existing_job)
1174 log_debug("%s/%s would change existing job.", j->unit->meta.id, job_type_to_string(j->type));
1175
1176 /* Ok, let's get rid of this */
1177 log_debug("Deleting %s/%s to minimize impact.", j->unit->meta.id, job_type_to_string(j->type));
1178
1179 transaction_delete_job(m, j, true);
1180 again = true;
1181 break;
1182 }
1183
1184 if (again)
1185 break;
1186 }
1187
1188 } while (again);
1189 }
1190
1191 static int transaction_apply(Manager *m, JobMode mode) {
1192 Iterator i;
1193 Job *j;
1194 int r;
1195
1196 /* Moves the transaction jobs to the set of active jobs */
1197
1198 if (mode == JOB_ISOLATE) {
1199
1200 /* When isolating first kill all installed jobs which
1201 * aren't part of the new transaction */
1202 HASHMAP_FOREACH(j, m->jobs, i) {
1203 assert(j->installed);
1204
1205 if (hashmap_get(m->transaction_jobs, j->unit))
1206 continue;
1207
1208 job_finish_and_invalidate(j, JOB_CANCELED);
1209 }
1210 }
1211
1212 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
1213 /* Assume merged */
1214 assert(!j->transaction_prev);
1215 assert(!j->transaction_next);
1216
1217 if (j->installed)
1218 continue;
1219
1220 if ((r = hashmap_put(m->jobs, UINT32_TO_PTR(j->id), j)) < 0)
1221 goto rollback;
1222 }
1223
1224 while ((j = hashmap_steal_first(m->transaction_jobs))) {
1225 if (j->installed) {
1226 /* log_debug("Skipping already installed job %s/%s as %u", j->unit->meta.id, job_type_to_string(j->type), (unsigned) j->id); */
1227 continue;
1228 }
1229
1230 if (j->unit->meta.job)
1231 job_free(j->unit->meta.job);
1232
1233 j->unit->meta.job = j;
1234 j->installed = true;
1235 m->n_installed_jobs ++;
1236
1237 /* We're fully installed. Now let's free data we don't
1238 * need anymore. */
1239
1240 assert(!j->transaction_next);
1241 assert(!j->transaction_prev);
1242
1243 job_add_to_run_queue(j);
1244 job_add_to_dbus_queue(j);
1245 job_start_timer(j);
1246
1247 log_debug("Installed new job %s/%s as %u", j->unit->meta.id, job_type_to_string(j->type), (unsigned) j->id);
1248 }
1249
1250 /* As last step, kill all remaining job dependencies. */
1251 transaction_clean_dependencies(m);
1252
1253 return 0;
1254
1255 rollback:
1256
1257 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
1258 if (j->installed)
1259 continue;
1260
1261 hashmap_remove(m->jobs, UINT32_TO_PTR(j->id));
1262 }
1263
1264 return r;
1265 }
1266
1267 static int transaction_activate(Manager *m, JobMode mode, DBusError *e) {
1268 int r;
1269 unsigned generation = 1;
1270
1271 assert(m);
1272
1273 /* This applies the changes recorded in transaction_jobs to
1274 * the actual list of jobs, if possible. */
1275
1276 /* First step: figure out which jobs matter */
1277 transaction_find_jobs_that_matter_to_anchor(m, NULL, generation++);
1278
1279 /* Second step: Try not to stop any running services if
1280 * we don't have to. Don't try to reverse running
1281 * jobs if we don't have to. */
1282 if (mode == JOB_FAIL)
1283 transaction_minimize_impact(m);
1284
1285 /* Third step: Drop redundant jobs */
1286 transaction_drop_redundant(m);
1287
1288 for (;;) {
1289 /* Fourth step: Let's remove unneeded jobs that might
1290 * be lurking. */
1291 if (mode != JOB_ISOLATE)
1292 transaction_collect_garbage(m);
1293
1294 /* Fifth step: verify order makes sense and correct
1295 * cycles if necessary and possible */
1296 if ((r = transaction_verify_order(m, &generation, e)) >= 0)
1297 break;
1298
1299 if (r != -EAGAIN) {
1300 log_warning("Requested transaction contains an unfixable cyclic ordering dependency: %s", bus_error(e, r));
1301 goto rollback;
1302 }
1303
1304 /* Let's see if the resulting transaction ordering
1305 * graph is still cyclic... */
1306 }
1307
1308 for (;;) {
1309 /* Sixth step: let's drop unmergeable entries if
1310 * necessary and possible, merge entries we can
1311 * merge */
1312 if ((r = transaction_merge_jobs(m, e)) >= 0)
1313 break;
1314
1315 if (r != -EAGAIN) {
1316 log_warning("Requested transaction contains unmergeable jobs: %s", bus_error(e, r));
1317 goto rollback;
1318 }
1319
1320 /* Seventh step: an entry got dropped, let's garbage
1321 * collect its dependencies. */
1322 if (mode != JOB_ISOLATE)
1323 transaction_collect_garbage(m);
1324
1325 /* Let's see if the resulting transaction still has
1326 * unmergeable entries ... */
1327 }
1328
1329 /* Eighth step: Drop redundant jobs again, if the merging now allows us to drop more. */
1330 transaction_drop_redundant(m);
1331
1332 /* Ninth step: check whether we can actually apply this */
1333 if (mode == JOB_FAIL)
1334 if ((r = transaction_is_destructive(m, e)) < 0) {
1335 log_notice("Requested transaction contradicts existing jobs: %s", bus_error(e, r));
1336 goto rollback;
1337 }
1338
1339 /* Tenth step: apply changes */
1340 if ((r = transaction_apply(m, mode)) < 0) {
1341 log_warning("Failed to apply transaction: %s", strerror(-r));
1342 goto rollback;
1343 }
1344
1345 assert(hashmap_isempty(m->transaction_jobs));
1346 assert(!m->transaction_anchor);
1347
1348 return 0;
1349
1350 rollback:
1351 transaction_abort(m);
1352 return r;
1353 }
1354
1355 static Job* transaction_add_one_job(Manager *m, JobType type, Unit *unit, bool override, bool *is_new) {
1356 Job *j, *f;
1357
1358 assert(m);
1359 assert(unit);
1360
1361 /* Looks for an existing prospective job and returns that. If
1362 * it doesn't exist it is created and added to the prospective
1363 * jobs list. */
1364
1365 f = hashmap_get(m->transaction_jobs, unit);
1366
1367 LIST_FOREACH(transaction, j, f) {
1368 assert(j->unit == unit);
1369
1370 if (j->type == type) {
1371 if (is_new)
1372 *is_new = false;
1373 return j;
1374 }
1375 }
1376
1377 if (unit->meta.job && unit->meta.job->type == type)
1378 j = unit->meta.job;
1379 else if (!(j = job_new(m, type, unit)))
1380 return NULL;
1381
1382 j->generation = 0;
1383 j->marker = NULL;
1384 j->matters_to_anchor = false;
1385 j->override = override;
1386
1387 LIST_PREPEND(Job, transaction, f, j);
1388
1389 if (hashmap_replace(m->transaction_jobs, unit, f) < 0) {
1390 job_free(j);
1391 return NULL;
1392 }
1393
1394 if (is_new)
1395 *is_new = true;
1396
1397 /* log_debug("Added job %s/%s to transaction.", unit->meta.id, job_type_to_string(type)); */
1398
1399 return j;
1400 }
1401
1402 void manager_transaction_unlink_job(Manager *m, Job *j, bool delete_dependencies) {
1403 assert(m);
1404 assert(j);
1405
1406 if (j->transaction_prev)
1407 j->transaction_prev->transaction_next = j->transaction_next;
1408 else if (j->transaction_next)
1409 hashmap_replace(m->transaction_jobs, j->unit, j->transaction_next);
1410 else
1411 hashmap_remove_value(m->transaction_jobs, j->unit, j);
1412
1413 if (j->transaction_next)
1414 j->transaction_next->transaction_prev = j->transaction_prev;
1415
1416 j->transaction_prev = j->transaction_next = NULL;
1417
1418 while (j->subject_list)
1419 job_dependency_free(j->subject_list);
1420
1421 while (j->object_list) {
1422 Job *other = j->object_list->matters ? j->object_list->subject : NULL;
1423
1424 job_dependency_free(j->object_list);
1425
1426 if (other && delete_dependencies) {
1427 log_debug("Deleting job %s/%s as dependency of job %s/%s",
1428 other->unit->meta.id, job_type_to_string(other->type),
1429 j->unit->meta.id, job_type_to_string(j->type));
1430 transaction_delete_job(m, other, delete_dependencies);
1431 }
1432 }
1433 }
1434
1435 static int transaction_add_job_and_dependencies(
1436 Manager *m,
1437 JobType type,
1438 Unit *unit,
1439 Job *by,
1440 bool matters,
1441 bool override,
1442 bool conflicts,
1443 bool ignore_requirements,
1444 bool ignore_order,
1445 DBusError *e,
1446 Job **_ret) {
1447 Job *ret;
1448 Iterator i;
1449 Unit *dep;
1450 int r;
1451 bool is_new;
1452
1453 assert(m);
1454 assert(type < _JOB_TYPE_MAX);
1455 assert(unit);
1456
1457 /* log_debug("Pulling in %s/%s from %s/%s", */
1458 /* unit->meta.id, job_type_to_string(type), */
1459 /* by ? by->unit->meta.id : "NA", */
1460 /* by ? job_type_to_string(by->type) : "NA"); */
1461
1462 if (unit->meta.load_state != UNIT_LOADED &&
1463 unit->meta.load_state != UNIT_ERROR &&
1464 unit->meta.load_state != UNIT_MASKED) {
1465 dbus_set_error(e, BUS_ERROR_LOAD_FAILED, "Unit %s is not loaded properly.", unit->meta.id);
1466 return -EINVAL;
1467 }
1468
1469 if (type != JOB_STOP && unit->meta.load_state == UNIT_ERROR) {
1470 dbus_set_error(e, BUS_ERROR_LOAD_FAILED,
1471 "Unit %s failed to load: %s. "
1472 "See system logs and 'systemctl status %s' for details.",
1473 unit->meta.id,
1474 strerror(-unit->meta.load_error),
1475 unit->meta.id);
1476 return -EINVAL;
1477 }
1478
1479 if (type != JOB_STOP && unit->meta.load_state == UNIT_MASKED) {
1480 dbus_set_error(e, BUS_ERROR_MASKED, "Unit %s is masked.", unit->meta.id);
1481 return -EINVAL;
1482 }
1483
1484 if (!unit_job_is_applicable(unit, type)) {
1485 dbus_set_error(e, BUS_ERROR_JOB_TYPE_NOT_APPLICABLE, "Job type %s is not applicable for unit %s.", job_type_to_string(type), unit->meta.id);
1486 return -EBADR;
1487 }
1488
1489 /* First add the job. */
1490 if (!(ret = transaction_add_one_job(m, type, unit, override, &is_new)))
1491 return -ENOMEM;
1492
1493 ret->ignore_order = ret->ignore_order || ignore_order;
1494
1495 /* Then, add a link to the job. */
1496 if (!job_dependency_new(by, ret, matters, conflicts))
1497 return -ENOMEM;
1498
1499 if (is_new && !ignore_requirements) {
1500 Set *following;
1501
1502 /* If we are following some other unit, make sure we
1503 * add all dependencies of everybody following. */
1504 if (unit_following_set(ret->unit, &following) > 0) {
1505 SET_FOREACH(dep, following, i)
1506 if ((r = transaction_add_job_and_dependencies(m, type, dep, ret, false, override, false, false, ignore_order, e, NULL)) < 0) {
1507 log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->meta.id, bus_error(e, r));
1508
1509 if (e)
1510 dbus_error_free(e);
1511 }
1512
1513 set_free(following);
1514 }
1515
1516 /* Finally, recursively add in all dependencies. */
1517 if (type == JOB_START || type == JOB_RELOAD_OR_START) {
1518 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_REQUIRES], i)
1519 if ((r = transaction_add_job_and_dependencies(m, JOB_START, dep, ret, true, override, false, false, ignore_order, e, NULL)) < 0) {
1520 if (r != -EBADR)
1521 goto fail;
1522
1523 if (e)
1524 dbus_error_free(e);
1525 }
1526
1527 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_BIND_TO], i)
1528 if ((r = transaction_add_job_and_dependencies(m, JOB_START, dep, ret, true, override, false, false, ignore_order, e, NULL)) < 0) {
1529
1530 if (r != -EBADR)
1531 goto fail;
1532
1533 if (e)
1534 dbus_error_free(e);
1535 }
1536
1537 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_REQUIRES_OVERRIDABLE], i)
1538 if ((r = transaction_add_job_and_dependencies(m, JOB_START, dep, ret, !override, override, false, false, ignore_order, e, NULL)) < 0) {
1539 log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->meta.id, bus_error(e, r));
1540
1541 if (e)
1542 dbus_error_free(e);
1543 }
1544
1545 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_WANTS], i)
1546 if ((r = transaction_add_job_and_dependencies(m, JOB_START, dep, ret, false, false, false, false, ignore_order, e, NULL)) < 0) {
1547 log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->meta.id, bus_error(e, r));
1548
1549 if (e)
1550 dbus_error_free(e);
1551 }
1552
1553 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_REQUISITE], i)
1554 if ((r = transaction_add_job_and_dependencies(m, JOB_VERIFY_ACTIVE, dep, ret, true, override, false, false, ignore_order, e, NULL)) < 0) {
1555
1556 if (r != -EBADR)
1557 goto fail;
1558
1559 if (e)
1560 dbus_error_free(e);
1561 }
1562
1563 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_REQUISITE_OVERRIDABLE], i)
1564 if ((r = transaction_add_job_and_dependencies(m, JOB_VERIFY_ACTIVE, dep, ret, !override, override, false, false, ignore_order, e, NULL)) < 0) {
1565 log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->meta.id, bus_error(e, r));
1566
1567 if (e)
1568 dbus_error_free(e);
1569 }
1570
1571 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_CONFLICTS], i)
1572 if ((r = transaction_add_job_and_dependencies(m, JOB_STOP, dep, ret, true, override, true, false, ignore_order, e, NULL)) < 0) {
1573
1574 if (r != -EBADR)
1575 goto fail;
1576
1577 if (e)
1578 dbus_error_free(e);
1579 }
1580
1581 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_CONFLICTED_BY], i)
1582 if ((r = transaction_add_job_and_dependencies(m, JOB_STOP, dep, ret, false, override, false, false, ignore_order, e, NULL)) < 0) {
1583 log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->meta.id, bus_error(e, r));
1584
1585 if (e)
1586 dbus_error_free(e);
1587 }
1588
1589 } else if (type == JOB_STOP || type == JOB_RESTART || type == JOB_TRY_RESTART) {
1590
1591 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_REQUIRED_BY], i)
1592 if ((r = transaction_add_job_and_dependencies(m, type, dep, ret, true, override, false, false, ignore_order, e, NULL)) < 0) {
1593
1594 if (r != -EBADR)
1595 goto fail;
1596
1597 if (e)
1598 dbus_error_free(e);
1599 }
1600
1601 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_BOUND_BY], i)
1602 if ((r = transaction_add_job_and_dependencies(m, type, dep, ret, true, override, false, false, ignore_order, e, NULL)) < 0) {
1603
1604 if (r != -EBADR)
1605 goto fail;
1606
1607 if (e)
1608 dbus_error_free(e);
1609 }
1610 }
1611
1612 /* JOB_VERIFY_ACTIVE and JOB_RELOAD require no dependency handling */
1613 }
1614
1615 if (_ret)
1616 *_ret = ret;
1617
1618 return 0;
1619
1620 fail:
1621 return r;
1622 }
1623
1624 static int transaction_add_isolate_jobs(Manager *m) {
1625 Iterator i;
1626 Unit *u;
1627 char *k;
1628 int r;
1629
1630 assert(m);
1631
1632 HASHMAP_FOREACH_KEY(u, k, m->units, i) {
1633
1634 /* ignore aliases */
1635 if (u->meta.id != k)
1636 continue;
1637
1638 if (u->meta.ignore_on_isolate)
1639 continue;
1640
1641 /* No need to stop inactive jobs */
1642 if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(u)) && !u->meta.job)
1643 continue;
1644
1645 /* Is there already something listed for this? */
1646 if (hashmap_get(m->transaction_jobs, u))
1647 continue;
1648
1649 if ((r = transaction_add_job_and_dependencies(m, JOB_STOP, u, NULL, true, false, false, false, false, NULL, NULL)) < 0)
1650 log_warning("Cannot add isolate job for unit %s, ignoring: %s", u->meta.id, strerror(-r));
1651 }
1652
1653 return 0;
1654 }
1655
1656 int manager_add_job(Manager *m, JobType type, Unit *unit, JobMode mode, bool override, DBusError *e, Job **_ret) {
1657 int r;
1658 Job *ret;
1659
1660 assert(m);
1661 assert(type < _JOB_TYPE_MAX);
1662 assert(unit);
1663 assert(mode < _JOB_MODE_MAX);
1664
1665 if (mode == JOB_ISOLATE && type != JOB_START) {
1666 dbus_set_error(e, BUS_ERROR_INVALID_JOB_MODE, "Isolate is only valid for start.");
1667 return -EINVAL;
1668 }
1669
1670 if (mode == JOB_ISOLATE && !unit->meta.allow_isolate) {
1671 dbus_set_error(e, BUS_ERROR_NO_ISOLATION, "Operation refused, unit may not be isolated.");
1672 return -EPERM;
1673 }
1674
1675 log_debug("Trying to enqueue job %s/%s/%s", unit->meta.id, job_type_to_string(type), job_mode_to_string(mode));
1676
1677 if ((r = transaction_add_job_and_dependencies(m, type, unit, NULL, true, override, false,
1678 mode == JOB_IGNORE_DEPENDENCIES || mode == JOB_IGNORE_REQUIREMENTS,
1679 mode == JOB_IGNORE_DEPENDENCIES, e, &ret)) < 0) {
1680 transaction_abort(m);
1681 return r;
1682 }
1683
1684 if (mode == JOB_ISOLATE)
1685 if ((r = transaction_add_isolate_jobs(m)) < 0) {
1686 transaction_abort(m);
1687 return r;
1688 }
1689
1690 if ((r = transaction_activate(m, mode, e)) < 0)
1691 return r;
1692
1693 log_debug("Enqueued job %s/%s as %u", unit->meta.id, job_type_to_string(type), (unsigned) ret->id);
1694
1695 if (_ret)
1696 *_ret = ret;
1697
1698 return 0;
1699 }
1700
1701 int manager_add_job_by_name(Manager *m, JobType type, const char *name, JobMode mode, bool override, DBusError *e, Job **_ret) {
1702 Unit *unit;
1703 int r;
1704
1705 assert(m);
1706 assert(type < _JOB_TYPE_MAX);
1707 assert(name);
1708 assert(mode < _JOB_MODE_MAX);
1709
1710 if ((r = manager_load_unit(m, name, NULL, NULL, &unit)) < 0)
1711 return r;
1712
1713 return manager_add_job(m, type, unit, mode, override, e, _ret);
1714 }
1715
1716 Job *manager_get_job(Manager *m, uint32_t id) {
1717 assert(m);
1718
1719 return hashmap_get(m->jobs, UINT32_TO_PTR(id));
1720 }
1721
1722 Unit *manager_get_unit(Manager *m, const char *name) {
1723 assert(m);
1724 assert(name);
1725
1726 return hashmap_get(m->units, name);
1727 }
1728
1729 unsigned manager_dispatch_load_queue(Manager *m) {
1730 Meta *meta;
1731 unsigned n = 0;
1732
1733 assert(m);
1734
1735 /* Make sure we are not run recursively */
1736 if (m->dispatching_load_queue)
1737 return 0;
1738
1739 m->dispatching_load_queue = true;
1740
1741 /* Dispatches the load queue. Takes a unit from the queue and
1742 * tries to load its data until the queue is empty */
1743
1744 while ((meta = m->load_queue)) {
1745 assert(meta->in_load_queue);
1746
1747 unit_load((Unit*) meta);
1748 n++;
1749 }
1750
1751 m->dispatching_load_queue = false;
1752 return n;
1753 }
1754
1755 int manager_load_unit_prepare(Manager *m, const char *name, const char *path, DBusError *e, Unit **_ret) {
1756 Unit *ret;
1757 int r;
1758
1759 assert(m);
1760 assert(name || path);
1761
1762 /* This will prepare the unit for loading, but not actually
1763 * load anything from disk. */
1764
1765 if (path && !is_path(path)) {
1766 dbus_set_error(e, BUS_ERROR_INVALID_PATH, "Path %s is not absolute.", path);
1767 return -EINVAL;
1768 }
1769
1770 if (!name)
1771 name = file_name_from_path(path);
1772
1773 if (!unit_name_is_valid(name, false)) {
1774 dbus_set_error(e, BUS_ERROR_INVALID_NAME, "Unit name %s is not valid.", name);
1775 return -EINVAL;
1776 }
1777
1778 if ((ret = manager_get_unit(m, name))) {
1779 if (_ret) *_ret = ret;
1780 return 1;
1781 }
1782
1783 if (!(ret = unit_new(m)))
1784 return -ENOMEM;
1785
1786 if (path)
1787 if (!(ret->meta.fragment_path = strdup(path))) {
1788 unit_free(ret);
1789 return -ENOMEM;
1790 }
1791
1792 if ((r = unit_add_name(ret, name)) < 0) {
1793 unit_free(ret);
1794 return r;
1795 }
1796
1797 unit_add_to_load_queue(ret);
1798 unit_add_to_dbus_queue(ret);
1799 unit_add_to_gc_queue(ret);
1800
1801 if (_ret)
1802 *_ret = ret;
1803
1804 return 0;
1805 }
1806
1807 int manager_load_unit(Manager *m, const char *name, const char *path, DBusError *e, Unit **_ret) {
1808 int r;
1809
1810 assert(m);
1811
1812 /* This will load the service information files, but not actually
1813 * start any services or anything. */
1814
1815 if ((r = manager_load_unit_prepare(m, name, path, e, _ret)) != 0)
1816 return r;
1817
1818 manager_dispatch_load_queue(m);
1819
1820 if (_ret)
1821 *_ret = unit_follow_merge(*_ret);
1822
1823 return 0;
1824 }
1825
1826 void manager_dump_jobs(Manager *s, FILE *f, const char *prefix) {
1827 Iterator i;
1828 Job *j;
1829
1830 assert(s);
1831 assert(f);
1832
1833 HASHMAP_FOREACH(j, s->jobs, i)
1834 job_dump(j, f, prefix);
1835 }
1836
1837 void manager_dump_units(Manager *s, FILE *f, const char *prefix) {
1838 Iterator i;
1839 Unit *u;
1840 const char *t;
1841
1842 assert(s);
1843 assert(f);
1844
1845 HASHMAP_FOREACH_KEY(u, t, s->units, i)
1846 if (u->meta.id == t)
1847 unit_dump(u, f, prefix);
1848 }
1849
1850 void manager_clear_jobs(Manager *m) {
1851 Job *j;
1852
1853 assert(m);
1854
1855 transaction_abort(m);
1856
1857 while ((j = hashmap_first(m->jobs)))
1858 job_finish_and_invalidate(j, JOB_CANCELED);
1859 }
1860
1861 unsigned manager_dispatch_run_queue(Manager *m) {
1862 Job *j;
1863 unsigned n = 0;
1864
1865 if (m->dispatching_run_queue)
1866 return 0;
1867
1868 m->dispatching_run_queue = true;
1869
1870 while ((j = m->run_queue)) {
1871 assert(j->installed);
1872 assert(j->in_run_queue);
1873
1874 job_run_and_invalidate(j);
1875 n++;
1876 }
1877
1878 m->dispatching_run_queue = false;
1879 return n;
1880 }
1881
1882 unsigned manager_dispatch_dbus_queue(Manager *m) {
1883 Job *j;
1884 Meta *meta;
1885 unsigned n = 0;
1886
1887 assert(m);
1888
1889 if (m->dispatching_dbus_queue)
1890 return 0;
1891
1892 m->dispatching_dbus_queue = true;
1893
1894 while ((meta = m->dbus_unit_queue)) {
1895 assert(meta->in_dbus_queue);
1896
1897 bus_unit_send_change_signal((Unit*) meta);
1898 n++;
1899 }
1900
1901 while ((j = m->dbus_job_queue)) {
1902 assert(j->in_dbus_queue);
1903
1904 bus_job_send_change_signal(j);
1905 n++;
1906 }
1907
1908 m->dispatching_dbus_queue = false;
1909 return n;
1910 }
1911
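/* Drains all queued notification datagrams from the notify socket. Each
 * message must carry SCM_CREDENTIALS so the sender PID can be mapped to a
 * unit (via watch_pids or its cgroup); the payload is split into
 * newline-separated "KEY=VALUE" tags and passed to the unit's
 * notify_message() hook. */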
1912 static int manager_process_notify_fd(Manager *m) {
1913 ssize_t n;
1914
1915 assert(m);
1916
1917 for (;;) {
1918 char buf[4096];
1919 struct msghdr msghdr;
1920 struct iovec iovec;
1921 struct ucred *ucred;
1922 union {
1923 struct cmsghdr cmsghdr;
1924 uint8_t buf[CMSG_SPACE(sizeof(struct ucred))];
1925 } control;
1926 Unit *u;
1927 char **tags;
1928
1929 zero(iovec);
1930 iovec.iov_base = buf;
1931 iovec.iov_len = sizeof(buf)-1;
1932
1933 zero(control);
1934 zero(msghdr);
1935 msghdr.msg_iov = &iovec;
1936 msghdr.msg_iovlen = 1;
1937 msghdr.msg_control = &control;
1938 msghdr.msg_controllen = sizeof(control);
1939
1940 if ((n = recvmsg(m->notify_watch.fd, &msghdr, MSG_DONTWAIT)) <= 0) {
1941 if (n >= 0)
1942 return -EIO;
1943
1944 if (errno == EAGAIN || errno == EINTR)
1945 break;
1946
1947 return -errno;
1948 }
1949
1950 if (msghdr.msg_controllen < CMSG_LEN(sizeof(struct ucred)) ||
1951 control.cmsghdr.cmsg_level != SOL_SOCKET ||
1952 control.cmsghdr.cmsg_type != SCM_CREDENTIALS ||
1953 control.cmsghdr.cmsg_len != CMSG_LEN(sizeof(struct ucred))) {
1954 log_warning("Received notify message without credentials. Ignoring.");
1955 continue;
1956 }
1957
1958 ucred = (struct ucred*) CMSG_DATA(&control.cmsghdr);
1959
1960 if (!(u = hashmap_get(m->watch_pids, LONG_TO_PTR(ucred->pid))))
1961 if (!(u = cgroup_unit_by_pid(m, ucred->pid))) {
1962 log_warning("Cannot find unit for notify message of PID %lu.", (unsigned long) ucred->pid);
1963 continue;
1964 }
1965
1966 assert((size_t) n < sizeof(buf));
1967 buf[n] = 0;
1968 if (!(tags = strv_split(buf, "\n\r")))
1969 return -ENOMEM;
1970
1971 log_debug("Got notification message for unit %s", u->meta.id);
1972
1973 if (UNIT_VTABLE(u)->notify_message)
1974 UNIT_VTABLE(u)->notify_message(u, ucred->pid, tags);
1975
1976 strv_free(tags);
1977 }
1978
1979 return 0;
1980 }
1981
1982 static int manager_dispatch_sigchld(Manager *m) {
1983 assert(m);
1984
1985 for (;;) {
1986 siginfo_t si;
1987 Unit *u;
1988 int r;
1989
1990 zero(si);
1991
1992 /* First we call waitid() for a PID and do not reap the
1993 * zombie. That way we can still access /proc/$PID for
1994 * it while it is a zombie. */
1995 if (waitid(P_ALL, 0, &si, WEXITED|WNOHANG|WNOWAIT) < 0) {
1996
1997 if (errno == ECHILD)
1998 break;
1999
2000 if (errno == EINTR)
2001 continue;
2002
2003 return -errno;
2004 }
2005
2006 if (si.si_pid <= 0)
2007 break;
2008
2009 if (si.si_code == CLD_EXITED || si.si_code == CLD_KILLED || si.si_code == CLD_DUMPED) {
2010 char *name = NULL;
2011
2012 get_process_name(si.si_pid, &name);
2013 log_debug("Got SIGCHLD for process %lu (%s)", (unsigned long) si.si_pid, strna(name));
2014 free(name);
2015 }
2016
2017 /* Let's flush any message the dying child might still
2018 * have queued for us. This ensures that the process
2019 * still exists in /proc so that we can figure out
2020 * which cgroup and hence unit it belongs to. */
2021 if ((r = manager_process_notify_fd(m)) < 0)
2022 return r;
2023
2024 /* And now figure out the unit this belongs to */
2025 if (!(u = hashmap_get(m->watch_pids, LONG_TO_PTR(si.si_pid))))
2026 u = cgroup_unit_by_pid(m, si.si_pid);
2027
2028 /* And now, we actually reap the zombie. */
2029 if (waitid(P_PID, si.si_pid, &si, WEXITED) < 0) {
2030 if (errno == EINTR)
2031 continue;
2032
2033 return -errno;
2034 }
2035
2036 if (si.si_code != CLD_EXITED && si.si_code != CLD_KILLED && si.si_code != CLD_DUMPED)
2037 continue;
2038
2039 log_debug("Child %lu died (code=%s, status=%i/%s)",
2040 (long unsigned) si.si_pid,
2041 sigchld_code_to_string(si.si_code),
2042 si.si_status,
2043 strna(si.si_code == CLD_EXITED
2044 ? exit_status_to_string(si.si_status, EXIT_STATUS_FULL)
2045 : signal_to_string(si.si_status)));
2046
2047 if (!u)
2048 continue;
2049
2050 log_debug("Child %lu belongs to %s", (long unsigned) si.si_pid, u->meta.id);
2051
2052 hashmap_remove(m->watch_pids, LONG_TO_PTR(si.si_pid));
2053 UNIT_VTABLE(u)->sigchld_event(u, si.si_pid, si.si_code, si.si_status);
2054 }
2055
2056 return 0;
2057 }
2058
2059 static int manager_start_target(Manager *m, const char *name, JobMode mode) {
2060 int r;
2061 DBusError error;
2062
2063 dbus_error_init(&error);
2064
2065 log_debug("Activating special unit %s", name);
2066
2067 if ((r = manager_add_job_by_name(m, JOB_START, name, mode, true, &error, NULL)) < 0)
2068 log_error("Failed to enqueue %s job: %s", name, bus_error(&error, r));
2069
2070 dbus_error_free(&error);
2071
2072 return r;
2073 }
2074
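/* Reads signalfd_siginfo records from the signalfd and dispatches on the
 * signal number: SIGCHLD merely notes that dead children need collecting,
 * SIGTERM/SIGINT/SIGHUP map to manager exit codes or special targets, and
 * the SIGRTMIN+n signals select targets and exit codes via the lookup
 * tables at the bottom of the function. */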
2075 static int manager_process_signal_fd(Manager *m) {
2076 ssize_t n;
2077 struct signalfd_siginfo sfsi;
2078 bool sigchld = false;
2079
2080 assert(m);
2081
2082 for (;;) {
2083 if ((n = read(m->signal_watch.fd, &sfsi, sizeof(sfsi))) != sizeof(sfsi)) {
2084
2085 if (n >= 0)
2086 return -EIO;
2087
2088 if (errno == EINTR || errno == EAGAIN)
2089 break;
2090
2091 return -errno;
2092 }
2093
2094 if (sfsi.ssi_pid > 0) {
2095 char *p = NULL;
2096
2097 get_process_name(sfsi.ssi_pid, &p);
2098
2099 log_debug("Received SIG%s from PID %lu (%s).",
2100 strna(signal_to_string(sfsi.ssi_signo)),
2101 (unsigned long) sfsi.ssi_pid, strna(p));
2102 free(p);
2103 } else
2104 log_debug("Received SIG%s.", strna(signal_to_string(sfsi.ssi_signo)));
2105
2106 switch (sfsi.ssi_signo) {
2107
2108 case SIGCHLD:
2109 sigchld = true;
2110 break;
2111
2112 case SIGTERM:
2113 if (m->running_as == MANAGER_SYSTEM) {
2114 /* This is for compatibility with the
2115 * original sysvinit */
2116 m->exit_code = MANAGER_REEXECUTE;
2117 break;
2118 }
2119
2120 /* Fall through */
2121
2122 case SIGINT:
2123 if (m->running_as == MANAGER_SYSTEM) {
2124 manager_start_target(m, SPECIAL_CTRL_ALT_DEL_TARGET, JOB_REPLACE);
2125 break;
2126 }
2127
2128 /* Run the exit target if there is one, if not, just exit. */
2129 if (manager_start_target(m, SPECIAL_EXIT_TARGET, JOB_REPLACE) < 0) {
2130 m->exit_code = MANAGER_EXIT;
2131 return 0;
2132 }
2133
2134 break;
2135
2136 case SIGWINCH:
2137 if (m->running_as == MANAGER_SYSTEM)
2138 manager_start_target(m, SPECIAL_KBREQUEST_TARGET, JOB_REPLACE);
2139
2140 /* This is a nop on non-init */
2141 break;
2142
2143 case SIGPWR:
2144 if (m->running_as == MANAGER_SYSTEM)
2145 manager_start_target(m, SPECIAL_SIGPWR_TARGET, JOB_REPLACE);
2146
2147 /* This is a nop on non-init */
2148 break;
2149
2150 case SIGUSR1: {
2151 Unit *u;
2152
2153 u = manager_get_unit(m, SPECIAL_DBUS_SERVICE);
2154
2155 if (!u || UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u))) {
2156 log_info("Trying to reconnect to bus...");
2157 bus_init(m, true);
2158 }
2159
2160 if (!u || !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u))) {
2161 log_info("Loading D-Bus service...");
2162 manager_start_target(m, SPECIAL_DBUS_SERVICE, JOB_REPLACE);
2163 }
2164
2165 break;
2166 }
2167
2168 case SIGUSR2: {
2169 FILE *f;
2170 char *dump = NULL;
2171 size_t size;
2172
2173 if (!(f = open_memstream(&dump, &size))) {
2174 log_warning("Failed to allocate memory stream.");
2175 break;
2176 }
2177
2178 manager_dump_units(m, f, "\t");
2179 manager_dump_jobs(m, f, "\t");
2180
2181 if (ferror(f)) {
2182 fclose(f);
2183 free(dump);
2184 log_warning("Failed to write status stream");
2185 break;
2186 }
2187
2188 fclose(f);
2189 log_dump(LOG_INFO, dump);
2190 free(dump);
2191
2192 break;
2193 }
2194
2195 case SIGHUP:
2196 m->exit_code = MANAGER_RELOAD;
2197 break;
2198
2199 default: {
2200 /* Starting SIGRTMIN+0 */
2201 static const char * const target_table[] = {
2202 [0] = SPECIAL_DEFAULT_TARGET,
2203 [1] = SPECIAL_RESCUE_TARGET,
2204 [2] = SPECIAL_EMERGENCY_TARGET,
2205 [3] = SPECIAL_HALT_TARGET,
2206 [4] = SPECIAL_POWEROFF_TARGET,
2207 [5] = SPECIAL_REBOOT_TARGET,
2208 [6] = SPECIAL_KEXEC_TARGET
2209 };
2210
2211 /* Starting at SIGRTMIN+13, so that halt.target (+3) and the halt exit code (+13) are 10 apart */
2212 static const ManagerExitCode code_table[] = {
2213 [0] = MANAGER_HALT,
2214 [1] = MANAGER_POWEROFF,
2215 [2] = MANAGER_REBOOT,
2216 [3] = MANAGER_KEXEC
2217 };
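/* Illustrative use of these tables (shell syntax shown only as an
 * example; the actual signal numbers depend on SIGRTMIN):
 *   kill -RTMIN+3  1   -> enqueue a start job for halt.target
 *   kill -RTMIN+13 1   -> set the exit code to MANAGER_HALT directly
 * which is why the two tables are offset by 10 from each other. */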
2218
2219 if ((int) sfsi.ssi_signo >= SIGRTMIN+0 &&
2220 (int) sfsi.ssi_signo < SIGRTMIN+(int) ELEMENTSOF(target_table)) {
2221 manager_start_target(m, target_table[sfsi.ssi_signo - SIGRTMIN],
2222 ((int) sfsi.ssi_signo == SIGRTMIN+1 || (int) sfsi.ssi_signo == SIGRTMIN+2) ? JOB_ISOLATE : JOB_REPLACE);
2223 break;
2224 }
2225
2226 if ((int) sfsi.ssi_signo >= SIGRTMIN+13 &&
2227 (int) sfsi.ssi_signo < SIGRTMIN+13+(int) ELEMENTSOF(code_table)) {
2228 m->exit_code = code_table[sfsi.ssi_signo - SIGRTMIN - 13];
2229 break;
2230 }
2231
2232 switch (sfsi.ssi_signo - SIGRTMIN) {
2233
2234 case 20:
2235 log_debug("Enabling showing of status.");
2236 m->show_status = true;
2237 break;
2238
2239 case 21:
2240 log_debug("Disabling showing of status.");
2241 m->show_status = false;
2242 break;
2243
2244 default:
2245 log_warning("Got unhandled signal <%s>.", strna(signal_to_string(sfsi.ssi_signo)));
2246 }
2247 }
2248 }
2249 }
2250
2251 if (sigchld)
2252 return manager_dispatch_sigchld(m);
2253
2254 return 0;
2255 }
2256
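/* Dispatch a single epoll event to the right handler, based on the
 * Watch structure stored in the event's data pointer. */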
2257 static int process_event(Manager *m, struct epoll_event *ev) {
2258 int r;
2259 Watch *w;
2260
2261 assert(m);
2262 assert(ev);
2263
2264 assert_se(w = ev->data.ptr);
2265
2266 if (w->type == WATCH_INVALID)
2267 return 0;
2268
2269 switch (w->type) {
2270
2271 case WATCH_SIGNAL:
2272
2273 /* An incoming signal? */
2274 if (ev->events != EPOLLIN)
2275 return -EINVAL;
2276
2277 if ((r = manager_process_signal_fd(m)) < 0)
2278 return r;
2279
2280 break;
2281
2282 case WATCH_NOTIFY:
2283
2284 /* An incoming daemon notification event? */
2285 if (ev->events != EPOLLIN)
2286 return -EINVAL;
2287
2288 if ((r = manager_process_notify_fd(m)) < 0)
2289 return r;
2290
2291 break;
2292
2293 case WATCH_FD:
2294
2295 /* Some fd event, to be dispatched to the units */
2296 UNIT_VTABLE(w->data.unit)->fd_event(w->data.unit, w->fd, ev->events, w);
2297 break;
2298
2299 case WATCH_UNIT_TIMER:
2300 case WATCH_JOB_TIMER: {
2301 uint64_t v;
2302 ssize_t k;
2303
2304 /* Some timer event, to be dispatched to the units */
2305 if ((k = read(w->fd, &v, sizeof(v))) != sizeof(v)) {
2306
2307 if (k < 0 && (errno == EINTR || errno == EAGAIN))
2308 break;
2309
2310 return k < 0 ? -errno : -EIO;
2311 }
2312
2313 if (w->type == WATCH_UNIT_TIMER)
2314 UNIT_VTABLE(w->data.unit)->timer_event(w->data.unit, v, w);
2315 else
2316 job_timer_event(w->data.job, v, w);
2317 break;
2318 }
2319
2320 case WATCH_MOUNT:
2321 /* Some mount table change, intended for the mount subsystem */
2322 mount_fd_event(m, ev->events);
2323 break;
2324
2325 case WATCH_SWAP:
2326 /* Some swap table change, intended for the swap subsystem */
2327 swap_fd_event(m, ev->events);
2328 break;
2329
2330 case WATCH_UDEV:
2331 /* Some notification from udev, intended for the device subsystem */
2332 device_fd_event(m, ev->events);
2333 break;
2334
2335 case WATCH_DBUS_WATCH:
2336 bus_watch_event(m, w, ev->events);
2337 break;
2338
2339 case WATCH_DBUS_TIMEOUT:
2340 bus_timeout_event(m, w, ev->events);
2341 break;
2342
2343 default:
2344 log_error("event type=%i", w->type);
2345 assert_not_reached("Unknown epoll event type.");
2346 }
2347
2348 return 0;
2349 }
2350
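/* The main event loop: first drain the internal work queues (load, run,
 * D-Bus, cleanup, GC, swap reload) until nothing is pending, then block
 * in epoll_wait() for exactly one event and dispatch it. The rate limit
 * (50000 iterations per second) throttles us with a one second sleep if
 * the loop ever starts spinning. */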
2351 int manager_loop(Manager *m) {
2352 int r;
2353
2354 RATELIMIT_DEFINE(rl, 1*USEC_PER_SEC, 50000);
2355
2356 assert(m);
2357 m->exit_code = MANAGER_RUNNING;
2358
2359 /* Release the path cache */
2360 set_free_free(m->unit_path_cache);
2361 m->unit_path_cache = NULL;
2362
2363 manager_check_finished(m);
2364
2365 /* There might still be some zombies hanging around from
2366 * before we were exec()'ed. Let's reap them */
2367 if ((r = manager_dispatch_sigchld(m)) < 0)
2368 return r;
2369
2370 while (m->exit_code == MANAGER_RUNNING) {
2371 struct epoll_event event;
2372 int n;
2373
2374 if (!ratelimit_test(&rl)) {
2375 /* Yay, something is going seriously wrong, pause a little */
2376 log_warning("Looping too fast. Throttling execution a little.");
2377 sleep(1);
2378 }
2379
2380 if (manager_dispatch_load_queue(m) > 0)
2381 continue;
2382
2383 if (manager_dispatch_run_queue(m) > 0)
2384 continue;
2385
2386 if (bus_dispatch(m) > 0)
2387 continue;
2388
2389 if (manager_dispatch_cleanup_queue(m) > 0)
2390 continue;
2391
2392 if (manager_dispatch_gc_queue(m) > 0)
2393 continue;
2394
2395 if (manager_dispatch_dbus_queue(m) > 0)
2396 continue;
2397
2398 if (swap_dispatch_reload(m) > 0)
2399 continue;
2400
2401 if ((n = epoll_wait(m->epoll_fd, &event, 1, -1)) < 0) {
2402
2403 if (errno == EINTR)
2404 continue;
2405
2406 return -errno;
2407 }
2408
2409 assert(n == 1);
2410
2411 if ((r = process_event(m, &event)) < 0)
2412 return r;
2413 }
2414
2415 return m->exit_code;
2416 }
2417
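/* Map a D-Bus object path of the form
 * /org/freedesktop/systemd1/unit/<escaped-name> back to a Unit. The 31
 * characters of the fixed prefix are skipped and the rest is unescaped
 * with bus_path_unescape(), e.g. "avahi_2ddaemon_2eservice" would map
 * to "avahi-daemon.service" (example name, assuming the usual _xx hex
 * escaping). */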
2418 int manager_get_unit_from_dbus_path(Manager *m, const char *s, Unit **_u) {
2419 char *n;
2420 Unit *u;
2421
2422 assert(m);
2423 assert(s);
2424 assert(_u);
2425
2426 if (!startswith(s, "/org/freedesktop/systemd1/unit/"))
2427 return -EINVAL;
2428
2429 if (!(n = bus_path_unescape(s+31)))
2430 return -ENOMEM;
2431
2432 u = manager_get_unit(m, n);
2433 free(n);
2434
2435 if (!u)
2436 return -ENOENT;
2437
2438 *_u = u;
2439
2440 return 0;
2441 }
2442
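/* Same idea for jobs: paths look like /org/freedesktop/systemd1/job/<id>,
 * where <id> is the decimal job id parsed after the 30-character prefix. */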
2443 int manager_get_job_from_dbus_path(Manager *m, const char *s, Job **_j) {
2444 Job *j;
2445 unsigned id;
2446 int r;
2447
2448 assert(m);
2449 assert(s);
2450 assert(_j);
2451
2452 if (!startswith(s, "/org/freedesktop/systemd1/job/"))
2453 return -EINVAL;
2454
2455 if ((r = safe_atou(s + 30, &id)) < 0)
2456 return r;
2457
2458 if (!(j = manager_get_job(m, id)))
2459 return -ENOENT;
2460
2461 *_j = j;
2462
2463 return 0;
2464 }
2465
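/* Send an audit record for a service start/stop. This happens only for
 * the system instance, only for service units, and not while we are
 * deserializing previously saved state. If the kernel refuses with
 * EPERM the audit fd is closed for good so we do not keep retrying. */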
2466 void manager_send_unit_audit(Manager *m, Unit *u, int type, bool success) {
2467
2468 #ifdef HAVE_AUDIT
2469 char *p;
2470
2471 if (m->audit_fd < 0)
2472 return;
2473
2474 /* Don't generate audit events if the service was already
2475 * started and we're just deserializing */
2476 if (m->n_deserializing > 0)
2477 return;
2478
2479 if (m->running_as != MANAGER_SYSTEM)
2480 return;
2481
2482 if (u->meta.type != UNIT_SERVICE)
2483 return;
2484
2485 if (!(p = unit_name_to_prefix_and_instance(u->meta.id))) {
2486 log_error("Failed to allocate unit name for audit message: %s", strerror(ENOMEM));
2487 return;
2488 }
2489
2490 if (audit_log_user_comm_message(m->audit_fd, type, "", p, NULL, NULL, NULL, success) < 0) {
2491 log_warning("Failed to send audit message: %m");
2492
2493 if (errno == EPERM) {
2494 /* We aren't allowed to send audit messages?
2495 * Then let's not retry again, to avoid
2496 * spamming the user with the same
2497 * message over and over. */
2498
2499 audit_close(m->audit_fd);
2500 m->audit_fd = -1;
2501 }
2502 }
2503
2504 free(p);
2505 #endif
2506
2507 }
2508
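/* Tell Plymouth (the boot splash daemon) that a service, mount or swap
 * unit changed state, by sending an update record to its abstract
 * AF_UNIX socket. The socket is non-blocking on purpose: if Plymouth is
 * not around or not reading, the message is simply dropped. */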
2509 void manager_send_unit_plymouth(Manager *m, Unit *u) {
2510 int fd = -1;
2511 union sockaddr_union sa;
2512 int n = 0;
2513 char *message = NULL;
2514
2515 /* Don't generate plymouth events if the service was already
2516 * started and we're just deserializing */
2517 if (m->n_deserializing > 0)
2518 return;
2519
2520 if (m->running_as != MANAGER_SYSTEM)
2521 return;
2522
2523 if (u->meta.type != UNIT_SERVICE &&
2524 u->meta.type != UNIT_MOUNT &&
2525 u->meta.type != UNIT_SWAP)
2526 return;
2527
2528 /* We set SOCK_NONBLOCK here so that we rather drop the
2529 * message than wait for plymouth */
2530 if ((fd = socket(AF_UNIX, SOCK_STREAM|SOCK_CLOEXEC|SOCK_NONBLOCK, 0)) < 0) {
2531 log_error("socket() failed: %m");
2532 return;
2533 }
2534
2535 zero(sa);
2536 sa.sa.sa_family = AF_UNIX;
2537 strncpy(sa.un.sun_path+1, "/org/freedesktop/plymouthd", sizeof(sa.un.sun_path)-1);
2538 if (connect(fd, &sa.sa, offsetof(struct sockaddr_un, sun_path) + 1 + strlen(sa.un.sun_path+1)) < 0) {
2539
2540 if (errno != EPIPE &&
2541 errno != EAGAIN &&
2542 errno != ENOENT &&
2543 errno != ECONNREFUSED &&
2544 errno != ECONNRESET &&
2545 errno != ECONNABORTED)
2546 log_error("connect() failed: %m");
2547
2548 goto finish;
2549 }
2550
2551 if (asprintf(&message, "U\002%c%s%n", (int) (strlen(u->meta.id) + 1), u->meta.id, &n) < 0) {
2552 log_error("Out of memory");
2553 goto finish;
2554 }
2555
2556 errno = 0;
2557 if (write(fd, message, n + 1) != n + 1) {
2558
2559 if (errno != EPIPE &&
2560 errno != EAGAIN &&
2561 errno != ENOENT &&
2562 errno != ECONNREFUSED &&
2563 errno != ECONNRESET &&
2564 errno != ECONNABORTED)
2565 log_error("Failed to write Plymouth message: %m");
2566
2567 goto finish;
2568 }
2569
2570 finish:
2571 if (fd >= 0)
2572 close_nointr_nofail(fd);
2573
2574 free(message);
2575 }
2576
2577 void manager_dispatch_bus_name_owner_changed(
2578 Manager *m,
2579 const char *name,
2580 const char* old_owner,
2581 const char *new_owner) {
2582
2583 Unit *u;
2584
2585 assert(m);
2586 assert(name);
2587
2588 if (!(u = hashmap_get(m->watch_bus, name)))
2589 return;
2590
2591 UNIT_VTABLE(u)->bus_name_owner_change(u, name, old_owner, new_owner);
2592 }
2593
2594 void manager_dispatch_bus_query_pid_done(
2595 Manager *m,
2596 const char *name,
2597 pid_t pid) {
2598
2599 Unit *u;
2600
2601 assert(m);
2602 assert(name);
2603 assert(pid >= 1);
2604
2605 if (!(u = hashmap_get(m->watch_bus, name)))
2606 return;
2607
2608 UNIT_VTABLE(u)->bus_query_pid_done(u, name, pid);
2609 }
2610
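/* Create an unlinked temporary file used to carry serialized manager
 * state across reload and re-execution; it lives only as long as the
 * open file descriptor does. */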
2611 int manager_open_serialization(Manager *m, FILE **_f) {
2612 char *path = NULL;
2613 mode_t saved_umask;
2614 int fd;
2615 FILE *f;
2616
2617 assert(_f);
2618
2619 if (m->running_as == MANAGER_SYSTEM)
2620 asprintf(&path, "/run/systemd/dump-%lu-XXXXXX", (unsigned long) getpid());
2621 else
2622 asprintf(&path, "/tmp/systemd-dump-%lu-XXXXXX", (unsigned long) getpid());
2623
2624 if (!path)
2625 return -ENOMEM;
2626
2627 saved_umask = umask(0077);
2628 fd = mkostemp(path, O_RDWR|O_CLOEXEC);
2629 umask(saved_umask);
2630
2631 if (fd < 0) {
2632 free(path);
2633 return -errno;
2634 }
2635
2636 unlink(path);
2637
2638 log_debug("Serializing state to %s", path);
2639 free(path);
2640
2641 if (!(f = fdopen(fd, "w+")))
2642 return -errno;
2643
2644 *_f = f;
2645
2646 return 0;
2647 }
2648
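/* Write the manager state to f. A sketch of the format, with made-up
 * values:
 *
 *   current-job-id=42
 *   taint-usr=no
 *   startup-timestamp=<realtime> <monotonic>
 *   <empty line>
 *   foo.service
 *   ...per-unit serialization items...
 *
 * File descriptors that must survive re-execution are parked in fds. */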
2649 int manager_serialize(Manager *m, FILE *f, FDSet *fds) {
2650 Iterator i;
2651 Unit *u;
2652 const char *t;
2653 int r;
2654
2655 assert(m);
2656 assert(f);
2657 assert(fds);
2658
2659 m->n_serializing ++;
2660
2661 fprintf(f, "current-job-id=%i\n", m->current_job_id);
2662 fprintf(f, "taint-usr=%s\n", yes_no(m->taint_usr));
2663
2664 dual_timestamp_serialize(f, "initrd-timestamp", &m->initrd_timestamp);
2665 dual_timestamp_serialize(f, "startup-timestamp", &m->startup_timestamp);
2666 dual_timestamp_serialize(f, "finish-timestamp", &m->finish_timestamp);
2667
2668 fputc('\n', f);
2669
2670 HASHMAP_FOREACH_KEY(u, t, m->units, i) {
2671 if (u->meta.id != t)
2672 continue;
2673
2674 if (!unit_can_serialize(u))
2675 continue;
2676
2677 /* Start marker */
2678 fputs(u->meta.id, f);
2679 fputc('\n', f);
2680
2681 if ((r = unit_serialize(u, f, fds)) < 0) {
2682 m->n_serializing --;
2683 return r;
2684 }
2685 }
2686
2687 assert(m->n_serializing > 0);
2688 m->n_serializing --;
2689
2690 if (ferror(f))
2691 return -EIO;
2692
2693 r = bus_fdset_add_all(m, fds);
2694 if (r < 0)
2695 return r;
2696
2697 return 0;
2698 }
2699
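/* Read back what manager_serialize() wrote: global key=value items up
 * to the first empty line, then one block per unit, each introduced by
 * the unit name on a line of its own. Unknown items are logged and
 * skipped, which keeps the format tolerant of version differences. */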
2700 int manager_deserialize(Manager *m, FILE *f, FDSet *fds) {
2701 int r = 0;
2702
2703 assert(m);
2704 assert(f);
2705
2706 log_debug("Deserializing state...");
2707
2708 m->n_deserializing ++;
2709
2710 for (;;) {
2711 char line[LINE_MAX], *l;
2712
2713 if (!fgets(line, sizeof(line), f)) {
2714 if (feof(f))
2715 r = 0;
2716 else
2717 r = -errno;
2718
2719 goto finish;
2720 }
2721
2722 char_array_0(line);
2723 l = strstrip(line);
2724
2725 if (l[0] == 0)
2726 break;
2727
2728 if (startswith(l, "current-job-id=")) {
2729 uint32_t id;
2730
2731 if (safe_atou32(l+15, &id) < 0)
2732 log_debug("Failed to parse current job id value %s", l+15);
2733 else
2734 m->current_job_id = MAX(m->current_job_id, id);
2735 } else if (startswith(l, "taint-usr=")) {
2736 int b;
2737
2738 if ((b = parse_boolean(l+10)) < 0)
2739 log_debug("Failed to parse taint /usr flag %s", l+10);
2740 else
2741 m->taint_usr = m->taint_usr || b;
2742 } else if (startswith(l, "initrd-timestamp="))
2743 dual_timestamp_deserialize(l+17, &m->initrd_timestamp);
2744 else if (startswith(l, "startup-timestamp="))
2745 dual_timestamp_deserialize(l+18, &m->startup_timestamp);
2746 else if (startswith(l, "finish-timestamp="))
2747 dual_timestamp_deserialize(l+17, &m->finish_timestamp);
2748 else
2749 log_debug("Unknown serialization item '%s'", l);
2750 }
2751
2752 for (;;) {
2753 Unit *u;
2754 char name[UNIT_NAME_MAX+2];
2755
2756 /* Start marker */
2757 if (!fgets(name, sizeof(name), f)) {
2758 if (feof(f))
2759 r = 0;
2760 else
2761 r = -errno;
2762
2763 goto finish;
2764 }
2765
2766 char_array_0(name);
2767
2768 if ((r = manager_load_unit(m, strstrip(name), NULL, NULL, &u)) < 0)
2769 goto finish;
2770
2771 if ((r = unit_deserialize(u, f, fds)) < 0)
2772 goto finish;
2773 }
2774
2775 finish:
2776 if (ferror(f)) {
2777 r = -EIO;
2778 goto finish;
2779 }
2780
2781 assert(m->n_deserializing > 0);
2782 m->n_deserializing --;
2783
2784 return r;
2785 }
2786
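/* Reload: serialize everything to a temporary file, drop all units and
 * jobs, re-run the generators and rebuild the unit path cache, then
 * re-enumerate, deserialize the saved state and cold-plug the units
 * back into their previous states. */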
2787 int manager_reload(Manager *m) {
2788 int r, q;
2789 FILE *f;
2790 FDSet *fds;
2791
2792 assert(m);
2793
2794 if ((r = manager_open_serialization(m, &f)) < 0)
2795 return r;
2796
2797 m->n_serializing ++;
2798
2799 if (!(fds = fdset_new())) {
2800 m->n_serializing --;
2801 r = -ENOMEM;
2802 goto finish;
2803 }
2804
2805 if ((r = manager_serialize(m, f, fds)) < 0) {
2806 m->n_serializing --;
2807 goto finish;
2808 }
2809
2810 if (fseeko(f, 0, SEEK_SET) < 0) {
2811 m->n_serializing --;
2812 r = -errno;
2813 goto finish;
2814 }
2815
2816 /* From here on there is no way back. */
2817 manager_clear_jobs_and_units(m);
2818 manager_undo_generators(m);
2819
2820 assert(m->n_serializing > 0);
2821 m->n_serializing --;
2822
2823 /* Find new unit paths */
2824 lookup_paths_free(&m->lookup_paths);
2825 if ((q = lookup_paths_init(&m->lookup_paths, m->running_as)) < 0)
2826 r = q;
2827
2828 manager_run_generators(m);
2829
2830 manager_build_unit_path_cache(m);
2831
2832 m->n_deserializing ++;
2833
2834 /* First, enumerate what we can from all config files */
2835 if ((q = manager_enumerate(m)) < 0)
2836 r = q;
2837
2838 /* Second, deserialize our stored data */
2839 if ((q = manager_deserialize(m, f, fds)) < 0)
2840 r = q;
2841
2842 fclose(f);
2843 f = NULL;
2844
2845 /* Third, fire things up! */
2846 if ((q = manager_coldplug(m)) < 0)
2847 r = q;
2848
2849 assert(m->n_deserializing > 0);
2850 m->n_deserializing--;
2851
2852 finish:
2853 if (f)
2854 fclose(f);
2855
2856 if (fds)
2857 fdset_free(fds);
2858
2859 return r;
2860 }
2861
2862 bool manager_is_booting_or_shutting_down(Manager *m) {
2863 Unit *u;
2864
2865 assert(m);
2866
2867 /* Is the initial job still around? */
2868 if (manager_get_job(m, 1))
2869 return true;
2870
2871 /* Is there a job for the shutdown target? */
2872 if (((u = manager_get_unit(m, SPECIAL_SHUTDOWN_TARGET))))
2873 return !!u->meta.job;
2874
2875 return false;
2876 }
2877
2878 void manager_reset_failed(Manager *m) {
2879 Unit *u;
2880 Iterator i;
2881
2882 assert(m);
2883
2884 HASHMAP_FOREACH(u, m->units, i)
2885 unit_reset_failed(u);
2886 }
2887
2888 bool manager_unit_pending_inactive(Manager *m, const char *name) {
2889 Unit *u;
2890
2891 assert(m);
2892 assert(name);
2893
2894 /* Returns true if the unit is inactive or going down */
2895 if (!(u = manager_get_unit(m, name)))
2896 return true;
2897
2898 return unit_pending_inactive(u);
2899 }
2900
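/* Check whether startup is finished: once no jobs are left and the
 * finish timestamp is not yet set, record it, log the kernel / initrd /
 * userspace startup times, broadcast them on the bus and tell our own
 * parent via sd_notify() that we are ready. */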
2901 void manager_check_finished(Manager *m) {
2902 char userspace[FORMAT_TIMESPAN_MAX], initrd[FORMAT_TIMESPAN_MAX], kernel[FORMAT_TIMESPAN_MAX], sum[FORMAT_TIMESPAN_MAX];
2903 usec_t kernel_usec = 0, initrd_usec = 0, userspace_usec = 0, total_usec = 0;
2904
2905 assert(m);
2906
2907 if (dual_timestamp_is_set(&m->finish_timestamp))
2908 return;
2909
2910 if (hashmap_size(m->jobs) > 0)
2911 return;
2912
2913 dual_timestamp_get(&m->finish_timestamp);
2914
2915 if (m->running_as == MANAGER_SYSTEM && detect_container(NULL) <= 0) {
2916
2917 userspace_usec = m->finish_timestamp.monotonic - m->startup_timestamp.monotonic;
2918 total_usec = m->finish_timestamp.monotonic;
2919
2920 if (dual_timestamp_is_set(&m->initrd_timestamp)) {
2921
2922 kernel_usec = m->initrd_timestamp.monotonic;
2923 initrd_usec = m->startup_timestamp.monotonic - m->initrd_timestamp.monotonic;
2924
2925 log_info("Startup finished in %s (kernel) + %s (initrd) + %s (userspace) = %s.",
2926 format_timespan(kernel, sizeof(kernel), kernel_usec),
2927 format_timespan(initrd, sizeof(initrd), initrd_usec),
2928 format_timespan(userspace, sizeof(userspace), userspace_usec),
2929 format_timespan(sum, sizeof(sum), total_usec));
2930 } else {
2931 kernel_usec = m->startup_timestamp.monotonic;
2932 initrd_usec = 0;
2933
2934 log_info("Startup finished in %s (kernel) + %s (userspace) = %s.",
2935 format_timespan(kernel, sizeof(kernel), kernel_usec),
2936 format_timespan(userspace, sizeof(userspace), userspace_usec),
2937 format_timespan(sum, sizeof(sum), total_usec));
2938 }
2939 } else {
2940 userspace_usec = initrd_usec = kernel_usec = 0;
2941 total_usec = m->finish_timestamp.monotonic - m->startup_timestamp.monotonic;
2942
2943 log_debug("Startup finished in %s.",
2944 format_timespan(sum, sizeof(sum), total_usec));
2945 }
2946
2947 bus_broadcast_finished(m, kernel_usec, initrd_usec, userspace_usec, total_usec);
2948
2949 sd_notifyf(false,
2950 "READY=1\nSTATUS=Startup finished in %s.",
2951 format_timespan(sum, sizeof(sum), total_usec));
2952 }
2953
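/* Run all generator binaries found in the generator directory, passing
 * the directory where they may drop dynamically created unit files as
 * argv[1]. If that output directory stays empty it is removed again;
 * otherwise it is appended to the unit search path. */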
2954 void manager_run_generators(Manager *m) {
2955 DIR *d = NULL;
2956 const char *generator_path;
2957 const char *argv[3];
2958
2959 assert(m);
2960
2961 generator_path = m->running_as == MANAGER_SYSTEM ? SYSTEM_GENERATOR_PATH : USER_GENERATOR_PATH;
2962 if (!(d = opendir(generator_path))) {
2963
2964 if (errno == ENOENT)
2965 return;
2966
2967 log_error("Failed to enumerate generator directory: %m");
2968 return;
2969 }
2970
2971 if (!m->generator_unit_path) {
2972 const char *p;
2973 char user_path[] = "/tmp/systemd-generator-XXXXXX";
2974
2975 if (m->running_as == MANAGER_SYSTEM && getpid() == 1) {
2976 p = "/run/systemd/generator";
2977
2978 if (mkdir_p(p, 0755) < 0) {
2979 log_error("Failed to create generator directory: %m");
2980 goto finish;
2981 }
2982
2983 } else {
2984 if (!(p = mkdtemp(user_path))) {
2985 log_error("Failed to create generator directory: %m");
2986 goto finish;
2987 }
2988 }
2989
2990 if (!(m->generator_unit_path = strdup(p))) {
2991 log_error("Failed to allocate generator unit path.");
2992 goto finish;
2993 }
2994 }
2995
2996 argv[0] = NULL; /* Leave this empty, execute_directory() will fill something in */
2997 argv[1] = m->generator_unit_path;
2998 argv[2] = NULL;
2999
3000 execute_directory(generator_path, d, (char**) argv);
3001
3002 if (rmdir(m->generator_unit_path) >= 0) {
3003 /* Uh? We were able to remove this dir? That means
3004 * no generator dropped any unit files in it, hence
3005 * let's shortcut this. */
3006
3007 free(m->generator_unit_path);
3008 m->generator_unit_path = NULL;
3009 goto finish;
3010 }
3011
3012 if (!strv_find(m->lookup_paths.unit_path, m->generator_unit_path)) {
3013 char **l;
3014
3015 if (!(l = strv_append(m->lookup_paths.unit_path, m->generator_unit_path))) {
3016 log_error("Failed to add generator directory to unit search path: %m");
3017 goto finish;
3018 }
3019
3020 strv_free(m->lookup_paths.unit_path);
3021 m->lookup_paths.unit_path = l;
3022
3023 log_debug("Added generator unit path %s to search path.", m->generator_unit_path);
3024 }
3025
3026 finish:
3027 if (d)
3028 closedir(d);
3029 }
3030
3031 void manager_undo_generators(Manager *m) {
3032 assert(m);
3033
3034 if (!m->generator_unit_path)
3035 return;
3036
3037 strv_remove(m->lookup_paths.unit_path, m->generator_unit_path);
3038 rm_rf(m->generator_unit_path, false, true);
3039
3040 free(m->generator_unit_path);
3041 m->generator_unit_path = NULL;
3042 }
3043
3044 int manager_set_default_controllers(Manager *m, char **controllers) {
3045 char **l;
3046
3047 assert(m);
3048
3049 if (!(l = strv_copy(controllers)))
3050 return -ENOMEM;
3051
3052 strv_free(m->default_controllers);
3053 m->default_controllers = l;
3054
3055 return 0;
3056 }
3057
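/* Decide whether PID 1 itself may log to syslog: only when syslog.socket
 * is fully up, fully down or failed, and syslog.target (if it exists) is
 * active. In all intermediate states the syslog output is closed, to
 * avoid logging into a socket we are still busy setting up. */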
3058 void manager_recheck_syslog(Manager *m) {
3059 Unit *u;
3060
3061 assert(m);
3062
3063 if (m->running_as != MANAGER_SYSTEM)
3064 return;
3065
3066 if ((u = manager_get_unit(m, SPECIAL_SYSLOG_SOCKET))) {
3067 SocketState state;
3068
3069 state = SOCKET(u)->state;
3070
3071 if (state != SOCKET_DEAD &&
3072 state != SOCKET_FAILED &&
3073 state != SOCKET_RUNNING) {
3074
3075 /* Hmm, the socket is not set up, or is still
3076 * listening, so we had better not try to use
3077 * it. Note that we have no problem if the
3078 * socket is completely down, since there
3079 * might be a foreign /dev/log socket around
3080 * and we want to make use of that.
3081 */
3082
3083 log_close_syslog();
3084 return;
3085 }
3086 }
3087
3088 if ((u = manager_get_unit(m, SPECIAL_SYSLOG_TARGET)))
3089 if (TARGET(u)->state != TARGET_ACTIVE) {
3090 log_close_syslog();
3091 return;
3092 }
3093
3094 /* OK, so the socket is either fully up or fully down, and the
3095 * target (if any) is up, so let's make use of the socket */
3096 log_open();
3097 }
3098
3099 static const char* const manager_running_as_table[_MANAGER_RUNNING_AS_MAX] = {
3100 [MANAGER_SYSTEM] = "system",
3101 [MANAGER_USER] = "user"
3102 };
3103
3104 DEFINE_STRING_TABLE_LOOKUP(manager_running_as, ManagerRunningAs);