git.ipfire.org Git - thirdparty/systemd.git/blob - src/manager.c
manager: include full systemctl status command line in error message
1 /*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/
2
3 /***
4 This file is part of systemd.
5
6 Copyright 2010 Lennart Poettering
7
8 systemd is free software; you can redistribute it and/or modify it
9 under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2 of the License, or
11 (at your option) any later version.
12
13 systemd is distributed in the hope that it will be useful, but
14 WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with systemd; If not, see <http://www.gnu.org/licenses/>.
20 ***/
21
22 #include <assert.h>
23 #include <errno.h>
24 #include <string.h>
25 #include <sys/epoll.h>
26 #include <signal.h>
27 #include <sys/signalfd.h>
28 #include <sys/wait.h>
29 #include <unistd.h>
30 #include <sys/poll.h>
31 #include <sys/reboot.h>
32 #include <sys/ioctl.h>
33 #include <linux/kd.h>
34 #include <termios.h>
35 #include <fcntl.h>
36 #include <sys/types.h>
37 #include <sys/stat.h>
38 #include <dirent.h>
39
40 #ifdef HAVE_AUDIT
41 #include <libaudit.h>
42 #endif
43
44 #include "manager.h"
45 #include "hashmap.h"
46 #include "macro.h"
47 #include "strv.h"
48 #include "log.h"
49 #include "util.h"
50 #include "ratelimit.h"
51 #include "cgroup.h"
52 #include "mount-setup.h"
53 #include "unit-name.h"
54 #include "dbus-unit.h"
55 #include "dbus-job.h"
56 #include "missing.h"
57 #include "path-lookup.h"
58 #include "special.h"
59 #include "bus-errors.h"
60 #include "exit-status.h"
61
62 /* As soon as 16 units are in our GC queue, make sure to run a gc sweep */
63 #define GC_QUEUE_ENTRIES_MAX 16
64
66 /* As soon as 10s have passed since a unit was added to our GC queue, make sure to run a gc sweep */
66 #define GC_QUEUE_USEC_MAX (10*USEC_PER_SEC)
67
68 /* Where clients shall send notification messages to */
69 #define NOTIFY_SOCKET_SYSTEM "/run/systemd/notify"
70 #define NOTIFY_SOCKET_USER "@/org/freedesktop/systemd1/notify"
71
72 static int manager_setup_notify(Manager *m) {
73 union {
74 struct sockaddr sa;
75 struct sockaddr_un un;
76 } sa;
77 struct epoll_event ev;
78 int one = 1;
79
80 assert(m);
81
82 m->notify_watch.type = WATCH_NOTIFY;
83 if ((m->notify_watch.fd = socket(AF_UNIX, SOCK_DGRAM|SOCK_CLOEXEC|SOCK_NONBLOCK, 0)) < 0) {
84 log_error("Failed to allocate notification socket: %m");
85 return -errno;
86 }
87
88 zero(sa);
89 sa.sa.sa_family = AF_UNIX;
90
91 if (getpid() != 1)
92 snprintf(sa.un.sun_path, sizeof(sa.un.sun_path), NOTIFY_SOCKET_USER "/%llu", random_ull());
93 else {
94 unlink(NOTIFY_SOCKET_SYSTEM);
95 strncpy(sa.un.sun_path, NOTIFY_SOCKET_SYSTEM, sizeof(sa.un.sun_path));
96 }
97
98 if (sa.un.sun_path[0] == '@')
99 sa.un.sun_path[0] = 0;
100
101 if (bind(m->notify_watch.fd, &sa.sa, offsetof(struct sockaddr_un, sun_path) + 1 + strlen(sa.un.sun_path+1)) < 0) {
102 log_error("bind() failed: %m");
103 return -errno;
104 }
105
106 if (setsockopt(m->notify_watch.fd, SOL_SOCKET, SO_PASSCRED, &one, sizeof(one)) < 0) {
107 log_error("SO_PASSCRED failed: %m");
108 return -errno;
109 }
110
111 zero(ev);
112 ev.events = EPOLLIN;
113 ev.data.ptr = &m->notify_watch;
114
115 if (epoll_ctl(m->epoll_fd, EPOLL_CTL_ADD, m->notify_watch.fd, &ev) < 0)
116 return -errno;
117
118 if (sa.un.sun_path[0] == 0)
119 sa.un.sun_path[0] = '@';
120
121 if (!(m->notify_socket = strdup(sa.un.sun_path)))
122 return -ENOMEM;
123
124 log_debug("Using notification socket %s", m->notify_socket);
125
126 return 0;
127 }
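/*
 * Illustrative only, not part of this file: a client (for example a service
 * using sd_notify()) reports status by sending one datagram of
 * newline-separated tags to the socket bound above, usually locating it via
 * the NOTIFY_SOCKET environment variable (exported to services elsewhere,
 * not in this function). Because SO_PASSCRED is set, the kernel attaches the
 * sender's credentials, which manager_process_notify_fd() below insists on.
 *
 *     int fd = socket(AF_UNIX, SOCK_DGRAM|SOCK_CLOEXEC, 0);
 *     union { struct sockaddr sa; struct sockaddr_un un; } dest;
 *     const char *e = getenv("NOTIFY_SOCKET");
 *     zero(dest);
 *     dest.un.sun_family = AF_UNIX;
 *     strncpy(dest.un.sun_path, e, sizeof(dest.un.sun_path));
 *     if (dest.un.sun_path[0] == '@')
 *             dest.un.sun_path[0] = 0;  // abstract namespace socket
 *     sendto(fd, "READY=1\nSTATUS=Up and running\n", 30, MSG_NOSIGNAL,
 *            &dest.sa, offsetof(struct sockaddr_un, sun_path) + strlen(e));
 *     close_nointr_nofail(fd);
 */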
128
129 static int enable_special_signals(Manager *m) {
130 int fd;
131
132 assert(m);
133
134 /* Make sure we get SIGINT on control-alt-del */
135 if (reboot(RB_DISABLE_CAD) < 0)
136 log_warning("Failed to enable ctrl-alt-del handling: %m");
137
138 if ((fd = open_terminal("/dev/tty0", O_RDWR|O_NOCTTY)) < 0)
139 log_warning("Failed to open /dev/tty0: %m");
140 else {
141 /* Make sure we get SIGWINCH on kbrequest */
142 if (ioctl(fd, KDSIGACCEPT, SIGWINCH) < 0)
143 log_warning("Failed to enable kbrequest handling: %s", strerror(errno));
144
145 close_nointr_nofail(fd);
146 }
147
148 return 0;
149 }
150
151 static int manager_setup_signals(Manager *m) {
152 sigset_t mask;
153 struct epoll_event ev;
154 struct sigaction sa;
155
156 assert(m);
157
158 /* We are not interested in SIGSTOP and friends. */
159 zero(sa);
160 sa.sa_handler = SIG_DFL;
161 sa.sa_flags = SA_NOCLDSTOP|SA_RESTART;
162 assert_se(sigaction(SIGCHLD, &sa, NULL) == 0);
163
164 assert_se(sigemptyset(&mask) == 0);
165
166 sigset_add_many(&mask,
167 SIGCHLD, /* Child died */
168 SIGTERM, /* Reexecute daemon */
169 SIGHUP, /* Reload configuration */
170 SIGUSR1, /* systemd/upstart: reconnect to D-Bus */
171 SIGUSR2, /* systemd: dump status */
172 SIGINT, /* Kernel sends us this on control-alt-del */
173 SIGWINCH, /* Kernel sends us this on kbrequest (alt-arrowup) */
174 SIGPWR, /* Some kernel drivers and upsd send us this on power failure */
175 SIGRTMIN+0, /* systemd: start default.target */
176 SIGRTMIN+1, /* systemd: isolate rescue.target */
177 SIGRTMIN+2, /* systemd: isolate emergency.target */
178 SIGRTMIN+3, /* systemd: start halt.target */
179 SIGRTMIN+4, /* systemd: start poweroff.target */
180 SIGRTMIN+5, /* systemd: start reboot.target */
181 SIGRTMIN+6, /* systemd: start kexec.target */
182 SIGRTMIN+13, /* systemd: Immediate halt */
183 SIGRTMIN+14, /* systemd: Immediate poweroff */
184 SIGRTMIN+15, /* systemd: Immediate reboot */
185 SIGRTMIN+16, /* systemd: Immediate kexec */
186 SIGRTMIN+20, /* systemd: enable status messages */
187 SIGRTMIN+21, /* systemd: disable status messages */
188 -1);
189 assert_se(sigprocmask(SIG_SETMASK, &mask, NULL) == 0);
190
191 m->signal_watch.type = WATCH_SIGNAL;
192 if ((m->signal_watch.fd = signalfd(-1, &mask, SFD_NONBLOCK|SFD_CLOEXEC)) < 0)
193 return -errno;
194
195 zero(ev);
196 ev.events = EPOLLIN;
197 ev.data.ptr = &m->signal_watch;
198
199 if (epoll_ctl(m->epoll_fd, EPOLL_CTL_ADD, m->signal_watch.fd, &ev) < 0)
200 return -errno;
201
202 if (m->running_as == MANAGER_SYSTEM)
203 return enable_special_signals(m);
204
205 return 0;
206 }
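/*
 * Example (illustrative, not from this file): per the table above an
 * administrator could send these requests straight to PID 1, e.g.
 * "kill -RTMIN+4 1" to start poweroff.target or "kill -RTMIN+21 1" to
 * disable status messages; systemctl is the usual front-end and talks
 * over D-Bus instead where available.
 */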
207
208 int manager_new(ManagerRunningAs running_as, Manager **_m) {
209 Manager *m;
210 int r = -ENOMEM;
211
212 assert(_m);
213 assert(running_as >= 0);
214 assert(running_as < _MANAGER_RUNNING_AS_MAX);
215
216 if (!(m = new0(Manager, 1)))
217 return -ENOMEM;
218
219 dual_timestamp_get(&m->startup_timestamp);
220
221 m->running_as = running_as;
222 m->name_data_slot = m->subscribed_data_slot = -1;
223 m->exit_code = _MANAGER_EXIT_CODE_INVALID;
224 m->pin_cgroupfs_fd = -1;
225
226 #ifdef HAVE_AUDIT
227 m->audit_fd = -1;
228 #endif
229
230 m->signal_watch.fd = m->mount_watch.fd = m->udev_watch.fd = m->epoll_fd = m->dev_autofs_fd = m->swap_watch.fd = -1;
231 m->current_job_id = 1; /* start as id #1, so that we can leave #0 around as "null-like" value */
232
233 if (!(m->environment = strv_copy(environ)))
234 goto fail;
235
236 if (!(m->default_controllers = strv_new("cpu", NULL)))
237 goto fail;
238
239 if (!(m->units = hashmap_new(string_hash_func, string_compare_func)))
240 goto fail;
241
242 if (!(m->jobs = hashmap_new(trivial_hash_func, trivial_compare_func)))
243 goto fail;
244
245 if (!(m->transaction_jobs = hashmap_new(trivial_hash_func, trivial_compare_func)))
246 goto fail;
247
248 if (!(m->watch_pids = hashmap_new(trivial_hash_func, trivial_compare_func)))
249 goto fail;
250
251 if (!(m->cgroup_bondings = hashmap_new(string_hash_func, string_compare_func)))
252 goto fail;
253
254 if (!(m->watch_bus = hashmap_new(string_hash_func, string_compare_func)))
255 goto fail;
256
257 if ((m->epoll_fd = epoll_create1(EPOLL_CLOEXEC)) < 0)
258 goto fail;
259
260 if ((r = lookup_paths_init(&m->lookup_paths, m->running_as)) < 0)
261 goto fail;
262
263 if ((r = manager_setup_signals(m)) < 0)
264 goto fail;
265
266 if ((r = manager_setup_cgroup(m)) < 0)
267 goto fail;
268
269 if ((r = manager_setup_notify(m)) < 0)
270 goto fail;
271
272 /* Try to connect to the busses, if possible. */
273 if ((r = bus_init(m, running_as != MANAGER_SYSTEM)) < 0)
274 goto fail;
275
276 #ifdef HAVE_AUDIT
277 if ((m->audit_fd = audit_open()) < 0)
278 log_error("Failed to connect to audit log: %m");
279 #endif
280
281 m->taint_usr = dir_is_empty("/usr") > 0;
282
283 *_m = m;
284 return 0;
285
286 fail:
287 manager_free(m);
288 return r;
289 }
290
291 static unsigned manager_dispatch_cleanup_queue(Manager *m) {
292 Meta *meta;
293 unsigned n = 0;
294
295 assert(m);
296
297 while ((meta = m->cleanup_queue)) {
298 assert(meta->in_cleanup_queue);
299
300 unit_free((Unit*) meta);
301 n++;
302 }
303
304 return n;
305 }
306
307 enum {
308 GC_OFFSET_IN_PATH, /* This one is on the path we were traveling */
309 GC_OFFSET_UNSURE, /* No clue */
310 GC_OFFSET_GOOD, /* We still need this unit */
311 GC_OFFSET_BAD, /* We don't need this unit anymore */
312 _GC_OFFSET_MAX
313 };
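/*
 * The sweep below stores its per-unit state directly in meta.gc_marker as
 * gc_marker + GC_OFFSET_*: manager_dispatch_gc_queue() advances
 * m->gc_marker by _GC_OFFSET_MAX before every run, so values written by an
 * earlier sweep can never be confused with results of the current one and
 * the markers never have to be cleared.
 */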
314
315 static void unit_gc_sweep(Unit *u, unsigned gc_marker) {
316 Iterator i;
317 Unit *other;
318 bool is_bad;
319
320 assert(u);
321
322 if (u->meta.gc_marker == gc_marker + GC_OFFSET_GOOD ||
323 u->meta.gc_marker == gc_marker + GC_OFFSET_BAD ||
324 u->meta.gc_marker == gc_marker + GC_OFFSET_IN_PATH)
325 return;
326
327 if (u->meta.in_cleanup_queue)
328 goto bad;
329
330 if (unit_check_gc(u))
331 goto good;
332
333 u->meta.gc_marker = gc_marker + GC_OFFSET_IN_PATH;
334
335 is_bad = true;
336
337 SET_FOREACH(other, u->meta.dependencies[UNIT_REFERENCED_BY], i) {
338 unit_gc_sweep(other, gc_marker);
339
340 if (other->meta.gc_marker == gc_marker + GC_OFFSET_GOOD)
341 goto good;
342
343 if (other->meta.gc_marker != gc_marker + GC_OFFSET_BAD)
344 is_bad = false;
345 }
346
347 if (is_bad)
348 goto bad;
349
350 /* We were unable to find anything out about this entry, so
351 * let's investigate it later */
352 u->meta.gc_marker = gc_marker + GC_OFFSET_UNSURE;
353 unit_add_to_gc_queue(u);
354 return;
355
356 bad:
357 /* We definitely know that this one is not useful anymore, so
358 * let's mark it for deletion */
359 u->meta.gc_marker = gc_marker + GC_OFFSET_BAD;
360 unit_add_to_cleanup_queue(u);
361 return;
362
363 good:
364 u->meta.gc_marker = gc_marker + GC_OFFSET_GOOD;
365 }
366
367 static unsigned manager_dispatch_gc_queue(Manager *m) {
368 Meta *meta;
369 unsigned n = 0;
370 unsigned gc_marker;
371
372 assert(m);
373
374 if ((m->n_in_gc_queue < GC_QUEUE_ENTRIES_MAX) &&
375 (m->gc_queue_timestamp <= 0 ||
376 (m->gc_queue_timestamp + GC_QUEUE_USEC_MAX) > now(CLOCK_MONOTONIC)))
377 return 0;
378
379 log_debug("Running GC...");
380
381 m->gc_marker += _GC_OFFSET_MAX;
382 if (m->gc_marker + _GC_OFFSET_MAX <= _GC_OFFSET_MAX)
383 m->gc_marker = 1;
384
385 gc_marker = m->gc_marker;
386
387 while ((meta = m->gc_queue)) {
388 assert(meta->in_gc_queue);
389
390 unit_gc_sweep((Unit*) meta, gc_marker);
391
392 LIST_REMOVE(Meta, gc_queue, m->gc_queue, meta);
393 meta->in_gc_queue = false;
394
395 n++;
396
397 if (meta->gc_marker == gc_marker + GC_OFFSET_BAD ||
398 meta->gc_marker == gc_marker + GC_OFFSET_UNSURE) {
399 log_debug("Collecting %s", meta->id);
400 meta->gc_marker = gc_marker + GC_OFFSET_BAD;
401 unit_add_to_cleanup_queue((Unit*) meta);
402 }
403 }
404
405 m->n_in_gc_queue = 0;
406 m->gc_queue_timestamp = 0;
407
408 return n;
409 }
410
411 static void manager_clear_jobs_and_units(Manager *m) {
412 Job *j;
413 Unit *u;
414
415 assert(m);
416
417 while ((j = hashmap_first(m->transaction_jobs)))
418 job_free(j);
419
420 while ((u = hashmap_first(m->units)))
421 unit_free(u);
422
423 manager_dispatch_cleanup_queue(m);
424
425 assert(!m->load_queue);
426 assert(!m->run_queue);
427 assert(!m->dbus_unit_queue);
428 assert(!m->dbus_job_queue);
429 assert(!m->cleanup_queue);
430 assert(!m->gc_queue);
431
432 assert(hashmap_isempty(m->transaction_jobs));
433 assert(hashmap_isempty(m->jobs));
434 assert(hashmap_isempty(m->units));
435 }
436
437 void manager_free(Manager *m) {
438 UnitType c;
439
440 assert(m);
441
442 manager_clear_jobs_and_units(m);
443
444 for (c = 0; c < _UNIT_TYPE_MAX; c++)
445 if (unit_vtable[c]->shutdown)
446 unit_vtable[c]->shutdown(m);
447
448 /* If we reexecute ourselves, we keep the root cgroup
449 * around */
450 manager_shutdown_cgroup(m, m->exit_code != MANAGER_REEXECUTE);
451
452 manager_undo_generators(m);
453
454 bus_done(m);
455
456 hashmap_free(m->units);
457 hashmap_free(m->jobs);
458 hashmap_free(m->transaction_jobs);
459 hashmap_free(m->watch_pids);
460 hashmap_free(m->watch_bus);
461
462 if (m->epoll_fd >= 0)
463 close_nointr_nofail(m->epoll_fd);
464 if (m->signal_watch.fd >= 0)
465 close_nointr_nofail(m->signal_watch.fd);
466 if (m->notify_watch.fd >= 0)
467 close_nointr_nofail(m->notify_watch.fd);
468
469 #ifdef HAVE_AUDIT
470 if (m->audit_fd >= 0)
471 audit_close(m->audit_fd);
472 #endif
473
474 free(m->notify_socket);
475
476 lookup_paths_free(&m->lookup_paths);
477 strv_free(m->environment);
478
479 strv_free(m->default_controllers);
480
481 hashmap_free(m->cgroup_bondings);
482 set_free_free(m->unit_path_cache);
483
484 free(m);
485 }
486
487 int manager_enumerate(Manager *m) {
488 int r = 0, q;
489 UnitType c;
490
491 assert(m);
492
493 /* Let's ask every type to load all units from disk/kernel
494 * that it might know */
495 for (c = 0; c < _UNIT_TYPE_MAX; c++)
496 if (unit_vtable[c]->enumerate)
497 if ((q = unit_vtable[c]->enumerate(m)) < 0)
498 r = q;
499
500 manager_dispatch_load_queue(m);
501 return r;
502 }
503
504 int manager_coldplug(Manager *m) {
505 int r = 0, q;
506 Iterator i;
507 Unit *u;
508 char *k;
509
510 assert(m);
511
512 /* Then, let's set up their initial state. */
513 HASHMAP_FOREACH_KEY(u, k, m->units, i) {
514
515 /* ignore aliases */
516 if (u->meta.id != k)
517 continue;
518
519 if ((q = unit_coldplug(u)) < 0)
520 r = q;
521 }
522
523 return r;
524 }
525
526 static void manager_build_unit_path_cache(Manager *m) {
527 char **i;
528 DIR *d = NULL;
529 int r;
530
531 assert(m);
532
533 set_free_free(m->unit_path_cache);
534
535 if (!(m->unit_path_cache = set_new(string_hash_func, string_compare_func))) {
536 log_error("Failed to allocate unit path cache.");
537 return;
538 }
539
540 /* This simply builds a list of files we know exist, so that
541 * we don't always have to go to disk */
542
543 STRV_FOREACH(i, m->lookup_paths.unit_path) {
544 struct dirent *de;
545
546 if (!(d = opendir(*i))) {
547 log_error("Failed to open directory: %m");
548 continue;
549 }
550
551 while ((de = readdir(d))) {
552 char *p;
553
554 if (ignore_file(de->d_name))
555 continue;
556
557 if (asprintf(&p, "%s/%s", streq(*i, "/") ? "" : *i, de->d_name) < 0) {
558 r = -ENOMEM;
559 goto fail;
560 }
561
562 if ((r = set_put(m->unit_path_cache, p)) < 0) {
563 free(p);
564 goto fail;
565 }
566 }
567
568 closedir(d);
569 d = NULL;
570 }
571
572 return;
573
574 fail:
575 log_error("Failed to build unit path cache: %s", strerror(-r));
576
577 set_free_free(m->unit_path_cache);
578 m->unit_path_cache = NULL;
579
580 if (d)
581 closedir(d);
582 }
583
584 int manager_startup(Manager *m, FILE *serialization, FDSet *fds) {
585 int r, q;
586
587 assert(m);
588
589 manager_run_generators(m);
590
591 manager_build_unit_path_cache(m);
592
593 /* If we are going to deserialize, make sure this is already
594 * known during enumeration, so we increase the counter here
595 * already */
596 if (serialization)
597 m->n_deserializing ++;
598
599 /* First, enumerate what we can from all config files */
600 r = manager_enumerate(m);
601
602 /* Second, deserialize if there is something to deserialize */
603 if (serialization)
604 if ((q = manager_deserialize(m, serialization, fds)) < 0)
605 r = q;
606
607 /* Third, fire things up! */
608 if ((q = manager_coldplug(m)) < 0)
609 r = q;
610
611 if (serialization) {
612 assert(m->n_deserializing > 0);
613 m->n_deserializing --;
614 }
615
616 return r;
617 }
618
619 static void transaction_delete_job(Manager *m, Job *j, bool delete_dependencies) {
620 assert(m);
621 assert(j);
622
623 /* Deletes one job from the transaction */
624
625 manager_transaction_unlink_job(m, j, delete_dependencies);
626
627 if (!j->installed)
628 job_free(j);
629 }
630
631 static void transaction_delete_unit(Manager *m, Unit *u) {
632 Job *j;
633
634 /* Deletes all jobs associated with a certain unit from the
635 * transaction */
636
637 while ((j = hashmap_get(m->transaction_jobs, u)))
638 transaction_delete_job(m, j, true);
639 }
640
641 static void transaction_clean_dependencies(Manager *m) {
642 Iterator i;
643 Job *j;
644
645 assert(m);
646
647 /* Drops all dependencies of all installed jobs */
648
649 HASHMAP_FOREACH(j, m->jobs, i) {
650 while (j->subject_list)
651 job_dependency_free(j->subject_list);
652 while (j->object_list)
653 job_dependency_free(j->object_list);
654 }
655
656 assert(!m->transaction_anchor);
657 }
658
659 static void transaction_abort(Manager *m) {
660 Job *j;
661
662 assert(m);
663
664 while ((j = hashmap_first(m->transaction_jobs)))
665 if (j->installed)
666 transaction_delete_job(m, j, true);
667 else
668 job_free(j);
669
670 assert(hashmap_isempty(m->transaction_jobs));
671
672 transaction_clean_dependencies(m);
673 }
674
675 static void transaction_find_jobs_that_matter_to_anchor(Manager *m, Job *j, unsigned generation) {
676 JobDependency *l;
677
678 assert(m);
679
680 /* A recursive sweep through the graph that marks all units
681 * that matter to the anchor job, i.e. are directly or
682 * indirectly a dependency of the anchor job via paths that
683 * are fully marked as mattering. */
684
685 if (j)
686 l = j->subject_list;
687 else
688 l = m->transaction_anchor;
689
690 LIST_FOREACH(subject, l, l) {
691
692 /* This link does not matter */
693 if (!l->matters)
694 continue;
695
696 /* This unit has already been marked */
697 if (l->object->generation == generation)
698 continue;
699
700 l->object->matters_to_anchor = true;
701 l->object->generation = generation;
702
703 transaction_find_jobs_that_matter_to_anchor(m, l->object, generation);
704 }
705 }
706
707 static void transaction_merge_and_delete_job(Manager *m, Job *j, Job *other, JobType t) {
708 JobDependency *l, *last;
709
710 assert(j);
711 assert(other);
712 assert(j->unit == other->unit);
713 assert(!j->installed);
714
715 /* Merges 'other' into 'j' and then deletes 'other'. */
716
717 j->type = t;
718 j->state = JOB_WAITING;
719 j->override = j->override || other->override;
720
721 j->matters_to_anchor = j->matters_to_anchor || other->matters_to_anchor;
722
723 /* Patch us in as new owner of the JobDependency objects */
724 last = NULL;
725 LIST_FOREACH(subject, l, other->subject_list) {
726 assert(l->subject == other);
727 l->subject = j;
728 last = l;
729 }
730
731 /* Merge both lists */
732 if (last) {
733 last->subject_next = j->subject_list;
734 if (j->subject_list)
735 j->subject_list->subject_prev = last;
736 j->subject_list = other->subject_list;
737 }
738
739 /* Patch us in as new owner of the JobDependency objects */
740 last = NULL;
741 LIST_FOREACH(object, l, other->object_list) {
742 assert(l->object == other);
743 l->object = j;
744 last = l;
745 }
746
747 /* Merge both lists */
748 if (last) {
749 last->object_next = j->object_list;
750 if (j->object_list)
751 j->object_list->object_prev = last;
752 j->object_list = other->object_list;
753 }
754
755 /* Kill the other job */
756 other->subject_list = NULL;
757 other->object_list = NULL;
758 transaction_delete_job(m, other, true);
759 }
760 static bool job_is_conflicted_by(Job *j) {
761 JobDependency *l;
762
763 assert(j);
764
765 /* Returns true if this job is pulled in by at least one
766 * ConflictedBy dependency. */
767
768 LIST_FOREACH(object, l, j->object_list)
769 if (l->conflicts)
770 return true;
771
772 return false;
773 }
774
775 static int delete_one_unmergeable_job(Manager *m, Job *j) {
776 Job *k;
777
778 assert(j);
779
780 /* Tries to delete one item in the linked list
781 * j->transaction_next->transaction_next->... that conflicts
782 * with another one, in an attempt to make an inconsistent
783 * transaction work. */
784
785 /* We rely here on the fact that if a merged with b does not
786 * merge with c, then either a or b does not merge with c either */
787 LIST_FOREACH(transaction, j, j)
788 LIST_FOREACH(transaction, k, j->transaction_next) {
789 Job *d;
790
791 /* Is this one mergeable? Then skip it */
792 if (job_type_is_mergeable(j->type, k->type))
793 continue;
794
795 /* Ok, we found two that conflict, let's see if we can
796 * drop one of them */
797 if (!j->matters_to_anchor && !k->matters_to_anchor) {
798
799 /* Both jobs don't matter, so let's
800 * find the one that is smarter to
801 * remove. Let's think positive and
802 * rather remove stops than starts --
803 * except if something is being
804 * stopped because it is conflicted by
805 * another unit in which case we
806 * rather remove the start. */
807
808 log_debug("Looking at job %s/%s conflicted_by=%s", j->unit->meta.id, job_type_to_string(j->type), yes_no(j->type == JOB_STOP && job_is_conflicted_by(j)));
809 log_debug("Looking at job %s/%s conflicted_by=%s", k->unit->meta.id, job_type_to_string(k->type), yes_no(k->type == JOB_STOP && job_is_conflicted_by(k)));
810
811 if (j->type == JOB_STOP) {
812
813 if (job_is_conflicted_by(j))
814 d = k;
815 else
816 d = j;
817
818 } else if (k->type == JOB_STOP) {
819
820 if (job_is_conflicted_by(k))
821 d = j;
822 else
823 d = k;
824 } else
825 d = j;
826
827 } else if (!j->matters_to_anchor)
828 d = j;
829 else if (!k->matters_to_anchor)
830 d = k;
831 else
832 return -ENOEXEC;
833
834 /* Ok, we can drop one, so let's do so. */
835 log_debug("Fixing conflicting jobs by deleting job %s/%s", d->unit->meta.id, job_type_to_string(d->type));
836 transaction_delete_job(m, d, true);
837 return 0;
838 }
839
840 return -EINVAL;
841 }
842
843 static int transaction_merge_jobs(Manager *m, DBusError *e) {
844 Job *j;
845 Iterator i;
846 int r;
847
848 assert(m);
849
850 /* First step, check whether any of the jobs for one specific
851 * task conflict. If so, try to drop one of them. */
852 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
853 JobType t;
854 Job *k;
855
856 t = j->type;
857 LIST_FOREACH(transaction, k, j->transaction_next) {
858 if (job_type_merge(&t, k->type) >= 0)
859 continue;
860
861 /* OK, we could not merge all jobs for this
862 * action. Let's see if we can get rid of one
863 * of them */
864
865 if ((r = delete_one_unmergeable_job(m, j)) >= 0)
866 /* Ok, we managed to drop one, now
867 * let's ask our callers to call us
868 * again after garbage collecting */
869 return -EAGAIN;
870
871 /* We couldn't merge anything. Failure */
872 dbus_set_error(e, BUS_ERROR_TRANSACTION_JOBS_CONFLICTING, "Transaction contains conflicting jobs '%s' and '%s' for %s. Probably contradicting requirement dependencies configured.",
873 job_type_to_string(t), job_type_to_string(k->type), k->unit->meta.id);
874 return r;
875 }
876 }
877
878 /* Second step, merge the jobs. */
879 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
880 JobType t = j->type;
881 Job *k;
882
883 /* Merge all transactions */
884 LIST_FOREACH(transaction, k, j->transaction_next)
885 assert_se(job_type_merge(&t, k->type) == 0);
886
887 /* If an active job is mergeable, merge it too */
888 if (j->unit->meta.job)
889 job_type_merge(&t, j->unit->meta.job->type); /* Might fail. Which is OK */
890
891 while ((k = j->transaction_next)) {
892 if (j->installed) {
893 transaction_merge_and_delete_job(m, k, j, t);
894 j = k;
895 } else
896 transaction_merge_and_delete_job(m, j, k, t);
897 }
898
899 assert(!j->transaction_next);
900 assert(!j->transaction_prev);
901 }
902
903 return 0;
904 }
905
906 static void transaction_drop_redundant(Manager *m) {
907 bool again;
908
909 assert(m);
910
911 /* Goes through the transaction and removes all jobs that are
912 * a noop */
913
914 do {
915 Job *j;
916 Iterator i;
917
918 again = false;
919
920 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
921 bool changes_something = false;
922 Job *k;
923
924 LIST_FOREACH(transaction, k, j) {
925
926 if (!job_is_anchor(k) &&
927 (k->installed || job_type_is_redundant(k->type, unit_active_state(k->unit))) &&
928 (!k->unit->meta.job || !job_type_is_conflicting(k->type, k->unit->meta.job->type)))
929 continue;
930
931 changes_something = true;
932 break;
933 }
934
935 if (changes_something)
936 continue;
937
938 /* log_debug("Found redundant job %s/%s, dropping.", j->unit->meta.id, job_type_to_string(j->type)); */
939 transaction_delete_job(m, j, false);
940 again = true;
941 break;
942 }
943
944 } while (again);
945 }
946
947 static bool unit_matters_to_anchor(Unit *u, Job *j) {
948 assert(u);
949 assert(!j->transaction_prev);
950
951 /* Checks whether at least one of the jobs for this unit
952 * matters to the anchor. */
953
954 LIST_FOREACH(transaction, j, j)
955 if (j->matters_to_anchor)
956 return true;
957
958 return false;
959 }
960
961 static int transaction_verify_order_one(Manager *m, Job *j, Job *from, unsigned generation, DBusError *e) {
962 Iterator i;
963 Unit *u;
964 int r;
965
966 assert(m);
967 assert(j);
968 assert(!j->transaction_prev);
969
970 /* Does a recursive sweep through the ordering graph, looking
971 * for a cycle. If we find cycle we try to break it. */
972
973 /* Have we seen this before? */
974 if (j->generation == generation) {
975 Job *k, *delete;
976
977 /* If the marker is NULL we have been here already and
978 * decided the job was loop-free from here. Hence
979 * shortcut things and return right-away. */
980 if (!j->marker)
981 return 0;
982
983 /* So, the marker is not NULL and we already have been
984 * here. We have a cycle. Let's try to break it. We go
985 * backwards in our path and try to find a suitable
986 * job to remove. We use the marker to find our way
987 * back, since, smart as we are, we stored our way back
988 * in there. */
989 log_warning("Found ordering cycle on %s/%s", j->unit->meta.id, job_type_to_string(j->type));
990
991 delete = NULL;
992 for (k = from; k; k = ((k->generation == generation && k->marker != k) ? k->marker : NULL)) {
993
994 log_info("Walked on cycle path to %s/%s", k->unit->meta.id, job_type_to_string(k->type));
995
996 if (!delete &&
997 !k->installed &&
998 !unit_matters_to_anchor(k->unit, k)) {
999 /* Ok, we can drop this one, so let's
1000 * do so. */
1001 delete = k;
1002 }
1003
1004 /* Check if this in fact was the beginning of
1005 * the cycle */
1006 if (k == j)
1007 break;
1008 }
1009
1010
1011 if (delete) {
1012 log_warning("Breaking ordering cycle by deleting job %s/%s", delete->unit->meta.id, job_type_to_string(delete->type));
1013 transaction_delete_unit(m, delete->unit);
1014 return -EAGAIN;
1015 }
1016
1017 log_error("Unable to break cycle");
1018
1019 dbus_set_error(e, BUS_ERROR_TRANSACTION_ORDER_IS_CYCLIC, "Transaction order is cyclic. See system logs for details.");
1020 return -ENOEXEC;
1021 }
1022
1023 /* Make the marker point to where we come from, so that we can
1024 * find our way backwards if we want to break a cycle. We use
1025 * a special marker for the beginning: we point to
1026 * ourselves. */
1027 j->marker = from ? from : j;
1028 j->generation = generation;
1029
1030 /* We assume that the dependencies are bidirectional, and
1031 * hence can ignore UNIT_AFTER */
1032 SET_FOREACH(u, j->unit->meta.dependencies[UNIT_BEFORE], i) {
1033 Job *o;
1034
1035 /* Is there a job for this unit? */
1036 if (!(o = hashmap_get(m->transaction_jobs, u)))
1037
1038 /* Ok, there is no job for this in the
1039 * transaction, but maybe there is already one
1040 * running? */
1041 if (!(o = u->meta.job))
1042 continue;
1043
1044 if ((r = transaction_verify_order_one(m, o, j, generation, e)) < 0)
1045 return r;
1046 }
1047
1048 /* Ok, let's backtrack, and remember that this entry is not on
1049 * our path anymore. */
1050 j->marker = NULL;
1051
1052 return 0;
1053 }
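/*
 * Summary of the scheme above: a depth-first walk over the ordering graph,
 * using j->generation as the "visited in this pass" flag and j->marker as a
 * back-pointer along the current path (the root points to itself, and a node
 * proven loop-free gets marker = NULL). Hitting a node whose marker is still
 * set means we closed a cycle; following the marker chain from 'from' walks
 * that cycle so a non-essential job can be picked for deletion.
 */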
1054
1055 static int transaction_verify_order(Manager *m, unsigned *generation, DBusError *e) {
1056 Job *j;
1057 int r;
1058 Iterator i;
1059 unsigned g;
1060
1061 assert(m);
1062 assert(generation);
1063
1064 /* Check if the ordering graph is cyclic. If it is, try to fix
1065 * that up by dropping one of the jobs. */
1066
1067 g = (*generation)++;
1068
1069 HASHMAP_FOREACH(j, m->transaction_jobs, i)
1070 if ((r = transaction_verify_order_one(m, j, NULL, g, e)) < 0)
1071 return r;
1072
1073 return 0;
1074 }
1075
1076 static void transaction_collect_garbage(Manager *m) {
1077 bool again;
1078
1079 assert(m);
1080
1081 /* Drop jobs that are not required by any other job */
1082
1083 do {
1084 Iterator i;
1085 Job *j;
1086
1087 again = false;
1088
1089 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
1090 if (j->object_list) {
1091 /* log_debug("Keeping job %s/%s because of %s/%s", */
1092 /* j->unit->meta.id, job_type_to_string(j->type), */
1093 /* j->object_list->subject ? j->object_list->subject->unit->meta.id : "root", */
1094 /* j->object_list->subject ? job_type_to_string(j->object_list->subject->type) : "root"); */
1095 continue;
1096 }
1097
1098 /* log_debug("Garbage collecting job %s/%s", j->unit->meta.id, job_type_to_string(j->type)); */
1099 transaction_delete_job(m, j, true);
1100 again = true;
1101 break;
1102 }
1103
1104 } while (again);
1105 }
1106
1107 static int transaction_is_destructive(Manager *m, DBusError *e) {
1108 Iterator i;
1109 Job *j;
1110
1111 assert(m);
1112
1113 /* Checks whether applying this transaction means that
1114 * existing jobs would be replaced */
1115
1116 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
1117
1118 /* Assume merged */
1119 assert(!j->transaction_prev);
1120 assert(!j->transaction_next);
1121
1122 if (j->unit->meta.job &&
1123 j->unit->meta.job != j &&
1124 !job_type_is_superset(j->type, j->unit->meta.job->type)) {
1125
1126 dbus_set_error(e, BUS_ERROR_TRANSACTION_IS_DESTRUCTIVE, "Transaction is destructive.");
1127 return -EEXIST;
1128 }
1129 }
1130
1131 return 0;
1132 }
1133
1134 static void transaction_minimize_impact(Manager *m) {
1135 bool again;
1136 assert(m);
1137
1138 /* Drops all unnecessary jobs that reverse already active jobs
1139 * or that stop a running service. */
1140
1141 do {
1142 Job *j;
1143 Iterator i;
1144
1145 again = false;
1146
1147 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
1148 LIST_FOREACH(transaction, j, j) {
1149 bool stops_running_service, changes_existing_job;
1150
1151 /* If it matters, we shouldn't drop it */
1152 if (j->matters_to_anchor)
1153 continue;
1154
1155 /* Would this stop a running service?
1156 * Would this change an existing job?
1157 * If so, let's drop this entry */
1158
1159 stops_running_service =
1160 j->type == JOB_STOP && UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(j->unit));
1161
1162 changes_existing_job =
1163 j->unit->meta.job &&
1164 job_type_is_conflicting(j->type, j->unit->meta.job->type);
1165
1166 if (!stops_running_service && !changes_existing_job)
1167 continue;
1168
1169 if (stops_running_service)
1170 log_debug("%s/%s would stop a running service.", j->unit->meta.id, job_type_to_string(j->type));
1171
1172 if (changes_existing_job)
1173 log_debug("%s/%s would change existing job.", j->unit->meta.id, job_type_to_string(j->type));
1174
1175 /* Ok, let's get rid of this */
1176 log_debug("Deleting %s/%s to minimize impact.", j->unit->meta.id, job_type_to_string(j->type));
1177
1178 transaction_delete_job(m, j, true);
1179 again = true;
1180 break;
1181 }
1182
1183 if (again)
1184 break;
1185 }
1186
1187 } while (again);
1188 }
1189
1190 static int transaction_apply(Manager *m, JobMode mode) {
1191 Iterator i;
1192 Job *j;
1193 int r;
1194
1195 /* Moves the transaction jobs to the set of active jobs */
1196
1197 if (mode == JOB_ISOLATE) {
1198
1199 /* When isolating, first kill all installed jobs which
1200 * aren't part of the new transaction */
1201 HASHMAP_FOREACH(j, m->jobs, i) {
1202 assert(j->installed);
1203
1204 if (hashmap_get(m->transaction_jobs, j->unit))
1205 continue;
1206
1207 job_finish_and_invalidate(j, JOB_CANCELED);
1208 }
1209 }
1210
1211 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
1212 /* Assume merged */
1213 assert(!j->transaction_prev);
1214 assert(!j->transaction_next);
1215
1216 if (j->installed)
1217 continue;
1218
1219 if ((r = hashmap_put(m->jobs, UINT32_TO_PTR(j->id), j)) < 0)
1220 goto rollback;
1221 }
1222
1223 while ((j = hashmap_steal_first(m->transaction_jobs))) {
1224 if (j->installed) {
1225 /* log_debug("Skipping already installed job %s/%s as %u", j->unit->meta.id, job_type_to_string(j->type), (unsigned) j->id); */
1226 continue;
1227 }
1228
1229 if (j->unit->meta.job)
1230 job_free(j->unit->meta.job);
1231
1232 j->unit->meta.job = j;
1233 j->installed = true;
1234 m->n_installed_jobs ++;
1235
1236 /* We're fully installed. Now let's free data we don't
1237 * need anymore. */
1238
1239 assert(!j->transaction_next);
1240 assert(!j->transaction_prev);
1241
1242 job_add_to_run_queue(j);
1243 job_add_to_dbus_queue(j);
1244 job_start_timer(j);
1245
1246 log_debug("Installed new job %s/%s as %u", j->unit->meta.id, job_type_to_string(j->type), (unsigned) j->id);
1247 }
1248
1249 /* As last step, kill all remaining job dependencies. */
1250 transaction_clean_dependencies(m);
1251
1252 return 0;
1253
1254 rollback:
1255
1256 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
1257 if (j->installed)
1258 continue;
1259
1260 hashmap_remove(m->jobs, UINT32_TO_PTR(j->id));
1261 }
1262
1263 return r;
1264 }
1265
1266 static int transaction_activate(Manager *m, JobMode mode, DBusError *e) {
1267 int r;
1268 unsigned generation = 1;
1269
1270 assert(m);
1271
1272 /* This applies the changes recorded in transaction_jobs to
1273 * the actual list of jobs, if possible. */
1274
1275 /* First step: figure out which jobs matter */
1276 transaction_find_jobs_that_matter_to_anchor(m, NULL, generation++);
1277
1278 /* Second step: Try not to stop any running services if
1279 * we don't have to. Don't try to reverse running
1280 * jobs if we don't have to. */
1281 if (mode == JOB_FAIL)
1282 transaction_minimize_impact(m);
1283
1284 /* Third step: Drop redundant jobs */
1285 transaction_drop_redundant(m);
1286
1287 for (;;) {
1288 /* Fourth step: Let's remove unneeded jobs that might
1289 * be lurking. */
1290 if (mode != JOB_ISOLATE)
1291 transaction_collect_garbage(m);
1292
1293 /* Fifth step: verify order makes sense and correct
1294 * cycles if necessary and possible */
1295 if ((r = transaction_verify_order(m, &generation, e)) >= 0)
1296 break;
1297
1298 if (r != -EAGAIN) {
1299 log_warning("Requested transaction contains an unfixable cyclic ordering dependency: %s", bus_error(e, r));
1300 goto rollback;
1301 }
1302
1303 /* Let's see if the resulting transaction ordering
1304 * graph is still cyclic... */
1305 }
1306
1307 for (;;) {
1308 /* Sixth step: let's drop unmergeable entries if
1309 * necessary and possible, merge entries we can
1310 * merge */
1311 if ((r = transaction_merge_jobs(m, e)) >= 0)
1312 break;
1313
1314 if (r != -EAGAIN) {
1315 log_warning("Requested transaction contains unmergeable jobs: %s", bus_error(e, r));
1316 goto rollback;
1317 }
1318
1319 /* Seventh step: an entry got dropped, let's garbage
1320 * collect its dependencies. */
1321 if (mode != JOB_ISOLATE)
1322 transaction_collect_garbage(m);
1323
1324 /* Let's see if the resulting transaction still has
1325 * unmergeable entries ... */
1326 }
1327
1328 /* Eighth step: Drop redundant jobs again, if the merging now allows us to drop more. */
1329 transaction_drop_redundant(m);
1330
1331 /* Ninth step: check whether we can actually apply this */
1332 if (mode == JOB_FAIL)
1333 if ((r = transaction_is_destructive(m, e)) < 0) {
1334 log_notice("Requested transaction contradicts existing jobs: %s", bus_error(e, r));
1335 goto rollback;
1336 }
1337
1338 /* Tenth step: apply changes */
1339 if ((r = transaction_apply(m, mode)) < 0) {
1340 log_warning("Failed to apply transaction: %s", strerror(-r));
1341 goto rollback;
1342 }
1343
1344 assert(hashmap_isempty(m->transaction_jobs));
1345 assert(!m->transaction_anchor);
1346
1347 return 0;
1348
1349 rollback:
1350 transaction_abort(m);
1351 return r;
1352 }
1353
1354 static Job* transaction_add_one_job(Manager *m, JobType type, Unit *unit, bool override, bool *is_new) {
1355 Job *j, *f;
1356
1357 assert(m);
1358 assert(unit);
1359
1360 /* Looks for an existing prospective job and returns that. If
1361 * it doesn't exist it is created and added to the prospective
1362 * jobs list. */
1363
1364 f = hashmap_get(m->transaction_jobs, unit);
1365
1366 LIST_FOREACH(transaction, j, f) {
1367 assert(j->unit == unit);
1368
1369 if (j->type == type) {
1370 if (is_new)
1371 *is_new = false;
1372 return j;
1373 }
1374 }
1375
1376 if (unit->meta.job && unit->meta.job->type == type)
1377 j = unit->meta.job;
1378 else if (!(j = job_new(m, type, unit)))
1379 return NULL;
1380
1381 j->generation = 0;
1382 j->marker = NULL;
1383 j->matters_to_anchor = false;
1384 j->override = override;
1385
1386 LIST_PREPEND(Job, transaction, f, j);
1387
1388 if (hashmap_replace(m->transaction_jobs, unit, f) < 0) {
1389 job_free(j);
1390 return NULL;
1391 }
1392
1393 if (is_new)
1394 *is_new = true;
1395
1396 /* log_debug("Added job %s/%s to transaction.", unit->meta.id, job_type_to_string(type)); */
1397
1398 return j;
1399 }
1400
1401 void manager_transaction_unlink_job(Manager *m, Job *j, bool delete_dependencies) {
1402 assert(m);
1403 assert(j);
1404
1405 if (j->transaction_prev)
1406 j->transaction_prev->transaction_next = j->transaction_next;
1407 else if (j->transaction_next)
1408 hashmap_replace(m->transaction_jobs, j->unit, j->transaction_next);
1409 else
1410 hashmap_remove_value(m->transaction_jobs, j->unit, j);
1411
1412 if (j->transaction_next)
1413 j->transaction_next->transaction_prev = j->transaction_prev;
1414
1415 j->transaction_prev = j->transaction_next = NULL;
1416
1417 while (j->subject_list)
1418 job_dependency_free(j->subject_list);
1419
1420 while (j->object_list) {
1421 Job *other = j->object_list->matters ? j->object_list->subject : NULL;
1422
1423 job_dependency_free(j->object_list);
1424
1425 if (other && delete_dependencies) {
1426 log_debug("Deleting job %s/%s as dependency of job %s/%s",
1427 other->unit->meta.id, job_type_to_string(other->type),
1428 j->unit->meta.id, job_type_to_string(j->type));
1429 transaction_delete_job(m, other, delete_dependencies);
1430 }
1431 }
1432 }
1433
1434 static int transaction_add_job_and_dependencies(
1435 Manager *m,
1436 JobType type,
1437 Unit *unit,
1438 Job *by,
1439 bool matters,
1440 bool override,
1441 bool conflicts,
1442 bool ignore_requirements,
1443 bool ignore_order,
1444 DBusError *e,
1445 Job **_ret) {
1446 Job *ret;
1447 Iterator i;
1448 Unit *dep;
1449 int r;
1450 bool is_new;
1451
1452 assert(m);
1453 assert(type < _JOB_TYPE_MAX);
1454 assert(unit);
1455
1456 /* log_debug("Pulling in %s/%s from %s/%s", */
1457 /* unit->meta.id, job_type_to_string(type), */
1458 /* by ? by->unit->meta.id : "NA", */
1459 /* by ? job_type_to_string(by->type) : "NA"); */
1460
1461 if (unit->meta.load_state != UNIT_LOADED &&
1462 unit->meta.load_state != UNIT_ERROR &&
1463 unit->meta.load_state != UNIT_MASKED) {
1464 dbus_set_error(e, BUS_ERROR_LOAD_FAILED, "Unit %s is not loaded properly.", unit->meta.id);
1465 return -EINVAL;
1466 }
1467
1468 if (type != JOB_STOP && unit->meta.load_state == UNIT_ERROR) {
1469 dbus_set_error(e, BUS_ERROR_LOAD_FAILED,
1470 "Unit %s failed to load: %s. "
1471 "See system logs and 'systemctl status %s' for details.",
1472 unit->meta.id,
1473 strerror(-unit->meta.load_error),
1474 unit->meta.id);
1475 return -EINVAL;
1476 }
1477
1478 if (type != JOB_STOP && unit->meta.load_state == UNIT_MASKED) {
1479 dbus_set_error(e, BUS_ERROR_MASKED, "Unit %s is masked.", unit->meta.id);
1480 return -EINVAL;
1481 }
1482
1483 if (!unit_job_is_applicable(unit, type)) {
1484 dbus_set_error(e, BUS_ERROR_JOB_TYPE_NOT_APPLICABLE, "Job type %s is not applicable for unit %s.", job_type_to_string(type), unit->meta.id);
1485 return -EBADR;
1486 }
1487
1488 /* First add the job. */
1489 if (!(ret = transaction_add_one_job(m, type, unit, override, &is_new)))
1490 return -ENOMEM;
1491
1492 ret->ignore_order = ret->ignore_order || ignore_order;
1493
1494 /* Then, add a link to the job. */
1495 if (!job_dependency_new(by, ret, matters, conflicts))
1496 return -ENOMEM;
1497
1498 if (is_new && !ignore_requirements) {
1499 Set *following;
1500
1501 /* If we are following some other unit, make sure we
1502 * add all dependencies of everybody following. */
1503 if (unit_following_set(ret->unit, &following) > 0) {
1504 SET_FOREACH(dep, following, i)
1505 if ((r = transaction_add_job_and_dependencies(m, type, dep, ret, false, override, false, false, ignore_order, e, NULL)) < 0) {
1506 log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->meta.id, bus_error(e, r));
1507
1508 if (e)
1509 dbus_error_free(e);
1510 }
1511
1512 set_free(following);
1513 }
1514
1515 /* Finally, recursively add in all dependencies. */
1516 if (type == JOB_START || type == JOB_RELOAD_OR_START) {
1517 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_REQUIRES], i)
1518 if ((r = transaction_add_job_and_dependencies(m, JOB_START, dep, ret, true, override, false, false, ignore_order, e, NULL)) < 0) {
1519 if (r != -EBADR)
1520 goto fail;
1521
1522 if (e)
1523 dbus_error_free(e);
1524 }
1525
1526 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_BIND_TO], i)
1527 if ((r = transaction_add_job_and_dependencies(m, JOB_START, dep, ret, true, override, false, false, ignore_order, e, NULL)) < 0) {
1528
1529 if (r != -EBADR)
1530 goto fail;
1531
1532 if (e)
1533 dbus_error_free(e);
1534 }
1535
1536 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_REQUIRES_OVERRIDABLE], i)
1537 if ((r = transaction_add_job_and_dependencies(m, JOB_START, dep, ret, !override, override, false, false, ignore_order, e, NULL)) < 0) {
1538 log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->meta.id, bus_error(e, r));
1539
1540 if (e)
1541 dbus_error_free(e);
1542 }
1543
1544 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_WANTS], i)
1545 if ((r = transaction_add_job_and_dependencies(m, JOB_START, dep, ret, false, false, false, false, ignore_order, e, NULL)) < 0) {
1546 log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->meta.id, bus_error(e, r));
1547
1548 if (e)
1549 dbus_error_free(e);
1550 }
1551
1552 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_REQUISITE], i)
1553 if ((r = transaction_add_job_and_dependencies(m, JOB_VERIFY_ACTIVE, dep, ret, true, override, false, false, ignore_order, e, NULL)) < 0) {
1554
1555 if (r != -EBADR)
1556 goto fail;
1557
1558 if (e)
1559 dbus_error_free(e);
1560 }
1561
1562 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_REQUISITE_OVERRIDABLE], i)
1563 if ((r = transaction_add_job_and_dependencies(m, JOB_VERIFY_ACTIVE, dep, ret, !override, override, false, false, ignore_order, e, NULL)) < 0) {
1564 log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->meta.id, bus_error(e, r));
1565
1566 if (e)
1567 dbus_error_free(e);
1568 }
1569
1570 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_CONFLICTS], i)
1571 if ((r = transaction_add_job_and_dependencies(m, JOB_STOP, dep, ret, true, override, true, false, ignore_order, e, NULL)) < 0) {
1572
1573 if (r != -EBADR)
1574 goto fail;
1575
1576 if (e)
1577 dbus_error_free(e);
1578 }
1579
1580 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_CONFLICTED_BY], i)
1581 if ((r = transaction_add_job_and_dependencies(m, JOB_STOP, dep, ret, false, override, false, false, ignore_order, e, NULL)) < 0) {
1582 log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->meta.id, bus_error(e, r));
1583
1584 if (e)
1585 dbus_error_free(e);
1586 }
1587
1588 } else if (type == JOB_STOP || type == JOB_RESTART || type == JOB_TRY_RESTART) {
1589
1590 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_REQUIRED_BY], i)
1591 if ((r = transaction_add_job_and_dependencies(m, type, dep, ret, true, override, false, false, ignore_order, e, NULL)) < 0) {
1592
1593 if (r != -EBADR)
1594 goto fail;
1595
1596 if (e)
1597 dbus_error_free(e);
1598 }
1599
1600 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_BOUND_BY], i)
1601 if ((r = transaction_add_job_and_dependencies(m, type, dep, ret, true, override, false, false, ignore_order, e, NULL)) < 0) {
1602
1603 if (r != -EBADR)
1604 goto fail;
1605
1606 if (e)
1607 dbus_error_free(e);
1608 }
1609 }
1610
1611 /* JOB_VERIFY_STARTED, JOB_RELOAD require no dependency handling */
1612 }
1613
1614 if (_ret)
1615 *_ret = ret;
1616
1617 return 0;
1618
1619 fail:
1620 return r;
1621 }
1622
1623 static int transaction_add_isolate_jobs(Manager *m) {
1624 Iterator i;
1625 Unit *u;
1626 char *k;
1627 int r;
1628
1629 assert(m);
1630
1631 HASHMAP_FOREACH_KEY(u, k, m->units, i) {
1632
1633 /* ignore aliases */
1634 if (u->meta.id != k)
1635 continue;
1636
1637 if (u->meta.ignore_on_isolate)
1638 continue;
1639
1640 /* No need to stop inactive jobs */
1641 if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(u)) && !u->meta.job)
1642 continue;
1643
1644 /* Is there already something listed for this? */
1645 if (hashmap_get(m->transaction_jobs, u))
1646 continue;
1647
1648 if ((r = transaction_add_job_and_dependencies(m, JOB_STOP, u, NULL, true, false, false, false, false, NULL, NULL)) < 0)
1649 log_warning("Cannot add isolate job for unit %s, ignoring: %s", u->meta.id, strerror(-r));
1650 }
1651
1652 return 0;
1653 }
1654
1655 int manager_add_job(Manager *m, JobType type, Unit *unit, JobMode mode, bool override, DBusError *e, Job **_ret) {
1656 int r;
1657 Job *ret;
1658
1659 assert(m);
1660 assert(type < _JOB_TYPE_MAX);
1661 assert(unit);
1662 assert(mode < _JOB_MODE_MAX);
1663
1664 if (mode == JOB_ISOLATE && type != JOB_START) {
1665 dbus_set_error(e, BUS_ERROR_INVALID_JOB_MODE, "Isolate is only valid for start.");
1666 return -EINVAL;
1667 }
1668
1669 if (mode == JOB_ISOLATE && !unit->meta.allow_isolate) {
1670 dbus_set_error(e, BUS_ERROR_NO_ISOLATION, "Operation refused, unit may not be isolated.");
1671 return -EPERM;
1672 }
1673
1674 log_debug("Trying to enqueue job %s/%s/%s", unit->meta.id, job_type_to_string(type), job_mode_to_string(mode));
1675
1676 if ((r = transaction_add_job_and_dependencies(m, type, unit, NULL, true, override, false,
1677 mode == JOB_IGNORE_DEPENDENCIES || mode == JOB_IGNORE_REQUIREMENTS,
1678 mode == JOB_IGNORE_DEPENDENCIES, e, &ret)) < 0) {
1679 transaction_abort(m);
1680 return r;
1681 }
1682
1683 if (mode == JOB_ISOLATE)
1684 if ((r = transaction_add_isolate_jobs(m)) < 0) {
1685 transaction_abort(m);
1686 return r;
1687 }
1688
1689 if ((r = transaction_activate(m, mode, e)) < 0)
1690 return r;
1691
1692 log_debug("Enqueued job %s/%s as %u", unit->meta.id, job_type_to_string(type), (unsigned) ret->id);
1693
1694 if (_ret)
1695 *_ret = ret;
1696
1697 return 0;
1698 }
1699
1700 int manager_add_job_by_name(Manager *m, JobType type, const char *name, JobMode mode, bool override, DBusError *e, Job **_ret) {
1701 Unit *unit;
1702 int r;
1703
1704 assert(m);
1705 assert(type < _JOB_TYPE_MAX);
1706 assert(name);
1707 assert(mode < _JOB_MODE_MAX);
1708
1709 if ((r = manager_load_unit(m, name, NULL, NULL, &unit)) < 0)
1710 return r;
1711
1712 return manager_add_job(m, type, unit, mode, override, e, _ret);
1713 }
1714
1715 Job *manager_get_job(Manager *m, uint32_t id) {
1716 assert(m);
1717
1718 return hashmap_get(m->jobs, UINT32_TO_PTR(id));
1719 }
1720
1721 Unit *manager_get_unit(Manager *m, const char *name) {
1722 assert(m);
1723 assert(name);
1724
1725 return hashmap_get(m->units, name);
1726 }
1727
1728 unsigned manager_dispatch_load_queue(Manager *m) {
1729 Meta *meta;
1730 unsigned n = 0;
1731
1732 assert(m);
1733
1734 /* Make sure we are not run recursively */
1735 if (m->dispatching_load_queue)
1736 return 0;
1737
1738 m->dispatching_load_queue = true;
1739
1740 /* Dispatches the load queue. Takes a unit from the queue and
1741 * tries to load its data until the queue is empty */
1742
1743 while ((meta = m->load_queue)) {
1744 assert(meta->in_load_queue);
1745
1746 unit_load((Unit*) meta);
1747 n++;
1748 }
1749
1750 m->dispatching_load_queue = false;
1751 return n;
1752 }
1753
1754 int manager_load_unit_prepare(Manager *m, const char *name, const char *path, DBusError *e, Unit **_ret) {
1755 Unit *ret;
1756 int r;
1757
1758 assert(m);
1759 assert(name || path);
1760
1761 /* This will prepare the unit for loading, but not actually
1762 * load anything from disk. */
1763
1764 if (path && !is_path(path)) {
1765 dbus_set_error(e, BUS_ERROR_INVALID_PATH, "Path %s is not absolute.", path);
1766 return -EINVAL;
1767 }
1768
1769 if (!name)
1770 name = file_name_from_path(path);
1771
1772 if (!unit_name_is_valid(name, false)) {
1773 dbus_set_error(e, BUS_ERROR_INVALID_NAME, "Unit name %s is not valid.", name);
1774 return -EINVAL;
1775 }
1776
1777 if ((ret = manager_get_unit(m, name))) {
1778 *_ret = ret;
1779 return 1;
1780 }
1781
1782 if (!(ret = unit_new(m)))
1783 return -ENOMEM;
1784
1785 if (path)
1786 if (!(ret->meta.fragment_path = strdup(path))) {
1787 unit_free(ret);
1788 return -ENOMEM;
1789 }
1790
1791 if ((r = unit_add_name(ret, name)) < 0) {
1792 unit_free(ret);
1793 return r;
1794 }
1795
1796 unit_add_to_load_queue(ret);
1797 unit_add_to_dbus_queue(ret);
1798 unit_add_to_gc_queue(ret);
1799
1800 if (_ret)
1801 *_ret = ret;
1802
1803 return 0;
1804 }
1805
1806 int manager_load_unit(Manager *m, const char *name, const char *path, DBusError *e, Unit **_ret) {
1807 int r;
1808
1809 assert(m);
1810
1811 /* This will load the service information files, but not actually
1812 * start any services or anything. */
1813
1814 if ((r = manager_load_unit_prepare(m, name, path, e, _ret)) != 0)
1815 return r;
1816
1817 manager_dispatch_load_queue(m);
1818
1819 if (_ret)
1820 *_ret = unit_follow_merge(*_ret);
1821
1822 return 0;
1823 }
1824
1825 void manager_dump_jobs(Manager *s, FILE *f, const char *prefix) {
1826 Iterator i;
1827 Job *j;
1828
1829 assert(s);
1830 assert(f);
1831
1832 HASHMAP_FOREACH(j, s->jobs, i)
1833 job_dump(j, f, prefix);
1834 }
1835
1836 void manager_dump_units(Manager *s, FILE *f, const char *prefix) {
1837 Iterator i;
1838 Unit *u;
1839 const char *t;
1840
1841 assert(s);
1842 assert(f);
1843
1844 HASHMAP_FOREACH_KEY(u, t, s->units, i)
1845 if (u->meta.id == t)
1846 unit_dump(u, f, prefix);
1847 }
1848
1849 void manager_clear_jobs(Manager *m) {
1850 Job *j;
1851
1852 assert(m);
1853
1854 transaction_abort(m);
1855
1856 while ((j = hashmap_first(m->jobs)))
1857 job_finish_and_invalidate(j, JOB_CANCELED);
1858 }
1859
1860 unsigned manager_dispatch_run_queue(Manager *m) {
1861 Job *j;
1862 unsigned n = 0;
1863
1864 if (m->dispatching_run_queue)
1865 return 0;
1866
1867 m->dispatching_run_queue = true;
1868
1869 while ((j = m->run_queue)) {
1870 assert(j->installed);
1871 assert(j->in_run_queue);
1872
1873 job_run_and_invalidate(j);
1874 n++;
1875 }
1876
1877 m->dispatching_run_queue = false;
1878 return n;
1879 }
1880
1881 unsigned manager_dispatch_dbus_queue(Manager *m) {
1882 Job *j;
1883 Meta *meta;
1884 unsigned n = 0;
1885
1886 assert(m);
1887
1888 if (m->dispatching_dbus_queue)
1889 return 0;
1890
1891 m->dispatching_dbus_queue = true;
1892
1893 while ((meta = m->dbus_unit_queue)) {
1894 assert(meta->in_dbus_queue);
1895
1896 bus_unit_send_change_signal((Unit*) meta);
1897 n++;
1898 }
1899
1900 while ((j = m->dbus_job_queue)) {
1901 assert(j->in_dbus_queue);
1902
1903 bus_job_send_change_signal(j);
1904 n++;
1905 }
1906
1907 m->dispatching_dbus_queue = false;
1908 return n;
1909 }
1910
1911 static int manager_process_notify_fd(Manager *m) {
1912 ssize_t n;
1913
1914 assert(m);
1915
1916 for (;;) {
1917 char buf[4096];
1918 struct msghdr msghdr;
1919 struct iovec iovec;
1920 struct ucred *ucred;
1921 union {
1922 struct cmsghdr cmsghdr;
1923 uint8_t buf[CMSG_SPACE(sizeof(struct ucred))];
1924 } control;
1925 Unit *u;
1926 char **tags;
1927
1928 zero(iovec);
1929 iovec.iov_base = buf;
1930 iovec.iov_len = sizeof(buf)-1;
1931
1932 zero(control);
1933 zero(msghdr);
1934 msghdr.msg_iov = &iovec;
1935 msghdr.msg_iovlen = 1;
1936 msghdr.msg_control = &control;
1937 msghdr.msg_controllen = sizeof(control);
1938
1939 if ((n = recvmsg(m->notify_watch.fd, &msghdr, MSG_DONTWAIT)) <= 0) {
1940 if (n >= 0)
1941 return -EIO;
1942
1943 if (errno == EAGAIN || errno == EINTR)
1944 break;
1945
1946 return -errno;
1947 }
1948
1949 if (msghdr.msg_controllen < CMSG_LEN(sizeof(struct ucred)) ||
1950 control.cmsghdr.cmsg_level != SOL_SOCKET ||
1951 control.cmsghdr.cmsg_type != SCM_CREDENTIALS ||
1952 control.cmsghdr.cmsg_len != CMSG_LEN(sizeof(struct ucred))) {
1953 log_warning("Received notify message without credentials. Ignoring.");
1954 continue;
1955 }
1956
1957 ucred = (struct ucred*) CMSG_DATA(&control.cmsghdr);
1958
1959 if (!(u = hashmap_get(m->watch_pids, LONG_TO_PTR(ucred->pid))))
1960 if (!(u = cgroup_unit_by_pid(m, ucred->pid))) {
1961 log_warning("Cannot find unit for notify message of PID %lu.", (unsigned long) ucred->pid);
1962 continue;
1963 }
1964
1965 assert((size_t) n < sizeof(buf));
1966 buf[n] = 0;
1967 if (!(tags = strv_split(buf, "\n\r")))
1968 return -ENOMEM;
1969
1970 log_debug("Got notification message for unit %s", u->meta.id);
1971
1972 if (UNIT_VTABLE(u)->notify_message)
1973 UNIT_VTABLE(u)->notify_message(u, ucred->pid, tags);
1974
1975 strv_free(tags);
1976 }
1977
1978 return 0;
1979 }
1980
1981 static int manager_dispatch_sigchld(Manager *m) {
1982 assert(m);
1983
1984 for (;;) {
1985 siginfo_t si;
1986 Unit *u;
1987 int r;
1988
1989 zero(si);
1990
1991 /* First we call waitid() for a PID and do not reap the
1992 * zombie. That way we can still access /proc/$PID for
1993 * it while it is a zombie. */
1994 if (waitid(P_ALL, 0, &si, WEXITED|WNOHANG|WNOWAIT) < 0) {
1995
1996 if (errno == ECHILD)
1997 break;
1998
1999 if (errno == EINTR)
2000 continue;
2001
2002 return -errno;
2003 }
2004
2005 if (si.si_pid <= 0)
2006 break;
2007
2008 if (si.si_code == CLD_EXITED || si.si_code == CLD_KILLED || si.si_code == CLD_DUMPED) {
2009 char *name = NULL;
2010
2011 get_process_name(si.si_pid, &name);
2012 log_debug("Got SIGCHLD for process %lu (%s)", (unsigned long) si.si_pid, strna(name));
2013 free(name);
2014 }
2015
2016 /* Let's flush any message the dying child might still
2017 * have queued for us. This ensures that the process
2018 * still exists in /proc so that we can figure out
2019 * which cgroup and hence unit it belongs to. */
2020 if ((r = manager_process_notify_fd(m)) < 0)
2021 return r;
2022
2023 /* And now figure out the unit this belongs to */
2024 if (!(u = hashmap_get(m->watch_pids, LONG_TO_PTR(si.si_pid))))
2025 u = cgroup_unit_by_pid(m, si.si_pid);
2026
2027 /* And now, we actually reap the zombie. */
2028 if (waitid(P_PID, si.si_pid, &si, WEXITED) < 0) {
2029 if (errno == EINTR)
2030 continue;
2031
2032 return -errno;
2033 }
2034
2035 if (si.si_code != CLD_EXITED && si.si_code != CLD_KILLED && si.si_code != CLD_DUMPED)
2036 continue;
2037
2038 log_debug("Child %lu died (code=%s, status=%i/%s)",
2039 (long unsigned) si.si_pid,
2040 sigchld_code_to_string(si.si_code),
2041 si.si_status,
2042 strna(si.si_code == CLD_EXITED
2043 ? exit_status_to_string(si.si_status, EXIT_STATUS_FULL)
2044 : signal_to_string(si.si_status)));
2045
2046 if (!u)
2047 continue;
2048
2049 log_debug("Child %lu belongs to %s", (long unsigned) si.si_pid, u->meta.id);
2050
2051 hashmap_remove(m->watch_pids, LONG_TO_PTR(si.si_pid));
2052 UNIT_VTABLE(u)->sigchld_event(u, si.si_pid, si.si_code, si.si_status);
2053 }
2054
2055 return 0;
2056 }
2057
2058 static int manager_start_target(Manager *m, const char *name, JobMode mode) {
2059 int r;
2060 DBusError error;
2061
2062 dbus_error_init(&error);
2063
2064 log_debug("Activating special unit %s", name);
2065
2066 if ((r = manager_add_job_by_name(m, JOB_START, name, mode, true, &error, NULL)) < 0)
2067 log_error("Failed to enqueue %s job: %s", name, bus_error(&error, r));
2068
2069 dbus_error_free(&error);
2070
2071 return r;
2072 }
2073
2074 static int manager_process_signal_fd(Manager *m) {
2075 ssize_t n;
2076 struct signalfd_siginfo sfsi;
2077 bool sigchld = false;
2078
2079 assert(m);
2080
2081 for (;;) {
2082 if ((n = read(m->signal_watch.fd, &sfsi, sizeof(sfsi))) != sizeof(sfsi)) {
2083
2084 if (n >= 0)
2085 return -EIO;
2086
2087 if (errno == EINTR || errno == EAGAIN)
2088 break;
2089
2090 return -errno;
2091 }
2092
2093 if (sfsi.ssi_pid > 0) {
2094 char *p = NULL;
2095
2096 get_process_name(sfsi.ssi_pid, &p);
2097
2098 log_debug("Received SIG%s from PID %lu (%s).",
2099 strna(signal_to_string(sfsi.ssi_signo)),
2100 (unsigned long) sfsi.ssi_pid, strna(p));
2101 free(p);
2102 } else
2103 log_debug("Received SIG%s.", strna(signal_to_string(sfsi.ssi_signo)));
2104
2105 switch (sfsi.ssi_signo) {
2106
2107 case SIGCHLD:
2108 sigchld = true;
2109 break;
2110
2111 case SIGTERM:
2112 if (m->running_as == MANAGER_SYSTEM) {
2113 /* This is for compatibility with the
2114 * original sysvinit */
2115 m->exit_code = MANAGER_REEXECUTE;
2116 break;
2117 }
2118
2119 /* Fall through */
2120
2121 case SIGINT:
2122 if (m->running_as == MANAGER_SYSTEM) {
2123 manager_start_target(m, SPECIAL_CTRL_ALT_DEL_TARGET, JOB_REPLACE);
2124 break;
2125 }
2126
2127 /* Run the exit target if there is one; if not, just exit. */
2128 if (manager_start_target(m, SPECIAL_EXIT_TARGET, JOB_REPLACE) < 0) {
2129 m->exit_code = MANAGER_EXIT;
2130 return 0;
2131 }
2132
2133 break;
2134
2135 case SIGWINCH:
2136 if (m->running_as == MANAGER_SYSTEM)
2137 manager_start_target(m, SPECIAL_KBREQUEST_TARGET, JOB_REPLACE);
2138
2139 /* This is a nop on non-init */
2140 break;
2141
2142 case SIGPWR:
2143 if (m->running_as == MANAGER_SYSTEM)
2144 manager_start_target(m, SPECIAL_SIGPWR_TARGET, JOB_REPLACE);
2145
2146 /* This is a nop on non-init */
2147 break;
2148
2149 case SIGUSR1: {
2150 Unit *u;
2151
2152 u = manager_get_unit(m, SPECIAL_DBUS_SERVICE);
2153
2154 if (!u || UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u))) {
2155 log_info("Trying to reconnect to bus...");
2156 bus_init(m, true);
2157 }
2158
2159 if (!u || !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u))) {
2160 log_info("Loading D-Bus service...");
2161 manager_start_target(m, SPECIAL_DBUS_SERVICE, JOB_REPLACE);
2162 }
2163
2164 break;
2165 }
2166
2167 case SIGUSR2: {
2168 FILE *f;
2169 char *dump = NULL;
2170 size_t size;
2171
2172 if (!(f = open_memstream(&dump, &size))) {
2173 log_warning("Failed to allocate memory stream.");
2174 break;
2175 }
2176
2177 manager_dump_units(m, f, "\t");
2178 manager_dump_jobs(m, f, "\t");
2179
2180 if (ferror(f)) {
2181 fclose(f);
2182 free(dump);
2183 log_warning("Failed to write status stream");
2184 break;
2185 }
2186
2187 fclose(f);
2188 log_dump(LOG_INFO, dump);
2189 free(dump);
2190
2191 break;
2192 }
2193
2194 case SIGHUP:
2195 m->exit_code = MANAGER_RELOAD;
2196 break;
2197
2198 default: {
2199 /* Starting SIGRTMIN+0 */
2200 static const char * const target_table[] = {
2201 [0] = SPECIAL_DEFAULT_TARGET,
2202 [1] = SPECIAL_RESCUE_TARGET,
2203 [2] = SPECIAL_EMERGENCY_TARGET,
2204 [3] = SPECIAL_HALT_TARGET,
2205 [4] = SPECIAL_POWEROFF_TARGET,
2206 [5] = SPECIAL_REBOOT_TARGET,
2207 [6] = SPECIAL_KEXEC_TARGET
2208 };
2209
2210 /* Starting at SIGRTMIN+13, so that the halt target (SIGRTMIN+3) and the direct halt exit code (SIGRTMIN+13) are 10 apart */
2211 static const ManagerExitCode code_table[] = {
2212 [0] = MANAGER_HALT,
2213 [1] = MANAGER_POWEROFF,
2214 [2] = MANAGER_REBOOT,
2215 [3] = MANAGER_KEXEC
2216 };
2217
2218 if ((int) sfsi.ssi_signo >= SIGRTMIN+0 &&
2219 (int) sfsi.ssi_signo < SIGRTMIN+(int) ELEMENTSOF(target_table)) {
2220 manager_start_target(m, target_table[sfsi.ssi_signo - SIGRTMIN],
2221 ((int) sfsi.ssi_signo == SIGRTMIN+1 || (int) sfsi.ssi_signo == SIGRTMIN+2) ? JOB_ISOLATE : JOB_REPLACE);
2222 break;
2223 }
2224
2225 if ((int) sfsi.ssi_signo >= SIGRTMIN+13 &&
2226 (int) sfsi.ssi_signo < SIGRTMIN+13+(int) ELEMENTSOF(code_table)) {
2227 m->exit_code = code_table[sfsi.ssi_signo - SIGRTMIN - 13];
2228 break;
2229 }
2230
2231 switch (sfsi.ssi_signo - SIGRTMIN) {
2232
2233 case 20:
2234 log_debug("Enabling showing of status.");
2235 m->show_status = true;
2236 break;
2237
2238 case 21:
2239 log_debug("Disabling showing of status.");
2240 m->show_status = false;
2241 break;
2242
2243 default:
2244 log_warning("Got unhandled signal <%s>.", strna(signal_to_string(sfsi.ssi_signo)));
2245 }
2246 }
2247 }
2248 }
2249
2250 if (sigchld)
2251 return manager_dispatch_sigchld(m);
2252
2253 return 0;
2254 }
2255
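/* Taken together, the two tables above give PID 1 a small signal API:
 * SIGRTMIN+0..6 enqueue jobs for the listed targets, SIGRTMIN+13..16 map
 * directly to the halt/poweroff/reboot/kexec exit codes, and
 * SIGRTMIN+20/21 toggle status output. A hedged sketch of how a
 * sufficiently privileged process could drive this interface: */
#if 0
#include <signal.h>
#include <stdio.h>

int main(void) {
        /* Request rescue.target (target_table[1] above, i.e. SIGRTMIN+1) */
        if (kill(1, SIGRTMIN+1) < 0)
                perror("kill");

        /* Request a direct poweroff (code_table starts at SIGRTMIN+13 with
         * MANAGER_HALT, so SIGRTMIN+14 selects MANAGER_POWEROFF) */
        if (kill(1, SIGRTMIN+14) < 0)
                perror("kill");

        return 0;
}
#endif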
2256 static int process_event(Manager *m, struct epoll_event *ev) {
2257 int r;
2258 Watch *w;
2259
2260 assert(m);
2261 assert(ev);
2262
2263 assert_se(w = ev->data.ptr);
2264
2265 if (w->type == WATCH_INVALID)
2266 return 0;
2267
2268 switch (w->type) {
2269
2270 case WATCH_SIGNAL:
2271
2272 /* An incoming signal? */
2273 if (ev->events != EPOLLIN)
2274 return -EINVAL;
2275
2276 if ((r = manager_process_signal_fd(m)) < 0)
2277 return r;
2278
2279 break;
2280
2281 case WATCH_NOTIFY:
2282
2283 /* An incoming daemon notification event? */
2284 if (ev->events != EPOLLIN)
2285 return -EINVAL;
2286
2287 if ((r = manager_process_notify_fd(m)) < 0)
2288 return r;
2289
2290 break;
2291
2292 case WATCH_FD:
2293
2294 /* Some fd event, to be dispatched to the units */
2295 UNIT_VTABLE(w->data.unit)->fd_event(w->data.unit, w->fd, ev->events, w);
2296 break;
2297
2298 case WATCH_UNIT_TIMER:
2299 case WATCH_JOB_TIMER: {
2300 uint64_t v;
2301 ssize_t k;
2302
2303 /* Some timer event, to be dispatched to the units */
2304 if ((k = read(w->fd, &v, sizeof(v))) != sizeof(v)) {
2305
2306 if (k < 0 && (errno == EINTR || errno == EAGAIN))
2307 break;
2308
2309 return k < 0 ? -errno : -EIO;
2310 }
2311
2312 if (w->type == WATCH_UNIT_TIMER)
2313 UNIT_VTABLE(w->data.unit)->timer_event(w->data.unit, v, w);
2314 else
2315 job_timer_event(w->data.job, v, w);
2316 break;
2317 }
2318
2319 case WATCH_MOUNT:
2320 /* Some mount table change, intended for the mount subsystem */
2321 mount_fd_event(m, ev->events);
2322 break;
2323
2324 case WATCH_SWAP:
2325 /* Some swap table change, intended for the swap subsystem */
2326 swap_fd_event(m, ev->events);
2327 break;
2328
2329 case WATCH_UDEV:
2330 /* Some notification from udev, intended for the device subsystem */
2331 device_fd_event(m, ev->events);
2332 break;
2333
2334 case WATCH_DBUS_WATCH:
2335 bus_watch_event(m, w, ev->events);
2336 break;
2337
2338 case WATCH_DBUS_TIMEOUT:
2339 bus_timeout_event(m, w, ev->events);
2340 break;
2341
2342 default:
2343 log_error("event type=%i", w->type);
2344 assert_not_reached("Unknown epoll event type.");
2345 }
2346
2347 return 0;
2348 }
2349
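/* process_event() works because every fd registered with epoll carries a
 * pointer to a type-tagged Watch in ev->data.ptr, so one epoll_wait() loop
 * can fan events out without any extra lookup. The standalone sketch below
 * illustrates that pattern with made-up types (struct watch, WATCH_SIG,
 * WATCH_TIMER), not the manager's own: */
#if 0
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/epoll.h>
#include <sys/signalfd.h>
#include <unistd.h>

enum watch_type { WATCH_SIG, WATCH_TIMER };

struct watch {
        enum watch_type type;
        int fd;
};

int main(void) {
        sigset_t mask;
        struct watch sig_watch;
        struct epoll_event ev, event;
        int ep;

        sigemptyset(&mask);
        sigaddset(&mask, SIGINT);
        sigprocmask(SIG_BLOCK, &mask, NULL);

        sig_watch.type = WATCH_SIG;
        if ((sig_watch.fd = signalfd(-1, &mask, SFD_CLOEXEC|SFD_NONBLOCK)) < 0)
                return 1;

        if ((ep = epoll_create1(EPOLL_CLOEXEC)) < 0)
                return 1;

        memset(&ev, 0, sizeof(ev));
        ev.events = EPOLLIN;
        ev.data.ptr = &sig_watch; /* the tag travels with the fd */
        if (epoll_ctl(ep, EPOLL_CTL_ADD, sig_watch.fd, &ev) < 0)
                return 1;

        if (epoll_wait(ep, &event, 1, -1) == 1) {
                struct watch *w = event.data.ptr;

                switch (w->type) { /* dispatch on the tag, as process_event() does */

                case WATCH_SIG:
                        puts("signal fd is readable");
                        break;

                case WATCH_TIMER:
                        puts("timer fd elapsed");
                        break;
                }
        }

        close(sig_watch.fd);
        close(ep);
        return 0;
}
#endif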
2350 int manager_loop(Manager *m) {
2351 int r;
2352
2353 RATELIMIT_DEFINE(rl, 1*USEC_PER_SEC, 50000);
2354
2355 assert(m);
2356 m->exit_code = MANAGER_RUNNING;
2357
2358 /* Release the path cache */
2359 set_free_free(m->unit_path_cache);
2360 m->unit_path_cache = NULL;
2361
2362 manager_check_finished(m);
2363
2364 /* There might still be some zombies hanging around from
2365 * before we were exec()'ed. Let's reap them. */
2366 if ((r = manager_dispatch_sigchld(m)) < 0)
2367 return r;
2368
2369 while (m->exit_code == MANAGER_RUNNING) {
2370 struct epoll_event event;
2371 int n;
2372
2373 if (!ratelimit_test(&rl)) {
2374 /* Yay, something is going seriously wrong, pause a little */
2375 log_warning("Looping too fast. Throttling execution a little.");
2376 sleep(1);
2377 }
2378
2379 if (manager_dispatch_load_queue(m) > 0)
2380 continue;
2381
2382 if (manager_dispatch_run_queue(m) > 0)
2383 continue;
2384
2385 if (bus_dispatch(m) > 0)
2386 continue;
2387
2388 if (manager_dispatch_cleanup_queue(m) > 0)
2389 continue;
2390
2391 if (manager_dispatch_gc_queue(m) > 0)
2392 continue;
2393
2394 if (manager_dispatch_dbus_queue(m) > 0)
2395 continue;
2396
2397 if (swap_dispatch_reload(m) > 0)
2398 continue;
2399
2400 if ((n = epoll_wait(m->epoll_fd, &event, 1, -1)) < 0) {
2401
2402 if (errno == EINTR)
2403 continue;
2404
2405 return -errno;
2406 }
2407
2408 assert(n == 1);
2409
2410 if ((r = process_event(m, &event)) < 0)
2411 return r;
2412 }
2413
2414 return m->exit_code;
2415 }
2416
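/* The RATELIMIT_DEFINE()/ratelimit_test() guard above bounds how many
 * iterations the loop may make per interval before it deliberately sleeps.
 * The sketch below is a simplistic stand-in for that idea, written from
 * scratch for illustration; it is not the manager's ratelimit code: */
#if 0
#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

#define INTERVAL_SEC 1
#define BURST 50000

/* Allow up to BURST passes per INTERVAL_SEC, otherwise report "too fast" */
static bool ratelimit_ok(void) {
        static time_t window_start;
        static unsigned n;
        time_t now = time(NULL);

        if (now - window_start >= INTERVAL_SEC) {
                window_start = now;
                n = 0;
        }

        return n++ < BURST;
}

int main(void) {
        for (;;) {
                if (!ratelimit_ok()) {
                        fprintf(stderr, "Looping too fast. Throttling execution a little.\n");
                        sleep(1);
                }

                /* ... one iteration of event loop work would go here ... */
                break; /* keep the example terminating */
        }

        return 0;
}
#endif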
2417 int manager_get_unit_from_dbus_path(Manager *m, const char *s, Unit **_u) {
2418 char *n;
2419 Unit *u;
2420
2421 assert(m);
2422 assert(s);
2423 assert(_u);
2424
2425 if (!startswith(s, "/org/freedesktop/systemd1/unit/"))
2426 return -EINVAL;
2427
2428 if (!(n = bus_path_unescape(s+31)))
2429 return -ENOMEM;
2430
2431 u = manager_get_unit(m, n);
2432 free(n);
2433
2434 if (!u)
2435 return -ENOENT;
2436
2437 *_u = u;
2438
2439 return 0;
2440 }
2441
2442 int manager_get_job_from_dbus_path(Manager *m, const char *s, Job **_j) {
2443 Job *j;
2444 unsigned id;
2445 int r;
2446
2447 assert(m);
2448 assert(s);
2449 assert(_j);
2450
2451 if (!startswith(s, "/org/freedesktop/systemd1/job/"))
2452 return -EINVAL;
2453
2454 if ((r = safe_atou(s + 30, &id)) < 0)
2455 return r;
2456
2457 if (!(j = manager_get_job(m, id)))
2458 return -ENOENT;
2459
2460 *_j = j;
2461
2462 return 0;
2463 }
2464
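/* Both lookups above strip a fixed D-Bus object path prefix and decode the
 * remainder. For unit paths the suffix is the unit name with every byte
 * that is not an ASCII letter or digit escaped as "_xx" (lower-case hex),
 * e.g. "ssh.service" maps to .../unit/ssh_2eservice. The sketch below
 * builds such a path with a made-up helper, unit_dbus_path(); it only
 * mirrors that convention and is not bus_path_escape() itself: */
#if 0
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *unit_dbus_path(const char *name) {
        const char *prefix = "/org/freedesktop/systemd1/unit/";
        char *out, *p;
        size_t i;

        /* Worst case every byte expands to three characters */
        if (!(out = malloc(strlen(prefix) + strlen(name)*3 + 1)))
                return NULL;

        p = stpcpy(out, prefix);

        for (i = 0; name[i]; i++)
                if (isalnum((unsigned char) name[i]))
                        *(p++) = name[i];
                else
                        p += sprintf(p, "_%02x", (unsigned char) name[i]);

        *p = 0;
        return out;
}

int main(void) {
        char *p = unit_dbus_path("ssh.service");

        if (p)
                puts(p); /* /org/freedesktop/systemd1/unit/ssh_2eservice */

        free(p);
        return 0;
}
#endif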
2465 void manager_send_unit_audit(Manager *m, Unit *u, int type, bool success) {
2466
2467 #ifdef HAVE_AUDIT
2468 char *p;
2469
2470 if (m->audit_fd < 0)
2471 return;
2472
2473 /* Don't generate audit events if the service was already
2474 * started and we're just deserializing */
2475 if (m->n_deserializing > 0)
2476 return;
2477
2478 if (m->running_as != MANAGER_SYSTEM)
2479 return;
2480
2481 if (u->meta.type != UNIT_SERVICE)
2482 return;
2483
2484 if (!(p = unit_name_to_prefix_and_instance(u->meta.id))) {
2485 log_error("Failed to allocate unit name for audit message: %s", strerror(ENOMEM));
2486 return;
2487 }
2488
2489 if (audit_log_user_comm_message(m->audit_fd, type, "", p, NULL, NULL, NULL, success) < 0) {
2490 log_warning("Failed to send audit message: %m");
2491
2492 if (errno == EPERM) {
2493 /* We aren't allowed to send audit messages?
2494 * Then let's not retry again, to avoid
2495 * spamming the user with the same
2496 * messages over and over. */
2497
2498 audit_close(m->audit_fd);
2499 m->audit_fd = -1;
2500 }
2501 }
2502
2503 free(p);
2504 #endif
2505
2506 }
2507
2508 void manager_send_unit_plymouth(Manager *m, Unit *u) {
2509 int fd = -1;
2510 union sockaddr_union sa;
2511 int n = 0;
2512 char *message = NULL;
2513
2514 /* Don't generate plymouth events if the service was already
2515 * started and we're just deserializing */
2516 if (m->n_deserializing > 0)
2517 return;
2518
2519 if (m->running_as != MANAGER_SYSTEM)
2520 return;
2521
2522 if (u->meta.type != UNIT_SERVICE &&
2523 u->meta.type != UNIT_MOUNT &&
2524 u->meta.type != UNIT_SWAP)
2525 return;
2526
2527 /* We set SOCK_NONBLOCK here so that we rather drop the
2528 * message than wait for Plymouth */
2529 if ((fd = socket(AF_UNIX, SOCK_STREAM|SOCK_CLOEXEC|SOCK_NONBLOCK, 0)) < 0) {
2530 log_error("socket() failed: %m");
2531 return;
2532 }
2533
2534 zero(sa);
2535 sa.sa.sa_family = AF_UNIX;
2536 strncpy(sa.un.sun_path+1, "/org/freedesktop/plymouthd", sizeof(sa.un.sun_path)-1);
2537 if (connect(fd, &sa.sa, offsetof(struct sockaddr_un, sun_path) + 1 + strlen(sa.un.sun_path+1)) < 0) {
2538
2539 if (errno != EPIPE &&
2540 errno != EAGAIN &&
2541 errno != ENOENT &&
2542 errno != ECONNREFUSED &&
2543 errno != ECONNRESET &&
2544 errno != ECONNABORTED)
2545 log_error("connect() failed: %m");
2546
2547 goto finish;
2548 }
2549
2550 if (asprintf(&message, "U\002%c%s%n", (int) (strlen(u->meta.id) + 1), u->meta.id, &n) < 0) {
2551 log_error("Out of memory");
2552 goto finish;
2553 }
2554
2555 errno = 0;
2556 if (write(fd, message, n + 1) != n + 1) {
2557
2558 if (errno != EPIPE &&
2559 errno != EAGAIN &&
2560 errno != ENOENT &&
2561 errno != ECONNREFUSED &&
2562 errno != ECONNRESET &&
2563 errno != ECONNABORTED)
2564 log_error("Failed to write Plymouth message: %m");
2565
2566 goto finish;
2567 }
2568
2569 finish:
2570 if (fd >= 0)
2571 close_nointr_nofail(fd);
2572
2573 free(message);
2574 }
2575
2576 void manager_dispatch_bus_name_owner_changed(
2577 Manager *m,
2578 const char *name,
2579 const char* old_owner,
2580 const char *new_owner) {
2581
2582 Unit *u;
2583
2584 assert(m);
2585 assert(name);
2586
2587 if (!(u = hashmap_get(m->watch_bus, name)))
2588 return;
2589
2590 UNIT_VTABLE(u)->bus_name_owner_change(u, name, old_owner, new_owner);
2591 }
2592
2593 void manager_dispatch_bus_query_pid_done(
2594 Manager *m,
2595 const char *name,
2596 pid_t pid) {
2597
2598 Unit *u;
2599
2600 assert(m);
2601 assert(name);
2602 assert(pid >= 1);
2603
2604 if (!(u = hashmap_get(m->watch_bus, name)))
2605 return;
2606
2607 UNIT_VTABLE(u)->bus_query_pid_done(u, name, pid);
2608 }
2609
2610 int manager_open_serialization(Manager *m, FILE **_f) {
2611 char *path = NULL;
2612 mode_t saved_umask;
2613 int fd;
2614 FILE *f;
2615
2616 assert(_f);
2617
2618 if (m->running_as == MANAGER_SYSTEM)
2619 asprintf(&path, "/run/systemd/dump-%lu-XXXXXX", (unsigned long) getpid());
2620 else
2621 asprintf(&path, "/tmp/systemd-dump-%lu-XXXXXX", (unsigned long) getpid());
2622
2623 if (!path)
2624 return -ENOMEM;
2625
2626 saved_umask = umask(0077);
2627 fd = mkostemp(path, O_RDWR|O_CLOEXEC);
2628 umask(saved_umask);
2629
2630 if (fd < 0) {
2631 free(path);
2632 return -errno;
2633 }
2634
2635 unlink(path);
2636
2637 log_debug("Serializing state to %s", path);
2638 free(path);
2639
2640 if (!(f = fdopen(fd, "w+")))
2641 return -errno;
2642
2643 *_f = f;
2644
2645 return 0;
2646 }
2647
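/* The function above uses a classic trick for private scratch files:
 * create the file with a tight umask, unlink it right away and keep only
 * the stream, so the data lives exactly as long as the FILE* does and is
 * never reachable by name. A standalone sketch of the same idiom: */
#if 0
#include <stdio.h>
#include <stdlib.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void) {
        char path[] = "/tmp/example-scratch-XXXXXX";
        mode_t saved_umask;
        int fd;
        FILE *f;

        /* Make sure the file is created 0600 regardless of inherited umask */
        saved_umask = umask(0077);
        fd = mkstemp(path);
        umask(saved_umask);

        if (fd < 0)
                return 1;

        /* The name disappears immediately; the open fd keeps the inode alive */
        unlink(path);

        if (!(f = fdopen(fd, "w+"))) {
                close(fd);
                return 1;
        }

        fputs("temporary state nobody else can open by name\n", f);
        rewind(f);

        /* ... read it back, hand the FILE* to another component, etc. ... */

        fclose(f); /* also closes fd; the inode is released here */
        return 0;
}
#endif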
2648 int manager_serialize(Manager *m, FILE *f, FDSet *fds) {
2649 Iterator i;
2650 Unit *u;
2651 const char *t;
2652 int r;
2653
2654 assert(m);
2655 assert(f);
2656 assert(fds);
2657
2658 m->n_serializing ++;
2659
2660 fprintf(f, "current-job-id=%i\n", m->current_job_id);
2661 fprintf(f, "taint-usr=%s\n", yes_no(m->taint_usr));
2662
2663 dual_timestamp_serialize(f, "initrd-timestamp", &m->initrd_timestamp);
2664 dual_timestamp_serialize(f, "startup-timestamp", &m->startup_timestamp);
2665 dual_timestamp_serialize(f, "finish-timestamp", &m->finish_timestamp);
2666
2667 fputc('\n', f);
2668
2669 HASHMAP_FOREACH_KEY(u, t, m->units, i) {
2670 if (u->meta.id != t)
2671 continue;
2672
2673 if (!unit_can_serialize(u))
2674 continue;
2675
2676 /* Start marker */
2677 fputs(u->meta.id, f);
2678 fputc('\n', f);
2679
2680 if ((r = unit_serialize(u, f, fds)) < 0) {
2681 m->n_serializing --;
2682 return r;
2683 }
2684 }
2685
2686 assert(m->n_serializing > 0);
2687 m->n_serializing --;
2688
2689 if (ferror(f))
2690 return -EIO;
2691
2692 r = bus_fdset_add_all(m, fds);
2693 if (r < 0)
2694 return r;
2695
2696 return 0;
2697 }
2698
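/* The stream written above is deliberately simple: a header of KEY=VALUE
 * lines, one empty line, then one block per serializable unit introduced
 * by its name, which is exactly what manager_deserialize() below expects.
 * Roughly (the values here are made up):
 *
 *     current-job-id=27
 *     taint-usr=no
 *     startup-timestamp=...
 *
 *     ssh.service
 *     <output of unit_serialize() for ssh.service>
 *     cups.socket
 *     <output of unit_serialize() for cups.socket>
 */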
2699 int manager_deserialize(Manager *m, FILE *f, FDSet *fds) {
2700 int r = 0;
2701
2702 assert(m);
2703 assert(f);
2704
2705 log_debug("Deserializing state...");
2706
2707 m->n_deserializing ++;
2708
2709 for (;;) {
2710 char line[LINE_MAX], *l;
2711
2712 if (!fgets(line, sizeof(line), f)) {
2713 if (feof(f))
2714 r = 0;
2715 else
2716 r = -errno;
2717
2718 goto finish;
2719 }
2720
2721 char_array_0(line);
2722 l = strstrip(line);
2723
2724 if (l[0] == 0)
2725 break;
2726
2727 if (startswith(l, "current-job-id=")) {
2728 uint32_t id;
2729
2730 if (safe_atou32(l+15, &id) < 0)
2731 log_debug("Failed to parse current job id value %s", l+15);
2732 else
2733 m->current_job_id = MAX(m->current_job_id, id);
2734 } else if (startswith(l, "taint-usr=")) {
2735 int b;
2736
2737 if ((b = parse_boolean(l+10)) < 0)
2738 log_debug("Failed to parse taint /usr flag %s", l+10);
2739 else
2740 m->taint_usr = m->taint_usr || b;
2741 } else if (startswith(l, "initrd-timestamp="))
2742 dual_timestamp_deserialize(l+17, &m->initrd_timestamp);
2743 else if (startswith(l, "startup-timestamp="))
2744 dual_timestamp_deserialize(l+18, &m->startup_timestamp);
2745 else if (startswith(l, "finish-timestamp="))
2746 dual_timestamp_deserialize(l+17, &m->finish_timestamp);
2747 else
2748 log_debug("Unknown serialization item '%s'", l);
2749 }
2750
2751 for (;;) {
2752 Unit *u;
2753 char name[UNIT_NAME_MAX+2];
2754
2755 /* Start marker */
2756 if (!fgets(name, sizeof(name), f)) {
2757 if (feof(f))
2758 r = 0;
2759 else
2760 r = -errno;
2761
2762 goto finish;
2763 }
2764
2765 char_array_0(name);
2766
2767 if ((r = manager_load_unit(m, strstrip(name), NULL, NULL, &u)) < 0)
2768 goto finish;
2769
2770 if ((r = unit_deserialize(u, f, fds)) < 0)
2771 goto finish;
2772 }
2773
2774 finish:
2775 if (ferror(f))
2776 r = -EIO;
2779
2780 assert(m->n_deserializing > 0);
2781 m->n_deserializing --;
2782
2783 return r;
2784 }
2785
2786 int manager_reload(Manager *m) {
2787 int r, q;
2788 FILE *f;
2789 FDSet *fds;
2790
2791 assert(m);
2792
2793 if ((r = manager_open_serialization(m, &f)) < 0)
2794 return r;
2795
2796 m->n_serializing ++;
2797
2798 if (!(fds = fdset_new())) {
2799 m->n_serializing --;
2800 r = -ENOMEM;
2801 goto finish;
2802 }
2803
2804 if ((r = manager_serialize(m, f, fds)) < 0) {
2805 m->n_serializing --;
2806 goto finish;
2807 }
2808
2809 if (fseeko(f, 0, SEEK_SET) < 0) {
2810 m->n_serializing --;
2811 r = -errno;
2812 goto finish;
2813 }
2814
2815 /* From here on there is no way back. */
2816 manager_clear_jobs_and_units(m);
2817 manager_undo_generators(m);
2818
2819 assert(m->n_serializing > 0);
2820 m->n_serializing --;
2821
2822 /* Find new unit paths */
2823 lookup_paths_free(&m->lookup_paths);
2824 if ((q = lookup_paths_init(&m->lookup_paths, m->running_as)) < 0)
2825 r = q;
2826
2827 manager_run_generators(m);
2828
2829 manager_build_unit_path_cache(m);
2830
2831 m->n_deserializing ++;
2832
2833 /* First, enumerate what we can from all config files */
2834 if ((q = manager_enumerate(m)) < 0)
2835 r = q;
2836
2837 /* Second, deserialize our stored data */
2838 if ((q = manager_deserialize(m, f, fds)) < 0)
2839 r = q;
2840
2841 fclose(f);
2842 f = NULL;
2843
2844 /* Third, fire things up! */
2845 if ((q = manager_coldplug(m)) < 0)
2846 r = q;
2847
2848 assert(m->n_deserializing > 0);
2849 m->n_deserializing--;
2850
2851 finish:
2852 if (f)
2853 fclose(f);
2854
2855 if (fds)
2856 fdset_free(fds);
2857
2858 return r;
2859 }
2860
2861 bool manager_is_booting_or_shutting_down(Manager *m) {
2862 Unit *u;
2863
2864 assert(m);
2865
2866 /* Is the initial job still around? */
2867 if (manager_get_job(m, 1))
2868 return true;
2869
2870 /* Is there a job for the shutdown target? */
2871 if ((u = manager_get_unit(m, SPECIAL_SHUTDOWN_TARGET)))
2872 return !!u->meta.job;
2873
2874 return false;
2875 }
2876
2877 void manager_reset_failed(Manager *m) {
2878 Unit *u;
2879 Iterator i;
2880
2881 assert(m);
2882
2883 HASHMAP_FOREACH(u, m->units, i)
2884 unit_reset_failed(u);
2885 }
2886
2887 bool manager_unit_pending_inactive(Manager *m, const char *name) {
2888 Unit *u;
2889
2890 assert(m);
2891 assert(name);
2892
2893 /* Returns true if the unit is inactive or going down */
2894 if (!(u = manager_get_unit(m, name)))
2895 return true;
2896
2897 return unit_pending_inactive(u);
2898 }
2899
2900 void manager_check_finished(Manager *m) {
2901 char userspace[FORMAT_TIMESPAN_MAX], initrd[FORMAT_TIMESPAN_MAX], kernel[FORMAT_TIMESPAN_MAX], sum[FORMAT_TIMESPAN_MAX];
2902
2903 assert(m);
2904
2905 if (dual_timestamp_is_set(&m->finish_timestamp))
2906 return;
2907
2908 if (hashmap_size(m->jobs) > 0)
2909 return;
2910
2911 dual_timestamp_get(&m->finish_timestamp);
2912
2913 if (m->running_as == MANAGER_SYSTEM && detect_container(NULL) <= 0) {
2914
2915 if (dual_timestamp_is_set(&m->initrd_timestamp)) {
2916 log_info("Startup finished in %s (kernel) + %s (initrd) + %s (userspace) = %s.",
2917 format_timespan(kernel, sizeof(kernel),
2918 m->initrd_timestamp.monotonic),
2919 format_timespan(initrd, sizeof(initrd),
2920 m->startup_timestamp.monotonic - m->initrd_timestamp.monotonic),
2921 format_timespan(userspace, sizeof(userspace),
2922 m->finish_timestamp.monotonic - m->startup_timestamp.monotonic),
2923 format_timespan(sum, sizeof(sum),
2924 m->finish_timestamp.monotonic));
2925 } else
2926 log_info("Startup finished in %s (kernel) + %s (userspace) = %s.",
2927 format_timespan(kernel, sizeof(kernel),
2928 m->startup_timestamp.monotonic),
2929 format_timespan(userspace, sizeof(userspace),
2930 m->finish_timestamp.monotonic - m->startup_timestamp.monotonic),
2931 format_timespan(sum, sizeof(sum),
2932 m->finish_timestamp.monotonic));
2933 } else
2934 log_debug("Startup finished in %s.",
2935 format_timespan(userspace, sizeof(userspace),
2936 m->finish_timestamp.monotonic - m->startup_timestamp.monotonic));
2937
2938 }
2939
2940 void manager_run_generators(Manager *m) {
2941 DIR *d = NULL;
2942 const char *generator_path;
2943 const char *argv[3];
2944
2945 assert(m);
2946
2947 generator_path = m->running_as == MANAGER_SYSTEM ? SYSTEM_GENERATOR_PATH : USER_GENERATOR_PATH;
2948 if (!(d = opendir(generator_path))) {
2949
2950 if (errno == ENOENT)
2951 return;
2952
2953 log_error("Failed to enumerate generator directory: %m");
2954 return;
2955 }
2956
2957 if (!m->generator_unit_path) {
2958 const char *p;
2959 char user_path[] = "/tmp/systemd-generator-XXXXXX";
2960
2961 if (m->running_as == MANAGER_SYSTEM && getpid() == 1) {
2962 p = "/run/systemd/generator";
2963
2964 if (mkdir_p(p, 0755) < 0) {
2965 log_error("Failed to create generator directory: %m");
2966 goto finish;
2967 }
2968
2969 } else {
2970 if (!(p = mkdtemp(user_path))) {
2971 log_error("Failed to create generator directory: %m");
2972 goto finish;
2973 }
2974 }
2975
2976 if (!(m->generator_unit_path = strdup(p))) {
2977 log_error("Failed to allocate generator unit path.");
2978 goto finish;
2979 }
2980 }
2981
2982 argv[0] = NULL; /* Leave this empty, execute_directory() will fill something in */
2983 argv[1] = m->generator_unit_path;
2984 argv[2] = NULL;
2985
2986 execute_directory(generator_path, d, (char**) argv);
2987
2988 if (rmdir(m->generator_unit_path) >= 0) {
2989 /* Uh? We were able to remove this dir? That means
2990 * the directory was empty, so let's just shortcut
2991 * this. */
2992
2993 free(m->generator_unit_path);
2994 m->generator_unit_path = NULL;
2995 goto finish;
2996 }
2997
2998 if (!strv_find(m->lookup_paths.unit_path, m->generator_unit_path)) {
2999 char **l;
3000
3001 if (!(l = strv_append(m->lookup_paths.unit_path, m->generator_unit_path))) {
3002 log_error("Failed to add generator directory to unit search path: %m");
3003 goto finish;
3004 }
3005
3006 strv_free(m->lookup_paths.unit_path);
3007 m->lookup_paths.unit_path = l;
3008
3009 log_debug("Added generator unit path %s to search path.", m->generator_unit_path);
3010 }
3011
3012 finish:
3013 if (d)
3014 closedir(d);
3015 }
3016
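/* Generators are plain executables in generator_path: as invoked above
 * they receive the writable generator unit directory as argv[1] and are
 * expected to drop unit files into it before units get loaded. A minimal,
 * purely hypothetical generator might look like this: */
#if 0
#include <stdio.h>

int main(int argc, char *argv[]) {
        char path[4096];
        FILE *f;

        if (argc < 2)
                return 1;

        snprintf(path, sizeof(path), "%s/example-generated.service", argv[1]);

        if (!(f = fopen(path, "we")))
                return 1;

        fprintf(f,
                "[Unit]\n"
                "Description=Example unit written by a generator\n"
                "\n"
                "[Service]\n"
                "ExecStart=/bin/true\n");

        fclose(f);
        return 0;
}
#endif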
3017 void manager_undo_generators(Manager *m) {
3018 assert(m);
3019
3020 if (!m->generator_unit_path)
3021 return;
3022
3023 strv_remove(m->lookup_paths.unit_path, m->generator_unit_path);
3024 rm_rf(m->generator_unit_path, false, true);
3025
3026 free(m->generator_unit_path);
3027 m->generator_unit_path = NULL;
3028 }
3029
3030 int manager_set_default_controllers(Manager *m, char **controllers) {
3031 char **l;
3032
3033 assert(m);
3034
3035 if (!(l = strv_copy(controllers)))
3036 return -ENOMEM;
3037
3038 strv_free(m->default_controllers);
3039 m->default_controllers = l;
3040
3041 return 0;
3042 }
3043
3044 void manager_recheck_syslog(Manager *m) {
3045 Unit *u;
3046
3047 assert(m);
3048
3049 if (m->running_as != MANAGER_SYSTEM)
3050 return;
3051
3052 if ((u = manager_get_unit(m, SPECIAL_SYSLOG_SOCKET))) {
3053 SocketState state;
3054
3055 state = SOCKET(u)->state;
3056
3057 if (state != SOCKET_DEAD &&
3058 state != SOCKET_FAILED &&
3059 state != SOCKET_RUNNING) {
3060
3061 /* Hmm, the socket is not set up, or is still
3062 * listening, so we had better not try to use
3063 * it. Note that we have no problem if the
3064 * socket is completely down, since there
3065 * might be a foreign /dev/log socket around
3066 * and we want to make use of that.
3067 */
3068
3069 log_close_syslog();
3070 return;
3071 }
3072 }
3073
3074 if ((u = manager_get_unit(m, SPECIAL_SYSLOG_TARGET)))
3075 if (TARGET(u)->state != TARGET_ACTIVE) {
3076 log_close_syslog();
3077 return;
3078 }
3079
3080 /* OK, so the socket is either fully up or fully down,
3081 * and the target is up, so let's make use of the socket */
3082 log_open();
3083 }
3084
3085 static const char* const manager_running_as_table[_MANAGER_RUNNING_AS_MAX] = {
3086 [MANAGER_SYSTEM] = "system",
3087 [MANAGER_USER] = "user"
3088 };
3089
3090 DEFINE_STRING_TABLE_LOOKUP(manager_running_as, ManagerRunningAs);