1 /*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/
2
3 /***
4 This file is part of systemd.
5
6 Copyright 2010 Lennart Poettering
7
8 systemd is free software; you can redistribute it and/or modify it
9 under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2 of the License, or
11 (at your option) any later version.
12
13 systemd is distributed in the hope that it will be useful, but
14 WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with systemd; If not, see <http://www.gnu.org/licenses/>.
20 ***/
21
22 #include <assert.h>
23 #include <errno.h>
24 #include <string.h>
25 #include <sys/epoll.h>
26 #include <signal.h>
27 #include <sys/signalfd.h>
28 #include <sys/wait.h>
29 #include <unistd.h>
30 #include <sys/poll.h>
31 #include <sys/reboot.h>
32 #include <sys/ioctl.h>
33 #include <linux/kd.h>
34 #include <termios.h>
35 #include <fcntl.h>
36 #include <sys/types.h>
37 #include <sys/stat.h>
38 #include <dirent.h>
39
40 #ifdef HAVE_AUDIT
41 #include <libaudit.h>
42 #endif
43
44 #include "manager.h"
45 #include "hashmap.h"
46 #include "macro.h"
47 #include "strv.h"
48 #include "log.h"
49 #include "util.h"
50 #include "ratelimit.h"
51 #include "cgroup.h"
52 #include "mount-setup.h"
53 #include "unit-name.h"
54 #include "dbus-unit.h"
55 #include "dbus-job.h"
56 #include "missing.h"
57 #include "path-lookup.h"
58 #include "special.h"
59 #include "bus-errors.h"
60 #include "exit-status.h"
61 #include "sd-daemon.h"
62
63 /* As soon as 16 units are in our GC queue, make sure to run a gc sweep */
64 #define GC_QUEUE_ENTRIES_MAX 16
65
66 /* As soon as 10s have passed since a unit was added to our GC queue, make sure to run a gc sweep */
67 #define GC_QUEUE_USEC_MAX (10*USEC_PER_SEC)
68
69 /* Where clients shall send notification messages to */
70 #define NOTIFY_SOCKET_SYSTEM "/run/systemd/notify"
71 #define NOTIFY_SOCKET_USER "@/org/freedesktop/systemd1/notify"
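
/* Illustration only (not part of the manager logic): clients report
 * state on this socket as a single datagram of newline-separated
 * "tag=value" assignments, which manager_process_notify_fd() below
 * splits up and hands to the unit's notify_message() hook. A service
 * would typically use sd_notify(3) from sd-daemon.h, e.g.:
 *
 *     sd_notify(0, "READY=1\n"
 *                  "STATUS=Ready to accept connections");
 *
 * STATUS= here is just an example tag; which tags are honoured
 * depends on the unit type. */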
72
73 static int manager_setup_notify(Manager *m) {
74 union {
75 struct sockaddr sa;
76 struct sockaddr_un un;
77 } sa;
78 struct epoll_event ev;
79 int one = 1;
80
81 assert(m);
82
83 m->notify_watch.type = WATCH_NOTIFY;
84 if ((m->notify_watch.fd = socket(AF_UNIX, SOCK_DGRAM|SOCK_CLOEXEC|SOCK_NONBLOCK, 0)) < 0) {
85 log_error("Failed to allocate notification socket: %m");
86 return -errno;
87 }
88
89 zero(sa);
90 sa.sa.sa_family = AF_UNIX;
91
92 if (getpid() != 1)
93 snprintf(sa.un.sun_path, sizeof(sa.un.sun_path), NOTIFY_SOCKET_USER "/%llu", random_ull());
94 else {
95 unlink(NOTIFY_SOCKET_SYSTEM);
96 strncpy(sa.un.sun_path, NOTIFY_SOCKET_SYSTEM, sizeof(sa.un.sun_path));
97 }
98
99 if (sa.un.sun_path[0] == '@')
100 sa.un.sun_path[0] = 0;
101
102 if (bind(m->notify_watch.fd, &sa.sa, offsetof(struct sockaddr_un, sun_path) + 1 + strlen(sa.un.sun_path+1)) < 0) {
103 log_error("bind() failed: %m");
104 return -errno;
105 }
106
107 if (setsockopt(m->notify_watch.fd, SOL_SOCKET, SO_PASSCRED, &one, sizeof(one)) < 0) {
108 log_error("SO_PASSCRED failed: %m");
109 return -errno;
110 }
111
112 zero(ev);
113 ev.events = EPOLLIN;
114 ev.data.ptr = &m->notify_watch;
115
116 if (epoll_ctl(m->epoll_fd, EPOLL_CTL_ADD, m->notify_watch.fd, &ev) < 0)
117 return -errno;
118
119 if (sa.un.sun_path[0] == 0)
120 sa.un.sun_path[0] = '@';
121
122 if (!(m->notify_socket = strdup(sa.un.sun_path)))
123 return -ENOMEM;
124
125 log_debug("Using notification socket %s", m->notify_socket);
126
127 return 0;
128 }
129
130 static int enable_special_signals(Manager *m) {
131 int fd;
132
133 assert(m);
134
135         /* Make sure we get SIGINT on control-alt-del */
136 if (reboot(RB_DISABLE_CAD) < 0)
137 log_warning("Failed to enable ctrl-alt-del handling: %m");
138
139 if ((fd = open_terminal("/dev/tty0", O_RDWR|O_NOCTTY|O_CLOEXEC)) < 0)
140 log_warning("Failed to open /dev/tty0: %m");
141 else {
142                 /* Make sure we get SIGWINCH on kbrequest */
143 if (ioctl(fd, KDSIGACCEPT, SIGWINCH) < 0)
144 log_warning("Failed to enable kbrequest handling: %s", strerror(errno));
145
146 close_nointr_nofail(fd);
147 }
148
149 return 0;
150 }
151
152 static int manager_setup_signals(Manager *m) {
153 sigset_t mask;
154 struct epoll_event ev;
155 struct sigaction sa;
156
157 assert(m);
158
159 /* We are not interested in SIGSTOP and friends. */
160 zero(sa);
161 sa.sa_handler = SIG_DFL;
162 sa.sa_flags = SA_NOCLDSTOP|SA_RESTART;
163 assert_se(sigaction(SIGCHLD, &sa, NULL) == 0);
164
165 assert_se(sigemptyset(&mask) == 0);
166
167 sigset_add_many(&mask,
168 SIGCHLD, /* Child died */
169 SIGTERM, /* Reexecute daemon */
170 SIGHUP, /* Reload configuration */
171 SIGUSR1, /* systemd/upstart: reconnect to D-Bus */
172 SIGUSR2, /* systemd: dump status */
173 SIGINT, /* Kernel sends us this on control-alt-del */
174 SIGWINCH, /* Kernel sends us this on kbrequest (alt-arrowup) */
175 SIGPWR, /* Some kernel drivers and upsd send us this on power failure */
176 SIGRTMIN+0, /* systemd: start default.target */
177 SIGRTMIN+1, /* systemd: isolate rescue.target */
178 SIGRTMIN+2, /* systemd: isolate emergency.target */
179 SIGRTMIN+3, /* systemd: start halt.target */
180 SIGRTMIN+4, /* systemd: start poweroff.target */
181 SIGRTMIN+5, /* systemd: start reboot.target */
182 SIGRTMIN+6, /* systemd: start kexec.target */
183 SIGRTMIN+13, /* systemd: Immediate halt */
184 SIGRTMIN+14, /* systemd: Immediate poweroff */
185 SIGRTMIN+15, /* systemd: Immediate reboot */
186 SIGRTMIN+16, /* systemd: Immediate kexec */
187 SIGRTMIN+20, /* systemd: enable status messages */
188 SIGRTMIN+21, /* systemd: disable status messages */
189 -1);
190 assert_se(sigprocmask(SIG_SETMASK, &mask, NULL) == 0);
191
192 m->signal_watch.type = WATCH_SIGNAL;
193 if ((m->signal_watch.fd = signalfd(-1, &mask, SFD_NONBLOCK|SFD_CLOEXEC)) < 0)
194 return -errno;
195
196 zero(ev);
197 ev.events = EPOLLIN;
198 ev.data.ptr = &m->signal_watch;
199
200 if (epoll_ctl(m->epoll_fd, EPOLL_CTL_ADD, m->signal_watch.fd, &ev) < 0)
201 return -errno;
202
203 if (m->running_as == MANAGER_SYSTEM)
204 return enable_special_signals(m);
205
206 return 0;
207 }
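
/* Illustration only: with this mask in place the realtime signals
 * listed above can be used to drive the manager externally. Assuming
 * this manager runs as PID 1, something like the following (from a
 * privileged process) queues a start job for poweroff.target,
 * respectively requests an immediate poweroff without going through
 * the job queue:
 *
 *     kill(1, SIGRTMIN+4);
 *     kill(1, SIGRTMIN+14);
 *
 * The actual dispatch happens in manager_process_signal_fd(). */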
208
209 int manager_new(ManagerRunningAs running_as, Manager **_m) {
210 Manager *m;
211 int r = -ENOMEM;
212
213 assert(_m);
214 assert(running_as >= 0);
215 assert(running_as < _MANAGER_RUNNING_AS_MAX);
216
217 if (!(m = new0(Manager, 1)))
218 return -ENOMEM;
219
220 dual_timestamp_get(&m->startup_timestamp);
221
222 m->running_as = running_as;
223 m->name_data_slot = m->subscribed_data_slot = -1;
224 m->exit_code = _MANAGER_EXIT_CODE_INVALID;
225 m->pin_cgroupfs_fd = -1;
226
227 #ifdef HAVE_AUDIT
228 m->audit_fd = -1;
229 #endif
230
231 m->signal_watch.fd = m->mount_watch.fd = m->udev_watch.fd = m->epoll_fd = m->dev_autofs_fd = m->swap_watch.fd = -1;
232 m->current_job_id = 1; /* start as id #1, so that we can leave #0 around as "null-like" value */
233
234 if (!(m->environment = strv_copy(environ)))
235 goto fail;
236
237 if (!(m->default_controllers = strv_new("cpu", NULL)))
238 goto fail;
239
240 if (!(m->units = hashmap_new(string_hash_func, string_compare_func)))
241 goto fail;
242
243 if (!(m->jobs = hashmap_new(trivial_hash_func, trivial_compare_func)))
244 goto fail;
245
246 if (!(m->transaction_jobs = hashmap_new(trivial_hash_func, trivial_compare_func)))
247 goto fail;
248
249 if (!(m->watch_pids = hashmap_new(trivial_hash_func, trivial_compare_func)))
250 goto fail;
251
252 if (!(m->cgroup_bondings = hashmap_new(string_hash_func, string_compare_func)))
253 goto fail;
254
255 if (!(m->watch_bus = hashmap_new(string_hash_func, string_compare_func)))
256 goto fail;
257
258 if ((m->epoll_fd = epoll_create1(EPOLL_CLOEXEC)) < 0)
259 goto fail;
260
261 if ((r = lookup_paths_init(&m->lookup_paths, m->running_as)) < 0)
262 goto fail;
263
264 if ((r = manager_setup_signals(m)) < 0)
265 goto fail;
266
267 if ((r = manager_setup_cgroup(m)) < 0)
268 goto fail;
269
270 if ((r = manager_setup_notify(m)) < 0)
271 goto fail;
272
273 /* Try to connect to the busses, if possible. */
274 if ((r = bus_init(m, running_as != MANAGER_SYSTEM)) < 0)
275 goto fail;
276
277 #ifdef HAVE_AUDIT
278 if ((m->audit_fd = audit_open()) < 0)
279 log_error("Failed to connect to audit log: %m");
280 #endif
281
282 m->taint_usr = dir_is_empty("/usr") > 0;
283
284 *_m = m;
285 return 0;
286
287 fail:
288 manager_free(m);
289 return r;
290 }
291
292 static unsigned manager_dispatch_cleanup_queue(Manager *m) {
293 Meta *meta;
294 unsigned n = 0;
295
296 assert(m);
297
298 while ((meta = m->cleanup_queue)) {
299 assert(meta->in_cleanup_queue);
300
301 unit_free((Unit*) meta);
302 n++;
303 }
304
305 return n;
306 }
307
308 enum {
309 GC_OFFSET_IN_PATH, /* This one is on the path we were traveling */
310 GC_OFFSET_UNSURE, /* No clue */
311 GC_OFFSET_GOOD, /* We still need this unit */
312 GC_OFFSET_BAD, /* We don't need this unit anymore */
313 _GC_OFFSET_MAX
314 };
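
/* Sketch of how these offsets are used: every GC run picks a fresh base
 * value m->gc_marker (advanced by _GC_OFFSET_MAX per run) and tags each
 * visited unit with base + offset. A unit whose gc_marker still carries
 * a value from an older run matches none of the current base + offset
 * values and is hence treated as "not yet visited", so no per-unit
 * reset is needed between runs. */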
315
316 static void unit_gc_sweep(Unit *u, unsigned gc_marker) {
317 Iterator i;
318 Unit *other;
319 bool is_bad;
320
321 assert(u);
322
323 if (u->meta.gc_marker == gc_marker + GC_OFFSET_GOOD ||
324 u->meta.gc_marker == gc_marker + GC_OFFSET_BAD ||
325 u->meta.gc_marker == gc_marker + GC_OFFSET_IN_PATH)
326 return;
327
328 if (u->meta.in_cleanup_queue)
329 goto bad;
330
331 if (unit_check_gc(u))
332 goto good;
333
334 u->meta.gc_marker = gc_marker + GC_OFFSET_IN_PATH;
335
336 is_bad = true;
337
338 SET_FOREACH(other, u->meta.dependencies[UNIT_REFERENCED_BY], i) {
339 unit_gc_sweep(other, gc_marker);
340
341 if (other->meta.gc_marker == gc_marker + GC_OFFSET_GOOD)
342 goto good;
343
344 if (other->meta.gc_marker != gc_marker + GC_OFFSET_BAD)
345 is_bad = false;
346 }
347
348 if (is_bad)
349 goto bad;
350
351 /* We were unable to find anything out about this entry, so
352 * let's investigate it later */
353 u->meta.gc_marker = gc_marker + GC_OFFSET_UNSURE;
354 unit_add_to_gc_queue(u);
355 return;
356
357 bad:
358 /* We definitely know that this one is not useful anymore, so
359 * let's mark it for deletion */
360 u->meta.gc_marker = gc_marker + GC_OFFSET_BAD;
361 unit_add_to_cleanup_queue(u);
362 return;
363
364 good:
365 u->meta.gc_marker = gc_marker + GC_OFFSET_GOOD;
366 }
367
368 static unsigned manager_dispatch_gc_queue(Manager *m) {
369 Meta *meta;
370 unsigned n = 0;
371 unsigned gc_marker;
372
373 assert(m);
374
375 if ((m->n_in_gc_queue < GC_QUEUE_ENTRIES_MAX) &&
376 (m->gc_queue_timestamp <= 0 ||
377 (m->gc_queue_timestamp + GC_QUEUE_USEC_MAX) > now(CLOCK_MONOTONIC)))
378 return 0;
379
380 log_debug("Running GC...");
381
382 m->gc_marker += _GC_OFFSET_MAX;
383 if (m->gc_marker + _GC_OFFSET_MAX <= _GC_OFFSET_MAX)
384 m->gc_marker = 1;
385
386 gc_marker = m->gc_marker;
387
388 while ((meta = m->gc_queue)) {
389 assert(meta->in_gc_queue);
390
391 unit_gc_sweep((Unit*) meta, gc_marker);
392
393 LIST_REMOVE(Meta, gc_queue, m->gc_queue, meta);
394 meta->in_gc_queue = false;
395
396 n++;
397
398 if (meta->gc_marker == gc_marker + GC_OFFSET_BAD ||
399 meta->gc_marker == gc_marker + GC_OFFSET_UNSURE) {
400 log_debug("Collecting %s", meta->id);
401 meta->gc_marker = gc_marker + GC_OFFSET_BAD;
402 unit_add_to_cleanup_queue((Unit*) meta);
403 }
404 }
405
406 m->n_in_gc_queue = 0;
407 m->gc_queue_timestamp = 0;
408
409 return n;
410 }
411
412 static void manager_clear_jobs_and_units(Manager *m) {
413 Job *j;
414 Unit *u;
415
416 assert(m);
417
418 while ((j = hashmap_first(m->transaction_jobs)))
419 job_free(j);
420
421 while ((u = hashmap_first(m->units)))
422 unit_free(u);
423
424 manager_dispatch_cleanup_queue(m);
425
426 assert(!m->load_queue);
427 assert(!m->run_queue);
428 assert(!m->dbus_unit_queue);
429 assert(!m->dbus_job_queue);
430 assert(!m->cleanup_queue);
431 assert(!m->gc_queue);
432
433 assert(hashmap_isempty(m->transaction_jobs));
434 assert(hashmap_isempty(m->jobs));
435 assert(hashmap_isempty(m->units));
436 }
437
438 void manager_free(Manager *m) {
439 UnitType c;
440
441 assert(m);
442
443 manager_clear_jobs_and_units(m);
444
445 for (c = 0; c < _UNIT_TYPE_MAX; c++)
446 if (unit_vtable[c]->shutdown)
447 unit_vtable[c]->shutdown(m);
448
449 /* If we reexecute ourselves, we keep the root cgroup
450 * around */
451 manager_shutdown_cgroup(m, m->exit_code != MANAGER_REEXECUTE);
452
453 manager_undo_generators(m);
454
455 bus_done(m);
456
457 hashmap_free(m->units);
458 hashmap_free(m->jobs);
459 hashmap_free(m->transaction_jobs);
460 hashmap_free(m->watch_pids);
461 hashmap_free(m->watch_bus);
462
463 if (m->epoll_fd >= 0)
464 close_nointr_nofail(m->epoll_fd);
465 if (m->signal_watch.fd >= 0)
466 close_nointr_nofail(m->signal_watch.fd);
467 if (m->notify_watch.fd >= 0)
468 close_nointr_nofail(m->notify_watch.fd);
469
470 #ifdef HAVE_AUDIT
471 if (m->audit_fd >= 0)
472 audit_close(m->audit_fd);
473 #endif
474
475 free(m->notify_socket);
476
477 lookup_paths_free(&m->lookup_paths);
478 strv_free(m->environment);
479
480 strv_free(m->default_controllers);
481
482 hashmap_free(m->cgroup_bondings);
483 set_free_free(m->unit_path_cache);
484
485 free(m);
486 }
487
488 int manager_enumerate(Manager *m) {
489 int r = 0, q;
490 UnitType c;
491
492 assert(m);
493
494 /* Let's ask every type to load all units from disk/kernel
495 * that it might know */
496 for (c = 0; c < _UNIT_TYPE_MAX; c++)
497 if (unit_vtable[c]->enumerate)
498 if ((q = unit_vtable[c]->enumerate(m)) < 0)
499 r = q;
500
501 manager_dispatch_load_queue(m);
502 return r;
503 }
504
505 int manager_coldplug(Manager *m) {
506 int r = 0, q;
507 Iterator i;
508 Unit *u;
509 char *k;
510
511 assert(m);
512
513 /* Then, let's set up their initial state. */
514 HASHMAP_FOREACH_KEY(u, k, m->units, i) {
515
516 /* ignore aliases */
517 if (u->meta.id != k)
518 continue;
519
520 if ((q = unit_coldplug(u)) < 0)
521 r = q;
522 }
523
524 return r;
525 }
526
527 static void manager_build_unit_path_cache(Manager *m) {
528 char **i;
529 DIR *d = NULL;
530 int r;
531
532 assert(m);
533
534 set_free_free(m->unit_path_cache);
535
536 if (!(m->unit_path_cache = set_new(string_hash_func, string_compare_func))) {
537 log_error("Failed to allocate unit path cache.");
538 return;
539 }
540
541 /* This simply builds a list of files we know exist, so that
542 * we don't always have to go to disk */
543
544 STRV_FOREACH(i, m->lookup_paths.unit_path) {
545 struct dirent *de;
546
547 if (!(d = opendir(*i))) {
548 log_error("Failed to open directory: %m");
549 continue;
550 }
551
552 while ((de = readdir(d))) {
553 char *p;
554
555 if (ignore_file(de->d_name))
556 continue;
557
558 if (asprintf(&p, "%s/%s", streq(*i, "/") ? "" : *i, de->d_name) < 0) {
559 r = -ENOMEM;
560 goto fail;
561 }
562
563 if ((r = set_put(m->unit_path_cache, p)) < 0) {
564 free(p);
565 goto fail;
566 }
567 }
568
569 closedir(d);
570 d = NULL;
571 }
572
573 return;
574
575 fail:
576 log_error("Failed to build unit path cache: %s", strerror(-r));
577
578 set_free_free(m->unit_path_cache);
579 m->unit_path_cache = NULL;
580
581 if (d)
582 closedir(d);
583 }
584
585 int manager_startup(Manager *m, FILE *serialization, FDSet *fds) {
586 int r, q;
587
588 assert(m);
589
590 manager_run_generators(m);
591
592 manager_build_unit_path_cache(m);
593
594         /* If we are going to deserialize, make sure that this is
595          * already known during enumeration, so we increase the
596          * counter here already */
597 if (serialization)
598 m->n_reloading ++;
599
600 /* First, enumerate what we can from all config files */
601 r = manager_enumerate(m);
602
603 /* Second, deserialize if there is something to deserialize */
604 if (serialization)
605 if ((q = manager_deserialize(m, serialization, fds)) < 0)
606 r = q;
607
608 /* Third, fire things up! */
609 if ((q = manager_coldplug(m)) < 0)
610 r = q;
611
612 if (serialization) {
613 assert(m->n_reloading > 0);
614 m->n_reloading --;
615 }
616
617 return r;
618 }
619
620 static void transaction_delete_job(Manager *m, Job *j, bool delete_dependencies) {
621 assert(m);
622 assert(j);
623
624 /* Deletes one job from the transaction */
625
626 manager_transaction_unlink_job(m, j, delete_dependencies);
627
628 if (!j->installed)
629 job_free(j);
630 }
631
632 static void transaction_delete_unit(Manager *m, Unit *u) {
633 Job *j;
634
635 /* Deletes all jobs associated with a certain unit from the
636 * transaction */
637
638 while ((j = hashmap_get(m->transaction_jobs, u)))
639 transaction_delete_job(m, j, true);
640 }
641
642 static void transaction_clean_dependencies(Manager *m) {
643 Iterator i;
644 Job *j;
645
646 assert(m);
647
648 /* Drops all dependencies of all installed jobs */
649
650 HASHMAP_FOREACH(j, m->jobs, i) {
651 while (j->subject_list)
652 job_dependency_free(j->subject_list);
653 while (j->object_list)
654 job_dependency_free(j->object_list);
655 }
656
657 assert(!m->transaction_anchor);
658 }
659
660 static void transaction_abort(Manager *m) {
661 Job *j;
662
663 assert(m);
664
665 while ((j = hashmap_first(m->transaction_jobs)))
666 if (j->installed)
667 transaction_delete_job(m, j, true);
668 else
669 job_free(j);
670
671 assert(hashmap_isempty(m->transaction_jobs));
672
673 transaction_clean_dependencies(m);
674 }
675
676 static void transaction_find_jobs_that_matter_to_anchor(Manager *m, Job *j, unsigned generation) {
677 JobDependency *l;
678
679 assert(m);
680
681 /* A recursive sweep through the graph that marks all units
682 * that matter to the anchor job, i.e. are directly or
683 * indirectly a dependency of the anchor job via paths that
684 * are fully marked as mattering. */
685
686 if (j)
687 l = j->subject_list;
688 else
689 l = m->transaction_anchor;
690
691 LIST_FOREACH(subject, l, l) {
692
693 /* This link does not matter */
694 if (!l->matters)
695 continue;
696
697 /* This unit has already been marked */
698 if (l->object->generation == generation)
699 continue;
700
701 l->object->matters_to_anchor = true;
702 l->object->generation = generation;
703
704 transaction_find_jobs_that_matter_to_anchor(m, l->object, generation);
705 }
706 }
707
708 static void transaction_merge_and_delete_job(Manager *m, Job *j, Job *other, JobType t) {
709 JobDependency *l, *last;
710
711 assert(j);
712 assert(other);
713 assert(j->unit == other->unit);
714 assert(!j->installed);
715
716 /* Merges 'other' into 'j' and then deletes j. */
717
718 j->type = t;
719 j->state = JOB_WAITING;
720 j->override = j->override || other->override;
721
722 j->matters_to_anchor = j->matters_to_anchor || other->matters_to_anchor;
723
724 /* Patch us in as new owner of the JobDependency objects */
725 last = NULL;
726 LIST_FOREACH(subject, l, other->subject_list) {
727 assert(l->subject == other);
728 l->subject = j;
729 last = l;
730 }
731
732 /* Merge both lists */
733 if (last) {
734 last->subject_next = j->subject_list;
735 if (j->subject_list)
736 j->subject_list->subject_prev = last;
737 j->subject_list = other->subject_list;
738 }
739
740 /* Patch us in as new owner of the JobDependency objects */
741 last = NULL;
742 LIST_FOREACH(object, l, other->object_list) {
743 assert(l->object == other);
744 l->object = j;
745 last = l;
746 }
747
748 /* Merge both lists */
749 if (last) {
750 last->object_next = j->object_list;
751 if (j->object_list)
752 j->object_list->object_prev = last;
753 j->object_list = other->object_list;
754 }
755
756 /* Kill the other job */
757 other->subject_list = NULL;
758 other->object_list = NULL;
759 transaction_delete_job(m, other, true);
760 }
761 static bool job_is_conflicted_by(Job *j) {
762 JobDependency *l;
763
764 assert(j);
765
766         /* Returns true if this job is pulled in by at least one
767 * ConflictedBy dependency. */
768
769 LIST_FOREACH(object, l, j->object_list)
770 if (l->conflicts)
771 return true;
772
773 return false;
774 }
775
776 static int delete_one_unmergeable_job(Manager *m, Job *j) {
777 Job *k;
778
779 assert(j);
780
781 /* Tries to delete one item in the linked list
782 * j->transaction_next->transaction_next->... that conflicts
783 * with another one, in an attempt to make an inconsistent
784 * transaction work. */
785
786         /* We rely here on the fact that if a merged with b does not
787          * merge with c, then either a or b does not merge with c either */
788 LIST_FOREACH(transaction, j, j)
789 LIST_FOREACH(transaction, k, j->transaction_next) {
790 Job *d;
791
792 /* Is this one mergeable? Then skip it */
793 if (job_type_is_mergeable(j->type, k->type))
794 continue;
795
796 /* Ok, we found two that conflict, let's see if we can
797 * drop one of them */
798 if (!j->matters_to_anchor && !k->matters_to_anchor) {
799
800 /* Both jobs don't matter, so let's
801 * find the one that is smarter to
802 * remove. Let's think positive and
803                                  * rather remove stops than starts --
804 * except if something is being
805 * stopped because it is conflicted by
806 * another unit in which case we
807 * rather remove the start. */
808
809 log_debug("Looking at job %s/%s conflicted_by=%s", j->unit->meta.id, job_type_to_string(j->type), yes_no(j->type == JOB_STOP && job_is_conflicted_by(j)));
810 log_debug("Looking at job %s/%s conflicted_by=%s", k->unit->meta.id, job_type_to_string(k->type), yes_no(k->type == JOB_STOP && job_is_conflicted_by(k)));
811
812 if (j->type == JOB_STOP) {
813
814 if (job_is_conflicted_by(j))
815 d = k;
816 else
817 d = j;
818
819 } else if (k->type == JOB_STOP) {
820
821 if (job_is_conflicted_by(k))
822 d = j;
823 else
824 d = k;
825 } else
826 d = j;
827
828 } else if (!j->matters_to_anchor)
829 d = j;
830 else if (!k->matters_to_anchor)
831 d = k;
832 else
833 return -ENOEXEC;
834
835 /* Ok, we can drop one, so let's do so. */
836 log_debug("Fixing conflicting jobs by deleting job %s/%s", d->unit->meta.id, job_type_to_string(d->type));
837 transaction_delete_job(m, d, true);
838 return 0;
839 }
840
841 return -EINVAL;
842 }
843
844 static int transaction_merge_jobs(Manager *m, DBusError *e) {
845 Job *j;
846 Iterator i;
847 int r;
848
849 assert(m);
850
851 /* First step, check whether any of the jobs for one specific
852 * task conflict. If so, try to drop one of them. */
853 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
854 JobType t;
855 Job *k;
856
857 t = j->type;
858 LIST_FOREACH(transaction, k, j->transaction_next) {
859 if (job_type_merge(&t, k->type) >= 0)
860 continue;
861
862 /* OK, we could not merge all jobs for this
863 * action. Let's see if we can get rid of one
864 * of them */
865
866 if ((r = delete_one_unmergeable_job(m, j)) >= 0)
867 /* Ok, we managed to drop one, now
868 * let's ask our callers to call us
869 * again after garbage collecting */
870 return -EAGAIN;
871
872 /* We couldn't merge anything. Failure */
873 dbus_set_error(e, BUS_ERROR_TRANSACTION_JOBS_CONFLICTING, "Transaction contains conflicting jobs '%s' and '%s' for %s. Probably contradicting requirement dependencies configured.",
874 job_type_to_string(t), job_type_to_string(k->type), k->unit->meta.id);
875 return r;
876 }
877 }
878
879 /* Second step, merge the jobs. */
880 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
881 JobType t = j->type;
882 Job *k;
883
884 /* Merge all transactions */
885 LIST_FOREACH(transaction, k, j->transaction_next)
886 assert_se(job_type_merge(&t, k->type) == 0);
887
888 /* If an active job is mergeable, merge it too */
889 if (j->unit->meta.job)
890 job_type_merge(&t, j->unit->meta.job->type); /* Might fail. Which is OK */
891
892 while ((k = j->transaction_next)) {
893 if (j->installed) {
894 transaction_merge_and_delete_job(m, k, j, t);
895 j = k;
896 } else
897 transaction_merge_and_delete_job(m, j, k, t);
898 }
899
900 if (j->unit->meta.job && !j->installed)
901 transaction_merge_and_delete_job(m, j, j->unit->meta.job, t);
902
903 assert(!j->transaction_next);
904 assert(!j->transaction_prev);
905 }
906
907 return 0;
908 }
909
910 static void transaction_drop_redundant(Manager *m) {
911 bool again;
912
913 assert(m);
914
915 /* Goes through the transaction and removes all jobs that are
916 * a noop */
917
918 do {
919 Job *j;
920 Iterator i;
921
922 again = false;
923
924 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
925 bool changes_something = false;
926 Job *k;
927
928 LIST_FOREACH(transaction, k, j) {
929
930 if (!job_is_anchor(k) &&
931 (k->installed || job_type_is_redundant(k->type, unit_active_state(k->unit))) &&
932 (!k->unit->meta.job || !job_type_is_conflicting(k->type, k->unit->meta.job->type)))
933 continue;
934
935 changes_something = true;
936 break;
937 }
938
939 if (changes_something)
940 continue;
941
942 /* log_debug("Found redundant job %s/%s, dropping.", j->unit->meta.id, job_type_to_string(j->type)); */
943 transaction_delete_job(m, j, false);
944 again = true;
945 break;
946 }
947
948 } while (again);
949 }
950
951 static bool unit_matters_to_anchor(Unit *u, Job *j) {
952 assert(u);
953 assert(!j->transaction_prev);
954
955 /* Checks whether at least one of the jobs for this unit
956 * matters to the anchor. */
957
958 LIST_FOREACH(transaction, j, j)
959 if (j->matters_to_anchor)
960 return true;
961
962 return false;
963 }
964
965 static int transaction_verify_order_one(Manager *m, Job *j, Job *from, unsigned generation, DBusError *e) {
966 Iterator i;
967 Unit *u;
968 int r;
969
970 assert(m);
971 assert(j);
972 assert(!j->transaction_prev);
973
974 /* Does a recursive sweep through the ordering graph, looking
975          * for a cycle. If we find a cycle we try to break it. */
976
977 /* Have we seen this before? */
978 if (j->generation == generation) {
979 Job *k, *delete;
980
981 /* If the marker is NULL we have been here already and
982 * decided the job was loop-free from here. Hence
983 * shortcut things and return right-away. */
984 if (!j->marker)
985 return 0;
986
987 /* So, the marker is not NULL and we already have been
988 * here. We have a cycle. Let's try to break it. We go
989 * backwards in our path and try to find a suitable
990 * job to remove. We use the marker to find our way
991                  * back, since, smart as we are, we stored our way back
992 * in there. */
993 log_warning("Found ordering cycle on %s/%s", j->unit->meta.id, job_type_to_string(j->type));
994
995 delete = NULL;
996 for (k = from; k; k = ((k->generation == generation && k->marker != k) ? k->marker : NULL)) {
997
998 log_info("Walked on cycle path to %s/%s", k->unit->meta.id, job_type_to_string(k->type));
999
1000 if (!delete &&
1001 !k->installed &&
1002 !unit_matters_to_anchor(k->unit, k)) {
1003 /* Ok, we can drop this one, so let's
1004 * do so. */
1005 delete = k;
1006 }
1007
1008 /* Check if this in fact was the beginning of
1009 * the cycle */
1010 if (k == j)
1011 break;
1012 }
1013
1014
1015 if (delete) {
1016 log_warning("Breaking ordering cycle by deleting job %s/%s", delete->unit->meta.id, job_type_to_string(delete->type));
1017 transaction_delete_unit(m, delete->unit);
1018 return -EAGAIN;
1019 }
1020
1021 log_error("Unable to break cycle");
1022
1023 dbus_set_error(e, BUS_ERROR_TRANSACTION_ORDER_IS_CYCLIC, "Transaction order is cyclic. See system logs for details.");
1024 return -ENOEXEC;
1025 }
1026
1027 /* Make the marker point to where we come from, so that we can
1028 * find our way backwards if we want to break a cycle. We use
1029 * a special marker for the beginning: we point to
1030 * ourselves. */
1031 j->marker = from ? from : j;
1032 j->generation = generation;
1033
1034         /* We assume that the dependencies are bidirectional, and
1035 * hence can ignore UNIT_AFTER */
1036 SET_FOREACH(u, j->unit->meta.dependencies[UNIT_BEFORE], i) {
1037 Job *o;
1038
1039 /* Is there a job for this unit? */
1040 if (!(o = hashmap_get(m->transaction_jobs, u)))
1041
1042 /* Ok, there is no job for this in the
1043 * transaction, but maybe there is already one
1044 * running? */
1045 if (!(o = u->meta.job))
1046 continue;
1047
1048 if ((r = transaction_verify_order_one(m, o, j, generation, e)) < 0)
1049 return r;
1050 }
1051
1052 /* Ok, let's backtrack, and remember that this entry is not on
1053 * our path anymore. */
1054 j->marker = NULL;
1055
1056 return 0;
1057 }
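
/* Worked example (sketch): assume the transaction carries start jobs
 * for units A, B, C ordered A -> B -> C -> A. The recursion above
 * reaches A a second time while A's marker is still set, logs the
 * cycle, walks the marker chain from C back towards A, and deletes the
 * jobs of the first unit on that path that is neither installed nor
 * matters to the anchor, returning -EAGAIN so the caller can retry the
 * ordering check on the reduced transaction. */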
1058
1059 static int transaction_verify_order(Manager *m, unsigned *generation, DBusError *e) {
1060 Job *j;
1061 int r;
1062 Iterator i;
1063 unsigned g;
1064
1065 assert(m);
1066 assert(generation);
1067
1068 /* Check if the ordering graph is cyclic. If it is, try to fix
1069 * that up by dropping one of the jobs. */
1070
1071 g = (*generation)++;
1072
1073 HASHMAP_FOREACH(j, m->transaction_jobs, i)
1074 if ((r = transaction_verify_order_one(m, j, NULL, g, e)) < 0)
1075 return r;
1076
1077 return 0;
1078 }
1079
1080 static void transaction_collect_garbage(Manager *m) {
1081 bool again;
1082
1083 assert(m);
1084
1085 /* Drop jobs that are not required by any other job */
1086
1087 do {
1088 Iterator i;
1089 Job *j;
1090
1091 again = false;
1092
1093 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
1094 if (j->object_list) {
1095 /* log_debug("Keeping job %s/%s because of %s/%s", */
1096 /* j->unit->meta.id, job_type_to_string(j->type), */
1097 /* j->object_list->subject ? j->object_list->subject->unit->meta.id : "root", */
1098 /* j->object_list->subject ? job_type_to_string(j->object_list->subject->type) : "root"); */
1099 continue;
1100 }
1101
1102 /* log_debug("Garbage collecting job %s/%s", j->unit->meta.id, job_type_to_string(j->type)); */
1103 transaction_delete_job(m, j, true);
1104 again = true;
1105 break;
1106 }
1107
1108 } while (again);
1109 }
1110
1111 static int transaction_is_destructive(Manager *m, DBusError *e) {
1112 Iterator i;
1113 Job *j;
1114
1115 assert(m);
1116
1117 /* Checks whether applying this transaction means that
1118 * existing jobs would be replaced */
1119
1120 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
1121
1122 /* Assume merged */
1123 assert(!j->transaction_prev);
1124 assert(!j->transaction_next);
1125
1126 if (j->unit->meta.job &&
1127 j->unit->meta.job != j &&
1128 !job_type_is_superset(j->type, j->unit->meta.job->type)) {
1129
1130 dbus_set_error(e, BUS_ERROR_TRANSACTION_IS_DESTRUCTIVE, "Transaction is destructive.");
1131 return -EEXIST;
1132 }
1133 }
1134
1135 return 0;
1136 }
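
/* Example (with a made-up unit): if foo.service already has a stop job
 * installed and this transaction carries a start job for it, JOB_START
 * is not a superset of JOB_STOP, so in JOB_FAIL mode the whole
 * transaction is rejected with -EEXIST rather than silently replacing
 * the existing job. */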
1137
1138 static void transaction_minimize_impact(Manager *m) {
1139 bool again;
1140 assert(m);
1141
1142 /* Drops all unnecessary jobs that reverse already active jobs
1143 * or that stop a running service. */
1144
1145 do {
1146 Job *j;
1147 Iterator i;
1148
1149 again = false;
1150
1151 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
1152 LIST_FOREACH(transaction, j, j) {
1153 bool stops_running_service, changes_existing_job;
1154
1155 /* If it matters, we shouldn't drop it */
1156 if (j->matters_to_anchor)
1157 continue;
1158
1159 /* Would this stop a running service?
1160 * Would this change an existing job?
1161 * If so, let's drop this entry */
1162
1163 stops_running_service =
1164 j->type == JOB_STOP && UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(j->unit));
1165
1166 changes_existing_job =
1167 j->unit->meta.job &&
1168 job_type_is_conflicting(j->type, j->unit->meta.job->type);
1169
1170 if (!stops_running_service && !changes_existing_job)
1171 continue;
1172
1173 if (stops_running_service)
1174 log_debug("%s/%s would stop a running service.", j->unit->meta.id, job_type_to_string(j->type));
1175
1176 if (changes_existing_job)
1177 log_debug("%s/%s would change existing job.", j->unit->meta.id, job_type_to_string(j->type));
1178
1179 /* Ok, let's get rid of this */
1180 log_debug("Deleting %s/%s to minimize impact.", j->unit->meta.id, job_type_to_string(j->type));
1181
1182 transaction_delete_job(m, j, true);
1183 again = true;
1184 break;
1185 }
1186
1187 if (again)
1188 break;
1189 }
1190
1191 } while (again);
1192 }
1193
1194 static int transaction_apply(Manager *m, JobMode mode) {
1195 Iterator i;
1196 Job *j;
1197 int r;
1198
1199 /* Moves the transaction jobs to the set of active jobs */
1200
1201 if (mode == JOB_ISOLATE) {
1202
1203 /* When isolating first kill all installed jobs which
1204 * aren't part of the new transaction */
1205 HASHMAP_FOREACH(j, m->jobs, i) {
1206 assert(j->installed);
1207
1208 if (hashmap_get(m->transaction_jobs, j->unit))
1209 continue;
1210
1211 job_finish_and_invalidate(j, JOB_CANCELED);
1212 }
1213 }
1214
1215 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
1216 /* Assume merged */
1217 assert(!j->transaction_prev);
1218 assert(!j->transaction_next);
1219
1220 if (j->installed)
1221 continue;
1222
1223 if ((r = hashmap_put(m->jobs, UINT32_TO_PTR(j->id), j)) < 0)
1224 goto rollback;
1225 }
1226
1227 while ((j = hashmap_steal_first(m->transaction_jobs))) {
1228 if (j->installed) {
1229 /* log_debug("Skipping already installed job %s/%s as %u", j->unit->meta.id, job_type_to_string(j->type), (unsigned) j->id); */
1230 continue;
1231 }
1232
1233 if (j->unit->meta.job)
1234 job_free(j->unit->meta.job);
1235
1236 j->unit->meta.job = j;
1237 j->installed = true;
1238 m->n_installed_jobs ++;
1239
1240 /* We're fully installed. Now let's free data we don't
1241 * need anymore. */
1242
1243 assert(!j->transaction_next);
1244 assert(!j->transaction_prev);
1245
1246 job_add_to_run_queue(j);
1247 job_add_to_dbus_queue(j);
1248 job_start_timer(j);
1249
1250 log_debug("Installed new job %s/%s as %u", j->unit->meta.id, job_type_to_string(j->type), (unsigned) j->id);
1251 }
1252
1253         /* As a last step, kill all remaining job dependencies. */
1254 transaction_clean_dependencies(m);
1255
1256 return 0;
1257
1258 rollback:
1259
1260 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
1261 if (j->installed)
1262 continue;
1263
1264 hashmap_remove(m->jobs, UINT32_TO_PTR(j->id));
1265 }
1266
1267 return r;
1268 }
1269
1270 static int transaction_activate(Manager *m, JobMode mode, DBusError *e) {
1271 int r;
1272 unsigned generation = 1;
1273
1274 assert(m);
1275
1276 /* This applies the changes recorded in transaction_jobs to
1277 * the actual list of jobs, if possible. */
1278
1279 /* First step: figure out which jobs matter */
1280 transaction_find_jobs_that_matter_to_anchor(m, NULL, generation++);
1281
1282 /* Second step: Try not to stop any running services if
1283 * we don't have to. Don't try to reverse running
1284 * jobs if we don't have to. */
1285 if (mode == JOB_FAIL)
1286 transaction_minimize_impact(m);
1287
1288 /* Third step: Drop redundant jobs */
1289 transaction_drop_redundant(m);
1290
1291 for (;;) {
1292 /* Fourth step: Let's remove unneeded jobs that might
1293 * be lurking. */
1294 if (mode != JOB_ISOLATE)
1295 transaction_collect_garbage(m);
1296
1297 /* Fifth step: verify order makes sense and correct
1298 * cycles if necessary and possible */
1299 if ((r = transaction_verify_order(m, &generation, e)) >= 0)
1300 break;
1301
1302 if (r != -EAGAIN) {
1303 log_warning("Requested transaction contains an unfixable cyclic ordering dependency: %s", bus_error(e, r));
1304 goto rollback;
1305 }
1306
1307 /* Let's see if the resulting transaction ordering
1308 * graph is still cyclic... */
1309 }
1310
1311 for (;;) {
1312 /* Sixth step: let's drop unmergeable entries if
1313 * necessary and possible, merge entries we can
1314 * merge */
1315 if ((r = transaction_merge_jobs(m, e)) >= 0)
1316 break;
1317
1318 if (r != -EAGAIN) {
1319 log_warning("Requested transaction contains unmergeable jobs: %s", bus_error(e, r));
1320 goto rollback;
1321 }
1322
1323 /* Seventh step: an entry got dropped, let's garbage
1324 * collect its dependencies. */
1325 if (mode != JOB_ISOLATE)
1326 transaction_collect_garbage(m);
1327
1328 /* Let's see if the resulting transaction still has
1329 * unmergeable entries ... */
1330 }
1331
1332         /* Eighth step: Drop redundant jobs again, if the merging now allows us to drop more. */
1333 transaction_drop_redundant(m);
1334
1335 /* Ninth step: check whether we can actually apply this */
1336 if (mode == JOB_FAIL)
1337 if ((r = transaction_is_destructive(m, e)) < 0) {
1338 log_notice("Requested transaction contradicts existing jobs: %s", bus_error(e, r));
1339 goto rollback;
1340 }
1341
1342 /* Tenth step: apply changes */
1343 if ((r = transaction_apply(m, mode)) < 0) {
1344 log_warning("Failed to apply transaction: %s", strerror(-r));
1345 goto rollback;
1346 }
1347
1348 assert(hashmap_isempty(m->transaction_jobs));
1349 assert(!m->transaction_anchor);
1350
1351 return 0;
1352
1353 rollback:
1354 transaction_abort(m);
1355 return r;
1356 }
1357
1358 static Job* transaction_add_one_job(Manager *m, JobType type, Unit *unit, bool override, bool *is_new) {
1359 Job *j, *f;
1360
1361 assert(m);
1362 assert(unit);
1363
1364 /* Looks for an existing prospective job and returns that. If
1365 * it doesn't exist it is created and added to the prospective
1366 * jobs list. */
1367
1368 f = hashmap_get(m->transaction_jobs, unit);
1369
1370 LIST_FOREACH(transaction, j, f) {
1371 assert(j->unit == unit);
1372
1373 if (j->type == type) {
1374 if (is_new)
1375 *is_new = false;
1376 return j;
1377 }
1378 }
1379
1380 if (unit->meta.job && unit->meta.job->type == type)
1381 j = unit->meta.job;
1382 else if (!(j = job_new(m, type, unit)))
1383 return NULL;
1384
1385 j->generation = 0;
1386 j->marker = NULL;
1387 j->matters_to_anchor = false;
1388 j->override = override;
1389
1390 LIST_PREPEND(Job, transaction, f, j);
1391
1392 if (hashmap_replace(m->transaction_jobs, unit, f) < 0) {
1393 job_free(j);
1394 return NULL;
1395 }
1396
1397 if (is_new)
1398 *is_new = true;
1399
1400 /* log_debug("Added job %s/%s to transaction.", unit->meta.id, job_type_to_string(type)); */
1401
1402 return j;
1403 }
1404
1405 void manager_transaction_unlink_job(Manager *m, Job *j, bool delete_dependencies) {
1406 assert(m);
1407 assert(j);
1408
1409 if (j->transaction_prev)
1410 j->transaction_prev->transaction_next = j->transaction_next;
1411 else if (j->transaction_next)
1412 hashmap_replace(m->transaction_jobs, j->unit, j->transaction_next);
1413 else
1414 hashmap_remove_value(m->transaction_jobs, j->unit, j);
1415
1416 if (j->transaction_next)
1417 j->transaction_next->transaction_prev = j->transaction_prev;
1418
1419 j->transaction_prev = j->transaction_next = NULL;
1420
1421 while (j->subject_list)
1422 job_dependency_free(j->subject_list);
1423
1424 while (j->object_list) {
1425 Job *other = j->object_list->matters ? j->object_list->subject : NULL;
1426
1427 job_dependency_free(j->object_list);
1428
1429 if (other && delete_dependencies) {
1430 log_debug("Deleting job %s/%s as dependency of job %s/%s",
1431 other->unit->meta.id, job_type_to_string(other->type),
1432 j->unit->meta.id, job_type_to_string(j->type));
1433 transaction_delete_job(m, other, delete_dependencies);
1434 }
1435 }
1436 }
1437
1438 static int transaction_add_job_and_dependencies(
1439 Manager *m,
1440 JobType type,
1441 Unit *unit,
1442 Job *by,
1443 bool matters,
1444 bool override,
1445 bool conflicts,
1446 bool ignore_requirements,
1447 bool ignore_order,
1448 DBusError *e,
1449 Job **_ret) {
1450 Job *ret;
1451 Iterator i;
1452 Unit *dep;
1453 int r;
1454 bool is_new;
1455
1456 assert(m);
1457 assert(type < _JOB_TYPE_MAX);
1458 assert(unit);
1459
1460 /* log_debug("Pulling in %s/%s from %s/%s", */
1461 /* unit->meta.id, job_type_to_string(type), */
1462 /* by ? by->unit->meta.id : "NA", */
1463 /* by ? job_type_to_string(by->type) : "NA"); */
1464
1465 if (unit->meta.load_state != UNIT_LOADED &&
1466 unit->meta.load_state != UNIT_ERROR &&
1467 unit->meta.load_state != UNIT_MASKED) {
1468 dbus_set_error(e, BUS_ERROR_LOAD_FAILED, "Unit %s is not loaded properly.", unit->meta.id);
1469 return -EINVAL;
1470 }
1471
1472 if (type != JOB_STOP && unit->meta.load_state == UNIT_ERROR) {
1473 dbus_set_error(e, BUS_ERROR_LOAD_FAILED,
1474 "Unit %s failed to load: %s. "
1475 "See system logs and 'systemctl status %s' for details.",
1476 unit->meta.id,
1477 strerror(-unit->meta.load_error),
1478 unit->meta.id);
1479 return -EINVAL;
1480 }
1481
1482 if (type != JOB_STOP && unit->meta.load_state == UNIT_MASKED) {
1483 dbus_set_error(e, BUS_ERROR_MASKED, "Unit %s is masked.", unit->meta.id);
1484 return -EINVAL;
1485 }
1486
1487 if (!unit_job_is_applicable(unit, type)) {
1488 dbus_set_error(e, BUS_ERROR_JOB_TYPE_NOT_APPLICABLE, "Job type %s is not applicable for unit %s.", job_type_to_string(type), unit->meta.id);
1489 return -EBADR;
1490 }
1491
1492 /* First add the job. */
1493 if (!(ret = transaction_add_one_job(m, type, unit, override, &is_new)))
1494 return -ENOMEM;
1495
1496 ret->ignore_order = ret->ignore_order || ignore_order;
1497
1498 /* Then, add a link to the job. */
1499 if (!job_dependency_new(by, ret, matters, conflicts))
1500 return -ENOMEM;
1501
1502 if (is_new && !ignore_requirements) {
1503 Set *following;
1504
1505 /* If we are following some other unit, make sure we
1506 * add all dependencies of everybody following. */
1507 if (unit_following_set(ret->unit, &following) > 0) {
1508 SET_FOREACH(dep, following, i)
1509 if ((r = transaction_add_job_and_dependencies(m, type, dep, ret, false, override, false, false, ignore_order, e, NULL)) < 0) {
1510 log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->meta.id, bus_error(e, r));
1511
1512 if (e)
1513 dbus_error_free(e);
1514 }
1515
1516 set_free(following);
1517 }
1518
1519 /* Finally, recursively add in all dependencies. */
1520 if (type == JOB_START || type == JOB_RELOAD_OR_START) {
1521 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_REQUIRES], i)
1522 if ((r = transaction_add_job_and_dependencies(m, JOB_START, dep, ret, true, override, false, false, ignore_order, e, NULL)) < 0) {
1523 if (r != -EBADR)
1524 goto fail;
1525
1526 if (e)
1527 dbus_error_free(e);
1528 }
1529
1530 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_BIND_TO], i)
1531 if ((r = transaction_add_job_and_dependencies(m, JOB_START, dep, ret, true, override, false, false, ignore_order, e, NULL)) < 0) {
1532
1533 if (r != -EBADR)
1534 goto fail;
1535
1536 if (e)
1537 dbus_error_free(e);
1538 }
1539
1540 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_REQUIRES_OVERRIDABLE], i)
1541 if ((r = transaction_add_job_and_dependencies(m, JOB_START, dep, ret, !override, override, false, false, ignore_order, e, NULL)) < 0) {
1542 log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->meta.id, bus_error(e, r));
1543
1544 if (e)
1545 dbus_error_free(e);
1546 }
1547
1548 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_WANTS], i)
1549 if ((r = transaction_add_job_and_dependencies(m, JOB_START, dep, ret, false, false, false, false, ignore_order, e, NULL)) < 0) {
1550 log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->meta.id, bus_error(e, r));
1551
1552 if (e)
1553 dbus_error_free(e);
1554 }
1555
1556 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_REQUISITE], i)
1557 if ((r = transaction_add_job_and_dependencies(m, JOB_VERIFY_ACTIVE, dep, ret, true, override, false, false, ignore_order, e, NULL)) < 0) {
1558
1559 if (r != -EBADR)
1560 goto fail;
1561
1562 if (e)
1563 dbus_error_free(e);
1564 }
1565
1566 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_REQUISITE_OVERRIDABLE], i)
1567 if ((r = transaction_add_job_and_dependencies(m, JOB_VERIFY_ACTIVE, dep, ret, !override, override, false, false, ignore_order, e, NULL)) < 0) {
1568 log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->meta.id, bus_error(e, r));
1569
1570 if (e)
1571 dbus_error_free(e);
1572 }
1573
1574 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_CONFLICTS], i)
1575 if ((r = transaction_add_job_and_dependencies(m, JOB_STOP, dep, ret, true, override, true, false, ignore_order, e, NULL)) < 0) {
1576
1577 if (r != -EBADR)
1578 goto fail;
1579
1580 if (e)
1581 dbus_error_free(e);
1582 }
1583
1584 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_CONFLICTED_BY], i)
1585 if ((r = transaction_add_job_and_dependencies(m, JOB_STOP, dep, ret, false, override, false, false, ignore_order, e, NULL)) < 0) {
1586 log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->meta.id, bus_error(e, r));
1587
1588 if (e)
1589 dbus_error_free(e);
1590 }
1591
1592 } else if (type == JOB_STOP || type == JOB_RESTART || type == JOB_TRY_RESTART) {
1593
1594 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_REQUIRED_BY], i)
1595 if ((r = transaction_add_job_and_dependencies(m, type, dep, ret, true, override, false, false, ignore_order, e, NULL)) < 0) {
1596
1597 if (r != -EBADR)
1598 goto fail;
1599
1600 if (e)
1601 dbus_error_free(e);
1602 }
1603
1604 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_BOUND_BY], i)
1605 if ((r = transaction_add_job_and_dependencies(m, type, dep, ret, true, override, false, false, ignore_order, e, NULL)) < 0) {
1606
1607 if (r != -EBADR)
1608 goto fail;
1609
1610 if (e)
1611 dbus_error_free(e);
1612 }
1613 }
1614
1615                 /* JOB_VERIFY_ACTIVE, JOB_RELOAD require no dependency handling */
1616 }
1617
1618 if (_ret)
1619 *_ret = ret;
1620
1621 return 0;
1622
1623 fail:
1624 return r;
1625 }
1626
1627 static int transaction_add_isolate_jobs(Manager *m) {
1628 Iterator i;
1629 Unit *u;
1630 char *k;
1631 int r;
1632
1633 assert(m);
1634
1635 HASHMAP_FOREACH_KEY(u, k, m->units, i) {
1636
1637 /* ignore aliases */
1638 if (u->meta.id != k)
1639 continue;
1640
1641 if (u->meta.ignore_on_isolate)
1642 continue;
1643
1644 /* No need to stop inactive jobs */
1645 if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(u)) && !u->meta.job)
1646 continue;
1647
1648 /* Is there already something listed for this? */
1649 if (hashmap_get(m->transaction_jobs, u))
1650 continue;
1651
1652 if ((r = transaction_add_job_and_dependencies(m, JOB_STOP, u, NULL, true, false, false, false, false, NULL, NULL)) < 0)
1653 log_warning("Cannot add isolate job for unit %s, ignoring: %s", u->meta.id, strerror(-r));
1654 }
1655
1656 return 0;
1657 }
1658
1659 int manager_add_job(Manager *m, JobType type, Unit *unit, JobMode mode, bool override, DBusError *e, Job **_ret) {
1660 int r;
1661 Job *ret;
1662
1663 assert(m);
1664 assert(type < _JOB_TYPE_MAX);
1665 assert(unit);
1666 assert(mode < _JOB_MODE_MAX);
1667
1668 if (mode == JOB_ISOLATE && type != JOB_START) {
1669 dbus_set_error(e, BUS_ERROR_INVALID_JOB_MODE, "Isolate is only valid for start.");
1670 return -EINVAL;
1671 }
1672
1673 if (mode == JOB_ISOLATE && !unit->meta.allow_isolate) {
1674 dbus_set_error(e, BUS_ERROR_NO_ISOLATION, "Operation refused, unit may not be isolated.");
1675 return -EPERM;
1676 }
1677
1678 log_debug("Trying to enqueue job %s/%s/%s", unit->meta.id, job_type_to_string(type), job_mode_to_string(mode));
1679
1680 if ((r = transaction_add_job_and_dependencies(m, type, unit, NULL, true, override, false,
1681 mode == JOB_IGNORE_DEPENDENCIES || mode == JOB_IGNORE_REQUIREMENTS,
1682 mode == JOB_IGNORE_DEPENDENCIES, e, &ret)) < 0) {
1683 transaction_abort(m);
1684 return r;
1685 }
1686
1687 if (mode == JOB_ISOLATE)
1688 if ((r = transaction_add_isolate_jobs(m)) < 0) {
1689 transaction_abort(m);
1690 return r;
1691 }
1692
1693 if ((r = transaction_activate(m, mode, e)) < 0)
1694 return r;
1695
1696 log_debug("Enqueued job %s/%s as %u", unit->meta.id, job_type_to_string(type), (unsigned) ret->id);
1697
1698 if (_ret)
1699 *_ret = ret;
1700
1701 return 0;
1702 }
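
/* Illustrative call (error handling elided; "u" stands for any loaded
 * unit). This roughly mirrors what manager_start_target() further down
 * does via manager_add_job_by_name():
 *
 *     DBusError error;
 *     Job *job = NULL;
 *
 *     dbus_error_init(&error);
 *     r = manager_add_job(m, JOB_START, u, JOB_REPLACE, false, &error, &job);
 *
 * On success the returned job is already installed on its unit and
 * queued for execution. */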
1703
1704 int manager_add_job_by_name(Manager *m, JobType type, const char *name, JobMode mode, bool override, DBusError *e, Job **_ret) {
1705 Unit *unit;
1706 int r;
1707
1708 assert(m);
1709 assert(type < _JOB_TYPE_MAX);
1710 assert(name);
1711 assert(mode < _JOB_MODE_MAX);
1712
1713 if ((r = manager_load_unit(m, name, NULL, NULL, &unit)) < 0)
1714 return r;
1715
1716 return manager_add_job(m, type, unit, mode, override, e, _ret);
1717 }
1718
1719 Job *manager_get_job(Manager *m, uint32_t id) {
1720 assert(m);
1721
1722 return hashmap_get(m->jobs, UINT32_TO_PTR(id));
1723 }
1724
1725 Unit *manager_get_unit(Manager *m, const char *name) {
1726 assert(m);
1727 assert(name);
1728
1729 return hashmap_get(m->units, name);
1730 }
1731
1732 unsigned manager_dispatch_load_queue(Manager *m) {
1733 Meta *meta;
1734 unsigned n = 0;
1735
1736 assert(m);
1737
1738 /* Make sure we are not run recursively */
1739 if (m->dispatching_load_queue)
1740 return 0;
1741
1742 m->dispatching_load_queue = true;
1743
1744 /* Dispatches the load queue. Takes a unit from the queue and
1745 * tries to load its data until the queue is empty */
1746
1747 while ((meta = m->load_queue)) {
1748 assert(meta->in_load_queue);
1749
1750 unit_load((Unit*) meta);
1751 n++;
1752 }
1753
1754 m->dispatching_load_queue = false;
1755 return n;
1756 }
1757
1758 int manager_load_unit_prepare(Manager *m, const char *name, const char *path, DBusError *e, Unit **_ret) {
1759 Unit *ret;
1760 int r;
1761
1762 assert(m);
1763 assert(name || path);
1764
1765 /* This will prepare the unit for loading, but not actually
1766 * load anything from disk. */
1767
1768 if (path && !is_path(path)) {
1769 dbus_set_error(e, BUS_ERROR_INVALID_PATH, "Path %s is not absolute.", path);
1770 return -EINVAL;
1771 }
1772
1773 if (!name)
1774 name = file_name_from_path(path);
1775
1776 if (!unit_name_is_valid(name, false)) {
1777 dbus_set_error(e, BUS_ERROR_INVALID_NAME, "Unit name %s is not valid.", name);
1778 return -EINVAL;
1779 }
1780
1781 if ((ret = manager_get_unit(m, name))) {
1782 *_ret = ret;
1783 return 1;
1784 }
1785
1786 if (!(ret = unit_new(m)))
1787 return -ENOMEM;
1788
1789 if (path)
1790 if (!(ret->meta.fragment_path = strdup(path))) {
1791 unit_free(ret);
1792 return -ENOMEM;
1793 }
1794
1795 if ((r = unit_add_name(ret, name)) < 0) {
1796 unit_free(ret);
1797 return r;
1798 }
1799
1800 unit_add_to_load_queue(ret);
1801 unit_add_to_dbus_queue(ret);
1802 unit_add_to_gc_queue(ret);
1803
1804 if (_ret)
1805 *_ret = ret;
1806
1807 return 0;
1808 }
1809
1810 int manager_load_unit(Manager *m, const char *name, const char *path, DBusError *e, Unit **_ret) {
1811 int r;
1812
1813 assert(m);
1814
1815 /* This will load the service information files, but not actually
1816 * start any services or anything. */
1817
1818 if ((r = manager_load_unit_prepare(m, name, path, e, _ret)) != 0)
1819 return r;
1820
1821 manager_dispatch_load_queue(m);
1822
1823 if (_ret)
1824 *_ret = unit_follow_merge(*_ret);
1825
1826 return 0;
1827 }
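
/* Illustration only -- "foo.service" is a made-up name:
 *
 *     Unit *u = NULL;
 *
 *     r = manager_load_unit(m, "foo.service", NULL, NULL, &u);
 *
 * If the name is already known the existing unit object is returned
 * (manager_load_unit_prepare() signals that with a return value of 1);
 * otherwise the load queue is dispatched synchronously before this
 * returns. */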
1828
1829 void manager_dump_jobs(Manager *s, FILE *f, const char *prefix) {
1830 Iterator i;
1831 Job *j;
1832
1833 assert(s);
1834 assert(f);
1835
1836 HASHMAP_FOREACH(j, s->jobs, i)
1837 job_dump(j, f, prefix);
1838 }
1839
1840 void manager_dump_units(Manager *s, FILE *f, const char *prefix) {
1841 Iterator i;
1842 Unit *u;
1843 const char *t;
1844
1845 assert(s);
1846 assert(f);
1847
1848 HASHMAP_FOREACH_KEY(u, t, s->units, i)
1849 if (u->meta.id == t)
1850 unit_dump(u, f, prefix);
1851 }
1852
1853 void manager_clear_jobs(Manager *m) {
1854 Job *j;
1855
1856 assert(m);
1857
1858 transaction_abort(m);
1859
1860 while ((j = hashmap_first(m->jobs)))
1861 job_finish_and_invalidate(j, JOB_CANCELED);
1862 }
1863
1864 unsigned manager_dispatch_run_queue(Manager *m) {
1865 Job *j;
1866 unsigned n = 0;
1867
1868 if (m->dispatching_run_queue)
1869 return 0;
1870
1871 m->dispatching_run_queue = true;
1872
1873 while ((j = m->run_queue)) {
1874 assert(j->installed);
1875 assert(j->in_run_queue);
1876
1877 job_run_and_invalidate(j);
1878 n++;
1879 }
1880
1881 m->dispatching_run_queue = false;
1882 return n;
1883 }
1884
1885 unsigned manager_dispatch_dbus_queue(Manager *m) {
1886 Job *j;
1887 Meta *meta;
1888 unsigned n = 0;
1889
1890 assert(m);
1891
1892 if (m->dispatching_dbus_queue)
1893 return 0;
1894
1895 m->dispatching_dbus_queue = true;
1896
1897 while ((meta = m->dbus_unit_queue)) {
1898 assert(meta->in_dbus_queue);
1899
1900 bus_unit_send_change_signal((Unit*) meta);
1901 n++;
1902 }
1903
1904 while ((j = m->dbus_job_queue)) {
1905 assert(j->in_dbus_queue);
1906
1907 bus_job_send_change_signal(j);
1908 n++;
1909 }
1910
1911 m->dispatching_dbus_queue = false;
1912 return n;
1913 }
1914
1915 static int manager_process_notify_fd(Manager *m) {
1916 ssize_t n;
1917
1918 assert(m);
1919
1920 for (;;) {
1921 char buf[4096];
1922 struct msghdr msghdr;
1923 struct iovec iovec;
1924 struct ucred *ucred;
1925 union {
1926 struct cmsghdr cmsghdr;
1927 uint8_t buf[CMSG_SPACE(sizeof(struct ucred))];
1928 } control;
1929 Unit *u;
1930 char **tags;
1931
1932 zero(iovec);
1933 iovec.iov_base = buf;
1934 iovec.iov_len = sizeof(buf)-1;
1935
1936 zero(control);
1937 zero(msghdr);
1938 msghdr.msg_iov = &iovec;
1939 msghdr.msg_iovlen = 1;
1940 msghdr.msg_control = &control;
1941 msghdr.msg_controllen = sizeof(control);
1942
1943 if ((n = recvmsg(m->notify_watch.fd, &msghdr, MSG_DONTWAIT)) <= 0) {
1944 if (n >= 0)
1945 return -EIO;
1946
1947 if (errno == EAGAIN || errno == EINTR)
1948 break;
1949
1950 return -errno;
1951 }
1952
1953 if (msghdr.msg_controllen < CMSG_LEN(sizeof(struct ucred)) ||
1954 control.cmsghdr.cmsg_level != SOL_SOCKET ||
1955 control.cmsghdr.cmsg_type != SCM_CREDENTIALS ||
1956 control.cmsghdr.cmsg_len != CMSG_LEN(sizeof(struct ucred))) {
1957 log_warning("Received notify message without credentials. Ignoring.");
1958 continue;
1959 }
1960
1961 ucred = (struct ucred*) CMSG_DATA(&control.cmsghdr);
1962
1963 if (!(u = hashmap_get(m->watch_pids, LONG_TO_PTR(ucred->pid))))
1964 if (!(u = cgroup_unit_by_pid(m, ucred->pid))) {
1965 log_warning("Cannot find unit for notify message of PID %lu.", (unsigned long) ucred->pid);
1966 continue;
1967 }
1968
1969 assert((size_t) n < sizeof(buf));
1970 buf[n] = 0;
1971 if (!(tags = strv_split(buf, "\n\r")))
1972 return -ENOMEM;
1973
1974 log_debug("Got notification message for unit %s", u->meta.id);
1975
1976 if (UNIT_VTABLE(u)->notify_message)
1977 UNIT_VTABLE(u)->notify_message(u, ucred->pid, tags);
1978
1979 strv_free(tags);
1980 }
1981
1982 return 0;
1983 }
1984
1985 static int manager_dispatch_sigchld(Manager *m) {
1986 assert(m);
1987
1988 for (;;) {
1989 siginfo_t si;
1990 Unit *u;
1991 int r;
1992
1993 zero(si);
1994
1995                 /* First we call waitid() for a PID and do not reap the
1996 * zombie. That way we can still access /proc/$PID for
1997 * it while it is a zombie. */
1998 if (waitid(P_ALL, 0, &si, WEXITED|WNOHANG|WNOWAIT) < 0) {
1999
2000 if (errno == ECHILD)
2001 break;
2002
2003 if (errno == EINTR)
2004 continue;
2005
2006 return -errno;
2007 }
2008
2009 if (si.si_pid <= 0)
2010 break;
2011
2012 if (si.si_code == CLD_EXITED || si.si_code == CLD_KILLED || si.si_code == CLD_DUMPED) {
2013 char *name = NULL;
2014
2015 get_process_name(si.si_pid, &name);
2016 log_debug("Got SIGCHLD for process %lu (%s)", (unsigned long) si.si_pid, strna(name));
2017 free(name);
2018 }
2019
2020 /* Let's flush any message the dying child might still
2021 * have queued for us. This ensures that the process
2022 * still exists in /proc so that we can figure out
2023 * which cgroup and hence unit it belongs to. */
2024 if ((r = manager_process_notify_fd(m)) < 0)
2025 return r;
2026
2027 /* And now figure out the unit this belongs to */
2028 if (!(u = hashmap_get(m->watch_pids, LONG_TO_PTR(si.si_pid))))
2029 u = cgroup_unit_by_pid(m, si.si_pid);
2030
2031 /* And now, we actually reap the zombie. */
2032 if (waitid(P_PID, si.si_pid, &si, WEXITED) < 0) {
2033 if (errno == EINTR)
2034 continue;
2035
2036 return -errno;
2037 }
2038
2039 if (si.si_code != CLD_EXITED && si.si_code != CLD_KILLED && si.si_code != CLD_DUMPED)
2040 continue;
2041
2042 log_debug("Child %lu died (code=%s, status=%i/%s)",
2043 (long unsigned) si.si_pid,
2044 sigchld_code_to_string(si.si_code),
2045 si.si_status,
2046 strna(si.si_code == CLD_EXITED
2047 ? exit_status_to_string(si.si_status, EXIT_STATUS_FULL)
2048 : signal_to_string(si.si_status)));
2049
2050 if (!u)
2051 continue;
2052
2053 log_debug("Child %lu belongs to %s", (long unsigned) si.si_pid, u->meta.id);
2054
2055 hashmap_remove(m->watch_pids, LONG_TO_PTR(si.si_pid));
2056 UNIT_VTABLE(u)->sigchld_event(u, si.si_pid, si.si_code, si.si_status);
2057 }
2058
2059 return 0;
2060 }
2061
2062 static int manager_start_target(Manager *m, const char *name, JobMode mode) {
2063 int r;
2064 DBusError error;
2065
2066 dbus_error_init(&error);
2067
2068 log_debug("Activating special unit %s", name);
2069
2070 if ((r = manager_add_job_by_name(m, JOB_START, name, mode, true, &error, NULL)) < 0)
2071 log_error("Failed to enqueue %s job: %s", name, bus_error(&error, r));
2072
2073 dbus_error_free(&error);
2074
2075 return r;
2076 }
2077
2078 static int manager_process_signal_fd(Manager *m) {
2079 ssize_t n;
2080 struct signalfd_siginfo sfsi;
2081 bool sigchld = false;
2082
2083 assert(m);
2084
2085 for (;;) {
2086 if ((n = read(m->signal_watch.fd, &sfsi, sizeof(sfsi))) != sizeof(sfsi)) {
2087
2088 if (n >= 0)
2089 return -EIO;
2090
2091 if (errno == EINTR || errno == EAGAIN)
2092 break;
2093
2094 return -errno;
2095 }
2096
2097 if (sfsi.ssi_pid > 0) {
2098 char *p = NULL;
2099
2100 get_process_name(sfsi.ssi_pid, &p);
2101
2102 log_debug("Received SIG%s from PID %lu (%s).",
2103 strna(signal_to_string(sfsi.ssi_signo)),
2104 (unsigned long) sfsi.ssi_pid, strna(p));
2105 free(p);
2106 } else
2107 log_debug("Received SIG%s.", strna(signal_to_string(sfsi.ssi_signo)));
2108
2109 switch (sfsi.ssi_signo) {
2110
2111 case SIGCHLD:
2112 sigchld = true;
2113 break;
2114
2115 case SIGTERM:
2116 if (m->running_as == MANAGER_SYSTEM) {
2117 /* This is for compatibility with the
2118 * original sysvinit */
2119 m->exit_code = MANAGER_REEXECUTE;
2120 break;
2121 }
2122
2123 /* Fall through */
2124
2125 case SIGINT:
2126 if (m->running_as == MANAGER_SYSTEM) {
2127 manager_start_target(m, SPECIAL_CTRL_ALT_DEL_TARGET, JOB_REPLACE);
2128 break;
2129 }
2130
2131 /* Run the exit target if there is one, if not, just exit. */
2132 if (manager_start_target(m, SPECIAL_EXIT_TARGET, JOB_REPLACE) < 0) {
2133 m->exit_code = MANAGER_EXIT;
2134 return 0;
2135 }
2136
2137 break;
2138
2139 case SIGWINCH:
2140 if (m->running_as == MANAGER_SYSTEM)
2141 manager_start_target(m, SPECIAL_KBREQUEST_TARGET, JOB_REPLACE);
2142
2143 /* This is a nop on non-init */
2144 break;
2145
2146 case SIGPWR:
2147 if (m->running_as == MANAGER_SYSTEM)
2148 manager_start_target(m, SPECIAL_SIGPWR_TARGET, JOB_REPLACE);
2149
2150 /* This is a nop on non-init */
2151 break;
2152
2153 case SIGUSR1: {
2154 Unit *u;
2155
2156 u = manager_get_unit(m, SPECIAL_DBUS_SERVICE);
2157
2158 if (!u || UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u))) {
2159 log_info("Trying to reconnect to bus...");
2160 bus_init(m, true);
2161 }
2162
2163 if (!u || !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u))) {
2164 log_info("Loading D-Bus service...");
2165 manager_start_target(m, SPECIAL_DBUS_SERVICE, JOB_REPLACE);
2166 }
2167
2168 break;
2169 }
2170
2171 case SIGUSR2: {
2172 FILE *f;
2173 char *dump = NULL;
2174 size_t size;
2175
2176 if (!(f = open_memstream(&dump, &size))) {
2177 log_warning("Failed to allocate memory stream.");
2178 break;
2179 }
2180
2181 manager_dump_units(m, f, "\t");
2182 manager_dump_jobs(m, f, "\t");
2183
2184 if (ferror(f)) {
2185 fclose(f);
2186 free(dump);
2187 log_warning("Failed to write status stream");
2188 break;
2189 }
2190
2191 fclose(f);
2192 log_dump(LOG_INFO, dump);
2193 free(dump);
2194
2195 break;
2196 }
2197
2198 case SIGHUP:
2199 m->exit_code = MANAGER_RELOAD;
2200 break;
2201
2202 default: {
2203 /* Starting SIGRTMIN+0 */
2204 static const char * const target_table[] = {
2205 [0] = SPECIAL_DEFAULT_TARGET,
2206 [1] = SPECIAL_RESCUE_TARGET,
2207 [2] = SPECIAL_EMERGENCY_TARGET,
2208 [3] = SPECIAL_HALT_TARGET,
2209 [4] = SPECIAL_POWEROFF_TARGET,
2210 [5] = SPECIAL_REBOOT_TARGET,
2211 [6] = SPECIAL_KEXEC_TARGET
2212 };
2213
2214 /* Starting SIGRTMIN+13, so that target halt and system halt are 10 apart */
2215 static const ManagerExitCode code_table[] = {
2216 [0] = MANAGER_HALT,
2217 [1] = MANAGER_POWEROFF,
2218 [2] = MANAGER_REBOOT,
2219 [3] = MANAGER_KEXEC
2220 };
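/* Illustrative mapping (offsets from SIGRTMIN): +0 starts the
 * default target, +1 isolates the rescue target, +2 isolates the
 * emergency target, +3..+6 start the halt, poweroff, reboot and
 * kexec targets, while +13..+16 trigger the immediate halt,
 * poweroff, reboot and kexec exit codes without going through a
 * target. */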
2221
2222 if ((int) sfsi.ssi_signo >= SIGRTMIN+0 &&
2223 (int) sfsi.ssi_signo < SIGRTMIN+(int) ELEMENTSOF(target_table)) {
2224 manager_start_target(m, target_table[sfsi.ssi_signo - SIGRTMIN],
2225 ((int) sfsi.ssi_signo == SIGRTMIN+1 || (int) sfsi.ssi_signo == SIGRTMIN+2) ? JOB_ISOLATE : JOB_REPLACE);
2226 break;
2227 }
2228
2229 if ((int) sfsi.ssi_signo >= SIGRTMIN+13 &&
2230 (int) sfsi.ssi_signo < SIGRTMIN+13+(int) ELEMENTSOF(code_table)) {
2231 m->exit_code = code_table[sfsi.ssi_signo - SIGRTMIN - 13];
2232 break;
2233 }
2234
2235 switch (sfsi.ssi_signo - SIGRTMIN) {
2236
2237 case 20:
2238 log_debug("Enabling showing of status.");
2239 m->show_status = true;
2240 break;
2241
2242 case 21:
2243 log_debug("Disabling showing of status.");
2244 m->show_status = false;
2245 break;
2246
2247 default:
2248 log_warning("Got unhandled signal <%s>.", strna(signal_to_string(sfsi.ssi_signo)));
2249 }
2250 }
2251 }
2252 }
2253
2254 if (sigchld)
2255 return manager_dispatch_sigchld(m);
2256
2257 return 0;
2258 }
2259
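/* Central epoll dispatcher: look at the Watch attached to the event
 * and route it to the right subsystem (signals, notifications, unit
 * and job timers, mount/swap/udev tables, or the D-Bus glue). */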
2260 static int process_event(Manager *m, struct epoll_event *ev) {
2261 int r;
2262 Watch *w;
2263
2264 assert(m);
2265 assert(ev);
2266
2267 assert_se(w = ev->data.ptr);
2268
2269 if (w->type == WATCH_INVALID)
2270 return 0;
2271
2272 switch (w->type) {
2273
2274 case WATCH_SIGNAL:
2275
2276 /* An incoming signal? */
2277 if (ev->events != EPOLLIN)
2278 return -EINVAL;
2279
2280 if ((r = manager_process_signal_fd(m)) < 0)
2281 return r;
2282
2283 break;
2284
2285 case WATCH_NOTIFY:
2286
2287 /* An incoming daemon notification event? */
2288 if (ev->events != EPOLLIN)
2289 return -EINVAL;
2290
2291 if ((r = manager_process_notify_fd(m)) < 0)
2292 return r;
2293
2294 break;
2295
2296 case WATCH_FD:
2297
2298 /* Some fd event, to be dispatched to the units */
2299 UNIT_VTABLE(w->data.unit)->fd_event(w->data.unit, w->fd, ev->events, w);
2300 break;
2301
2302 case WATCH_UNIT_TIMER:
2303 case WATCH_JOB_TIMER: {
2304 uint64_t v;
2305 ssize_t k;
2306
2307 /* Some timer event, to be dispatched to the units */
2308 if ((k = read(w->fd, &v, sizeof(v))) != sizeof(v)) {
2309
2310 if (k < 0 && (errno == EINTR || errno == EAGAIN))
2311 break;
2312
2313 return k < 0 ? -errno : -EIO;
2314 }
2315
2316 if (w->type == WATCH_UNIT_TIMER)
2317 UNIT_VTABLE(w->data.unit)->timer_event(w->data.unit, v, w);
2318 else
2319 job_timer_event(w->data.job, v, w);
2320 break;
2321 }
2322
2323 case WATCH_MOUNT:
2324 /* Some mount table change, intended for the mount subsystem */
2325 mount_fd_event(m, ev->events);
2326 break;
2327
2328 case WATCH_SWAP:
2329 /* Some swap table change, intended for the swap subsystem */
2330 swap_fd_event(m, ev->events);
2331 break;
2332
2333 case WATCH_UDEV:
2334 /* Some notification from udev, intended for the device subsystem */
2335 device_fd_event(m, ev->events);
2336 break;
2337
2338 case WATCH_DBUS_WATCH:
2339 bus_watch_event(m, w, ev->events);
2340 break;
2341
2342 case WATCH_DBUS_TIMEOUT:
2343 bus_timeout_event(m, w, ev->events);
2344 break;
2345
2346 default:
2347 log_error("event type=%i", w->type);
2348 assert_not_reached("Unknown epoll event type.");
2349 }
2350
2351 return 0;
2352 }
2353
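/* The main event loop: drain all internal work queues (load, run,
 * bus, cleanup, gc, dbus, swap) before blocking in epoll_wait(), and
 * rate-limit the iterations so a dispatch bug cannot busy-loop us. */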
2354 int manager_loop(Manager *m) {
2355 int r;
2356
2357 RATELIMIT_DEFINE(rl, 1*USEC_PER_SEC, 50000);
2358
2359 assert(m);
2360 m->exit_code = MANAGER_RUNNING;
2361
2362 /* Release the path cache */
2363 set_free_free(m->unit_path_cache);
2364 m->unit_path_cache = NULL;
2365
2366 manager_check_finished(m);
2367
2368 /* There might still be some zombies hanging around from
2369 * before we were exec()'ed. Let's reap them. */
2370 if ((r = manager_dispatch_sigchld(m)) < 0)
2371 return r;
2372
2373 while (m->exit_code == MANAGER_RUNNING) {
2374 struct epoll_event event;
2375 int n;
2376
2377 if (!ratelimit_test(&rl)) {
2378 /* Yay, something is going seriously wrong, pause a little */
2379 log_warning("Looping too fast. Throttling execution a little.");
2380 sleep(1);
2381 }
2382
2383 if (manager_dispatch_load_queue(m) > 0)
2384 continue;
2385
2386 if (manager_dispatch_run_queue(m) > 0)
2387 continue;
2388
2389 if (bus_dispatch(m) > 0)
2390 continue;
2391
2392 if (manager_dispatch_cleanup_queue(m) > 0)
2393 continue;
2394
2395 if (manager_dispatch_gc_queue(m) > 0)
2396 continue;
2397
2398 if (manager_dispatch_dbus_queue(m) > 0)
2399 continue;
2400
2401 if (swap_dispatch_reload(m) > 0)
2402 continue;
2403
2404 if ((n = epoll_wait(m->epoll_fd, &event, 1, -1)) < 0) {
2405
2406 if (errno == EINTR)
2407 continue;
2408
2409 return -errno;
2410 }
2411
2412 assert(n == 1);
2413
2414 if ((r = process_event(m, &event)) < 0)
2415 return r;
2416 }
2417
2418 return m->exit_code;
2419 }
2420
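/* Map a D-Bus object path back to a unit: strip the common
 * "/org/freedesktop/systemd1/unit/" prefix (31 characters) and
 * unescape the remainder to recover the unit name. */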
2421 int manager_get_unit_from_dbus_path(Manager *m, const char *s, Unit **_u) {
2422 char *n;
2423 Unit *u;
2424
2425 assert(m);
2426 assert(s);
2427 assert(_u);
2428
2429 if (!startswith(s, "/org/freedesktop/systemd1/unit/"))
2430 return -EINVAL;
2431
2432 if (!(n = bus_path_unescape(s+31)))
2433 return -ENOMEM;
2434
2435 u = manager_get_unit(m, n);
2436 free(n);
2437
2438 if (!u)
2439 return -ENOENT;
2440
2441 *_u = u;
2442
2443 return 0;
2444 }
2445
2446 int manager_get_job_from_dbus_path(Manager *m, const char *s, Job **_j) {
2447 Job *j;
2448 unsigned id;
2449 int r;
2450
2451 assert(m);
2452 assert(s);
2453 assert(_j);
2454
2455 if (!startswith(s, "/org/freedesktop/systemd1/job/"))
2456 return -EINVAL;
2457
2458 if ((r = safe_atou(s + 30, &id)) < 0)
2459 return r;
2460
2461 if (!(j = manager_get_job(m, id)))
2462 return -ENOENT;
2463
2464 *_j = j;
2465
2466 return 0;
2467 }
2468
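/* Emit an audit record for a service state change, but only when we
 * are the system instance, are not just deserializing old state, and
 * the unit actually is a service. */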
2469 void manager_send_unit_audit(Manager *m, Unit *u, int type, bool success) {
2470
2471 #ifdef HAVE_AUDIT
2472 char *p;
2473
2474 if (m->audit_fd < 0)
2475 return;
2476
2477 /* Don't generate audit events if the service was already
2478 * started and we're just deserializing */
2479 if (m->n_reloading > 0)
2480 return;
2481
2482 if (m->running_as != MANAGER_SYSTEM)
2483 return;
2484
2485 if (u->meta.type != UNIT_SERVICE)
2486 return;
2487
2488 if (!(p = unit_name_to_prefix_and_instance(u->meta.id))) {
2489 log_error("Failed to allocate unit name for audit message: %s", strerror(ENOMEM));
2490 return;
2491 }
2492
2493 if (audit_log_user_comm_message(m->audit_fd, type, "", p, NULL, NULL, NULL, success) < 0) {
2494 log_warning("Failed to send audit message: %m");
2495
2496 if (errno == EPERM) {
2497 /* We aren't allowed to send audit messages?
2498 * Then let's not retry again, to avoid
2499 * spamming the user with the same messages
2500 * over and over. */
2501
2502 audit_close(m->audit_fd);
2503 m->audit_fd = -1;
2504 }
2505 }
2506
2507 free(p);
2508 #endif
2509
2510 }
2511
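/* Tell Plymouth about a starting unit via its abstract socket. The
 * message built below is the byte 'U', a 0x02 byte, a one-byte
 * length (strlen of the unit name plus one for the trailing NUL) and
 * the NUL-terminated unit name; e.g. for "foo.service" the length
 * byte is 12. */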
2512 void manager_send_unit_plymouth(Manager *m, Unit *u) {
2513 int fd = -1;
2514 union sockaddr_union sa;
2515 int n = 0;
2516 char *message = NULL;
2517
2518 /* Don't generate plymouth events if the service was already
2519 * started and we're just deserializing */
2520 if (m->n_reloading > 0)
2521 return;
2522
2523 if (m->running_as != MANAGER_SYSTEM)
2524 return;
2525
2526 if (u->meta.type != UNIT_SERVICE &&
2527 u->meta.type != UNIT_MOUNT &&
2528 u->meta.type != UNIT_SWAP)
2529 return;
2530
2531 /* We set SOCK_NONBLOCK here so that we rather drop the
2532 * message than wait for plymouth */
2533 if ((fd = socket(AF_UNIX, SOCK_STREAM|SOCK_CLOEXEC|SOCK_NONBLOCK, 0)) < 0) {
2534 log_error("socket() failed: %m");
2535 return;
2536 }
2537
2538 zero(sa);
2539 sa.sa.sa_family = AF_UNIX;
2540 strncpy(sa.un.sun_path+1, "/org/freedesktop/plymouthd", sizeof(sa.un.sun_path)-1);
2541 if (connect(fd, &sa.sa, offsetof(struct sockaddr_un, sun_path) + 1 + strlen(sa.un.sun_path+1)) < 0) {
2542
2543 if (errno != EPIPE &&
2544 errno != EAGAIN &&
2545 errno != ENOENT &&
2546 errno != ECONNREFUSED &&
2547 errno != ECONNRESET &&
2548 errno != ECONNABORTED)
2549 log_error("connect() failed: %m");
2550
2551 goto finish;
2552 }
2553
2554 if (asprintf(&message, "U\002%c%s%n", (int) (strlen(u->meta.id) + 1), u->meta.id, &n) < 0) {
2555 log_error("Out of memory");
2556 goto finish;
2557 }
2558
2559 errno = 0;
2560 if (write(fd, message, n + 1) != n + 1) {
2561
2562 if (errno != EPIPE &&
2563 errno != EAGAIN &&
2564 errno != ENOENT &&
2565 errno != ECONNREFUSED &&
2566 errno != ECONNRESET &&
2567 errno != ECONNABORTED)
2568 log_error("Failed to write Plymouth message: %m");
2569
2570 goto finish;
2571 }
2572
2573 finish:
2574 if (fd >= 0)
2575 close_nointr_nofail(fd);
2576
2577 free(message);
2578 }
2579
2580 void manager_dispatch_bus_name_owner_changed(
2581 Manager *m,
2582 const char *name,
2583 const char* old_owner,
2584 const char *new_owner) {
2585
2586 Unit *u;
2587
2588 assert(m);
2589 assert(name);
2590
2591 if (!(u = hashmap_get(m->watch_bus, name)))
2592 return;
2593
2594 UNIT_VTABLE(u)->bus_name_owner_change(u, name, old_owner, new_owner);
2595 }
2596
2597 void manager_dispatch_bus_query_pid_done(
2598 Manager *m,
2599 const char *name,
2600 pid_t pid) {
2601
2602 Unit *u;
2603
2604 assert(m);
2605 assert(name);
2606 assert(pid >= 1);
2607
2608 if (!(u = hashmap_get(m->watch_bus, name)))
2609 return;
2610
2611 UNIT_VTABLE(u)->bus_query_pid_done(u, name, pid);
2612 }
2613
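/* Create an anonymous temporary file for the serialized manager
 * state: mkostemp() in /run (system instance) or /tmp (user
 * instance), immediately unlinked so it disappears with the last
 * file descriptor. */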
2614 int manager_open_serialization(Manager *m, FILE **_f) {
2615 char *path = NULL;
2616 mode_t saved_umask;
2617 int fd;
2618 FILE *f;
2619
2620 assert(_f);
2621
2622 if (m->running_as == MANAGER_SYSTEM)
2623 asprintf(&path, "/run/systemd/dump-%lu-XXXXXX", (unsigned long) getpid());
2624 else
2625 asprintf(&path, "/tmp/systemd-dump-%lu-XXXXXX", (unsigned long) getpid());
2626
2627 if (!path)
2628 return -ENOMEM;
2629
2630 saved_umask = umask(0077);
2631 fd = mkostemp(path, O_RDWR|O_CLOEXEC);
2632 umask(saved_umask);
2633
2634 if (fd < 0) {
2635 free(path);
2636 return -errno;
2637 }
2638
2639 unlink(path);
2640
2641 log_debug("Serializing state to %s", path);
2642 free(path);
2643
2644 if (!(f = fdopen(fd, "w+")))
2645 return -errno;
2646
2647 *_f = f;
2648
2649 return 0;
2650 }
2651
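/* Write the manager state to f: a few key=value header lines, an
 * empty line, and then one section per serializable unit, each
 * introduced by the unit name on a line of its own. Roughly (unit
 * fields and timestamp encoding come from helpers elsewhere):
 *
 *   current-job-id=17
 *   taint-usr=no
 *   startup-timestamp=...
 *
 *   foo.service
 *   <fields written by unit_serialize()>
 */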
2652 int manager_serialize(Manager *m, FILE *f, FDSet *fds) {
2653 Iterator i;
2654 Unit *u;
2655 const char *t;
2656 int r;
2657
2658 assert(m);
2659 assert(f);
2660 assert(fds);
2661
2662 m->n_reloading ++;
2663
2664 fprintf(f, "current-job-id=%i\n", m->current_job_id);
2665 fprintf(f, "taint-usr=%s\n", yes_no(m->taint_usr));
2666
2667 dual_timestamp_serialize(f, "initrd-timestamp", &m->initrd_timestamp);
2668 dual_timestamp_serialize(f, "startup-timestamp", &m->startup_timestamp);
2669 dual_timestamp_serialize(f, "finish-timestamp", &m->finish_timestamp);
2670
2671 fputc('\n', f);
2672
2673 HASHMAP_FOREACH_KEY(u, t, m->units, i) {
2674 if (u->meta.id != t)
2675 continue;
2676
2677 if (!unit_can_serialize(u))
2678 continue;
2679
2680 /* Start marker */
2681 fputs(u->meta.id, f);
2682 fputc('\n', f);
2683
2684 if ((r = unit_serialize(u, f, fds)) < 0) {
2685 m->n_reloading --;
2686 return r;
2687 }
2688 }
2689
2690 assert(m->n_reloading > 0);
2691 m->n_reloading --;
2692
2693 if (ferror(f))
2694 return -EIO;
2695
2696 r = bus_fdset_add_all(m, fds);
2697 if (r < 0)
2698 return r;
2699
2700 return 0;
2701 }
2702
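/* The inverse of manager_serialize(): parse the key=value header
 * lines up to the first empty line, then load each unit named by a
 * start marker line and let it consume its own section. */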
2703 int manager_deserialize(Manager *m, FILE *f, FDSet *fds) {
2704 int r = 0;
2705
2706 assert(m);
2707 assert(f);
2708
2709 log_debug("Deserializing state...");
2710
2711 m->n_reloading ++;
2712
2713 for (;;) {
2714 char line[LINE_MAX], *l;
2715
2716 if (!fgets(line, sizeof(line), f)) {
2717 if (feof(f))
2718 r = 0;
2719 else
2720 r = -errno;
2721
2722 goto finish;
2723 }
2724
2725 char_array_0(line);
2726 l = strstrip(line);
2727
2728 if (l[0] == 0)
2729 break;
2730
2731 if (startswith(l, "current-job-id=")) {
2732 uint32_t id;
2733
2734 if (safe_atou32(l+15, &id) < 0)
2735 log_debug("Failed to parse current job id value %s", l+15);
2736 else
2737 m->current_job_id = MAX(m->current_job_id, id);
2738 } else if (startswith(l, "taint-usr=")) {
2739 int b;
2740
2741 if ((b = parse_boolean(l+10)) < 0)
2742 log_debug("Failed to parse taint /usr flag %s", l+10);
2743 else
2744 m->taint_usr = m->taint_usr || b;
2745 } else if (startswith(l, "initrd-timestamp="))
2746 dual_timestamp_deserialize(l+17, &m->initrd_timestamp);
2747 else if (startswith(l, "startup-timestamp="))
2748 dual_timestamp_deserialize(l+18, &m->startup_timestamp);
2749 else if (startswith(l, "finish-timestamp="))
2750 dual_timestamp_deserialize(l+17, &m->finish_timestamp);
2751 else
2752 log_debug("Unknown serialization item '%s'", l);
2753 }
2754
2755 for (;;) {
2756 Unit *u;
2757 char name[UNIT_NAME_MAX+2];
2758
2759 /* Start marker */
2760 if (!fgets(name, sizeof(name), f)) {
2761 if (feof(f))
2762 r = 0;
2763 else
2764 r = -errno;
2765
2766 goto finish;
2767 }
2768
2769 char_array_0(name);
2770
2771 if ((r = manager_load_unit(m, strstrip(name), NULL, NULL, &u)) < 0)
2772 goto finish;
2773
2774 if ((r = unit_deserialize(u, f, fds)) < 0)
2775 goto finish;
2776 }
2777
2778 finish:
2779 if (ferror(f)) {
2780 r = -EIO;
2781 goto finish;
2782 }
2783
2784 assert(m->n_reloading > 0);
2785 m->n_reloading --;
2786
2787 return r;
2788 }
2789
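/* Full reload: serialize the current state to a temporary file,
 * throw away all units and jobs, rebuild the lookup paths, re-run
 * the generators and the unit path cache, re-enumerate, deserialize
 * the saved state back in and finally coldplug everything. */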
2790 int manager_reload(Manager *m) {
2791 int r, q;
2792 FILE *f;
2793 FDSet *fds;
2794
2795 assert(m);
2796
2797 if ((r = manager_open_serialization(m, &f)) < 0)
2798 return r;
2799
2800 m->n_reloading ++;
2801
2802 if (!(fds = fdset_new())) {
2803 m->n_reloading --;
2804 r = -ENOMEM;
2805 goto finish;
2806 }
2807
2808 if ((r = manager_serialize(m, f, fds)) < 0) {
2809 m->n_reloading --;
2810 goto finish;
2811 }
2812
2813 if (fseeko(f, 0, SEEK_SET) < 0) {
2814 m->n_reloading --;
2815 r = -errno;
2816 goto finish;
2817 }
2818
2819 /* From here on there is no way back. */
2820 manager_clear_jobs_and_units(m);
2821 manager_undo_generators(m);
2822
2823 /* Find new unit paths */
2824 lookup_paths_free(&m->lookup_paths);
2825 if ((q = lookup_paths_init(&m->lookup_paths, m->running_as)) < 0)
2826 r = q;
2827
2828 manager_run_generators(m);
2829
2830 manager_build_unit_path_cache(m);
2831
2832 /* First, enumerate what we can from all config files */
2833 if ((q = manager_enumerate(m)) < 0)
2834 r = q;
2835
2836 /* Second, deserialize our stored data */
2837 if ((q = manager_deserialize(m, f, fds)) < 0)
2838 r = q;
2839
2840 fclose(f);
2841 f = NULL;
2842
2843 /* Third, fire things up! */
2844 if ((q = manager_coldplug(m)) < 0)
2845 r = q;
2846
2847 assert(m->n_reloading > 0);
2848 m->n_reloading--;
2849
2850 finish:
2851 if (f)
2852 fclose(f);
2853
2854 if (fds)
2855 fdset_free(fds);
2856
2857 return r;
2858 }
2859
2860 bool manager_is_booting_or_shutting_down(Manager *m) {
2861 Unit *u;
2862
2863 assert(m);
2864
2865 /* Is the initial job still around? */
2866 if (manager_get_job(m, 1))
2867 return true;
2868
2869 /* Is there a job for the shutdown target? */
2870 if (((u = manager_get_unit(m, SPECIAL_SHUTDOWN_TARGET))))
2871 return !!u->meta.job;
2872
2873 return false;
2874 }
2875
2876 void manager_reset_failed(Manager *m) {
2877 Unit *u;
2878 Iterator i;
2879
2880 assert(m);
2881
2882 HASHMAP_FOREACH(u, m->units, i)
2883 unit_reset_failed(u);
2884 }
2885
2886 bool manager_unit_pending_inactive(Manager *m, const char *name) {
2887 Unit *u;
2888
2889 assert(m);
2890 assert(name);
2891
2892 /* Returns true if the unit is inactive or going down */
2893 if (!(u = manager_get_unit(m, name)))
2894 return true;
2895
2896 return unit_pending_inactive(u);
2897 }
2898
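/* Once the last job is gone, record the finish timestamp and log how
 * long startup took, split into kernel, initrd and userspace phases.
 * E.g. with the initrd entered at 2s, systemd started at 5s and the
 * last job finished at 9s (monotonic), this reports 2s (kernel) +
 * 3s (initrd) + 4s (userspace) = 9s. */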
2899 void manager_check_finished(Manager *m) {
2900 char userspace[FORMAT_TIMESPAN_MAX], initrd[FORMAT_TIMESPAN_MAX], kernel[FORMAT_TIMESPAN_MAX], sum[FORMAT_TIMESPAN_MAX];
2901 usec_t kernel_usec = 0, initrd_usec = 0, userspace_usec = 0, total_usec = 0;
2902
2903 assert(m);
2904
2905 if (dual_timestamp_is_set(&m->finish_timestamp))
2906 return;
2907
2908 if (hashmap_size(m->jobs) > 0)
2909 return;
2910
2911 dual_timestamp_get(&m->finish_timestamp);
2912
2913 if (m->running_as == MANAGER_SYSTEM && detect_container(NULL) <= 0) {
2914
2915 userspace_usec = m->finish_timestamp.monotonic - m->startup_timestamp.monotonic;
2916 total_usec = m->finish_timestamp.monotonic;
2917
2918 if (dual_timestamp_is_set(&m->initrd_timestamp)) {
2919
2920 kernel_usec = m->initrd_timestamp.monotonic;
2921 initrd_usec = m->startup_timestamp.monotonic - m->initrd_timestamp.monotonic;
2922
2923 log_info("Startup finished in %s (kernel) + %s (initrd) + %s (userspace) = %s.",
2924 format_timespan(kernel, sizeof(kernel), kernel_usec),
2925 format_timespan(initrd, sizeof(initrd), initrd_usec),
2926 format_timespan(userspace, sizeof(userspace), userspace_usec),
2927 format_timespan(sum, sizeof(sum), total_usec));
2928 } else {
2929 kernel_usec = m->startup_timestamp.monotonic;
2930 initrd_usec = 0;
2931
2932 log_info("Startup finished in %s (kernel) + %s (userspace) = %s.",
2933 format_timespan(kernel, sizeof(kernel), kernel_usec),
2934 format_timespan(userspace, sizeof(userspace), userspace_usec),
2935 format_timespan(sum, sizeof(sum), total_usec));
2936 }
2937 } else {
2938 userspace_usec = initrd_usec = kernel_usec = 0;
2939 total_usec = m->finish_timestamp.monotonic - m->startup_timestamp.monotonic;
2940
2941 log_debug("Startup finished in %s.",
2942 format_timespan(sum, sizeof(sum), total_usec));
2943 }
2944
2945 bus_broadcast_finished(m, kernel_usec, initrd_usec, userspace_usec, total_usec);
2946
2947 sd_notifyf(false,
2948 "READY=1\nSTATUS=Startup finished in %s.",
2949 format_timespan(sum, sizeof(sum), total_usec));
2950 }
2951
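/* Run every generator binary found in the generator directory,
 * passing the output directory (/run/systemd/generator when running
 * as PID 1, a temporary directory otherwise) as argv[1]. If the
 * generators produced units there, add that directory to the unit
 * search path; if it stayed empty, remove it again right away. */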
2952 void manager_run_generators(Manager *m) {
2953 DIR *d = NULL;
2954 const char *generator_path;
2955 const char *argv[3];
2956
2957 assert(m);
2958
2959 generator_path = m->running_as == MANAGER_SYSTEM ? SYSTEM_GENERATOR_PATH : USER_GENERATOR_PATH;
2960 if (!(d = opendir(generator_path))) {
2961
2962 if (errno == ENOENT)
2963 return;
2964
2965 log_error("Failed to enumerate generator directory: %m");
2966 return;
2967 }
2968
2969 if (!m->generator_unit_path) {
2970 const char *p;
2971 char user_path[] = "/tmp/systemd-generator-XXXXXX";
2972
2973 if (m->running_as == MANAGER_SYSTEM && getpid() == 1) {
2974 p = "/run/systemd/generator";
2975
2976 if (mkdir_p(p, 0755) < 0) {
2977 log_error("Failed to create generator directory: %m");
2978 goto finish;
2979 }
2980
2981 } else {
2982 if (!(p = mkdtemp(user_path))) {
2983 log_error("Failed to create generator directory: %m");
2984 goto finish;
2985 }
2986 }
2987
2988 if (!(m->generator_unit_path = strdup(p))) {
2989 log_error("Failed to allocate generator unit path.");
2990 goto finish;
2991 }
2992 }
2993
2994 argv[0] = NULL; /* Leave this empty, execute_directory() will fill something in */
2995 argv[1] = m->generator_unit_path;
2996 argv[2] = NULL;
2997
2998 execute_directory(generator_path, d, (char**) argv);
2999
3000 if (rmdir(m->generator_unit_path) >= 0) {
3001 /* Huh, we were able to remove this directory? That
3002 * means it was empty, so there is nothing to add to the
3003 * search path and we can shortcut this. */
3004
3005 free(m->generator_unit_path);
3006 m->generator_unit_path = NULL;
3007 goto finish;
3008 }
3009
3010 if (!strv_find(m->lookup_paths.unit_path, m->generator_unit_path)) {
3011 char **l;
3012
3013 if (!(l = strv_append(m->lookup_paths.unit_path, m->generator_unit_path))) {
3014 log_error("Failed to add generator directory to unit search path: %m");
3015 goto finish;
3016 }
3017
3018 strv_free(m->lookup_paths.unit_path);
3019 m->lookup_paths.unit_path = l;
3020
3021 log_debug("Added generator unit path %s to search path.", m->generator_unit_path);
3022 }
3023
3024 finish:
3025 if (d)
3026 closedir(d);
3027 }
3028
3029 void manager_undo_generators(Manager *m) {
3030 assert(m);
3031
3032 if (!m->generator_unit_path)
3033 return;
3034
3035 strv_remove(m->lookup_paths.unit_path, m->generator_unit_path);
3036 rm_rf(m->generator_unit_path, false, true);
3037
3038 free(m->generator_unit_path);
3039 m->generator_unit_path = NULL;
3040 }
3041
3042 int manager_set_default_controllers(Manager *m, char **controllers) {
3043 char **l;
3044
3045 assert(m);
3046
3047 if (!(l = strv_copy(controllers)))
3048 return -ENOMEM;
3049
3050 strv_free(m->default_controllers);
3051 m->default_controllers = l;
3052
3053 return 0;
3054 }
3055
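/* Decide whether we may log to syslog: close our syslog connection
 * while the syslog socket unit is only partially set up or the
 * syslog target is not active, and (re)open logging once both are in
 * a usable state. */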
3056 void manager_recheck_syslog(Manager *m) {
3057 Unit *u;
3058
3059 assert(m);
3060
3061 if (m->running_as != MANAGER_SYSTEM)
3062 return;
3063
3064 if ((u = manager_get_unit(m, SPECIAL_SYSLOG_SOCKET))) {
3065 SocketState state;
3066
3067 state = SOCKET(u)->state;
3068
3069 if (state != SOCKET_DEAD &&
3070 state != SOCKET_FAILED &&
3071 state != SOCKET_RUNNING) {
3072
3073 /* Hmm, the socket is not set up, or is still
3074 * listening, so we'd better not try to use
3075 * it. Note that we have no problem if the
3076 * socket is completely down, since there
3077 * might be a foreign /dev/log socket around
3078 * and we want to make use of that.
3079 */
3080
3081 log_close_syslog();
3082 return;
3083 }
3084 }
3085
3086 if ((u = manager_get_unit(m, SPECIAL_SYSLOG_TARGET)))
3087 if (TARGET(u)->state != TARGET_ACTIVE) {
3088 log_close_syslog();
3089 return;
3090 }
3091
3092 /* Hmm, OK, so the socket is either fully up, or fully down,
3093 * and the target is up, so let's make use of the socket */
3094 log_open();
3095 }
3096
3097 static const char* const manager_running_as_table[_MANAGER_RUNNING_AS_MAX] = {
3098 [MANAGER_SYSTEM] = "system",
3099 [MANAGER_USER] = "user"
3100 };
3101
3102 DEFINE_STRING_TABLE_LOOKUP(manager_running_as, ManagerRunningAs);