1 /*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/
2
3 /***
4 This file is part of systemd.
5
6 Copyright 2010 Lennart Poettering
7
8 systemd is free software; you can redistribute it and/or modify it
9 under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2 of the License, or
11 (at your option) any later version.
12
13 systemd is distributed in the hope that it will be useful, but
14 WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with systemd; If not, see <http://www.gnu.org/licenses/>.
20 ***/
21
22 #include <assert.h>
23 #include <errno.h>
24 #include <string.h>
25 #include <sys/epoll.h>
26 #include <signal.h>
27 #include <sys/signalfd.h>
28 #include <sys/wait.h>
29 #include <unistd.h>
30 #include <sys/poll.h>
31 #include <sys/reboot.h>
32 #include <sys/ioctl.h>
33 #include <linux/kd.h>
34 #include <termios.h>
35 #include <fcntl.h>
36 #include <sys/types.h>
37 #include <sys/stat.h>
38 #include <dirent.h>
39
40 #ifdef HAVE_AUDIT
41 #include <libaudit.h>
42 #endif
43
44 #include "manager.h"
45 #include "hashmap.h"
46 #include "macro.h"
47 #include "strv.h"
48 #include "log.h"
49 #include "util.h"
50 #include "ratelimit.h"
51 #include "cgroup.h"
52 #include "mount-setup.h"
53 #include "unit-name.h"
54 #include "dbus-unit.h"
55 #include "dbus-job.h"
56 #include "missing.h"
57 #include "path-lookup.h"
58 #include "special.h"
59 #include "bus-errors.h"
60 #include "exit-status.h"
61 #include "sd-daemon.h"
62
63 /* As soon as 16 units are in our GC queue, make sure to run a gc sweep */
64 #define GC_QUEUE_ENTRIES_MAX 16
65
66 /* As soon as 10s have passed since a unit was added to our GC queue, make sure to run a gc sweep */
67 #define GC_QUEUE_USEC_MAX (10*USEC_PER_SEC)
68
69 /* Where clients shall send notification messages to */
70 #define NOTIFY_SOCKET_SYSTEM "/run/systemd/notify"
71 #define NOTIFY_SOCKET_USER "@/org/freedesktop/systemd1/notify"
72
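/* Sets up the datagram socket on which units report readiness and
 * status changes back to us. Clients typically send to it via
 * sd_notify(3), e.g. sd_notify(0, "READY=1\nSTATUS=Ready."). */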
73 static int manager_setup_notify(Manager *m) {
74 union {
75 struct sockaddr sa;
76 struct sockaddr_un un;
77 } sa;
78 struct epoll_event ev;
79 int one = 1, r;
80 mode_t u;
81
82 assert(m);
83
84 m->notify_watch.type = WATCH_NOTIFY;
85 if ((m->notify_watch.fd = socket(AF_UNIX, SOCK_DGRAM|SOCK_CLOEXEC|SOCK_NONBLOCK, 0)) < 0) {
86 log_error("Failed to allocate notification socket: %m");
87 return -errno;
88 }
89
90 zero(sa);
91 sa.sa.sa_family = AF_UNIX;
92
93 if (getpid() != 1)
94 snprintf(sa.un.sun_path, sizeof(sa.un.sun_path), NOTIFY_SOCKET_USER "/%llu", random_ull());
95 else {
96 unlink(NOTIFY_SOCKET_SYSTEM);
97 strncpy(sa.un.sun_path, NOTIFY_SOCKET_SYSTEM, sizeof(sa.un.sun_path));
98 }
99
100 if (sa.un.sun_path[0] == '@')
101 sa.un.sun_path[0] = 0;
102
103 u = umask(0111);
104 r = bind(m->notify_watch.fd, &sa.sa, offsetof(struct sockaddr_un, sun_path) + 1 + strlen(sa.un.sun_path+1));
105 umask(u);
106
107 if (r < 0) {
108 log_error("bind() failed: %m");
109 return -errno;
110 }
111
112 if (setsockopt(m->notify_watch.fd, SOL_SOCKET, SO_PASSCRED, &one, sizeof(one)) < 0) {
113 log_error("SO_PASSCRED failed: %m");
114 return -errno;
115 }
116
117 zero(ev);
118 ev.events = EPOLLIN;
119 ev.data.ptr = &m->notify_watch;
120
121 if (epoll_ctl(m->epoll_fd, EPOLL_CTL_ADD, m->notify_watch.fd, &ev) < 0)
122 return -errno;
123
124 if (sa.un.sun_path[0] == 0)
125 sa.un.sun_path[0] = '@';
126
127 if (!(m->notify_socket = strdup(sa.un.sun_path)))
128 return -ENOMEM;
129
130 log_debug("Using notification socket %s", m->notify_socket);
131
132 return 0;
133 }
134
135 static int enable_special_signals(Manager *m) {
136 int fd;
137
138 assert(m);
139
140 /* Make sure we get SIGINT on control-alt-del */
141 if (reboot(RB_DISABLE_CAD) < 0)
142 log_warning("Failed to enable ctrl-alt-del handling: %m");
143
144 if ((fd = open_terminal("/dev/tty0", O_RDWR|O_NOCTTY|O_CLOEXEC)) < 0)
145 log_warning("Failed to open /dev/tty0: %m");
146 else {
147 /* Make sure we get SIGWINCH on kbrequest */
148 if (ioctl(fd, KDSIGACCEPT, SIGWINCH) < 0)
149 log_warning("Failed to enable kbrequest handling: %s", strerror(errno));
150
151 close_nointr_nofail(fd);
152 }
153
154 return 0;
155 }
156
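/* Blocks all signals we want to handle ourselves and routes them into
 * a signalfd that is watched from the main epoll loop. When running as
 * the system manager this also enables ctrl-alt-del and kbrequest
 * handling. */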
157 static int manager_setup_signals(Manager *m) {
158 sigset_t mask;
159 struct epoll_event ev;
160 struct sigaction sa;
161
162 assert(m);
163
164 /* We are not interested in SIGSTOP and friends. */
165 zero(sa);
166 sa.sa_handler = SIG_DFL;
167 sa.sa_flags = SA_NOCLDSTOP|SA_RESTART;
168 assert_se(sigaction(SIGCHLD, &sa, NULL) == 0);
169
170 assert_se(sigemptyset(&mask) == 0);
171
172 sigset_add_many(&mask,
173 SIGCHLD, /* Child died */
174 SIGTERM, /* Reexecute daemon */
175 SIGHUP, /* Reload configuration */
176 SIGUSR1, /* systemd/upstart: reconnect to D-Bus */
177 SIGUSR2, /* systemd: dump status */
178 SIGINT, /* Kernel sends us this on control-alt-del */
179 SIGWINCH, /* Kernel sends us this on kbrequest (alt-arrowup) */
180 SIGPWR, /* Some kernel drivers and upsd send us this on power failure */
181 SIGRTMIN+0, /* systemd: start default.target */
182 SIGRTMIN+1, /* systemd: isolate rescue.target */
183 SIGRTMIN+2, /* systemd: isolate emergency.target */
184 SIGRTMIN+3, /* systemd: start halt.target */
185 SIGRTMIN+4, /* systemd: start poweroff.target */
186 SIGRTMIN+5, /* systemd: start reboot.target */
187 SIGRTMIN+6, /* systemd: start kexec.target */
188 SIGRTMIN+13, /* systemd: Immediate halt */
189 SIGRTMIN+14, /* systemd: Immediate poweroff */
190 SIGRTMIN+15, /* systemd: Immediate reboot */
191 SIGRTMIN+16, /* systemd: Immediate kexec */
192 SIGRTMIN+20, /* systemd: enable status messages */
193 SIGRTMIN+21, /* systemd: disable status messages */
194 SIGRTMIN+22, /* systemd: set log level to LOG_DEBUG */
195 SIGRTMIN+23, /* systemd: set log level to LOG_INFO */
196 SIGRTMIN+27, /* systemd: set log target to console */
197 SIGRTMIN+28, /* systemd: set log target to kmsg */
198 SIGRTMIN+29, /* systemd: set log target to syslog-or-kmsg */
199 -1);
200 assert_se(sigprocmask(SIG_SETMASK, &mask, NULL) == 0);
201
202 m->signal_watch.type = WATCH_SIGNAL;
203 if ((m->signal_watch.fd = signalfd(-1, &mask, SFD_NONBLOCK|SFD_CLOEXEC)) < 0)
204 return -errno;
205
206 zero(ev);
207 ev.events = EPOLLIN;
208 ev.data.ptr = &m->signal_watch;
209
210 if (epoll_ctl(m->epoll_fd, EPOLL_CTL_ADD, m->signal_watch.fd, &ev) < 0)
211 return -errno;
212
213 if (m->running_as == MANAGER_SYSTEM)
214 return enable_special_signals(m);
215
216 return 0;
217 }
218
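/* Allocates and initializes a new Manager: epoll loop, signal
 * handling, cgroup setup, notification socket and, if possible, the
 * bus connections. Returns 0 on success and a negative errno-style
 * value on failure. */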
219 int manager_new(ManagerRunningAs running_as, Manager **_m) {
220 Manager *m;
221 int r = -ENOMEM;
222
223 assert(_m);
224 assert(running_as >= 0);
225 assert(running_as < _MANAGER_RUNNING_AS_MAX);
226
227 if (!(m = new0(Manager, 1)))
228 return -ENOMEM;
229
230 dual_timestamp_get(&m->startup_timestamp);
231
232 m->running_as = running_as;
233 m->name_data_slot = m->subscribed_data_slot = -1;
234 m->exit_code = _MANAGER_EXIT_CODE_INVALID;
235 m->pin_cgroupfs_fd = -1;
236
237 #ifdef HAVE_AUDIT
238 m->audit_fd = -1;
239 #endif
240
241 m->signal_watch.fd = m->mount_watch.fd = m->udev_watch.fd = m->epoll_fd = m->dev_autofs_fd = m->swap_watch.fd = -1;
242 m->current_job_id = 1; /* start as id #1, so that we can leave #0 around as "null-like" value */
243
244 if (!(m->environment = strv_copy(environ)))
245 goto fail;
246
247 if (!(m->default_controllers = strv_new("cpu", NULL)))
248 goto fail;
249
250 if (!(m->units = hashmap_new(string_hash_func, string_compare_func)))
251 goto fail;
252
253 if (!(m->jobs = hashmap_new(trivial_hash_func, trivial_compare_func)))
254 goto fail;
255
256 if (!(m->transaction_jobs = hashmap_new(trivial_hash_func, trivial_compare_func)))
257 goto fail;
258
259 if (!(m->watch_pids = hashmap_new(trivial_hash_func, trivial_compare_func)))
260 goto fail;
261
262 if (!(m->cgroup_bondings = hashmap_new(string_hash_func, string_compare_func)))
263 goto fail;
264
265 if (!(m->watch_bus = hashmap_new(string_hash_func, string_compare_func)))
266 goto fail;
267
268 if ((m->epoll_fd = epoll_create1(EPOLL_CLOEXEC)) < 0)
269 goto fail;
270
271 if ((r = lookup_paths_init(&m->lookup_paths, m->running_as, true)) < 0)
272 goto fail;
273
274 if ((r = manager_setup_signals(m)) < 0)
275 goto fail;
276
277 if ((r = manager_setup_cgroup(m)) < 0)
278 goto fail;
279
280 if ((r = manager_setup_notify(m)) < 0)
281 goto fail;
282
283 /* Try to connect to the busses, if possible. */
284 if ((r = bus_init(m, running_as != MANAGER_SYSTEM)) < 0)
285 goto fail;
286
287 #ifdef HAVE_AUDIT
288 if ((m->audit_fd = audit_open()) < 0)
289 log_error("Failed to connect to audit log: %m");
290 #endif
291
292 m->taint_usr = dir_is_empty("/usr") > 0;
293
294 *_m = m;
295 return 0;
296
297 fail:
298 manager_free(m);
299 return r;
300 }
301
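/* Frees all units that have been queued up for removal. */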
302 static unsigned manager_dispatch_cleanup_queue(Manager *m) {
303 Meta *meta;
304 unsigned n = 0;
305
306 assert(m);
307
308 while ((meta = m->cleanup_queue)) {
309 assert(meta->in_cleanup_queue);
310
311 unit_free((Unit*) meta);
312 n++;
313 }
314
315 return n;
316 }
317
318 enum {
319 GC_OFFSET_IN_PATH, /* This one is on the path we were traveling */
320 GC_OFFSET_UNSURE, /* No clue */
321 GC_OFFSET_GOOD, /* We still need this unit */
322 GC_OFFSET_BAD, /* We don't need this unit anymore */
323 _GC_OFFSET_MAX
324 };
325
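/* Recursively marks a unit as GOOD (still referenced), BAD (may be
 * collected) or UNSURE (undecided for now, revisit later). The
 * per-sweep gc_marker offset lets us reuse the marker field across
 * sweeps without having to reset it on every unit first. */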
326 static void unit_gc_sweep(Unit *u, unsigned gc_marker) {
327 Iterator i;
328 Unit *other;
329 bool is_bad;
330
331 assert(u);
332
333 if (u->meta.gc_marker == gc_marker + GC_OFFSET_GOOD ||
334 u->meta.gc_marker == gc_marker + GC_OFFSET_BAD ||
335 u->meta.gc_marker == gc_marker + GC_OFFSET_IN_PATH)
336 return;
337
338 if (u->meta.in_cleanup_queue)
339 goto bad;
340
341 if (unit_check_gc(u))
342 goto good;
343
344 u->meta.gc_marker = gc_marker + GC_OFFSET_IN_PATH;
345
346 is_bad = true;
347
348 SET_FOREACH(other, u->meta.dependencies[UNIT_REFERENCED_BY], i) {
349 unit_gc_sweep(other, gc_marker);
350
351 if (other->meta.gc_marker == gc_marker + GC_OFFSET_GOOD)
352 goto good;
353
354 if (other->meta.gc_marker != gc_marker + GC_OFFSET_BAD)
355 is_bad = false;
356 }
357
358 if (is_bad)
359 goto bad;
360
361 /* We were unable to find anything out about this entry, so
362 * let's investigate it later */
363 u->meta.gc_marker = gc_marker + GC_OFFSET_UNSURE;
364 unit_add_to_gc_queue(u);
365 return;
366
367 bad:
368 /* We definitely know that this one is not useful anymore, so
369 * let's mark it for deletion */
370 u->meta.gc_marker = gc_marker + GC_OFFSET_BAD;
371 unit_add_to_cleanup_queue(u);
372 return;
373
374 good:
375 u->meta.gc_marker = gc_marker + GC_OFFSET_GOOD;
376 }
377
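/* Runs a GC sweep over the queued units, but only once enough units
 * have accumulated or enough time has passed since the first one was
 * enqueued. */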
378 static unsigned manager_dispatch_gc_queue(Manager *m) {
379 Meta *meta;
380 unsigned n = 0;
381 unsigned gc_marker;
382
383 assert(m);
384
385 if ((m->n_in_gc_queue < GC_QUEUE_ENTRIES_MAX) &&
386 (m->gc_queue_timestamp <= 0 ||
387 (m->gc_queue_timestamp + GC_QUEUE_USEC_MAX) > now(CLOCK_MONOTONIC)))
388 return 0;
389
390 log_debug("Running GC...");
391
392 m->gc_marker += _GC_OFFSET_MAX;
393 if (m->gc_marker + _GC_OFFSET_MAX <= _GC_OFFSET_MAX)
394 m->gc_marker = 1;
395
396 gc_marker = m->gc_marker;
397
398 while ((meta = m->gc_queue)) {
399 assert(meta->in_gc_queue);
400
401 unit_gc_sweep((Unit*) meta, gc_marker);
402
403 LIST_REMOVE(Meta, gc_queue, m->gc_queue, meta);
404 meta->in_gc_queue = false;
405
406 n++;
407
408 if (meta->gc_marker == gc_marker + GC_OFFSET_BAD ||
409 meta->gc_marker == gc_marker + GC_OFFSET_UNSURE) {
410 log_debug("Collecting %s", meta->id);
411 meta->gc_marker = gc_marker + GC_OFFSET_BAD;
412 unit_add_to_cleanup_queue((Unit*) meta);
413 }
414 }
415
416 m->n_in_gc_queue = 0;
417 m->gc_queue_timestamp = 0;
418
419 return n;
420 }
421
422 static void manager_clear_jobs_and_units(Manager *m) {
423 Job *j;
424 Unit *u;
425
426 assert(m);
427
428 while ((j = hashmap_first(m->transaction_jobs)))
429 job_free(j);
430
431 while ((u = hashmap_first(m->units)))
432 unit_free(u);
433
434 manager_dispatch_cleanup_queue(m);
435
436 assert(!m->load_queue);
437 assert(!m->run_queue);
438 assert(!m->dbus_unit_queue);
439 assert(!m->dbus_job_queue);
440 assert(!m->cleanup_queue);
441 assert(!m->gc_queue);
442
443 assert(hashmap_isempty(m->transaction_jobs));
444 assert(hashmap_isempty(m->jobs));
445 assert(hashmap_isempty(m->units));
446 }
447
448 void manager_free(Manager *m) {
449 UnitType c;
450
451 assert(m);
452
453 manager_clear_jobs_and_units(m);
454
455 for (c = 0; c < _UNIT_TYPE_MAX; c++)
456 if (unit_vtable[c]->shutdown)
457 unit_vtable[c]->shutdown(m);
458
459 /* If we reexecute ourselves, we keep the root cgroup
460 * around */
461 manager_shutdown_cgroup(m, m->exit_code != MANAGER_REEXECUTE);
462
463 manager_undo_generators(m);
464
465 bus_done(m);
466
467 hashmap_free(m->units);
468 hashmap_free(m->jobs);
469 hashmap_free(m->transaction_jobs);
470 hashmap_free(m->watch_pids);
471 hashmap_free(m->watch_bus);
472
473 if (m->epoll_fd >= 0)
474 close_nointr_nofail(m->epoll_fd);
475 if (m->signal_watch.fd >= 0)
476 close_nointr_nofail(m->signal_watch.fd);
477 if (m->notify_watch.fd >= 0)
478 close_nointr_nofail(m->notify_watch.fd);
479
480 #ifdef HAVE_AUDIT
481 if (m->audit_fd >= 0)
482 audit_close(m->audit_fd);
483 #endif
484
485 free(m->notify_socket);
486
487 lookup_paths_free(&m->lookup_paths);
488 strv_free(m->environment);
489
490 strv_free(m->default_controllers);
491
492 hashmap_free(m->cgroup_bondings);
493 set_free_free(m->unit_path_cache);
494
495 free(m);
496 }
497
498 int manager_enumerate(Manager *m) {
499 int r = 0, q;
500 UnitType c;
501
502 assert(m);
503
504 /* Let's ask every type to load all units from disk/kernel
505 * that it might know */
506 for (c = 0; c < _UNIT_TYPE_MAX; c++)
507 if (unit_vtable[c]->enumerate)
508 if ((q = unit_vtable[c]->enumerate(m)) < 0)
509 r = q;
510
511 manager_dispatch_load_queue(m);
512 return r;
513 }
514
515 int manager_coldplug(Manager *m) {
516 int r = 0, q;
517 Iterator i;
518 Unit *u;
519 char *k;
520
521 assert(m);
522
523 /* Then, let's set up their initial state. */
524 HASHMAP_FOREACH_KEY(u, k, m->units, i) {
525
526 /* ignore aliases */
527 if (u->meta.id != k)
528 continue;
529
530 if ((q = unit_coldplug(u)) < 0)
531 r = q;
532 }
533
534 return r;
535 }
536
537 static void manager_build_unit_path_cache(Manager *m) {
538 char **i;
539 DIR *d = NULL;
540 int r;
541
542 assert(m);
543
544 set_free_free(m->unit_path_cache);
545
546 if (!(m->unit_path_cache = set_new(string_hash_func, string_compare_func))) {
547 log_error("Failed to allocate unit path cache.");
548 return;
549 }
550
551 /* This simply builds a list of files we know exist, so that
552 * we don't always have to go to disk */
553
554 STRV_FOREACH(i, m->lookup_paths.unit_path) {
555 struct dirent *de;
556
557 if (!(d = opendir(*i))) {
558 log_error("Failed to open directory: %m");
559 continue;
560 }
561
562 while ((de = readdir(d))) {
563 char *p;
564
565 if (ignore_file(de->d_name))
566 continue;
567
568 p = join(streq(*i, "/") ? "" : *i, "/", de->d_name, NULL);
569 if (!p) {
570 r = -ENOMEM;
571 goto fail;
572 }
573
574 if ((r = set_put(m->unit_path_cache, p)) < 0) {
575 free(p);
576 goto fail;
577 }
578 }
579
580 closedir(d);
581 d = NULL;
582 }
583
584 return;
585
586 fail:
587 log_error("Failed to build unit path cache: %s", strerror(-r));
588
589 set_free_free(m->unit_path_cache);
590 m->unit_path_cache = NULL;
591
592 if (d)
593 closedir(d);
594 }
595
596 int manager_startup(Manager *m, FILE *serialization, FDSet *fds) {
597 int r, q;
598
599 assert(m);
600
601 manager_run_generators(m);
602
603 manager_build_unit_path_cache(m);
604
605 /* If we will deserialize, make sure that during enumeration
606 * this is already known, so we increase the counter here
607 * already */
608 if (serialization)
609 m->n_reloading ++;
610
611 /* First, enumerate what we can from all config files */
612 r = manager_enumerate(m);
613
614 /* Second, deserialize if there is something to deserialize */
615 if (serialization)
616 if ((q = manager_deserialize(m, serialization, fds)) < 0)
617 r = q;
618
619 /* Third, fire things up! */
620 if ((q = manager_coldplug(m)) < 0)
621 r = q;
622
623 if (serialization) {
624 assert(m->n_reloading > 0);
625 m->n_reloading --;
626 }
627
628 return r;
629 }
630
631 static void transaction_delete_job(Manager *m, Job *j, bool delete_dependencies) {
632 assert(m);
633 assert(j);
634
635 /* Deletes one job from the transaction */
636
637 manager_transaction_unlink_job(m, j, delete_dependencies);
638
639 if (!j->installed)
640 job_free(j);
641 }
642
643 static void transaction_delete_unit(Manager *m, Unit *u) {
644 Job *j;
645
646 /* Deletes all jobs associated with a certain unit from the
647 * transaction */
648
649 while ((j = hashmap_get(m->transaction_jobs, u)))
650 transaction_delete_job(m, j, true);
651 }
652
653 static void transaction_clean_dependencies(Manager *m) {
654 Iterator i;
655 Job *j;
656
657 assert(m);
658
659 /* Drops all dependencies of all installed jobs */
660
661 HASHMAP_FOREACH(j, m->jobs, i) {
662 while (j->subject_list)
663 job_dependency_free(j->subject_list);
664 while (j->object_list)
665 job_dependency_free(j->object_list);
666 }
667
668 assert(!m->transaction_anchor);
669 }
670
671 static void transaction_abort(Manager *m) {
672 Job *j;
673
674 assert(m);
675
676 while ((j = hashmap_first(m->transaction_jobs)))
677 if (j->installed)
678 transaction_delete_job(m, j, true);
679 else
680 job_free(j);
681
682 assert(hashmap_isempty(m->transaction_jobs));
683
684 transaction_clean_dependencies(m);
685 }
686
687 static void transaction_find_jobs_that_matter_to_anchor(Manager *m, Job *j, unsigned generation) {
688 JobDependency *l;
689
690 assert(m);
691
692 /* A recursive sweep through the graph that marks all units
693 * that matter to the anchor job, i.e. are directly or
694 * indirectly a dependency of the anchor job via paths that
695 * are fully marked as mattering. */
696
697 if (j)
698 l = j->subject_list;
699 else
700 l = m->transaction_anchor;
701
702 LIST_FOREACH(subject, l, l) {
703
704 /* This link does not matter */
705 if (!l->matters)
706 continue;
707
708 /* This unit has already been marked */
709 if (l->object->generation == generation)
710 continue;
711
712 l->object->matters_to_anchor = true;
713 l->object->generation = generation;
714
715 transaction_find_jobs_that_matter_to_anchor(m, l->object, generation);
716 }
717 }
718
719 static void transaction_merge_and_delete_job(Manager *m, Job *j, Job *other, JobType t) {
720 JobDependency *l, *last;
721
722 assert(j);
723 assert(other);
724 assert(j->unit == other->unit);
725 assert(!j->installed);
726
727 /* Merges 'other' into 'j' and then deletes 'other'. */
728
729 j->type = t;
730 j->state = JOB_WAITING;
731 j->override = j->override || other->override;
732
733 j->matters_to_anchor = j->matters_to_anchor || other->matters_to_anchor;
734
735 /* Patch us in as new owner of the JobDependency objects */
736 last = NULL;
737 LIST_FOREACH(subject, l, other->subject_list) {
738 assert(l->subject == other);
739 l->subject = j;
740 last = l;
741 }
742
743 /* Merge both lists */
744 if (last) {
745 last->subject_next = j->subject_list;
746 if (j->subject_list)
747 j->subject_list->subject_prev = last;
748 j->subject_list = other->subject_list;
749 }
750
751 /* Patch us in as new owner of the JobDependency objects */
752 last = NULL;
753 LIST_FOREACH(object, l, other->object_list) {
754 assert(l->object == other);
755 l->object = j;
756 last = l;
757 }
758
759 /* Merge both lists */
760 if (last) {
761 last->object_next = j->object_list;
762 if (j->object_list)
763 j->object_list->object_prev = last;
764 j->object_list = other->object_list;
765 }
766
767 /* Kill the other job */
768 other->subject_list = NULL;
769 other->object_list = NULL;
770 transaction_delete_job(m, other, true);
771 }

772 static bool job_is_conflicted_by(Job *j) {
773 JobDependency *l;
774
775 assert(j);
776
777 /* Returns true if this job is pulled in by at least one
778 * ConflictedBy dependency. */
779
780 LIST_FOREACH(object, l, j->object_list)
781 if (l->conflicts)
782 return true;
783
784 return false;
785 }
786
787 static int delete_one_unmergeable_job(Manager *m, Job *j) {
788 Job *k;
789
790 assert(j);
791
792 /* Tries to delete one item in the linked list
793 * j->transaction_next->transaction_next->... that conflicts
794 * with another one, in an attempt to make an inconsistent
795 * transaction work. */
796
797 /* We rely here on the fact that if a merged with b does not
798 * merge with c, then neither a nor b merges with c. */
799 LIST_FOREACH(transaction, j, j)
800 LIST_FOREACH(transaction, k, j->transaction_next) {
801 Job *d;
802
803 /* Is this one mergeable? Then skip it */
804 if (job_type_is_mergeable(j->type, k->type))
805 continue;
806
807 /* Ok, we found two that conflict, let's see if we can
808 * drop one of them */
809 if (!j->matters_to_anchor && !k->matters_to_anchor) {
810
811 /* Both jobs don't matter, so let's
812 * find the one that is smarter to
813 * remove. Let's think positive and
814 * rather remove stops than starts --
815 * except if something is being
816 * stopped because it is conflicted by
817 * another unit in which case we
818 * rather remove the start. */
819
820 log_debug("Looking at job %s/%s conflicted_by=%s", j->unit->meta.id, job_type_to_string(j->type), yes_no(j->type == JOB_STOP && job_is_conflicted_by(j)));
821 log_debug("Looking at job %s/%s conflicted_by=%s", k->unit->meta.id, job_type_to_string(k->type), yes_no(k->type == JOB_STOP && job_is_conflicted_by(k)));
822
823 if (j->type == JOB_STOP) {
824
825 if (job_is_conflicted_by(j))
826 d = k;
827 else
828 d = j;
829
830 } else if (k->type == JOB_STOP) {
831
832 if (job_is_conflicted_by(k))
833 d = j;
834 else
835 d = k;
836 } else
837 d = j;
838
839 } else if (!j->matters_to_anchor)
840 d = j;
841 else if (!k->matters_to_anchor)
842 d = k;
843 else
844 return -ENOEXEC;
845
846 /* Ok, we can drop one, so let's do so. */
847 log_debug("Fixing conflicting jobs by deleting job %s/%s", d->unit->meta.id, job_type_to_string(d->type));
848 transaction_delete_job(m, d, true);
849 return 0;
850 }
851
852 return -EINVAL;
853 }
854
855 static int transaction_merge_jobs(Manager *m, DBusError *e) {
856 Job *j;
857 Iterator i;
858 int r;
859
860 assert(m);
861
862 /* First step, check whether any of the jobs for one specific
863 * task conflict. If so, try to drop one of them. */
864 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
865 JobType t;
866 Job *k;
867
868 t = j->type;
869 LIST_FOREACH(transaction, k, j->transaction_next) {
870 if (job_type_merge(&t, k->type) >= 0)
871 continue;
872
873 /* OK, we could not merge all jobs for this
874 * action. Let's see if we can get rid of one
875 * of them */
876
877 if ((r = delete_one_unmergeable_job(m, j)) >= 0)
878 /* Ok, we managed to drop one, now
879 * let's ask our callers to call us
880 * again after garbage collecting */
881 return -EAGAIN;
882
883 /* We couldn't merge anything. Failure */
884 dbus_set_error(e, BUS_ERROR_TRANSACTION_JOBS_CONFLICTING, "Transaction contains conflicting jobs '%s' and '%s' for %s. Probably contradicting requirement dependencies configured.",
885 job_type_to_string(t), job_type_to_string(k->type), k->unit->meta.id);
886 return r;
887 }
888 }
889
890 /* Second step, merge the jobs. */
891 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
892 JobType t = j->type;
893 Job *k;
894
895 /* Merge all transactions */
896 LIST_FOREACH(transaction, k, j->transaction_next)
897 assert_se(job_type_merge(&t, k->type) == 0);
898
899 /* If an active job is mergeable, merge it too */
900 if (j->unit->meta.job)
901 job_type_merge(&t, j->unit->meta.job->type); /* Might fail. Which is OK */
902
903 while ((k = j->transaction_next)) {
904 if (j->installed) {
905 transaction_merge_and_delete_job(m, k, j, t);
906 j = k;
907 } else
908 transaction_merge_and_delete_job(m, j, k, t);
909 }
910
911 if (j->unit->meta.job && !j->installed)
912 transaction_merge_and_delete_job(m, j, j->unit->meta.job, t);
913
914 assert(!j->transaction_next);
915 assert(!j->transaction_prev);
916 }
917
918 return 0;
919 }
920
921 static void transaction_drop_redundant(Manager *m) {
922 bool again;
923
924 assert(m);
925
926 /* Goes through the transaction and removes all jobs that are
927 * a noop */
928
929 do {
930 Job *j;
931 Iterator i;
932
933 again = false;
934
935 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
936 bool changes_something = false;
937 Job *k;
938
939 LIST_FOREACH(transaction, k, j) {
940
941 if (!job_is_anchor(k) &&
942 (k->installed || job_type_is_redundant(k->type, unit_active_state(k->unit))) &&
943 (!k->unit->meta.job || !job_type_is_conflicting(k->type, k->unit->meta.job->type)))
944 continue;
945
946 changes_something = true;
947 break;
948 }
949
950 if (changes_something)
951 continue;
952
953 /* log_debug("Found redundant job %s/%s, dropping.", j->unit->meta.id, job_type_to_string(j->type)); */
954 transaction_delete_job(m, j, false);
955 again = true;
956 break;
957 }
958
959 } while (again);
960 }
961
962 static bool unit_matters_to_anchor(Unit *u, Job *j) {
963 assert(u);
964 assert(!j->transaction_prev);
965
966 /* Checks whether at least one of the jobs for this unit
967 * matters to the anchor. */
968
969 LIST_FOREACH(transaction, j, j)
970 if (j->matters_to_anchor)
971 return true;
972
973 return false;
974 }
975
976 static int transaction_verify_order_one(Manager *m, Job *j, Job *from, unsigned generation, DBusError *e) {
977 Iterator i;
978 Unit *u;
979 int r;
980
981 assert(m);
982 assert(j);
983 assert(!j->transaction_prev);
984
985 /* Does a recursive sweep through the ordering graph, looking
986 * for a cycle. If we find a cycle we try to break it. */
987
988 /* Have we seen this before? */
989 if (j->generation == generation) {
990 Job *k, *delete;
991
992 /* If the marker is NULL we have been here already and
993 * decided the job was loop-free from here. Hence
994 * shortcut things and return right-away. */
995 if (!j->marker)
996 return 0;
997
998 /* So, the marker is not NULL and we already have been
999 * here. We have a cycle. Let's try to break it. We go
1000 * backwards in our path and try to find a suitable
1001 * job to remove. We use the marker to find our way
1002 * back, since, smart as we are, we stored our way back
1003 * in there. */
1004 log_warning("Found ordering cycle on %s/%s", j->unit->meta.id, job_type_to_string(j->type));
1005
1006 delete = NULL;
1007 for (k = from; k; k = ((k->generation == generation && k->marker != k) ? k->marker : NULL)) {
1008
1009 log_info("Walked on cycle path to %s/%s", k->unit->meta.id, job_type_to_string(k->type));
1010
1011 if (!delete &&
1012 !k->installed &&
1013 !unit_matters_to_anchor(k->unit, k)) {
1014 /* Ok, we can drop this one, so let's
1015 * do so. */
1016 delete = k;
1017 }
1018
1019 /* Check if this in fact was the beginning of
1020 * the cycle */
1021 if (k == j)
1022 break;
1023 }
1024
1025
1026 if (delete) {
1027 log_warning("Breaking ordering cycle by deleting job %s/%s", delete->unit->meta.id, job_type_to_string(delete->type));
1028 transaction_delete_unit(m, delete->unit);
1029 return -EAGAIN;
1030 }
1031
1032 log_error("Unable to break cycle");
1033
1034 dbus_set_error(e, BUS_ERROR_TRANSACTION_ORDER_IS_CYCLIC, "Transaction order is cyclic. See system logs for details.");
1035 return -ENOEXEC;
1036 }
1037
1038 /* Make the marker point to where we come from, so that we can
1039 * find our way backwards if we want to break a cycle. We use
1040 * a special marker for the beginning: we point to
1041 * ourselves. */
1042 j->marker = from ? from : j;
1043 j->generation = generation;
1044
1045 /* We assume that the dependencies are bidirectional, and
1046 * hence can ignore UNIT_AFTER */
1047 SET_FOREACH(u, j->unit->meta.dependencies[UNIT_BEFORE], i) {
1048 Job *o;
1049
1050 /* Is there a job for this unit? */
1051 if (!(o = hashmap_get(m->transaction_jobs, u)))
1052
1053 /* Ok, there is no job for this in the
1054 * transaction, but maybe there is already one
1055 * running? */
1056 if (!(o = u->meta.job))
1057 continue;
1058
1059 if ((r = transaction_verify_order_one(m, o, j, generation, e)) < 0)
1060 return r;
1061 }
1062
1063 /* Ok, let's backtrack, and remember that this entry is not on
1064 * our path anymore. */
1065 j->marker = NULL;
1066
1067 return 0;
1068 }
1069
1070 static int transaction_verify_order(Manager *m, unsigned *generation, DBusError *e) {
1071 Job *j;
1072 int r;
1073 Iterator i;
1074 unsigned g;
1075
1076 assert(m);
1077 assert(generation);
1078
1079 /* Check if the ordering graph is cyclic. If it is, try to fix
1080 * that up by dropping one of the jobs. */
1081
1082 g = (*generation)++;
1083
1084 HASHMAP_FOREACH(j, m->transaction_jobs, i)
1085 if ((r = transaction_verify_order_one(m, j, NULL, g, e)) < 0)
1086 return r;
1087
1088 return 0;
1089 }
1090
1091 static void transaction_collect_garbage(Manager *m) {
1092 bool again;
1093
1094 assert(m);
1095
1096 /* Drop jobs that are not required by any other job */
1097
1098 do {
1099 Iterator i;
1100 Job *j;
1101
1102 again = false;
1103
1104 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
1105 if (j->object_list) {
1106 /* log_debug("Keeping job %s/%s because of %s/%s", */
1107 /* j->unit->meta.id, job_type_to_string(j->type), */
1108 /* j->object_list->subject ? j->object_list->subject->unit->meta.id : "root", */
1109 /* j->object_list->subject ? job_type_to_string(j->object_list->subject->type) : "root"); */
1110 continue;
1111 }
1112
1113 /* log_debug("Garbage collecting job %s/%s", j->unit->meta.id, job_type_to_string(j->type)); */
1114 transaction_delete_job(m, j, true);
1115 again = true;
1116 break;
1117 }
1118
1119 } while (again);
1120 }
1121
1122 static int transaction_is_destructive(Manager *m, DBusError *e) {
1123 Iterator i;
1124 Job *j;
1125
1126 assert(m);
1127
1128 /* Checks whether applying this transaction means that
1129 * existing jobs would be replaced */
1130
1131 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
1132
1133 /* Assume merged */
1134 assert(!j->transaction_prev);
1135 assert(!j->transaction_next);
1136
1137 if (j->unit->meta.job &&
1138 j->unit->meta.job != j &&
1139 !job_type_is_superset(j->type, j->unit->meta.job->type)) {
1140
1141 dbus_set_error(e, BUS_ERROR_TRANSACTION_IS_DESTRUCTIVE, "Transaction is destructive.");
1142 return -EEXIST;
1143 }
1144 }
1145
1146 return 0;
1147 }
1148
1149 static void transaction_minimize_impact(Manager *m) {
1150 bool again;
1151 assert(m);
1152
1153 /* Drops all unnecessary jobs that reverse already active jobs
1154 * or that stop a running service. */
1155
1156 do {
1157 Job *j;
1158 Iterator i;
1159
1160 again = false;
1161
1162 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
1163 LIST_FOREACH(transaction, j, j) {
1164 bool stops_running_service, changes_existing_job;
1165
1166 /* If it matters, we shouldn't drop it */
1167 if (j->matters_to_anchor)
1168 continue;
1169
1170 /* Would this stop a running service?
1171 * Would this change an existing job?
1172 * If so, let's drop this entry */
1173
1174 stops_running_service =
1175 j->type == JOB_STOP && UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(j->unit));
1176
1177 changes_existing_job =
1178 j->unit->meta.job &&
1179 job_type_is_conflicting(j->type, j->unit->meta.job->type);
1180
1181 if (!stops_running_service && !changes_existing_job)
1182 continue;
1183
1184 if (stops_running_service)
1185 log_debug("%s/%s would stop a running service.", j->unit->meta.id, job_type_to_string(j->type));
1186
1187 if (changes_existing_job)
1188 log_debug("%s/%s would change existing job.", j->unit->meta.id, job_type_to_string(j->type));
1189
1190 /* Ok, let's get rid of this */
1191 log_debug("Deleting %s/%s to minimize impact.", j->unit->meta.id, job_type_to_string(j->type));
1192
1193 transaction_delete_job(m, j, true);
1194 again = true;
1195 break;
1196 }
1197
1198 if (again)
1199 break;
1200 }
1201
1202 } while (again);
1203 }
1204
1205 static int transaction_apply(Manager *m, JobMode mode) {
1206 Iterator i;
1207 Job *j;
1208 int r;
1209
1210 /* Moves the transaction jobs to the set of active jobs */
1211
1212 if (mode == JOB_ISOLATE) {
1213
1214 /* When isolating first kill all installed jobs which
1215 * aren't part of the new transaction */
1216 HASHMAP_FOREACH(j, m->jobs, i) {
1217 assert(j->installed);
1218
1219 if (hashmap_get(m->transaction_jobs, j->unit))
1220 continue;
1221
1222 job_finish_and_invalidate(j, JOB_CANCELED);
1223 }
1224 }
1225
1226 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
1227 /* Assume merged */
1228 assert(!j->transaction_prev);
1229 assert(!j->transaction_next);
1230
1231 if (j->installed)
1232 continue;
1233
1234 if ((r = hashmap_put(m->jobs, UINT32_TO_PTR(j->id), j)) < 0)
1235 goto rollback;
1236 }
1237
1238 while ((j = hashmap_steal_first(m->transaction_jobs))) {
1239 if (j->installed) {
1240 /* log_debug("Skipping already installed job %s/%s as %u", j->unit->meta.id, job_type_to_string(j->type), (unsigned) j->id); */
1241 continue;
1242 }
1243
1244 if (j->unit->meta.job)
1245 job_free(j->unit->meta.job);
1246
1247 j->unit->meta.job = j;
1248 j->installed = true;
1249 m->n_installed_jobs ++;
1250
1251 /* We're fully installed. Now let's free data we don't
1252 * need anymore. */
1253
1254 assert(!j->transaction_next);
1255 assert(!j->transaction_prev);
1256
1257 job_add_to_run_queue(j);
1258 job_add_to_dbus_queue(j);
1259 job_start_timer(j);
1260
1261 log_debug("Installed new job %s/%s as %u", j->unit->meta.id, job_type_to_string(j->type), (unsigned) j->id);
1262 }
1263
1264 /* As last step, kill all remaining job dependencies. */
1265 transaction_clean_dependencies(m);
1266
1267 return 0;
1268
1269 rollback:
1270
1271 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
1272 if (j->installed)
1273 continue;
1274
1275 hashmap_remove(m->jobs, UINT32_TO_PTR(j->id));
1276 }
1277
1278 return r;
1279 }
1280
1281 static int transaction_activate(Manager *m, JobMode mode, DBusError *e) {
1282 int r;
1283 unsigned generation = 1;
1284
1285 assert(m);
1286
1287 /* This applies the changes recorded in transaction_jobs to
1288 * the actual list of jobs, if possible. */
1289
1290 /* First step: figure out which jobs matter */
1291 transaction_find_jobs_that_matter_to_anchor(m, NULL, generation++);
1292
1293 /* Second step: Try not to stop any running services if
1294 * we don't have to. Don't try to reverse running
1295 * jobs if we don't have to. */
1296 if (mode == JOB_FAIL)
1297 transaction_minimize_impact(m);
1298
1299 /* Third step: Drop redundant jobs */
1300 transaction_drop_redundant(m);
1301
1302 for (;;) {
1303 /* Fourth step: Let's remove unneeded jobs that might
1304 * be lurking. */
1305 if (mode != JOB_ISOLATE)
1306 transaction_collect_garbage(m);
1307
1308 /* Fifth step: verify order makes sense and correct
1309 * cycles if necessary and possible */
1310 if ((r = transaction_verify_order(m, &generation, e)) >= 0)
1311 break;
1312
1313 if (r != -EAGAIN) {
1314 log_warning("Requested transaction contains an unfixable cyclic ordering dependency: %s", bus_error(e, r));
1315 goto rollback;
1316 }
1317
1318 /* Let's see if the resulting transaction ordering
1319 * graph is still cyclic... */
1320 }
1321
1322 for (;;) {
1323 /* Sixth step: let's drop unmergeable entries if
1324 * necessary and possible, merge entries we can
1325 * merge */
1326 if ((r = transaction_merge_jobs(m, e)) >= 0)
1327 break;
1328
1329 if (r != -EAGAIN) {
1330 log_warning("Requested transaction contains unmergeable jobs: %s", bus_error(e, r));
1331 goto rollback;
1332 }
1333
1334 /* Seventh step: an entry got dropped, let's garbage
1335 * collect its dependencies. */
1336 if (mode != JOB_ISOLATE)
1337 transaction_collect_garbage(m);
1338
1339 /* Let's see if the resulting transaction still has
1340 * unmergeable entries ... */
1341 }
1342
1343 /* Eighth step: Drop redundant jobs again, if the merging now allows us to drop more. */
1344 transaction_drop_redundant(m);
1345
1346 /* Ninth step: check whether we can actually apply this */
1347 if (mode == JOB_FAIL)
1348 if ((r = transaction_is_destructive(m, e)) < 0) {
1349 log_notice("Requested transaction contradicts existing jobs: %s", bus_error(e, r));
1350 goto rollback;
1351 }
1352
1353 /* Tenth step: apply changes */
1354 if ((r = transaction_apply(m, mode)) < 0) {
1355 log_warning("Failed to apply transaction: %s", strerror(-r));
1356 goto rollback;
1357 }
1358
1359 assert(hashmap_isempty(m->transaction_jobs));
1360 assert(!m->transaction_anchor);
1361
1362 return 0;
1363
1364 rollback:
1365 transaction_abort(m);
1366 return r;
1367 }
1368
1369 static Job* transaction_add_one_job(Manager *m, JobType type, Unit *unit, bool override, bool *is_new) {
1370 Job *j, *f;
1371
1372 assert(m);
1373 assert(unit);
1374
1375 /* Looks for an existing prospective job and returns that. If
1376 * it doesn't exist it is created and added to the prospective
1377 * jobs list. */
1378
1379 f = hashmap_get(m->transaction_jobs, unit);
1380
1381 LIST_FOREACH(transaction, j, f) {
1382 assert(j->unit == unit);
1383
1384 if (j->type == type) {
1385 if (is_new)
1386 *is_new = false;
1387 return j;
1388 }
1389 }
1390
1391 if (unit->meta.job && unit->meta.job->type == type)
1392 j = unit->meta.job;
1393 else if (!(j = job_new(m, type, unit)))
1394 return NULL;
1395
1396 j->generation = 0;
1397 j->marker = NULL;
1398 j->matters_to_anchor = false;
1399 j->override = override;
1400
1401 LIST_PREPEND(Job, transaction, f, j);
1402
1403 if (hashmap_replace(m->transaction_jobs, unit, f) < 0) {
1404 job_free(j);
1405 return NULL;
1406 }
1407
1408 if (is_new)
1409 *is_new = true;
1410
1411 /* log_debug("Added job %s/%s to transaction.", unit->meta.id, job_type_to_string(type)); */
1412
1413 return j;
1414 }
1415
1416 void manager_transaction_unlink_job(Manager *m, Job *j, bool delete_dependencies) {
1417 assert(m);
1418 assert(j);
1419
1420 if (j->transaction_prev)
1421 j->transaction_prev->transaction_next = j->transaction_next;
1422 else if (j->transaction_next)
1423 hashmap_replace(m->transaction_jobs, j->unit, j->transaction_next);
1424 else
1425 hashmap_remove_value(m->transaction_jobs, j->unit, j);
1426
1427 if (j->transaction_next)
1428 j->transaction_next->transaction_prev = j->transaction_prev;
1429
1430 j->transaction_prev = j->transaction_next = NULL;
1431
1432 while (j->subject_list)
1433 job_dependency_free(j->subject_list);
1434
1435 while (j->object_list) {
1436 Job *other = j->object_list->matters ? j->object_list->subject : NULL;
1437
1438 job_dependency_free(j->object_list);
1439
1440 if (other && delete_dependencies) {
1441 log_debug("Deleting job %s/%s as dependency of job %s/%s",
1442 other->unit->meta.id, job_type_to_string(other->type),
1443 j->unit->meta.id, job_type_to_string(j->type));
1444 transaction_delete_job(m, other, delete_dependencies);
1445 }
1446 }
1447 }
1448
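/* Adds a job for the given unit to the transaction and recursively
 * pulls in jobs for its requirement, wants, requisite and conflicts
 * dependencies. */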
1449 static int transaction_add_job_and_dependencies(
1450 Manager *m,
1451 JobType type,
1452 Unit *unit,
1453 Job *by,
1454 bool matters,
1455 bool override,
1456 bool conflicts,
1457 bool ignore_requirements,
1458 bool ignore_order,
1459 DBusError *e,
1460 Job **_ret) {
1461 Job *ret;
1462 Iterator i;
1463 Unit *dep;
1464 int r;
1465 bool is_new;
1466
1467 assert(m);
1468 assert(type < _JOB_TYPE_MAX);
1469 assert(unit);
1470
1471 /* log_debug("Pulling in %s/%s from %s/%s", */
1472 /* unit->meta.id, job_type_to_string(type), */
1473 /* by ? by->unit->meta.id : "NA", */
1474 /* by ? job_type_to_string(by->type) : "NA"); */
1475
1476 if (unit->meta.load_state != UNIT_LOADED &&
1477 unit->meta.load_state != UNIT_ERROR &&
1478 unit->meta.load_state != UNIT_MASKED) {
1479 dbus_set_error(e, BUS_ERROR_LOAD_FAILED, "Unit %s is not loaded properly.", unit->meta.id);
1480 return -EINVAL;
1481 }
1482
1483 if (type != JOB_STOP && unit->meta.load_state == UNIT_ERROR) {
1484 dbus_set_error(e, BUS_ERROR_LOAD_FAILED,
1485 "Unit %s failed to load: %s. "
1486 "See system logs and 'systemctl status %s' for details.",
1487 unit->meta.id,
1488 strerror(-unit->meta.load_error),
1489 unit->meta.id);
1490 return -EINVAL;
1491 }
1492
1493 if (type != JOB_STOP && unit->meta.load_state == UNIT_MASKED) {
1494 dbus_set_error(e, BUS_ERROR_MASKED, "Unit %s is masked.", unit->meta.id);
1495 return -EINVAL;
1496 }
1497
1498 if (!unit_job_is_applicable(unit, type)) {
1499 dbus_set_error(e, BUS_ERROR_JOB_TYPE_NOT_APPLICABLE, "Job type %s is not applicable for unit %s.", job_type_to_string(type), unit->meta.id);
1500 return -EBADR;
1501 }
1502
1503 /* First add the job. */
1504 if (!(ret = transaction_add_one_job(m, type, unit, override, &is_new)))
1505 return -ENOMEM;
1506
1507 ret->ignore_order = ret->ignore_order || ignore_order;
1508
1509 /* Then, add a link to the job. */
1510 if (!job_dependency_new(by, ret, matters, conflicts))
1511 return -ENOMEM;
1512
1513 if (is_new && !ignore_requirements) {
1514 Set *following;
1515
1516 /* If we are following some other unit, make sure we
1517 * add all dependencies of everybody following. */
1518 if (unit_following_set(ret->unit, &following) > 0) {
1519 SET_FOREACH(dep, following, i)
1520 if ((r = transaction_add_job_and_dependencies(m, type, dep, ret, false, override, false, false, ignore_order, e, NULL)) < 0) {
1521 log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->meta.id, bus_error(e, r));
1522
1523 if (e)
1524 dbus_error_free(e);
1525 }
1526
1527 set_free(following);
1528 }
1529
1530 /* Finally, recursively add in all dependencies. */
1531 if (type == JOB_START || type == JOB_RELOAD_OR_START) {
1532 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_REQUIRES], i)
1533 if ((r = transaction_add_job_and_dependencies(m, JOB_START, dep, ret, true, override, false, false, ignore_order, e, NULL)) < 0) {
1534 if (r != -EBADR)
1535 goto fail;
1536
1537 if (e)
1538 dbus_error_free(e);
1539 }
1540
1541 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_BIND_TO], i)
1542 if ((r = transaction_add_job_and_dependencies(m, JOB_START, dep, ret, true, override, false, false, ignore_order, e, NULL)) < 0) {
1543
1544 if (r != -EBADR)
1545 goto fail;
1546
1547 if (e)
1548 dbus_error_free(e);
1549 }
1550
1551 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_REQUIRES_OVERRIDABLE], i)
1552 if ((r = transaction_add_job_and_dependencies(m, JOB_START, dep, ret, !override, override, false, false, ignore_order, e, NULL)) < 0) {
1553 log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->meta.id, bus_error(e, r));
1554
1555 if (e)
1556 dbus_error_free(e);
1557 }
1558
1559 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_WANTS], i)
1560 if ((r = transaction_add_job_and_dependencies(m, JOB_START, dep, ret, false, false, false, false, ignore_order, e, NULL)) < 0) {
1561 log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->meta.id, bus_error(e, r));
1562
1563 if (e)
1564 dbus_error_free(e);
1565 }
1566
1567 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_REQUISITE], i)
1568 if ((r = transaction_add_job_and_dependencies(m, JOB_VERIFY_ACTIVE, dep, ret, true, override, false, false, ignore_order, e, NULL)) < 0) {
1569
1570 if (r != -EBADR)
1571 goto fail;
1572
1573 if (e)
1574 dbus_error_free(e);
1575 }
1576
1577 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_REQUISITE_OVERRIDABLE], i)
1578 if ((r = transaction_add_job_and_dependencies(m, JOB_VERIFY_ACTIVE, dep, ret, !override, override, false, false, ignore_order, e, NULL)) < 0) {
1579 log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->meta.id, bus_error(e, r));
1580
1581 if (e)
1582 dbus_error_free(e);
1583 }
1584
1585 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_CONFLICTS], i)
1586 if ((r = transaction_add_job_and_dependencies(m, JOB_STOP, dep, ret, true, override, true, false, ignore_order, e, NULL)) < 0) {
1587
1588 if (r != -EBADR)
1589 goto fail;
1590
1591 if (e)
1592 dbus_error_free(e);
1593 }
1594
1595 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_CONFLICTED_BY], i)
1596 if ((r = transaction_add_job_and_dependencies(m, JOB_STOP, dep, ret, false, override, false, false, ignore_order, e, NULL)) < 0) {
1597 log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->meta.id, bus_error(e, r));
1598
1599 if (e)
1600 dbus_error_free(e);
1601 }
1602
1603 } else if (type == JOB_STOP || type == JOB_RESTART || type == JOB_TRY_RESTART) {
1604
1605 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_REQUIRED_BY], i)
1606 if ((r = transaction_add_job_and_dependencies(m, type, dep, ret, true, override, false, false, ignore_order, e, NULL)) < 0) {
1607
1608 if (r != -EBADR)
1609 goto fail;
1610
1611 if (e)
1612 dbus_error_free(e);
1613 }
1614
1615 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_BOUND_BY], i)
1616 if ((r = transaction_add_job_and_dependencies(m, type, dep, ret, true, override, false, false, ignore_order, e, NULL)) < 0) {
1617
1618 if (r != -EBADR)
1619 goto fail;
1620
1621 if (e)
1622 dbus_error_free(e);
1623 }
1624 }
1625
1626 /* JOB_VERIFY_ACTIVE, JOB_RELOAD require no dependency handling */
1627 }
1628
1629 if (_ret)
1630 *_ret = ret;
1631
1632 return 0;
1633
1634 fail:
1635 return r;
1636 }
1637
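/* Queues stop jobs for all units that are not already part of the
 * transaction and are not marked to be ignored on isolate. Inactive
 * units without a pending job are skipped. */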
1638 static int transaction_add_isolate_jobs(Manager *m) {
1639 Iterator i;
1640 Unit *u;
1641 char *k;
1642 int r;
1643
1644 assert(m);
1645
1646 HASHMAP_FOREACH_KEY(u, k, m->units, i) {
1647
1648 /* ignore aliases */
1649 if (u->meta.id != k)
1650 continue;
1651
1652 if (u->meta.ignore_on_isolate)
1653 continue;
1654
1655 /* No need to stop inactive jobs */
1656 if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(u)) && !u->meta.job)
1657 continue;
1658
1659 /* Is there already something listed for this? */
1660 if (hashmap_get(m->transaction_jobs, u))
1661 continue;
1662
1663 if ((r = transaction_add_job_and_dependencies(m, JOB_STOP, u, NULL, true, false, false, false, false, NULL, NULL)) < 0)
1664 log_warning("Cannot add isolate job for unit %s, ignoring: %s", u->meta.id, strerror(-r));
1665 }
1666
1667 return 0;
1668 }
1669
1670 int manager_add_job(Manager *m, JobType type, Unit *unit, JobMode mode, bool override, DBusError *e, Job **_ret) {
1671 int r;
1672 Job *ret;
1673
1674 assert(m);
1675 assert(type < _JOB_TYPE_MAX);
1676 assert(unit);
1677 assert(mode < _JOB_MODE_MAX);
1678
1679 if (mode == JOB_ISOLATE && type != JOB_START) {
1680 dbus_set_error(e, BUS_ERROR_INVALID_JOB_MODE, "Isolate is only valid for start.");
1681 return -EINVAL;
1682 }
1683
1684 if (mode == JOB_ISOLATE && !unit->meta.allow_isolate) {
1685 dbus_set_error(e, BUS_ERROR_NO_ISOLATION, "Operation refused, unit may not be isolated.");
1686 return -EPERM;
1687 }
1688
1689 log_debug("Trying to enqueue job %s/%s/%s", unit->meta.id, job_type_to_string(type), job_mode_to_string(mode));
1690
1691 if ((r = transaction_add_job_and_dependencies(m, type, unit, NULL, true, override, false,
1692 mode == JOB_IGNORE_DEPENDENCIES || mode == JOB_IGNORE_REQUIREMENTS,
1693 mode == JOB_IGNORE_DEPENDENCIES, e, &ret)) < 0) {
1694 transaction_abort(m);
1695 return r;
1696 }
1697
1698 if (mode == JOB_ISOLATE)
1699 if ((r = transaction_add_isolate_jobs(m)) < 0) {
1700 transaction_abort(m);
1701 return r;
1702 }
1703
1704 if ((r = transaction_activate(m, mode, e)) < 0)
1705 return r;
1706
1707 log_debug("Enqueued job %s/%s as %u", unit->meta.id, job_type_to_string(type), (unsigned) ret->id);
1708
1709 if (_ret)
1710 *_ret = ret;
1711
1712 return 0;
1713 }
1714
1715 int manager_add_job_by_name(Manager *m, JobType type, const char *name, JobMode mode, bool override, DBusError *e, Job **_ret) {
1716 Unit *unit;
1717 int r;
1718
1719 assert(m);
1720 assert(type < _JOB_TYPE_MAX);
1721 assert(name);
1722 assert(mode < _JOB_MODE_MAX);
1723
1724 if ((r = manager_load_unit(m, name, NULL, NULL, &unit)) < 0)
1725 return r;
1726
1727 return manager_add_job(m, type, unit, mode, override, e, _ret);
1728 }
1729
1730 Job *manager_get_job(Manager *m, uint32_t id) {
1731 assert(m);
1732
1733 return hashmap_get(m->jobs, UINT32_TO_PTR(id));
1734 }
1735
1736 Unit *manager_get_unit(Manager *m, const char *name) {
1737 assert(m);
1738 assert(name);
1739
1740 return hashmap_get(m->units, name);
1741 }
1742
1743 unsigned manager_dispatch_load_queue(Manager *m) {
1744 Meta *meta;
1745 unsigned n = 0;
1746
1747 assert(m);
1748
1749 /* Make sure we are not run recursively */
1750 if (m->dispatching_load_queue)
1751 return 0;
1752
1753 m->dispatching_load_queue = true;
1754
1755 /* Dispatches the load queue. Takes a unit from the queue and
1756 * tries to load its data until the queue is empty */
1757
1758 while ((meta = m->load_queue)) {
1759 assert(meta->in_load_queue);
1760
1761 unit_load((Unit*) meta);
1762 n++;
1763 }
1764
1765 m->dispatching_load_queue = false;
1766 return n;
1767 }
1768
1769 int manager_load_unit_prepare(Manager *m, const char *name, const char *path, DBusError *e, Unit **_ret) {
1770 Unit *ret;
1771 int r;
1772
1773 assert(m);
1774 assert(name || path);
1775
1776 /* This will prepare the unit for loading, but not actually
1777 * load anything from disk. */
1778
1779 if (path && !is_path(path)) {
1780 dbus_set_error(e, BUS_ERROR_INVALID_PATH, "Path %s is not absolute.", path);
1781 return -EINVAL;
1782 }
1783
1784 if (!name)
1785 name = file_name_from_path(path);
1786
1787 if (!unit_name_is_valid(name, false)) {
1788 dbus_set_error(e, BUS_ERROR_INVALID_NAME, "Unit name %s is not valid.", name);
1789 return -EINVAL;
1790 }
1791
1792 if ((ret = manager_get_unit(m, name))) {
1793 *_ret = ret;
1794 return 1;
1795 }
1796
1797 if (!(ret = unit_new(m)))
1798 return -ENOMEM;
1799
1800 if (path)
1801 if (!(ret->meta.fragment_path = strdup(path))) {
1802 unit_free(ret);
1803 return -ENOMEM;
1804 }
1805
1806 if ((r = unit_add_name(ret, name)) < 0) {
1807 unit_free(ret);
1808 return r;
1809 }
1810
1811 unit_add_to_load_queue(ret);
1812 unit_add_to_dbus_queue(ret);
1813 unit_add_to_gc_queue(ret);
1814
1815 if (_ret)
1816 *_ret = ret;
1817
1818 return 0;
1819 }
1820
1821 int manager_load_unit(Manager *m, const char *name, const char *path, DBusError *e, Unit **_ret) {
1822 int r;
1823
1824 assert(m);
1825
1826 /* This will load the service information files, but not actually
1827 * start any services or anything. */
1828
1829 if ((r = manager_load_unit_prepare(m, name, path, e, _ret)) != 0)
1830 return r;
1831
1832 manager_dispatch_load_queue(m);
1833
1834 if (_ret)
1835 *_ret = unit_follow_merge(*_ret);
1836
1837 return 0;
1838 }
1839
1840 void manager_dump_jobs(Manager *s, FILE *f, const char *prefix) {
1841 Iterator i;
1842 Job *j;
1843
1844 assert(s);
1845 assert(f);
1846
1847 HASHMAP_FOREACH(j, s->jobs, i)
1848 job_dump(j, f, prefix);
1849 }
1850
1851 void manager_dump_units(Manager *s, FILE *f, const char *prefix) {
1852 Iterator i;
1853 Unit *u;
1854 const char *t;
1855
1856 assert(s);
1857 assert(f);
1858
1859 HASHMAP_FOREACH_KEY(u, t, s->units, i)
1860 if (u->meta.id == t)
1861 unit_dump(u, f, prefix);
1862 }
1863
1864 void manager_clear_jobs(Manager *m) {
1865 Job *j;
1866
1867 assert(m);
1868
1869 transaction_abort(m);
1870
1871 while ((j = hashmap_first(m->jobs)))
1872 job_finish_and_invalidate(j, JOB_CANCELED);
1873 }
1874
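/* Runs all jobs currently sitting in the run queue. */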
1875 unsigned manager_dispatch_run_queue(Manager *m) {
1876 Job *j;
1877 unsigned n = 0;
1878
1879 if (m->dispatching_run_queue)
1880 return 0;
1881
1882 m->dispatching_run_queue = true;
1883
1884 while ((j = m->run_queue)) {
1885 assert(j->installed);
1886 assert(j->in_run_queue);
1887
1888 job_run_and_invalidate(j);
1889 n++;
1890 }
1891
1892 m->dispatching_run_queue = false;
1893 return n;
1894 }
1895
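/* Flushes pending unit and job change notifications to the bus. */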
1896 unsigned manager_dispatch_dbus_queue(Manager *m) {
1897 Job *j;
1898 Meta *meta;
1899 unsigned n = 0;
1900
1901 assert(m);
1902
1903 if (m->dispatching_dbus_queue)
1904 return 0;
1905
1906 m->dispatching_dbus_queue = true;
1907
1908 while ((meta = m->dbus_unit_queue)) {
1909 assert(meta->in_dbus_queue);
1910
1911 bus_unit_send_change_signal((Unit*) meta);
1912 n++;
1913 }
1914
1915 while ((j = m->dbus_job_queue)) {
1916 assert(j->in_dbus_queue);
1917
1918 bus_job_send_change_signal(j);
1919 n++;
1920 }
1921
1922 m->dispatching_dbus_queue = false;
1923 return n;
1924 }
1925
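/* Drains the notification socket, attributes each datagram to a unit
 * via the sender's credentials (SCM_CREDENTIALS) and hands the parsed
 * tags to the unit's notify_message() callback. */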
1926 static int manager_process_notify_fd(Manager *m) {
1927 ssize_t n;
1928
1929 assert(m);
1930
1931 for (;;) {
1932 char buf[4096];
1933 struct msghdr msghdr;
1934 struct iovec iovec;
1935 struct ucred *ucred;
1936 union {
1937 struct cmsghdr cmsghdr;
1938 uint8_t buf[CMSG_SPACE(sizeof(struct ucred))];
1939 } control;
1940 Unit *u;
1941 char **tags;
1942
1943 zero(iovec);
1944 iovec.iov_base = buf;
1945 iovec.iov_len = sizeof(buf)-1;
1946
1947 zero(control);
1948 zero(msghdr);
1949 msghdr.msg_iov = &iovec;
1950 msghdr.msg_iovlen = 1;
1951 msghdr.msg_control = &control;
1952 msghdr.msg_controllen = sizeof(control);
1953
1954 if ((n = recvmsg(m->notify_watch.fd, &msghdr, MSG_DONTWAIT)) <= 0) {
1955 if (n >= 0)
1956 return -EIO;
1957
1958 if (errno == EAGAIN || errno == EINTR)
1959 break;
1960
1961 return -errno;
1962 }
1963
1964 if (msghdr.msg_controllen < CMSG_LEN(sizeof(struct ucred)) ||
1965 control.cmsghdr.cmsg_level != SOL_SOCKET ||
1966 control.cmsghdr.cmsg_type != SCM_CREDENTIALS ||
1967 control.cmsghdr.cmsg_len != CMSG_LEN(sizeof(struct ucred))) {
1968 log_warning("Received notify message without credentials. Ignoring.");
1969 continue;
1970 }
1971
1972 ucred = (struct ucred*) CMSG_DATA(&control.cmsghdr);
1973
1974 if (!(u = hashmap_get(m->watch_pids, LONG_TO_PTR(ucred->pid))))
1975 if (!(u = cgroup_unit_by_pid(m, ucred->pid))) {
1976 log_warning("Cannot find unit for notify message of PID %lu.", (unsigned long) ucred->pid);
1977 continue;
1978 }
1979
1980 assert((size_t) n < sizeof(buf));
1981 buf[n] = 0;
1982 if (!(tags = strv_split(buf, "\n\r")))
1983 return -ENOMEM;
1984
1985 log_debug("Got notification message for unit %s", u->meta.id);
1986
1987 if (UNIT_VTABLE(u)->notify_message)
1988 UNIT_VTABLE(u)->notify_message(u, ucred->pid, tags);
1989
1990 strv_free(tags);
1991 }
1992
1993 return 0;
1994 }
1995
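/* Reaps dead children and dispatches the wait status to the unit each
 * child belongs to. */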
1996 static int manager_dispatch_sigchld(Manager *m) {
1997 assert(m);
1998
1999 for (;;) {
2000 siginfo_t si;
2001 Unit *u;
2002 int r;
2003
2004 zero(si);
2005
2006 /* First we call waitid() for a PID and do not reap the
2007 * zombie. That way we can still access /proc/$PID for
2008 * it while it is a zombie. */
2009 if (waitid(P_ALL, 0, &si, WEXITED|WNOHANG|WNOWAIT) < 0) {
2010
2011 if (errno == ECHILD)
2012 break;
2013
2014 if (errno == EINTR)
2015 continue;
2016
2017 return -errno;
2018 }
2019
2020 if (si.si_pid <= 0)
2021 break;
2022
2023 if (si.si_code == CLD_EXITED || si.si_code == CLD_KILLED || si.si_code == CLD_DUMPED) {
2024 char *name = NULL;
2025
2026 get_process_name(si.si_pid, &name);
2027 log_debug("Got SIGCHLD for process %lu (%s)", (unsigned long) si.si_pid, strna(name));
2028 free(name);
2029 }
2030
2031 /* Let's flush any message the dying child might still
2032 * have queued for us. This ensures that the process
2033 * still exists in /proc so that we can figure out
2034 * which cgroup and hence unit it belongs to. */
2035 if ((r = manager_process_notify_fd(m)) < 0)
2036 return r;
2037
2038 /* And now figure out the unit this belongs to */
2039 if (!(u = hashmap_get(m->watch_pids, LONG_TO_PTR(si.si_pid))))
2040 u = cgroup_unit_by_pid(m, si.si_pid);
2041
2042 /* And now, we actually reap the zombie. */
2043 if (waitid(P_PID, si.si_pid, &si, WEXITED) < 0) {
2044 if (errno == EINTR)
2045 continue;
2046
2047 return -errno;
2048 }
2049
2050 if (si.si_code != CLD_EXITED && si.si_code != CLD_KILLED && si.si_code != CLD_DUMPED)
2051 continue;
2052
2053 log_debug("Child %lu died (code=%s, status=%i/%s)",
2054 (long unsigned) si.si_pid,
2055 sigchld_code_to_string(si.si_code),
2056 si.si_status,
2057 strna(si.si_code == CLD_EXITED
2058 ? exit_status_to_string(si.si_status, EXIT_STATUS_FULL)
2059 : signal_to_string(si.si_status)));
2060
2061 if (!u)
2062 continue;
2063
2064 log_debug("Child %lu belongs to %s", (long unsigned) si.si_pid, u->meta.id);
2065
2066 hashmap_remove(m->watch_pids, LONG_TO_PTR(si.si_pid));
2067 UNIT_VTABLE(u)->sigchld_event(u, si.si_pid, si.si_code, si.si_status);
2068 }
2069
2070 return 0;
2071 }
2072
2073 static int manager_start_target(Manager *m, const char *name, JobMode mode) {
2074 int r;
2075 DBusError error;
2076
2077 dbus_error_init(&error);
2078
2079 log_debug("Activating special unit %s", name);
2080
2081 if ((r = manager_add_job_by_name(m, JOB_START, name, mode, true, &error, NULL)) < 0)
2082 log_error("Failed to enqueue %s job: %s", name, bus_error(&error, r));
2083
2084 dbus_error_free(&error);
2085
2086 return r;
2087 }
2088
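/* Drain the signalfd and act on each received signal. SIGCHLD is only
 * noted here and dispatched once the fd has been fully drained. */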
2089 static int manager_process_signal_fd(Manager *m) {
2090 ssize_t n;
2091 struct signalfd_siginfo sfsi;
2092 bool sigchld = false;
2093
2094 assert(m);
2095
2096 for (;;) {
2097 if ((n = read(m->signal_watch.fd, &sfsi, sizeof(sfsi))) != sizeof(sfsi)) {
2098
2099 if (n >= 0)
2100 return -EIO;
2101
2102 if (errno == EINTR || errno == EAGAIN)
2103 break;
2104
2105 return -errno;
2106 }
2107
2108 if (sfsi.ssi_pid > 0) {
2109 char *p = NULL;
2110
2111 get_process_name(sfsi.ssi_pid, &p);
2112
2113 log_debug("Received SIG%s from PID %lu (%s).",
2114 strna(signal_to_string(sfsi.ssi_signo)),
2115 (unsigned long) sfsi.ssi_pid, strna(p));
2116 free(p);
2117 } else
2118 log_debug("Received SIG%s.", strna(signal_to_string(sfsi.ssi_signo)));
2119
2120 switch (sfsi.ssi_signo) {
2121
2122 case SIGCHLD:
2123 sigchld = true;
2124 break;
2125
2126 case SIGTERM:
2127 if (m->running_as == MANAGER_SYSTEM) {
2128 /* This is for compatibility with the
2129 * original sysvinit */
2130 m->exit_code = MANAGER_REEXECUTE;
2131 break;
2132 }
2133
2134 /* Fall through */
2135
2136 case SIGINT:
2137 if (m->running_as == MANAGER_SYSTEM) {
2138 manager_start_target(m, SPECIAL_CTRL_ALT_DEL_TARGET, JOB_REPLACE);
2139 break;
2140 }
2141
2142                         /* Run the exit target if there is one; if not, just exit. */
2143 if (manager_start_target(m, SPECIAL_EXIT_TARGET, JOB_REPLACE) < 0) {
2144 m->exit_code = MANAGER_EXIT;
2145 return 0;
2146 }
2147
2148 break;
2149
2150 case SIGWINCH:
2151 if (m->running_as == MANAGER_SYSTEM)
2152 manager_start_target(m, SPECIAL_KBREQUEST_TARGET, JOB_REPLACE);
2153
2154 /* This is a nop on non-init */
2155 break;
2156
2157 case SIGPWR:
2158 if (m->running_as == MANAGER_SYSTEM)
2159 manager_start_target(m, SPECIAL_SIGPWR_TARGET, JOB_REPLACE);
2160
2161 /* This is a nop on non-init */
2162 break;
2163
2164 case SIGUSR1: {
2165 Unit *u;
2166
2167 u = manager_get_unit(m, SPECIAL_DBUS_SERVICE);
2168
2169 if (!u || UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u))) {
2170 log_info("Trying to reconnect to bus...");
2171 bus_init(m, true);
2172 }
2173
2174 if (!u || !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u))) {
2175 log_info("Loading D-Bus service...");
2176 manager_start_target(m, SPECIAL_DBUS_SERVICE, JOB_REPLACE);
2177 }
2178
2179 break;
2180 }
2181
2182 case SIGUSR2: {
2183 FILE *f;
2184 char *dump = NULL;
2185 size_t size;
2186
2187 if (!(f = open_memstream(&dump, &size))) {
2188 log_warning("Failed to allocate memory stream.");
2189 break;
2190 }
2191
2192 manager_dump_units(m, f, "\t");
2193 manager_dump_jobs(m, f, "\t");
2194
2195 if (ferror(f)) {
2196 fclose(f);
2197 free(dump);
2198 log_warning("Failed to write status stream");
2199 break;
2200 }
2201
2202 fclose(f);
2203 log_dump(LOG_INFO, dump);
2204 free(dump);
2205
2206 break;
2207 }
2208
2209 case SIGHUP:
2210 m->exit_code = MANAGER_RELOAD;
2211 break;
2212
2213 default: {
2214
2215 /* Starting SIGRTMIN+0 */
2216 static const char * const target_table[] = {
2217 [0] = SPECIAL_DEFAULT_TARGET,
2218 [1] = SPECIAL_RESCUE_TARGET,
2219 [2] = SPECIAL_EMERGENCY_TARGET,
2220 [3] = SPECIAL_HALT_TARGET,
2221 [4] = SPECIAL_POWEROFF_TARGET,
2222 [5] = SPECIAL_REBOOT_TARGET,
2223 [6] = SPECIAL_KEXEC_TARGET
2224 };
2225
2226                         /* Starting at SIGRTMIN+13, so that the halt target (SIGRTMIN+3) and direct halt are 10 apart */
2227 static const ManagerExitCode code_table[] = {
2228 [0] = MANAGER_HALT,
2229 [1] = MANAGER_POWEROFF,
2230 [2] = MANAGER_REBOOT,
2231 [3] = MANAGER_KEXEC
2232 };
2233
2234 if ((int) sfsi.ssi_signo >= SIGRTMIN+0 &&
2235 (int) sfsi.ssi_signo < SIGRTMIN+(int) ELEMENTSOF(target_table)) {
2236                                 manager_start_target(m, target_table[sfsi.ssi_signo - SIGRTMIN],
2237                                                      ((int) sfsi.ssi_signo == SIGRTMIN+1 || (int) sfsi.ssi_signo == SIGRTMIN+2) ? JOB_ISOLATE : JOB_REPLACE);
2238 break;
2239 }
2240
2241 if ((int) sfsi.ssi_signo >= SIGRTMIN+13 &&
2242 (int) sfsi.ssi_signo < SIGRTMIN+13+(int) ELEMENTSOF(code_table)) {
2243 m->exit_code = code_table[sfsi.ssi_signo - SIGRTMIN - 13];
2244 break;
2245 }
2246
2247 switch (sfsi.ssi_signo - SIGRTMIN) {
2248
2249 case 20:
2250 log_debug("Enabling showing of status.");
2251 m->show_status = true;
2252 break;
2253
2254 case 21:
2255 log_debug("Disabling showing of status.");
2256 m->show_status = false;
2257 break;
2258
2259 case 22:
2260 log_set_max_level(LOG_DEBUG);
2261 log_notice("Setting log level to debug.");
2262 break;
2263
2264 case 23:
2265 log_set_max_level(LOG_INFO);
2266 log_notice("Setting log level to info.");
2267 break;
2268
2269 case 27:
2270 log_set_target(LOG_TARGET_CONSOLE);
2271 log_notice("Setting log target to console.");
2272 break;
2273
2274 case 28:
2275 log_set_target(LOG_TARGET_KMSG);
2276 log_notice("Setting log target to kmsg.");
2277 break;
2278
2279 case 29:
2280 log_set_target(LOG_TARGET_SYSLOG_OR_KMSG);
2281 log_notice("Setting log target to syslog-or-kmsg.");
2282 break;
2283
2284 default:
2285 log_warning("Got unhandled signal <%s>.", strna(signal_to_string(sfsi.ssi_signo)));
2286 }
2287 }
2288 }
2289 }
2290
2291 if (sigchld)
2292 return manager_dispatch_sigchld(m);
2293
2294 return 0;
2295 }
2296
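/* Dispatch a single epoll event to the right subsystem, based on the
 * Watch structure stored in the event's data pointer. */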
2297 static int process_event(Manager *m, struct epoll_event *ev) {
2298 int r;
2299 Watch *w;
2300
2301 assert(m);
2302 assert(ev);
2303
2304 assert_se(w = ev->data.ptr);
2305
2306 if (w->type == WATCH_INVALID)
2307 return 0;
2308
2309 switch (w->type) {
2310
2311 case WATCH_SIGNAL:
2312
2313 /* An incoming signal? */
2314 if (ev->events != EPOLLIN)
2315 return -EINVAL;
2316
2317 if ((r = manager_process_signal_fd(m)) < 0)
2318 return r;
2319
2320 break;
2321
2322 case WATCH_NOTIFY:
2323
2324 /* An incoming daemon notification event? */
2325 if (ev->events != EPOLLIN)
2326 return -EINVAL;
2327
2328 if ((r = manager_process_notify_fd(m)) < 0)
2329 return r;
2330
2331 break;
2332
2333 case WATCH_FD:
2334
2335 /* Some fd event, to be dispatched to the units */
2336 UNIT_VTABLE(w->data.unit)->fd_event(w->data.unit, w->fd, ev->events, w);
2337 break;
2338
2339 case WATCH_UNIT_TIMER:
2340 case WATCH_JOB_TIMER: {
2341 uint64_t v;
2342 ssize_t k;
2343
2344 /* Some timer event, to be dispatched to the units */
2345 if ((k = read(w->fd, &v, sizeof(v))) != sizeof(v)) {
2346
2347 if (k < 0 && (errno == EINTR || errno == EAGAIN))
2348 break;
2349
2350 return k < 0 ? -errno : -EIO;
2351 }
2352
2353 if (w->type == WATCH_UNIT_TIMER)
2354 UNIT_VTABLE(w->data.unit)->timer_event(w->data.unit, v, w);
2355 else
2356 job_timer_event(w->data.job, v, w);
2357 break;
2358 }
2359
2360 case WATCH_MOUNT:
2361 /* Some mount table change, intended for the mount subsystem */
2362 mount_fd_event(m, ev->events);
2363 break;
2364
2365 case WATCH_SWAP:
2366 /* Some swap table change, intended for the swap subsystem */
2367 swap_fd_event(m, ev->events);
2368 break;
2369
2370 case WATCH_UDEV:
2371 /* Some notification from udev, intended for the device subsystem */
2372 device_fd_event(m, ev->events);
2373 break;
2374
2375 case WATCH_DBUS_WATCH:
2376 bus_watch_event(m, w, ev->events);
2377 break;
2378
2379 case WATCH_DBUS_TIMEOUT:
2380 bus_timeout_event(m, w, ev->events);
2381 break;
2382
2383 default:
2384 log_error("event type=%i", w->type);
2385 assert_not_reached("Unknown epoll event type.");
2386 }
2387
2388 return 0;
2389 }
2390
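/* The main event loop: work through the internal queues first and only
 * block in epoll_wait() once there is nothing left to do. The
 * ratelimit below guards against busy-looping. */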
2391 int manager_loop(Manager *m) {
2392 int r;
2393
2394 RATELIMIT_DEFINE(rl, 1*USEC_PER_SEC, 50000);
2395
2396 assert(m);
2397 m->exit_code = MANAGER_RUNNING;
2398
2399 /* Release the path cache */
2400 set_free_free(m->unit_path_cache);
2401 m->unit_path_cache = NULL;
2402
2403 manager_check_finished(m);
2404
2405         /* There might still be some zombies hanging around from
2406          * before we were exec()'ed. Let's reap them. */
2407 if ((r = manager_dispatch_sigchld(m)) < 0)
2408 return r;
2409
2410 while (m->exit_code == MANAGER_RUNNING) {
2411 struct epoll_event event;
2412 int n;
2413
2414 if (!ratelimit_test(&rl)) {
2415 /* Yay, something is going seriously wrong, pause a little */
2416 log_warning("Looping too fast. Throttling execution a little.");
2417 sleep(1);
2418 }
2419
2420 if (manager_dispatch_load_queue(m) > 0)
2421 continue;
2422
2423 if (manager_dispatch_run_queue(m) > 0)
2424 continue;
2425
2426 if (bus_dispatch(m) > 0)
2427 continue;
2428
2429 if (manager_dispatch_cleanup_queue(m) > 0)
2430 continue;
2431
2432 if (manager_dispatch_gc_queue(m) > 0)
2433 continue;
2434
2435 if (manager_dispatch_dbus_queue(m) > 0)
2436 continue;
2437
2438 if (swap_dispatch_reload(m) > 0)
2439 continue;
2440
2441 if ((n = epoll_wait(m->epoll_fd, &event, 1, -1)) < 0) {
2442
2443 if (errno == EINTR)
2444 continue;
2445
2446 return -errno;
2447 }
2448
2449 assert(n == 1);
2450
2451 if ((r = process_event(m, &event)) < 0)
2452 return r;
2453 }
2454
2455 return m->exit_code;
2456 }
2457
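/* The magic offsets 31 and 30 in the two functions below are the
 * lengths of the respective D-Bus object path prefixes. */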
2458 int manager_get_unit_from_dbus_path(Manager *m, const char *s, Unit **_u) {
2459 char *n;
2460 Unit *u;
2461
2462 assert(m);
2463 assert(s);
2464 assert(_u);
2465
2466 if (!startswith(s, "/org/freedesktop/systemd1/unit/"))
2467 return -EINVAL;
2468
2469 if (!(n = bus_path_unescape(s+31)))
2470 return -ENOMEM;
2471
2472 u = manager_get_unit(m, n);
2473 free(n);
2474
2475 if (!u)
2476 return -ENOENT;
2477
2478 *_u = u;
2479
2480 return 0;
2481 }
2482
2483 int manager_get_job_from_dbus_path(Manager *m, const char *s, Job **_j) {
2484 Job *j;
2485 unsigned id;
2486 int r;
2487
2488 assert(m);
2489 assert(s);
2490 assert(_j);
2491
2492 if (!startswith(s, "/org/freedesktop/systemd1/job/"))
2493 return -EINVAL;
2494
2495 if ((r = safe_atou(s + 30, &id)) < 0)
2496 return r;
2497
2498 if (!(j = manager_get_job(m, id)))
2499 return -ENOENT;
2500
2501 *_j = j;
2502
2503 return 0;
2504 }
2505
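/* Emit an audit record for a service state change. Only the system
 * instance does this, and not while deserializing saved state. */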
2506 void manager_send_unit_audit(Manager *m, Unit *u, int type, bool success) {
2507
2508 #ifdef HAVE_AUDIT
2509 char *p;
2510
2511 if (m->audit_fd < 0)
2512 return;
2513
2514 /* Don't generate audit events if the service was already
2515 * started and we're just deserializing */
2516 if (m->n_reloading > 0)
2517 return;
2518
2519 if (m->running_as != MANAGER_SYSTEM)
2520 return;
2521
2522 if (u->meta.type != UNIT_SERVICE)
2523 return;
2524
2525 if (!(p = unit_name_to_prefix_and_instance(u->meta.id))) {
2526 log_error("Failed to allocate unit name for audit message: %s", strerror(ENOMEM));
2527 return;
2528 }
2529
2530 if (audit_log_user_comm_message(m->audit_fd, type, "", p, NULL, NULL, NULL, success) < 0) {
2531 log_warning("Failed to send audit message: %m");
2532
2533 if (errno == EPERM) {
2534                         /* We aren't allowed to send audit messages?
2535                          * Then let's not try again, to avoid
2536                          * spamming the user with the same message
2537                          * over and over. */
2538
2539 audit_close(m->audit_fd);
2540 m->audit_fd = -1;
2541 }
2542 }
2543
2544 free(p);
2545 #endif
2546
2547 }
2548
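/* Tell the Plymouth boot splash about a unit state change via its
 * abstract socket. Errors that merely indicate Plymouth is not running
 * are silently ignored. */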
2549 void manager_send_unit_plymouth(Manager *m, Unit *u) {
2550 int fd = -1;
2551 union sockaddr_union sa;
2552 int n = 0;
2553 char *message = NULL;
2554
2555 /* Don't generate plymouth events if the service was already
2556 * started and we're just deserializing */
2557 if (m->n_reloading > 0)
2558 return;
2559
2560 if (m->running_as != MANAGER_SYSTEM)
2561 return;
2562
2563 if (u->meta.type != UNIT_SERVICE &&
2564 u->meta.type != UNIT_MOUNT &&
2565 u->meta.type != UNIT_SWAP)
2566 return;
2567
2568         /* We set SOCK_NONBLOCK here so that we rather drop the
2569          * message than wait for Plymouth */
2570 if ((fd = socket(AF_UNIX, SOCK_STREAM|SOCK_CLOEXEC|SOCK_NONBLOCK, 0)) < 0) {
2571 log_error("socket() failed: %m");
2572 return;
2573 }
2574
2575 zero(sa);
2576 sa.sa.sa_family = AF_UNIX;
2577 strncpy(sa.un.sun_path+1, "/org/freedesktop/plymouthd", sizeof(sa.un.sun_path)-1);
2578 if (connect(fd, &sa.sa, offsetof(struct sockaddr_un, sun_path) + 1 + strlen(sa.un.sun_path+1)) < 0) {
2579
2580 if (errno != EPIPE &&
2581 errno != EAGAIN &&
2582 errno != ENOENT &&
2583 errno != ECONNREFUSED &&
2584 errno != ECONNRESET &&
2585 errno != ECONNABORTED)
2586 log_error("connect() failed: %m");
2587
2588 goto finish;
2589 }
2590
2591 if (asprintf(&message, "U\002%c%s%n", (int) (strlen(u->meta.id) + 1), u->meta.id, &n) < 0) {
2592 log_error("Out of memory");
2593 goto finish;
2594 }
2595
2596 errno = 0;
2597 if (write(fd, message, n + 1) != n + 1) {
2598
2599 if (errno != EPIPE &&
2600 errno != EAGAIN &&
2601 errno != ENOENT &&
2602 errno != ECONNREFUSED &&
2603 errno != ECONNRESET &&
2604 errno != ECONNABORTED)
2605 log_error("Failed to write Plymouth message: %m");
2606
2607 goto finish;
2608 }
2609
2610 finish:
2611 if (fd >= 0)
2612 close_nointr_nofail(fd);
2613
2614 free(message);
2615 }
2616
2617 void manager_dispatch_bus_name_owner_changed(
2618 Manager *m,
2619 const char *name,
2620 const char* old_owner,
2621 const char *new_owner) {
2622
2623 Unit *u;
2624
2625 assert(m);
2626 assert(name);
2627
2628 if (!(u = hashmap_get(m->watch_bus, name)))
2629 return;
2630
2631 UNIT_VTABLE(u)->bus_name_owner_change(u, name, old_owner, new_owner);
2632 }
2633
2634 void manager_dispatch_bus_query_pid_done(
2635 Manager *m,
2636 const char *name,
2637 pid_t pid) {
2638
2639 Unit *u;
2640
2641 assert(m);
2642 assert(name);
2643 assert(pid >= 1);
2644
2645 if (!(u = hashmap_get(m->watch_bus, name)))
2646 return;
2647
2648 UNIT_VTABLE(u)->bus_query_pid_done(u, name, pid);
2649 }
2650
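/* Create an anonymous (immediately unlinked) temporary file that
 * carries the serialized manager state across a reload or
 * re-execution. */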
2651 int manager_open_serialization(Manager *m, FILE **_f) {
2652 char *path = NULL;
2653 mode_t saved_umask;
2654 int fd;
2655 FILE *f;
2656
2657 assert(_f);
2658
2659 if (m->running_as == MANAGER_SYSTEM)
2660 asprintf(&path, "/run/systemd/dump-%lu-XXXXXX", (unsigned long) getpid());
2661 else
2662 asprintf(&path, "/tmp/systemd-dump-%lu-XXXXXX", (unsigned long) getpid());
2663
2664 if (!path)
2665 return -ENOMEM;
2666
2667 saved_umask = umask(0077);
2668 fd = mkostemp(path, O_RDWR|O_CLOEXEC);
2669 umask(saved_umask);
2670
2671 if (fd < 0) {
2672 free(path);
2673 return -errno;
2674 }
2675
2676 unlink(path);
2677
2678 log_debug("Serializing state to %s", path);
2679 free(path);
2680
2681 if (!(f = fdopen(fd, "w+")))
2682 return -errno;
2683
2684 *_f = f;
2685
2686 return 0;
2687 }
2688
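/* Serialization format: a block of manager-level key=value lines, an
 * empty separator line, then one section per serializable unit, each
 * introduced by the unit's id on a line of its own. */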
2689 int manager_serialize(Manager *m, FILE *f, FDSet *fds) {
2690 Iterator i;
2691 Unit *u;
2692 const char *t;
2693 int r;
2694
2695 assert(m);
2696 assert(f);
2697 assert(fds);
2698
2699 m->n_reloading ++;
2700
2701 fprintf(f, "current-job-id=%i\n", m->current_job_id);
2702 fprintf(f, "taint-usr=%s\n", yes_no(m->taint_usr));
2703
2704 dual_timestamp_serialize(f, "initrd-timestamp", &m->initrd_timestamp);
2705 dual_timestamp_serialize(f, "startup-timestamp", &m->startup_timestamp);
2706 dual_timestamp_serialize(f, "finish-timestamp", &m->finish_timestamp);
2707
2708 fputc('\n', f);
2709
2710 HASHMAP_FOREACH_KEY(u, t, m->units, i) {
2711 if (u->meta.id != t)
2712 continue;
2713
2714 if (!unit_can_serialize(u))
2715 continue;
2716
2717 /* Start marker */
2718 fputs(u->meta.id, f);
2719 fputc('\n', f);
2720
2721 if ((r = unit_serialize(u, f, fds)) < 0) {
2722 m->n_reloading --;
2723 return r;
2724 }
2725 }
2726
2727 assert(m->n_reloading > 0);
2728 m->n_reloading --;
2729
2730 if (ferror(f))
2731 return -EIO;
2732
2733 r = bus_fdset_add_all(m, fds);
2734 if (r < 0)
2735 return r;
2736
2737 return 0;
2738 }
2739
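/* Counterpart to manager_serialize(): parse the manager-level
 * key=value block up to the empty separator line, then hand each unit
 * section over to unit_deserialize(). */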
2740 int manager_deserialize(Manager *m, FILE *f, FDSet *fds) {
2741 int r = 0;
2742
2743 assert(m);
2744 assert(f);
2745
2746 log_debug("Deserializing state...");
2747
2748 m->n_reloading ++;
2749
2750 for (;;) {
2751 char line[LINE_MAX], *l;
2752
2753 if (!fgets(line, sizeof(line), f)) {
2754 if (feof(f))
2755 r = 0;
2756 else
2757 r = -errno;
2758
2759 goto finish;
2760 }
2761
2762 char_array_0(line);
2763 l = strstrip(line);
2764
2765 if (l[0] == 0)
2766 break;
2767
2768 if (startswith(l, "current-job-id=")) {
2769 uint32_t id;
2770
2771 if (safe_atou32(l+15, &id) < 0)
2772 log_debug("Failed to parse current job id value %s", l+15);
2773 else
2774 m->current_job_id = MAX(m->current_job_id, id);
2775 } else if (startswith(l, "taint-usr=")) {
2776 int b;
2777
2778 if ((b = parse_boolean(l+10)) < 0)
2779 log_debug("Failed to parse taint /usr flag %s", l+10);
2780 else
2781 m->taint_usr = m->taint_usr || b;
2782 } else if (startswith(l, "initrd-timestamp="))
2783 dual_timestamp_deserialize(l+17, &m->initrd_timestamp);
2784 else if (startswith(l, "startup-timestamp="))
2785 dual_timestamp_deserialize(l+18, &m->startup_timestamp);
2786 else if (startswith(l, "finish-timestamp="))
2787 dual_timestamp_deserialize(l+17, &m->finish_timestamp);
2788 else
2789 log_debug("Unknown serialization item '%s'", l);
2790 }
2791
2792 for (;;) {
2793 Unit *u;
2794 char name[UNIT_NAME_MAX+2];
2795
2796 /* Start marker */
2797 if (!fgets(name, sizeof(name), f)) {
2798 if (feof(f))
2799 r = 0;
2800 else
2801 r = -errno;
2802
2803 goto finish;
2804 }
2805
2806 char_array_0(name);
2807
2808 if ((r = manager_load_unit(m, strstrip(name), NULL, NULL, &u)) < 0)
2809 goto finish;
2810
2811 if ((r = unit_deserialize(u, f, fds)) < 0)
2812 goto finish;
2813 }
2814
2815 finish:
2816 if (ferror(f)) {
2817 r = -EIO;
2818 goto finish;
2819 }
2820
2821 assert(m->n_reloading > 0);
2822 m->n_reloading --;
2823
2824 return r;
2825 }
2826
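/* Reload: serialize the current state to a temporary file, tear down
 * all units and jobs, re-run the generators and re-read the unit
 * files, then deserialize and coldplug to pick up where we left off. */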
2827 int manager_reload(Manager *m) {
2828 int r, q;
2829 FILE *f;
2830 FDSet *fds;
2831
2832 assert(m);
2833
2834 if ((r = manager_open_serialization(m, &f)) < 0)
2835 return r;
2836
2837 m->n_reloading ++;
2838
2839 if (!(fds = fdset_new())) {
2840 m->n_reloading --;
2841 r = -ENOMEM;
2842 goto finish;
2843 }
2844
2845 if ((r = manager_serialize(m, f, fds)) < 0) {
2846 m->n_reloading --;
2847 goto finish;
2848 }
2849
2850 if (fseeko(f, 0, SEEK_SET) < 0) {
2851 m->n_reloading --;
2852 r = -errno;
2853 goto finish;
2854 }
2855
2856 /* From here on there is no way back. */
2857 manager_clear_jobs_and_units(m);
2858 manager_undo_generators(m);
2859
2860 /* Find new unit paths */
2861 lookup_paths_free(&m->lookup_paths);
2862 if ((q = lookup_paths_init(&m->lookup_paths, m->running_as, true)) < 0)
2863 r = q;
2864
2865 manager_run_generators(m);
2866
2867 manager_build_unit_path_cache(m);
2868
2869 /* First, enumerate what we can from all config files */
2870 if ((q = manager_enumerate(m)) < 0)
2871 r = q;
2872
2873 /* Second, deserialize our stored data */
2874 if ((q = manager_deserialize(m, f, fds)) < 0)
2875 r = q;
2876
2877 fclose(f);
2878 f = NULL;
2879
2880 /* Third, fire things up! */
2881 if ((q = manager_coldplug(m)) < 0)
2882 r = q;
2883
2884 assert(m->n_reloading > 0);
2885 m->n_reloading--;
2886
2887 finish:
2888 if (f)
2889 fclose(f);
2890
2891 if (fds)
2892 fdset_free(fds);
2893
2894 return r;
2895 }
2896
2897 bool manager_is_booting_or_shutting_down(Manager *m) {
2898 Unit *u;
2899
2900 assert(m);
2901
2902 /* Is the initial job still around? */
2903 if (manager_get_job(m, 1))
2904 return true;
2905
2906 /* Is there a job for the shutdown target? */
2907         if ((u = manager_get_unit(m, SPECIAL_SHUTDOWN_TARGET)))
2908 return !!u->meta.job;
2909
2910 return false;
2911 }
2912
2913 void manager_reset_failed(Manager *m) {
2914 Unit *u;
2915 Iterator i;
2916
2917 assert(m);
2918
2919 HASHMAP_FOREACH(u, m->units, i)
2920 unit_reset_failed(u);
2921 }
2922
2923 bool manager_unit_pending_inactive(Manager *m, const char *name) {
2924 Unit *u;
2925
2926 assert(m);
2927 assert(name);
2928
2929 /* Returns true if the unit is inactive or going down */
2930 if (!(u = manager_get_unit(m, name)))
2931 return true;
2932
2933 return unit_pending_inactive(u);
2934 }
2935
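/* Records the finish timestamp the first time the job queue runs
 * empty, and reports how long the kernel, initrd and userspace phases
 * of boot took. */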
2936 void manager_check_finished(Manager *m) {
2937 char userspace[FORMAT_TIMESPAN_MAX], initrd[FORMAT_TIMESPAN_MAX], kernel[FORMAT_TIMESPAN_MAX], sum[FORMAT_TIMESPAN_MAX];
2938 usec_t kernel_usec = 0, initrd_usec = 0, userspace_usec = 0, total_usec = 0;
2939
2940 assert(m);
2941
2942 if (dual_timestamp_is_set(&m->finish_timestamp))
2943 return;
2944
2945 if (hashmap_size(m->jobs) > 0)
2946 return;
2947
2948 dual_timestamp_get(&m->finish_timestamp);
2949
2950 if (m->running_as == MANAGER_SYSTEM && detect_container(NULL) <= 0) {
2951
2952 userspace_usec = m->finish_timestamp.monotonic - m->startup_timestamp.monotonic;
2953 total_usec = m->finish_timestamp.monotonic;
2954
2955 if (dual_timestamp_is_set(&m->initrd_timestamp)) {
2956
2957 kernel_usec = m->initrd_timestamp.monotonic;
2958 initrd_usec = m->startup_timestamp.monotonic - m->initrd_timestamp.monotonic;
2959
2960 log_info("Startup finished in %s (kernel) + %s (initrd) + %s (userspace) = %s.",
2961 format_timespan(kernel, sizeof(kernel), kernel_usec),
2962 format_timespan(initrd, sizeof(initrd), initrd_usec),
2963 format_timespan(userspace, sizeof(userspace), userspace_usec),
2964 format_timespan(sum, sizeof(sum), total_usec));
2965 } else {
2966 kernel_usec = m->startup_timestamp.monotonic;
2967 initrd_usec = 0;
2968
2969 log_info("Startup finished in %s (kernel) + %s (userspace) = %s.",
2970 format_timespan(kernel, sizeof(kernel), kernel_usec),
2971 format_timespan(userspace, sizeof(userspace), userspace_usec),
2972 format_timespan(sum, sizeof(sum), total_usec));
2973 }
2974 } else {
2975 userspace_usec = initrd_usec = kernel_usec = 0;
2976 total_usec = m->finish_timestamp.monotonic - m->startup_timestamp.monotonic;
2977
2978 log_debug("Startup finished in %s.",
2979 format_timespan(sum, sizeof(sum), total_usec));
2980 }
2981
2982 bus_broadcast_finished(m, kernel_usec, initrd_usec, userspace_usec, total_usec);
2983
2984 sd_notifyf(false,
2985 "READY=1\nSTATUS=Startup finished in %s.",
2986 format_timespan(sum, sizeof(sum), total_usec));
2987 }
2988
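/* Run all executables in the generator directory. They may write unit
 * files into m->generator_unit_path, which is then added to the unit
 * search path if it ends up non-empty. */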
2989 void manager_run_generators(Manager *m) {
2990 DIR *d = NULL;
2991 const char *generator_path;
2992 const char *argv[3];
2993 mode_t u;
2994
2995 assert(m);
2996
2997 generator_path = m->running_as == MANAGER_SYSTEM ? SYSTEM_GENERATOR_PATH : USER_GENERATOR_PATH;
2998 if (!(d = opendir(generator_path))) {
2999
3000 if (errno == ENOENT)
3001 return;
3002
3003 log_error("Failed to enumerate generator directory: %m");
3004 return;
3005 }
3006
3007 if (!m->generator_unit_path) {
3008 const char *p;
3009 char user_path[] = "/tmp/systemd-generator-XXXXXX";
3010
3011 if (m->running_as == MANAGER_SYSTEM && getpid() == 1) {
3012 p = "/run/systemd/generator";
3013
3014 if (mkdir_p(p, 0755) < 0) {
3015 log_error("Failed to create generator directory: %m");
3016 goto finish;
3017 }
3018
3019 } else {
3020 if (!(p = mkdtemp(user_path))) {
3021 log_error("Failed to create generator directory: %m");
3022 goto finish;
3023 }
3024 }
3025
3026 if (!(m->generator_unit_path = strdup(p))) {
3027 log_error("Failed to allocate generator unit path.");
3028 goto finish;
3029 }
3030 }
3031
3032 argv[0] = NULL; /* Leave this empty, execute_directory() will fill something in */
3033 argv[1] = m->generator_unit_path;
3034 argv[2] = NULL;
3035
3036 u = umask(0022);
3037 execute_directory(generator_path, d, (char**) argv);
3038 umask(u);
3039
3040 if (rmdir(m->generator_unit_path) >= 0) {
3041                 /* We were able to remove the directory, which means it
3042                  * was empty: no generator created any units, so we can
3043                  * shortcut the rest of this */
3044
3045 free(m->generator_unit_path);
3046 m->generator_unit_path = NULL;
3047 goto finish;
3048 }
3049
3050 if (!strv_find(m->lookup_paths.unit_path, m->generator_unit_path)) {
3051 char **l;
3052
3053 if (!(l = strv_append(m->lookup_paths.unit_path, m->generator_unit_path))) {
3054 log_error("Failed to add generator directory to unit search path: %m");
3055 goto finish;
3056 }
3057
3058 strv_free(m->lookup_paths.unit_path);
3059 m->lookup_paths.unit_path = l;
3060
3061 log_debug("Added generator unit path %s to search path.", m->generator_unit_path);
3062 }
3063
3064 finish:
3065 if (d)
3066 closedir(d);
3067 }
3068
3069 void manager_undo_generators(Manager *m) {
3070 assert(m);
3071
3072 if (!m->generator_unit_path)
3073 return;
3074
3075 strv_remove(m->lookup_paths.unit_path, m->generator_unit_path);
3076 rm_rf(m->generator_unit_path, false, true);
3077
3078 free(m->generator_unit_path);
3079 m->generator_unit_path = NULL;
3080 }
3081
3082 int manager_set_default_controllers(Manager *m, char **controllers) {
3083 char **l;
3084
3085 assert(m);
3086
3087 if (!(l = strv_copy(controllers)))
3088 return -ENOMEM;
3089
3090 strv_free(m->default_controllers);
3091 m->default_controllers = l;
3092
3093 return 0;
3094 }
3095
3096 void manager_recheck_syslog(Manager *m) {
3097 Unit *u;
3098
3099 assert(m);
3100
3101 if (m->running_as != MANAGER_SYSTEM)
3102 return;
3103
3104 if ((u = manager_get_unit(m, SPECIAL_SYSLOG_SOCKET))) {
3105 SocketState state;
3106
3107 state = SOCKET(u)->state;
3108
3109 if (state != SOCKET_DEAD &&
3110 state != SOCKET_FAILED &&
3111 state != SOCKET_RUNNING) {
3112
3113                         /* The socket is not fully set up, or is still
3114                          * listening, so we had better not try to use
3115                          * it. Note that we have no problem if the
3116                          * socket is completely down, since there
3117                          * might be a foreign /dev/log socket around
3118                          * and we want to make use of that.
3119                          */
3120
3121 log_close_syslog();
3122 return;
3123 }
3124 }
3125
3126 if ((u = manager_get_unit(m, SPECIAL_SYSLOG_TARGET)))
3127 if (TARGET(u)->state != TARGET_ACTIVE) {
3128 log_close_syslog();
3129 return;
3130 }
3131
3132         /* The socket is either fully up or fully down, and the target
3133          * is up, so let's make use of the socket */
3134 log_open();
3135 }
3136
3137 static const char* const manager_running_as_table[_MANAGER_RUNNING_AS_MAX] = {
3138 [MANAGER_SYSTEM] = "system",
3139 [MANAGER_USER] = "user"
3140 };
3141
3142 DEFINE_STRING_TABLE_LOOKUP(manager_running_as, ManagerRunningAs);