src/manager.c (systemd, commit "manager: add log control via RT signals")
1 /*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/
2
3 /***
4 This file is part of systemd.
5
6 Copyright 2010 Lennart Poettering
7
8 systemd is free software; you can redistribute it and/or modify it
9 under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2 of the License, or
11 (at your option) any later version.
12
13 systemd is distributed in the hope that it will be useful, but
14 WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with systemd; If not, see <http://www.gnu.org/licenses/>.
20 ***/
21
22 #include <assert.h>
23 #include <errno.h>
24 #include <string.h>
25 #include <sys/epoll.h>
26 #include <signal.h>
27 #include <sys/signalfd.h>
28 #include <sys/wait.h>
29 #include <unistd.h>
30 #include <sys/poll.h>
31 #include <sys/reboot.h>
32 #include <sys/ioctl.h>
33 #include <linux/kd.h>
34 #include <termios.h>
35 #include <fcntl.h>
36 #include <sys/types.h>
37 #include <sys/stat.h>
38 #include <dirent.h>
39
40 #ifdef HAVE_AUDIT
41 #include <libaudit.h>
42 #endif
43
44 #include "manager.h"
45 #include "hashmap.h"
46 #include "macro.h"
47 #include "strv.h"
48 #include "log.h"
49 #include "util.h"
50 #include "ratelimit.h"
51 #include "cgroup.h"
52 #include "mount-setup.h"
53 #include "unit-name.h"
54 #include "dbus-unit.h"
55 #include "dbus-job.h"
56 #include "missing.h"
57 #include "path-lookup.h"
58 #include "special.h"
59 #include "bus-errors.h"
60 #include "exit-status.h"
61 #include "sd-daemon.h"
62
63 /* As soon as 16 units are in our GC queue, make sure to run a gc sweep */
64 #define GC_QUEUE_ENTRIES_MAX 16
65
66 /* As soon as 10s passed since a unit was added to our GC queue, make sure to run a gc sweep */
67 #define GC_QUEUE_USEC_MAX (10*USEC_PER_SEC)
68
69 /* Where clients shall send notification messages to */
70 #define NOTIFY_SOCKET_SYSTEM "/run/systemd/notify"
71 #define NOTIFY_SOCKET_USER "@/org/freedesktop/systemd1/notify"
72
73 static int manager_setup_notify(Manager *m) {
74 union {
75 struct sockaddr sa;
76 struct sockaddr_un un;
77 } sa;
78 struct epoll_event ev;
79 int one = 1;
80
81 assert(m);
82
83 m->notify_watch.type = WATCH_NOTIFY;
84 if ((m->notify_watch.fd = socket(AF_UNIX, SOCK_DGRAM|SOCK_CLOEXEC|SOCK_NONBLOCK, 0)) < 0) {
85 log_error("Failed to allocate notification socket: %m");
86 return -errno;
87 }
88
89 zero(sa);
90 sa.sa.sa_family = AF_UNIX;
91
92 if (getpid() != 1)
93 snprintf(sa.un.sun_path, sizeof(sa.un.sun_path), NOTIFY_SOCKET_USER "/%llu", random_ull());
94 else {
95 unlink(NOTIFY_SOCKET_SYSTEM);
96 strncpy(sa.un.sun_path, NOTIFY_SOCKET_SYSTEM, sizeof(sa.un.sun_path));
97 }
98
99 if (sa.un.sun_path[0] == '@')
100 sa.un.sun_path[0] = 0;
101
102 if (bind(m->notify_watch.fd, &sa.sa, offsetof(struct sockaddr_un, sun_path) + 1 + strlen(sa.un.sun_path+1)) < 0) {
103 log_error("bind() failed: %m");
104 return -errno;
105 }
106
107 if (setsockopt(m->notify_watch.fd, SOL_SOCKET, SO_PASSCRED, &one, sizeof(one)) < 0) {
108 log_error("SO_PASSCRED failed: %m");
109 return -errno;
110 }
111
112 zero(ev);
113 ev.events = EPOLLIN;
114 ev.data.ptr = &m->notify_watch;
115
116 if (epoll_ctl(m->epoll_fd, EPOLL_CTL_ADD, m->notify_watch.fd, &ev) < 0)
117 return -errno;
118
119 if (sa.un.sun_path[0] == 0)
120 sa.un.sun_path[0] = '@';
121
122 if (!(m->notify_socket = strdup(sa.un.sun_path)))
123 return -ENOMEM;
124
125 log_debug("Using notification socket %s", m->notify_socket);
126
127 return 0;
128 }
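
/* For illustration: services spawned by this manager find the socket address
 * set up above in $NOTIFY_SOCKET and can report state with the sd-daemon
 * helpers, roughly like this:
 *
 *     sd_notify(0, "READY=1\n"
 *                  "STATUS=Processing requests");
 *
 * Such datagrams are consumed by manager_process_notify_fd() below. */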
129
130 static int enable_special_signals(Manager *m) {
131 int fd;
132
133 assert(m);
134
135 /* Make sure we get SIGINT on control-alt-del */
136 if (reboot(RB_DISABLE_CAD) < 0)
137 log_warning("Failed to enable ctrl-alt-del handling: %m");
138
139 if ((fd = open_terminal("/dev/tty0", O_RDWR|O_NOCTTY|O_CLOEXEC)) < 0)
140 log_warning("Failed to open /dev/tty0: %m");
141 else {
142 /* Make sure we get SIGWINCH on kbrequest */
143 if (ioctl(fd, KDSIGACCEPT, SIGWINCH) < 0)
144 log_warning("Failed to enable kbrequest handling: %s", strerror(errno));
145
146 close_nointr_nofail(fd);
147 }
148
149 return 0;
150 }
151
152 static int manager_setup_signals(Manager *m) {
153 sigset_t mask;
154 struct epoll_event ev;
155 struct sigaction sa;
156
157 assert(m);
158
159 /* We are not interested in SIGSTOP and friends. */
160 zero(sa);
161 sa.sa_handler = SIG_DFL;
162 sa.sa_flags = SA_NOCLDSTOP|SA_RESTART;
163 assert_se(sigaction(SIGCHLD, &sa, NULL) == 0);
164
165 assert_se(sigemptyset(&mask) == 0);
166
167 sigset_add_many(&mask,
168 SIGCHLD, /* Child died */
169 SIGTERM, /* Reexecute daemon */
170 SIGHUP, /* Reload configuration */
171 SIGUSR1, /* systemd/upstart: reconnect to D-Bus */
172 SIGUSR2, /* systemd: dump status */
173 SIGINT, /* Kernel sends us this on control-alt-del */
174 SIGWINCH, /* Kernel sends us this on kbrequest (alt-arrowup) */
175 SIGPWR, /* Some kernel drivers and upsd send us this on power failure */
176 SIGRTMIN+0, /* systemd: start default.target */
177 SIGRTMIN+1, /* systemd: isolate rescue.target */
178 SIGRTMIN+2, /* systemd: isolate emergency.target */
179 SIGRTMIN+3, /* systemd: start halt.target */
180 SIGRTMIN+4, /* systemd: start poweroff.target */
181 SIGRTMIN+5, /* systemd: start reboot.target */
182 SIGRTMIN+6, /* systemd: start kexec.target */
183 SIGRTMIN+13, /* systemd: Immediate halt */
184 SIGRTMIN+14, /* systemd: Immediate poweroff */
185 SIGRTMIN+15, /* systemd: Immediate reboot */
186 SIGRTMIN+16, /* systemd: Immediate kexec */
187 SIGRTMIN+20, /* systemd: enable status messages */
188 SIGRTMIN+21, /* systemd: disable status messages */
189 SIGRTMIN+22, /* systemd: set log level to LOG_DEBUG */
190 SIGRTMIN+23, /* systemd: set log level to LOG_INFO */
191 SIGRTMIN+27, /* systemd: set log target to console */
192 SIGRTMIN+28, /* systemd: set log target to kmsg */
193 SIGRTMIN+29, /* systemd: set log target to syslog-or-kmsg */
194 -1);
195 assert_se(sigprocmask(SIG_SETMASK, &mask, NULL) == 0);
196
197 m->signal_watch.type = WATCH_SIGNAL;
198 if ((m->signal_watch.fd = signalfd(-1, &mask, SFD_NONBLOCK|SFD_CLOEXEC)) < 0)
199 return -errno;
200
201 zero(ev);
202 ev.events = EPOLLIN;
203 ev.data.ptr = &m->signal_watch;
204
205 if (epoll_ctl(m->epoll_fd, EPOLL_CTL_ADD, m->signal_watch.fd, &ev) < 0)
206 return -errno;
207
208 if (m->running_as == MANAGER_SYSTEM)
209 return enable_special_signals(m);
210
211 return 0;
212 }
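
/* For illustration: these realtime signals give an administrator basic
 * control over the manager without going through D-Bus. For example, sending
 * SIGRTMIN+22 to PID 1 (e.g. "kill -RTMIN+22 1", with a kill implementation
 * that resolves realtime signal names) raises the log level to LOG_DEBUG,
 * and SIGRTMIN+23 drops it back to LOG_INFO, matching the table above. */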
213
214 int manager_new(ManagerRunningAs running_as, Manager **_m) {
215 Manager *m;
216 int r = -ENOMEM;
217
218 assert(_m);
219 assert(running_as >= 0);
220 assert(running_as < _MANAGER_RUNNING_AS_MAX);
221
222 if (!(m = new0(Manager, 1)))
223 return -ENOMEM;
224
225 dual_timestamp_get(&m->startup_timestamp);
226
227 m->running_as = running_as;
228 m->name_data_slot = m->subscribed_data_slot = -1;
229 m->exit_code = _MANAGER_EXIT_CODE_INVALID;
230 m->pin_cgroupfs_fd = -1;
231
232 #ifdef HAVE_AUDIT
233 m->audit_fd = -1;
234 #endif
235
236 m->signal_watch.fd = m->mount_watch.fd = m->udev_watch.fd = m->epoll_fd = m->dev_autofs_fd = m->swap_watch.fd = -1;
237 m->current_job_id = 1; /* start as id #1, so that we can leave #0 around as "null-like" value */
238
239 if (!(m->environment = strv_copy(environ)))
240 goto fail;
241
242 if (!(m->default_controllers = strv_new("cpu", NULL)))
243 goto fail;
244
245 if (!(m->units = hashmap_new(string_hash_func, string_compare_func)))
246 goto fail;
247
248 if (!(m->jobs = hashmap_new(trivial_hash_func, trivial_compare_func)))
249 goto fail;
250
251 if (!(m->transaction_jobs = hashmap_new(trivial_hash_func, trivial_compare_func)))
252 goto fail;
253
254 if (!(m->watch_pids = hashmap_new(trivial_hash_func, trivial_compare_func)))
255 goto fail;
256
257 if (!(m->cgroup_bondings = hashmap_new(string_hash_func, string_compare_func)))
258 goto fail;
259
260 if (!(m->watch_bus = hashmap_new(string_hash_func, string_compare_func)))
261 goto fail;
262
263 if ((m->epoll_fd = epoll_create1(EPOLL_CLOEXEC)) < 0)
264 goto fail;
265
266 if ((r = lookup_paths_init(&m->lookup_paths, m->running_as, true)) < 0)
267 goto fail;
268
269 if ((r = manager_setup_signals(m)) < 0)
270 goto fail;
271
272 if ((r = manager_setup_cgroup(m)) < 0)
273 goto fail;
274
275 if ((r = manager_setup_notify(m)) < 0)
276 goto fail;
277
278 /* Try to connect to the busses, if possible. */
279 if ((r = bus_init(m, running_as != MANAGER_SYSTEM)) < 0)
280 goto fail;
281
282 #ifdef HAVE_AUDIT
283 if ((m->audit_fd = audit_open()) < 0)
284 log_error("Failed to connect to audit log: %m");
285 #endif
286
287 m->taint_usr = dir_is_empty("/usr") > 0;
288
289 *_m = m;
290 return 0;
291
292 fail:
293 manager_free(m);
294 return r;
295 }
296
297 static unsigned manager_dispatch_cleanup_queue(Manager *m) {
298 Meta *meta;
299 unsigned n = 0;
300
301 assert(m);
302
303 while ((meta = m->cleanup_queue)) {
304 assert(meta->in_cleanup_queue);
305
306 unit_free((Unit*) meta);
307 n++;
308 }
309
310 return n;
311 }
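
/* Note: unit_free() unlinks the unit from the cleanup queue (as from all
 * other queues), which is what lets the loop above terminate. */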
312
313 enum {
314 GC_OFFSET_IN_PATH, /* This one is on the path we were traveling */
315 GC_OFFSET_UNSURE, /* No clue */
316 GC_OFFSET_GOOD, /* We still need this unit */
317 GC_OFFSET_BAD, /* We don't need this unit anymore */
318 _GC_OFFSET_MAX
319 };
320
321 static void unit_gc_sweep(Unit *u, unsigned gc_marker) {
322 Iterator i;
323 Unit *other;
324 bool is_bad;
325
326 assert(u);
327
328 if (u->meta.gc_marker == gc_marker + GC_OFFSET_GOOD ||
329 u->meta.gc_marker == gc_marker + GC_OFFSET_BAD ||
330 u->meta.gc_marker == gc_marker + GC_OFFSET_IN_PATH)
331 return;
332
333 if (u->meta.in_cleanup_queue)
334 goto bad;
335
336 if (unit_check_gc(u))
337 goto good;
338
339 u->meta.gc_marker = gc_marker + GC_OFFSET_IN_PATH;
340
341 is_bad = true;
342
343 SET_FOREACH(other, u->meta.dependencies[UNIT_REFERENCED_BY], i) {
344 unit_gc_sweep(other, gc_marker);
345
346 if (other->meta.gc_marker == gc_marker + GC_OFFSET_GOOD)
347 goto good;
348
349 if (other->meta.gc_marker != gc_marker + GC_OFFSET_BAD)
350 is_bad = false;
351 }
352
353 if (is_bad)
354 goto bad;
355
356 /* We were unable to find anything out about this entry, so
357 * let's investigate it later */
358 u->meta.gc_marker = gc_marker + GC_OFFSET_UNSURE;
359 unit_add_to_gc_queue(u);
360 return;
361
362 bad:
363 /* We definitely know that this one is not useful anymore, so
364 * let's mark it for deletion */
365 u->meta.gc_marker = gc_marker + GC_OFFSET_BAD;
366 unit_add_to_cleanup_queue(u);
367 return;
368
369 good:
370 u->meta.gc_marker = gc_marker + GC_OFFSET_GOOD;
371 }
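
/* A note on the marker arithmetic above: m->gc_marker is advanced by
 * _GC_OFFSET_MAX before every sweep (see manager_dispatch_gc_queue() below),
 * so a gc_marker stored in a unit during an earlier sweep can never equal
 * gc_marker + GC_OFFSET_* of the current sweep. This way the per-unit
 * markers never have to be reset between sweeps. */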
372
373 static unsigned manager_dispatch_gc_queue(Manager *m) {
374 Meta *meta;
375 unsigned n = 0;
376 unsigned gc_marker;
377
378 assert(m);
379
380 if ((m->n_in_gc_queue < GC_QUEUE_ENTRIES_MAX) &&
381 (m->gc_queue_timestamp <= 0 ||
382 (m->gc_queue_timestamp + GC_QUEUE_USEC_MAX) > now(CLOCK_MONOTONIC)))
383 return 0;
384
385 log_debug("Running GC...");
386
387 m->gc_marker += _GC_OFFSET_MAX;
388 if (m->gc_marker + _GC_OFFSET_MAX <= _GC_OFFSET_MAX)
389 m->gc_marker = 1;
390
391 gc_marker = m->gc_marker;
392
393 while ((meta = m->gc_queue)) {
394 assert(meta->in_gc_queue);
395
396 unit_gc_sweep((Unit*) meta, gc_marker);
397
398 LIST_REMOVE(Meta, gc_queue, m->gc_queue, meta);
399 meta->in_gc_queue = false;
400
401 n++;
402
403 if (meta->gc_marker == gc_marker + GC_OFFSET_BAD ||
404 meta->gc_marker == gc_marker + GC_OFFSET_UNSURE) {
405 log_debug("Collecting %s", meta->id);
406 meta->gc_marker = gc_marker + GC_OFFSET_BAD;
407 unit_add_to_cleanup_queue((Unit*) meta);
408 }
409 }
410
411 m->n_in_gc_queue = 0;
412 m->gc_queue_timestamp = 0;
413
414 return n;
415 }
416
417 static void manager_clear_jobs_and_units(Manager *m) {
418 Job *j;
419 Unit *u;
420
421 assert(m);
422
423 while ((j = hashmap_first(m->transaction_jobs)))
424 job_free(j);
425
426 while ((u = hashmap_first(m->units)))
427 unit_free(u);
428
429 manager_dispatch_cleanup_queue(m);
430
431 assert(!m->load_queue);
432 assert(!m->run_queue);
433 assert(!m->dbus_unit_queue);
434 assert(!m->dbus_job_queue);
435 assert(!m->cleanup_queue);
436 assert(!m->gc_queue);
437
438 assert(hashmap_isempty(m->transaction_jobs));
439 assert(hashmap_isempty(m->jobs));
440 assert(hashmap_isempty(m->units));
441 }
442
443 void manager_free(Manager *m) {
444 UnitType c;
445
446 assert(m);
447
448 manager_clear_jobs_and_units(m);
449
450 for (c = 0; c < _UNIT_TYPE_MAX; c++)
451 if (unit_vtable[c]->shutdown)
452 unit_vtable[c]->shutdown(m);
453
454 /* If we reexecute ourselves, we keep the root cgroup
455 * around */
456 manager_shutdown_cgroup(m, m->exit_code != MANAGER_REEXECUTE);
457
458 manager_undo_generators(m);
459
460 bus_done(m);
461
462 hashmap_free(m->units);
463 hashmap_free(m->jobs);
464 hashmap_free(m->transaction_jobs);
465 hashmap_free(m->watch_pids);
466 hashmap_free(m->watch_bus);
467
468 if (m->epoll_fd >= 0)
469 close_nointr_nofail(m->epoll_fd);
470 if (m->signal_watch.fd >= 0)
471 close_nointr_nofail(m->signal_watch.fd);
472 if (m->notify_watch.fd >= 0)
473 close_nointr_nofail(m->notify_watch.fd);
474
475 #ifdef HAVE_AUDIT
476 if (m->audit_fd >= 0)
477 audit_close(m->audit_fd);
478 #endif
479
480 free(m->notify_socket);
481
482 lookup_paths_free(&m->lookup_paths);
483 strv_free(m->environment);
484
485 strv_free(m->default_controllers);
486
487 hashmap_free(m->cgroup_bondings);
488 set_free_free(m->unit_path_cache);
489
490 free(m);
491 }
492
493 int manager_enumerate(Manager *m) {
494 int r = 0, q;
495 UnitType c;
496
497 assert(m);
498
499 /* Let's ask every type to load all units from disk/kernel
500 * that it might know */
501 for (c = 0; c < _UNIT_TYPE_MAX; c++)
502 if (unit_vtable[c]->enumerate)
503 if ((q = unit_vtable[c]->enumerate(m)) < 0)
504 r = q;
505
506 manager_dispatch_load_queue(m);
507 return r;
508 }
509
510 int manager_coldplug(Manager *m) {
511 int r = 0, q;
512 Iterator i;
513 Unit *u;
514 char *k;
515
516 assert(m);
517
518 /* Then, let's set up their initial state. */
519 HASHMAP_FOREACH_KEY(u, k, m->units, i) {
520
521 /* ignore aliases */
522 if (u->meta.id != k)
523 continue;
524
525 if ((q = unit_coldplug(u)) < 0)
526 r = q;
527 }
528
529 return r;
530 }
531
532 static void manager_build_unit_path_cache(Manager *m) {
533 char **i;
534 DIR *d = NULL;
535 int r;
536
537 assert(m);
538
539 set_free_free(m->unit_path_cache);
540
541 if (!(m->unit_path_cache = set_new(string_hash_func, string_compare_func))) {
542 log_error("Failed to allocate unit path cache.");
543 return;
544 }
545
546 /* This simply builds a list of files we know exist, so that
547 * we don't always have to go to disk */
548
549 STRV_FOREACH(i, m->lookup_paths.unit_path) {
550 struct dirent *de;
551
552 if (!(d = opendir(*i))) {
553 log_error("Failed to open directory: %m");
554 continue;
555 }
556
557 while ((de = readdir(d))) {
558 char *p;
559
560 if (ignore_file(de->d_name))
561 continue;
562
563 if (asprintf(&p, "%s/%s", streq(*i, "/") ? "" : *i, de->d_name) < 0) {
564 r = -ENOMEM;
565 goto fail;
566 }
567
568 if ((r = set_put(m->unit_path_cache, p)) < 0) {
569 free(p);
570 goto fail;
571 }
572 }
573
574 closedir(d);
575 d = NULL;
576 }
577
578 return;
579
580 fail:
581 log_error("Failed to build unit path cache: %s", strerror(-r));
582
583 set_free_free(m->unit_path_cache);
584 m->unit_path_cache = NULL;
585
586 if (d)
587 closedir(d);
588 }
589
590 int manager_startup(Manager *m, FILE *serialization, FDSet *fds) {
591 int r, q;
592
593 assert(m);
594
595 manager_run_generators(m);
596
597 manager_build_unit_path_cache(m);
598
599 /* If we will deserialize make sure that during enumeration
600 * this is already known, so we increase the counter here
601 * already */
602 if (serialization)
603 m->n_reloading ++;
604
605 /* First, enumerate what we can from all config files */
606 r = manager_enumerate(m);
607
608 /* Second, deserialize if there is something to deserialize */
609 if (serialization)
610 if ((q = manager_deserialize(m, serialization, fds)) < 0)
611 r = q;
612
613 /* Third, fire things up! */
614 if ((q = manager_coldplug(m)) < 0)
615 r = q;
616
617 if (serialization) {
618 assert(m->n_reloading > 0);
619 m->n_reloading --;
620 }
621
622 return r;
623 }
624
625 static void transaction_delete_job(Manager *m, Job *j, bool delete_dependencies) {
626 assert(m);
627 assert(j);
628
629 /* Deletes one job from the transaction */
630
631 manager_transaction_unlink_job(m, j, delete_dependencies);
632
633 if (!j->installed)
634 job_free(j);
635 }
636
637 static void transaction_delete_unit(Manager *m, Unit *u) {
638 Job *j;
639
640 /* Deletes all jobs associated with a certain unit from the
641 * transaction */
642
643 while ((j = hashmap_get(m->transaction_jobs, u)))
644 transaction_delete_job(m, j, true);
645 }
646
647 static void transaction_clean_dependencies(Manager *m) {
648 Iterator i;
649 Job *j;
650
651 assert(m);
652
653 /* Drops all dependencies of all installed jobs */
654
655 HASHMAP_FOREACH(j, m->jobs, i) {
656 while (j->subject_list)
657 job_dependency_free(j->subject_list);
658 while (j->object_list)
659 job_dependency_free(j->object_list);
660 }
661
662 assert(!m->transaction_anchor);
663 }
664
665 static void transaction_abort(Manager *m) {
666 Job *j;
667
668 assert(m);
669
670 while ((j = hashmap_first(m->transaction_jobs)))
671 if (j->installed)
672 transaction_delete_job(m, j, true);
673 else
674 job_free(j);
675
676 assert(hashmap_isempty(m->transaction_jobs));
677
678 transaction_clean_dependencies(m);
679 }
680
681 static void transaction_find_jobs_that_matter_to_anchor(Manager *m, Job *j, unsigned generation) {
682 JobDependency *l;
683
684 assert(m);
685
686 /* A recursive sweep through the graph that marks all units
687 * that matter to the anchor job, i.e. are directly or
688 * indirectly a dependency of the anchor job via paths that
689 * are fully marked as mattering. */
690
691 if (j)
692 l = j->subject_list;
693 else
694 l = m->transaction_anchor;
695
696 LIST_FOREACH(subject, l, l) {
697
698 /* This link does not matter */
699 if (!l->matters)
700 continue;
701
702 /* This unit has already been marked */
703 if (l->object->generation == generation)
704 continue;
705
706 l->object->matters_to_anchor = true;
707 l->object->generation = generation;
708
709 transaction_find_jobs_that_matter_to_anchor(m, l->object, generation);
710 }
711 }
712
713 static void transaction_merge_and_delete_job(Manager *m, Job *j, Job *other, JobType t) {
714 JobDependency *l, *last;
715
716 assert(j);
717 assert(other);
718 assert(j->unit == other->unit);
719 assert(!j->installed);
720
721 /* Merges 'other' into 'j' and then deletes 'other'. */
722
723 j->type = t;
724 j->state = JOB_WAITING;
725 j->override = j->override || other->override;
726
727 j->matters_to_anchor = j->matters_to_anchor || other->matters_to_anchor;
728
729 /* Patch us in as new owner of the JobDependency objects */
730 last = NULL;
731 LIST_FOREACH(subject, l, other->subject_list) {
732 assert(l->subject == other);
733 l->subject = j;
734 last = l;
735 }
736
737 /* Merge both lists */
738 if (last) {
739 last->subject_next = j->subject_list;
740 if (j->subject_list)
741 j->subject_list->subject_prev = last;
742 j->subject_list = other->subject_list;
743 }
744
745 /* Patch us in as new owner of the JobDependency objects */
746 last = NULL;
747 LIST_FOREACH(object, l, other->object_list) {
748 assert(l->object == other);
749 l->object = j;
750 last = l;
751 }
752
753 /* Merge both lists */
754 if (last) {
755 last->object_next = j->object_list;
756 if (j->object_list)
757 j->object_list->object_prev = last;
758 j->object_list = other->object_list;
759 }
760
761 /* Kill the other job */
762 other->subject_list = NULL;
763 other->object_list = NULL;
764 transaction_delete_job(m, other, true);
765 }
766 static bool job_is_conflicted_by(Job *j) {
767 JobDependency *l;
768
769 assert(j);
770
771 /* Returns true if this job is pulled in by at least one
772 * ConflictedBy dependency. */
773
774 LIST_FOREACH(object, l, j->object_list)
775 if (l->conflicts)
776 return true;
777
778 return false;
779 }
780
781 static int delete_one_unmergeable_job(Manager *m, Job *j) {
782 Job *k;
783
784 assert(j);
785
786 /* Tries to delete one item in the linked list
787 * j->transaction_next->transaction_next->... that conflicts
788 * with another one, in an attempt to make an inconsistent
789 * transaction work. */
790
791 /* We rely here on the fact that if a merged with b does not
792 * merge with c, then a or b does not merge with c either */
793 LIST_FOREACH(transaction, j, j)
794 LIST_FOREACH(transaction, k, j->transaction_next) {
795 Job *d;
796
797 /* Is this one mergeable? Then skip it */
798 if (job_type_is_mergeable(j->type, k->type))
799 continue;
800
801 /* Ok, we found two that conflict, let's see if we can
802 * drop one of them */
803 if (!j->matters_to_anchor && !k->matters_to_anchor) {
804
805 /* Neither job matters, so let's find
806 * the one that is smarter to remove.
807 * Let's think positive and rather
808 * remove stops than starts -- except
809 * if something is being stopped
810 * because it is conflicted by another
811 * unit, in which case we rather
812 * remove the start. */
813
814 log_debug("Looking at job %s/%s conflicted_by=%s", j->unit->meta.id, job_type_to_string(j->type), yes_no(j->type == JOB_STOP && job_is_conflicted_by(j)));
815 log_debug("Looking at job %s/%s conflicted_by=%s", k->unit->meta.id, job_type_to_string(k->type), yes_no(k->type == JOB_STOP && job_is_conflicted_by(k)));
816
817 if (j->type == JOB_STOP) {
818
819 if (job_is_conflicted_by(j))
820 d = k;
821 else
822 d = j;
823
824 } else if (k->type == JOB_STOP) {
825
826 if (job_is_conflicted_by(k))
827 d = j;
828 else
829 d = k;
830 } else
831 d = j;
832
833 } else if (!j->matters_to_anchor)
834 d = j;
835 else if (!k->matters_to_anchor)
836 d = k;
837 else
838 return -ENOEXEC;
839
840 /* Ok, we can drop one, so let's do so. */
841 log_debug("Fixing conflicting jobs by deleting job %s/%s", d->unit->meta.id, job_type_to_string(d->type));
842 transaction_delete_job(m, d, true);
843 return 0;
844 }
845
846 return -EINVAL;
847 }
848
849 static int transaction_merge_jobs(Manager *m, DBusError *e) {
850 Job *j;
851 Iterator i;
852 int r;
853
854 assert(m);
855
856 /* First step, check whether any of the jobs for one specific
857 * task conflict. If so, try to drop one of them. */
858 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
859 JobType t;
860 Job *k;
861
862 t = j->type;
863 LIST_FOREACH(transaction, k, j->transaction_next) {
864 if (job_type_merge(&t, k->type) >= 0)
865 continue;
866
867 /* OK, we could not merge all jobs for this
868 * action. Let's see if we can get rid of one
869 * of them */
870
871 if ((r = delete_one_unmergeable_job(m, j)) >= 0)
872 /* Ok, we managed to drop one, now
873 * let's ask our callers to call us
874 * again after garbage collecting */
875 return -EAGAIN;
876
877 /* We couldn't merge anything. Failure */
878 dbus_set_error(e, BUS_ERROR_TRANSACTION_JOBS_CONFLICTING, "Transaction contains conflicting jobs '%s' and '%s' for %s. Probably contradicting requirement dependencies configured.",
879 job_type_to_string(t), job_type_to_string(k->type), k->unit->meta.id);
880 return r;
881 }
882 }
883
884 /* Second step, merge the jobs. */
885 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
886 JobType t = j->type;
887 Job *k;
888
889 /* Merge all transactions */
890 LIST_FOREACH(transaction, k, j->transaction_next)
891 assert_se(job_type_merge(&t, k->type) == 0);
892
893 /* If an active job is mergeable, merge it too */
894 if (j->unit->meta.job)
895 job_type_merge(&t, j->unit->meta.job->type); /* Might fail. Which is OK */
896
897 while ((k = j->transaction_next)) {
898 if (j->installed) {
899 transaction_merge_and_delete_job(m, k, j, t);
900 j = k;
901 } else
902 transaction_merge_and_delete_job(m, j, k, t);
903 }
904
905 if (j->unit->meta.job && !j->installed)
906 transaction_merge_and_delete_job(m, j, j->unit->meta.job, t);
907
908 assert(!j->transaction_next);
909 assert(!j->transaction_prev);
910 }
911
912 return 0;
913 }
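
/* After this pass every unit has at most one job left in the transaction,
 * and any mergeable job that was already installed for the unit has been
 * folded into it. For example, a start job and a verify-active job for the
 * same unit collapse into a single start job. */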
914
915 static void transaction_drop_redundant(Manager *m) {
916 bool again;
917
918 assert(m);
919
920 /* Goes through the transaction and removes all jobs that
921 * are noops */
922
923 do {
924 Job *j;
925 Iterator i;
926
927 again = false;
928
929 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
930 bool changes_something = false;
931 Job *k;
932
933 LIST_FOREACH(transaction, k, j) {
934
935 if (!job_is_anchor(k) &&
936 (k->installed || job_type_is_redundant(k->type, unit_active_state(k->unit))) &&
937 (!k->unit->meta.job || !job_type_is_conflicting(k->type, k->unit->meta.job->type)))
938 continue;
939
940 changes_something = true;
941 break;
942 }
943
944 if (changes_something)
945 continue;
946
947 /* log_debug("Found redundant job %s/%s, dropping.", j->unit->meta.id, job_type_to_string(j->type)); */
948 transaction_delete_job(m, j, false);
949 again = true;
950 break;
951 }
952
953 } while (again);
954 }
955
956 static bool unit_matters_to_anchor(Unit *u, Job *j) {
957 assert(u);
958 assert(!j->transaction_prev);
959
960 /* Checks whether at least one of the jobs for this unit
961 * matters to the anchor. */
962
963 LIST_FOREACH(transaction, j, j)
964 if (j->matters_to_anchor)
965 return true;
966
967 return false;
968 }
969
970 static int transaction_verify_order_one(Manager *m, Job *j, Job *from, unsigned generation, DBusError *e) {
971 Iterator i;
972 Unit *u;
973 int r;
974
975 assert(m);
976 assert(j);
977 assert(!j->transaction_prev);
978
979 /* Does a recursive sweep through the ordering graph, looking
980 * for a cycle. If we find a cycle we try to break it. */
981
982 /* Have we seen this before? */
983 if (j->generation == generation) {
984 Job *k, *delete;
985
986 /* If the marker is NULL we have been here already and
987 * decided the job was loop-free from here. Hence
988 * shortcut things and return right-away. */
989 if (!j->marker)
990 return 0;
991
992 /* So, the marker is not NULL and we have already been
993 * here. We have a cycle. Let's try to break it. We go
994 * backwards in our path and try to find a suitable
995 * job to remove. We use the marker to find our way
996 * back, since, smart as we are, we stored our way
997 * back in there. */
998 log_warning("Found ordering cycle on %s/%s", j->unit->meta.id, job_type_to_string(j->type));
999
1000 delete = NULL;
1001 for (k = from; k; k = ((k->generation == generation && k->marker != k) ? k->marker : NULL)) {
1002
1003 log_info("Walked on cycle path to %s/%s", k->unit->meta.id, job_type_to_string(k->type));
1004
1005 if (!delete &&
1006 !k->installed &&
1007 !unit_matters_to_anchor(k->unit, k)) {
1008 /* Ok, we can drop this one, so let's
1009 * do so. */
1010 delete = k;
1011 }
1012
1013 /* Check if this in fact was the beginning of
1014 * the cycle */
1015 if (k == j)
1016 break;
1017 }
1018
1019
1020 if (delete) {
1021 log_warning("Breaking ordering cycle by deleting job %s/%s", delete->unit->meta.id, job_type_to_string(delete->type));
1022 transaction_delete_unit(m, delete->unit);
1023 return -EAGAIN;
1024 }
1025
1026 log_error("Unable to break cycle");
1027
1028 dbus_set_error(e, BUS_ERROR_TRANSACTION_ORDER_IS_CYCLIC, "Transaction order is cyclic. See system logs for details.");
1029 return -ENOEXEC;
1030 }
1031
1032 /* Make the marker point to where we come from, so that we can
1033 * find our way backwards if we want to break a cycle. We use
1034 * a special marker for the beginning: we point to
1035 * ourselves. */
1036 j->marker = from ? from : j;
1037 j->generation = generation;
1038
1039 /* We assume that the dependencies are bidirectional, and
1040 * hence can ignore UNIT_AFTER */
1041 SET_FOREACH(u, j->unit->meta.dependencies[UNIT_BEFORE], i) {
1042 Job *o;
1043
1044 /* Is there a job for this unit? */
1045 if (!(o = hashmap_get(m->transaction_jobs, u)))
1046
1047 /* Ok, there is no job for this in the
1048 * transaction, but maybe there is already one
1049 * running? */
1050 if (!(o = u->meta.job))
1051 continue;
1052
1053 if ((r = transaction_verify_order_one(m, o, j, generation, e)) < 0)
1054 return r;
1055 }
1056
1057 /* Ok, let's backtrack, and remember that this entry is not on
1058 * our path anymore. */
1059 j->marker = NULL;
1060
1061 return 0;
1062 }
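
/* To make the cycle breaking above concrete: if a transaction orders
 * a.service before b.service before c.service before a.service again, the
 * recursion revisits a's job while its marker is still set, then follows the
 * marker back-pointers c -> b -> a and deletes the jobs of the first unit on
 * that path whose job is neither installed nor matters to the anchor. If no
 * such candidate exists, the cycle cannot be broken and -ENOEXEC is
 * returned. */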
1063
1064 static int transaction_verify_order(Manager *m, unsigned *generation, DBusError *e) {
1065 Job *j;
1066 int r;
1067 Iterator i;
1068 unsigned g;
1069
1070 assert(m);
1071 assert(generation);
1072
1073 /* Check if the ordering graph is cyclic. If it is, try to fix
1074 * that up by dropping one of the jobs. */
1075
1076 g = (*generation)++;
1077
1078 HASHMAP_FOREACH(j, m->transaction_jobs, i)
1079 if ((r = transaction_verify_order_one(m, j, NULL, g, e)) < 0)
1080 return r;
1081
1082 return 0;
1083 }
1084
1085 static void transaction_collect_garbage(Manager *m) {
1086 bool again;
1087
1088 assert(m);
1089
1090 /* Drop jobs that are not required by any other job */
1091
1092 do {
1093 Iterator i;
1094 Job *j;
1095
1096 again = false;
1097
1098 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
1099 if (j->object_list) {
1100 /* log_debug("Keeping job %s/%s because of %s/%s", */
1101 /* j->unit->meta.id, job_type_to_string(j->type), */
1102 /* j->object_list->subject ? j->object_list->subject->unit->meta.id : "root", */
1103 /* j->object_list->subject ? job_type_to_string(j->object_list->subject->type) : "root"); */
1104 continue;
1105 }
1106
1107 /* log_debug("Garbage collecting job %s/%s", j->unit->meta.id, job_type_to_string(j->type)); */
1108 transaction_delete_job(m, j, true);
1109 again = true;
1110 break;
1111 }
1112
1113 } while (again);
1114 }
1115
1116 static int transaction_is_destructive(Manager *m, DBusError *e) {
1117 Iterator i;
1118 Job *j;
1119
1120 assert(m);
1121
1122 /* Checks whether applying this transaction means that
1123 * existing jobs would be replaced */
1124
1125 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
1126
1127 /* Assume merged */
1128 assert(!j->transaction_prev);
1129 assert(!j->transaction_next);
1130
1131 if (j->unit->meta.job &&
1132 j->unit->meta.job != j &&
1133 !job_type_is_superset(j->type, j->unit->meta.job->type)) {
1134
1135 dbus_set_error(e, BUS_ERROR_TRANSACTION_IS_DESTRUCTIVE, "Transaction is destructive.");
1136 return -EEXIST;
1137 }
1138 }
1139
1140 return 0;
1141 }
1142
1143 static void transaction_minimize_impact(Manager *m) {
1144 bool again;
1145 assert(m);
1146
1147 /* Drops all unnecessary jobs that reverse already active jobs
1148 * or that stop a running service. */
1149
1150 do {
1151 Job *j;
1152 Iterator i;
1153
1154 again = false;
1155
1156 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
1157 LIST_FOREACH(transaction, j, j) {
1158 bool stops_running_service, changes_existing_job;
1159
1160 /* If it matters, we shouldn't drop it */
1161 if (j->matters_to_anchor)
1162 continue;
1163
1164 /* Would this stop a running service?
1165 * Would this change an existing job?
1166 * If so, let's drop this entry */
1167
1168 stops_running_service =
1169 j->type == JOB_STOP && UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(j->unit));
1170
1171 changes_existing_job =
1172 j->unit->meta.job &&
1173 job_type_is_conflicting(j->type, j->unit->meta.job->type);
1174
1175 if (!stops_running_service && !changes_existing_job)
1176 continue;
1177
1178 if (stops_running_service)
1179 log_debug("%s/%s would stop a running service.", j->unit->meta.id, job_type_to_string(j->type));
1180
1181 if (changes_existing_job)
1182 log_debug("%s/%s would change existing job.", j->unit->meta.id, job_type_to_string(j->type));
1183
1184 /* Ok, let's get rid of this */
1185 log_debug("Deleting %s/%s to minimize impact.", j->unit->meta.id, job_type_to_string(j->type));
1186
1187 transaction_delete_job(m, j, true);
1188 again = true;
1189 break;
1190 }
1191
1192 if (again)
1193 break;
1194 }
1195
1196 } while (again);
1197 }
1198
1199 static int transaction_apply(Manager *m, JobMode mode) {
1200 Iterator i;
1201 Job *j;
1202 int r;
1203
1204 /* Moves the transaction jobs to the set of active jobs */
1205
1206 if (mode == JOB_ISOLATE) {
1207
1208 /* When isolating first kill all installed jobs which
1209 * aren't part of the new transaction */
1210 HASHMAP_FOREACH(j, m->jobs, i) {
1211 assert(j->installed);
1212
1213 if (hashmap_get(m->transaction_jobs, j->unit))
1214 continue;
1215
1216 job_finish_and_invalidate(j, JOB_CANCELED);
1217 }
1218 }
1219
1220 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
1221 /* Assume merged */
1222 assert(!j->transaction_prev);
1223 assert(!j->transaction_next);
1224
1225 if (j->installed)
1226 continue;
1227
1228 if ((r = hashmap_put(m->jobs, UINT32_TO_PTR(j->id), j)) < 0)
1229 goto rollback;
1230 }
1231
1232 while ((j = hashmap_steal_first(m->transaction_jobs))) {
1233 if (j->installed) {
1234 /* log_debug("Skipping already installed job %s/%s as %u", j->unit->meta.id, job_type_to_string(j->type), (unsigned) j->id); */
1235 continue;
1236 }
1237
1238 if (j->unit->meta.job)
1239 job_free(j->unit->meta.job);
1240
1241 j->unit->meta.job = j;
1242 j->installed = true;
1243 m->n_installed_jobs ++;
1244
1245 /* We're fully installed. Now let's free data we don't
1246 * need anymore. */
1247
1248 assert(!j->transaction_next);
1249 assert(!j->transaction_prev);
1250
1251 job_add_to_run_queue(j);
1252 job_add_to_dbus_queue(j);
1253 job_start_timer(j);
1254
1255 log_debug("Installed new job %s/%s as %u", j->unit->meta.id, job_type_to_string(j->type), (unsigned) j->id);
1256 }
1257
1258 /* As last step, kill all remaining job dependencies. */
1259 transaction_clean_dependencies(m);
1260
1261 return 0;
1262
1263 rollback:
1264
1265 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
1266 if (j->installed)
1267 continue;
1268
1269 hashmap_remove(m->jobs, UINT32_TO_PTR(j->id));
1270 }
1271
1272 return r;
1273 }
1274
1275 static int transaction_activate(Manager *m, JobMode mode, DBusError *e) {
1276 int r;
1277 unsigned generation = 1;
1278
1279 assert(m);
1280
1281 /* This applies the changes recorded in transaction_jobs to
1282 * the actual list of jobs, if possible. */
1283
1284 /* First step: figure out which jobs matter */
1285 transaction_find_jobs_that_matter_to_anchor(m, NULL, generation++);
1286
1287 /* Second step: Try not to stop any running services if
1288 * we don't have to. Don't try to reverse running
1289 * jobs if we don't have to. */
1290 if (mode == JOB_FAIL)
1291 transaction_minimize_impact(m);
1292
1293 /* Third step: Drop redundant jobs */
1294 transaction_drop_redundant(m);
1295
1296 for (;;) {
1297 /* Fourth step: Let's remove unneeded jobs that might
1298 * be lurking. */
1299 if (mode != JOB_ISOLATE)
1300 transaction_collect_garbage(m);
1301
1302 /* Fifth step: verify order makes sense and correct
1303 * cycles if necessary and possible */
1304 if ((r = transaction_verify_order(m, &generation, e)) >= 0)
1305 break;
1306
1307 if (r != -EAGAIN) {
1308 log_warning("Requested transaction contains an unfixable cyclic ordering dependency: %s", bus_error(e, r));
1309 goto rollback;
1310 }
1311
1312 /* Let's see if the resulting transaction ordering
1313 * graph is still cyclic... */
1314 }
1315
1316 for (;;) {
1317 /* Sixth step: let's drop unmergeable entries if
1318 * necessary and possible, merge entries we can
1319 * merge */
1320 if ((r = transaction_merge_jobs(m, e)) >= 0)
1321 break;
1322
1323 if (r != -EAGAIN) {
1324 log_warning("Requested transaction contains unmergeable jobs: %s", bus_error(e, r));
1325 goto rollback;
1326 }
1327
1328 /* Seventh step: an entry got dropped, let's garbage
1329 * collect its dependencies. */
1330 if (mode != JOB_ISOLATE)
1331 transaction_collect_garbage(m);
1332
1333 /* Let's see if the resulting transaction still has
1334 * unmergeable entries ... */
1335 }
1336
1337 /* Eighth step: Drop redundant jobs again, if the merging now allows us to drop more. */
1338 transaction_drop_redundant(m);
1339
1340 /* Ninth step: check whether we can actually apply this */
1341 if (mode == JOB_FAIL)
1342 if ((r = transaction_is_destructive(m, e)) < 0) {
1343 log_notice("Requested transaction contradicts existing jobs: %s", bus_error(e, r));
1344 goto rollback;
1345 }
1346
1347 /* Tenth step: apply changes */
1348 if ((r = transaction_apply(m, mode)) < 0) {
1349 log_warning("Failed to apply transaction: %s", strerror(-r));
1350 goto rollback;
1351 }
1352
1353 assert(hashmap_isempty(m->transaction_jobs));
1354 assert(!m->transaction_anchor);
1355
1356 return 0;
1357
1358 rollback:
1359 transaction_abort(m);
1360 return r;
1361 }
1362
1363 static Job* transaction_add_one_job(Manager *m, JobType type, Unit *unit, bool override, bool *is_new) {
1364 Job *j, *f;
1365
1366 assert(m);
1367 assert(unit);
1368
1369 /* Looks for an existing prospective job and returns that. If
1370 * it doesn't exist it is created and added to the prospective
1371 * jobs list. */
1372
1373 f = hashmap_get(m->transaction_jobs, unit);
1374
1375 LIST_FOREACH(transaction, j, f) {
1376 assert(j->unit == unit);
1377
1378 if (j->type == type) {
1379 if (is_new)
1380 *is_new = false;
1381 return j;
1382 }
1383 }
1384
1385 if (unit->meta.job && unit->meta.job->type == type)
1386 j = unit->meta.job;
1387 else if (!(j = job_new(m, type, unit)))
1388 return NULL;
1389
1390 j->generation = 0;
1391 j->marker = NULL;
1392 j->matters_to_anchor = false;
1393 j->override = override;
1394
1395 LIST_PREPEND(Job, transaction, f, j);
1396
1397 if (hashmap_replace(m->transaction_jobs, unit, f) < 0) {
1398 job_free(j);
1399 return NULL;
1400 }
1401
1402 if (is_new)
1403 *is_new = true;
1404
1405 /* log_debug("Added job %s/%s to transaction.", unit->meta.id, job_type_to_string(type)); */
1406
1407 return j;
1408 }
1409
1410 void manager_transaction_unlink_job(Manager *m, Job *j, bool delete_dependencies) {
1411 assert(m);
1412 assert(j);
1413
1414 if (j->transaction_prev)
1415 j->transaction_prev->transaction_next = j->transaction_next;
1416 else if (j->transaction_next)
1417 hashmap_replace(m->transaction_jobs, j->unit, j->transaction_next);
1418 else
1419 hashmap_remove_value(m->transaction_jobs, j->unit, j);
1420
1421 if (j->transaction_next)
1422 j->transaction_next->transaction_prev = j->transaction_prev;
1423
1424 j->transaction_prev = j->transaction_next = NULL;
1425
1426 while (j->subject_list)
1427 job_dependency_free(j->subject_list);
1428
1429 while (j->object_list) {
1430 Job *other = j->object_list->matters ? j->object_list->subject : NULL;
1431
1432 job_dependency_free(j->object_list);
1433
1434 if (other && delete_dependencies) {
1435 log_debug("Deleting job %s/%s as dependency of job %s/%s",
1436 other->unit->meta.id, job_type_to_string(other->type),
1437 j->unit->meta.id, job_type_to_string(j->type));
1438 transaction_delete_job(m, other, delete_dependencies);
1439 }
1440 }
1441 }
1442
1443 static int transaction_add_job_and_dependencies(
1444 Manager *m,
1445 JobType type,
1446 Unit *unit,
1447 Job *by,
1448 bool matters,
1449 bool override,
1450 bool conflicts,
1451 bool ignore_requirements,
1452 bool ignore_order,
1453 DBusError *e,
1454 Job **_ret) {
1455 Job *ret;
1456 Iterator i;
1457 Unit *dep;
1458 int r;
1459 bool is_new;
1460
1461 assert(m);
1462 assert(type < _JOB_TYPE_MAX);
1463 assert(unit);
1464
1465 /* log_debug("Pulling in %s/%s from %s/%s", */
1466 /* unit->meta.id, job_type_to_string(type), */
1467 /* by ? by->unit->meta.id : "NA", */
1468 /* by ? job_type_to_string(by->type) : "NA"); */
1469
1470 if (unit->meta.load_state != UNIT_LOADED &&
1471 unit->meta.load_state != UNIT_ERROR &&
1472 unit->meta.load_state != UNIT_MASKED) {
1473 dbus_set_error(e, BUS_ERROR_LOAD_FAILED, "Unit %s is not loaded properly.", unit->meta.id);
1474 return -EINVAL;
1475 }
1476
1477 if (type != JOB_STOP && unit->meta.load_state == UNIT_ERROR) {
1478 dbus_set_error(e, BUS_ERROR_LOAD_FAILED,
1479 "Unit %s failed to load: %s. "
1480 "See system logs and 'systemctl status %s' for details.",
1481 unit->meta.id,
1482 strerror(-unit->meta.load_error),
1483 unit->meta.id);
1484 return -EINVAL;
1485 }
1486
1487 if (type != JOB_STOP && unit->meta.load_state == UNIT_MASKED) {
1488 dbus_set_error(e, BUS_ERROR_MASKED, "Unit %s is masked.", unit->meta.id);
1489 return -EINVAL;
1490 }
1491
1492 if (!unit_job_is_applicable(unit, type)) {
1493 dbus_set_error(e, BUS_ERROR_JOB_TYPE_NOT_APPLICABLE, "Job type %s is not applicable for unit %s.", job_type_to_string(type), unit->meta.id);
1494 return -EBADR;
1495 }
1496
1497 /* First add the job. */
1498 if (!(ret = transaction_add_one_job(m, type, unit, override, &is_new)))
1499 return -ENOMEM;
1500
1501 ret->ignore_order = ret->ignore_order || ignore_order;
1502
1503 /* Then, add a link to the job. */
1504 if (!job_dependency_new(by, ret, matters, conflicts))
1505 return -ENOMEM;
1506
1507 if (is_new && !ignore_requirements) {
1508 Set *following;
1509
1510 /* If we are following some other unit, make sure we
1511 * add all dependencies of everybody following. */
1512 if (unit_following_set(ret->unit, &following) > 0) {
1513 SET_FOREACH(dep, following, i)
1514 if ((r = transaction_add_job_and_dependencies(m, type, dep, ret, false, override, false, false, ignore_order, e, NULL)) < 0) {
1515 log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->meta.id, bus_error(e, r));
1516
1517 if (e)
1518 dbus_error_free(e);
1519 }
1520
1521 set_free(following);
1522 }
1523
1524 /* Finally, recursively add in all dependencies. */
1525 if (type == JOB_START || type == JOB_RELOAD_OR_START) {
1526 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_REQUIRES], i)
1527 if ((r = transaction_add_job_and_dependencies(m, JOB_START, dep, ret, true, override, false, false, ignore_order, e, NULL)) < 0) {
1528 if (r != -EBADR)
1529 goto fail;
1530
1531 if (e)
1532 dbus_error_free(e);
1533 }
1534
1535 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_BIND_TO], i)
1536 if ((r = transaction_add_job_and_dependencies(m, JOB_START, dep, ret, true, override, false, false, ignore_order, e, NULL)) < 0) {
1537
1538 if (r != -EBADR)
1539 goto fail;
1540
1541 if (e)
1542 dbus_error_free(e);
1543 }
1544
1545 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_REQUIRES_OVERRIDABLE], i)
1546 if ((r = transaction_add_job_and_dependencies(m, JOB_START, dep, ret, !override, override, false, false, ignore_order, e, NULL)) < 0) {
1547 log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->meta.id, bus_error(e, r));
1548
1549 if (e)
1550 dbus_error_free(e);
1551 }
1552
1553 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_WANTS], i)
1554 if ((r = transaction_add_job_and_dependencies(m, JOB_START, dep, ret, false, false, false, false, ignore_order, e, NULL)) < 0) {
1555 log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->meta.id, bus_error(e, r));
1556
1557 if (e)
1558 dbus_error_free(e);
1559 }
1560
1561 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_REQUISITE], i)
1562 if ((r = transaction_add_job_and_dependencies(m, JOB_VERIFY_ACTIVE, dep, ret, true, override, false, false, ignore_order, e, NULL)) < 0) {
1563
1564 if (r != -EBADR)
1565 goto fail;
1566
1567 if (e)
1568 dbus_error_free(e);
1569 }
1570
1571 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_REQUISITE_OVERRIDABLE], i)
1572 if ((r = transaction_add_job_and_dependencies(m, JOB_VERIFY_ACTIVE, dep, ret, !override, override, false, false, ignore_order, e, NULL)) < 0) {
1573 log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->meta.id, bus_error(e, r));
1574
1575 if (e)
1576 dbus_error_free(e);
1577 }
1578
1579 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_CONFLICTS], i)
1580 if ((r = transaction_add_job_and_dependencies(m, JOB_STOP, dep, ret, true, override, true, false, ignore_order, e, NULL)) < 0) {
1581
1582 if (r != -EBADR)
1583 goto fail;
1584
1585 if (e)
1586 dbus_error_free(e);
1587 }
1588
1589 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_CONFLICTED_BY], i)
1590 if ((r = transaction_add_job_and_dependencies(m, JOB_STOP, dep, ret, false, override, false, false, ignore_order, e, NULL)) < 0) {
1591 log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->meta.id, bus_error(e, r));
1592
1593 if (e)
1594 dbus_error_free(e);
1595 }
1596
1597 } else if (type == JOB_STOP || type == JOB_RESTART || type == JOB_TRY_RESTART) {
1598
1599 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_REQUIRED_BY], i)
1600 if ((r = transaction_add_job_and_dependencies(m, type, dep, ret, true, override, false, false, ignore_order, e, NULL)) < 0) {
1601
1602 if (r != -EBADR)
1603 goto fail;
1604
1605 if (e)
1606 dbus_error_free(e);
1607 }
1608
1609 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_BOUND_BY], i)
1610 if ((r = transaction_add_job_and_dependencies(m, type, dep, ret, true, override, false, false, ignore_order, e, NULL)) < 0) {
1611
1612 if (r != -EBADR)
1613 goto fail;
1614
1615 if (e)
1616 dbus_error_free(e);
1617 }
1618 }
1619
1620 /* JOB_VERIFY_ACTIVE, JOB_RELOAD require no dependency handling */
1621 }
1622
1623 if (_ret)
1624 *_ret = ret;
1625
1626 return 0;
1627
1628 fail:
1629 return r;
1630 }
1631
1632 static int transaction_add_isolate_jobs(Manager *m) {
1633 Iterator i;
1634 Unit *u;
1635 char *k;
1636 int r;
1637
1638 assert(m);
1639
1640 HASHMAP_FOREACH_KEY(u, k, m->units, i) {
1641
1642 /* ignore aliases */
1643 if (u->meta.id != k)
1644 continue;
1645
1646 if (u->meta.ignore_on_isolate)
1647 continue;
1648
1649 /* No need to stop inactive jobs */
1650 if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(u)) && !u->meta.job)
1651 continue;
1652
1653 /* Is there already something listed for this? */
1654 if (hashmap_get(m->transaction_jobs, u))
1655 continue;
1656
1657 if ((r = transaction_add_job_and_dependencies(m, JOB_STOP, u, NULL, true, false, false, false, false, NULL, NULL)) < 0)
1658 log_warning("Cannot add isolate job for unit %s, ignoring: %s", u->meta.id, strerror(-r));
1659 }
1660
1661 return 0;
1662 }
1663
1664 int manager_add_job(Manager *m, JobType type, Unit *unit, JobMode mode, bool override, DBusError *e, Job **_ret) {
1665 int r;
1666 Job *ret;
1667
1668 assert(m);
1669 assert(type < _JOB_TYPE_MAX);
1670 assert(unit);
1671 assert(mode < _JOB_MODE_MAX);
1672
1673 if (mode == JOB_ISOLATE && type != JOB_START) {
1674 dbus_set_error(e, BUS_ERROR_INVALID_JOB_MODE, "Isolate is only valid for start.");
1675 return -EINVAL;
1676 }
1677
1678 if (mode == JOB_ISOLATE && !unit->meta.allow_isolate) {
1679 dbus_set_error(e, BUS_ERROR_NO_ISOLATION, "Operation refused, unit may not be isolated.");
1680 return -EPERM;
1681 }
1682
1683 log_debug("Trying to enqueue job %s/%s/%s", unit->meta.id, job_type_to_string(type), job_mode_to_string(mode));
1684
1685 if ((r = transaction_add_job_and_dependencies(m, type, unit, NULL, true, override, false,
1686 mode == JOB_IGNORE_DEPENDENCIES || mode == JOB_IGNORE_REQUIREMENTS,
1687 mode == JOB_IGNORE_DEPENDENCIES, e, &ret)) < 0) {
1688 transaction_abort(m);
1689 return r;
1690 }
1691
1692 if (mode == JOB_ISOLATE)
1693 if ((r = transaction_add_isolate_jobs(m)) < 0) {
1694 transaction_abort(m);
1695 return r;
1696 }
1697
1698 if ((r = transaction_activate(m, mode, e)) < 0)
1699 return r;
1700
1701 log_debug("Enqueued job %s/%s as %u", unit->meta.id, job_type_to_string(type), (unsigned) ret->id);
1702
1703 if (_ret)
1704 *_ret = ret;
1705
1706 return 0;
1707 }
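
/* For illustration, a typical caller (e.g. the D-Bus StartUnit handler)
 * enqueues a start job for a unit roughly like this:
 *
 *     DBusError error;
 *     Job *j = NULL;
 *
 *     dbus_error_init(&error);
 *     r = manager_add_job(m, JOB_START, u, JOB_REPLACE, false, &error, &j);
 *     if (r < 0)
 *             log_error("Failed to enqueue job: %s", bus_error(&error, r));
 *     dbus_error_free(&error);
 */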
1708
1709 int manager_add_job_by_name(Manager *m, JobType type, const char *name, JobMode mode, bool override, DBusError *e, Job **_ret) {
1710 Unit *unit;
1711 int r;
1712
1713 assert(m);
1714 assert(type < _JOB_TYPE_MAX);
1715 assert(name);
1716 assert(mode < _JOB_MODE_MAX);
1717
1718 if ((r = manager_load_unit(m, name, NULL, NULL, &unit)) < 0)
1719 return r;
1720
1721 return manager_add_job(m, type, unit, mode, override, e, _ret);
1722 }
1723
1724 Job *manager_get_job(Manager *m, uint32_t id) {
1725 assert(m);
1726
1727 return hashmap_get(m->jobs, UINT32_TO_PTR(id));
1728 }
1729
1730 Unit *manager_get_unit(Manager *m, const char *name) {
1731 assert(m);
1732 assert(name);
1733
1734 return hashmap_get(m->units, name);
1735 }
1736
1737 unsigned manager_dispatch_load_queue(Manager *m) {
1738 Meta *meta;
1739 unsigned n = 0;
1740
1741 assert(m);
1742
1743 /* Make sure we are not run recursively */
1744 if (m->dispatching_load_queue)
1745 return 0;
1746
1747 m->dispatching_load_queue = true;
1748
1749 /* Dispatches the load queue. Takes a unit from the queue and
1750 * tries to load its data until the queue is empty */
1751
1752 while ((meta = m->load_queue)) {
1753 assert(meta->in_load_queue);
1754
1755 unit_load((Unit*) meta);
1756 n++;
1757 }
1758
1759 m->dispatching_load_queue = false;
1760 return n;
1761 }
1762
1763 int manager_load_unit_prepare(Manager *m, const char *name, const char *path, DBusError *e, Unit **_ret) {
1764 Unit *ret;
1765 int r;
1766
1767 assert(m);
1768 assert(name || path);
1769
1770 /* This will prepare the unit for loading, but not actually
1771 * load anything from disk. */
1772
1773 if (path && !is_path(path)) {
1774 dbus_set_error(e, BUS_ERROR_INVALID_PATH, "Path %s is not absolute.", path);
1775 return -EINVAL;
1776 }
1777
1778 if (!name)
1779 name = file_name_from_path(path);
1780
1781 if (!unit_name_is_valid(name, false)) {
1782 dbus_set_error(e, BUS_ERROR_INVALID_NAME, "Unit name %s is not valid.", name);
1783 return -EINVAL;
1784 }
1785
1786 if ((ret = manager_get_unit(m, name))) {
1787 *_ret = ret;
1788 return 1;
1789 }
1790
1791 if (!(ret = unit_new(m)))
1792 return -ENOMEM;
1793
1794 if (path)
1795 if (!(ret->meta.fragment_path = strdup(path))) {
1796 unit_free(ret);
1797 return -ENOMEM;
1798 }
1799
1800 if ((r = unit_add_name(ret, name)) < 0) {
1801 unit_free(ret);
1802 return r;
1803 }
1804
1805 unit_add_to_load_queue(ret);
1806 unit_add_to_dbus_queue(ret);
1807 unit_add_to_gc_queue(ret);
1808
1809 if (_ret)
1810 *_ret = ret;
1811
1812 return 0;
1813 }
1814
1815 int manager_load_unit(Manager *m, const char *name, const char *path, DBusError *e, Unit **_ret) {
1816 int r;
1817
1818 assert(m);
1819
1820 /* This will load the service information files, but not actually
1821 * start any services or anything. */
1822
1823 if ((r = manager_load_unit_prepare(m, name, path, e, _ret)) != 0)
1824 return r;
1825
1826 manager_dispatch_load_queue(m);
1827
1828 if (_ret)
1829 *_ret = unit_follow_merge(*_ret);
1830
1831 return 0;
1832 }
1833
1834 void manager_dump_jobs(Manager *s, FILE *f, const char *prefix) {
1835 Iterator i;
1836 Job *j;
1837
1838 assert(s);
1839 assert(f);
1840
1841 HASHMAP_FOREACH(j, s->jobs, i)
1842 job_dump(j, f, prefix);
1843 }
1844
1845 void manager_dump_units(Manager *s, FILE *f, const char *prefix) {
1846 Iterator i;
1847 Unit *u;
1848 const char *t;
1849
1850 assert(s);
1851 assert(f);
1852
1853 HASHMAP_FOREACH_KEY(u, t, s->units, i)
1854 if (u->meta.id == t)
1855 unit_dump(u, f, prefix);
1856 }
1857
1858 void manager_clear_jobs(Manager *m) {
1859 Job *j;
1860
1861 assert(m);
1862
1863 transaction_abort(m);
1864
1865 while ((j = hashmap_first(m->jobs)))
1866 job_finish_and_invalidate(j, JOB_CANCELED);
1867 }
1868
1869 unsigned manager_dispatch_run_queue(Manager *m) {
1870 Job *j;
1871 unsigned n = 0;
1872
1873 if (m->dispatching_run_queue)
1874 return 0;
1875
1876 m->dispatching_run_queue = true;
1877
1878 while ((j = m->run_queue)) {
1879 assert(j->installed);
1880 assert(j->in_run_queue);
1881
1882 job_run_and_invalidate(j);
1883 n++;
1884 }
1885
1886 m->dispatching_run_queue = false;
1887 return n;
1888 }
1889
1890 unsigned manager_dispatch_dbus_queue(Manager *m) {
1891 Job *j;
1892 Meta *meta;
1893 unsigned n = 0;
1894
1895 assert(m);
1896
1897 if (m->dispatching_dbus_queue)
1898 return 0;
1899
1900 m->dispatching_dbus_queue = true;
1901
1902 while ((meta = m->dbus_unit_queue)) {
1903 assert(meta->in_dbus_queue);
1904
1905 bus_unit_send_change_signal((Unit*) meta);
1906 n++;
1907 }
1908
1909 while ((j = m->dbus_job_queue)) {
1910 assert(j->in_dbus_queue);
1911
1912 bus_job_send_change_signal(j);
1913 n++;
1914 }
1915
1916 m->dispatching_dbus_queue = false;
1917 return n;
1918 }
1919
1920 static int manager_process_notify_fd(Manager *m) {
1921 ssize_t n;
1922
1923 assert(m);
1924
1925 for (;;) {
1926 char buf[4096];
1927 struct msghdr msghdr;
1928 struct iovec iovec;
1929 struct ucred *ucred;
1930 union {
1931 struct cmsghdr cmsghdr;
1932 uint8_t buf[CMSG_SPACE(sizeof(struct ucred))];
1933 } control;
1934 Unit *u;
1935 char **tags;
1936
1937 zero(iovec);
1938 iovec.iov_base = buf;
1939 iovec.iov_len = sizeof(buf)-1;
1940
1941 zero(control);
1942 zero(msghdr);
1943 msghdr.msg_iov = &iovec;
1944 msghdr.msg_iovlen = 1;
1945 msghdr.msg_control = &control;
1946 msghdr.msg_controllen = sizeof(control);
1947
1948 if ((n = recvmsg(m->notify_watch.fd, &msghdr, MSG_DONTWAIT)) <= 0) {
1949 if (n >= 0)
1950 return -EIO;
1951
1952 if (errno == EAGAIN || errno == EINTR)
1953 break;
1954
1955 return -errno;
1956 }
1957
1958 if (msghdr.msg_controllen < CMSG_LEN(sizeof(struct ucred)) ||
1959 control.cmsghdr.cmsg_level != SOL_SOCKET ||
1960 control.cmsghdr.cmsg_type != SCM_CREDENTIALS ||
1961 control.cmsghdr.cmsg_len != CMSG_LEN(sizeof(struct ucred))) {
1962 log_warning("Received notify message without credentials. Ignoring.");
1963 continue;
1964 }
1965
1966 ucred = (struct ucred*) CMSG_DATA(&control.cmsghdr);
1967
1968 if (!(u = hashmap_get(m->watch_pids, LONG_TO_PTR(ucred->pid))))
1969 if (!(u = cgroup_unit_by_pid(m, ucred->pid))) {
1970 log_warning("Cannot find unit for notify message of PID %lu.", (unsigned long) ucred->pid);
1971 continue;
1972 }
1973
1974 assert((size_t) n < sizeof(buf));
1975 buf[n] = 0;
1976 if (!(tags = strv_split(buf, "\n\r")))
1977 return -ENOMEM;
1978
1979 log_debug("Got notification message for unit %s", u->meta.id);
1980
1981 if (UNIT_VTABLE(u)->notify_message)
1982 UNIT_VTABLE(u)->notify_message(u, ucred->pid, tags);
1983
1984 strv_free(tags);
1985 }
1986
1987 return 0;
1988 }
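
/* The payloads received above are the newline-separated variable assignments
 * documented for sd_notify(3), for example:
 *
 *     READY=1
 *     STATUS=Completed initialization
 *     MAINPID=4711
 *
 * After splitting them into tags they are handed to the unit type's
 * notify_message() hook; services use this for Type=notify readiness and
 * status text. */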
1989
1990 static int manager_dispatch_sigchld(Manager *m) {
1991 assert(m);
1992
1993 for (;;) {
1994 siginfo_t si;
1995 Unit *u;
1996 int r;
1997
1998 zero(si);
1999
2000 /* First we call waitid() for a PID and do not reap the
2001 * zombie. That way we can still access /proc/$PID for
2002 * it while it is a zombie. */
2003 if (waitid(P_ALL, 0, &si, WEXITED|WNOHANG|WNOWAIT) < 0) {
2004
2005 if (errno == ECHILD)
2006 break;
2007
2008 if (errno == EINTR)
2009 continue;
2010
2011 return -errno;
2012 }
2013
2014 if (si.si_pid <= 0)
2015 break;
2016
2017 if (si.si_code == CLD_EXITED || si.si_code == CLD_KILLED || si.si_code == CLD_DUMPED) {
2018 char *name = NULL;
2019
2020 get_process_name(si.si_pid, &name);
2021 log_debug("Got SIGCHLD for process %lu (%s)", (unsigned long) si.si_pid, strna(name));
2022 free(name);
2023 }
2024
2025 /* Let's flush any message the dying child might still
2026 * have queued for us. This ensures that the process
2027 * still exists in /proc so that we can figure out
2028 * which cgroup and hence unit it belongs to. */
2029 if ((r = manager_process_notify_fd(m)) < 0)
2030 return r;
2031
2032 /* And now figure out the unit this belongs to */
2033 if (!(u = hashmap_get(m->watch_pids, LONG_TO_PTR(si.si_pid))))
2034 u = cgroup_unit_by_pid(m, si.si_pid);
2035
2036 /* And now, we actually reap the zombie. */
2037 if (waitid(P_PID, si.si_pid, &si, WEXITED) < 0) {
2038 if (errno == EINTR)
2039 continue;
2040
2041 return -errno;
2042 }
2043
2044 if (si.si_code != CLD_EXITED && si.si_code != CLD_KILLED && si.si_code != CLD_DUMPED)
2045 continue;
2046
2047 log_debug("Child %lu died (code=%s, status=%i/%s)",
2048 (long unsigned) si.si_pid,
2049 sigchld_code_to_string(si.si_code),
2050 si.si_status,
2051 strna(si.si_code == CLD_EXITED
2052 ? exit_status_to_string(si.si_status, EXIT_STATUS_FULL)
2053 : signal_to_string(si.si_status)));
2054
2055 if (!u)
2056 continue;
2057
2058 log_debug("Child %lu belongs to %s", (long unsigned) si.si_pid, u->meta.id);
2059
2060 hashmap_remove(m->watch_pids, LONG_TO_PTR(si.si_pid));
2061 UNIT_VTABLE(u)->sigchld_event(u, si.si_pid, si.si_code, si.si_status);
2062 }
2063
2064 return 0;
2065 }
2066
2067 static int manager_start_target(Manager *m, const char *name, JobMode mode) {
2068 int r;
2069 DBusError error;
2070
2071 dbus_error_init(&error);
2072
2073 log_debug("Activating special unit %s", name);
2074
2075 if ((r = manager_add_job_by_name(m, JOB_START, name, mode, true, &error, NULL)) < 0)
2076 log_error("Failed to enqueue %s job: %s", name, bus_error(&error, r));
2077
2078 dbus_error_free(&error);
2079
2080 return r;
2081 }
2082
2083 static int manager_process_signal_fd(Manager *m) {
2084 ssize_t n;
2085 struct signalfd_siginfo sfsi;
2086 bool sigchld = false;
2087
2088 assert(m);
2089
2090 for (;;) {
2091 if ((n = read(m->signal_watch.fd, &sfsi, sizeof(sfsi))) != sizeof(sfsi)) {
2092
2093 if (n >= 0)
2094 return -EIO;
2095
2096 if (errno == EINTR || errno == EAGAIN)
2097 break;
2098
2099 return -errno;
2100 }
2101
2102 if (sfsi.ssi_pid > 0) {
2103 char *p = NULL;
2104
2105 get_process_name(sfsi.ssi_pid, &p);
2106
2107 log_debug("Received SIG%s from PID %lu (%s).",
2108 strna(signal_to_string(sfsi.ssi_signo)),
2109 (unsigned long) sfsi.ssi_pid, strna(p));
2110 free(p);
2111 } else
2112 log_debug("Received SIG%s.", strna(signal_to_string(sfsi.ssi_signo)));
2113
2114 switch (sfsi.ssi_signo) {
2115
2116 case SIGCHLD:
2117 sigchld = true;
2118 break;
2119
2120 case SIGTERM:
2121 if (m->running_as == MANAGER_SYSTEM) {
2122 /* This is for compatibility with the
2123 * original sysvinit */
2124 m->exit_code = MANAGER_REEXECUTE;
2125 break;
2126 }
2127
2128 /* Fall through */
2129
2130 case SIGINT:
2131 if (m->running_as == MANAGER_SYSTEM) {
2132 manager_start_target(m, SPECIAL_CTRL_ALT_DEL_TARGET, JOB_REPLACE);
2133 break;
2134 }
2135
2136 /* Run the exit target if there is one; if not, just exit. */
2137 if (manager_start_target(m, SPECIAL_EXIT_TARGET, JOB_REPLACE) < 0) {
2138 m->exit_code = MANAGER_EXIT;
2139 return 0;
2140 }
2141
2142 break;
2143
2144 case SIGWINCH:
2145 if (m->running_as == MANAGER_SYSTEM)
2146 manager_start_target(m, SPECIAL_KBREQUEST_TARGET, JOB_REPLACE);
2147
2148 /* This is a nop on non-init */
2149 break;
2150
2151 case SIGPWR:
2152 if (m->running_as == MANAGER_SYSTEM)
2153 manager_start_target(m, SPECIAL_SIGPWR_TARGET, JOB_REPLACE);
2154
2155 /* This is a nop on non-init */
2156 break;
2157
2158 case SIGUSR1: {
2159 Unit *u;
2160
2161 u = manager_get_unit(m, SPECIAL_DBUS_SERVICE);
2162
2163 if (!u || UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u))) {
2164 log_info("Trying to reconnect to bus...");
2165 bus_init(m, true);
2166 }
2167
2168 if (!u || !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u))) {
2169 log_info("Loading D-Bus service...");
2170 manager_start_target(m, SPECIAL_DBUS_SERVICE, JOB_REPLACE);
2171 }
2172
2173 break;
2174 }
2175
2176 case SIGUSR2: {
2177 FILE *f;
2178 char *dump = NULL;
2179 size_t size;
2180
2181 if (!(f = open_memstream(&dump, &size))) {
2182 log_warning("Failed to allocate memory stream.");
2183 break;
2184 }
2185
2186 manager_dump_units(m, f, "\t");
2187 manager_dump_jobs(m, f, "\t");
2188
2189 if (ferror(f)) {
2190 fclose(f);
2191 free(dump);
2192 log_warning("Failed to write status stream");
2193 break;
2194 }
2195
2196 fclose(f);
2197 log_dump(LOG_INFO, dump);
2198 free(dump);
2199
2200 break;
2201 }
2202
2203 case SIGHUP:
2204 m->exit_code = MANAGER_RELOAD;
2205 break;
2206
2207 default: {
2208
2209 /* Starting SIGRTMIN+0 */
2210 static const char * const target_table[] = {
2211 [0] = SPECIAL_DEFAULT_TARGET,
2212 [1] = SPECIAL_RESCUE_TARGET,
2213 [2] = SPECIAL_EMERGENCY_TARGET,
2214 [3] = SPECIAL_HALT_TARGET,
2215 [4] = SPECIAL_POWEROFF_TARGET,
2216 [5] = SPECIAL_REBOOT_TARGET,
2217 [6] = SPECIAL_KEXEC_TARGET
2218 };
2219
2220 /* Starting SIGRTMIN+13, so that target halt and system halt are 10 apart */
2221 static const ManagerExitCode code_table[] = {
2222 [0] = MANAGER_HALT,
2223 [1] = MANAGER_POWEROFF,
2224 [2] = MANAGER_REBOOT,
2225 [3] = MANAGER_KEXEC
2226 };
2227
2228 if ((int) sfsi.ssi_signo >= SIGRTMIN+0 &&
2229 (int) sfsi.ssi_signo < SIGRTMIN+(int) ELEMENTSOF(target_table)) {
2230 manager_start_target(m, target_table[sfsi.ssi_signo - SIGRTMIN],
2231 (sfsi.ssi_signo == SIGRTMIN+1 || sfsi.ssi_signo == SIGRTMIN+2) ? JOB_ISOLATE : JOB_REPLACE);
2232 break;
2233 }
2234
2235 if ((int) sfsi.ssi_signo >= SIGRTMIN+13 &&
2236 (int) sfsi.ssi_signo < SIGRTMIN+13+(int) ELEMENTSOF(code_table)) {
2237 m->exit_code = code_table[sfsi.ssi_signo - SIGRTMIN - 13];
2238 break;
2239 }
2240
2241 switch (sfsi.ssi_signo - SIGRTMIN) {
2242
2243 case 20:
2244 log_debug("Enabling showing of status.");
2245 m->show_status = true;
2246 break;
2247
2248 case 21:
2249 log_debug("Disabling showing of status.");
2250 m->show_status = false;
2251 break;
2252
2253 case 22:
2254 log_set_max_level(LOG_DEBUG);
2255 log_notice("Setting log level to debug.");
2256 break;
2257
2258 case 23:
2259 log_set_max_level(LOG_INFO);
2260 log_notice("Setting log level to info.");
2261 break;
2262
2263 case 27:
2264 log_set_target(LOG_TARGET_CONSOLE);
2265 log_notice("Setting log target to console.");
2266 break;
2267
2268 case 28:
2269 log_set_target(LOG_TARGET_KMSG);
2270 log_notice("Setting log target to kmsg.");
2271 break;
2272
2273 case 29:
2274 log_set_target(LOG_TARGET_SYSLOG_OR_KMSG);
2275 log_notice("Setting log target to syslog-or-kmsg.");
2276 break;
2277
2278 default:
2279 log_warning("Got unhandled signal <%s>.", strna(signal_to_string(sfsi.ssi_signo)));
2280 }
2281 }
2282 }
2283 }
2284
2285 if (sigchld)
2286 return manager_dispatch_sigchld(m);
2287
2288 return 0;
2289 }
2290
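/* Editorial example (not upstream code): the default branch above is what this
 * commit extends, mapping real-time signals to runtime log and status
 * controls. A privileged tool could drive them like this; the helper names
 * are hypothetical, and the offsets (+22/+23 for the level, +27..+29 for the
 * target, +20/+21 for status output) match the switch in this version. */
#if 0
#include <signal.h>

static int example_enable_debug_logging(void) {
        /* SIGRTMIN+22: set the manager's log level to debug */
        return kill(1, SIGRTMIN + 22);
}

static int example_log_to_console(void) {
        /* SIGRTMIN+27: send manager logging to the console */
        return kill(1, SIGRTMIN + 27);
}

static int example_show_status(void) {
        /* SIGRTMIN+20: enable printing of unit status updates */
        return kill(1, SIGRTMIN + 20);
}
#endif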
2291 static int process_event(Manager *m, struct epoll_event *ev) {
2292 int r;
2293 Watch *w;
2294
2295 assert(m);
2296 assert(ev);
2297
2298 assert_se(w = ev->data.ptr);
2299
2300 if (w->type == WATCH_INVALID)
2301 return 0;
2302
2303 switch (w->type) {
2304
2305 case WATCH_SIGNAL:
2306
2307 /* An incoming signal? */
2308 if (ev->events != EPOLLIN)
2309 return -EINVAL;
2310
2311 if ((r = manager_process_signal_fd(m)) < 0)
2312 return r;
2313
2314 break;
2315
2316 case WATCH_NOTIFY:
2317
2318 /* An incoming daemon notification event? */
2319 if (ev->events != EPOLLIN)
2320 return -EINVAL;
2321
2322 if ((r = manager_process_notify_fd(m)) < 0)
2323 return r;
2324
2325 break;
2326
2327 case WATCH_FD:
2328
2329 /* Some fd event, to be dispatched to the units */
2330 UNIT_VTABLE(w->data.unit)->fd_event(w->data.unit, w->fd, ev->events, w);
2331 break;
2332
2333 case WATCH_UNIT_TIMER:
2334 case WATCH_JOB_TIMER: {
2335 uint64_t v;
2336 ssize_t k;
2337
2338 /* Some timer event, to be dispatched to the units */
2339 if ((k = read(w->fd, &v, sizeof(v))) != sizeof(v)) {
2340
2341 if (k < 0 && (errno == EINTR || errno == EAGAIN))
2342 break;
2343
2344 return k < 0 ? -errno : -EIO;
2345 }
2346
2347 if (w->type == WATCH_UNIT_TIMER)
2348 UNIT_VTABLE(w->data.unit)->timer_event(w->data.unit, v, w);
2349 else
2350 job_timer_event(w->data.job, v, w);
2351 break;
2352 }
2353
2354 case WATCH_MOUNT:
2355 /* Some mount table change, intended for the mount subsystem */
2356 mount_fd_event(m, ev->events);
2357 break;
2358
2359 case WATCH_SWAP:
2360 /* Some swap table change, intended for the swap subsystem */
2361 swap_fd_event(m, ev->events);
2362 break;
2363
2364 case WATCH_UDEV:
2365 /* Some notification from udev, intended for the device subsystem */
2366 device_fd_event(m, ev->events);
2367 break;
2368
2369 case WATCH_DBUS_WATCH:
2370 bus_watch_event(m, w, ev->events);
2371 break;
2372
2373 case WATCH_DBUS_TIMEOUT:
2374 bus_timeout_event(m, w, ev->events);
2375 break;
2376
2377 default:
2378 log_error("event type=%i", w->type);
2379 assert_not_reached("Unknown epoll event type.");
2380 }
2381
2382 return 0;
2383 }
2384
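/* Editorial example (not upstream code): process_event() works because every
 * fd registered with the manager's epoll instance stores a pointer to a typed
 * Watch structure in epoll_data.ptr. The reduced sketch below shows that
 * tagged-pointer pattern on its own; the ExampleWatch names are hypothetical. */
#if 0
#include <sys/epoll.h>

typedef enum ExampleWatchType {
        EXAMPLE_WATCH_SIGNAL,
        EXAMPLE_WATCH_TIMER
} ExampleWatchType;

typedef struct ExampleWatch {
        ExampleWatchType type;
        int fd;
} ExampleWatch;

static int example_register(int epoll_fd, ExampleWatch *w) {
        struct epoll_event ev;

        ev.events = EPOLLIN;
        ev.data.ptr = w;   /* The pointer comes back verbatim from epoll_wait() */

        return epoll_ctl(epoll_fd, EPOLL_CTL_ADD, w->fd, &ev);
}

static void example_dispatch(struct epoll_event *ev) {
        ExampleWatch *w = ev->data.ptr;

        switch (w->type) {

        case EXAMPLE_WATCH_SIGNAL:
                /* ... drain the signalfd ... */
                break;

        case EXAMPLE_WATCH_TIMER:
                /* ... read the timerfd expiration counter ... */
                break;
        }
}
#endif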
2385 int manager_loop(Manager *m) {
2386 int r;
2387
2388 RATELIMIT_DEFINE(rl, 1*USEC_PER_SEC, 50000);
2389
2390 assert(m);
2391 m->exit_code = MANAGER_RUNNING;
2392
2393 /* Release the path cache */
2394 set_free_free(m->unit_path_cache);
2395 m->unit_path_cache = NULL;
2396
2397 manager_check_finished(m);
2398
2399 /* There might still be some zombies hanging around from
2400 * before we were exec()'ed. Let's reap them. */
2401 if ((r = manager_dispatch_sigchld(m)) < 0)
2402 return r;
2403
2404 while (m->exit_code == MANAGER_RUNNING) {
2405 struct epoll_event event;
2406 int n;
2407
2408 if (!ratelimit_test(&rl)) {
2409 /* Yay, something is going seriously wrong, pause a little */
2410 log_warning("Looping too fast. Throttling execution a little.");
2411 sleep(1);
2412 }
2413
2414 if (manager_dispatch_load_queue(m) > 0)
2415 continue;
2416
2417 if (manager_dispatch_run_queue(m) > 0)
2418 continue;
2419
2420 if (bus_dispatch(m) > 0)
2421 continue;
2422
2423 if (manager_dispatch_cleanup_queue(m) > 0)
2424 continue;
2425
2426 if (manager_dispatch_gc_queue(m) > 0)
2427 continue;
2428
2429 if (manager_dispatch_dbus_queue(m) > 0)
2430 continue;
2431
2432 if (swap_dispatch_reload(m) > 0)
2433 continue;
2434
2435 if ((n = epoll_wait(m->epoll_fd, &event, 1, -1)) < 0) {
2436
2437 if (errno == EINTR)
2438 continue;
2439
2440 return -errno;
2441 }
2442
2443 assert(n == 1);
2444
2445 if ((r = process_event(m, &event)) < 0)
2446 return r;
2447 }
2448
2449 return m->exit_code;
2450 }
2451
2452 int manager_get_unit_from_dbus_path(Manager *m, const char *s, Unit **_u) {
2453 char *n;
2454 Unit *u;
2455
2456 assert(m);
2457 assert(s);
2458 assert(_u);
2459
2460 if (!startswith(s, "/org/freedesktop/systemd1/unit/"))
2461 return -EINVAL;
2462
2463 if (!(n = bus_path_unescape(s+31)))
2464 return -ENOMEM;
2465
2466 u = manager_get_unit(m, n);
2467 free(n);
2468
2469 if (!u)
2470 return -ENOENT;
2471
2472 *_u = u;
2473
2474 return 0;
2475 }
2476
2477 int manager_get_job_from_dbus_path(Manager *m, const char *s, Job **_j) {
2478 Job *j;
2479 unsigned id;
2480 int r;
2481
2482 assert(m);
2483 assert(s);
2484 assert(_j);
2485
2486 if (!startswith(s, "/org/freedesktop/systemd1/job/"))
2487 return -EINVAL;
2488
2489 if ((r = safe_atou(s + 30, &id)) < 0)
2490 return r;
2491
2492 if (!(j = manager_get_job(m, id)))
2493 return -ENOENT;
2494
2495 *_j = j;
2496
2497 return 0;
2498 }
2499
2500 void manager_send_unit_audit(Manager *m, Unit *u, int type, bool success) {
2501
2502 #ifdef HAVE_AUDIT
2503 char *p;
2504
2505 if (m->audit_fd < 0)
2506 return;
2507
2508 /* Don't generate audit events if the service was already
2509 * started and we're just deserializing */
2510 if (m->n_reloading > 0)
2511 return;
2512
2513 if (m->running_as != MANAGER_SYSTEM)
2514 return;
2515
2516 if (u->meta.type != UNIT_SERVICE)
2517 return;
2518
2519 if (!(p = unit_name_to_prefix_and_instance(u->meta.id))) {
2520 log_error("Failed to allocate unit name for audit message: %s", strerror(ENOMEM));
2521 return;
2522 }
2523
2524 if (audit_log_user_comm_message(m->audit_fd, type, "", p, NULL, NULL, NULL, success) < 0) {
2525 log_warning("Failed to send audit message: %m");
2526
2527 if (errno == EPERM) {
2528 /* We aren't allowed to send audit messages?
2529 * Then let's not retry, to avoid
2530 * spamming the user with the same
2531 * messages over and over. */
2532
2533 audit_close(m->audit_fd);
2534 m->audit_fd = -1;
2535 }
2536 }
2537
2538 free(p);
2539 #endif
2540
2541 }
2542
2543 void manager_send_unit_plymouth(Manager *m, Unit *u) {
2544 int fd = -1;
2545 union sockaddr_union sa;
2546 int n = 0;
2547 char *message = NULL;
2548
2549 /* Don't generate plymouth events if the service was already
2550 * started and we're just deserializing */
2551 if (m->n_reloading > 0)
2552 return;
2553
2554 if (m->running_as != MANAGER_SYSTEM)
2555 return;
2556
2557 if (u->meta.type != UNIT_SERVICE &&
2558 u->meta.type != UNIT_MOUNT &&
2559 u->meta.type != UNIT_SWAP)
2560 return;
2561
2562 /* We set SOCK_NONBLOCK here so that we would rather drop the
2563 * message than wait for Plymouth. */
2564 if ((fd = socket(AF_UNIX, SOCK_STREAM|SOCK_CLOEXEC|SOCK_NONBLOCK, 0)) < 0) {
2565 log_error("socket() failed: %m");
2566 return;
2567 }
2568
2569 zero(sa);
2570 sa.sa.sa_family = AF_UNIX;
2571 strncpy(sa.un.sun_path+1, "/org/freedesktop/plymouthd", sizeof(sa.un.sun_path)-1);
2572 if (connect(fd, &sa.sa, offsetof(struct sockaddr_un, sun_path) + 1 + strlen(sa.un.sun_path+1)) < 0) {
2573
2574 if (errno != EPIPE &&
2575 errno != EAGAIN &&
2576 errno != ENOENT &&
2577 errno != ECONNREFUSED &&
2578 errno != ECONNRESET &&
2579 errno != ECONNABORTED)
2580 log_error("connect() failed: %m");
2581
2582 goto finish;
2583 }
2584
2585 if (asprintf(&message, "U\002%c%s%n", (int) (strlen(u->meta.id) + 1), u->meta.id, &n) < 0) {
2586 log_error("Out of memory");
2587 goto finish;
2588 }
2589
2590 errno = 0;
2591 if (write(fd, message, n + 1) != n + 1) {
2592
2593 if (errno != EPIPE &&
2594 errno != EAGAIN &&
2595 errno != ENOENT &&
2596 errno != ECONNREFUSED &&
2597 errno != ECONNRESET &&
2598 errno != ECONNABORTED)
2599 log_error("Failed to write Plymouth message: %m");
2600
2601 goto finish;
2602 }
2603
2604 finish:
2605 if (fd >= 0)
2606 close_nointr_nofail(fd);
2607
2608 free(message);
2609 }
2610
2611 void manager_dispatch_bus_name_owner_changed(
2612 Manager *m,
2613 const char *name,
2614 const char* old_owner,
2615 const char *new_owner) {
2616
2617 Unit *u;
2618
2619 assert(m);
2620 assert(name);
2621
2622 if (!(u = hashmap_get(m->watch_bus, name)))
2623 return;
2624
2625 UNIT_VTABLE(u)->bus_name_owner_change(u, name, old_owner, new_owner);
2626 }
2627
2628 void manager_dispatch_bus_query_pid_done(
2629 Manager *m,
2630 const char *name,
2631 pid_t pid) {
2632
2633 Unit *u;
2634
2635 assert(m);
2636 assert(name);
2637 assert(pid >= 1);
2638
2639 if (!(u = hashmap_get(m->watch_bus, name)))
2640 return;
2641
2642 UNIT_VTABLE(u)->bus_query_pid_done(u, name, pid);
2643 }
2644
2645 int manager_open_serialization(Manager *m, FILE **_f) {
2646 char *path = NULL;
2647 mode_t saved_umask;
2648 int fd;
2649 FILE *f;
2650
2651 assert(_f);
2652
2653 if (m->running_as == MANAGER_SYSTEM)
2654 asprintf(&path, "/run/systemd/dump-%lu-XXXXXX", (unsigned long) getpid());
2655 else
2656 asprintf(&path, "/tmp/systemd-dump-%lu-XXXXXX", (unsigned long) getpid());
2657
2658 if (!path)
2659 return -ENOMEM;
2660
2661 saved_umask = umask(0077);
2662 fd = mkostemp(path, O_RDWR|O_CLOEXEC);
2663 umask(saved_umask);
2664
2665 if (fd < 0) {
2666 free(path);
2667 return -errno;
2668 }
2669
2670 unlink(path);
2671
2672 log_debug("Serializing state to %s", path);
2673 free(path);
2674
2675 if (!(f = fdopen(fd, "w+")))
2676 return -errno;
2677
2678 *_f = f;
2679
2680 return 0;
2681 }
2682
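/* Editorial example (not upstream code): the anonymous temporary file pattern
 * used by manager_open_serialization() above, on its own. The file is
 * unlinked immediately after creation, so it has no name and vanishes once
 * the last descriptor is closed; only the FILE* handle survives. The function
 * name is hypothetical. */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/stat.h>
#include <unistd.h>

static FILE* example_open_anonymous(void) {
        char path[] = "/tmp/example-dump-XXXXXX";
        mode_t saved_umask;
        int fd;
        FILE *f;

        /* Keep the short-lived file private to us */
        saved_umask = umask(0077);
        fd = mkostemp(path, O_CLOEXEC);
        umask(saved_umask);

        if (fd < 0)
                return NULL;

        /* Drop the name right away; the open fd keeps the inode alive */
        unlink(path);

        if (!(f = fdopen(fd, "w+"))) {
                close(fd);
                return NULL;
        }

        return f;
}
#endif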
2683 int manager_serialize(Manager *m, FILE *f, FDSet *fds) {
2684 Iterator i;
2685 Unit *u;
2686 const char *t;
2687 int r;
2688
2689 assert(m);
2690 assert(f);
2691 assert(fds);
2692
2693 m->n_reloading ++;
2694
2695 fprintf(f, "current-job-id=%i\n", m->current_job_id);
2696 fprintf(f, "taint-usr=%s\n", yes_no(m->taint_usr));
2697
2698 dual_timestamp_serialize(f, "initrd-timestamp", &m->initrd_timestamp);
2699 dual_timestamp_serialize(f, "startup-timestamp", &m->startup_timestamp);
2700 dual_timestamp_serialize(f, "finish-timestamp", &m->finish_timestamp);
2701
2702 fputc('\n', f);
2703
2704 HASHMAP_FOREACH_KEY(u, t, m->units, i) {
2705 if (u->meta.id != t)
2706 continue;
2707
2708 if (!unit_can_serialize(u))
2709 continue;
2710
2711 /* Start marker */
2712 fputs(u->meta.id, f);
2713 fputc('\n', f);
2714
2715 if ((r = unit_serialize(u, f, fds)) < 0) {
2716 m->n_reloading --;
2717 return r;
2718 }
2719 }
2720
2721 assert(m->n_reloading > 0);
2722 m->n_reloading --;
2723
2724 if (ferror(f))
2725 return -EIO;
2726
2727 r = bus_fdset_add_all(m, fds);
2728 if (r < 0)
2729 return r;
2730
2731 return 0;
2732 }
2733
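/* Editorial note: the stream written above has a simple line-based layout,
 * roughly (values and the unit name are illustrative; the per-unit items come
 * from unit_serialize() and are not defined in this file):
 *
 *     current-job-id=123
 *     taint-usr=no
 *     startup-timestamp=<serialized dual timestamp>
 *     finish-timestamp=<serialized dual timestamp>
 *
 *     getty@tty1.service
 *     <unit-specific serialization items>
 *
 * manager_deserialize() below parses the header up to the empty line and then
 * iterates over the per-unit sections, each introduced by its unit name. */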
2734 int manager_deserialize(Manager *m, FILE *f, FDSet *fds) {
2735 int r = 0;
2736
2737 assert(m);
2738 assert(f);
2739
2740 log_debug("Deserializing state...");
2741
2742 m->n_reloading ++;
2743
2744 for (;;) {
2745 char line[LINE_MAX], *l;
2746
2747 if (!fgets(line, sizeof(line), f)) {
2748 if (feof(f))
2749 r = 0;
2750 else
2751 r = -errno;
2752
2753 goto finish;
2754 }
2755
2756 char_array_0(line);
2757 l = strstrip(line);
2758
2759 if (l[0] == 0)
2760 break;
2761
2762 if (startswith(l, "current-job-id=")) {
2763 uint32_t id;
2764
2765 if (safe_atou32(l+15, &id) < 0)
2766 log_debug("Failed to parse current job id value %s", l+15);
2767 else
2768 m->current_job_id = MAX(m->current_job_id, id);
2769 } else if (startswith(l, "taint-usr=")) {
2770 int b;
2771
2772 if ((b = parse_boolean(l+10)) < 0)
2773 log_debug("Failed to parse taint /usr flag %s", l+10);
2774 else
2775 m->taint_usr = m->taint_usr || b;
2776 } else if (startswith(l, "initrd-timestamp="))
2777 dual_timestamp_deserialize(l+17, &m->initrd_timestamp);
2778 else if (startswith(l, "startup-timestamp="))
2779 dual_timestamp_deserialize(l+18, &m->startup_timestamp);
2780 else if (startswith(l, "finish-timestamp="))
2781 dual_timestamp_deserialize(l+17, &m->finish_timestamp);
2782 else
2783 log_debug("Unknown serialization item '%s'", l);
2784 }
2785
2786 for (;;) {
2787 Unit *u;
2788 char name[UNIT_NAME_MAX+2];
2789
2790 /* Start marker */
2791 if (!fgets(name, sizeof(name), f)) {
2792 if (feof(f))
2793 r = 0;
2794 else
2795 r = -errno;
2796
2797 goto finish;
2798 }
2799
2800 char_array_0(name);
2801
2802 if ((r = manager_load_unit(m, strstrip(name), NULL, NULL, &u)) < 0)
2803 goto finish;
2804
2805 if ((r = unit_deserialize(u, f, fds)) < 0)
2806 goto finish;
2807 }
2808
2809 finish:
2810 if (ferror(f)) {
2811 r = -EIO;
2812 goto finish;
2813 }
2814
2815 assert(m->n_reloading > 0);
2816 m->n_reloading --;
2817
2818 return r;
2819 }
2820
2821 int manager_reload(Manager *m) {
2822 int r, q;
2823 FILE *f;
2824 FDSet *fds;
2825
2826 assert(m);
2827
2828 if ((r = manager_open_serialization(m, &f)) < 0)
2829 return r;
2830
2831 m->n_reloading ++;
2832
2833 if (!(fds = fdset_new())) {
2834 m->n_reloading --;
2835 r = -ENOMEM;
2836 goto finish;
2837 }
2838
2839 if ((r = manager_serialize(m, f, fds)) < 0) {
2840 m->n_reloading --;
2841 goto finish;
2842 }
2843
2844 if (fseeko(f, 0, SEEK_SET) < 0) {
2845 m->n_reloading --;
2846 r = -errno;
2847 goto finish;
2848 }
2849
2850 /* From here on there is no way back. */
2851 manager_clear_jobs_and_units(m);
2852 manager_undo_generators(m);
2853
2854 /* Find new unit paths */
2855 lookup_paths_free(&m->lookup_paths);
2856 if ((q = lookup_paths_init(&m->lookup_paths, m->running_as, true)) < 0)
2857 r = q;
2858
2859 manager_run_generators(m);
2860
2861 manager_build_unit_path_cache(m);
2862
2863 /* First, enumerate what we can from all config files */
2864 if ((q = manager_enumerate(m)) < 0)
2865 r = q;
2866
2867 /* Second, deserialize our stored data */
2868 if ((q = manager_deserialize(m, f, fds)) < 0)
2869 r = q;
2870
2871 fclose(f);
2872 f = NULL;
2873
2874 /* Third, fire things up! */
2875 if ((q = manager_coldplug(m)) < 0)
2876 r = q;
2877
2878 assert(m->n_reloading > 0);
2879 m->n_reloading--;
2880
2881 finish:
2882 if (f)
2883 fclose(f);
2884
2885 if (fds)
2886 fdset_free(fds);
2887
2888 return r;
2889 }
2890
2891 bool manager_is_booting_or_shutting_down(Manager *m) {
2892 Unit *u;
2893
2894 assert(m);
2895
2896 /* Is the initial job still around? */
2897 if (manager_get_job(m, 1))
2898 return true;
2899
2900 /* Is there a job for the shutdown target? */
2901 if (((u = manager_get_unit(m, SPECIAL_SHUTDOWN_TARGET))))
2902 return !!u->meta.job;
2903
2904 return false;
2905 }
2906
2907 void manager_reset_failed(Manager *m) {
2908 Unit *u;
2909 Iterator i;
2910
2911 assert(m);
2912
2913 HASHMAP_FOREACH(u, m->units, i)
2914 unit_reset_failed(u);
2915 }
2916
2917 bool manager_unit_pending_inactive(Manager *m, const char *name) {
2918 Unit *u;
2919
2920 assert(m);
2921 assert(name);
2922
2923 /* Returns true if the unit is inactive or going down */
2924 if (!(u = manager_get_unit(m, name)))
2925 return true;
2926
2927 return unit_pending_inactive(u);
2928 }
2929
2930 void manager_check_finished(Manager *m) {
2931 char userspace[FORMAT_TIMESPAN_MAX], initrd[FORMAT_TIMESPAN_MAX], kernel[FORMAT_TIMESPAN_MAX], sum[FORMAT_TIMESPAN_MAX];
2932 usec_t kernel_usec = 0, initrd_usec = 0, userspace_usec = 0, total_usec = 0;
2933
2934 assert(m);
2935
2936 if (dual_timestamp_is_set(&m->finish_timestamp))
2937 return;
2938
2939 if (hashmap_size(m->jobs) > 0)
2940 return;
2941
2942 dual_timestamp_get(&m->finish_timestamp);
2943
2944 if (m->running_as == MANAGER_SYSTEM && detect_container(NULL) <= 0) {
2945
2946 userspace_usec = m->finish_timestamp.monotonic - m->startup_timestamp.monotonic;
2947 total_usec = m->finish_timestamp.monotonic;
2948
2949 if (dual_timestamp_is_set(&m->initrd_timestamp)) {
2950
2951 kernel_usec = m->initrd_timestamp.monotonic;
2952 initrd_usec = m->startup_timestamp.monotonic - m->initrd_timestamp.monotonic;
2953
2954 log_info("Startup finished in %s (kernel) + %s (initrd) + %s (userspace) = %s.",
2955 format_timespan(kernel, sizeof(kernel), kernel_usec),
2956 format_timespan(initrd, sizeof(initrd), initrd_usec),
2957 format_timespan(userspace, sizeof(userspace), userspace_usec),
2958 format_timespan(sum, sizeof(sum), total_usec));
2959 } else {
2960 kernel_usec = m->startup_timestamp.monotonic;
2961 initrd_usec = 0;
2962
2963 log_info("Startup finished in %s (kernel) + %s (userspace) = %s.",
2964 format_timespan(kernel, sizeof(kernel), kernel_usec),
2965 format_timespan(userspace, sizeof(userspace), userspace_usec),
2966 format_timespan(sum, sizeof(sum), total_usec));
2967 }
2968 } else {
2969 userspace_usec = initrd_usec = kernel_usec = 0;
2970 total_usec = m->finish_timestamp.monotonic - m->startup_timestamp.monotonic;
2971
2972 log_debug("Startup finished in %s.",
2973 format_timespan(sum, sizeof(sum), total_usec));
2974 }
2975
2976 bus_broadcast_finished(m, kernel_usec, initrd_usec, userspace_usec, total_usec);
2977
2978 sd_notifyf(false,
2979 "READY=1\nSTATUS=Startup finished in %s.",
2980 format_timespan(sum, sizeof(sum), total_usec));
2981 }
2982
2983 void manager_run_generators(Manager *m) {
2984 DIR *d = NULL;
2985 const char *generator_path;
2986 const char *argv[3];
2987
2988 assert(m);
2989
2990 generator_path = m->running_as == MANAGER_SYSTEM ? SYSTEM_GENERATOR_PATH : USER_GENERATOR_PATH;
2991 if (!(d = opendir(generator_path))) {
2992
2993 if (errno == ENOENT)
2994 return;
2995
2996 log_error("Failed to enumerate generator directory: %m");
2997 return;
2998 }
2999
3000 if (!m->generator_unit_path) {
3001 const char *p;
3002 char user_path[] = "/tmp/systemd-generator-XXXXXX";
3003
3004 if (m->running_as == MANAGER_SYSTEM && getpid() == 1) {
3005 p = "/run/systemd/generator";
3006
3007 if (mkdir_p(p, 0755) < 0) {
3008 log_error("Failed to create generator directory: %m");
3009 goto finish;
3010 }
3011
3012 } else {
3013 if (!(p = mkdtemp(user_path))) {
3014 log_error("Failed to create generator directory: %m");
3015 goto finish;
3016 }
3017 }
3018
3019 if (!(m->generator_unit_path = strdup(p))) {
3020 log_error("Failed to allocate generator unit path.");
3021 goto finish;
3022 }
3023 }
3024
3025 argv[0] = NULL; /* Leave this empty, execute_directory() will fill something in */
3026 argv[1] = m->generator_unit_path;
3027 argv[2] = NULL;
3028
3029 execute_directory(generator_path, d, (char**) argv);
3030
3031 if (rmdir(m->generator_unit_path) >= 0) {
3032 /* Uh? we were able to remove this dir? I guess that
3033 * means the directory was empty, hence let's shortcut
3034 * this */
3035
3036 free(m->generator_unit_path);
3037 m->generator_unit_path = NULL;
3038 goto finish;
3039 }
3040
3041 if (!strv_find(m->lookup_paths.unit_path, m->generator_unit_path)) {
3042 char **l;
3043
3044 if (!(l = strv_append(m->lookup_paths.unit_path, m->generator_unit_path))) {
3045 log_error("Failed to add generator directory to unit search path: %m");
3046 goto finish;
3047 }
3048
3049 strv_free(m->lookup_paths.unit_path);
3050 m->lookup_paths.unit_path = l;
3051
3052 log_debug("Added generator unit path %s to search path.", m->generator_unit_path);
3053 }
3054
3055 finish:
3056 if (d)
3057 closedir(d);
3058 }
3059
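/* Editorial example (not upstream code): as set up above, every executable in
 * the generator directory is invoked with the output unit directory as its
 * single argument (argv[1]), and anything it drops there is added to the unit
 * search path. A minimal, purely hypothetical generator could look like this;
 * the unit name and contents are made up for illustration. */
#if 0
#include <stdio.h>

int main(int argc, char *argv[]) {
        char path[4096];
        FILE *f;

        if (argc < 2)
                return 1;

        snprintf(path, sizeof(path), "%s/example-generated.target", argv[1]);

        if (!(f = fopen(path, "we")))
                return 1;

        fputs("[Unit]\n"
              "Description=Example generated target\n", f);

        fclose(f);
        return 0;
}
#endif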
3060 void manager_undo_generators(Manager *m) {
3061 assert(m);
3062
3063 if (!m->generator_unit_path)
3064 return;
3065
3066 strv_remove(m->lookup_paths.unit_path, m->generator_unit_path);
3067 rm_rf(m->generator_unit_path, false, true);
3068
3069 free(m->generator_unit_path);
3070 m->generator_unit_path = NULL;
3071 }
3072
3073 int manager_set_default_controllers(Manager *m, char **controllers) {
3074 char **l;
3075
3076 assert(m);
3077
3078 if (!(l = strv_copy(controllers)))
3079 return -ENOMEM;
3080
3081 strv_free(m->default_controllers);
3082 m->default_controllers = l;
3083
3084 return 0;
3085 }
3086
3087 void manager_recheck_syslog(Manager *m) {
3088 Unit *u;
3089
3090 assert(m);
3091
3092 if (m->running_as != MANAGER_SYSTEM)
3093 return;
3094
3095 if ((u = manager_get_unit(m, SPECIAL_SYSLOG_SOCKET))) {
3096 SocketState state;
3097
3098 state = SOCKET(u)->state;
3099
3100 if (state != SOCKET_DEAD &&
3101 state != SOCKET_FAILED &&
3102 state != SOCKET_RUNNING) {
3103
3104 /* Hmm, the socket is not set up, or is still
3105 * listening; we had better not try to use
3106 * it. Note that we have no problem if the
3107 * socket is completely down, since there
3108 * might be a foreign /dev/log socket around
3109 * and we want to make use of that.
3110 */
3111
3112 log_close_syslog();
3113 return;
3114 }
3115 }
3116
3117 if ((u = manager_get_unit(m, SPECIAL_SYSLOG_TARGET)))
3118 if (TARGET(u)->state != TARGET_ACTIVE) {
3119 log_close_syslog();
3120 return;
3121 }
3122
3123 /* Hmm, OK, so the socket is either fully up or fully down,
3124 * and the target is up, so let's make use of the socket. */
3125 log_open();
3126 }
3127
3128 static const char* const manager_running_as_table[_MANAGER_RUNNING_AS_MAX] = {
3129 [MANAGER_SYSTEM] = "system",
3130 [MANAGER_USER] = "user"
3131 };
3132
3133 DEFINE_STRING_TABLE_LOOKUP(manager_running_as, ManagerRunningAs);