1 /*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/
2
3 /***
4 This file is part of systemd.
5
6 Copyright 2010 Lennart Poettering
7
8 systemd is free software; you can redistribute it and/or modify it
9 under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2 of the License, or
11 (at your option) any later version.
12
13 systemd is distributed in the hope that it will be useful, but
14 WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with systemd; If not, see <http://www.gnu.org/licenses/>.
20 ***/
21
22 #include <assert.h>
23 #include <errno.h>
24 #include <string.h>
25 #include <sys/epoll.h>
26 #include <signal.h>
27 #include <sys/signalfd.h>
28 #include <sys/wait.h>
29 #include <unistd.h>
30 #include <sys/poll.h>
31 #include <sys/reboot.h>
32 #include <sys/ioctl.h>
33 #include <linux/kd.h>
34 #include <termios.h>
35 #include <fcntl.h>
36 #include <sys/types.h>
37 #include <sys/stat.h>
38 #include <dirent.h>
39
40 #ifdef HAVE_AUDIT
41 #include <libaudit.h>
42 #endif
43
44 #include "manager.h"
45 #include "hashmap.h"
46 #include "macro.h"
47 #include "strv.h"
48 #include "log.h"
49 #include "util.h"
50 #include "ratelimit.h"
51 #include "cgroup.h"
52 #include "mount-setup.h"
53 #include "unit-name.h"
54 #include "dbus-unit.h"
55 #include "dbus-job.h"
56 #include "missing.h"
57 #include "path-lookup.h"
58 #include "special.h"
59 #include "bus-errors.h"
60 #include "exit-status.h"
61 #include "sd-daemon.h"
62 #include "virt.h"
63
64 /* As soon as 16 units are in our GC queue, make sure to run a gc sweep */
65 #define GC_QUEUE_ENTRIES_MAX 16
66
67 /* As soon as 10s have passed since a unit was added to our GC queue, make sure to run a gc sweep */
68 #define GC_QUEUE_USEC_MAX (10*USEC_PER_SEC)
69
70 /* Where clients shall send notification messages to */
71 #define NOTIFY_SOCKET_SYSTEM "/run/systemd/notify"
72 #define NOTIFY_SOCKET_USER "@/org/freedesktop/systemd1/notify"
73
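/* Note on the addresses above: the leading '@' in NOTIFY_SOCKET_USER marks an
 * abstract-namespace AF_UNIX socket; when the address is filled in below the
 * '@' is swapped for a NUL byte, which is how the kernel expects abstract
 * socket names to be encoded in sockaddr_un. */
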
74 static int manager_setup_notify(Manager *m) {
75 union {
76 struct sockaddr sa;
77 struct sockaddr_un un;
78 } sa;
79 struct epoll_event ev;
80 int one = 1, r;
81 mode_t u;
82
83 assert(m);
84
85 m->notify_watch.type = WATCH_NOTIFY;
86 if ((m->notify_watch.fd = socket(AF_UNIX, SOCK_DGRAM|SOCK_CLOEXEC|SOCK_NONBLOCK, 0)) < 0) {
87 log_error("Failed to allocate notification socket: %m");
88 return -errno;
89 }
90
91 zero(sa);
92 sa.sa.sa_family = AF_UNIX;
93
94 if (getpid() != 1)
95 snprintf(sa.un.sun_path, sizeof(sa.un.sun_path), NOTIFY_SOCKET_USER "/%llu", random_ull());
96 else {
97 unlink(NOTIFY_SOCKET_SYSTEM);
98 strncpy(sa.un.sun_path, NOTIFY_SOCKET_SYSTEM, sizeof(sa.un.sun_path));
99 }
100
101 if (sa.un.sun_path[0] == '@')
102 sa.un.sun_path[0] = 0;
103
104 u = umask(0111);
105 r = bind(m->notify_watch.fd, &sa.sa, offsetof(struct sockaddr_un, sun_path) + 1 + strlen(sa.un.sun_path+1));
106 umask(u);
107
108 if (r < 0) {
109 log_error("bind() failed: %m");
110 return -errno;
111 }
112
113 if (setsockopt(m->notify_watch.fd, SOL_SOCKET, SO_PASSCRED, &one, sizeof(one)) < 0) {
114 log_error("SO_PASSCRED failed: %m");
115 return -errno;
116 }
117
118 zero(ev);
119 ev.events = EPOLLIN;
120 ev.data.ptr = &m->notify_watch;
121
122 if (epoll_ctl(m->epoll_fd, EPOLL_CTL_ADD, m->notify_watch.fd, &ev) < 0)
123 return -errno;
124
125 if (sa.un.sun_path[0] == 0)
126 sa.un.sun_path[0] = '@';
127
128 if (!(m->notify_socket = strdup(sa.un.sun_path)))
129 return -ENOMEM;
130
131 log_debug("Using notification socket %s", m->notify_socket);
132
133 return 0;
134 }
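
/* For illustration only (not code from this file): a client wanting to talk
 * to the socket set up above would read $NOTIFY_SOCKET from its environment
 * and send a datagram of newline-separated "VAR=value" assignments. A minimal
 * sketch, error handling omitted -- normally one would simply call sd_notify()
 * from sd-daemon instead:
 *
 *     const char *e = getenv("NOTIFY_SOCKET");
 *     int fd = socket(AF_UNIX, SOCK_DGRAM|SOCK_CLOEXEC, 0);
 *     struct sockaddr_un a = { .sun_family = AF_UNIX };
 *     strncpy(a.sun_path, e, sizeof(a.sun_path)-1);
 *     if (a.sun_path[0] == '@')
 *             a.sun_path[0] = 0;               // abstract namespace
 *     sendto(fd, "READY=1\nSTATUS=Ready\n", 21, 0,
 *            (struct sockaddr*) &a, offsetof(struct sockaddr_un, sun_path) + strlen(e));
 *     close(fd);
 *
 * The SO_PASSCRED option enabled above makes the kernel attach the sender's
 * credentials to each datagram, which manager_process_notify_fd() later uses
 * to map the message back to a unit. */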
135
136 static int enable_special_signals(Manager *m) {
137 int fd;
138
139 assert(m);
140
141         /* Make sure we get SIGINT delivered on control-alt-del */
142 if (reboot(RB_DISABLE_CAD) < 0)
143 log_warning("Failed to enable ctrl-alt-del handling: %m");
144
145 if ((fd = open_terminal("/dev/tty0", O_RDWR|O_NOCTTY|O_CLOEXEC)) < 0)
146 log_warning("Failed to open /dev/tty0: %m");
147 else {
148                 /* Make sure we get SIGWINCH delivered on kbrequest */
149 if (ioctl(fd, KDSIGACCEPT, SIGWINCH) < 0)
150 log_warning("Failed to enable kbrequest handling: %s", strerror(errno));
151
152 close_nointr_nofail(fd);
153 }
154
155 return 0;
156 }
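
/* Background on the two kernel interfaces used above: reboot(RB_DISABLE_CAD)
 * tells the kernel not to reboot immediately on Ctrl-Alt-Del but to deliver
 * SIGINT to PID 1 instead, and the KDSIGACCEPT ioctl asks the console driver
 * to send the given signal (here SIGWINCH) when the "kbrequest" key
 * combination (Alt-ArrowUp) is pressed. Both signals are then acted upon in
 * manager_process_signal_fd() further down. */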
157
158 static int manager_setup_signals(Manager *m) {
159 sigset_t mask;
160 struct epoll_event ev;
161 struct sigaction sa;
162
163 assert(m);
164
165 /* We are not interested in SIGSTOP and friends. */
166 zero(sa);
167 sa.sa_handler = SIG_DFL;
168 sa.sa_flags = SA_NOCLDSTOP|SA_RESTART;
169 assert_se(sigaction(SIGCHLD, &sa, NULL) == 0);
170
171 assert_se(sigemptyset(&mask) == 0);
172
173 sigset_add_many(&mask,
174 SIGCHLD, /* Child died */
175 SIGTERM, /* Reexecute daemon */
176 SIGHUP, /* Reload configuration */
177 SIGUSR1, /* systemd/upstart: reconnect to D-Bus */
178 SIGUSR2, /* systemd: dump status */
179 SIGINT, /* Kernel sends us this on control-alt-del */
180 SIGWINCH, /* Kernel sends us this on kbrequest (alt-arrowup) */
181 SIGPWR, /* Some kernel drivers and upsd send us this on power failure */
182 SIGRTMIN+0, /* systemd: start default.target */
183 SIGRTMIN+1, /* systemd: isolate rescue.target */
184 SIGRTMIN+2, /* systemd: isolate emergency.target */
185 SIGRTMIN+3, /* systemd: start halt.target */
186 SIGRTMIN+4, /* systemd: start poweroff.target */
187 SIGRTMIN+5, /* systemd: start reboot.target */
188 SIGRTMIN+6, /* systemd: start kexec.target */
189 SIGRTMIN+13, /* systemd: Immediate halt */
190 SIGRTMIN+14, /* systemd: Immediate poweroff */
191 SIGRTMIN+15, /* systemd: Immediate reboot */
192 SIGRTMIN+16, /* systemd: Immediate kexec */
193 SIGRTMIN+20, /* systemd: enable status messages */
194 SIGRTMIN+21, /* systemd: disable status messages */
195 SIGRTMIN+22, /* systemd: set log level to LOG_DEBUG */
196 SIGRTMIN+23, /* systemd: set log level to LOG_INFO */
197 SIGRTMIN+27, /* systemd: set log target to console */
198 SIGRTMIN+28, /* systemd: set log target to kmsg */
199 SIGRTMIN+29, /* systemd: set log target to syslog-or-kmsg */
200 -1);
201 assert_se(sigprocmask(SIG_SETMASK, &mask, NULL) == 0);
202
203 m->signal_watch.type = WATCH_SIGNAL;
204 if ((m->signal_watch.fd = signalfd(-1, &mask, SFD_NONBLOCK|SFD_CLOEXEC)) < 0)
205 return -errno;
206
207 zero(ev);
208 ev.events = EPOLLIN;
209 ev.data.ptr = &m->signal_watch;
210
211 if (epoll_ctl(m->epoll_fd, EPOLL_CTL_ADD, m->signal_watch.fd, &ev) < 0)
212 return -errno;
213
214 if (m->running_as == MANAGER_SYSTEM)
215 return enable_special_signals(m);
216
217 return 0;
218 }
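
/* Purely as an illustration of the signal table above: since these signals
 * are blocked and consumed via signalfd() they can be sent from outside to
 * trigger manager actions without going through D-Bus, e.g. (assuming the
 * system manager runs as PID 1):
 *
 *     kill(1, SIGRTMIN+4);     // enqueue a start job for poweroff.target
 *     kill(1, SIGRTMIN+20);    // turn status messages on
 */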
219
220 int manager_new(ManagerRunningAs running_as, Manager **_m) {
221 Manager *m;
222 int r = -ENOMEM;
223
224 assert(_m);
225 assert(running_as >= 0);
226 assert(running_as < _MANAGER_RUNNING_AS_MAX);
227
228 if (!(m = new0(Manager, 1)))
229 return -ENOMEM;
230
231 dual_timestamp_get(&m->startup_timestamp);
232
233 m->running_as = running_as;
234 m->name_data_slot = m->conn_data_slot = m->subscribed_data_slot = -1;
235 m->exit_code = _MANAGER_EXIT_CODE_INVALID;
236 m->pin_cgroupfs_fd = -1;
237
238 #ifdef HAVE_AUDIT
239 m->audit_fd = -1;
240 #endif
241
242 m->signal_watch.fd = m->mount_watch.fd = m->udev_watch.fd = m->epoll_fd = m->dev_autofs_fd = m->swap_watch.fd = -1;
243 m->current_job_id = 1; /* start as id #1, so that we can leave #0 around as "null-like" value */
244
245 if (!(m->environment = strv_copy(environ)))
246 goto fail;
247
248 if (!(m->default_controllers = strv_new("cpu", NULL)))
249 goto fail;
250
251 if (!(m->units = hashmap_new(string_hash_func, string_compare_func)))
252 goto fail;
253
254 if (!(m->jobs = hashmap_new(trivial_hash_func, trivial_compare_func)))
255 goto fail;
256
257 if (!(m->transaction_jobs = hashmap_new(trivial_hash_func, trivial_compare_func)))
258 goto fail;
259
260 if (!(m->watch_pids = hashmap_new(trivial_hash_func, trivial_compare_func)))
261 goto fail;
262
263 if (!(m->cgroup_bondings = hashmap_new(string_hash_func, string_compare_func)))
264 goto fail;
265
266 if (!(m->watch_bus = hashmap_new(string_hash_func, string_compare_func)))
267 goto fail;
268
269 if ((m->epoll_fd = epoll_create1(EPOLL_CLOEXEC)) < 0)
270 goto fail;
271
272 if ((r = lookup_paths_init(&m->lookup_paths, m->running_as, true)) < 0)
273 goto fail;
274
275 if ((r = manager_setup_signals(m)) < 0)
276 goto fail;
277
278 if ((r = manager_setup_cgroup(m)) < 0)
279 goto fail;
280
281 if ((r = manager_setup_notify(m)) < 0)
282 goto fail;
283
284 /* Try to connect to the busses, if possible. */
285 if ((r = bus_init(m, running_as != MANAGER_SYSTEM)) < 0)
286 goto fail;
287
288 #ifdef HAVE_AUDIT
289 if ((m->audit_fd = audit_open()) < 0 &&
290 /* If the kernel lacks netlink or audit support,
291 * don't worry about it. */
292 errno != EAFNOSUPPORT && errno != EPROTONOSUPPORT)
293 log_error("Failed to connect to audit log: %m");
294 #endif
295
296 m->taint_usr = dir_is_empty("/usr") > 0;
297
298 *_m = m;
299 return 0;
300
301 fail:
302 manager_free(m);
303 return r;
304 }
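
/* A rough sketch of how the entry points in this file fit together. The real
 * call sequence lives in main.c and is considerably more involved; this is
 * only meant to show the intended order, with error handling omitted:
 *
 *     Manager *m = NULL;
 *     if (manager_new(MANAGER_SYSTEM, &m) >= 0) {
 *             manager_startup(m, NULL, NULL);   // generators, enumeration, coldplug
 *             manager_add_job_by_name(m, JOB_START, SPECIAL_DEFAULT_TARGET,
 *                                     JOB_REPLACE, false, NULL, NULL);
 *             // ... run the event loop, then eventually:
 *             manager_free(m);
 *     }
 */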
305
306 static unsigned manager_dispatch_cleanup_queue(Manager *m) {
307 Meta *meta;
308 unsigned n = 0;
309
310 assert(m);
311
312 while ((meta = m->cleanup_queue)) {
313 assert(meta->in_cleanup_queue);
314
315 unit_free((Unit*) meta);
316 n++;
317 }
318
319 return n;
320 }
321
322 enum {
323 GC_OFFSET_IN_PATH, /* This one is on the path we were traveling */
324 GC_OFFSET_UNSURE, /* No clue */
325 GC_OFFSET_GOOD, /* We still need this unit */
326 GC_OFFSET_BAD, /* We don't need this unit anymore */
327 _GC_OFFSET_MAX
328 };
329
330 static void unit_gc_sweep(Unit *u, unsigned gc_marker) {
331 Iterator i;
332 Unit *other;
333 bool is_bad;
334
335 assert(u);
336
337 if (u->meta.gc_marker == gc_marker + GC_OFFSET_GOOD ||
338 u->meta.gc_marker == gc_marker + GC_OFFSET_BAD ||
339 u->meta.gc_marker == gc_marker + GC_OFFSET_IN_PATH)
340 return;
341
342 if (u->meta.in_cleanup_queue)
343 goto bad;
344
345 if (unit_check_gc(u))
346 goto good;
347
348 u->meta.gc_marker = gc_marker + GC_OFFSET_IN_PATH;
349
350 is_bad = true;
351
352 SET_FOREACH(other, u->meta.dependencies[UNIT_REFERENCED_BY], i) {
353 unit_gc_sweep(other, gc_marker);
354
355 if (other->meta.gc_marker == gc_marker + GC_OFFSET_GOOD)
356 goto good;
357
358 if (other->meta.gc_marker != gc_marker + GC_OFFSET_BAD)
359 is_bad = false;
360 }
361
362 if (is_bad)
363 goto bad;
364
365 /* We were unable to find anything out about this entry, so
366 * let's investigate it later */
367 u->meta.gc_marker = gc_marker + GC_OFFSET_UNSURE;
368 unit_add_to_gc_queue(u);
369 return;
370
371 bad:
372 /* We definitely know that this one is not useful anymore, so
373 * let's mark it for deletion */
374 u->meta.gc_marker = gc_marker + GC_OFFSET_BAD;
375 unit_add_to_cleanup_queue(u);
376 return;
377
378 good:
379 u->meta.gc_marker = gc_marker + GC_OFFSET_GOOD;
380 }
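
/* How the marker arithmetic above works: every GC run picks a fresh base
 * value m->gc_marker (bumped by _GC_OFFSET_MAX per run), and a unit's state
 * for the current sweep is stored as gc_marker + GC_OFFSET_*. A value left
 * over from an earlier run simply doesn't match the current base and hence
 * counts as "not visited yet", so no extra pass is needed to clear stale
 * flags. GC_OFFSET_IN_PATH additionally guards against endless recursion
 * when the REFERENCED_BY graph contains cycles. */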
381
382 static unsigned manager_dispatch_gc_queue(Manager *m) {
383 Meta *meta;
384 unsigned n = 0;
385 unsigned gc_marker;
386
387 assert(m);
388
389 if ((m->n_in_gc_queue < GC_QUEUE_ENTRIES_MAX) &&
390 (m->gc_queue_timestamp <= 0 ||
391 (m->gc_queue_timestamp + GC_QUEUE_USEC_MAX) > now(CLOCK_MONOTONIC)))
392 return 0;
393
394 log_debug("Running GC...");
395
396 m->gc_marker += _GC_OFFSET_MAX;
397 if (m->gc_marker + _GC_OFFSET_MAX <= _GC_OFFSET_MAX)
398 m->gc_marker = 1;
399
400 gc_marker = m->gc_marker;
401
402 while ((meta = m->gc_queue)) {
403 assert(meta->in_gc_queue);
404
405 unit_gc_sweep((Unit*) meta, gc_marker);
406
407 LIST_REMOVE(Meta, gc_queue, m->gc_queue, meta);
408 meta->in_gc_queue = false;
409
410 n++;
411
412 if (meta->gc_marker == gc_marker + GC_OFFSET_BAD ||
413 meta->gc_marker == gc_marker + GC_OFFSET_UNSURE) {
414 log_debug("Collecting %s", meta->id);
415 meta->gc_marker = gc_marker + GC_OFFSET_BAD;
416 unit_add_to_cleanup_queue((Unit*) meta);
417 }
418 }
419
420 m->n_in_gc_queue = 0;
421 m->gc_queue_timestamp = 0;
422
423 return n;
424 }
425
426 static void manager_clear_jobs_and_units(Manager *m) {
427 Job *j;
428 Unit *u;
429
430 assert(m);
431
432 while ((j = hashmap_first(m->transaction_jobs)))
433 job_free(j);
434
435 while ((u = hashmap_first(m->units)))
436 unit_free(u);
437
438 manager_dispatch_cleanup_queue(m);
439
440 assert(!m->load_queue);
441 assert(!m->run_queue);
442 assert(!m->dbus_unit_queue);
443 assert(!m->dbus_job_queue);
444 assert(!m->cleanup_queue);
445 assert(!m->gc_queue);
446
447 assert(hashmap_isempty(m->transaction_jobs));
448 assert(hashmap_isempty(m->jobs));
449 assert(hashmap_isempty(m->units));
450 }
451
452 void manager_free(Manager *m) {
453 UnitType c;
454
455 assert(m);
456
457 manager_clear_jobs_and_units(m);
458
459 for (c = 0; c < _UNIT_TYPE_MAX; c++)
460 if (unit_vtable[c]->shutdown)
461 unit_vtable[c]->shutdown(m);
462
463 /* If we reexecute ourselves, we keep the root cgroup
464 * around */
465 manager_shutdown_cgroup(m, m->exit_code != MANAGER_REEXECUTE);
466
467 manager_undo_generators(m);
468
469 bus_done(m);
470
471 hashmap_free(m->units);
472 hashmap_free(m->jobs);
473 hashmap_free(m->transaction_jobs);
474 hashmap_free(m->watch_pids);
475 hashmap_free(m->watch_bus);
476
477 if (m->epoll_fd >= 0)
478 close_nointr_nofail(m->epoll_fd);
479 if (m->signal_watch.fd >= 0)
480 close_nointr_nofail(m->signal_watch.fd);
481 if (m->notify_watch.fd >= 0)
482 close_nointr_nofail(m->notify_watch.fd);
483
484 #ifdef HAVE_AUDIT
485 if (m->audit_fd >= 0)
486 audit_close(m->audit_fd);
487 #endif
488
489 free(m->notify_socket);
490
491 lookup_paths_free(&m->lookup_paths);
492 strv_free(m->environment);
493
494 strv_free(m->default_controllers);
495
496 hashmap_free(m->cgroup_bondings);
497 set_free_free(m->unit_path_cache);
498
499 free(m);
500 }
501
502 int manager_enumerate(Manager *m) {
503 int r = 0, q;
504 UnitType c;
505
506 assert(m);
507
508 /* Let's ask every type to load all units from disk/kernel
509 * that it might know */
510 for (c = 0; c < _UNIT_TYPE_MAX; c++)
511 if (unit_vtable[c]->enumerate)
512 if ((q = unit_vtable[c]->enumerate(m)) < 0)
513 r = q;
514
515 manager_dispatch_load_queue(m);
516 return r;
517 }
518
519 int manager_coldplug(Manager *m) {
520 int r = 0, q;
521 Iterator i;
522 Unit *u;
523 char *k;
524
525 assert(m);
526
527 /* Then, let's set up their initial state. */
528 HASHMAP_FOREACH_KEY(u, k, m->units, i) {
529
530 /* ignore aliases */
531 if (u->meta.id != k)
532 continue;
533
534 if ((q = unit_coldplug(u)) < 0)
535 r = q;
536 }
537
538 return r;
539 }
540
541 static void manager_build_unit_path_cache(Manager *m) {
542 char **i;
543 DIR *d = NULL;
544 int r;
545
546 assert(m);
547
548 set_free_free(m->unit_path_cache);
549
550 if (!(m->unit_path_cache = set_new(string_hash_func, string_compare_func))) {
551 log_error("Failed to allocate unit path cache.");
552 return;
553 }
554
555 /* This simply builds a list of files we know exist, so that
556 * we don't always have to go to disk */
557
558 STRV_FOREACH(i, m->lookup_paths.unit_path) {
559 struct dirent *de;
560
561 if (!(d = opendir(*i))) {
562 log_error("Failed to open directory: %m");
563 continue;
564 }
565
566 while ((de = readdir(d))) {
567 char *p;
568
569 if (ignore_file(de->d_name))
570 continue;
571
572 p = join(streq(*i, "/") ? "" : *i, "/", de->d_name, NULL);
573 if (!p) {
574 r = -ENOMEM;
575 goto fail;
576 }
577
578 if ((r = set_put(m->unit_path_cache, p)) < 0) {
579 free(p);
580 goto fail;
581 }
582 }
583
584 closedir(d);
585 d = NULL;
586 }
587
588 return;
589
590 fail:
591 log_error("Failed to build unit path cache: %s", strerror(-r));
592
593 set_free_free(m->unit_path_cache);
594 m->unit_path_cache = NULL;
595
596 if (d)
597 closedir(d);
598 }
599
600 int manager_startup(Manager *m, FILE *serialization, FDSet *fds) {
601 int r, q;
602
603 assert(m);
604
605 manager_run_generators(m);
606
607 manager_build_unit_path_cache(m);
608
609         /* If we are going to deserialize, make sure that this is
610          * already known during enumeration, so we increase the
611          * counter here already */
612 if (serialization)
613 m->n_reloading ++;
614
615 /* First, enumerate what we can from all config files */
616 r = manager_enumerate(m);
617
618 /* Second, deserialize if there is something to deserialize */
619 if (serialization)
620 if ((q = manager_deserialize(m, serialization, fds)) < 0)
621 r = q;
622
623 /* Third, fire things up! */
624 if ((q = manager_coldplug(m)) < 0)
625 r = q;
626
627 if (serialization) {
628 assert(m->n_reloading > 0);
629 m->n_reloading --;
630 }
631
632 return r;
633 }
634
635 static void transaction_delete_job(Manager *m, Job *j, bool delete_dependencies) {
636 assert(m);
637 assert(j);
638
639 /* Deletes one job from the transaction */
640
641 manager_transaction_unlink_job(m, j, delete_dependencies);
642
643 if (!j->installed)
644 job_free(j);
645 }
646
647 static void transaction_delete_unit(Manager *m, Unit *u) {
648 Job *j;
649
650 /* Deletes all jobs associated with a certain unit from the
651 * transaction */
652
653 while ((j = hashmap_get(m->transaction_jobs, u)))
654 transaction_delete_job(m, j, true);
655 }
656
657 static void transaction_clean_dependencies(Manager *m) {
658 Iterator i;
659 Job *j;
660
661 assert(m);
662
663 /* Drops all dependencies of all installed jobs */
664
665 HASHMAP_FOREACH(j, m->jobs, i) {
666 while (j->subject_list)
667 job_dependency_free(j->subject_list);
668 while (j->object_list)
669 job_dependency_free(j->object_list);
670 }
671
672 assert(!m->transaction_anchor);
673 }
674
675 static void transaction_abort(Manager *m) {
676 Job *j;
677
678 assert(m);
679
680 while ((j = hashmap_first(m->transaction_jobs)))
681 if (j->installed)
682 transaction_delete_job(m, j, true);
683 else
684 job_free(j);
685
686 assert(hashmap_isempty(m->transaction_jobs));
687
688 transaction_clean_dependencies(m);
689 }
690
691 static void transaction_find_jobs_that_matter_to_anchor(Manager *m, Job *j, unsigned generation) {
692 JobDependency *l;
693
694 assert(m);
695
696 /* A recursive sweep through the graph that marks all units
697 * that matter to the anchor job, i.e. are directly or
698 * indirectly a dependency of the anchor job via paths that
699 * are fully marked as mattering. */
700
701 if (j)
702 l = j->subject_list;
703 else
704 l = m->transaction_anchor;
705
706 LIST_FOREACH(subject, l, l) {
707
708 /* This link does not matter */
709 if (!l->matters)
710 continue;
711
712 /* This unit has already been marked */
713 if (l->object->generation == generation)
714 continue;
715
716 l->object->matters_to_anchor = true;
717 l->object->generation = generation;
718
719 transaction_find_jobs_that_matter_to_anchor(m, l->object, generation);
720 }
721 }
722
723 static void transaction_merge_and_delete_job(Manager *m, Job *j, Job *other, JobType t) {
724 JobDependency *l, *last;
725
726 assert(j);
727 assert(other);
728 assert(j->unit == other->unit);
729 assert(!j->installed);
730
731         /* Merges 'other' into 'j' and then deletes 'other'. */
732
733 j->type = t;
734 j->state = JOB_WAITING;
735 j->override = j->override || other->override;
736
737 j->matters_to_anchor = j->matters_to_anchor || other->matters_to_anchor;
738
739 /* Patch us in as new owner of the JobDependency objects */
740 last = NULL;
741 LIST_FOREACH(subject, l, other->subject_list) {
742 assert(l->subject == other);
743 l->subject = j;
744 last = l;
745 }
746
747 /* Merge both lists */
748 if (last) {
749 last->subject_next = j->subject_list;
750 if (j->subject_list)
751 j->subject_list->subject_prev = last;
752 j->subject_list = other->subject_list;
753 }
754
755 /* Patch us in as new owner of the JobDependency objects */
756 last = NULL;
757 LIST_FOREACH(object, l, other->object_list) {
758 assert(l->object == other);
759 l->object = j;
760 last = l;
761 }
762
763 /* Merge both lists */
764 if (last) {
765 last->object_next = j->object_list;
766 if (j->object_list)
767 j->object_list->object_prev = last;
768 j->object_list = other->object_list;
769 }
770
771 /* Kill the other job */
772 other->subject_list = NULL;
773 other->object_list = NULL;
774 transaction_delete_job(m, other, true);
775 }
776 static bool job_is_conflicted_by(Job *j) {
777 JobDependency *l;
778
779 assert(j);
780
781         /* Returns true if this job is pulled in by at least one
782 * ConflictedBy dependency. */
783
784 LIST_FOREACH(object, l, j->object_list)
785 if (l->conflicts)
786 return true;
787
788 return false;
789 }
790
791 static int delete_one_unmergeable_job(Manager *m, Job *j) {
792 Job *k;
793
794 assert(j);
795
796 /* Tries to delete one item in the linked list
797 * j->transaction_next->transaction_next->... that conflicts
798 * with another one, in an attempt to make an inconsistent
799 * transaction work. */
800
801 /* We rely here on the fact that if a merged with b does not
802          * merge with c, then at least one of a and b does not merge with c either */
803 LIST_FOREACH(transaction, j, j)
804 LIST_FOREACH(transaction, k, j->transaction_next) {
805 Job *d;
806
807 /* Is this one mergeable? Then skip it */
808 if (job_type_is_mergeable(j->type, k->type))
809 continue;
810
811 /* Ok, we found two that conflict, let's see if we can
812 * drop one of them */
813 if (!j->matters_to_anchor && !k->matters_to_anchor) {
814
815                                 /* Neither job matters to the anchor, so
816                                  * let's find the one that is smarter to
817                                  * remove. Let's think positive and
818                                  * rather remove stops than starts --
819                                  * except if something is being
820                                  * stopped because it is conflicted by
821                                  * another unit, in which case we'd
822                                  * rather remove the start. */
823
824 log_debug("Looking at job %s/%s conflicted_by=%s", j->unit->meta.id, job_type_to_string(j->type), yes_no(j->type == JOB_STOP && job_is_conflicted_by(j)));
825 log_debug("Looking at job %s/%s conflicted_by=%s", k->unit->meta.id, job_type_to_string(k->type), yes_no(k->type == JOB_STOP && job_is_conflicted_by(k)));
826
827 if (j->type == JOB_STOP) {
828
829 if (job_is_conflicted_by(j))
830 d = k;
831 else
832 d = j;
833
834 } else if (k->type == JOB_STOP) {
835
836 if (job_is_conflicted_by(k))
837 d = j;
838 else
839 d = k;
840 } else
841 d = j;
842
843 } else if (!j->matters_to_anchor)
844 d = j;
845 else if (!k->matters_to_anchor)
846 d = k;
847 else
848 return -ENOEXEC;
849
850 /* Ok, we can drop one, so let's do so. */
851 log_debug("Fixing conflicting jobs by deleting job %s/%s", d->unit->meta.id, job_type_to_string(d->type));
852 transaction_delete_job(m, d, true);
853 return 0;
854 }
855
856 return -EINVAL;
857 }
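
/* Illustrative example for the heuristic above (unit name hypothetical):
 * suppose the transaction ends up with both foo.service/start and
 * foo.service/stop, and neither matters to the anchor. Normally the stop job
 * is dropped ("think positive"), but if the stop was pulled in because some
 * other unit conflicts with foo.service, dropping it would defeat that
 * conflict, so the start job is dropped instead. */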
858
859 static int transaction_merge_jobs(Manager *m, DBusError *e) {
860 Job *j;
861 Iterator i;
862 int r;
863
864 assert(m);
865
866 /* First step, check whether any of the jobs for one specific
867 * task conflict. If so, try to drop one of them. */
868 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
869 JobType t;
870 Job *k;
871
872 t = j->type;
873 LIST_FOREACH(transaction, k, j->transaction_next) {
874 if (job_type_merge(&t, k->type) >= 0)
875 continue;
876
877 /* OK, we could not merge all jobs for this
878 * action. Let's see if we can get rid of one
879 * of them */
880
881 if ((r = delete_one_unmergeable_job(m, j)) >= 0)
882 /* Ok, we managed to drop one, now
883 * let's ask our callers to call us
884 * again after garbage collecting */
885 return -EAGAIN;
886
887 /* We couldn't merge anything. Failure */
888 dbus_set_error(e, BUS_ERROR_TRANSACTION_JOBS_CONFLICTING, "Transaction contains conflicting jobs '%s' and '%s' for %s. Probably contradicting requirement dependencies configured.",
889 job_type_to_string(t), job_type_to_string(k->type), k->unit->meta.id);
890 return r;
891 }
892 }
893
894 /* Second step, merge the jobs. */
895 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
896 JobType t = j->type;
897 Job *k;
898
899 /* Merge all transactions */
900 LIST_FOREACH(transaction, k, j->transaction_next)
901 assert_se(job_type_merge(&t, k->type) == 0);
902
903 /* If an active job is mergeable, merge it too */
904 if (j->unit->meta.job)
905 job_type_merge(&t, j->unit->meta.job->type); /* Might fail. Which is OK */
906
907 while ((k = j->transaction_next)) {
908 if (j->installed) {
909 transaction_merge_and_delete_job(m, k, j, t);
910 j = k;
911 } else
912 transaction_merge_and_delete_job(m, j, k, t);
913 }
914
915 if (j->unit->meta.job && !j->installed)
916 transaction_merge_and_delete_job(m, j, j->unit->meta.job, t);
917
918 assert(!j->transaction_next);
919 assert(!j->transaction_prev);
920 }
921
922 return 0;
923 }
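
/* A hedged example of what the merging above achieves -- the concrete merge
 * rules live in job_type_merge() in job.c, so treat the specific types as an
 * assumption: if the transaction contains both foo.service/start (pulled in
 * via a requirement dependency) and foo.service/verify-active (pulled in via
 * a requisite dependency), the two can typically be collapsed into a single
 * start job. Start and stop for the same unit, on the other hand, are
 * unmergeable and end up in the conflict handling of the first pass. */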
924
925 static void transaction_drop_redundant(Manager *m) {
926 bool again;
927
928 assert(m);
929
930 /* Goes through the transaction and removes all jobs that are
931 * a noop */
932
933 do {
934 Job *j;
935 Iterator i;
936
937 again = false;
938
939 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
940 bool changes_something = false;
941 Job *k;
942
943 LIST_FOREACH(transaction, k, j) {
944
945 if (!job_is_anchor(k) &&
946 (k->installed || job_type_is_redundant(k->type, unit_active_state(k->unit))) &&
947 (!k->unit->meta.job || !job_type_is_conflicting(k->type, k->unit->meta.job->type)))
948 continue;
949
950 changes_something = true;
951 break;
952 }
953
954 if (changes_something)
955 continue;
956
957 /* log_debug("Found redundant job %s/%s, dropping.", j->unit->meta.id, job_type_to_string(j->type)); */
958 transaction_delete_job(m, j, false);
959 again = true;
960 break;
961 }
962
963 } while (again);
964 }
965
966 static bool unit_matters_to_anchor(Unit *u, Job *j) {
967 assert(u);
968 assert(!j->transaction_prev);
969
970 /* Checks whether at least one of the jobs for this unit
971 * matters to the anchor. */
972
973 LIST_FOREACH(transaction, j, j)
974 if (j->matters_to_anchor)
975 return true;
976
977 return false;
978 }
979
980 static int transaction_verify_order_one(Manager *m, Job *j, Job *from, unsigned generation, DBusError *e) {
981 Iterator i;
982 Unit *u;
983 int r;
984
985 assert(m);
986 assert(j);
987 assert(!j->transaction_prev);
988
989 /* Does a recursive sweep through the ordering graph, looking
990          * for a cycle. If we find a cycle we try to break it. */
991
992 /* Have we seen this before? */
993 if (j->generation == generation) {
994 Job *k, *delete;
995
996 /* If the marker is NULL we have been here already and
997 * decided the job was loop-free from here. Hence
998                  * shortcut things and return right away. */
999 if (!j->marker)
1000 return 0;
1001
1002 /* So, the marker is not NULL and we already have been
1003 * here. We have a cycle. Let's try to break it. We go
1004 * backwards in our path and try to find a suitable
1005 * job to remove. We use the marker to find our way
1006                  * back, since, smart as we are, we stored our way back
1007 * in there. */
1008 log_warning("Found ordering cycle on %s/%s", j->unit->meta.id, job_type_to_string(j->type));
1009
1010 delete = NULL;
1011 for (k = from; k; k = ((k->generation == generation && k->marker != k) ? k->marker : NULL)) {
1012
1013 log_info("Walked on cycle path to %s/%s", k->unit->meta.id, job_type_to_string(k->type));
1014
1015 if (!delete &&
1016 !k->installed &&
1017 !unit_matters_to_anchor(k->unit, k)) {
1018 /* Ok, we can drop this one, so let's
1019 * do so. */
1020 delete = k;
1021 }
1022
1023 /* Check if this in fact was the beginning of
1024 * the cycle */
1025 if (k == j)
1026 break;
1027 }
1028
1029
1030 if (delete) {
1031 log_warning("Breaking ordering cycle by deleting job %s/%s", delete->unit->meta.id, job_type_to_string(delete->type));
1032 transaction_delete_unit(m, delete->unit);
1033 return -EAGAIN;
1034 }
1035
1036 log_error("Unable to break cycle");
1037
1038 dbus_set_error(e, BUS_ERROR_TRANSACTION_ORDER_IS_CYCLIC, "Transaction order is cyclic. See system logs for details.");
1039 return -ENOEXEC;
1040 }
1041
1042 /* Make the marker point to where we come from, so that we can
1043 * find our way backwards if we want to break a cycle. We use
1044 * a special marker for the beginning: we point to
1045 * ourselves. */
1046 j->marker = from ? from : j;
1047 j->generation = generation;
1048
1049         /* We assume that the dependencies are bidirectional, and
1050 * hence can ignore UNIT_AFTER */
1051 SET_FOREACH(u, j->unit->meta.dependencies[UNIT_BEFORE], i) {
1052 Job *o;
1053
1054 /* Is there a job for this unit? */
1055 if (!(o = hashmap_get(m->transaction_jobs, u)))
1056
1057 /* Ok, there is no job for this in the
1058 * transaction, but maybe there is already one
1059 * running? */
1060 if (!(o = u->meta.job))
1061 continue;
1062
1063 if ((r = transaction_verify_order_one(m, o, j, generation, e)) < 0)
1064 return r;
1065 }
1066
1067 /* Ok, let's backtrack, and remember that this entry is not on
1068 * our path anymore. */
1069 j->marker = NULL;
1070
1071 return 0;
1072 }
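
/* Example of the cycle handling above (hypothetical unit names): if a.service
 * is ordered before b.service, b before c, and c before a, the recursion will
 * eventually revisit a job whose marker is still set for the current
 * generation. It then walks back along the marker chain -- each marker points
 * at the job we came from, with the starting job pointing at itself -- logs
 * every job on the cycle, and deletes the first candidate that is neither
 * installed nor needed by the anchor. If no such candidate exists, the whole
 * transaction is rejected with BUS_ERROR_TRANSACTION_ORDER_IS_CYCLIC. */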
1073
1074 static int transaction_verify_order(Manager *m, unsigned *generation, DBusError *e) {
1075 Job *j;
1076 int r;
1077 Iterator i;
1078 unsigned g;
1079
1080 assert(m);
1081 assert(generation);
1082
1083 /* Check if the ordering graph is cyclic. If it is, try to fix
1084 * that up by dropping one of the jobs. */
1085
1086 g = (*generation)++;
1087
1088 HASHMAP_FOREACH(j, m->transaction_jobs, i)
1089 if ((r = transaction_verify_order_one(m, j, NULL, g, e)) < 0)
1090 return r;
1091
1092 return 0;
1093 }
1094
1095 static void transaction_collect_garbage(Manager *m) {
1096 bool again;
1097
1098 assert(m);
1099
1100 /* Drop jobs that are not required by any other job */
1101
1102 do {
1103 Iterator i;
1104 Job *j;
1105
1106 again = false;
1107
1108 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
1109 if (j->object_list) {
1110 /* log_debug("Keeping job %s/%s because of %s/%s", */
1111 /* j->unit->meta.id, job_type_to_string(j->type), */
1112 /* j->object_list->subject ? j->object_list->subject->unit->meta.id : "root", */
1113 /* j->object_list->subject ? job_type_to_string(j->object_list->subject->type) : "root"); */
1114 continue;
1115 }
1116
1117 /* log_debug("Garbage collecting job %s/%s", j->unit->meta.id, job_type_to_string(j->type)); */
1118 transaction_delete_job(m, j, true);
1119 again = true;
1120 break;
1121 }
1122
1123 } while (again);
1124 }
1125
1126 static int transaction_is_destructive(Manager *m, DBusError *e) {
1127 Iterator i;
1128 Job *j;
1129
1130 assert(m);
1131
1132 /* Checks whether applying this transaction means that
1133 * existing jobs would be replaced */
1134
1135 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
1136
1137 /* Assume merged */
1138 assert(!j->transaction_prev);
1139 assert(!j->transaction_next);
1140
1141 if (j->unit->meta.job &&
1142 j->unit->meta.job != j &&
1143 !job_type_is_superset(j->type, j->unit->meta.job->type)) {
1144
1145 dbus_set_error(e, BUS_ERROR_TRANSACTION_IS_DESTRUCTIVE, "Transaction is destructive.");
1146 return -EEXIST;
1147 }
1148 }
1149
1150 return 0;
1151 }
1152
1153 static void transaction_minimize_impact(Manager *m) {
1154 bool again;
1155 assert(m);
1156
1157 /* Drops all unnecessary jobs that reverse already active jobs
1158 * or that stop a running service. */
1159
1160 do {
1161 Job *j;
1162 Iterator i;
1163
1164 again = false;
1165
1166 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
1167 LIST_FOREACH(transaction, j, j) {
1168 bool stops_running_service, changes_existing_job;
1169
1170 /* If it matters, we shouldn't drop it */
1171 if (j->matters_to_anchor)
1172 continue;
1173
1174 /* Would this stop a running service?
1175 * Would this change an existing job?
1176 * If so, let's drop this entry */
1177
1178 stops_running_service =
1179 j->type == JOB_STOP && UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(j->unit));
1180
1181 changes_existing_job =
1182 j->unit->meta.job &&
1183 job_type_is_conflicting(j->type, j->unit->meta.job->type);
1184
1185 if (!stops_running_service && !changes_existing_job)
1186 continue;
1187
1188 if (stops_running_service)
1189 log_debug("%s/%s would stop a running service.", j->unit->meta.id, job_type_to_string(j->type));
1190
1191 if (changes_existing_job)
1192 log_debug("%s/%s would change existing job.", j->unit->meta.id, job_type_to_string(j->type));
1193
1194 /* Ok, let's get rid of this */
1195 log_debug("Deleting %s/%s to minimize impact.", j->unit->meta.id, job_type_to_string(j->type));
1196
1197 transaction_delete_job(m, j, true);
1198 again = true;
1199 break;
1200 }
1201
1202 if (again)
1203 break;
1204 }
1205
1206 } while (again);
1207 }
1208
1209 static int transaction_apply(Manager *m, JobMode mode) {
1210 Iterator i;
1211 Job *j;
1212 int r;
1213
1214 /* Moves the transaction jobs to the set of active jobs */
1215
1216 if (mode == JOB_ISOLATE) {
1217
1218 /* When isolating first kill all installed jobs which
1219 * aren't part of the new transaction */
1220 rescan:
1221 HASHMAP_FOREACH(j, m->jobs, i) {
1222 assert(j->installed);
1223
1224 if (hashmap_get(m->transaction_jobs, j->unit))
1225 continue;
1226
1227 /* 'j' itself is safe to remove, but if other jobs
1228 are invalidated recursively, our iterator may become
1229 invalid and we need to start over. */
1230 if (job_finish_and_invalidate(j, JOB_CANCELED) > 0)
1231 goto rescan;
1232 }
1233 }
1234
1235 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
1236 /* Assume merged */
1237 assert(!j->transaction_prev);
1238 assert(!j->transaction_next);
1239
1240 if (j->installed)
1241 continue;
1242
1243 if ((r = hashmap_put(m->jobs, UINT32_TO_PTR(j->id), j)) < 0)
1244 goto rollback;
1245 }
1246
1247 while ((j = hashmap_steal_first(m->transaction_jobs))) {
1248 if (j->installed) {
1249 /* log_debug("Skipping already installed job %s/%s as %u", j->unit->meta.id, job_type_to_string(j->type), (unsigned) j->id); */
1250 continue;
1251 }
1252
1253 if (j->unit->meta.job)
1254 job_free(j->unit->meta.job);
1255
1256 j->unit->meta.job = j;
1257 j->installed = true;
1258 m->n_installed_jobs ++;
1259
1260 /* We're fully installed. Now let's free data we don't
1261 * need anymore. */
1262
1263 assert(!j->transaction_next);
1264 assert(!j->transaction_prev);
1265
1266 job_add_to_run_queue(j);
1267 job_add_to_dbus_queue(j);
1268 job_start_timer(j);
1269
1270 log_debug("Installed new job %s/%s as %u", j->unit->meta.id, job_type_to_string(j->type), (unsigned) j->id);
1271 }
1272
1273 /* As last step, kill all remaining job dependencies. */
1274 transaction_clean_dependencies(m);
1275
1276 return 0;
1277
1278 rollback:
1279
1280 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
1281 if (j->installed)
1282 continue;
1283
1284 hashmap_remove(m->jobs, UINT32_TO_PTR(j->id));
1285 }
1286
1287 return r;
1288 }
1289
1290 static int transaction_activate(Manager *m, JobMode mode, DBusError *e) {
1291 int r;
1292 unsigned generation = 1;
1293
1294 assert(m);
1295
1296 /* This applies the changes recorded in transaction_jobs to
1297 * the actual list of jobs, if possible. */
1298
1299 /* First step: figure out which jobs matter */
1300 transaction_find_jobs_that_matter_to_anchor(m, NULL, generation++);
1301
1302 /* Second step: Try not to stop any running services if
1303 * we don't have to. Don't try to reverse running
1304 * jobs if we don't have to. */
1305 if (mode == JOB_FAIL)
1306 transaction_minimize_impact(m);
1307
1308 /* Third step: Drop redundant jobs */
1309 transaction_drop_redundant(m);
1310
1311 for (;;) {
1312 /* Fourth step: Let's remove unneeded jobs that might
1313 * be lurking. */
1314 if (mode != JOB_ISOLATE)
1315 transaction_collect_garbage(m);
1316
1317 /* Fifth step: verify order makes sense and correct
1318 * cycles if necessary and possible */
1319 if ((r = transaction_verify_order(m, &generation, e)) >= 0)
1320 break;
1321
1322 if (r != -EAGAIN) {
1323 log_warning("Requested transaction contains an unfixable cyclic ordering dependency: %s", bus_error(e, r));
1324 goto rollback;
1325 }
1326
1327 /* Let's see if the resulting transaction ordering
1328 * graph is still cyclic... */
1329 }
1330
1331 for (;;) {
1332 /* Sixth step: let's drop unmergeable entries if
1333 * necessary and possible, merge entries we can
1334 * merge */
1335 if ((r = transaction_merge_jobs(m, e)) >= 0)
1336 break;
1337
1338 if (r != -EAGAIN) {
1339 log_warning("Requested transaction contains unmergeable jobs: %s", bus_error(e, r));
1340 goto rollback;
1341 }
1342
1343 /* Seventh step: an entry got dropped, let's garbage
1344 * collect its dependencies. */
1345 if (mode != JOB_ISOLATE)
1346 transaction_collect_garbage(m);
1347
1348 /* Let's see if the resulting transaction still has
1349 * unmergeable entries ... */
1350 }
1351
1352         /* Eighth step: Drop redundant jobs again, if the merging now allows us to drop more. */
1353 transaction_drop_redundant(m);
1354
1355 /* Ninth step: check whether we can actually apply this */
1356 if (mode == JOB_FAIL)
1357 if ((r = transaction_is_destructive(m, e)) < 0) {
1358 log_notice("Requested transaction contradicts existing jobs: %s", bus_error(e, r));
1359 goto rollback;
1360 }
1361
1362 /* Tenth step: apply changes */
1363 if ((r = transaction_apply(m, mode)) < 0) {
1364 log_warning("Failed to apply transaction: %s", strerror(-r));
1365 goto rollback;
1366 }
1367
1368 assert(hashmap_isempty(m->transaction_jobs));
1369 assert(!m->transaction_anchor);
1370
1371 return 0;
1372
1373 rollback:
1374 transaction_abort(m);
1375 return r;
1376 }
1377
1378 static Job* transaction_add_one_job(Manager *m, JobType type, Unit *unit, bool override, bool *is_new) {
1379 Job *j, *f;
1380
1381 assert(m);
1382 assert(unit);
1383
1384 /* Looks for an existing prospective job and returns that. If
1385 * it doesn't exist it is created and added to the prospective
1386 * jobs list. */
1387
1388 f = hashmap_get(m->transaction_jobs, unit);
1389
1390 LIST_FOREACH(transaction, j, f) {
1391 assert(j->unit == unit);
1392
1393 if (j->type == type) {
1394 if (is_new)
1395 *is_new = false;
1396 return j;
1397 }
1398 }
1399
1400 if (unit->meta.job && unit->meta.job->type == type)
1401 j = unit->meta.job;
1402 else if (!(j = job_new(m, type, unit)))
1403 return NULL;
1404
1405 j->generation = 0;
1406 j->marker = NULL;
1407 j->matters_to_anchor = false;
1408 j->override = override;
1409
1410 LIST_PREPEND(Job, transaction, f, j);
1411
1412 if (hashmap_replace(m->transaction_jobs, unit, f) < 0) {
1413 job_free(j);
1414 return NULL;
1415 }
1416
1417 if (is_new)
1418 *is_new = true;
1419
1420 /* log_debug("Added job %s/%s to transaction.", unit->meta.id, job_type_to_string(type)); */
1421
1422 return j;
1423 }
1424
1425 void manager_transaction_unlink_job(Manager *m, Job *j, bool delete_dependencies) {
1426 assert(m);
1427 assert(j);
1428
1429 if (j->transaction_prev)
1430 j->transaction_prev->transaction_next = j->transaction_next;
1431 else if (j->transaction_next)
1432 hashmap_replace(m->transaction_jobs, j->unit, j->transaction_next);
1433 else
1434 hashmap_remove_value(m->transaction_jobs, j->unit, j);
1435
1436 if (j->transaction_next)
1437 j->transaction_next->transaction_prev = j->transaction_prev;
1438
1439 j->transaction_prev = j->transaction_next = NULL;
1440
1441 while (j->subject_list)
1442 job_dependency_free(j->subject_list);
1443
1444 while (j->object_list) {
1445 Job *other = j->object_list->matters ? j->object_list->subject : NULL;
1446
1447 job_dependency_free(j->object_list);
1448
1449 if (other && delete_dependencies) {
1450 log_debug("Deleting job %s/%s as dependency of job %s/%s",
1451 other->unit->meta.id, job_type_to_string(other->type),
1452 j->unit->meta.id, job_type_to_string(j->type));
1453 transaction_delete_job(m, other, delete_dependencies);
1454 }
1455 }
1456 }
1457
1458 static int transaction_add_job_and_dependencies(
1459 Manager *m,
1460 JobType type,
1461 Unit *unit,
1462 Job *by,
1463 bool matters,
1464 bool override,
1465 bool conflicts,
1466 bool ignore_requirements,
1467 bool ignore_order,
1468 DBusError *e,
1469 Job **_ret) {
1470 Job *ret;
1471 Iterator i;
1472 Unit *dep;
1473 int r;
1474 bool is_new;
1475
1476 assert(m);
1477 assert(type < _JOB_TYPE_MAX);
1478 assert(unit);
1479
1480 /* log_debug("Pulling in %s/%s from %s/%s", */
1481 /* unit->meta.id, job_type_to_string(type), */
1482 /* by ? by->unit->meta.id : "NA", */
1483 /* by ? job_type_to_string(by->type) : "NA"); */
1484
1485 if (unit->meta.load_state != UNIT_LOADED &&
1486 unit->meta.load_state != UNIT_ERROR &&
1487 unit->meta.load_state != UNIT_MASKED) {
1488 dbus_set_error(e, BUS_ERROR_LOAD_FAILED, "Unit %s is not loaded properly.", unit->meta.id);
1489 return -EINVAL;
1490 }
1491
1492 if (type != JOB_STOP && unit->meta.load_state == UNIT_ERROR) {
1493 dbus_set_error(e, BUS_ERROR_LOAD_FAILED,
1494 "Unit %s failed to load: %s. "
1495 "See system logs and 'systemctl status %s' for details.",
1496 unit->meta.id,
1497 strerror(-unit->meta.load_error),
1498 unit->meta.id);
1499 return -EINVAL;
1500 }
1501
1502 if (type != JOB_STOP && unit->meta.load_state == UNIT_MASKED) {
1503 dbus_set_error(e, BUS_ERROR_MASKED, "Unit %s is masked.", unit->meta.id);
1504 return -EINVAL;
1505 }
1506
1507 if (!unit_job_is_applicable(unit, type)) {
1508 dbus_set_error(e, BUS_ERROR_JOB_TYPE_NOT_APPLICABLE, "Job type %s is not applicable for unit %s.", job_type_to_string(type), unit->meta.id);
1509 return -EBADR;
1510 }
1511
1512 /* First add the job. */
1513 if (!(ret = transaction_add_one_job(m, type, unit, override, &is_new)))
1514 return -ENOMEM;
1515
1516 ret->ignore_order = ret->ignore_order || ignore_order;
1517
1518 /* Then, add a link to the job. */
1519 if (!job_dependency_new(by, ret, matters, conflicts))
1520 return -ENOMEM;
1521
1522 if (is_new && !ignore_requirements) {
1523 Set *following;
1524
1525 /* If we are following some other unit, make sure we
1526 * add all dependencies of everybody following. */
1527 if (unit_following_set(ret->unit, &following) > 0) {
1528 SET_FOREACH(dep, following, i)
1529 if ((r = transaction_add_job_and_dependencies(m, type, dep, ret, false, override, false, false, ignore_order, e, NULL)) < 0) {
1530 log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->meta.id, bus_error(e, r));
1531
1532 if (e)
1533 dbus_error_free(e);
1534 }
1535
1536 set_free(following);
1537 }
1538
1539 /* Finally, recursively add in all dependencies. */
1540 if (type == JOB_START || type == JOB_RELOAD_OR_START) {
1541 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_REQUIRES], i)
1542 if ((r = transaction_add_job_and_dependencies(m, JOB_START, dep, ret, true, override, false, false, ignore_order, e, NULL)) < 0) {
1543 if (r != -EBADR)
1544 goto fail;
1545
1546 if (e)
1547 dbus_error_free(e);
1548 }
1549
1550 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_BIND_TO], i)
1551 if ((r = transaction_add_job_and_dependencies(m, JOB_START, dep, ret, true, override, false, false, ignore_order, e, NULL)) < 0) {
1552
1553 if (r != -EBADR)
1554 goto fail;
1555
1556 if (e)
1557 dbus_error_free(e);
1558 }
1559
1560 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_REQUIRES_OVERRIDABLE], i)
1561 if ((r = transaction_add_job_and_dependencies(m, JOB_START, dep, ret, !override, override, false, false, ignore_order, e, NULL)) < 0) {
1562 log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->meta.id, bus_error(e, r));
1563
1564 if (e)
1565 dbus_error_free(e);
1566 }
1567
1568 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_WANTS], i)
1569 if ((r = transaction_add_job_and_dependencies(m, JOB_START, dep, ret, false, false, false, false, ignore_order, e, NULL)) < 0) {
1570 log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->meta.id, bus_error(e, r));
1571
1572 if (e)
1573 dbus_error_free(e);
1574 }
1575
1576 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_REQUISITE], i)
1577 if ((r = transaction_add_job_and_dependencies(m, JOB_VERIFY_ACTIVE, dep, ret, true, override, false, false, ignore_order, e, NULL)) < 0) {
1578
1579 if (r != -EBADR)
1580 goto fail;
1581
1582 if (e)
1583 dbus_error_free(e);
1584 }
1585
1586 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_REQUISITE_OVERRIDABLE], i)
1587 if ((r = transaction_add_job_and_dependencies(m, JOB_VERIFY_ACTIVE, dep, ret, !override, override, false, false, ignore_order, e, NULL)) < 0) {
1588 log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->meta.id, bus_error(e, r));
1589
1590 if (e)
1591 dbus_error_free(e);
1592 }
1593
1594 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_CONFLICTS], i)
1595 if ((r = transaction_add_job_and_dependencies(m, JOB_STOP, dep, ret, true, override, true, false, ignore_order, e, NULL)) < 0) {
1596
1597 if (r != -EBADR)
1598 goto fail;
1599
1600 if (e)
1601 dbus_error_free(e);
1602 }
1603
1604 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_CONFLICTED_BY], i)
1605 if ((r = transaction_add_job_and_dependencies(m, JOB_STOP, dep, ret, false, override, false, false, ignore_order, e, NULL)) < 0) {
1606 log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->meta.id, bus_error(e, r));
1607
1608 if (e)
1609 dbus_error_free(e);
1610 }
1611
1612 } else if (type == JOB_STOP || type == JOB_RESTART || type == JOB_TRY_RESTART) {
1613
1614 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_REQUIRED_BY], i)
1615 if ((r = transaction_add_job_and_dependencies(m, type, dep, ret, true, override, false, false, ignore_order, e, NULL)) < 0) {
1616
1617 if (r != -EBADR)
1618 goto fail;
1619
1620 if (e)
1621 dbus_error_free(e);
1622 }
1623
1624 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_BOUND_BY], i)
1625 if ((r = transaction_add_job_and_dependencies(m, type, dep, ret, true, override, false, false, ignore_order, e, NULL)) < 0) {
1626
1627 if (r != -EBADR)
1628 goto fail;
1629
1630 if (e)
1631 dbus_error_free(e);
1632 }
1633 }
1634
1635                 /* JOB_VERIFY_ACTIVE, JOB_RELOAD require no dependency handling */
1636 }
1637
1638 if (_ret)
1639 *_ret = ret;
1640
1641 return 0;
1642
1643 fail:
1644 return r;
1645 }
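
/* Summary of the recursion above, as implemented by the SET_FOREACH blocks:
 * for start jobs, UNIT_REQUIRES and UNIT_BIND_TO pull in mandatory start
 * jobs, UNIT_WANTS pulls in start jobs that don't matter to the anchor,
 * UNIT_REQUISITE pulls in verify-active jobs, UNIT_CONFLICTS pulls in
 * mandatory stop jobs and UNIT_CONFLICTED_BY non-mandatory ones. The
 * *_OVERRIDABLE variants only matter when override is off, and failure to
 * enqueue them is merely logged. For stop, restart and try-restart jobs,
 * UNIT_REQUIRED_BY and UNIT_BOUND_BY propagate the job to dependent units. */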
1646
1647 static int transaction_add_isolate_jobs(Manager *m) {
1648 Iterator i;
1649 Unit *u;
1650 char *k;
1651 int r;
1652
1653 assert(m);
1654
1655 HASHMAP_FOREACH_KEY(u, k, m->units, i) {
1656
1657 /* ignore aliases */
1658 if (u->meta.id != k)
1659 continue;
1660
1661 if (u->meta.ignore_on_isolate)
1662 continue;
1663
1664                 /* No need to stop inactive units */
1665 if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(u)) && !u->meta.job)
1666 continue;
1667
1668 /* Is there already something listed for this? */
1669 if (hashmap_get(m->transaction_jobs, u))
1670 continue;
1671
1672 if ((r = transaction_add_job_and_dependencies(m, JOB_STOP, u, NULL, true, false, false, false, false, NULL, NULL)) < 0)
1673 log_warning("Cannot add isolate job for unit %s, ignoring: %s", u->meta.id, strerror(-r));
1674 }
1675
1676 return 0;
1677 }
1678
1679 int manager_add_job(Manager *m, JobType type, Unit *unit, JobMode mode, bool override, DBusError *e, Job **_ret) {
1680 int r;
1681 Job *ret;
1682
1683 assert(m);
1684 assert(type < _JOB_TYPE_MAX);
1685 assert(unit);
1686 assert(mode < _JOB_MODE_MAX);
1687
1688 if (mode == JOB_ISOLATE && type != JOB_START) {
1689 dbus_set_error(e, BUS_ERROR_INVALID_JOB_MODE, "Isolate is only valid for start.");
1690 return -EINVAL;
1691 }
1692
1693 if (mode == JOB_ISOLATE && !unit->meta.allow_isolate) {
1694 dbus_set_error(e, BUS_ERROR_NO_ISOLATION, "Operation refused, unit may not be isolated.");
1695 return -EPERM;
1696 }
1697
1698 log_debug("Trying to enqueue job %s/%s/%s", unit->meta.id, job_type_to_string(type), job_mode_to_string(mode));
1699
1700 if ((r = transaction_add_job_and_dependencies(m, type, unit, NULL, true, override, false,
1701 mode == JOB_IGNORE_DEPENDENCIES || mode == JOB_IGNORE_REQUIREMENTS,
1702 mode == JOB_IGNORE_DEPENDENCIES, e, &ret)) < 0) {
1703 transaction_abort(m);
1704 return r;
1705 }
1706
1707 if (mode == JOB_ISOLATE)
1708 if ((r = transaction_add_isolate_jobs(m)) < 0) {
1709 transaction_abort(m);
1710 return r;
1711 }
1712
1713 if ((r = transaction_activate(m, mode, e)) < 0)
1714 return r;
1715
1716 log_debug("Enqueued job %s/%s as %u", unit->meta.id, job_type_to_string(type), (unsigned) ret->id);
1717
1718 if (_ret)
1719 *_ret = ret;
1720
1721 return 0;
1722 }
1723
1724 int manager_add_job_by_name(Manager *m, JobType type, const char *name, JobMode mode, bool override, DBusError *e, Job **_ret) {
1725 Unit *unit;
1726 int r;
1727
1728 assert(m);
1729 assert(type < _JOB_TYPE_MAX);
1730 assert(name);
1731 assert(mode < _JOB_MODE_MAX);
1732
1733 if ((r = manager_load_unit(m, name, NULL, NULL, &unit)) < 0)
1734 return r;
1735
1736 return manager_add_job(m, type, unit, mode, override, e, _ret);
1737 }
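
/* Typical call into the two functions above, e.g. from a D-Bus method handler
 * (the unit name here is just an example):
 *
 *     DBusError error;
 *     dbus_error_init(&error);
 *     r = manager_add_job_by_name(m, JOB_START, "foo.service",
 *                                 JOB_REPLACE, false, &error, NULL);
 *     if (r < 0)
 *             log_error("Failed to enqueue job: %s", bus_error(&error, r));
 *     dbus_error_free(&error);
 *
 * manager_start_target() further down is essentially this pattern. */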
1738
1739 Job *manager_get_job(Manager *m, uint32_t id) {
1740 assert(m);
1741
1742 return hashmap_get(m->jobs, UINT32_TO_PTR(id));
1743 }
1744
1745 Unit *manager_get_unit(Manager *m, const char *name) {
1746 assert(m);
1747 assert(name);
1748
1749 return hashmap_get(m->units, name);
1750 }
1751
1752 unsigned manager_dispatch_load_queue(Manager *m) {
1753 Meta *meta;
1754 unsigned n = 0;
1755
1756 assert(m);
1757
1758 /* Make sure we are not run recursively */
1759 if (m->dispatching_load_queue)
1760 return 0;
1761
1762 m->dispatching_load_queue = true;
1763
1764 /* Dispatches the load queue. Takes a unit from the queue and
1765 * tries to load its data until the queue is empty */
1766
1767 while ((meta = m->load_queue)) {
1768 assert(meta->in_load_queue);
1769
1770 unit_load((Unit*) meta);
1771 n++;
1772 }
1773
1774 m->dispatching_load_queue = false;
1775 return n;
1776 }
1777
1778 int manager_load_unit_prepare(Manager *m, const char *name, const char *path, DBusError *e, Unit **_ret) {
1779 Unit *ret;
1780 int r;
1781
1782 assert(m);
1783 assert(name || path);
1784
1785 /* This will prepare the unit for loading, but not actually
1786 * load anything from disk. */
1787
1788 if (path && !is_path(path)) {
1789 dbus_set_error(e, BUS_ERROR_INVALID_PATH, "Path %s is not absolute.", path);
1790 return -EINVAL;
1791 }
1792
1793 if (!name)
1794 name = file_name_from_path(path);
1795
1796 if (!unit_name_is_valid(name, false)) {
1797 dbus_set_error(e, BUS_ERROR_INVALID_NAME, "Unit name %s is not valid.", name);
1798 return -EINVAL;
1799 }
1800
1801 if ((ret = manager_get_unit(m, name))) {
1802 *_ret = ret;
1803 return 1;
1804 }
1805
1806 if (!(ret = unit_new(m)))
1807 return -ENOMEM;
1808
1809 if (path)
1810 if (!(ret->meta.fragment_path = strdup(path))) {
1811 unit_free(ret);
1812 return -ENOMEM;
1813 }
1814
1815 if ((r = unit_add_name(ret, name)) < 0) {
1816 unit_free(ret);
1817 return r;
1818 }
1819
1820 unit_add_to_load_queue(ret);
1821 unit_add_to_dbus_queue(ret);
1822 unit_add_to_gc_queue(ret);
1823
1824 if (_ret)
1825 *_ret = ret;
1826
1827 return 0;
1828 }
1829
1830 int manager_load_unit(Manager *m, const char *name, const char *path, DBusError *e, Unit **_ret) {
1831 int r;
1832
1833 assert(m);
1834
1835 /* This will load the service information files, but not actually
1836 * start any services or anything. */
1837
1838 if ((r = manager_load_unit_prepare(m, name, path, e, _ret)) != 0)
1839 return r;
1840
1841 manager_dispatch_load_queue(m);
1842
1843 if (_ret)
1844 *_ret = unit_follow_merge(*_ret);
1845
1846 return 0;
1847 }
1848
1849 void manager_dump_jobs(Manager *s, FILE *f, const char *prefix) {
1850 Iterator i;
1851 Job *j;
1852
1853 assert(s);
1854 assert(f);
1855
1856 HASHMAP_FOREACH(j, s->jobs, i)
1857 job_dump(j, f, prefix);
1858 }
1859
1860 void manager_dump_units(Manager *s, FILE *f, const char *prefix) {
1861 Iterator i;
1862 Unit *u;
1863 const char *t;
1864
1865 assert(s);
1866 assert(f);
1867
1868 HASHMAP_FOREACH_KEY(u, t, s->units, i)
1869 if (u->meta.id == t)
1870 unit_dump(u, f, prefix);
1871 }
1872
1873 void manager_clear_jobs(Manager *m) {
1874 Job *j;
1875
1876 assert(m);
1877
1878 transaction_abort(m);
1879
1880 while ((j = hashmap_first(m->jobs)))
1881 job_finish_and_invalidate(j, JOB_CANCELED);
1882 }
1883
1884 unsigned manager_dispatch_run_queue(Manager *m) {
1885 Job *j;
1886 unsigned n = 0;
1887
1888 if (m->dispatching_run_queue)
1889 return 0;
1890
1891 m->dispatching_run_queue = true;
1892
1893 while ((j = m->run_queue)) {
1894 assert(j->installed);
1895 assert(j->in_run_queue);
1896
1897 job_run_and_invalidate(j);
1898 n++;
1899 }
1900
1901 m->dispatching_run_queue = false;
1902 return n;
1903 }
1904
1905 unsigned manager_dispatch_dbus_queue(Manager *m) {
1906 Job *j;
1907 Meta *meta;
1908 unsigned n = 0;
1909
1910 assert(m);
1911
1912 if (m->dispatching_dbus_queue)
1913 return 0;
1914
1915 m->dispatching_dbus_queue = true;
1916
1917 while ((meta = m->dbus_unit_queue)) {
1918 assert(meta->in_dbus_queue);
1919
1920 bus_unit_send_change_signal((Unit*) meta);
1921 n++;
1922 }
1923
1924 while ((j = m->dbus_job_queue)) {
1925 assert(j->in_dbus_queue);
1926
1927 bus_job_send_change_signal(j);
1928 n++;
1929 }
1930
1931 m->dispatching_dbus_queue = false;
1932 return n;
1933 }
1934
1935 static int manager_process_notify_fd(Manager *m) {
1936 ssize_t n;
1937
1938 assert(m);
1939
1940 for (;;) {
1941 char buf[4096];
1942 struct msghdr msghdr;
1943 struct iovec iovec;
1944 struct ucred *ucred;
1945 union {
1946 struct cmsghdr cmsghdr;
1947 uint8_t buf[CMSG_SPACE(sizeof(struct ucred))];
1948 } control;
1949 Unit *u;
1950 char **tags;
1951
1952 zero(iovec);
1953 iovec.iov_base = buf;
1954 iovec.iov_len = sizeof(buf)-1;
1955
1956 zero(control);
1957 zero(msghdr);
1958 msghdr.msg_iov = &iovec;
1959 msghdr.msg_iovlen = 1;
1960 msghdr.msg_control = &control;
1961 msghdr.msg_controllen = sizeof(control);
1962
1963 if ((n = recvmsg(m->notify_watch.fd, &msghdr, MSG_DONTWAIT)) <= 0) {
1964 if (n >= 0)
1965 return -EIO;
1966
1967 if (errno == EAGAIN || errno == EINTR)
1968 break;
1969
1970 return -errno;
1971 }
1972
1973 if (msghdr.msg_controllen < CMSG_LEN(sizeof(struct ucred)) ||
1974 control.cmsghdr.cmsg_level != SOL_SOCKET ||
1975 control.cmsghdr.cmsg_type != SCM_CREDENTIALS ||
1976 control.cmsghdr.cmsg_len != CMSG_LEN(sizeof(struct ucred))) {
1977 log_warning("Received notify message without credentials. Ignoring.");
1978 continue;
1979 }
1980
1981 ucred = (struct ucred*) CMSG_DATA(&control.cmsghdr);
1982
1983 if (!(u = hashmap_get(m->watch_pids, LONG_TO_PTR(ucred->pid))))
1984 if (!(u = cgroup_unit_by_pid(m, ucred->pid))) {
1985 log_warning("Cannot find unit for notify message of PID %lu.", (unsigned long) ucred->pid);
1986 continue;
1987 }
1988
1989 assert((size_t) n < sizeof(buf));
1990 buf[n] = 0;
1991 if (!(tags = strv_split(buf, "\n\r")))
1992 return -ENOMEM;
1993
1994 log_debug("Got notification message for unit %s", u->meta.id);
1995
1996 if (UNIT_VTABLE(u)->notify_message)
1997 UNIT_VTABLE(u)->notify_message(u, ucred->pid, tags);
1998
1999 strv_free(tags);
2000 }
2001
2002 return 0;
2003 }
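
/* The datagrams parsed above are expected to carry newline-separated
 * "VAR=value" assignments -- the same format the sd_notify() sketch near
 * manager_setup_notify() sends, e.g. "READY=1\nSTATUS=Listening\n". What the
 * individual tags mean is entirely up to the unit type's notify_message()
 * implementation. */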
2004
2005 static int manager_dispatch_sigchld(Manager *m) {
2006 assert(m);
2007
2008 for (;;) {
2009 siginfo_t si;
2010 Unit *u;
2011 int r;
2012
2013 zero(si);
2014
2015                 /* First we call waitid() for a PID and do not reap the
2016 * zombie. That way we can still access /proc/$PID for
2017 * it while it is a zombie. */
2018 if (waitid(P_ALL, 0, &si, WEXITED|WNOHANG|WNOWAIT) < 0) {
2019
2020 if (errno == ECHILD)
2021 break;
2022
2023 if (errno == EINTR)
2024 continue;
2025
2026 return -errno;
2027 }
2028
2029 if (si.si_pid <= 0)
2030 break;
2031
2032 if (si.si_code == CLD_EXITED || si.si_code == CLD_KILLED || si.si_code == CLD_DUMPED) {
2033 char *name = NULL;
2034
2035 get_process_comm(si.si_pid, &name);
2036 log_debug("Got SIGCHLD for process %lu (%s)", (unsigned long) si.si_pid, strna(name));
2037 free(name);
2038 }
2039
2040 /* Let's flush any message the dying child might still
2041 * have queued for us. This ensures that the process
2042 * still exists in /proc so that we can figure out
2043 * which cgroup and hence unit it belongs to. */
2044 if ((r = manager_process_notify_fd(m)) < 0)
2045 return r;
2046
2047 /* And now figure out the unit this belongs to */
2048 if (!(u = hashmap_get(m->watch_pids, LONG_TO_PTR(si.si_pid))))
2049 u = cgroup_unit_by_pid(m, si.si_pid);
2050
2051 /* And now, we actually reap the zombie. */
2052 if (waitid(P_PID, si.si_pid, &si, WEXITED) < 0) {
2053 if (errno == EINTR)
2054 continue;
2055
2056 return -errno;
2057 }
2058
2059 if (si.si_code != CLD_EXITED && si.si_code != CLD_KILLED && si.si_code != CLD_DUMPED)
2060 continue;
2061
2062 log_debug("Child %lu died (code=%s, status=%i/%s)",
2063 (long unsigned) si.si_pid,
2064 sigchld_code_to_string(si.si_code),
2065 si.si_status,
2066 strna(si.si_code == CLD_EXITED
2067 ? exit_status_to_string(si.si_status, EXIT_STATUS_FULL)
2068 : signal_to_string(si.si_status)));
2069
2070 if (!u)
2071 continue;
2072
2073 log_debug("Child %lu belongs to %s", (long unsigned) si.si_pid, u->meta.id);
2074
2075 hashmap_remove(m->watch_pids, LONG_TO_PTR(si.si_pid));
2076 UNIT_VTABLE(u)->sigchld_event(u, si.si_pid, si.si_code, si.si_status);
2077 }
2078
2079 return 0;
2080 }
2081
2082 static int manager_start_target(Manager *m, const char *name, JobMode mode) {
2083 int r;
2084 DBusError error;
2085
2086 dbus_error_init(&error);
2087
2088 log_debug("Activating special unit %s", name);
2089
2090 if ((r = manager_add_job_by_name(m, JOB_START, name, mode, true, &error, NULL)) < 0)
2091 log_error("Failed to enqueue %s job: %s", name, bus_error(&error, r));
2092
2093 dbus_error_free(&error);
2094
2095 return r;
2096 }
2097
2098 static int manager_process_signal_fd(Manager *m) {
2099 ssize_t n;
2100 struct signalfd_siginfo sfsi;
2101 bool sigchld = false;
2102
2103 assert(m);
2104
2105 for (;;) {
2106 if ((n = read(m->signal_watch.fd, &sfsi, sizeof(sfsi))) != sizeof(sfsi)) {
2107
2108 if (n >= 0)
2109 return -EIO;
2110
2111 if (errno == EINTR || errno == EAGAIN)
2112 break;
2113
2114 return -errno;
2115 }
2116
2117 if (sfsi.ssi_pid > 0) {
2118 char *p = NULL;
2119
2120 get_process_comm(sfsi.ssi_pid, &p);
2121
2122 log_debug("Received SIG%s from PID %lu (%s).",
2123 signal_to_string(sfsi.ssi_signo),
2124 (unsigned long) sfsi.ssi_pid, strna(p));
2125 free(p);
2126 } else
2127 log_debug("Received SIG%s.", signal_to_string(sfsi.ssi_signo));
2128
2129 switch (sfsi.ssi_signo) {
2130
2131 case SIGCHLD:
2132 sigchld = true;
2133 break;
2134
2135 case SIGTERM:
2136 if (m->running_as == MANAGER_SYSTEM) {
2137 /* This is for compatibility with the
2138 * original sysvinit */
2139 m->exit_code = MANAGER_REEXECUTE;
2140 break;
2141 }
2142
2143 /* Fall through */
2144
2145 case SIGINT:
2146 if (m->running_as == MANAGER_SYSTEM) {
2147 manager_start_target(m, SPECIAL_CTRL_ALT_DEL_TARGET, JOB_REPLACE);
2148 break;
2149 }
2150
2151 /* Run the exit target if there is one; if not, just exit. */
2152 if (manager_start_target(m, SPECIAL_EXIT_TARGET, JOB_REPLACE) < 0) {
2153 m->exit_code = MANAGER_EXIT;
2154 return 0;
2155 }
2156
2157 break;
2158
2159 case SIGWINCH:
2160 if (m->running_as == MANAGER_SYSTEM)
2161 manager_start_target(m, SPECIAL_KBREQUEST_TARGET, JOB_REPLACE);
2162
2163 /* This is a nop on non-init */
2164 break;
2165
2166 case SIGPWR:
2167 if (m->running_as == MANAGER_SYSTEM)
2168 manager_start_target(m, SPECIAL_SIGPWR_TARGET, JOB_REPLACE);
2169
2170 /* This is a nop on non-init */
2171 break;
2172
2173 case SIGUSR1: {
2174 Unit *u;
2175
2176 u = manager_get_unit(m, SPECIAL_DBUS_SERVICE);
2177
2178 if (!u || UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u))) {
2179 log_info("Trying to reconnect to bus...");
2180 bus_init(m, true);
2181 }
2182
2183 if (!u || !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u))) {
2184 log_info("Loading D-Bus service...");
2185 manager_start_target(m, SPECIAL_DBUS_SERVICE, JOB_REPLACE);
2186 }
2187
2188 break;
2189 }
2190
2191 case SIGUSR2: {
2192 FILE *f;
2193 char *dump = NULL;
2194 size_t size;
2195
2196 if (!(f = open_memstream(&dump, &size))) {
2197 log_warning("Failed to allocate memory stream.");
2198 break;
2199 }
2200
2201 manager_dump_units(m, f, "\t");
2202 manager_dump_jobs(m, f, "\t");
2203
2204 if (ferror(f)) {
2205 fclose(f);
2206 free(dump);
2207 log_warning("Failed to write status stream");
2208 break;
2209 }
2210
2211 fclose(f);
2212 log_dump(LOG_INFO, dump);
2213 free(dump);
2214
2215 break;
2216 }
2217
2218 case SIGHUP:
2219 m->exit_code = MANAGER_RELOAD;
2220 break;
2221
2222 default: {
2223
2224 /* Starting SIGRTMIN+0 */
2225 static const char * const target_table[] = {
2226 [0] = SPECIAL_DEFAULT_TARGET,
2227 [1] = SPECIAL_RESCUE_TARGET,
2228 [2] = SPECIAL_EMERGENCY_TARGET,
2229 [3] = SPECIAL_HALT_TARGET,
2230 [4] = SPECIAL_POWEROFF_TARGET,
2231 [5] = SPECIAL_REBOOT_TARGET,
2232 [6] = SPECIAL_KEXEC_TARGET
2233 };
2234
2235 /* Starting SIGRTMIN+13, so that target halt and system halt are 10 apart */
2236 static const ManagerExitCode code_table[] = {
2237 [0] = MANAGER_HALT,
2238 [1] = MANAGER_POWEROFF,
2239 [2] = MANAGER_REBOOT,
2240 [3] = MANAGER_KEXEC
2241 };
2242
2243 if ((int) sfsi.ssi_signo >= SIGRTMIN+0 &&
2244 (int) sfsi.ssi_signo < SIGRTMIN+(int) ELEMENTSOF(target_table)) {
2245 int idx = (int) sfsi.ssi_signo - SIGRTMIN;
2246 manager_start_target(m, target_table[idx],
2247 (idx == 1 || idx == 2) ? JOB_ISOLATE : JOB_REPLACE);
2248 break;
2249 }
2250
2251 if ((int) sfsi.ssi_signo >= SIGRTMIN+13 &&
2252 (int) sfsi.ssi_signo < SIGRTMIN+13+(int) ELEMENTSOF(code_table)) {
2253 m->exit_code = code_table[sfsi.ssi_signo - SIGRTMIN - 13];
2254 break;
2255 }
2256
2257 switch (sfsi.ssi_signo - SIGRTMIN) {
2258
2259 case 20:
2260 log_debug("Enabling showing of status.");
2261 manager_set_show_status(m, true);
2262 break;
2263
2264 case 21:
2265 log_debug("Disabling showing of status.");
2266 manager_set_show_status(m, false);
2267 break;
2268
2269 case 22:
2270 log_set_max_level(LOG_DEBUG);
2271 log_notice("Setting log level to debug.");
2272 break;
2273
2274 case 23:
2275 log_set_max_level(LOG_INFO);
2276 log_notice("Setting log level to info.");
2277 break;
2278
2279 case 27:
2280 log_set_target(LOG_TARGET_CONSOLE);
2281 log_notice("Setting log target to console.");
2282 break;
2283
2284 case 28:
2285 log_set_target(LOG_TARGET_KMSG);
2286 log_notice("Setting log target to kmsg.");
2287 break;
2288
2289 case 29:
2290 log_set_target(LOG_TARGET_SYSLOG_OR_KMSG);
2291 log_notice("Setting log target to syslog-or-kmsg.");
2292 break;
2293
2294 default:
2295 log_warning("Got unhandled signal <%s>.", signal_to_string(sfsi.ssi_signo));
2296 }
2297 }
2298 }
2299 }
2300
2301 if (sigchld)
2302 return manager_dispatch_sigchld(m);
2303
2304 return 0;
2305 }
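/* Illustrative sketch (not part of the original file): the general signalfd
 * pattern that feeds the reader above. The signals have to be blocked first,
 * otherwise they would still be delivered asynchronously; the manager's
 * actual setup happens earlier in this file. The function name below is
 * made up. */
#if 0
static int example_signalfd_setup(void) {
        sigset_t mask;
        int fd;

        assert_se(sigemptyset(&mask) == 0);
        assert_se(sigaddset(&mask, SIGCHLD) == 0);
        assert_se(sigaddset(&mask, SIGTERM) == 0);
        assert_se(sigaddset(&mask, SIGUSR1) == 0);
        assert_se(sigprocmask(SIG_SETMASK, &mask, NULL) == 0);

        if ((fd = signalfd(-1, &mask, SFD_NONBLOCK|SFD_CLOEXEC)) < 0)
                return -errno;

        /* Reading from fd now yields struct signalfd_siginfo entries,
         * exactly as consumed by manager_process_signal_fd() above. */
        return fd;
}
#endif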
2306
2307 static int process_event(Manager *m, struct epoll_event *ev) {
2308 int r;
2309 Watch *w;
2310
2311 assert(m);
2312 assert(ev);
2313
2314 assert_se(w = ev->data.ptr);
2315
2316 if (w->type == WATCH_INVALID)
2317 return 0;
2318
2319 switch (w->type) {
2320
2321 case WATCH_SIGNAL:
2322
2323 /* An incoming signal? */
2324 if (ev->events != EPOLLIN)
2325 return -EINVAL;
2326
2327 if ((r = manager_process_signal_fd(m)) < 0)
2328 return r;
2329
2330 break;
2331
2332 case WATCH_NOTIFY:
2333
2334 /* An incoming daemon notification event? */
2335 if (ev->events != EPOLLIN)
2336 return -EINVAL;
2337
2338 if ((r = manager_process_notify_fd(m)) < 0)
2339 return r;
2340
2341 break;
2342
2343 case WATCH_FD:
2344
2345 /* Some fd event, to be dispatched to the units */
2346 UNIT_VTABLE(w->data.unit)->fd_event(w->data.unit, w->fd, ev->events, w);
2347 break;
2348
2349 case WATCH_UNIT_TIMER:
2350 case WATCH_JOB_TIMER: {
2351 uint64_t v;
2352 ssize_t k;
2353
2354 /* Some timer event, to be dispatched to the units */
2355 if ((k = read(w->fd, &v, sizeof(v))) != sizeof(v)) {
2356
2357 if (k < 0 && (errno == EINTR || errno == EAGAIN))
2358 break;
2359
2360 return k < 0 ? -errno : -EIO;
2361 }
2362
2363 if (w->type == WATCH_UNIT_TIMER)
2364 UNIT_VTABLE(w->data.unit)->timer_event(w->data.unit, v, w);
2365 else
2366 job_timer_event(w->data.job, v, w);
2367 break;
2368 }
2369
2370 case WATCH_MOUNT:
2371 /* Some mount table change, intended for the mount subsystem */
2372 mount_fd_event(m, ev->events);
2373 break;
2374
2375 case WATCH_SWAP:
2376 /* Some swap table change, intended for the swap subsystem */
2377 swap_fd_event(m, ev->events);
2378 break;
2379
2380 case WATCH_UDEV:
2381 /* Some notification from udev, intended for the device subsystem */
2382 device_fd_event(m, ev->events);
2383 break;
2384
2385 case WATCH_DBUS_WATCH:
2386 bus_watch_event(m, w, ev->events);
2387 break;
2388
2389 case WATCH_DBUS_TIMEOUT:
2390 bus_timeout_event(m, w, ev->events);
2391 break;
2392
2393 default:
2394 log_error("event type=%i", w->type);
2395 assert_not_reached("Unknown epoll event type.");
2396 }
2397
2398 return 0;
2399 }
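/* Illustrative sketch (not part of the original file): how a Watch ends up
 * in ev->data.ptr above. Whoever registers an fd stores a Watch structure
 * alongside it and hands its address to epoll, so that process_event() can
 * recover the owner without any lookup. The function name below is made up;
 * the fields match the Watch type used in this file. */
#if 0
static int example_register_unit_fd(Manager *m, Unit *u, Watch *w, int fd) {
        struct epoll_event ev;

        w->type = WATCH_FD;
        w->fd = fd;
        w->data.unit = u;

        zero(ev);
        ev.events = EPOLLIN;
        ev.data.ptr = w;

        if (epoll_ctl(m->epoll_fd, EPOLL_CTL_ADD, fd, &ev) < 0)
                return -errno;

        return 0;
}
#endif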
2400
2401 int manager_loop(Manager *m) {
2402 int r;
2403
2404 RATELIMIT_DEFINE(rl, 1*USEC_PER_SEC, 50000);
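        /* i.e. allow at most 50000 iterations of the loop below per second;
         * beyond that ratelimit_test() fails and we throttle with sleep(1). */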
2405
2406 assert(m);
2407 m->exit_code = MANAGER_RUNNING;
2408
2409 /* Release the path cache */
2410 set_free_free(m->unit_path_cache);
2411 m->unit_path_cache = NULL;
2412
2413 manager_check_finished(m);
2414
2415 /* There might still be some zombies hanging around from
2416 * before we were exec()'ed. Let's reap them. */
2417 if ((r = manager_dispatch_sigchld(m)) < 0)
2418 return r;
2419
2420 while (m->exit_code == MANAGER_RUNNING) {
2421 struct epoll_event event;
2422 int n;
2423
2424 if (!ratelimit_test(&rl)) {
2425 /* Yay, something is going seriously wrong, pause a little */
2426 log_warning("Looping too fast. Throttling execution a little.");
2427 sleep(1);
2428 }
2429
2430 if (manager_dispatch_load_queue(m) > 0)
2431 continue;
2432
2433 if (manager_dispatch_run_queue(m) > 0)
2434 continue;
2435
2436 if (bus_dispatch(m) > 0)
2437 continue;
2438
2439 if (manager_dispatch_cleanup_queue(m) > 0)
2440 continue;
2441
2442 if (manager_dispatch_gc_queue(m) > 0)
2443 continue;
2444
2445 if (manager_dispatch_dbus_queue(m) > 0)
2446 continue;
2447
2448 if (swap_dispatch_reload(m) > 0)
2449 continue;
2450
2451 if ((n = epoll_wait(m->epoll_fd, &event, 1, -1)) < 0) {
2452
2453 if (errno == EINTR)
2454 continue;
2455
2456 return -errno;
2457 }
2458
2459 assert(n == 1);
2460
2461 if ((r = process_event(m, &event)) < 0)
2462 return r;
2463 }
2464
2465 return m->exit_code;
2466 }
2467
2468 int manager_get_unit_from_dbus_path(Manager *m, const char *s, Unit **_u) {
2469 char *n;
2470 Unit *u;
2471
2472 assert(m);
2473 assert(s);
2474 assert(_u);
2475
2476 if (!startswith(s, "/org/freedesktop/systemd1/unit/"))
2477 return -EINVAL;
2478
2479 if (!(n = bus_path_unescape(s+31)))
2480 return -ENOMEM;
2481
2482 u = manager_get_unit(m, n);
2483 free(n);
2484
2485 if (!u)
2486 return -ENOENT;
2487
2488 *_u = u;
2489
2490 return 0;
2491 }
2492
2493 int manager_get_job_from_dbus_path(Manager *m, const char *s, Job **_j) {
2494 Job *j;
2495 unsigned id;
2496 int r;
2497
2498 assert(m);
2499 assert(s);
2500 assert(_j);
2501
2502 if (!startswith(s, "/org/freedesktop/systemd1/job/"))
2503 return -EINVAL;
2504
2505 if ((r = safe_atou(s + 30, &id)) < 0)
2506 return r;
2507
2508 if (!(j = manager_get_job(m, id)))
2509 return -ENOENT;
2510
2511 *_j = j;
2512
2513 return 0;
2514 }
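/* Examples (not part of the original file, values made up): unit object
 * paths encode the unit name with bus_path_escape(), which replaces every
 * byte outside [a-zA-Z0-9] with '_' followed by its lowercase hex value, so
 *
 *     /org/freedesktop/systemd1/unit/cups_2eservice  ->  cups.service
 *     /org/freedesktop/systemd1/job/42               ->  job id 42
 *
 * and the s+31 resp. s+30 offsets above simply skip those fixed prefixes. */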
2515
2516 void manager_send_unit_audit(Manager *m, Unit *u, int type, bool success) {
2517
2518 #ifdef HAVE_AUDIT
2519 char *p;
2520
2521 if (m->audit_fd < 0)
2522 return;
2523
2524 /* Don't generate audit events if the service was already
2525 * started and we're just deserializing */
2526 if (m->n_reloading > 0)
2527 return;
2528
2529 if (m->running_as != MANAGER_SYSTEM)
2530 return;
2531
2532 if (u->meta.type != UNIT_SERVICE)
2533 return;
2534
2535 if (!(p = unit_name_to_prefix_and_instance(u->meta.id))) {
2536 log_error("Failed to allocate unit name for audit message: %s", strerror(ENOMEM));
2537 return;
2538 }
2539
2540 if (audit_log_user_comm_message(m->audit_fd, type, "", p, NULL, NULL, NULL, success) < 0) {
2541 log_warning("Failed to send audit message: %m");
2542
2543 if (errno == EPERM) {
2544 /* We aren't allowed to send audit messages?
2545 * Then let's not retry, to avoid spamming
2546 * the user with the same message over and
2547 * over again. */
2548
2549 audit_close(m->audit_fd);
2550 m->audit_fd = -1;
2551 }
2552 }
2553
2554 free(p);
2555 #endif
2556
2557 }
2558
2559 void manager_send_unit_plymouth(Manager *m, Unit *u) {
2560 int fd = -1;
2561 union sockaddr_union sa;
2562 int n = 0;
2563 char *message = NULL;
2564
2565 /* Don't generate plymouth events if the service was already
2566 * started and we're just deserializing */
2567 if (m->n_reloading > 0)
2568 return;
2569
2570 if (m->running_as != MANAGER_SYSTEM)
2571 return;
2572
2573 if (u->meta.type != UNIT_SERVICE &&
2574 u->meta.type != UNIT_MOUNT &&
2575 u->meta.type != UNIT_SWAP)
2576 return;
2577
2578 /* We set SOCK_NONBLOCK here so that we rather drop the
2579 * message than wait for Plymouth. */
2580 if ((fd = socket(AF_UNIX, SOCK_STREAM|SOCK_CLOEXEC|SOCK_NONBLOCK, 0)) < 0) {
2581 log_error("socket() failed: %m");
2582 return;
2583 }
2584
2585 zero(sa);
2586 sa.sa.sa_family = AF_UNIX;
2587 strncpy(sa.un.sun_path+1, "/org/freedesktop/plymouthd", sizeof(sa.un.sun_path)-1);
2588 if (connect(fd, &sa.sa, offsetof(struct sockaddr_un, sun_path) + 1 + strlen(sa.un.sun_path+1)) < 0) {
2589
2590 if (errno != EPIPE &&
2591 errno != EAGAIN &&
2592 errno != ENOENT &&
2593 errno != ECONNREFUSED &&
2594 errno != ECONNRESET &&
2595 errno != ECONNABORTED)
2596 log_error("connect() failed: %m");
2597
2598 goto finish;
2599 }
2600
2601 if (asprintf(&message, "U\002%c%s%n", (int) (strlen(u->meta.id) + 1), u->meta.id, &n) < 0) {
2602 log_error("Out of memory");
2603 goto finish;
2604 }
2605
2606 errno = 0;
2607 if (write(fd, message, n + 1) != n + 1) {
2608
2609 if (errno != EPIPE &&
2610 errno != EAGAIN &&
2611 errno != ENOENT &&
2612 errno != ECONNREFUSED &&
2613 errno != ECONNRESET &&
2614 errno != ECONNABORTED)
2615 log_error("Failed to write Plymouth message: %m");
2616
2617 goto finish;
2618 }
2619
2620 finish:
2621 if (fd >= 0)
2622 close_nointr_nofail(fd);
2623
2624 free(message);
2625 }
2626
2627 void manager_dispatch_bus_name_owner_changed(
2628 Manager *m,
2629 const char *name,
2630 const char* old_owner,
2631 const char *new_owner) {
2632
2633 Unit *u;
2634
2635 assert(m);
2636 assert(name);
2637
2638 if (!(u = hashmap_get(m->watch_bus, name)))
2639 return;
2640
2641 UNIT_VTABLE(u)->bus_name_owner_change(u, name, old_owner, new_owner);
2642 }
2643
2644 void manager_dispatch_bus_query_pid_done(
2645 Manager *m,
2646 const char *name,
2647 pid_t pid) {
2648
2649 Unit *u;
2650
2651 assert(m);
2652 assert(name);
2653 assert(pid >= 1);
2654
2655 if (!(u = hashmap_get(m->watch_bus, name)))
2656 return;
2657
2658 UNIT_VTABLE(u)->bus_query_pid_done(u, name, pid);
2659 }
2660
2661 int manager_open_serialization(Manager *m, FILE **_f) {
2662 char *path = NULL;
2663 mode_t saved_umask;
2664 int fd;
2665 FILE *f;
2666
2667 assert(_f);
2668
2669 if (m->running_as == MANAGER_SYSTEM)
2670 asprintf(&path, "/run/systemd/dump-%lu-XXXXXX", (unsigned long) getpid());
2671 else
2672 asprintf(&path, "/tmp/systemd-dump-%lu-XXXXXX", (unsigned long) getpid());
2673
2674 if (!path)
2675 return -ENOMEM;
2676
2677 saved_umask = umask(0077);
2678 fd = mkostemp(path, O_RDWR|O_CLOEXEC);
2679 umask(saved_umask);
2680
2681 if (fd < 0) {
2682 free(path);
2683 return -errno;
2684 }
2685
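        /* Unlink the file right away: it stays fully usable through the fd
         * we keep open, but never becomes visible to anybody else, and is
         * cleaned up automatically once the last fd to it is closed. */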
2686 unlink(path);
2687
2688 log_debug("Serializing state to %s", path);
2689 free(path);
2690
2691 if (!(f = fdopen(fd, "w+")))
2692 return -errno;
2693
2694 *_f = f;
2695
2696 return 0;
2697 }
2698
2699 int manager_serialize(Manager *m, FILE *f, FDSet *fds) {
2700 Iterator i;
2701 Unit *u;
2702 const char *t;
2703 int r;
2704
2705 assert(m);
2706 assert(f);
2707 assert(fds);
2708
2709 m->n_reloading ++;
2710
2711 fprintf(f, "current-job-id=%i\n", m->current_job_id);
2712 fprintf(f, "taint-usr=%s\n", yes_no(m->taint_usr));
2713
2714 dual_timestamp_serialize(f, "initrd-timestamp", &m->initrd_timestamp);
2715 dual_timestamp_serialize(f, "startup-timestamp", &m->startup_timestamp);
2716 dual_timestamp_serialize(f, "finish-timestamp", &m->finish_timestamp);
2717
2718 fputc('\n', f);
2719
2720 HASHMAP_FOREACH_KEY(u, t, m->units, i) {
2721 if (u->meta.id != t)
2722 continue;
2723
2724 if (!unit_can_serialize(u))
2725 continue;
2726
2727 /* Start marker */
2728 fputs(u->meta.id, f);
2729 fputc('\n', f);
2730
2731 if ((r = unit_serialize(u, f, fds)) < 0) {
2732 m->n_reloading --;
2733 return r;
2734 }
2735 }
2736
2737 assert(m->n_reloading > 0);
2738 m->n_reloading --;
2739
2740 if (ferror(f))
2741 return -EIO;
2742
2743 r = bus_fdset_add_all(m, fds);
2744 if (r < 0)
2745 return r;
2746
2747 return 0;
2748 }
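/* Illustrative sketch of the resulting stream (not part of the original
 * file, values made up): a block of manager-level key=value lines, an empty
 * line, then for every serializable unit its name on a line of its own
 * followed by the unit's own items. manager_deserialize() below parses
 * exactly this shape.
 *
 *     current-job-id=7
 *     taint-usr=no
 *     startup-timestamp=...
 *     finish-timestamp=...
 *
 *     getty@tty1.service
 *     ...items written by unit_serialize()...
 */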
2749
2750 int manager_deserialize(Manager *m, FILE *f, FDSet *fds) {
2751 int r = 0;
2752
2753 assert(m);
2754 assert(f);
2755
2756 log_debug("Deserializing state...");
2757
2758 m->n_reloading ++;
2759
2760 for (;;) {
2761 char line[LINE_MAX], *l;
2762
2763 if (!fgets(line, sizeof(line), f)) {
2764 if (feof(f))
2765 r = 0;
2766 else
2767 r = -errno;
2768
2769 goto finish;
2770 }
2771
2772 char_array_0(line);
2773 l = strstrip(line);
2774
2775 if (l[0] == 0)
2776 break;
2777
2778 if (startswith(l, "current-job-id=")) {
2779 uint32_t id;
2780
2781 if (safe_atou32(l+15, &id) < 0)
2782 log_debug("Failed to parse current job id value %s", l+15);
2783 else
2784 m->current_job_id = MAX(m->current_job_id, id);
2785 } else if (startswith(l, "taint-usr=")) {
2786 int b;
2787
2788 if ((b = parse_boolean(l+10)) < 0)
2789 log_debug("Failed to parse taint /usr flag %s", l+10);
2790 else
2791 m->taint_usr = m->taint_usr || b;
2792 } else if (startswith(l, "initrd-timestamp="))
2793 dual_timestamp_deserialize(l+17, &m->initrd_timestamp);
2794 else if (startswith(l, "startup-timestamp="))
2795 dual_timestamp_deserialize(l+18, &m->startup_timestamp);
2796 else if (startswith(l, "finish-timestamp="))
2797 dual_timestamp_deserialize(l+17, &m->finish_timestamp);
2798 else
2799 log_debug("Unknown serialization item '%s'", l);
2800 }
2801
2802 for (;;) {
2803 Unit *u;
2804 char name[UNIT_NAME_MAX+2];
2805
2806 /* Start marker */
2807 if (!fgets(name, sizeof(name), f)) {
2808 if (feof(f))
2809 r = 0;
2810 else
2811 r = -errno;
2812
2813 goto finish;
2814 }
2815
2816 char_array_0(name);
2817
2818 if ((r = manager_load_unit(m, strstrip(name), NULL, NULL, &u)) < 0)
2819 goto finish;
2820
2821 if ((r = unit_deserialize(u, f, fds)) < 0)
2822 goto finish;
2823 }
2824
2825 finish:
2826 if (ferror(f))
2827 r = -EIO;
2830
2831 assert(m->n_reloading > 0);
2832 m->n_reloading --;
2833
2834 return r;
2835 }
2836
2837 int manager_reload(Manager *m) {
2838 int r, q;
2839 FILE *f;
2840 FDSet *fds;
2841
2842 assert(m);
2843
2844 if ((r = manager_open_serialization(m, &f)) < 0)
2845 return r;
2846
2847 m->n_reloading ++;
2848
2849 if (!(fds = fdset_new())) {
2850 m->n_reloading --;
2851 r = -ENOMEM;
2852 goto finish;
2853 }
2854
2855 if ((r = manager_serialize(m, f, fds)) < 0) {
2856 m->n_reloading --;
2857 goto finish;
2858 }
2859
2860 if (fseeko(f, 0, SEEK_SET) < 0) {
2861 m->n_reloading --;
2862 r = -errno;
2863 goto finish;
2864 }
2865
2866 /* From here on there is no way back. */
2867 manager_clear_jobs_and_units(m);
2868 manager_undo_generators(m);
2869
2870 /* Find new unit paths */
2871 lookup_paths_free(&m->lookup_paths);
2872 if ((q = lookup_paths_init(&m->lookup_paths, m->running_as, true)) < 0)
2873 r = q;
2874
2875 manager_run_generators(m);
2876
2877 manager_build_unit_path_cache(m);
2878
2879 /* First, enumerate what we can from all config files */
2880 if ((q = manager_enumerate(m)) < 0)
2881 r = q;
2882
2883 /* Second, deserialize our stored data */
2884 if ((q = manager_deserialize(m, f, fds)) < 0)
2885 r = q;
2886
2887 fclose(f);
2888 f = NULL;
2889
2890 /* Third, fire things up! */
2891 if ((q = manager_coldplug(m)) < 0)
2892 r = q;
2893
2894 assert(m->n_reloading > 0);
2895 m->n_reloading--;
2896
2897 finish:
2898 if (f)
2899 fclose(f);
2900
2901 if (fds)
2902 fdset_free(fds);
2903
2904 return r;
2905 }
2906
2907 bool manager_is_booting_or_shutting_down(Manager *m) {
2908 Unit *u;
2909
2910 assert(m);
2911
2912 /* Is the initial job still around? */
2913 if (manager_get_job(m, 1))
2914 return true;
2915
2916 /* Is there a job for the shutdown target? */
2917 u = manager_get_unit(m, SPECIAL_SHUTDOWN_TARGET);
2918 if (u)
2919 return !!u->meta.job;
2920
2921 return false;
2922 }
2923
2924 void manager_reset_failed(Manager *m) {
2925 Unit *u;
2926 Iterator i;
2927
2928 assert(m);
2929
2930 HASHMAP_FOREACH(u, m->units, i)
2931 unit_reset_failed(u);
2932 }
2933
2934 bool manager_unit_pending_inactive(Manager *m, const char *name) {
2935 Unit *u;
2936
2937 assert(m);
2938 assert(name);
2939
2940 /* Returns true if the unit is inactive or going down */
2941 if (!(u = manager_get_unit(m, name)))
2942 return true;
2943
2944 return unit_pending_inactive(u);
2945 }
2946
2947 void manager_check_finished(Manager *m) {
2948 char userspace[FORMAT_TIMESPAN_MAX], initrd[FORMAT_TIMESPAN_MAX], kernel[FORMAT_TIMESPAN_MAX], sum[FORMAT_TIMESPAN_MAX];
2949 usec_t kernel_usec = 0, initrd_usec = 0, userspace_usec = 0, total_usec = 0;
2950
2951 assert(m);
2952
2953 if (dual_timestamp_is_set(&m->finish_timestamp))
2954 return;
2955
2956 if (hashmap_size(m->jobs) > 0)
2957 return;
2958
2959 dual_timestamp_get(&m->finish_timestamp);
2960
2961 if (m->running_as == MANAGER_SYSTEM && detect_container(NULL) <= 0) {
2962
2963 userspace_usec = m->finish_timestamp.monotonic - m->startup_timestamp.monotonic;
2964 total_usec = m->finish_timestamp.monotonic;
2965
2966 if (dual_timestamp_is_set(&m->initrd_timestamp)) {
2967
2968 kernel_usec = m->initrd_timestamp.monotonic;
2969 initrd_usec = m->startup_timestamp.monotonic - m->initrd_timestamp.monotonic;
2970
2971 log_info("Startup finished in %s (kernel) + %s (initrd) + %s (userspace) = %s.",
2972 format_timespan(kernel, sizeof(kernel), kernel_usec),
2973 format_timespan(initrd, sizeof(initrd), initrd_usec),
2974 format_timespan(userspace, sizeof(userspace), userspace_usec),
2975 format_timespan(sum, sizeof(sum), total_usec));
2976 } else {
2977 kernel_usec = m->startup_timestamp.monotonic;
2978 initrd_usec = 0;
2979
2980 log_info("Startup finished in %s (kernel) + %s (userspace) = %s.",
2981 format_timespan(kernel, sizeof(kernel), kernel_usec),
2982 format_timespan(userspace, sizeof(userspace), userspace_usec),
2983 format_timespan(sum, sizeof(sum), total_usec));
2984 }
2985 } else {
2986 userspace_usec = initrd_usec = kernel_usec = 0;
2987 total_usec = m->finish_timestamp.monotonic - m->startup_timestamp.monotonic;
2988
2989 log_debug("Startup finished in %s.",
2990 format_timespan(sum, sizeof(sum), total_usec));
2991 }
2992
2993 bus_broadcast_finished(m, kernel_usec, initrd_usec, userspace_usec, total_usec);
2994
2995 sd_notifyf(false,
2996 "READY=1\nSTATUS=Startup finished in %s.",
2997 format_timespan(sum, sizeof(sum), total_usec));
2998 }
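/* Worked example for the arithmetic above (made-up numbers, monotonic
 * clock): with the initrd timestamp at 2.0s, userspace startup at 5.0s and
 * finish at 11.5s, this logs 2.0s (kernel) + 3.0s (initrd) + 6.5s
 * (userspace) = 11.5s. */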
2999
3000 void manager_run_generators(Manager *m) {
3001 DIR *d = NULL;
3002 const char *generator_path;
3003 const char *argv[3];
3004 mode_t u;
3005
3006 assert(m);
3007
3008 generator_path = m->running_as == MANAGER_SYSTEM ? SYSTEM_GENERATOR_PATH : USER_GENERATOR_PATH;
3009 if (!(d = opendir(generator_path))) {
3010
3011 if (errno == ENOENT)
3012 return;
3013
3014 log_error("Failed to enumerate generator directory: %m");
3015 return;
3016 }
3017
3018 if (!m->generator_unit_path) {
3019 const char *p;
3020 char user_path[] = "/tmp/systemd-generator-XXXXXX";
3021
3022 if (m->running_as == MANAGER_SYSTEM && getpid() == 1) {
3023 p = "/run/systemd/generator";
3024
3025 if (mkdir_p(p, 0755) < 0) {
3026 log_error("Failed to create generator directory: %m");
3027 goto finish;
3028 }
3029
3030 } else {
3031 if (!(p = mkdtemp(user_path))) {
3032 log_error("Failed to create generator directory: %m");
3033 goto finish;
3034 }
3035 }
3036
3037 if (!(m->generator_unit_path = strdup(p))) {
3038 log_error("Failed to allocate generator unit path.");
3039 goto finish;
3040 }
3041 }
3042
3043 argv[0] = NULL; /* Leave this empty, execute_directory() will fill something in */
3044 argv[1] = m->generator_unit_path;
3045 argv[2] = NULL;
3046
3047 u = umask(0022);
3048 execute_directory(generator_path, d, (char**) argv);
3049 umask(u);
3050
3051 if (rmdir(m->generator_unit_path) >= 0) {
3052 /* If we were able to remove this directory it must have
3053 * been empty, i.e. no generator dropped anything into it,
3054 * hence let's shortcut this. */
3055
3056 free(m->generator_unit_path);
3057 m->generator_unit_path = NULL;
3058 goto finish;
3059 }
3060
3061 if (!strv_find(m->lookup_paths.unit_path, m->generator_unit_path)) {
3062 char **l;
3063
3064 if (!(l = strv_append(m->lookup_paths.unit_path, m->generator_unit_path))) {
3065 log_error("Failed to add generator directory to unit search path: %m");
3066 goto finish;
3067 }
3068
3069 strv_free(m->lookup_paths.unit_path);
3070 m->lookup_paths.unit_path = l;
3071
3072 log_debug("Added generator unit path %s to search path.", m->generator_unit_path);
3073 }
3074
3075 finish:
3076 if (d)
3077 closedir(d);
3078 }
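/* Illustrative sketch (not part of the original file): a minimal generator
 * as run by execute_directory() above. Generators are standalone executables
 * in generator_path; they are passed the output directory as argv[1] and
 * drop unit files there, which manager_run_generators() then adds to the
 * unit search path. File and unit names below are made up. */
#if 0
int main(int argc, char *argv[]) {
        FILE *f;
        char *p = NULL;

        if (argc < 2)
                return 1;

        if (asprintf(&p, "%s/example-generated.service", argv[1]) < 0)
                return 1;

        f = fopen(p, "we");
        free(p);
        if (!f)
                return 1;

        fputs("[Unit]\n"
              "Description=Example generated unit\n"
              "\n"
              "[Service]\n"
              "ExecStart=/bin/true\n", f);

        fclose(f);
        return 0;
}
#endif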
3079
3080 void manager_undo_generators(Manager *m) {
3081 assert(m);
3082
3083 if (!m->generator_unit_path)
3084 return;
3085
3086 strv_remove(m->lookup_paths.unit_path, m->generator_unit_path);
3087 rm_rf(m->generator_unit_path, false, true, false);
3088
3089 free(m->generator_unit_path);
3090 m->generator_unit_path = NULL;
3091 }
3092
3093 int manager_set_default_controllers(Manager *m, char **controllers) {
3094 char **l;
3095
3096 assert(m);
3097
3098 if (!(l = strv_copy(controllers)))
3099 return -ENOMEM;
3100
3101 strv_free(m->default_controllers);
3102 m->default_controllers = l;
3103
3104 return 0;
3105 }
3106
3107 void manager_recheck_syslog(Manager *m) {
3108 Unit *u;
3109
3110 assert(m);
3111
3112 if (m->running_as != MANAGER_SYSTEM)
3113 return;
3114
3115 if ((u = manager_get_unit(m, SPECIAL_SYSLOG_SOCKET))) {
3116 SocketState state;
3117
3118 state = SOCKET(u)->state;
3119
3120 if (state != SOCKET_DEAD &&
3121 state != SOCKET_FAILED &&
3122 state != SOCKET_RUNNING) {
3123
3124 /* The socket is not set up, or is still
3125 * listening, so we'd better not try to use
3126 * it. Note that we have no problem if the
3127 * socket is completely down, since there
3128 * might be a foreign /dev/log socket around
3129 * and we want to make use of that.
3130 */
3131
3132 log_close_syslog();
3133 return;
3134 }
3135 }
3136
3137 if ((u = manager_get_unit(m, SPECIAL_SYSLOG_TARGET)))
3138 if (TARGET(u)->state != TARGET_ACTIVE) {
3139 log_close_syslog();
3140 return;
3141 }
3142
3143 /* OK, so the socket is either fully up or fully down, and
3144 * the target is up; in that case let's make use of the socket. */
3145 log_open();
3146 }
3147
3148 void manager_set_show_status(Manager *m, bool b) {
3149 assert(m);
3150
3151 if (m->running_as != MANAGER_SYSTEM)
3152 return;
3153
3154 m->show_status = b;
3155
3156 if (b)
3157 touch("/run/systemd/show-status");
3158 else
3159 unlink("/run/systemd/show-status");
3160 }
3161
3162 bool manager_get_show_status(Manager *m) {
3163 assert(m);
3164
3165 if (m->running_as != MANAGER_SYSTEM)
3166 return false;
3167
3168 if (m->show_status)
3169 return true;
3170
3171 /* If Plymouth is running, make sure we show the status, so
3172 * that there's something nice to see when people press Esc. */
3173
3174 return plymouth_running();
3175 }
3176
3177 static const char* const manager_running_as_table[_MANAGER_RUNNING_AS_MAX] = {
3178 [MANAGER_SYSTEM] = "system",
3179 [MANAGER_USER] = "user"
3180 };
3181
3182 DEFINE_STRING_TABLE_LOOKUP(manager_running_as, ManagerRunningAs);