git.ipfire.org Git - thirdparty/systemd.git/blob - src/manager.c
manager: show who killed us
1 /*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/
2
3 /***
4 This file is part of systemd.
5
6 Copyright 2010 Lennart Poettering
7
8 systemd is free software; you can redistribute it and/or modify it
9 under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2 of the License, or
11 (at your option) any later version.
12
13 systemd is distributed in the hope that it will be useful, but
14 WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with systemd; If not, see <http://www.gnu.org/licenses/>.
20 ***/
21
22 #include <assert.h>
23 #include <errno.h>
24 #include <string.h>
25 #include <sys/epoll.h>
26 #include <signal.h>
27 #include <sys/signalfd.h>
28 #include <sys/wait.h>
29 #include <unistd.h>
30 #include <sys/poll.h>
31 #include <sys/reboot.h>
32 #include <sys/ioctl.h>
33 #include <linux/kd.h>
34 #include <termios.h>
35 #include <fcntl.h>
36 #include <sys/types.h>
37 #include <sys/stat.h>
38 #include <dirent.h>
39
40 #ifdef HAVE_AUDIT
41 #include <libaudit.h>
42 #endif
43
44 #include "manager.h"
45 #include "hashmap.h"
46 #include "macro.h"
47 #include "strv.h"
48 #include "log.h"
49 #include "util.h"
50 #include "ratelimit.h"
51 #include "cgroup.h"
52 #include "mount-setup.h"
53 #include "unit-name.h"
54 #include "dbus-unit.h"
55 #include "dbus-job.h"
56 #include "missing.h"
57 #include "path-lookup.h"
58 #include "special.h"
59 #include "bus-errors.h"
60 #include "exit-status.h"
61
62 /* As soon as 16 units are in our GC queue, make sure to run a gc sweep */
63 #define GC_QUEUE_ENTRIES_MAX 16
64
65 /* As soon as 10s have passed since a unit was added to our GC queue, make sure to run a gc sweep */
66 #define GC_QUEUE_USEC_MAX (10*USEC_PER_SEC)
67
68 /* Where clients shall send notification messages to */
69 #define NOTIFY_SOCKET_SYSTEM "/dev/.run/systemd/notify"
70 #define NOTIFY_SOCKET_USER "@/org/freedesktop/systemd1/notify"
71
72 static int manager_setup_notify(Manager *m) {
73 union {
74 struct sockaddr sa;
75 struct sockaddr_un un;
76 } sa;
77 struct epoll_event ev;
78 int one = 1;
79
80 assert(m);
81
82 m->notify_watch.type = WATCH_NOTIFY;
83 if ((m->notify_watch.fd = socket(AF_UNIX, SOCK_DGRAM|SOCK_CLOEXEC|SOCK_NONBLOCK, 0)) < 0) {
84 log_error("Failed to allocate notification socket: %m");
85 return -errno;
86 }
87
88 zero(sa);
89 sa.sa.sa_family = AF_UNIX;
90
91 if (getpid() != 1)
92 snprintf(sa.un.sun_path, sizeof(sa.un.sun_path), NOTIFY_SOCKET_USER "/%llu", random_ull());
93 else {
94 unlink(NOTIFY_SOCKET_SYSTEM);
95 strncpy(sa.un.sun_path, NOTIFY_SOCKET_SYSTEM, sizeof(sa.un.sun_path));
96 }
97
98 if (sa.un.sun_path[0] == '@')
99 sa.un.sun_path[0] = 0;
100
101 if (bind(m->notify_watch.fd, &sa.sa, offsetof(struct sockaddr_un, sun_path) + 1 + strlen(sa.un.sun_path+1)) < 0) {
102 log_error("bind() failed: %m");
103 return -errno;
104 }
105
106 if (setsockopt(m->notify_watch.fd, SOL_SOCKET, SO_PASSCRED, &one, sizeof(one)) < 0) {
107 log_error("SO_PASSCRED failed: %m");
108 return -errno;
109 }
110
111 zero(ev);
112 ev.events = EPOLLIN;
113 ev.data.ptr = &m->notify_watch;
114
115 if (epoll_ctl(m->epoll_fd, EPOLL_CTL_ADD, m->notify_watch.fd, &ev) < 0)
116 return -errno;
117
118 if (sa.un.sun_path[0] == 0)
119 sa.un.sun_path[0] = '@';
120
121 if (!(m->notify_socket = strdup(sa.un.sun_path)))
122 return -ENOMEM;
123
124 log_debug("Using notification socket %s", m->notify_socket);
125
126 return 0;
127 }
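
/* Illustrative sketch (not part of the original file): the sending side of
 * the notification protocol that manager_setup_notify() above listens on.
 * A client reads the socket address from $NOTIFY_SOCKET (same leading-'@'
 * convention for abstract sockets as above) and sends a single datagram such
 * as "READY=1". Assumes <stdlib.h>, <sys/socket.h> and <sys/un.h> in addition
 * to the headers above; error handling trimmed. */
#if 0
static int notify_ready_sketch(void) {
        union {
                struct sockaddr sa;
                struct sockaddr_un un;
        } sa;
        const char *e;
        int fd, r = 0;

        if (!(e = getenv("NOTIFY_SOCKET")) || strlen(e) < 2)
                return 0; /* nobody is listening */

        if ((fd = socket(AF_UNIX, SOCK_DGRAM|SOCK_CLOEXEC, 0)) < 0)
                return -errno;

        zero(sa);
        sa.sa.sa_family = AF_UNIX;
        strncpy(sa.un.sun_path, e, sizeof(sa.un.sun_path));

        /* Same convention as above: a leading '@' means an abstract socket */
        if (sa.un.sun_path[0] == '@')
                sa.un.sun_path[0] = 0;

        if (sendto(fd, "READY=1", 7, MSG_NOSIGNAL,
                   &sa.sa, offsetof(struct sockaddr_un, sun_path) + strlen(e)) < 0)
                r = -errno;

        close_nointr_nofail(fd);
        return r;
}
#endif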
128
129 static int enable_special_signals(Manager *m) {
130         int fd;
131
132 assert(m);
133
134         /* Make sure we get SIGINT on control-alt-del */
135 if (reboot(RB_DISABLE_CAD) < 0)
136 log_warning("Failed to enable ctrl-alt-del handling: %m");
137
138 if ((fd = open_terminal("/dev/tty0", O_RDWR|O_NOCTTY)) < 0)
139 log_warning("Failed to open /dev/tty0: %m");
140 else {
141                 /* Make sure we get SIGWINCH on kbrequest */
142 if (ioctl(fd, KDSIGACCEPT, SIGWINCH) < 0)
143 log_warning("Failed to enable kbrequest handling: %s", strerror(errno));
144
145 close_nointr_nofail(fd);
146 }
147
148 return 0;
149 }
150
151 static int manager_setup_signals(Manager *m) {
152 sigset_t mask;
153 struct epoll_event ev;
154 struct sigaction sa;
155
156 assert(m);
157
158 /* We are not interested in SIGSTOP and friends. */
159 zero(sa);
160 sa.sa_handler = SIG_DFL;
161 sa.sa_flags = SA_NOCLDSTOP|SA_RESTART;
162 assert_se(sigaction(SIGCHLD, &sa, NULL) == 0);
163
164 assert_se(sigemptyset(&mask) == 0);
165
166 sigset_add_many(&mask,
167 SIGCHLD, /* Child died */
168 SIGTERM, /* Reexecute daemon */
169 SIGHUP, /* Reload configuration */
170 SIGUSR1, /* systemd/upstart: reconnect to D-Bus */
171 SIGUSR2, /* systemd: dump status */
172 SIGINT, /* Kernel sends us this on control-alt-del */
173 SIGWINCH, /* Kernel sends us this on kbrequest (alt-arrowup) */
174 SIGPWR, /* Some kernel drivers and upsd send us this on power failure */
175 SIGRTMIN+0, /* systemd: start default.target */
176 SIGRTMIN+1, /* systemd: isolate rescue.target */
177 SIGRTMIN+2, /* systemd: isolate emergency.target */
178 SIGRTMIN+3, /* systemd: start halt.target */
179 SIGRTMIN+4, /* systemd: start poweroff.target */
180 SIGRTMIN+5, /* systemd: start reboot.target */
181 SIGRTMIN+6, /* systemd: start kexec.target */
182 SIGRTMIN+13, /* systemd: Immediate halt */
183 SIGRTMIN+14, /* systemd: Immediate poweroff */
184 SIGRTMIN+15, /* systemd: Immediate reboot */
185 SIGRTMIN+16, /* systemd: Immediate kexec */
186 SIGRTMIN+20, /* systemd: enable status messages */
187 SIGRTMIN+21, /* systemd: disable status messages */
188 -1);
189 assert_se(sigprocmask(SIG_SETMASK, &mask, NULL) == 0);
190
191 m->signal_watch.type = WATCH_SIGNAL;
192 if ((m->signal_watch.fd = signalfd(-1, &mask, SFD_NONBLOCK|SFD_CLOEXEC)) < 0)
193 return -errno;
194
195 zero(ev);
196 ev.events = EPOLLIN;
197 ev.data.ptr = &m->signal_watch;
198
199 if (epoll_ctl(m->epoll_fd, EPOLL_CTL_ADD, m->signal_watch.fd, &ev) < 0)
200 return -errno;
201
202 if (m->running_as == MANAGER_SYSTEM)
203 return enable_special_signals(m);
204
205 return 0;
206 }
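
/* Illustrative sketch (not part of the original file): the sending side of
 * the real-time signal interface masked above. Per the table passed to
 * sigset_add_many(), SIGRTMIN+5 asks the system manager (PID 1) to start
 * reboot.target and SIGRTMIN+4 to start poweroff.target. Hypothetical helper,
 * needs only <signal.h> and <errno.h>. */
#if 0
static int request_reboot_sketch(void) {
        if (kill(1, SIGRTMIN+5) < 0)   /* mapped above to "start reboot.target" */
                return -errno;
        return 0;
}
#endif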
207
208 int manager_new(ManagerRunningAs running_as, Manager **_m) {
209 Manager *m;
210 int r = -ENOMEM;
211
212 assert(_m);
213 assert(running_as >= 0);
214 assert(running_as < _MANAGER_RUNNING_AS_MAX);
215
216 if (!(m = new0(Manager, 1)))
217 return -ENOMEM;
218
219 dual_timestamp_get(&m->startup_timestamp);
220
221 m->running_as = running_as;
222 m->name_data_slot = m->subscribed_data_slot = -1;
223 m->exit_code = _MANAGER_EXIT_CODE_INVALID;
224 m->pin_cgroupfs_fd = -1;
225
226 #ifdef HAVE_AUDIT
227 m->audit_fd = -1;
228 #endif
229
230 m->signal_watch.fd = m->mount_watch.fd = m->udev_watch.fd = m->epoll_fd = m->dev_autofs_fd = m->swap_watch.fd = -1;
231 m->current_job_id = 1; /* start as id #1, so that we can leave #0 around as "null-like" value */
232
233 if (!(m->environment = strv_copy(environ)))
234 goto fail;
235
236 if (!(m->default_controllers = strv_new("cpu", NULL)))
237 goto fail;
238
239 if (!(m->units = hashmap_new(string_hash_func, string_compare_func)))
240 goto fail;
241
242 if (!(m->jobs = hashmap_new(trivial_hash_func, trivial_compare_func)))
243 goto fail;
244
245 if (!(m->transaction_jobs = hashmap_new(trivial_hash_func, trivial_compare_func)))
246 goto fail;
247
248 if (!(m->watch_pids = hashmap_new(trivial_hash_func, trivial_compare_func)))
249 goto fail;
250
251 if (!(m->cgroup_bondings = hashmap_new(string_hash_func, string_compare_func)))
252 goto fail;
253
254 if (!(m->watch_bus = hashmap_new(string_hash_func, string_compare_func)))
255 goto fail;
256
257 if ((m->epoll_fd = epoll_create1(EPOLL_CLOEXEC)) < 0)
258 goto fail;
259
260 if ((r = lookup_paths_init(&m->lookup_paths, m->running_as)) < 0)
261 goto fail;
262
263 if ((r = manager_setup_signals(m)) < 0)
264 goto fail;
265
266 if ((r = manager_setup_cgroup(m)) < 0)
267 goto fail;
268
269 if ((r = manager_setup_notify(m)) < 0)
270 goto fail;
271
272 /* Try to connect to the busses, if possible. */
273 if ((r = bus_init(m, running_as != MANAGER_SYSTEM)) < 0)
274 goto fail;
275
276 #ifdef HAVE_AUDIT
277 if ((m->audit_fd = audit_open()) < 0)
278 log_error("Failed to connect to audit log: %m");
279 #endif
280
281 *_m = m;
282 return 0;
283
284 fail:
285 manager_free(m);
286 return r;
287 }
288
289 static unsigned manager_dispatch_cleanup_queue(Manager *m) {
290 Meta *meta;
291 unsigned n = 0;
292
293 assert(m);
294
295 while ((meta = m->cleanup_queue)) {
296 assert(meta->in_cleanup_queue);
297
298 unit_free((Unit*) meta);
299 n++;
300 }
301
302 return n;
303 }
304
305 enum {
306 GC_OFFSET_IN_PATH, /* This one is on the path we were traveling */
307 GC_OFFSET_UNSURE, /* No clue */
308 GC_OFFSET_GOOD, /* We still need this unit */
309 GC_OFFSET_BAD, /* We don't need this unit anymore */
310 _GC_OFFSET_MAX
311 };
312
313 static void unit_gc_sweep(Unit *u, unsigned gc_marker) {
314 Iterator i;
315 Unit *other;
316 bool is_bad;
317
318 assert(u);
319
320 if (u->meta.gc_marker == gc_marker + GC_OFFSET_GOOD ||
321 u->meta.gc_marker == gc_marker + GC_OFFSET_BAD ||
322 u->meta.gc_marker == gc_marker + GC_OFFSET_IN_PATH)
323 return;
324
325 if (u->meta.in_cleanup_queue)
326 goto bad;
327
328 if (unit_check_gc(u))
329 goto good;
330
331 u->meta.gc_marker = gc_marker + GC_OFFSET_IN_PATH;
332
333 is_bad = true;
334
335 SET_FOREACH(other, u->meta.dependencies[UNIT_REFERENCED_BY], i) {
336 unit_gc_sweep(other, gc_marker);
337
338 if (other->meta.gc_marker == gc_marker + GC_OFFSET_GOOD)
339 goto good;
340
341 if (other->meta.gc_marker != gc_marker + GC_OFFSET_BAD)
342 is_bad = false;
343 }
344
345 if (is_bad)
346 goto bad;
347
348 /* We were unable to find anything out about this entry, so
349 * let's investigate it later */
350 u->meta.gc_marker = gc_marker + GC_OFFSET_UNSURE;
351 unit_add_to_gc_queue(u);
352 return;
353
354 bad:
355 /* We definitely know that this one is not useful anymore, so
356 * let's mark it for deletion */
357 u->meta.gc_marker = gc_marker + GC_OFFSET_BAD;
358 unit_add_to_cleanup_queue(u);
359 return;
360
361 good:
362 u->meta.gc_marker = gc_marker + GC_OFFSET_GOOD;
363 }
364
365 static unsigned manager_dispatch_gc_queue(Manager *m) {
366 Meta *meta;
367 unsigned n = 0;
368 unsigned gc_marker;
369
370 assert(m);
371
372 if ((m->n_in_gc_queue < GC_QUEUE_ENTRIES_MAX) &&
373 (m->gc_queue_timestamp <= 0 ||
374 (m->gc_queue_timestamp + GC_QUEUE_USEC_MAX) > now(CLOCK_MONOTONIC)))
375 return 0;
376
377 log_debug("Running GC...");
378
379 m->gc_marker += _GC_OFFSET_MAX;
380 if (m->gc_marker + _GC_OFFSET_MAX <= _GC_OFFSET_MAX)
381 m->gc_marker = 1;
382
383 gc_marker = m->gc_marker;
384
385 while ((meta = m->gc_queue)) {
386 assert(meta->in_gc_queue);
387
388 unit_gc_sweep((Unit*) meta, gc_marker);
389
390 LIST_REMOVE(Meta, gc_queue, m->gc_queue, meta);
391 meta->in_gc_queue = false;
392
393 n++;
394
395 if (meta->gc_marker == gc_marker + GC_OFFSET_BAD ||
396 meta->gc_marker == gc_marker + GC_OFFSET_UNSURE) {
397 log_debug("Collecting %s", meta->id);
398 meta->gc_marker = gc_marker + GC_OFFSET_BAD;
399 unit_add_to_cleanup_queue((Unit*) meta);
400 }
401 }
402
403 m->n_in_gc_queue = 0;
404 m->gc_queue_timestamp = 0;
405
406 return n;
407 }
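
/* Illustrative sketch (not part of the original file): the marker arithmetic
 * used by unit_gc_sweep() and manager_dispatch_gc_queue() in miniature. Every
 * sweep bumps m->gc_marker by _GC_OFFSET_MAX, so a unit's absolute marker
 * value encodes both "which sweep last visited me" and that sweep's verdict,
 * and markers never need to be reset on all units between sweeps. */
#if 0
static bool visited_in_sweep(Unit *u, unsigned gc_marker) {
        /* GC_OFFSET_IN_PATH..GC_OFFSET_BAD are the four per-sweep verdicts */
        return u->meta.gc_marker >= gc_marker + GC_OFFSET_IN_PATH &&
               u->meta.gc_marker <= gc_marker + GC_OFFSET_BAD;
}
#endif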
408
409 static void manager_clear_jobs_and_units(Manager *m) {
410 Job *j;
411 Unit *u;
412
413 assert(m);
414
415 while ((j = hashmap_first(m->transaction_jobs)))
416 job_free(j);
417
418 while ((u = hashmap_first(m->units)))
419 unit_free(u);
420
421 manager_dispatch_cleanup_queue(m);
422
423 assert(!m->load_queue);
424 assert(!m->run_queue);
425 assert(!m->dbus_unit_queue);
426 assert(!m->dbus_job_queue);
427 assert(!m->cleanup_queue);
428 assert(!m->gc_queue);
429
430 assert(hashmap_isempty(m->transaction_jobs));
431 assert(hashmap_isempty(m->jobs));
432 assert(hashmap_isempty(m->units));
433 }
434
435 void manager_free(Manager *m) {
436 UnitType c;
437
438 assert(m);
439
440 manager_clear_jobs_and_units(m);
441
442 for (c = 0; c < _UNIT_TYPE_MAX; c++)
443 if (unit_vtable[c]->shutdown)
444 unit_vtable[c]->shutdown(m);
445
446 /* If we reexecute ourselves, we keep the root cgroup
447 * around */
448 manager_shutdown_cgroup(m, m->exit_code != MANAGER_REEXECUTE);
449
450 manager_undo_generators(m);
451
452 bus_done(m);
453
454 hashmap_free(m->units);
455 hashmap_free(m->jobs);
456 hashmap_free(m->transaction_jobs);
457 hashmap_free(m->watch_pids);
458 hashmap_free(m->watch_bus);
459
460 if (m->epoll_fd >= 0)
461 close_nointr_nofail(m->epoll_fd);
462 if (m->signal_watch.fd >= 0)
463 close_nointr_nofail(m->signal_watch.fd);
464 if (m->notify_watch.fd >= 0)
465 close_nointr_nofail(m->notify_watch.fd);
466
467 #ifdef HAVE_AUDIT
468 if (m->audit_fd >= 0)
469 audit_close(m->audit_fd);
470 #endif
471
472 free(m->notify_socket);
473
474 lookup_paths_free(&m->lookup_paths);
475 strv_free(m->environment);
476
477 strv_free(m->default_controllers);
478
479 hashmap_free(m->cgroup_bondings);
480 set_free_free(m->unit_path_cache);
481
482 free(m);
483 }
484
485 int manager_enumerate(Manager *m) {
486 int r = 0, q;
487 UnitType c;
488
489 assert(m);
490
491 /* Let's ask every type to load all units from disk/kernel
492 * that it might know */
493 for (c = 0; c < _UNIT_TYPE_MAX; c++)
494 if (unit_vtable[c]->enumerate)
495 if ((q = unit_vtable[c]->enumerate(m)) < 0)
496 r = q;
497
498 manager_dispatch_load_queue(m);
499 return r;
500 }
501
502 int manager_coldplug(Manager *m) {
503 int r = 0, q;
504 Iterator i;
505 Unit *u;
506 char *k;
507
508 assert(m);
509
510 /* Then, let's set up their initial state. */
511 HASHMAP_FOREACH_KEY(u, k, m->units, i) {
512
513 /* ignore aliases */
514 if (u->meta.id != k)
515 continue;
516
517 if ((q = unit_coldplug(u)) < 0)
518 r = q;
519 }
520
521 return r;
522 }
523
524 static void manager_build_unit_path_cache(Manager *m) {
525 char **i;
526 DIR *d = NULL;
527 int r;
528
529 assert(m);
530
531 set_free_free(m->unit_path_cache);
532
533 if (!(m->unit_path_cache = set_new(string_hash_func, string_compare_func))) {
534 log_error("Failed to allocate unit path cache.");
535 return;
536 }
537
538 /* This simply builds a list of files we know exist, so that
539 * we don't always have to go to disk */
540
541 STRV_FOREACH(i, m->lookup_paths.unit_path) {
542 struct dirent *de;
543
544 if (!(d = opendir(*i))) {
545 log_error("Failed to open directory: %m");
546 continue;
547 }
548
549 while ((de = readdir(d))) {
550 char *p;
551
552 if (ignore_file(de->d_name))
553 continue;
554
555 if (asprintf(&p, "%s/%s", streq(*i, "/") ? "" : *i, de->d_name) < 0) {
556 r = -ENOMEM;
557 goto fail;
558 }
559
560 if ((r = set_put(m->unit_path_cache, p)) < 0) {
561 free(p);
562 goto fail;
563 }
564 }
565
566 closedir(d);
567 d = NULL;
568 }
569
570 return;
571
572 fail:
573 log_error("Failed to build unit path cache: %s", strerror(-r));
574
575 set_free_free(m->unit_path_cache);
576 m->unit_path_cache = NULL;
577
578 if (d)
579 closedir(d);
580 }
581
582 int manager_startup(Manager *m, FILE *serialization, FDSet *fds) {
583 int r, q;
584
585 assert(m);
586
587 manager_run_generators(m);
588
589 manager_build_unit_path_cache(m);
590
591         /* If we are going to deserialize, make sure this is already
592          * known during enumeration, so we increase the counter
593          * here already */
594 if (serialization)
595 m->n_deserializing ++;
596
597 /* First, enumerate what we can from all config files */
598 r = manager_enumerate(m);
599
600 /* Second, deserialize if there is something to deserialize */
601 if (serialization)
602 if ((q = manager_deserialize(m, serialization, fds)) < 0)
603 r = q;
604
605 /* Third, fire things up! */
606 if ((q = manager_coldplug(m)) < 0)
607 r = q;
608
609 if (serialization) {
610 assert(m->n_deserializing > 0);
611 m->n_deserializing --;
612 }
613
614 return r;
615 }
616
617 static void transaction_delete_job(Manager *m, Job *j, bool delete_dependencies) {
618 assert(m);
619 assert(j);
620
621 /* Deletes one job from the transaction */
622
623 manager_transaction_unlink_job(m, j, delete_dependencies);
624
625 if (!j->installed)
626 job_free(j);
627 }
628
629 static void transaction_delete_unit(Manager *m, Unit *u) {
630 Job *j;
631
632 /* Deletes all jobs associated with a certain unit from the
633 * transaction */
634
635 while ((j = hashmap_get(m->transaction_jobs, u)))
636 transaction_delete_job(m, j, true);
637 }
638
639 static void transaction_clean_dependencies(Manager *m) {
640 Iterator i;
641 Job *j;
642
643 assert(m);
644
645 /* Drops all dependencies of all installed jobs */
646
647 HASHMAP_FOREACH(j, m->jobs, i) {
648 while (j->subject_list)
649 job_dependency_free(j->subject_list);
650 while (j->object_list)
651 job_dependency_free(j->object_list);
652 }
653
654 assert(!m->transaction_anchor);
655 }
656
657 static void transaction_abort(Manager *m) {
658 Job *j;
659
660 assert(m);
661
662 while ((j = hashmap_first(m->transaction_jobs)))
663 if (j->installed)
664 transaction_delete_job(m, j, true);
665 else
666 job_free(j);
667
668 assert(hashmap_isempty(m->transaction_jobs));
669
670 transaction_clean_dependencies(m);
671 }
672
673 static void transaction_find_jobs_that_matter_to_anchor(Manager *m, Job *j, unsigned generation) {
674 JobDependency *l;
675
676 assert(m);
677
678 /* A recursive sweep through the graph that marks all units
679 * that matter to the anchor job, i.e. are directly or
680 * indirectly a dependency of the anchor job via paths that
681 * are fully marked as mattering. */
682
683 if (j)
684 l = j->subject_list;
685 else
686 l = m->transaction_anchor;
687
688 LIST_FOREACH(subject, l, l) {
689
690 /* This link does not matter */
691 if (!l->matters)
692 continue;
693
694 /* This unit has already been marked */
695 if (l->object->generation == generation)
696 continue;
697
698 l->object->matters_to_anchor = true;
699 l->object->generation = generation;
700
701 transaction_find_jobs_that_matter_to_anchor(m, l->object, generation);
702 }
703 }
704
705 static void transaction_merge_and_delete_job(Manager *m, Job *j, Job *other, JobType t) {
706 JobDependency *l, *last;
707
708 assert(j);
709 assert(other);
710 assert(j->unit == other->unit);
711 assert(!j->installed);
712
713 /* Merges 'other' into 'j' and then deletes j. */
714
715 j->type = t;
716 j->state = JOB_WAITING;
717 j->override = j->override || other->override;
718
719 j->matters_to_anchor = j->matters_to_anchor || other->matters_to_anchor;
720
721 /* Patch us in as new owner of the JobDependency objects */
722 last = NULL;
723 LIST_FOREACH(subject, l, other->subject_list) {
724 assert(l->subject == other);
725 l->subject = j;
726 last = l;
727 }
728
729 /* Merge both lists */
730 if (last) {
731 last->subject_next = j->subject_list;
732 if (j->subject_list)
733 j->subject_list->subject_prev = last;
734 j->subject_list = other->subject_list;
735 }
736
737 /* Patch us in as new owner of the JobDependency objects */
738 last = NULL;
739 LIST_FOREACH(object, l, other->object_list) {
740 assert(l->object == other);
741 l->object = j;
742 last = l;
743 }
744
745 /* Merge both lists */
746 if (last) {
747 last->object_next = j->object_list;
748 if (j->object_list)
749 j->object_list->object_prev = last;
750 j->object_list = other->object_list;
751 }
752
753 /* Kill the other job */
754 other->subject_list = NULL;
755 other->object_list = NULL;
756 transaction_delete_job(m, other, true);
757 }
758 static bool job_is_conflicted_by(Job *j) {
759 JobDependency *l;
760
761 assert(j);
762
763         /* Returns true if this job is pulled in by at least one
764          * ConflictedBy dependency. */
765
766 LIST_FOREACH(object, l, j->object_list)
767 if (l->conflicts)
768 return true;
769
770 return false;
771 }
772
773 static int delete_one_unmergeable_job(Manager *m, Job *j) {
774 Job *k;
775
776 assert(j);
777
778 /* Tries to delete one item in the linked list
779 * j->transaction_next->transaction_next->... that conflicts
780 * with another one, in an attempt to make an inconsistent
781 * transaction work. */
782
783         /* We rely here on the fact that if a, merged with b, does not
784          * merge with c, then at least one of a and b does not merge with c either */
785 LIST_FOREACH(transaction, j, j)
786 LIST_FOREACH(transaction, k, j->transaction_next) {
787 Job *d;
788
789 /* Is this one mergeable? Then skip it */
790 if (job_type_is_mergeable(j->type, k->type))
791 continue;
792
793 /* Ok, we found two that conflict, let's see if we can
794 * drop one of them */
795 if (!j->matters_to_anchor && !k->matters_to_anchor) {
796
797 /* Both jobs don't matter, so let's
798 * find the one that is smarter to
799 * remove. Let's think positive and
800                                  * rather remove stops than starts --
801 * except if something is being
802 * stopped because it is conflicted by
803 * another unit in which case we
804 * rather remove the start. */
805
806 log_debug("Looking at job %s/%s conflicted_by=%s", j->unit->meta.id, job_type_to_string(j->type), yes_no(j->type == JOB_STOP && job_is_conflicted_by(j)));
807 log_debug("Looking at job %s/%s conflicted_by=%s", k->unit->meta.id, job_type_to_string(k->type), yes_no(k->type == JOB_STOP && job_is_conflicted_by(k)));
808
809 if (j->type == JOB_STOP) {
810
811 if (job_is_conflicted_by(j))
812 d = k;
813 else
814 d = j;
815
816 } else if (k->type == JOB_STOP) {
817
818 if (job_is_conflicted_by(k))
819 d = j;
820 else
821 d = k;
822 } else
823 d = j;
824
825 } else if (!j->matters_to_anchor)
826 d = j;
827 else if (!k->matters_to_anchor)
828 d = k;
829 else
830 return -ENOEXEC;
831
832 /* Ok, we can drop one, so let's do so. */
833 log_debug("Fixing conflicting jobs by deleting job %s/%s", d->unit->meta.id, job_type_to_string(d->type));
834 transaction_delete_job(m, d, true);
835 return 0;
836 }
837
838 return -EINVAL;
839 }
840
841 static int transaction_merge_jobs(Manager *m, DBusError *e) {
842 Job *j;
843 Iterator i;
844 int r;
845
846 assert(m);
847
848 /* First step, check whether any of the jobs for one specific
849 * task conflict. If so, try to drop one of them. */
850 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
851 JobType t;
852 Job *k;
853
854 t = j->type;
855 LIST_FOREACH(transaction, k, j->transaction_next) {
856 if (job_type_merge(&t, k->type) >= 0)
857 continue;
858
859 /* OK, we could not merge all jobs for this
860 * action. Let's see if we can get rid of one
861 * of them */
862
863 if ((r = delete_one_unmergeable_job(m, j)) >= 0)
864 /* Ok, we managed to drop one, now
865 * let's ask our callers to call us
866 * again after garbage collecting */
867 return -EAGAIN;
868
869 /* We couldn't merge anything. Failure */
870 dbus_set_error(e, BUS_ERROR_TRANSACTION_JOBS_CONFLICTING, "Transaction contains conflicting jobs '%s' and '%s' for %s. Probably contradicting requirement dependencies configured.",
871 job_type_to_string(t), job_type_to_string(k->type), k->unit->meta.id);
872 return r;
873 }
874 }
875
876 /* Second step, merge the jobs. */
877 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
878 JobType t = j->type;
879 Job *k;
880
881 /* Merge all transactions */
882 LIST_FOREACH(transaction, k, j->transaction_next)
883 assert_se(job_type_merge(&t, k->type) == 0);
884
885 /* If an active job is mergeable, merge it too */
886 if (j->unit->meta.job)
887 job_type_merge(&t, j->unit->meta.job->type); /* Might fail. Which is OK */
888
889 while ((k = j->transaction_next)) {
890 if (j->installed) {
891 transaction_merge_and_delete_job(m, k, j, t);
892 j = k;
893 } else
894 transaction_merge_and_delete_job(m, j, k, t);
895 }
896
897 assert(!j->transaction_next);
898 assert(!j->transaction_prev);
899 }
900
901 return 0;
902 }
903
904 static void transaction_drop_redundant(Manager *m) {
905 bool again;
906
907 assert(m);
908
909 /* Goes through the transaction and removes all jobs that are
910 * a noop */
911
912 do {
913 Job *j;
914 Iterator i;
915
916 again = false;
917
918 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
919 bool changes_something = false;
920 Job *k;
921
922 LIST_FOREACH(transaction, k, j) {
923
924 if (!job_is_anchor(k) &&
925 (k->installed || job_type_is_redundant(k->type, unit_active_state(k->unit))) &&
926 (!k->unit->meta.job || !job_type_is_conflicting(k->type, k->unit->meta.job->type)))
927 continue;
928
929 changes_something = true;
930 break;
931 }
932
933 if (changes_something)
934 continue;
935
936 /* log_debug("Found redundant job %s/%s, dropping.", j->unit->meta.id, job_type_to_string(j->type)); */
937 transaction_delete_job(m, j, false);
938 again = true;
939 break;
940 }
941
942 } while (again);
943 }
944
945 static bool unit_matters_to_anchor(Unit *u, Job *j) {
946 assert(u);
947 assert(!j->transaction_prev);
948
949 /* Checks whether at least one of the jobs for this unit
950 * matters to the anchor. */
951
952 LIST_FOREACH(transaction, j, j)
953 if (j->matters_to_anchor)
954 return true;
955
956 return false;
957 }
958
959 static int transaction_verify_order_one(Manager *m, Job *j, Job *from, unsigned generation, DBusError *e) {
960 Iterator i;
961 Unit *u;
962 int r;
963
964 assert(m);
965 assert(j);
966 assert(!j->transaction_prev);
967
968 /* Does a recursive sweep through the ordering graph, looking
969          * for a cycle. If we find a cycle, we try to break it. */
970
971 /* Have we seen this before? */
972 if (j->generation == generation) {
973 Job *k, *delete;
974
975 /* If the marker is NULL we have been here already and
976 * decided the job was loop-free from here. Hence
977 * shortcut things and return right-away. */
978 if (!j->marker)
979 return 0;
980
981                 /* So, the marker is not NULL and we have already been
982                  * here. We have a cycle. Let's try to break it. We go
983                  * backwards in our path and try to find a suitable
984                  * job to remove. We use the marker to find our way
985                  * back, since, smart as we are, we stored our way back
986                  * in there. */
987 log_warning("Found ordering cycle on %s/%s", j->unit->meta.id, job_type_to_string(j->type));
988
989 delete = NULL;
990 for (k = from; k; k = ((k->generation == generation && k->marker != k) ? k->marker : NULL)) {
991
992 log_info("Walked on cycle path to %s/%s", k->unit->meta.id, job_type_to_string(k->type));
993
994 if (!delete &&
995 !k->installed &&
996 !unit_matters_to_anchor(k->unit, k)) {
997 /* Ok, we can drop this one, so let's
998 * do so. */
999 delete = k;
1000 }
1001
1002 /* Check if this in fact was the beginning of
1003 * the cycle */
1004 if (k == j)
1005 break;
1006 }
1007
1008
1009 if (delete) {
1010 log_warning("Breaking ordering cycle by deleting job %s/%s", delete->unit->meta.id, job_type_to_string(delete->type));
1011 transaction_delete_unit(m, delete->unit);
1012 return -EAGAIN;
1013 }
1014
1015 log_error("Unable to break cycle");
1016
1017 dbus_set_error(e, BUS_ERROR_TRANSACTION_ORDER_IS_CYCLIC, "Transaction order is cyclic. See system logs for details.");
1018 return -ENOEXEC;
1019 }
1020
1021 /* Make the marker point to where we come from, so that we can
1022 * find our way backwards if we want to break a cycle. We use
1023 * a special marker for the beginning: we point to
1024 * ourselves. */
1025 j->marker = from ? from : j;
1026 j->generation = generation;
1027
1028         /* We assume that the dependencies are bidirectional, and
1029 * hence can ignore UNIT_AFTER */
1030 SET_FOREACH(u, j->unit->meta.dependencies[UNIT_BEFORE], i) {
1031 Job *o;
1032
1033 /* Is there a job for this unit? */
1034 if (!(o = hashmap_get(m->transaction_jobs, u)))
1035
1036 /* Ok, there is no job for this in the
1037 * transaction, but maybe there is already one
1038 * running? */
1039 if (!(o = u->meta.job))
1040 continue;
1041
1042 if ((r = transaction_verify_order_one(m, o, j, generation, e)) < 0)
1043 return r;
1044 }
1045
1046 /* Ok, let's backtrack, and remember that this entry is not on
1047 * our path anymore. */
1048 j->marker = NULL;
1049
1050 return 0;
1051 }
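
/* Illustrative sketch (not part of the original file): the generation/marker
 * bookkeeping of transaction_verify_order_one() on a bare toy node, to make
 * the three marker states explicit: NULL means "already proven loop-free",
 * self marks the DFS root, and any other non-NULL value is the predecessor on
 * the current path (which is what the cycle walk above follows backwards). */
#if 0
struct toy_node {
        struct toy_node *marker;     /* NULL, self, or DFS predecessor */
        unsigned generation;
        struct toy_node **before;    /* NULL-terminated ordering deps */
};

static int toy_verify_order(struct toy_node *n, struct toy_node *from, unsigned generation) {
        struct toy_node **d;
        int r;

        if (n->generation == generation)
                return n->marker ? -ELOOP : 0; /* on the path: cycle; otherwise known clean */

        n->marker = from ? from : n;           /* remember the way back */
        n->generation = generation;

        for (d = n->before; d && *d; d++)
                if ((r = toy_verify_order(*d, n, generation)) < 0)
                        return r;

        n->marker = NULL;                      /* backtrack: proven loop-free */
        return 0;
}
#endif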
1052
1053 static int transaction_verify_order(Manager *m, unsigned *generation, DBusError *e) {
1054 Job *j;
1055 int r;
1056 Iterator i;
1057 unsigned g;
1058
1059 assert(m);
1060 assert(generation);
1061
1062 /* Check if the ordering graph is cyclic. If it is, try to fix
1063 * that up by dropping one of the jobs. */
1064
1065 g = (*generation)++;
1066
1067 HASHMAP_FOREACH(j, m->transaction_jobs, i)
1068 if ((r = transaction_verify_order_one(m, j, NULL, g, e)) < 0)
1069 return r;
1070
1071 return 0;
1072 }
1073
1074 static void transaction_collect_garbage(Manager *m) {
1075 bool again;
1076
1077 assert(m);
1078
1079 /* Drop jobs that are not required by any other job */
1080
1081 do {
1082 Iterator i;
1083 Job *j;
1084
1085 again = false;
1086
1087 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
1088 if (j->object_list) {
1089 /* log_debug("Keeping job %s/%s because of %s/%s", */
1090 /* j->unit->meta.id, job_type_to_string(j->type), */
1091 /* j->object_list->subject ? j->object_list->subject->unit->meta.id : "root", */
1092 /* j->object_list->subject ? job_type_to_string(j->object_list->subject->type) : "root"); */
1093 continue;
1094 }
1095
1096 /* log_debug("Garbage collecting job %s/%s", j->unit->meta.id, job_type_to_string(j->type)); */
1097 transaction_delete_job(m, j, true);
1098 again = true;
1099 break;
1100 }
1101
1102 } while (again);
1103 }
1104
1105 static int transaction_is_destructive(Manager *m, DBusError *e) {
1106 Iterator i;
1107 Job *j;
1108
1109 assert(m);
1110
1111 /* Checks whether applying this transaction means that
1112 * existing jobs would be replaced */
1113
1114 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
1115
1116 /* Assume merged */
1117 assert(!j->transaction_prev);
1118 assert(!j->transaction_next);
1119
1120 if (j->unit->meta.job &&
1121 j->unit->meta.job != j &&
1122 !job_type_is_superset(j->type, j->unit->meta.job->type)) {
1123
1124 dbus_set_error(e, BUS_ERROR_TRANSACTION_IS_DESTRUCTIVE, "Transaction is destructive.");
1125 return -EEXIST;
1126 }
1127 }
1128
1129 return 0;
1130 }
1131
1132 static void transaction_minimize_impact(Manager *m) {
1133 bool again;
1134 assert(m);
1135
1136 /* Drops all unnecessary jobs that reverse already active jobs
1137 * or that stop a running service. */
1138
1139 do {
1140 Job *j;
1141 Iterator i;
1142
1143 again = false;
1144
1145 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
1146 LIST_FOREACH(transaction, j, j) {
1147 bool stops_running_service, changes_existing_job;
1148
1149 /* If it matters, we shouldn't drop it */
1150 if (j->matters_to_anchor)
1151 continue;
1152
1153 /* Would this stop a running service?
1154 * Would this change an existing job?
1155 * If so, let's drop this entry */
1156
1157 stops_running_service =
1158 j->type == JOB_STOP && UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(j->unit));
1159
1160 changes_existing_job =
1161 j->unit->meta.job &&
1162 job_type_is_conflicting(j->type, j->unit->meta.job->type);
1163
1164 if (!stops_running_service && !changes_existing_job)
1165 continue;
1166
1167 if (stops_running_service)
1168 log_info("%s/%s would stop a running service.", j->unit->meta.id, job_type_to_string(j->type));
1169
1170 if (changes_existing_job)
1171 log_info("%s/%s would change existing job.", j->unit->meta.id, job_type_to_string(j->type));
1172
1173 /* Ok, let's get rid of this */
1174 log_info("Deleting %s/%s to minimize impact.", j->unit->meta.id, job_type_to_string(j->type));
1175
1176 transaction_delete_job(m, j, true);
1177 again = true;
1178 break;
1179 }
1180
1181 if (again)
1182 break;
1183 }
1184
1185 } while (again);
1186 }
1187
1188 static int transaction_apply(Manager *m) {
1189 Iterator i;
1190 Job *j;
1191 int r;
1192
1193 /* Moves the transaction jobs to the set of active jobs */
1194
1195 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
1196 /* Assume merged */
1197 assert(!j->transaction_prev);
1198 assert(!j->transaction_next);
1199
1200 if (j->installed)
1201 continue;
1202
1203 if ((r = hashmap_put(m->jobs, UINT32_TO_PTR(j->id), j)) < 0)
1204 goto rollback;
1205 }
1206
1207 while ((j = hashmap_steal_first(m->transaction_jobs))) {
1208 if (j->installed) {
1209 /* log_debug("Skipping already installed job %s/%s as %u", j->unit->meta.id, job_type_to_string(j->type), (unsigned) j->id); */
1210 continue;
1211 }
1212
1213 if (j->unit->meta.job)
1214 job_free(j->unit->meta.job);
1215
1216 j->unit->meta.job = j;
1217 j->installed = true;
1218 m->n_installed_jobs ++;
1219
1220 /* We're fully installed. Now let's free data we don't
1221 * need anymore. */
1222
1223 assert(!j->transaction_next);
1224 assert(!j->transaction_prev);
1225
1226 job_add_to_run_queue(j);
1227 job_add_to_dbus_queue(j);
1228 job_start_timer(j);
1229
1230 log_debug("Installed new job %s/%s as %u", j->unit->meta.id, job_type_to_string(j->type), (unsigned) j->id);
1231 }
1232
1233 /* As last step, kill all remaining job dependencies. */
1234 transaction_clean_dependencies(m);
1235
1236 return 0;
1237
1238 rollback:
1239
1240 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
1241 if (j->installed)
1242 continue;
1243
1244 hashmap_remove(m->jobs, UINT32_TO_PTR(j->id));
1245 }
1246
1247 return r;
1248 }
1249
1250 static int transaction_activate(Manager *m, JobMode mode, DBusError *e) {
1251 int r;
1252 unsigned generation = 1;
1253
1254 assert(m);
1255
1256 /* This applies the changes recorded in transaction_jobs to
1257 * the actual list of jobs, if possible. */
1258
1259 /* First step: figure out which jobs matter */
1260 transaction_find_jobs_that_matter_to_anchor(m, NULL, generation++);
1261
1262 /* Second step: Try not to stop any running services if
1263 * we don't have to. Don't try to reverse running
1264 * jobs if we don't have to. */
1265 if (mode == JOB_FAIL)
1266 transaction_minimize_impact(m);
1267
1268 /* Third step: Drop redundant jobs */
1269 transaction_drop_redundant(m);
1270
1271 for (;;) {
1272 /* Fourth step: Let's remove unneeded jobs that might
1273 * be lurking. */
1274 transaction_collect_garbage(m);
1275
1276 /* Fifth step: verify order makes sense and correct
1277 * cycles if necessary and possible */
1278 if ((r = transaction_verify_order(m, &generation, e)) >= 0)
1279 break;
1280
1281 if (r != -EAGAIN) {
1282 log_warning("Requested transaction contains an unfixable cyclic ordering dependency: %s", bus_error(e, r));
1283 goto rollback;
1284 }
1285
1286 /* Let's see if the resulting transaction ordering
1287 * graph is still cyclic... */
1288 }
1289
1290 for (;;) {
1291 /* Sixth step: let's drop unmergeable entries if
1292 * necessary and possible, merge entries we can
1293 * merge */
1294 if ((r = transaction_merge_jobs(m, e)) >= 0)
1295 break;
1296
1297 if (r != -EAGAIN) {
1298 log_warning("Requested transaction contains unmergeable jobs: %s", bus_error(e, r));
1299 goto rollback;
1300 }
1301
1302 /* Seventh step: an entry got dropped, let's garbage
1303 * collect its dependencies. */
1304 transaction_collect_garbage(m);
1305
1306 /* Let's see if the resulting transaction still has
1307 * unmergeable entries ... */
1308 }
1309
1310         /* Eighth step: Drop redundant jobs again, if the merging now allows us to drop more. */
1311 transaction_drop_redundant(m);
1312
1313 /* Ninth step: check whether we can actually apply this */
1314 if (mode == JOB_FAIL)
1315 if ((r = transaction_is_destructive(m, e)) < 0) {
1316 log_notice("Requested transaction contradicts existing jobs: %s", bus_error(e, r));
1317 goto rollback;
1318 }
1319
1320 /* Tenth step: apply changes */
1321 if ((r = transaction_apply(m)) < 0) {
1322 log_warning("Failed to apply transaction: %s", strerror(-r));
1323 goto rollback;
1324 }
1325
1326 assert(hashmap_isempty(m->transaction_jobs));
1327 assert(!m->transaction_anchor);
1328
1329 return 0;
1330
1331 rollback:
1332 transaction_abort(m);
1333 return r;
1334 }
1335
1336 static Job* transaction_add_one_job(Manager *m, JobType type, Unit *unit, bool override, bool *is_new) {
1337 Job *j, *f;
1338
1339 assert(m);
1340 assert(unit);
1341
1342 /* Looks for an existing prospective job and returns that. If
1343 * it doesn't exist it is created and added to the prospective
1344 * jobs list. */
1345
1346 f = hashmap_get(m->transaction_jobs, unit);
1347
1348 LIST_FOREACH(transaction, j, f) {
1349 assert(j->unit == unit);
1350
1351 if (j->type == type) {
1352 if (is_new)
1353 *is_new = false;
1354 return j;
1355 }
1356 }
1357
1358 if (unit->meta.job && unit->meta.job->type == type)
1359 j = unit->meta.job;
1360 else if (!(j = job_new(m, type, unit)))
1361 return NULL;
1362
1363 j->generation = 0;
1364 j->marker = NULL;
1365 j->matters_to_anchor = false;
1366 j->override = override;
1367
1368 LIST_PREPEND(Job, transaction, f, j);
1369
1370 if (hashmap_replace(m->transaction_jobs, unit, f) < 0) {
1371 job_free(j);
1372 return NULL;
1373 }
1374
1375 if (is_new)
1376 *is_new = true;
1377
1378 /* log_debug("Added job %s/%s to transaction.", unit->meta.id, job_type_to_string(type)); */
1379
1380 return j;
1381 }
1382
1383 void manager_transaction_unlink_job(Manager *m, Job *j, bool delete_dependencies) {
1384 assert(m);
1385 assert(j);
1386
1387 if (j->transaction_prev)
1388 j->transaction_prev->transaction_next = j->transaction_next;
1389 else if (j->transaction_next)
1390 hashmap_replace(m->transaction_jobs, j->unit, j->transaction_next);
1391 else
1392 hashmap_remove_value(m->transaction_jobs, j->unit, j);
1393
1394 if (j->transaction_next)
1395 j->transaction_next->transaction_prev = j->transaction_prev;
1396
1397 j->transaction_prev = j->transaction_next = NULL;
1398
1399 while (j->subject_list)
1400 job_dependency_free(j->subject_list);
1401
1402 while (j->object_list) {
1403 Job *other = j->object_list->matters ? j->object_list->subject : NULL;
1404
1405 job_dependency_free(j->object_list);
1406
1407 if (other && delete_dependencies) {
1408 log_debug("Deleting job %s/%s as dependency of job %s/%s",
1409 other->unit->meta.id, job_type_to_string(other->type),
1410 j->unit->meta.id, job_type_to_string(j->type));
1411 transaction_delete_job(m, other, delete_dependencies);
1412 }
1413 }
1414 }
1415
1416 static int transaction_add_job_and_dependencies(
1417 Manager *m,
1418 JobType type,
1419 Unit *unit,
1420 Job *by,
1421 bool matters,
1422 bool override,
1423 bool conflicts,
1424 bool ignore_deps,
1425 DBusError *e,
1426 Job **_ret) {
1427 Job *ret;
1428 Iterator i;
1429 Unit *dep;
1430 int r;
1431 bool is_new;
1432
1433 assert(m);
1434 assert(type < _JOB_TYPE_MAX);
1435 assert(unit);
1436
1437 /* log_debug("Pulling in %s/%s from %s/%s", */
1438 /* unit->meta.id, job_type_to_string(type), */
1439 /* by ? by->unit->meta.id : "NA", */
1440 /* by ? job_type_to_string(by->type) : "NA"); */
1441
1442 if (unit->meta.load_state != UNIT_LOADED &&
1443 unit->meta.load_state != UNIT_ERROR &&
1444 unit->meta.load_state != UNIT_MASKED) {
1445 dbus_set_error(e, BUS_ERROR_LOAD_FAILED, "Unit %s is not loaded properly.", unit->meta.id);
1446 return -EINVAL;
1447 }
1448
1449 if (type != JOB_STOP && unit->meta.load_state == UNIT_ERROR) {
1450 dbus_set_error(e, BUS_ERROR_LOAD_FAILED,
1451 "Unit %s failed to load: %s. "
1452 "See system logs and 'systemctl status' for details.",
1453 unit->meta.id,
1454 strerror(-unit->meta.load_error));
1455 return -EINVAL;
1456 }
1457
1458 if (type != JOB_STOP && unit->meta.load_state == UNIT_MASKED) {
1459 dbus_set_error(e, BUS_ERROR_MASKED, "Unit %s is masked.", unit->meta.id);
1460 return -EINVAL;
1461 }
1462
1463 if (!unit_job_is_applicable(unit, type)) {
1464 dbus_set_error(e, BUS_ERROR_JOB_TYPE_NOT_APPLICABLE, "Job type %s is not applicable for unit %s.", job_type_to_string(type), unit->meta.id);
1465 return -EBADR;
1466 }
1467
1468 /* First add the job. */
1469 if (!(ret = transaction_add_one_job(m, type, unit, override, &is_new)))
1470 return -ENOMEM;
1471
1472 ret->ignore_deps = ret->ignore_deps || ignore_deps;
1473
1474 /* Then, add a link to the job. */
1475 if (!job_dependency_new(by, ret, matters, conflicts))
1476 return -ENOMEM;
1477
1478 if (is_new && !ignore_deps) {
1479 Set *following;
1480
1481 /* If we are following some other unit, make sure we
1482 * add all dependencies of everybody following. */
1483 if (unit_following_set(ret->unit, &following) > 0) {
1484 SET_FOREACH(dep, following, i)
1485 if ((r = transaction_add_job_and_dependencies(m, type, dep, ret, false, override, false, false, e, NULL)) < 0) {
1486 log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->meta.id, bus_error(e, r));
1487
1488 if (e)
1489 dbus_error_free(e);
1490 }
1491
1492 set_free(following);
1493 }
1494
1495 /* Finally, recursively add in all dependencies. */
1496 if (type == JOB_START || type == JOB_RELOAD_OR_START) {
1497 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_REQUIRES], i)
1498 if ((r = transaction_add_job_and_dependencies(m, JOB_START, dep, ret, true, override, false, false, e, NULL)) < 0) {
1499 if (r != -EBADR)
1500 goto fail;
1501
1502 if (e)
1503 dbus_error_free(e);
1504 }
1505
1506 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_BIND_TO], i)
1507 if ((r = transaction_add_job_and_dependencies(m, JOB_START, dep, ret, true, override, false, false, e, NULL)) < 0) {
1508
1509 if (r != -EBADR)
1510 goto fail;
1511
1512 if (e)
1513 dbus_error_free(e);
1514 }
1515
1516 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_REQUIRES_OVERRIDABLE], i)
1517 if ((r = transaction_add_job_and_dependencies(m, JOB_START, dep, ret, !override, override, false, false, e, NULL)) < 0) {
1518 log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->meta.id, bus_error(e, r));
1519
1520 if (e)
1521 dbus_error_free(e);
1522 }
1523
1524 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_WANTS], i)
1525 if ((r = transaction_add_job_and_dependencies(m, JOB_START, dep, ret, false, false, false, false, e, NULL)) < 0) {
1526 log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->meta.id, bus_error(e, r));
1527
1528 if (e)
1529 dbus_error_free(e);
1530 }
1531
1532 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_REQUISITE], i)
1533 if ((r = transaction_add_job_and_dependencies(m, JOB_VERIFY_ACTIVE, dep, ret, true, override, false, false, e, NULL)) < 0) {
1534
1535 if (r != -EBADR)
1536 goto fail;
1537
1538 if (e)
1539 dbus_error_free(e);
1540 }
1541
1542 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_REQUISITE_OVERRIDABLE], i)
1543 if ((r = transaction_add_job_and_dependencies(m, JOB_VERIFY_ACTIVE, dep, ret, !override, override, false, false, e, NULL)) < 0) {
1544 log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->meta.id, bus_error(e, r));
1545
1546 if (e)
1547 dbus_error_free(e);
1548 }
1549
1550 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_CONFLICTS], i)
1551 if ((r = transaction_add_job_and_dependencies(m, JOB_STOP, dep, ret, true, override, true, false, e, NULL)) < 0) {
1552
1553 if (r != -EBADR)
1554 goto fail;
1555
1556 if (e)
1557 dbus_error_free(e);
1558 }
1559
1560 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_CONFLICTED_BY], i)
1561 if ((r = transaction_add_job_and_dependencies(m, JOB_STOP, dep, ret, false, override, false, false, e, NULL)) < 0) {
1562 log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->meta.id, bus_error(e, r));
1563
1564 if (e)
1565 dbus_error_free(e);
1566 }
1567
1568 } else if (type == JOB_STOP || type == JOB_RESTART || type == JOB_TRY_RESTART) {
1569
1570 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_REQUIRED_BY], i)
1571 if ((r = transaction_add_job_and_dependencies(m, type, dep, ret, true, override, false, false, e, NULL)) < 0) {
1572
1573 if (r != -EBADR)
1574 goto fail;
1575
1576 if (e)
1577 dbus_error_free(e);
1578 }
1579
1580 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_BOUND_BY], i)
1581 if ((r = transaction_add_job_and_dependencies(m, type, dep, ret, true, override, false, false, e, NULL)) < 0) {
1582
1583 if (r != -EBADR)
1584 goto fail;
1585
1586 if (e)
1587 dbus_error_free(e);
1588 }
1589 }
1590
1591                 /* JOB_VERIFY_ACTIVE and JOB_RELOAD require no dependency handling */
1592 }
1593
1594 if (_ret)
1595 *_ret = ret;
1596
1597 return 0;
1598
1599 fail:
1600 return r;
1601 }
1602
1603 static int transaction_add_isolate_jobs(Manager *m) {
1604 Iterator i;
1605 Unit *u;
1606 char *k;
1607 int r;
1608
1609 assert(m);
1610
1611 HASHMAP_FOREACH_KEY(u, k, m->units, i) {
1612
1613 /* ignore aliases */
1614 if (u->meta.id != k)
1615 continue;
1616
1617 if (UNIT_VTABLE(u)->no_isolate)
1618 continue;
1619
1620 /* No need to stop inactive jobs */
1621 if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(u)) && !u->meta.job)
1622 continue;
1623
1624 /* Is there already something listed for this? */
1625 if (hashmap_get(m->transaction_jobs, u))
1626 continue;
1627
1628 if ((r = transaction_add_job_and_dependencies(m, JOB_STOP, u, NULL, true, false, false, false, NULL, NULL)) < 0)
1629 log_warning("Cannot add isolate job for unit %s, ignoring: %s", u->meta.id, strerror(-r));
1630 }
1631
1632 return 0;
1633 }
1634
1635 int manager_add_job(Manager *m, JobType type, Unit *unit, JobMode mode, bool override, DBusError *e, Job **_ret) {
1636 int r;
1637 Job *ret;
1638
1639 assert(m);
1640 assert(type < _JOB_TYPE_MAX);
1641 assert(unit);
1642 assert(mode < _JOB_MODE_MAX);
1643
1644 if (mode == JOB_ISOLATE && type != JOB_START) {
1645 dbus_set_error(e, BUS_ERROR_INVALID_JOB_MODE, "Isolate is only valid for start.");
1646 return -EINVAL;
1647 }
1648
1649 if (mode == JOB_ISOLATE && !unit->meta.allow_isolate) {
1650 dbus_set_error(e, BUS_ERROR_NO_ISOLATION, "Operation refused, unit may not be isolated.");
1651 return -EPERM;
1652 }
1653
1654 log_debug("Trying to enqueue job %s/%s/%s", unit->meta.id, job_type_to_string(type), job_mode_to_string(mode));
1655
1656 if ((r = transaction_add_job_and_dependencies(m, type, unit, NULL, true, override, false, mode == JOB_IGNORE_DEPENDENCIES, e, &ret)) < 0) {
1657 transaction_abort(m);
1658 return r;
1659 }
1660
1661 if (mode == JOB_ISOLATE)
1662 if ((r = transaction_add_isolate_jobs(m)) < 0) {
1663 transaction_abort(m);
1664 return r;
1665 }
1666
1667 if ((r = transaction_activate(m, mode, e)) < 0)
1668 return r;
1669
1670 log_debug("Enqueued job %s/%s as %u", unit->meta.id, job_type_to_string(type), (unsigned) ret->id);
1671
1672 if (_ret)
1673 *_ret = ret;
1674
1675 return 0;
1676 }
1677
1678 int manager_add_job_by_name(Manager *m, JobType type, const char *name, JobMode mode, bool override, DBusError *e, Job **_ret) {
1679 Unit *unit;
1680 int r;
1681
1682 assert(m);
1683 assert(type < _JOB_TYPE_MAX);
1684 assert(name);
1685 assert(mode < _JOB_MODE_MAX);
1686
1687 if ((r = manager_load_unit(m, name, NULL, NULL, &unit)) < 0)
1688 return r;
1689
1690 return manager_add_job(m, type, unit, mode, override, e, _ret);
1691 }
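
/* Illustrative sketch (not part of the original file): what a caller of the
 * transaction machinery above looks like; manager_start_target() further
 * below does essentially the same thing for JOB_START. Assumes a valid
 * Manager *m and a unit name. */
#if 0
static int stop_unit_sketch(Manager *m, const char *name) {
        DBusError error;
        Job *job = NULL;
        int r;

        dbus_error_init(&error);

        if ((r = manager_add_job_by_name(m, JOB_STOP, name, JOB_REPLACE, false, &error, &job)) < 0)
                log_error("Failed to enqueue stop job for %s: %s", name, bus_error(&error, r));
        else
                log_debug("Enqueued stop job %u for %s", (unsigned) job->id, name);

        dbus_error_free(&error);
        return r;
}
#endif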
1692
1693 Job *manager_get_job(Manager *m, uint32_t id) {
1694 assert(m);
1695
1696 return hashmap_get(m->jobs, UINT32_TO_PTR(id));
1697 }
1698
1699 Unit *manager_get_unit(Manager *m, const char *name) {
1700 assert(m);
1701 assert(name);
1702
1703 return hashmap_get(m->units, name);
1704 }
1705
1706 unsigned manager_dispatch_load_queue(Manager *m) {
1707 Meta *meta;
1708 unsigned n = 0;
1709
1710 assert(m);
1711
1712 /* Make sure we are not run recursively */
1713 if (m->dispatching_load_queue)
1714 return 0;
1715
1716 m->dispatching_load_queue = true;
1717
1718 /* Dispatches the load queue. Takes a unit from the queue and
1719 * tries to load its data until the queue is empty */
1720
1721 while ((meta = m->load_queue)) {
1722 assert(meta->in_load_queue);
1723
1724 unit_load((Unit*) meta);
1725 n++;
1726 }
1727
1728 m->dispatching_load_queue = false;
1729 return n;
1730 }
1731
1732 int manager_load_unit_prepare(Manager *m, const char *name, const char *path, DBusError *e, Unit **_ret) {
1733 Unit *ret;
1734 int r;
1735
1736 assert(m);
1737 assert(name || path);
1738
1739 /* This will prepare the unit for loading, but not actually
1740 * load anything from disk. */
1741
1742 if (path && !is_path(path)) {
1743 dbus_set_error(e, BUS_ERROR_INVALID_PATH, "Path %s is not absolute.", path);
1744 return -EINVAL;
1745 }
1746
1747 if (!name)
1748 name = file_name_from_path(path);
1749
1750 if (!unit_name_is_valid(name, false)) {
1751 dbus_set_error(e, BUS_ERROR_INVALID_NAME, "Unit name %s is not valid.", name);
1752 return -EINVAL;
1753 }
1754
1755 if ((ret = manager_get_unit(m, name))) {
1756 *_ret = ret;
1757 return 1;
1758 }
1759
1760 if (!(ret = unit_new(m)))
1761 return -ENOMEM;
1762
1763 if (path)
1764 if (!(ret->meta.fragment_path = strdup(path))) {
1765 unit_free(ret);
1766 return -ENOMEM;
1767 }
1768
1769 if ((r = unit_add_name(ret, name)) < 0) {
1770 unit_free(ret);
1771 return r;
1772 }
1773
1774 unit_add_to_load_queue(ret);
1775 unit_add_to_dbus_queue(ret);
1776 unit_add_to_gc_queue(ret);
1777
1778 if (_ret)
1779 *_ret = ret;
1780
1781 return 0;
1782 }
1783
1784 int manager_load_unit(Manager *m, const char *name, const char *path, DBusError *e, Unit **_ret) {
1785 int r;
1786
1787 assert(m);
1788
1789 /* This will load the service information files, but not actually
1790 * start any services or anything. */
1791
1792 if ((r = manager_load_unit_prepare(m, name, path, e, _ret)) != 0)
1793 return r;
1794
1795 manager_dispatch_load_queue(m);
1796
1797 if (_ret)
1798 *_ret = unit_follow_merge(*_ret);
1799
1800 return 0;
1801 }
1802
1803 void manager_dump_jobs(Manager *s, FILE *f, const char *prefix) {
1804 Iterator i;
1805 Job *j;
1806
1807 assert(s);
1808 assert(f);
1809
1810 HASHMAP_FOREACH(j, s->jobs, i)
1811 job_dump(j, f, prefix);
1812 }
1813
1814 void manager_dump_units(Manager *s, FILE *f, const char *prefix) {
1815 Iterator i;
1816 Unit *u;
1817 const char *t;
1818
1819 assert(s);
1820 assert(f);
1821
1822 HASHMAP_FOREACH_KEY(u, t, s->units, i)
1823 if (u->meta.id == t)
1824 unit_dump(u, f, prefix);
1825 }
1826
1827 void manager_clear_jobs(Manager *m) {
1828 Job *j;
1829
1830 assert(m);
1831
1832 transaction_abort(m);
1833
1834 while ((j = hashmap_first(m->jobs)))
1835 job_finish_and_invalidate(j, JOB_CANCELED);
1836 }
1837
1838 unsigned manager_dispatch_run_queue(Manager *m) {
1839 Job *j;
1840 unsigned n = 0;
1841
1842 if (m->dispatching_run_queue)
1843 return 0;
1844
1845 m->dispatching_run_queue = true;
1846
1847 while ((j = m->run_queue)) {
1848 assert(j->installed);
1849 assert(j->in_run_queue);
1850
1851 job_run_and_invalidate(j);
1852 n++;
1853 }
1854
1855 m->dispatching_run_queue = false;
1856 return n;
1857 }
1858
1859 unsigned manager_dispatch_dbus_queue(Manager *m) {
1860 Job *j;
1861 Meta *meta;
1862 unsigned n = 0;
1863
1864 assert(m);
1865
1866 if (m->dispatching_dbus_queue)
1867 return 0;
1868
1869 m->dispatching_dbus_queue = true;
1870
1871 while ((meta = m->dbus_unit_queue)) {
1872 assert(meta->in_dbus_queue);
1873
1874 bus_unit_send_change_signal((Unit*) meta);
1875 n++;
1876 }
1877
1878 while ((j = m->dbus_job_queue)) {
1879 assert(j->in_dbus_queue);
1880
1881 bus_job_send_change_signal(j);
1882 n++;
1883 }
1884
1885 m->dispatching_dbus_queue = false;
1886 return n;
1887 }
1888
1889 static int manager_process_notify_fd(Manager *m) {
1890 ssize_t n;
1891
1892 assert(m);
1893
1894 for (;;) {
1895 char buf[4096];
1896 struct msghdr msghdr;
1897 struct iovec iovec;
1898 struct ucred *ucred;
1899 union {
1900 struct cmsghdr cmsghdr;
1901 uint8_t buf[CMSG_SPACE(sizeof(struct ucred))];
1902 } control;
1903 Unit *u;
1904 char **tags;
1905
1906 zero(iovec);
1907 iovec.iov_base = buf;
1908 iovec.iov_len = sizeof(buf)-1;
1909
1910 zero(control);
1911 zero(msghdr);
1912 msghdr.msg_iov = &iovec;
1913 msghdr.msg_iovlen = 1;
1914 msghdr.msg_control = &control;
1915 msghdr.msg_controllen = sizeof(control);
1916
1917 if ((n = recvmsg(m->notify_watch.fd, &msghdr, MSG_DONTWAIT)) <= 0) {
1918 if (n >= 0)
1919 return -EIO;
1920
1921 if (errno == EAGAIN || errno == EINTR)
1922 break;
1923
1924 return -errno;
1925 }
1926
1927 if (msghdr.msg_controllen < CMSG_LEN(sizeof(struct ucred)) ||
1928 control.cmsghdr.cmsg_level != SOL_SOCKET ||
1929 control.cmsghdr.cmsg_type != SCM_CREDENTIALS ||
1930 control.cmsghdr.cmsg_len != CMSG_LEN(sizeof(struct ucred))) {
1931 log_warning("Received notify message without credentials. Ignoring.");
1932 continue;
1933 }
1934
1935 ucred = (struct ucred*) CMSG_DATA(&control.cmsghdr);
1936
1937 if (!(u = hashmap_get(m->watch_pids, LONG_TO_PTR(ucred->pid))))
1938 if (!(u = cgroup_unit_by_pid(m, ucred->pid))) {
1939 log_warning("Cannot find unit for notify message of PID %lu.", (unsigned long) ucred->pid);
1940 continue;
1941 }
1942
1943 assert((size_t) n < sizeof(buf));
1944 buf[n] = 0;
1945 if (!(tags = strv_split(buf, "\n\r")))
1946 return -ENOMEM;
1947
1948 log_debug("Got notification message for unit %s", u->meta.id);
1949
1950 if (UNIT_VTABLE(u)->notify_message)
1951 UNIT_VTABLE(u)->notify_message(u, ucred->pid, tags);
1952
1953 strv_free(tags);
1954 }
1955
1956 return 0;
1957 }
1958
1959 static int manager_dispatch_sigchld(Manager *m) {
1960 assert(m);
1961
1962 for (;;) {
1963 siginfo_t si;
1964 Unit *u;
1965 int r;
1966
1967 zero(si);
1968
1969                 /* First we call waitid() for a PID and do not reap the
1970 * zombie. That way we can still access /proc/$PID for
1971 * it while it is a zombie. */
1972 if (waitid(P_ALL, 0, &si, WEXITED|WNOHANG|WNOWAIT) < 0) {
1973
1974 if (errno == ECHILD)
1975 break;
1976
1977 if (errno == EINTR)
1978 continue;
1979
1980 return -errno;
1981 }
1982
1983 if (si.si_pid <= 0)
1984 break;
1985
1986 if (si.si_code == CLD_EXITED || si.si_code == CLD_KILLED || si.si_code == CLD_DUMPED) {
1987 char *name = NULL;
1988
1989 get_process_name(si.si_pid, &name);
1990 log_debug("Got SIGCHLD for process %lu (%s)", (unsigned long) si.si_pid, strna(name));
1991 free(name);
1992 }
1993
1994 /* Let's flush any message the dying child might still
1995 * have queued for us. This ensures that the process
1996 * still exists in /proc so that we can figure out
1997 * which cgroup and hence unit it belongs to. */
1998 if ((r = manager_process_notify_fd(m)) < 0)
1999 return r;
2000
2001 /* And now figure out the unit this belongs to */
2002 if (!(u = hashmap_get(m->watch_pids, LONG_TO_PTR(si.si_pid))))
2003 u = cgroup_unit_by_pid(m, si.si_pid);
2004
2005 /* And now, we actually reap the zombie. */
2006 if (waitid(P_PID, si.si_pid, &si, WEXITED) < 0) {
2007 if (errno == EINTR)
2008 continue;
2009
2010 return -errno;
2011 }
2012
2013 if (si.si_code != CLD_EXITED && si.si_code != CLD_KILLED && si.si_code != CLD_DUMPED)
2014 continue;
2015
2016 log_debug("Child %lu died (code=%s, status=%i/%s)",
2017 (long unsigned) si.si_pid,
2018 sigchld_code_to_string(si.si_code),
2019 si.si_status,
2020 strna(si.si_code == CLD_EXITED
2021 ? exit_status_to_string(si.si_status, EXIT_STATUS_FULL)
2022 : signal_to_string(si.si_status)));
2023
2024 if (!u)
2025 continue;
2026
2027 log_debug("Child %lu belongs to %s", (long unsigned) si.si_pid, u->meta.id);
2028
2029 hashmap_remove(m->watch_pids, LONG_TO_PTR(si.si_pid));
2030 UNIT_VTABLE(u)->sigchld_event(u, si.si_pid, si.si_code, si.si_status);
2031 }
2032
2033 return 0;
2034 }
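
/* Illustrative sketch (not part of the original file): the two-step waitid()
 * pattern used above, in isolation. The first call only peeks (WNOWAIT), so
 * the child stays a zombie and /proc/<pid> remains readable long enough to
 * map it to its cgroup and hence unit; only the second call actually reaps it. */
#if 0
static int peek_then_reap_sketch(void) {
        siginfo_t si;

        zero(si);

        /* Peek: does not reap, /proc/<pid> stays around */
        if (waitid(P_ALL, 0, &si, WEXITED|WNOHANG|WNOWAIT) < 0)
                return errno == ECHILD ? 0 : -errno;

        if (si.si_pid <= 0)
                return 0;                      /* nothing has exited right now */

        /* ... look at /proc/<si.si_pid>/ here ... */

        /* Reap: this finally removes the zombie */
        if (waitid(P_PID, si.si_pid, &si, WEXITED) < 0)
                return -errno;

        return 1;
}
#endif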
2035
2036 static int manager_start_target(Manager *m, const char *name, JobMode mode) {
2037 int r;
2038 DBusError error;
2039
2040 dbus_error_init(&error);
2041
2042 log_debug("Activating special unit %s", name);
2043
2044 if ((r = manager_add_job_by_name(m, JOB_START, name, mode, true, &error, NULL)) < 0)
2045 log_error("Failed to enqueue %s job: %s", name, bus_error(&error, r));
2046
2047 dbus_error_free(&error);
2048
2049 return r;
2050 }
2051
2052 static int manager_process_signal_fd(Manager *m) {
2053 ssize_t n;
2054 struct signalfd_siginfo sfsi;
2055 bool sigchld = false;
2056
2057 assert(m);
2058
2059 for (;;) {
2060 char *p = NULL;
2061
2062 if ((n = read(m->signal_watch.fd, &sfsi, sizeof(sfsi))) != sizeof(sfsi)) {
2063
2064 if (n >= 0)
2065 return -EIO;
2066
2067 if (errno == EINTR || errno == EAGAIN)
2068 break;
2069
2070 return -errno;
2071 }
2072
2073 get_process_name(sfsi.ssi_pid, &p);
2074 log_debug("Received SIG%s from PID %lu (%s)",
2075 strna(signal_to_string(sfsi.ssi_signo)),
2076 (unsigned long) sfsi.ssi_pid, strna(p));
2077 free(p);
2078
2079 switch (sfsi.ssi_signo) {
2080
2081 case SIGCHLD:
2082 sigchld = true;
2083 break;
2084
2085 case SIGTERM:
2086 if (m->running_as == MANAGER_SYSTEM) {
2087 /* This is for compatibility with the
2088 * original sysvinit */
2089 m->exit_code = MANAGER_REEXECUTE;
2090 break;
2091 }
2092
2093 /* Fall through */
2094
2095 case SIGINT:
2096 if (m->running_as == MANAGER_SYSTEM) {
2097 manager_start_target(m, SPECIAL_CTRL_ALT_DEL_TARGET, JOB_REPLACE);
2098 break;
2099 }
2100
2101                         /* Run the exit target if there is one; if not, just exit. */
2102 if (manager_start_target(m, SPECIAL_EXIT_TARGET, JOB_REPLACE) < 0) {
2103 m->exit_code = MANAGER_EXIT;
2104 return 0;
2105 }
2106
2107 break;
2108
2109 case SIGWINCH:
2110 if (m->running_as == MANAGER_SYSTEM)
2111 manager_start_target(m, SPECIAL_KBREQUEST_TARGET, JOB_REPLACE);
2112
2113 /* This is a nop on non-init */
2114 break;
2115
2116 case SIGPWR:
2117 if (m->running_as == MANAGER_SYSTEM)
2118 manager_start_target(m, SPECIAL_SIGPWR_TARGET, JOB_REPLACE);
2119
2120 /* This is a nop on non-init */
2121 break;
2122
2123 case SIGUSR1: {
2124 Unit *u;
2125
2126 u = manager_get_unit(m, SPECIAL_DBUS_SERVICE);
2127
2128 if (!u || UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u))) {
2129 log_info("Trying to reconnect to bus...");
2130 bus_init(m, true);
2131 }
2132
2133 if (!u || !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u))) {
2134 log_info("Loading D-Bus service...");
2135 manager_start_target(m, SPECIAL_DBUS_SERVICE, JOB_REPLACE);
2136 }
2137
2138 break;
2139 }
2140
2141 case SIGUSR2: {
2142 FILE *f;
2143 char *dump = NULL;
2144 size_t size;
2145
2146 if (!(f = open_memstream(&dump, &size))) {
2147 log_warning("Failed to allocate memory stream.");
2148 break;
2149 }
2150
2151 manager_dump_units(m, f, "\t");
2152 manager_dump_jobs(m, f, "\t");
2153
2154 if (ferror(f)) {
2155 fclose(f);
2156 free(dump);
2157 log_warning("Failed to write status stream");
2158 break;
2159 }
2160
2161 fclose(f);
2162 log_dump(LOG_INFO, dump);
2163 free(dump);
2164
2165 break;
2166 }
2167
2168 case SIGHUP:
2169 m->exit_code = MANAGER_RELOAD;
2170 break;
2171
2172 default: {
2173 /* Starting SIGRTMIN+0 */
2174 static const char * const target_table[] = {
2175 [0] = SPECIAL_DEFAULT_TARGET,
2176 [1] = SPECIAL_RESCUE_TARGET,
2177 [2] = SPECIAL_EMERGENCY_TARGET,
2178 [3] = SPECIAL_HALT_TARGET,
2179 [4] = SPECIAL_POWEROFF_TARGET,
2180 [5] = SPECIAL_REBOOT_TARGET,
2181 [6] = SPECIAL_KEXEC_TARGET
2182 };
2183
2184 /* Starting at SIGRTMIN+13, so that the halt target (SIGRTMIN+3) and the direct halt request are 10 apart */
2185 static const ManagerExitCode code_table[] = {
2186 [0] = MANAGER_HALT,
2187 [1] = MANAGER_POWEROFF,
2188 [2] = MANAGER_REBOOT,
2189 [3] = MANAGER_KEXEC
2190 };
2191
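/* The two tables above drive the dispatching below: SIGRTMIN+0..+6
 * start the listed targets, SIGRTMIN+13..+16 request
 * halt/poweroff/reboot/kexec directly, and SIGRTMIN+20/+21 toggle
 * status output. For example, "kill -RTMIN+4 1" should start the
 * poweroff target, while "kill -RTMIN+14 1" sets the exit code to
 * MANAGER_POWEROFF directly. */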
2192 if ((int) sfsi.ssi_signo >= SIGRTMIN+0 &&
2193 (int) sfsi.ssi_signo < SIGRTMIN+(int) ELEMENTSOF(target_table)) {
2194 manager_start_target(m, target_table[sfsi.ssi_signo - SIGRTMIN],
2195 ((int) sfsi.ssi_signo == SIGRTMIN+1 || (int) sfsi.ssi_signo == SIGRTMIN+2) ? JOB_ISOLATE : JOB_REPLACE);
2196 break;
2197 }
2198
2199 if ((int) sfsi.ssi_signo >= SIGRTMIN+13 &&
2200 (int) sfsi.ssi_signo < SIGRTMIN+13+(int) ELEMENTSOF(code_table)) {
2201 m->exit_code = code_table[sfsi.ssi_signo - SIGRTMIN - 13];
2202 break;
2203 }
2204
2205 switch (sfsi.ssi_signo - SIGRTMIN) {
2206
2207 case 20:
2208 log_debug("Enabling showing of status.");
2209 m->show_status = true;
2210 break;
2211
2212 case 21:
2213 log_debug("Disabling showing of status.");
2214 m->show_status = false;
2215 break;
2216
2217 default:
2218 log_warning("Got unhandled signal <%s>.", strna(signal_to_string(sfsi.ssi_signo)));
2219 }
2220 }
2221 }
2222 }
2223
2224 if (sigchld)
2225 return manager_dispatch_sigchld(m);
2226
2227 return 0;
2228 }
2229
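/* Dispatch a single epoll event to whatever registered the Watch
 * stored in its data pointer: the signalfd, the notification
 * socket, unit/job timers, the mount, swap and udev subsystems or
 * the D-Bus adapters. */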
2230 static int process_event(Manager *m, struct epoll_event *ev) {
2231 int r;
2232 Watch *w;
2233
2234 assert(m);
2235 assert(ev);
2236
2237 assert_se(w = ev->data.ptr);
2238
2239 if (w->type == WATCH_INVALID)
2240 return 0;
2241
2242 switch (w->type) {
2243
2244 case WATCH_SIGNAL:
2245
2246 /* An incoming signal? */
2247 if (ev->events != EPOLLIN)
2248 return -EINVAL;
2249
2250 if ((r = manager_process_signal_fd(m)) < 0)
2251 return r;
2252
2253 break;
2254
2255 case WATCH_NOTIFY:
2256
2257 /* An incoming daemon notification event? */
2258 if (ev->events != EPOLLIN)
2259 return -EINVAL;
2260
2261 if ((r = manager_process_notify_fd(m)) < 0)
2262 return r;
2263
2264 break;
2265
2266 case WATCH_FD:
2267
2268 /* Some fd event, to be dispatched to the units */
2269 UNIT_VTABLE(w->data.unit)->fd_event(w->data.unit, w->fd, ev->events, w);
2270 break;
2271
2272 case WATCH_UNIT_TIMER:
2273 case WATCH_JOB_TIMER: {
2274 uint64_t v;
2275 ssize_t k;
2276
2277 /* Some timer event, to be dispatched to the units */
2278 if ((k = read(w->fd, &v, sizeof(v))) != sizeof(v)) {
2279
2280 if (k < 0 && (errno == EINTR || errno == EAGAIN))
2281 break;
2282
2283 return k < 0 ? -errno : -EIO;
2284 }
2285
2286 if (w->type == WATCH_UNIT_TIMER)
2287 UNIT_VTABLE(w->data.unit)->timer_event(w->data.unit, v, w);
2288 else
2289 job_timer_event(w->data.job, v, w);
2290 break;
2291 }
2292
2293 case WATCH_MOUNT:
2294 /* Some mount table change, intended for the mount subsystem */
2295 mount_fd_event(m, ev->events);
2296 break;
2297
2298 case WATCH_SWAP:
2299 /* Some swap table change, intended for the swap subsystem */
2300 swap_fd_event(m, ev->events);
2301 break;
2302
2303 case WATCH_UDEV:
2304 /* Some notification from udev, intended for the device subsystem */
2305 device_fd_event(m, ev->events);
2306 break;
2307
2308 case WATCH_DBUS_WATCH:
2309 bus_watch_event(m, w, ev->events);
2310 break;
2311
2312 case WATCH_DBUS_TIMEOUT:
2313 bus_timeout_event(m, w, ev->events);
2314 break;
2315
2316 default:
2317 log_error("event type=%i", w->type);
2318 assert_not_reached("Unknown epoll event type.");
2319 }
2320
2321 return 0;
2322 }
2323
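/* The main event loop: work through the load, run, bus, cleanup,
 * GC and D-Bus queues first, and only block in epoll_wait() once
 * all of them are empty; a rate limit guards against busy looping. */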
2324 int manager_loop(Manager *m) {
2325 int r;
2326
2327 RATELIMIT_DEFINE(rl, 1*USEC_PER_SEC, 1000);
2328
2329 assert(m);
2330 m->exit_code = MANAGER_RUNNING;
2331
2332 /* Release the path cache */
2333 set_free_free(m->unit_path_cache);
2334 m->unit_path_cache = NULL;
2335
2336 manager_check_finished(m);
2337
2338 /* There might still be some zombies hanging around from
2339 * before we were exec()'ed. Let's reap them. */
2340 if ((r = manager_dispatch_sigchld(m)) < 0)
2341 return r;
2342
2343 while (m->exit_code == MANAGER_RUNNING) {
2344 struct epoll_event event;
2345 int n;
2346
2347 if (!ratelimit_test(&rl)) {
2348 /* Something is going seriously wrong; pause a little */
2349 log_warning("Looping too fast. Throttling execution a little.");
2350 sleep(1);
2351 }
2352
2353 if (manager_dispatch_load_queue(m) > 0)
2354 continue;
2355
2356 if (manager_dispatch_run_queue(m) > 0)
2357 continue;
2358
2359 if (bus_dispatch(m) > 0)
2360 continue;
2361
2362 if (manager_dispatch_cleanup_queue(m) > 0)
2363 continue;
2364
2365 if (manager_dispatch_gc_queue(m) > 0)
2366 continue;
2367
2368 if (manager_dispatch_dbus_queue(m) > 0)
2369 continue;
2370
2371 if (swap_dispatch_reload(m) > 0)
2372 continue;
2373
2374 if ((n = epoll_wait(m->epoll_fd, &event, 1, -1)) < 0) {
2375
2376 if (errno == EINTR)
2377 continue;
2378
2379 return -errno;
2380 }
2381
2382 assert(n == 1);
2383
2384 if ((r = process_event(m, &event)) < 0)
2385 return r;
2386 }
2387
2388 return m->exit_code;
2389 }
2390
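/* Map a D-Bus object path back to a unit. The last path component
 * is unescaped with bus_path_unescape(); assuming the usual
 * escaping scheme, a (hypothetical) path like
 * "/org/freedesktop/systemd1/unit/foo_2eservice" would refer to
 * "foo.service". */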
2391 int manager_get_unit_from_dbus_path(Manager *m, const char *s, Unit **_u) {
2392 char *n;
2393 Unit *u;
2394
2395 assert(m);
2396 assert(s);
2397 assert(_u);
2398
2399 if (!startswith(s, "/org/freedesktop/systemd1/unit/"))
2400 return -EINVAL;
2401
2402 if (!(n = bus_path_unescape(s+31)))
2403 return -ENOMEM;
2404
2405 u = manager_get_unit(m, n);
2406 free(n);
2407
2408 if (!u)
2409 return -ENOENT;
2410
2411 *_u = u;
2412
2413 return 0;
2414 }
2415
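/* Map a job object path such as "/org/freedesktop/systemd1/job/42"
 * (example id) to the job with that numeric id. */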
2416 int manager_get_job_from_dbus_path(Manager *m, const char *s, Job **_j) {
2417 Job *j;
2418 unsigned id;
2419 int r;
2420
2421 assert(m);
2422 assert(s);
2423 assert(_j);
2424
2425 if (!startswith(s, "/org/freedesktop/systemd1/job/"))
2426 return -EINVAL;
2427
2428 if ((r = safe_atou(s + 30, &id)) < 0)
2429 return r;
2430
2431 if (!(j = manager_get_job(m, id)))
2432 return -ENOENT;
2433
2434 *_j = j;
2435
2436 return 0;
2437 }
2438
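/* Emit an audit record for a unit state change, but only if the
 * audit socket is open and we are not merely replaying previously
 * serialized state. */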
2439 void manager_send_unit_audit(Manager *m, Unit *u, int type, bool success) {
2440
2441 #ifdef HAVE_AUDIT
2442 char *p;
2443
2444 if (m->audit_fd < 0)
2445 return;
2446
2447 /* Don't generate audit events if the service was already
2448 * started and we're just deserializing */
2449 if (m->n_deserializing > 0)
2450 return;
2451
2452 if (!(p = unit_name_to_prefix_and_instance(u->meta.id))) {
2453 log_error("Failed to allocate unit name for audit message: %s", strerror(ENOMEM));
2454 return;
2455 }
2456
2457 if (audit_log_user_comm_message(m->audit_fd, type, "", p, NULL, NULL, NULL, success) < 0) {
2458 log_warning("Failed to send audit message: %m");
2459
2460 if (errno == EPERM) {
2461 /* We aren't allowed to send audit messages?
2462 * Then let's not retry again, to avoid
2463 * spamming the user with the same message
2464 * over and over again. */
2465
2466 audit_close(m->audit_fd);
2467 m->audit_fd = -1;
2468 }
2469 }
2470
2471 free(p);
2472 #endif
2473
2474 }
2475
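/* Notify Plymouth about a service, mount or swap unit over its
 * abstract socket, presumably so the boot splash can reflect
 * progress; the socket is non-blocking so a slow or absent
 * Plymouth never stalls us. */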
2476 void manager_send_unit_plymouth(Manager *m, Unit *u) {
2477 int fd = -1;
2478 union sockaddr_union sa;
2479 int n = 0;
2480 char *message = NULL;
2481
2482 /* Don't generate plymouth events if the service was already
2483 * started and we're just deserializing */
2484 if (m->n_deserializing > 0)
2485 return;
2486
2487 if (m->running_as != MANAGER_SYSTEM)
2488 return;
2489
2490 if (u->meta.type != UNIT_SERVICE &&
2491 u->meta.type != UNIT_MOUNT &&
2492 u->meta.type != UNIT_SWAP)
2493 return;
2494
2495 /* We set SOCK_NONBLOCK here so that we rather drop the
2496 * message than wait for Plymouth. */
2497 if ((fd = socket(AF_UNIX, SOCK_STREAM|SOCK_CLOEXEC|SOCK_NONBLOCK, 0)) < 0) {
2498 log_error("socket() failed: %m");
2499 return;
2500 }
2501
2502 zero(sa);
2503 sa.sa.sa_family = AF_UNIX;
2504 strncpy(sa.un.sun_path+1, "/org/freedesktop/plymouthd", sizeof(sa.un.sun_path)-1);
2505 if (connect(fd, &sa.sa, offsetof(struct sockaddr_un, sun_path) + 1 + strlen(sa.un.sun_path+1)) < 0) {
2506
2507 if (errno != EPIPE &&
2508 errno != EAGAIN &&
2509 errno != ENOENT &&
2510 errno != ECONNREFUSED &&
2511 errno != ECONNRESET &&
2512 errno != ECONNABORTED)
2513 log_error("connect() failed: %m");
2514
2515 goto finish;
2516 }
2517
2518 if (asprintf(&message, "U\002%c%s%n", (int) (strlen(u->meta.id) + 1), u->meta.id, &n) < 0) {
2519 log_error("Out of memory");
2520 goto finish;
2521 }
2522
2523 errno = 0;
2524 if (write(fd, message, n + 1) != n + 1) {
2525
2526 if (errno != EPIPE &&
2527 errno != EAGAIN &&
2528 errno != ENOENT &&
2529 errno != ECONNREFUSED &&
2530 errno != ECONNRESET &&
2531 errno != ECONNABORTED)
2532 log_error("Failed to write Plymouth message: %m");
2533
2534 goto finish;
2535 }
2536
2537 finish:
2538 if (fd >= 0)
2539 close_nointr_nofail(fd);
2540
2541 free(message);
2542 }
2543
2544 void manager_dispatch_bus_name_owner_changed(
2545 Manager *m,
2546 const char *name,
2547 const char* old_owner,
2548 const char *new_owner) {
2549
2550 Unit *u;
2551
2552 assert(m);
2553 assert(name);
2554
2555 if (!(u = hashmap_get(m->watch_bus, name)))
2556 return;
2557
2558 UNIT_VTABLE(u)->bus_name_owner_change(u, name, old_owner, new_owner);
2559 }
2560
2561 void manager_dispatch_bus_query_pid_done(
2562 Manager *m,
2563 const char *name,
2564 pid_t pid) {
2565
2566 Unit *u;
2567
2568 assert(m);
2569 assert(name);
2570 assert(pid >= 1);
2571
2572 if (!(u = hashmap_get(m->watch_bus, name)))
2573 return;
2574
2575 UNIT_VTABLE(u)->bus_query_pid_done(u, name, pid);
2576 }
2577
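/* Create an unlinked temporary file (under /dev/.run for the
 * system instance, /tmp otherwise) that carries serialized manager
 * state, e.g. across a reload or re-execution. */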
2578 int manager_open_serialization(Manager *m, FILE **_f) {
2579 char *path = NULL;
2580 mode_t saved_umask;
2581 int fd;
2582 FILE *f;
2583
2584 assert(_f);
2585
2586 if (m->running_as == MANAGER_SYSTEM)
2587 asprintf(&path, "/dev/.run/systemd/dump-%lu-XXXXXX", (unsigned long) getpid());
2588 else
2589 asprintf(&path, "/tmp/systemd-dump-%lu-XXXXXX", (unsigned long) getpid());
2590
2591 if (!path)
2592 return -ENOMEM;
2593
2594 saved_umask = umask(0077);
2595 fd = mkostemp(path, O_RDWR|O_CLOEXEC);
2596 umask(saved_umask);
2597
2598 if (fd < 0) {
2599 free(path);
2600 return -errno;
2601 }
2602
2603 unlink(path);
2604
2605 log_debug("Serializing state to %s", path);
2606 free(path);
2607
2608 if (!(f = fdopen(fd, "w+")))
2609 return -errno;
2610
2611 *_f = f;
2612
2613 return 0;
2614 }
2615
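/* Write the manager state to f: first a header of key=value
 * timestamp lines terminated by an empty line, then one block per
 * serializable unit, each introduced by the unit id as a start
 * marker. */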
2616 int manager_serialize(Manager *m, FILE *f, FDSet *fds) {
2617 Iterator i;
2618 Unit *u;
2619 const char *t;
2620 int r;
2621
2622 assert(m);
2623 assert(f);
2624 assert(fds);
2625
2626 dual_timestamp_serialize(f, "initrd-timestamp", &m->initrd_timestamp);
2627 dual_timestamp_serialize(f, "startup-timestamp", &m->startup_timestamp);
2628 dual_timestamp_serialize(f, "finish-timestamp", &m->finish_timestamp);
2629
2630 fputc('\n', f);
2631
2632 HASHMAP_FOREACH_KEY(u, t, m->units, i) {
2633 if (u->meta.id != t)
2634 continue;
2635
2636 if (!unit_can_serialize(u))
2637 continue;
2638
2639 /* Start marker */
2640 fputs(u->meta.id, f);
2641 fputc('\n', f);
2642
2643 if ((r = unit_serialize(u, f, fds)) < 0)
2644 return r;
2645 }
2646
2647 if (ferror(f))
2648 return -EIO;
2649
2650 return 0;
2651 }
2652
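/* Read back what manager_serialize() wrote: the timestamp header
 * up to the empty line, then the per-unit blocks, which are handed
 * to unit_deserialize(). While this runs, n_deserializing
 * suppresses audit and Plymouth notifications. */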
2653 int manager_deserialize(Manager *m, FILE *f, FDSet *fds) {
2654 int r = 0;
2655
2656 assert(m);
2657 assert(f);
2658
2659 log_debug("Deserializing state...");
2660
2661 m->n_deserializing ++;
2662
2663 for (;;) {
2664 char line[1024], *l;
2665
2666 if (!fgets(line, sizeof(line), f)) {
2667 if (feof(f))
2668 r = 0;
2669 else
2670 r = -errno;
2671
2672 goto finish;
2673 }
2674
2675 char_array_0(line);
2676 l = strstrip(line);
2677
2678 if (l[0] == 0)
2679 break;
2680
2681 if (startswith(l, "initrd-timestamp="))
2682 dual_timestamp_deserialize(l+17, &m->initrd_timestamp);
2683 else if (startswith(l, "startup-timestamp="))
2684 dual_timestamp_deserialize(l+18, &m->startup_timestamp);
2685 else if (startswith(l, "finish-timestamp="))
2686 dual_timestamp_deserialize(l+17, &m->finish_timestamp);
2687 else
2688 log_debug("Unknown serialization item '%s'", l);
2689 }
2690
2691 for (;;) {
2692 Unit *u;
2693 char name[UNIT_NAME_MAX+2];
2694
2695 /* Start marker */
2696 if (!fgets(name, sizeof(name), f)) {
2697 if (feof(f))
2698 r = 0;
2699 else
2700 r = -errno;
2701
2702 goto finish;
2703 }
2704
2705 char_array_0(name);
2706
2707 if ((r = manager_load_unit(m, strstrip(name), NULL, NULL, &u)) < 0)
2708 goto finish;
2709
2710 if ((r = unit_deserialize(u, f, fds)) < 0)
2711 goto finish;
2712 }
2713
2714 finish:
2715 if (ferror(f))
2716 r = -EIO;
2719
2720 assert(m->n_deserializing > 0);
2721 m->n_deserializing --;
2722
2723 return r;
2724 }
2725
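/* Reload sequence: serialize the current state to a temporary
 * file, drop all jobs and units, rebuild lookup paths and
 * generators, re-enumerate units, then deserialize the saved state
 * and coldplug everything back into place. */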
2726 int manager_reload(Manager *m) {
2727 int r, q;
2728 FILE *f;
2729 FDSet *fds;
2730
2731 assert(m);
2732
2733 if ((r = manager_open_serialization(m, &f)) < 0)
2734 return r;
2735
2736 if (!(fds = fdset_new())) {
2737 r = -ENOMEM;
2738 goto finish;
2739 }
2740
2741 if ((r = manager_serialize(m, f, fds)) < 0)
2742 goto finish;
2743
2744 if (fseeko(f, 0, SEEK_SET) < 0) {
2745 r = -errno;
2746 goto finish;
2747 }
2748
2749 /* From here on there is no way back. */
2750 manager_clear_jobs_and_units(m);
2751 manager_undo_generators(m);
2752
2753 /* Find new unit paths */
2754 lookup_paths_free(&m->lookup_paths);
2755 if ((q = lookup_paths_init(&m->lookup_paths, m->running_as)) < 0)
2756 r = q;
2757
2758 manager_run_generators(m);
2759
2760 manager_build_unit_path_cache(m);
2761
2762 m->n_deserializing ++;
2763
2764 /* First, enumerate what we can from all config files */
2765 if ((q = manager_enumerate(m)) < 0)
2766 r = q;
2767
2768 /* Second, deserialize our stored data */
2769 if ((q = manager_deserialize(m, f, fds)) < 0)
2770 r = q;
2771
2772 fclose(f);
2773 f = NULL;
2774
2775 /* Third, fire things up! */
2776 if ((q = manager_coldplug(m)) < 0)
2777 r = q;
2778
2779 assert(m->n_deserializing > 0);
2780 m->n_deserializing --;
2781
2782 finish:
2783 if (f)
2784 fclose(f);
2785
2786 if (fds)
2787 fdset_free(fds);
2788
2789 return r;
2790 }
2791
2792 bool manager_is_booting_or_shutting_down(Manager *m) {
2793 Unit *u;
2794
2795 assert(m);
2796
2797 /* Is the initial job still around? */
2798 if (manager_get_job(m, 1))
2799 return true;
2800
2801 /* Is there a job for the shutdown target? */
2802 if ((u = manager_get_unit(m, SPECIAL_SHUTDOWN_TARGET)))
2803 return !!u->meta.job;
2804
2805 return false;
2806 }
2807
2808 void manager_reset_failed(Manager *m) {
2809 Unit *u;
2810 Iterator i;
2811
2812 assert(m);
2813
2814 HASHMAP_FOREACH(u, m->units, i)
2815 unit_reset_failed(u);
2816 }
2817
2818 bool manager_unit_pending_inactive(Manager *m, const char *name) {
2819 Unit *u;
2820
2821 assert(m);
2822 assert(name);
2823
2824 /* Returns true if the unit is inactive or going down */
2825 if (!(u = manager_get_unit(m, name)))
2826 return true;
2827
2828 return unit_pending_inactive(u);
2829 }
2830
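/* Once the last job has finished, record the finish timestamp and
 * log how long the kernel, initrd and userspace phases of startup
 * took. */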
2831 void manager_check_finished(Manager *m) {
2832 char userspace[FORMAT_TIMESPAN_MAX], initrd[FORMAT_TIMESPAN_MAX], kernel[FORMAT_TIMESPAN_MAX], sum[FORMAT_TIMESPAN_MAX];
2833
2834 assert(m);
2835
2836 if (dual_timestamp_is_set(&m->finish_timestamp))
2837 return;
2838
2839 if (hashmap_size(m->jobs) > 0)
2840 return;
2841
2842 dual_timestamp_get(&m->finish_timestamp);
2843
2844 if (m->running_as == MANAGER_SYSTEM) {
2845 if (dual_timestamp_is_set(&m->initrd_timestamp)) {
2846 log_info("Startup finished in %s (kernel) + %s (initrd) + %s (userspace) = %s.",
2847 format_timespan(kernel, sizeof(kernel),
2848 m->initrd_timestamp.monotonic),
2849 format_timespan(initrd, sizeof(initrd),
2850 m->startup_timestamp.monotonic - m->initrd_timestamp.monotonic),
2851 format_timespan(userspace, sizeof(userspace),
2852 m->finish_timestamp.monotonic - m->startup_timestamp.monotonic),
2853 format_timespan(sum, sizeof(sum),
2854 m->finish_timestamp.monotonic));
2855 } else
2856 log_info("Startup finished in %s (kernel) + %s (userspace) = %s.",
2857 format_timespan(kernel, sizeof(kernel),
2858 m->startup_timestamp.monotonic),
2859 format_timespan(userspace, sizeof(userspace),
2860 m->finish_timestamp.monotonic - m->startup_timestamp.monotonic),
2861 format_timespan(sum, sizeof(sum),
2862 m->finish_timestamp.monotonic));
2863 } else
2864 log_debug("Startup finished in %s.",
2865 format_timespan(userspace, sizeof(userspace),
2866 m->finish_timestamp.monotonic - m->startup_timestamp.monotonic));
2867
2868 }
2869
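/* Run every executable in the generator directory, pointing it at
 * a private temporary directory it may write units into; if
 * anything was generated, that directory is appended to the unit
 * search path. */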
2870 void manager_run_generators(Manager *m) {
2871 DIR *d = NULL;
2872 const char *generator_path;
2873 const char *argv[3];
2874
2875 assert(m);
2876
2877 generator_path = m->running_as == MANAGER_SYSTEM ? SYSTEM_GENERATOR_PATH : USER_GENERATOR_PATH;
2878 if (!(d = opendir(generator_path))) {
2879
2880 if (errno == ENOENT)
2881 return;
2882
2883 log_error("Failed to enumerate generator directory: %m");
2884 return;
2885 }
2886
2887 if (!m->generator_unit_path) {
2888 char *p;
2889 char system_path[] = "/dev/.run/systemd/generator-XXXXXX",
2890 user_path[] = "/tmp/systemd-generator-XXXXXX";
2891
2892 if (!(p = mkdtemp(m->running_as == MANAGER_SYSTEM ? system_path : user_path))) {
2893 log_error("Failed to create generator directory: %m");
2894 goto finish;
2895 }
2896
2897 if (!(m->generator_unit_path = strdup(p))) {
2898 log_error("Failed to allocate generator unit path.");
2899 goto finish;
2900 }
2901 }
2902
2903 argv[0] = NULL; /* Leave this empty, execute_directory() will fill something in */
2904 argv[1] = m->generator_unit_path;
2905 argv[2] = NULL;
2906
2907 execute_directory(generator_path, d, (char**) argv);
2908
2909 if (rmdir(m->generator_unit_path) >= 0) {
2910 /* We were able to remove the directory? Then it was
2911 * empty, i.e. no generator created anything, so we can
2912 * skip adding it to the unit search path. */
2913
2914 free(m->generator_unit_path);
2915 m->generator_unit_path = NULL;
2916 goto finish;
2917 }
2918
2919 if (!strv_find(m->lookup_paths.unit_path, m->generator_unit_path)) {
2920 char **l;
2921
2922 if (!(l = strv_append(m->lookup_paths.unit_path, m->generator_unit_path))) {
2923 log_error("Failed to add generator directory to unit search path: %m");
2924 goto finish;
2925 }
2926
2927 strv_free(m->lookup_paths.unit_path);
2928 m->lookup_paths.unit_path = l;
2929
2930 log_debug("Added generator unit path %s to search path.", m->generator_unit_path);
2931 }
2932
2933 finish:
2934 if (d)
2935 closedir(d);
2936 }
2937
2938 void manager_undo_generators(Manager *m) {
2939 assert(m);
2940
2941 if (!m->generator_unit_path)
2942 return;
2943
2944 strv_remove(m->lookup_paths.unit_path, m->generator_unit_path);
2945 rm_rf(m->generator_unit_path, false, true);
2946
2947 free(m->generator_unit_path);
2948 m->generator_unit_path = NULL;
2949 }
2950
2951 int manager_set_default_controllers(Manager *m, char **controllers) {
2952 char **l;
2953
2954 assert(m);
2955
2956 if (!(l = strv_copy(controllers)))
2957 return -ENOMEM;
2958
2959 strv_free(m->default_controllers);
2960 m->default_controllers = l;
2961
2962 return 0;
2963 }
2964
2965 static const char* const manager_running_as_table[_MANAGER_RUNNING_AS_MAX] = {
2966 [MANAGER_SYSTEM] = "system",
2967 [MANAGER_USER] = "user"
2968 };
2969
2970 DEFINE_STRING_TABLE_LOOKUP(manager_running_as, ManagerRunningAs);