src/manager.c
1 /*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/
2
3 /***
4 This file is part of systemd.
5
6 Copyright 2010 Lennart Poettering
7
8 systemd is free software; you can redistribute it and/or modify it
9 under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2 of the License, or
11 (at your option) any later version.
12
13 systemd is distributed in the hope that it will be useful, but
14 WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with systemd; If not, see <http://www.gnu.org/licenses/>.
20 ***/
21
22 #include <assert.h>
23 #include <errno.h>
24 #include <string.h>
25 #include <sys/epoll.h>
26 #include <signal.h>
27 #include <sys/signalfd.h>
28 #include <sys/wait.h>
29 #include <unistd.h>
30 #include <sys/poll.h>
31 #include <sys/reboot.h>
32 #include <sys/ioctl.h>
33 #include <linux/kd.h>
34 #include <termios.h>
35 #include <fcntl.h>
36 #include <sys/types.h>
37 #include <sys/stat.h>
38 #include <dirent.h>
39
40 #ifdef HAVE_AUDIT
41 #include <libaudit.h>
42 #endif
43
44 #include "manager.h"
45 #include "hashmap.h"
46 #include "macro.h"
47 #include "strv.h"
48 #include "log.h"
49 #include "util.h"
50 #include "ratelimit.h"
51 #include "cgroup.h"
52 #include "mount-setup.h"
53 #include "unit-name.h"
54 #include "dbus-unit.h"
55 #include "dbus-job.h"
56 #include "missing.h"
57 #include "path-lookup.h"
58 #include "special.h"
59 #include "bus-errors.h"
60 #include "exit-status.h"
61
62 /* As soon as 16 units are in our GC queue, make sure to run a gc sweep */
63 #define GC_QUEUE_ENTRIES_MAX 16
64
65 /* As soon as 10s have passed since a unit was added to our GC queue, make sure to run a gc sweep */
66 #define GC_QUEUE_USEC_MAX (10*USEC_PER_SEC)
67
68 /* Where clients shall send notification messages to */
69 #define NOTIFY_SOCKET_SYSTEM "/dev/.run/systemd/notify"
70 #define NOTIFY_SOCKET_USER "@/org/freedesktop/systemd1/notify"
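/* A leading '@' denotes a Linux abstract-namespace socket; it is the textual stand-in for the leading NUL byte that the code below swaps in before bind() and restores afterwards. */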
71
72 static int manager_setup_notify(Manager *m) {
73 union {
74 struct sockaddr sa;
75 struct sockaddr_un un;
76 } sa;
77 struct epoll_event ev;
78 int one = 1;
79
80 assert(m);
81
82 m->notify_watch.type = WATCH_NOTIFY;
83 if ((m->notify_watch.fd = socket(AF_UNIX, SOCK_DGRAM|SOCK_CLOEXEC|SOCK_NONBLOCK, 0)) < 0) {
84 log_error("Failed to allocate notification socket: %m");
85 return -errno;
86 }
87
88 zero(sa);
89 sa.sa.sa_family = AF_UNIX;
90
91 if (getpid() != 1)
92 snprintf(sa.un.sun_path, sizeof(sa.un.sun_path), NOTIFY_SOCKET_USER "/%llu", random_ull());
93 else {
94 unlink(NOTIFY_SOCKET_SYSTEM);
95 strncpy(sa.un.sun_path, NOTIFY_SOCKET_SYSTEM, sizeof(sa.un.sun_path));
96 }
97
98 if (sa.un.sun_path[0] == '@')
99 sa.un.sun_path[0] = 0;
100
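/* The address length handed to bind() works for both flavours: for an abstract socket it is offsetof(sun_path) plus the leading NUL plus the name behind it; for a filesystem socket it adds up to offsetof(sun_path) plus the path length. */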
101 if (bind(m->notify_watch.fd, &sa.sa, offsetof(struct sockaddr_un, sun_path) + 1 + strlen(sa.un.sun_path+1)) < 0) {
102 log_error("bind() failed: %m");
103 return -errno;
104 }
105
106 if (setsockopt(m->notify_watch.fd, SOL_SOCKET, SO_PASSCRED, &one, sizeof(one)) < 0) {
107 log_error("SO_PASSCRED failed: %m");
108 return -errno;
109 }
110
111 zero(ev);
112 ev.events = EPOLLIN;
113 ev.data.ptr = &m->notify_watch;
114
115 if (epoll_ctl(m->epoll_fd, EPOLL_CTL_ADD, m->notify_watch.fd, &ev) < 0)
116 return -errno;
117
118 if (sa.un.sun_path[0] == 0)
119 sa.un.sun_path[0] = '@';
120
121 if (!(m->notify_socket = strdup(sa.un.sun_path)))
122 return -ENOMEM;
123
124 log_debug("Using notification socket %s", m->notify_socket);
125
126 return 0;
127 }
128
129 static int enable_special_signals(Manager *m) {
130 int fd;
131
132 assert(m);
133
134 /* Make the kernel deliver SIGINT to us on Ctrl-Alt-Del instead of rebooting immediately */
135 if (reboot(RB_DISABLE_CAD) < 0)
136 log_warning("Failed to enable ctrl-alt-del handling: %m");
137
138 if ((fd = open_terminal("/dev/tty0", O_RDWR|O_NOCTTY)) < 0)
139 log_warning("Failed to open /dev/tty0: %m");
140 else {
141 /* Make the kernel deliver SIGWINCH to us on kbrequest (Alt-ArrowUp) */
142 if (ioctl(fd, KDSIGACCEPT, SIGWINCH) < 0)
143 log_warning("Failed to enable kbrequest handling: %s", strerror(errno));
144
145 close_nointr_nofail(fd);
146 }
147
148 return 0;
149 }
150
151 static int manager_setup_signals(Manager *m) {
152 sigset_t mask;
153 struct epoll_event ev;
154 struct sigaction sa;
155
156 assert(m);
157
158 /* We are not interested in SIGSTOP and friends. */
159 zero(sa);
160 sa.sa_handler = SIG_DFL;
161 sa.sa_flags = SA_NOCLDSTOP|SA_RESTART;
162 assert_se(sigaction(SIGCHLD, &sa, NULL) == 0);
163
164 assert_se(sigemptyset(&mask) == 0);
165
166 sigset_add_many(&mask,
167 SIGCHLD, /* Child died */
168 SIGTERM, /* Reexecute daemon */
169 SIGHUP, /* Reload configuration */
170 SIGUSR1, /* systemd/upstart: reconnect to D-Bus */
171 SIGUSR2, /* systemd: dump status */
172 SIGINT, /* Kernel sends us this on control-alt-del */
173 SIGWINCH, /* Kernel sends us this on kbrequest (alt-arrowup) */
174 SIGPWR, /* Some kernel drivers and upsd send us this on power failure */
175 SIGRTMIN+0, /* systemd: start default.target */
176 SIGRTMIN+1, /* systemd: isolate rescue.target */
177 SIGRTMIN+2, /* systemd: isolate emergency.target */
178 SIGRTMIN+3, /* systemd: start halt.target */
179 SIGRTMIN+4, /* systemd: start poweroff.target */
180 SIGRTMIN+5, /* systemd: start reboot.target */
181 SIGRTMIN+6, /* systemd: start kexec.target */
182 SIGRTMIN+13, /* systemd: Immediate halt */
183 SIGRTMIN+14, /* systemd: Immediate poweroff */
184 SIGRTMIN+15, /* systemd: Immediate reboot */
185 SIGRTMIN+16, /* systemd: Immediate kexec */
186 SIGRTMIN+20, /* systemd: enable status messages */
187 SIGRTMIN+21, /* systemd: disable status messages */
188 -1);
189 assert_se(sigprocmask(SIG_SETMASK, &mask, NULL) == 0);
190
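/* The signals above are now blocked for ordinary delivery; instead we pick them up synchronously from a signalfd that is wired into our epoll loop below. */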
191 m->signal_watch.type = WATCH_SIGNAL;
192 if ((m->signal_watch.fd = signalfd(-1, &mask, SFD_NONBLOCK|SFD_CLOEXEC)) < 0)
193 return -errno;
194
195 zero(ev);
196 ev.events = EPOLLIN;
197 ev.data.ptr = &m->signal_watch;
198
199 if (epoll_ctl(m->epoll_fd, EPOLL_CTL_ADD, m->signal_watch.fd, &ev) < 0)
200 return -errno;
201
202 if (m->running_as == MANAGER_SYSTEM)
203 return enable_special_signals(m);
204
205 return 0;
206 }
207
208 int manager_new(ManagerRunningAs running_as, Manager **_m) {
209 Manager *m;
210 int r = -ENOMEM;
211
212 assert(_m);
213 assert(running_as >= 0);
214 assert(running_as < _MANAGER_RUNNING_AS_MAX);
215
216 if (!(m = new0(Manager, 1)))
217 return -ENOMEM;
218
219 dual_timestamp_get(&m->startup_timestamp);
220
221 m->running_as = running_as;
222 m->name_data_slot = m->subscribed_data_slot = -1;
223 m->exit_code = _MANAGER_EXIT_CODE_INVALID;
224 m->pin_cgroupfs_fd = -1;
225
226 #ifdef HAVE_AUDIT
227 m->audit_fd = -1;
228 #endif
229
230 m->signal_watch.fd = m->mount_watch.fd = m->udev_watch.fd = m->epoll_fd = m->dev_autofs_fd = m->swap_watch.fd = -1;
231 m->current_job_id = 1; /* start as id #1, so that we can leave #0 around as "null-like" value */
232
233 if (!(m->environment = strv_copy(environ)))
234 goto fail;
235
236 if (!(m->default_controllers = strv_new("cpu", NULL)))
237 goto fail;
238
239 if (!(m->units = hashmap_new(string_hash_func, string_compare_func)))
240 goto fail;
241
242 if (!(m->jobs = hashmap_new(trivial_hash_func, trivial_compare_func)))
243 goto fail;
244
245 if (!(m->transaction_jobs = hashmap_new(trivial_hash_func, trivial_compare_func)))
246 goto fail;
247
248 if (!(m->watch_pids = hashmap_new(trivial_hash_func, trivial_compare_func)))
249 goto fail;
250
251 if (!(m->cgroup_bondings = hashmap_new(string_hash_func, string_compare_func)))
252 goto fail;
253
254 if (!(m->watch_bus = hashmap_new(string_hash_func, string_compare_func)))
255 goto fail;
256
257 if ((m->epoll_fd = epoll_create1(EPOLL_CLOEXEC)) < 0)
258 goto fail;
259
260 if ((r = lookup_paths_init(&m->lookup_paths, m->running_as)) < 0)
261 goto fail;
262
263 if ((r = manager_setup_signals(m)) < 0)
264 goto fail;
265
266 if ((r = manager_setup_cgroup(m)) < 0)
267 goto fail;
268
269 if ((r = manager_setup_notify(m)) < 0)
270 goto fail;
271
272 /* Try to connect to the busses, if possible. */
273 if ((r = bus_init(m, running_as != MANAGER_SYSTEM)) < 0)
274 goto fail;
275
276 #ifdef HAVE_AUDIT
277 if ((m->audit_fd = audit_open()) < 0)
278 log_error("Failed to connect to audit log: %m");
279 #endif
280
281 *_m = m;
282 return 0;
283
284 fail:
285 manager_free(m);
286 return r;
287 }
288
289 static unsigned manager_dispatch_cleanup_queue(Manager *m) {
290 Meta *meta;
291 unsigned n = 0;
292
293 assert(m);
294
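/* Note: unit_free() is expected to unlink the unit from the cleanup queue, which is what makes this loop advance. */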
295 while ((meta = m->cleanup_queue)) {
296 assert(meta->in_cleanup_queue);
297
298 unit_free((Unit*) meta);
299 n++;
300 }
301
302 return n;
303 }
304
305 enum {
306 GC_OFFSET_IN_PATH, /* This one is on the path we were traveling */
307 GC_OFFSET_UNSURE, /* No clue */
308 GC_OFFSET_GOOD, /* We still need this unit */
309 GC_OFFSET_BAD, /* We don't need this unit anymore */
310 _GC_OFFSET_MAX
311 };
312
313 static void unit_gc_sweep(Unit *u, unsigned gc_marker) {
314 Iterator i;
315 Unit *other;
316 bool is_bad;
317
318 assert(u);
319
320 if (u->meta.gc_marker == gc_marker + GC_OFFSET_GOOD ||
321 u->meta.gc_marker == gc_marker + GC_OFFSET_BAD ||
322 u->meta.gc_marker == gc_marker + GC_OFFSET_IN_PATH)
323 return;
324
325 if (u->meta.in_cleanup_queue)
326 goto bad;
327
328 if (unit_check_gc(u))
329 goto good;
330
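/* Mark this unit as lying on the current traversal path before we recurse; if the recursion loops back to it, the check at the top returns early, so cycles of references cannot recurse forever. */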
331 u->meta.gc_marker = gc_marker + GC_OFFSET_IN_PATH;
332
333 is_bad = true;
334
335 SET_FOREACH(other, u->meta.dependencies[UNIT_REFERENCED_BY], i) {
336 unit_gc_sweep(other, gc_marker);
337
338 if (other->meta.gc_marker == gc_marker + GC_OFFSET_GOOD)
339 goto good;
340
341 if (other->meta.gc_marker != gc_marker + GC_OFFSET_BAD)
342 is_bad = false;
343 }
344
345 if (is_bad)
346 goto bad;
347
348 /* We were unable to find anything out about this entry, so
349 * let's investigate it later */
350 u->meta.gc_marker = gc_marker + GC_OFFSET_UNSURE;
351 unit_add_to_gc_queue(u);
352 return;
353
354 bad:
355 /* We definitely know that this one is not useful anymore, so
356 * let's mark it for deletion */
357 u->meta.gc_marker = gc_marker + GC_OFFSET_BAD;
358 unit_add_to_cleanup_queue(u);
359 return;
360
361 good:
362 u->meta.gc_marker = gc_marker + GC_OFFSET_GOOD;
363 }
364
365 static unsigned manager_dispatch_gc_queue(Manager *m) {
366 Meta *meta;
367 unsigned n = 0;
368 unsigned gc_marker;
369
370 assert(m);
371
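/* Only run a sweep once the queue has grown to GC_QUEUE_ENTRIES_MAX entries, or the oldest queued entry has waited longer than GC_QUEUE_USEC_MAX. */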
372 if ((m->n_in_gc_queue < GC_QUEUE_ENTRIES_MAX) &&
373 (m->gc_queue_timestamp <= 0 ||
374 (m->gc_queue_timestamp + GC_QUEUE_USEC_MAX) > now(CLOCK_MONOTONIC)))
375 return 0;
376
377 log_debug("Running GC...");
378
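/* Advance the base marker by a full offset range so that per-unit markers left over from earlier sweeps are automatically stale; if the counter wraps around, restart it at 1 so it stays non-zero. */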
379 m->gc_marker += _GC_OFFSET_MAX;
380 if (m->gc_marker + _GC_OFFSET_MAX <= _GC_OFFSET_MAX)
381 m->gc_marker = 1;
382
383 gc_marker = m->gc_marker;
384
385 while ((meta = m->gc_queue)) {
386 assert(meta->in_gc_queue);
387
388 unit_gc_sweep((Unit*) meta, gc_marker);
389
390 LIST_REMOVE(Meta, gc_queue, m->gc_queue, meta);
391 meta->in_gc_queue = false;
392
393 n++;
394
395 if (meta->gc_marker == gc_marker + GC_OFFSET_BAD ||
396 meta->gc_marker == gc_marker + GC_OFFSET_UNSURE) {
397 log_debug("Collecting %s", meta->id);
398 meta->gc_marker = gc_marker + GC_OFFSET_BAD;
399 unit_add_to_cleanup_queue((Unit*) meta);
400 }
401 }
402
403 m->n_in_gc_queue = 0;
404 m->gc_queue_timestamp = 0;
405
406 return n;
407 }
408
409 static void manager_clear_jobs_and_units(Manager *m) {
410 Job *j;
411 Unit *u;
412
413 assert(m);
414
415 while ((j = hashmap_first(m->transaction_jobs)))
416 job_free(j);
417
418 while ((u = hashmap_first(m->units)))
419 unit_free(u);
420
421 manager_dispatch_cleanup_queue(m);
422
423 assert(!m->load_queue);
424 assert(!m->run_queue);
425 assert(!m->dbus_unit_queue);
426 assert(!m->dbus_job_queue);
427 assert(!m->cleanup_queue);
428 assert(!m->gc_queue);
429
430 assert(hashmap_isempty(m->transaction_jobs));
431 assert(hashmap_isempty(m->jobs));
432 assert(hashmap_isempty(m->units));
433 }
434
435 void manager_free(Manager *m) {
436 UnitType c;
437
438 assert(m);
439
440 manager_clear_jobs_and_units(m);
441
442 for (c = 0; c < _UNIT_TYPE_MAX; c++)
443 if (unit_vtable[c]->shutdown)
444 unit_vtable[c]->shutdown(m);
445
446 /* If we reexecute ourselves, we keep the root cgroup
447 * around */
448 manager_shutdown_cgroup(m, m->exit_code != MANAGER_REEXECUTE);
449
450 manager_undo_generators(m);
451
452 bus_done(m);
453
454 hashmap_free(m->units);
455 hashmap_free(m->jobs);
456 hashmap_free(m->transaction_jobs);
457 hashmap_free(m->watch_pids);
458 hashmap_free(m->watch_bus);
459
460 if (m->epoll_fd >= 0)
461 close_nointr_nofail(m->epoll_fd);
462 if (m->signal_watch.fd >= 0)
463 close_nointr_nofail(m->signal_watch.fd);
464 if (m->notify_watch.fd >= 0)
465 close_nointr_nofail(m->notify_watch.fd);
466
467 #ifdef HAVE_AUDIT
468 if (m->audit_fd >= 0)
469 audit_close(m->audit_fd);
470 #endif
471
472 free(m->notify_socket);
473
474 lookup_paths_free(&m->lookup_paths);
475 strv_free(m->environment);
476
477 strv_free(m->default_controllers);
478
479 hashmap_free(m->cgroup_bondings);
480 set_free_free(m->unit_path_cache);
481
482 free(m);
483 }
484
485 int manager_enumerate(Manager *m) {
486 int r = 0, q;
487 UnitType c;
488
489 assert(m);
490
491 /* Let's ask every type to load all units from disk/kernel
492 * that it might know */
493 for (c = 0; c < _UNIT_TYPE_MAX; c++)
494 if (unit_vtable[c]->enumerate)
495 if ((q = unit_vtable[c]->enumerate(m)) < 0)
496 r = q;
497
498 manager_dispatch_load_queue(m);
499 return r;
500 }
501
502 int manager_coldplug(Manager *m) {
503 int r = 0, q;
504 Iterator i;
505 Unit *u;
506 char *k;
507
508 assert(m);
509
510 /* Then, let's set up their initial state. */
511 HASHMAP_FOREACH_KEY(u, k, m->units, i) {
512
513 /* ignore aliases */
514 if (u->meta.id != k)
515 continue;
516
517 if ((q = unit_coldplug(u)) < 0)
518 r = q;
519 }
520
521 return r;
522 }
523
524 static void manager_build_unit_path_cache(Manager *m) {
525 char **i;
526 DIR *d = NULL;
527 int r;
528
529 assert(m);
530
531 set_free_free(m->unit_path_cache);
532
533 if (!(m->unit_path_cache = set_new(string_hash_func, string_compare_func))) {
534 log_error("Failed to allocate unit path cache.");
535 return;
536 }
537
538 /* This simply builds a list of files we know exist, so that
539 * we don't always have to go to disk */
540
541 STRV_FOREACH(i, m->lookup_paths.unit_path) {
542 struct dirent *de;
543
544 if (!(d = opendir(*i))) {
545 log_error("Failed to open directory: %m");
546 continue;
547 }
548
549 while ((de = readdir(d))) {
550 char *p;
551
552 if (ignore_file(de->d_name))
553 continue;
554
555 if (asprintf(&p, "%s/%s", streq(*i, "/") ? "" : *i, de->d_name) < 0) {
556 r = -ENOMEM;
557 goto fail;
558 }
559
560 if ((r = set_put(m->unit_path_cache, p)) < 0) {
561 free(p);
562 goto fail;
563 }
564 }
565
566 closedir(d);
567 d = NULL;
568 }
569
570 return;
571
572 fail:
573 log_error("Failed to build unit path cache: %s", strerror(-r));
574
575 set_free_free(m->unit_path_cache);
576 m->unit_path_cache = NULL;
577
578 if (d)
579 closedir(d);
580 }
581
582 int manager_startup(Manager *m, FILE *serialization, FDSet *fds) {
583 int r, q;
584
585 assert(m);
586
587 manager_run_generators(m);
588
589 manager_build_unit_path_cache(m);
590
591 /* If we will deserialize make sure that during enumeration
592 * this is already known, so we increase the counter here
593 * already */
594 if (serialization)
595 m->n_deserializing ++;
596
597 /* First, enumerate what we can from all config files */
598 r = manager_enumerate(m);
599
600 /* Second, deserialize if there is something to deserialize */
601 if (serialization)
602 if ((q = manager_deserialize(m, serialization, fds)) < 0)
603 r = q;
604
605 /* Third, fire things up! */
606 if ((q = manager_coldplug(m)) < 0)
607 r = q;
608
609 if (serialization) {
610 assert(m->n_deserializing > 0);
611 m->n_deserializing --;
612 }
613
614 return r;
615 }
616
617 static void transaction_delete_job(Manager *m, Job *j, bool delete_dependencies) {
618 assert(m);
619 assert(j);
620
621 /* Deletes one job from the transaction */
622
623 manager_transaction_unlink_job(m, j, delete_dependencies);
624
625 if (!j->installed)
626 job_free(j);
627 }
628
629 static void transaction_delete_unit(Manager *m, Unit *u) {
630 Job *j;
631
632 /* Deletes all jobs associated with a certain unit from the
633 * transaction */
634
635 while ((j = hashmap_get(m->transaction_jobs, u)))
636 transaction_delete_job(m, j, true);
637 }
638
639 static void transaction_clean_dependencies(Manager *m) {
640 Iterator i;
641 Job *j;
642
643 assert(m);
644
645 /* Drops all dependencies of all installed jobs */
646
647 HASHMAP_FOREACH(j, m->jobs, i) {
648 while (j->subject_list)
649 job_dependency_free(j->subject_list);
650 while (j->object_list)
651 job_dependency_free(j->object_list);
652 }
653
654 assert(!m->transaction_anchor);
655 }
656
657 static void transaction_abort(Manager *m) {
658 Job *j;
659
660 assert(m);
661
662 while ((j = hashmap_first(m->transaction_jobs)))
663 if (j->installed)
664 transaction_delete_job(m, j, true);
665 else
666 job_free(j);
667
668 assert(hashmap_isempty(m->transaction_jobs));
669
670 transaction_clean_dependencies(m);
671 }
672
673 static void transaction_find_jobs_that_matter_to_anchor(Manager *m, Job *j, unsigned generation) {
674 JobDependency *l;
675
676 assert(m);
677
678 /* A recursive sweep through the graph that marks all units
679 * that matter to the anchor job, i.e. are directly or
680 * indirectly a dependency of the anchor job via paths that
681 * are fully marked as mattering. */
682
683 if (j)
684 l = j->subject_list;
685 else
686 l = m->transaction_anchor;
687
688 LIST_FOREACH(subject, l, l) {
689
690 /* This link does not matter */
691 if (!l->matters)
692 continue;
693
694 /* This unit has already been marked */
695 if (l->object->generation == generation)
696 continue;
697
698 l->object->matters_to_anchor = true;
699 l->object->generation = generation;
700
701 transaction_find_jobs_that_matter_to_anchor(m, l->object, generation);
702 }
703 }
704
705 static void transaction_merge_and_delete_job(Manager *m, Job *j, Job *other, JobType t) {
706 JobDependency *l, *last;
707
708 assert(j);
709 assert(other);
710 assert(j->unit == other->unit);
711 assert(!j->installed);
712
713 /* Merges 'other' into 'j' and then deletes 'other'. */
714
715 j->type = t;
716 j->state = JOB_WAITING;
717 j->override = j->override || other->override;
718
719 j->matters_to_anchor = j->matters_to_anchor || other->matters_to_anchor;
720
721 /* Patch us in as new owner of the JobDependency objects */
722 last = NULL;
723 LIST_FOREACH(subject, l, other->subject_list) {
724 assert(l->subject == other);
725 l->subject = j;
726 last = l;
727 }
728
729 /* Merge both lists */
730 if (last) {
731 last->subject_next = j->subject_list;
732 if (j->subject_list)
733 j->subject_list->subject_prev = last;
734 j->subject_list = other->subject_list;
735 }
736
737 /* Patch us in as new owner of the JobDependency objects */
738 last = NULL;
739 LIST_FOREACH(object, l, other->object_list) {
740 assert(l->object == other);
741 l->object = j;
742 last = l;
743 }
744
745 /* Merge both lists */
746 if (last) {
747 last->object_next = j->object_list;
748 if (j->object_list)
749 j->object_list->object_prev = last;
750 j->object_list = other->object_list;
751 }
752
753 /* Kill the other job */
754 other->subject_list = NULL;
755 other->object_list = NULL;
756 transaction_delete_job(m, other, true);
757 }
758 static bool job_is_conflicted_by(Job *j) {
759 JobDependency *l;
760
761 assert(j);
762
763 /* Returns true if this job is pulled in by at least one
764 * ConflictedBy dependency. */
765
766 LIST_FOREACH(object, l, j->object_list)
767 if (l->conflicts)
768 return true;
769
770 return false;
771 }
772
773 static int delete_one_unmergeable_job(Manager *m, Job *j) {
774 Job *k;
775
776 assert(j);
777
778 /* Tries to delete one item in the linked list
779 * j->transaction_next->transaction_next->... that conflicts
780 * with another one, in an attempt to make an inconsistent
781 * transaction work. */
782
783 /* We rely here on the fact that if a merged with b does not
784 * merge with c, then at least one of a or b does not merge with c either */
785 LIST_FOREACH(transaction, j, j)
786 LIST_FOREACH(transaction, k, j->transaction_next) {
787 Job *d;
788
789 /* Is this one mergeable? Then skip it */
790 if (job_type_is_mergeable(j->type, k->type))
791 continue;
792
793 /* Ok, we found two that conflict, let's see if we can
794 * drop one of them */
795 if (!j->matters_to_anchor && !k->matters_to_anchor) {
796
797 /* Both jobs don't matter, so let's
798 * find the one that is smarter to
799 * remove. Let's think positive and
800 * rather remove stops than starts --
801 * except if something is being
802 * stopped because it is conflicted by
803 * another unit in which case we
804 * rather remove the start. */
805
806 log_debug("Looking at job %s/%s conflicted_by=%s", j->unit->meta.id, job_type_to_string(j->type), yes_no(j->type == JOB_STOP && job_is_conflicted_by(j)));
807 log_debug("Looking at job %s/%s conflicted_by=%s", k->unit->meta.id, job_type_to_string(k->type), yes_no(k->type == JOB_STOP && job_is_conflicted_by(k)));
808
809 if (j->type == JOB_STOP) {
810
811 if (job_is_conflicted_by(j))
812 d = k;
813 else
814 d = j;
815
816 } else if (k->type == JOB_STOP) {
817
818 if (job_is_conflicted_by(k))
819 d = j;
820 else
821 d = k;
822 } else
823 d = j;
824
825 } else if (!j->matters_to_anchor)
826 d = j;
827 else if (!k->matters_to_anchor)
828 d = k;
829 else
830 return -ENOEXEC;
831
832 /* Ok, we can drop one, so let's do so. */
833 log_debug("Fixing conflicting jobs by deleting job %s/%s", d->unit->meta.id, job_type_to_string(d->type));
834 transaction_delete_job(m, d, true);
835 return 0;
836 }
837
838 return -EINVAL;
839 }
840
841 static int transaction_merge_jobs(Manager *m, DBusError *e) {
842 Job *j;
843 Iterator i;
844 int r;
845
846 assert(m);
847
848 /* First step, check whether any of the jobs for one specific
849 * task conflict. If so, try to drop one of them. */
850 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
851 JobType t;
852 Job *k;
853
854 t = j->type;
855 LIST_FOREACH(transaction, k, j->transaction_next) {
856 if (job_type_merge(&t, k->type) >= 0)
857 continue;
858
859 /* OK, we could not merge all jobs for this
860 * action. Let's see if we can get rid of one
861 * of them */
862
863 if ((r = delete_one_unmergeable_job(m, j)) >= 0)
864 /* Ok, we managed to drop one, now
865 * let's ask our callers to call us
866 * again after garbage collecting */
867 return -EAGAIN;
868
869 /* We couldn't merge anything. Failure */
870 dbus_set_error(e, BUS_ERROR_TRANSACTION_JOBS_CONFLICTING, "Transaction contains conflicting jobs '%s' and '%s' for %s. Probably contradicting requirement dependencies configured.",
871 job_type_to_string(t), job_type_to_string(k->type), k->unit->meta.id);
872 return r;
873 }
874 }
875
876 /* Second step, merge the jobs. */
877 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
878 JobType t = j->type;
879 Job *k;
880
881 /* Merge all transactions */
882 LIST_FOREACH(transaction, k, j->transaction_next)
883 assert_se(job_type_merge(&t, k->type) == 0);
884
885 /* If an active job is mergeable, merge it too */
886 if (j->unit->meta.job)
887 job_type_merge(&t, j->unit->meta.job->type); /* Might fail. Which is OK */
888
889 while ((k = j->transaction_next)) {
890 if (j->installed) {
891 transaction_merge_and_delete_job(m, k, j, t);
892 j = k;
893 } else
894 transaction_merge_and_delete_job(m, j, k, t);
895 }
896
897 assert(!j->transaction_next);
898 assert(!j->transaction_prev);
899 }
900
901 return 0;
902 }
903
904 static void transaction_drop_redundant(Manager *m) {
905 bool again;
906
907 assert(m);
908
909 /* Goes through the transaction and removes all jobs that are
910 * a noop */
911
912 do {
913 Job *j;
914 Iterator i;
915
916 again = false;
917
918 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
919 bool changes_something = false;
920 Job *k;
921
922 LIST_FOREACH(transaction, k, j) {
923
924 if (!job_is_anchor(k) &&
925 (k->installed || job_type_is_redundant(k->type, unit_active_state(k->unit))) &&
926 (!k->unit->meta.job || !job_type_is_conflicting(k->type, k->unit->meta.job->type)))
927 continue;
928
929 changes_something = true;
930 break;
931 }
932
933 if (changes_something)
934 continue;
935
936 /* log_debug("Found redundant job %s/%s, dropping.", j->unit->meta.id, job_type_to_string(j->type)); */
937 transaction_delete_job(m, j, false);
938 again = true;
939 break;
940 }
941
942 } while (again);
943 }
944
945 static bool unit_matters_to_anchor(Unit *u, Job *j) {
946 assert(u);
947 assert(!j->transaction_prev);
948
949 /* Checks whether at least one of the jobs for this unit
950 * matters to the anchor. */
951
952 LIST_FOREACH(transaction, j, j)
953 if (j->matters_to_anchor)
954 return true;
955
956 return false;
957 }
958
959 static int transaction_verify_order_one(Manager *m, Job *j, Job *from, unsigned generation, DBusError *e) {
960 Iterator i;
961 Unit *u;
962 int r;
963
964 assert(m);
965 assert(j);
966 assert(!j->transaction_prev);
967
968 /* Does a recursive sweep through the ordering graph, looking
969 * for a cycle. If we find cycle we try to break it. */
970
971 /* Have we seen this before? */
972 if (j->generation == generation) {
973 Job *k, *delete;
974
975 /* If the marker is NULL we have been here already and
976 * decided the job was loop-free from here. Hence
977 * shortcut things and return right-away. */
978 if (!j->marker)
979 return 0;
980
981 /* So, the marker is not NULL and we already have been
982 * here. We have a cycle. Let's try to break it. We go
983 * backwards in our path and try to find a suitable
984 * job to remove. We use the marker to find our way
985 * back, since, smart as we are, we stored our way back
986 * in there. */
987 log_warning("Found ordering cycle on %s/%s", j->unit->meta.id, job_type_to_string(j->type));
988
989 delete = NULL;
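/* Walk backwards along the markers we dropped on the way here; the walk ends at the job whose marker points to itself (the start of our path) or at a job from an older generation. */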
990 for (k = from; k; k = ((k->generation == generation && k->marker != k) ? k->marker : NULL)) {
991
992 log_info("Walked on cycle path to %s/%s", k->unit->meta.id, job_type_to_string(k->type));
993
994 if (!delete &&
995 !k->installed &&
996 !unit_matters_to_anchor(k->unit, k)) {
997 /* Ok, we can drop this one, so let's
998 * do so. */
999 delete = k;
1000 }
1001
1002 /* Check if this in fact was the beginning of
1003 * the cycle */
1004 if (k == j)
1005 break;
1006 }
1007
1008
1009 if (delete) {
1010 log_warning("Breaking ordering cycle by deleting job %s/%s", delete->unit->meta.id, job_type_to_string(delete->type));
1011 transaction_delete_unit(m, delete->unit);
1012 return -EAGAIN;
1013 }
1014
1015 log_error("Unable to break cycle");
1016
1017 dbus_set_error(e, BUS_ERROR_TRANSACTION_ORDER_IS_CYCLIC, "Transaction order is cyclic. See system logs for details.");
1018 return -ENOEXEC;
1019 }
1020
1021 /* Make the marker point to where we come from, so that we can
1022 * find our way backwards if we want to break a cycle. We use
1023 * a special marker for the beginning: we point to
1024 * ourselves. */
1025 j->marker = from ? from : j;
1026 j->generation = generation;
1027
1028 /* We assume that the dependencies are bidirectional, and
1029 * hence can ignore UNIT_AFTER */
1030 SET_FOREACH(u, j->unit->meta.dependencies[UNIT_BEFORE], i) {
1031 Job *o;
1032
1033 /* Is there a job for this unit? */
1034 if (!(o = hashmap_get(m->transaction_jobs, u)))
1035
1036 /* Ok, there is no job for this in the
1037 * transaction, but maybe there is already one
1038 * running? */
1039 if (!(o = u->meta.job))
1040 continue;
1041
1042 if ((r = transaction_verify_order_one(m, o, j, generation, e)) < 0)
1043 return r;
1044 }
1045
1046 /* Ok, let's backtrack, and remember that this entry is not on
1047 * our path anymore. */
1048 j->marker = NULL;
1049
1050 return 0;
1051 }
1052
1053 static int transaction_verify_order(Manager *m, unsigned *generation, DBusError *e) {
1054 Job *j;
1055 int r;
1056 Iterator i;
1057 unsigned g;
1058
1059 assert(m);
1060 assert(generation);
1061
1062 /* Check if the ordering graph is cyclic. If it is, try to fix
1063 * that up by dropping one of the jobs. */
1064
1065 g = (*generation)++;
1066
1067 HASHMAP_FOREACH(j, m->transaction_jobs, i)
1068 if ((r = transaction_verify_order_one(m, j, NULL, g, e)) < 0)
1069 return r;
1070
1071 return 0;
1072 }
1073
1074 static void transaction_collect_garbage(Manager *m) {
1075 bool again;
1076
1077 assert(m);
1078
1079 /* Drop jobs that are not required by any other job */
1080
1081 do {
1082 Iterator i;
1083 Job *j;
1084
1085 again = false;
1086
1087 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
1088 if (j->object_list) {
1089 /* log_debug("Keeping job %s/%s because of %s/%s", */
1090 /* j->unit->meta.id, job_type_to_string(j->type), */
1091 /* j->object_list->subject ? j->object_list->subject->unit->meta.id : "root", */
1092 /* j->object_list->subject ? job_type_to_string(j->object_list->subject->type) : "root"); */
1093 continue;
1094 }
1095
1096 /* log_debug("Garbage collecting job %s/%s", j->unit->meta.id, job_type_to_string(j->type)); */
1097 transaction_delete_job(m, j, true);
1098 again = true;
1099 break;
1100 }
1101
1102 } while (again);
1103 }
1104
1105 static int transaction_is_destructive(Manager *m, DBusError *e) {
1106 Iterator i;
1107 Job *j;
1108
1109 assert(m);
1110
1111 /* Checks whether applying this transaction means that
1112 * existing jobs would be replaced */
1113
1114 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
1115
1116 /* Assume merged */
1117 assert(!j->transaction_prev);
1118 assert(!j->transaction_next);
1119
1120 if (j->unit->meta.job &&
1121 j->unit->meta.job != j &&
1122 !job_type_is_superset(j->type, j->unit->meta.job->type)) {
1123
1124 dbus_set_error(e, BUS_ERROR_TRANSACTION_IS_DESTRUCTIVE, "Transaction is destructive.");
1125 return -EEXIST;
1126 }
1127 }
1128
1129 return 0;
1130 }
1131
1132 static void transaction_minimize_impact(Manager *m) {
1133 bool again;
1134 assert(m);
1135
1136 /* Drops all unnecessary jobs that reverse already active jobs
1137 * or that stop a running service. */
1138
1139 do {
1140 Job *j;
1141 Iterator i;
1142
1143 again = false;
1144
1145 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
1146 LIST_FOREACH(transaction, j, j) {
1147 bool stops_running_service, changes_existing_job;
1148
1149 /* If it matters, we shouldn't drop it */
1150 if (j->matters_to_anchor)
1151 continue;
1152
1153 /* Would this stop a running service?
1154 * Would this change an existing job?
1155 * If so, let's drop this entry */
1156
1157 stops_running_service =
1158 j->type == JOB_STOP && UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(j->unit));
1159
1160 changes_existing_job =
1161 j->unit->meta.job &&
1162 job_type_is_conflicting(j->type, j->unit->meta.job->type);
1163
1164 if (!stops_running_service && !changes_existing_job)
1165 continue;
1166
1167 if (stops_running_service)
1168 log_info("%s/%s would stop a running service.", j->unit->meta.id, job_type_to_string(j->type));
1169
1170 if (changes_existing_job)
1171 log_info("%s/%s would change existing job.", j->unit->meta.id, job_type_to_string(j->type));
1172
1173 /* Ok, let's get rid of this */
1174 log_info("Deleting %s/%s to minimize impact.", j->unit->meta.id, job_type_to_string(j->type));
1175
1176 transaction_delete_job(m, j, true);
1177 again = true;
1178 break;
1179 }
1180
1181 if (again)
1182 break;
1183 }
1184
1185 } while (again);
1186 }
1187
1188 static int transaction_apply(Manager *m) {
1189 Iterator i;
1190 Job *j;
1191 int r;
1192
1193 /* Moves the transaction jobs to the set of active jobs */
1194
1195 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
1196 /* Assume merged */
1197 assert(!j->transaction_prev);
1198 assert(!j->transaction_next);
1199
1200 if (j->installed)
1201 continue;
1202
1203 if ((r = hashmap_put(m->jobs, UINT32_TO_PTR(j->id), j)) < 0)
1204 goto rollback;
1205 }
1206
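/* All new jobs are now registered in m->jobs under their ids; the loop below installs them on their units. Should registration have failed, the rollback path only needs to undo those hashmap entries. */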
1207 while ((j = hashmap_steal_first(m->transaction_jobs))) {
1208 if (j->installed) {
1209 /* log_debug("Skipping already installed job %s/%s as %u", j->unit->meta.id, job_type_to_string(j->type), (unsigned) j->id); */
1210 continue;
1211 }
1212
1213 if (j->unit->meta.job)
1214 job_free(j->unit->meta.job);
1215
1216 j->unit->meta.job = j;
1217 j->installed = true;
1218 m->n_installed_jobs ++;
1219
1220 /* We're fully installed. Now let's free data we don't
1221 * need anymore. */
1222
1223 assert(!j->transaction_next);
1224 assert(!j->transaction_prev);
1225
1226 job_add_to_run_queue(j);
1227 job_add_to_dbus_queue(j);
1228 job_start_timer(j);
1229
1230 log_debug("Installed new job %s/%s as %u", j->unit->meta.id, job_type_to_string(j->type), (unsigned) j->id);
1231 }
1232
1233 /* As last step, kill all remaining job dependencies. */
1234 transaction_clean_dependencies(m);
1235
1236 return 0;
1237
1238 rollback:
1239
1240 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
1241 if (j->installed)
1242 continue;
1243
1244 hashmap_remove(m->jobs, UINT32_TO_PTR(j->id));
1245 }
1246
1247 return r;
1248 }
1249
1250 static int transaction_activate(Manager *m, JobMode mode, DBusError *e) {
1251 int r;
1252 unsigned generation = 1;
1253
1254 assert(m);
1255
1256 /* This applies the changes recorded in transaction_jobs to
1257 * the actual list of jobs, if possible. */
1258
1259 /* First step: figure out which jobs matter */
1260 transaction_find_jobs_that_matter_to_anchor(m, NULL, generation++);
1261
1262 /* Second step: Try not to stop any running services if
1263 * we don't have to. Don't try to reverse running
1264 * jobs if we don't have to. */
1265 if (mode == JOB_FAIL)
1266 transaction_minimize_impact(m);
1267
1268 /* Third step: Drop redundant jobs */
1269 transaction_drop_redundant(m);
1270
1271 for (;;) {
1272 /* Fourth step: Let's remove unneeded jobs that might
1273 * be lurking. */
1274 transaction_collect_garbage(m);
1275
1276 /* Fifth step: verify order makes sense and correct
1277 * cycles if necessary and possible */
1278 if ((r = transaction_verify_order(m, &generation, e)) >= 0)
1279 break;
1280
1281 if (r != -EAGAIN) {
1282 log_warning("Requested transaction contains an unfixable cyclic ordering dependency: %s", bus_error(e, r));
1283 goto rollback;
1284 }
1285
1286 /* Let's see if the resulting transaction ordering
1287 * graph is still cyclic... */
1288 }
1289
1290 for (;;) {
1291 /* Sixth step: let's drop unmergeable entries if
1292 * necessary and possible, merge entries we can
1293 * merge */
1294 if ((r = transaction_merge_jobs(m, e)) >= 0)
1295 break;
1296
1297 if (r != -EAGAIN) {
1298 log_warning("Requested transaction contains unmergeable jobs: %s", bus_error(e, r));
1299 goto rollback;
1300 }
1301
1302 /* Seventh step: an entry got dropped, let's garbage
1303 * collect its dependencies. */
1304 transaction_collect_garbage(m);
1305
1306 /* Let's see if the resulting transaction still has
1307 * unmergeable entries ... */
1308 }
1309
1310 /* Eighth step: Drop redundant jobs again, if the merging now allows us to drop more. */
1311 transaction_drop_redundant(m);
1312
1313 /* Ninth step: check whether we can actually apply this */
1314 if (mode == JOB_FAIL)
1315 if ((r = transaction_is_destructive(m, e)) < 0) {
1316 log_notice("Requested transaction contradicts existing jobs: %s", bus_error(e, r));
1317 goto rollback;
1318 }
1319
1320 /* Tenth step: apply changes */
1321 if ((r = transaction_apply(m)) < 0) {
1322 log_warning("Failed to apply transaction: %s", strerror(-r));
1323 goto rollback;
1324 }
1325
1326 assert(hashmap_isempty(m->transaction_jobs));
1327 assert(!m->transaction_anchor);
1328
1329 return 0;
1330
1331 rollback:
1332 transaction_abort(m);
1333 return r;
1334 }
1335
1336 static Job* transaction_add_one_job(Manager *m, JobType type, Unit *unit, bool override, bool *is_new) {
1337 Job *j, *f;
1338
1339 assert(m);
1340 assert(unit);
1341
1342 /* Looks for an existing prospective job and returns that. If
1343 * it doesn't exist it is created and added to the prospective
1344 * jobs list. */
1345
1346 f = hashmap_get(m->transaction_jobs, unit);
1347
1348 LIST_FOREACH(transaction, j, f) {
1349 assert(j->unit == unit);
1350
1351 if (j->type == type) {
1352 if (is_new)
1353 *is_new = false;
1354 return j;
1355 }
1356 }
1357
1358 if (unit->meta.job && unit->meta.job->type == type)
1359 j = unit->meta.job;
1360 else if (!(j = job_new(m, type, unit)))
1361 return NULL;
1362
1363 j->generation = 0;
1364 j->marker = NULL;
1365 j->matters_to_anchor = false;
1366 j->override = override;
1367
1368 LIST_PREPEND(Job, transaction, f, j);
1369
1370 if (hashmap_replace(m->transaction_jobs, unit, f) < 0) {
1371 job_free(j);
1372 return NULL;
1373 }
1374
1375 if (is_new)
1376 *is_new = true;
1377
1378 /* log_debug("Added job %s/%s to transaction.", unit->meta.id, job_type_to_string(type)); */
1379
1380 return j;
1381 }
1382
1383 void manager_transaction_unlink_job(Manager *m, Job *j, bool delete_dependencies) {
1384 assert(m);
1385 assert(j);
1386
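/* Unlink j from its unit's list of prospective jobs: bridge prev and next if it sits in the middle, promote the next entry to list head in transaction_jobs if it was the head, or drop the mapping if it was the only entry. */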
1387 if (j->transaction_prev)
1388 j->transaction_prev->transaction_next = j->transaction_next;
1389 else if (j->transaction_next)
1390 hashmap_replace(m->transaction_jobs, j->unit, j->transaction_next);
1391 else
1392 hashmap_remove_value(m->transaction_jobs, j->unit, j);
1393
1394 if (j->transaction_next)
1395 j->transaction_next->transaction_prev = j->transaction_prev;
1396
1397 j->transaction_prev = j->transaction_next = NULL;
1398
1399 while (j->subject_list)
1400 job_dependency_free(j->subject_list);
1401
1402 while (j->object_list) {
1403 Job *other = j->object_list->matters ? j->object_list->subject : NULL;
1404
1405 job_dependency_free(j->object_list);
1406
1407 if (other && delete_dependencies) {
1408 log_debug("Deleting job %s/%s as dependency of job %s/%s",
1409 other->unit->meta.id, job_type_to_string(other->type),
1410 j->unit->meta.id, job_type_to_string(j->type));
1411 transaction_delete_job(m, other, delete_dependencies);
1412 }
1413 }
1414 }
1415
1416 static int transaction_add_job_and_dependencies(
1417 Manager *m,
1418 JobType type,
1419 Unit *unit,
1420 Job *by,
1421 bool matters,
1422 bool override,
1423 bool conflicts,
1424 bool ignore_deps,
1425 DBusError *e,
1426 Job **_ret) {
1427 Job *ret;
1428 Iterator i;
1429 Unit *dep;
1430 int r;
1431 bool is_new;
1432
1433 assert(m);
1434 assert(type < _JOB_TYPE_MAX);
1435 assert(unit);
1436
1437 /* log_debug("Pulling in %s/%s from %s/%s", */
1438 /* unit->meta.id, job_type_to_string(type), */
1439 /* by ? by->unit->meta.id : "NA", */
1440 /* by ? job_type_to_string(by->type) : "NA"); */
1441
1442 if (unit->meta.load_state != UNIT_LOADED &&
1443 unit->meta.load_state != UNIT_ERROR &&
1444 unit->meta.load_state != UNIT_MASKED) {
1445 dbus_set_error(e, BUS_ERROR_LOAD_FAILED, "Unit %s is not loaded properly.", unit->meta.id);
1446 return -EINVAL;
1447 }
1448
1449 if (type != JOB_STOP && unit->meta.load_state == UNIT_ERROR) {
1450 dbus_set_error(e, BUS_ERROR_LOAD_FAILED,
1451 "Unit %s failed to load: %s. "
1452 "See system logs and 'systemctl status' for details.",
1453 unit->meta.id,
1454 strerror(-unit->meta.load_error));
1455 return -EINVAL;
1456 }
1457
1458 if (type != JOB_STOP && unit->meta.load_state == UNIT_MASKED) {
1459 dbus_set_error(e, BUS_ERROR_MASKED, "Unit %s is masked.", unit->meta.id);
1460 return -EINVAL;
1461 }
1462
1463 if (!unit_job_is_applicable(unit, type)) {
1464 dbus_set_error(e, BUS_ERROR_JOB_TYPE_NOT_APPLICABLE, "Job type %s is not applicable for unit %s.", job_type_to_string(type), unit->meta.id);
1465 return -EBADR;
1466 }
1467
1468 /* First add the job. */
1469 if (!(ret = transaction_add_one_job(m, type, unit, override, &is_new)))
1470 return -ENOMEM;
1471
1472 ret->ignore_deps = ret->ignore_deps || ignore_deps;
1473
1474 /* Then, add a link to the job. */
1475 if (!job_dependency_new(by, ret, matters, conflicts))
1476 return -ENOMEM;
1477
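/* Only a job newly added to the transaction needs its dependencies pulled in below; one that was already part of the transaction had them added when it first entered. */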
1478 if (is_new && !ignore_deps) {
1479 Set *following;
1480
1481 /* If we are following some other unit, make sure we
1482 * add all dependencies of everybody following. */
1483 if (unit_following_set(ret->unit, &following) > 0) {
1484 SET_FOREACH(dep, following, i)
1485 if ((r = transaction_add_job_and_dependencies(m, type, dep, ret, false, override, false, false, e, NULL)) < 0) {
1486 log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->meta.id, bus_error(e, r));
1487
1488 if (e)
1489 dbus_error_free(e);
1490 }
1491
1492 set_free(following);
1493 }
1494
1495 /* Finally, recursively add in all dependencies. */
1496 if (type == JOB_START || type == JOB_RELOAD_OR_START) {
1497 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_REQUIRES], i)
1498 if ((r = transaction_add_job_and_dependencies(m, JOB_START, dep, ret, true, override, false, false, e, NULL)) < 0) {
1499 if (r != -EBADR)
1500 goto fail;
1501
1502 if (e)
1503 dbus_error_free(e);
1504 }
1505
1506 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_BIND_TO], i)
1507 if ((r = transaction_add_job_and_dependencies(m, JOB_START, dep, ret, true, override, false, false, e, NULL)) < 0) {
1508
1509 if (r != -EBADR)
1510 goto fail;
1511
1512 if (e)
1513 dbus_error_free(e);
1514 }
1515
1516 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_REQUIRES_OVERRIDABLE], i)
1517 if ((r = transaction_add_job_and_dependencies(m, JOB_START, dep, ret, !override, override, false, false, e, NULL)) < 0) {
1518 log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->meta.id, bus_error(e, r));
1519
1520 if (e)
1521 dbus_error_free(e);
1522 }
1523
1524 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_WANTS], i)
1525 if ((r = transaction_add_job_and_dependencies(m, JOB_START, dep, ret, false, false, false, false, e, NULL)) < 0) {
1526 log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->meta.id, bus_error(e, r));
1527
1528 if (e)
1529 dbus_error_free(e);
1530 }
1531
1532 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_REQUISITE], i)
1533 if ((r = transaction_add_job_and_dependencies(m, JOB_VERIFY_ACTIVE, dep, ret, true, override, false, false, e, NULL)) < 0) {
1534
1535 if (r != -EBADR)
1536 goto fail;
1537
1538 if (e)
1539 dbus_error_free(e);
1540 }
1541
1542 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_REQUISITE_OVERRIDABLE], i)
1543 if ((r = transaction_add_job_and_dependencies(m, JOB_VERIFY_ACTIVE, dep, ret, !override, override, false, false, e, NULL)) < 0) {
1544 log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->meta.id, bus_error(e, r));
1545
1546 if (e)
1547 dbus_error_free(e);
1548 }
1549
1550 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_CONFLICTS], i)
1551 if ((r = transaction_add_job_and_dependencies(m, JOB_STOP, dep, ret, true, override, true, false, e, NULL)) < 0) {
1552
1553 if (r != -EBADR)
1554 goto fail;
1555
1556 if (e)
1557 dbus_error_free(e);
1558 }
1559
1560 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_CONFLICTED_BY], i)
1561 if ((r = transaction_add_job_and_dependencies(m, JOB_STOP, dep, ret, false, override, false, false, e, NULL)) < 0) {
1562 log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->meta.id, bus_error(e, r));
1563
1564 if (e)
1565 dbus_error_free(e);
1566 }
1567
1568 } else if (type == JOB_STOP || type == JOB_RESTART || type == JOB_TRY_RESTART) {
1569
1570 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_REQUIRED_BY], i)
1571 if ((r = transaction_add_job_and_dependencies(m, type, dep, ret, true, override, false, false, e, NULL)) < 0) {
1572
1573 if (r != -EBADR)
1574 goto fail;
1575
1576 if (e)
1577 dbus_error_free(e);
1578 }
1579
1580 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_BOUND_BY], i)
1581 if ((r = transaction_add_job_and_dependencies(m, type, dep, ret, true, override, false, false, e, NULL)) < 0) {
1582
1583 if (r != -EBADR)
1584 goto fail;
1585
1586 if (e)
1587 dbus_error_free(e);
1588 }
1589 }
1590
1591 /* JOB_VERIFY_STARTED, JOB_RELOAD require no dependency handling */
1592 }
1593
1594 if (_ret)
1595 *_ret = ret;
1596
1597 return 0;
1598
1599 fail:
1600 return r;
1601 }
1602
1603 static int transaction_add_isolate_jobs(Manager *m) {
1604 Iterator i;
1605 Unit *u;
1606 char *k;
1607 int r;
1608
1609 assert(m);
1610
1611 HASHMAP_FOREACH_KEY(u, k, m->units, i) {
1612
1613 /* ignore aliases */
1614 if (u->meta.id != k)
1615 continue;
1616
1617 if (UNIT_VTABLE(u)->no_isolate)
1618 continue;
1619
1620 /* No need to stop inactive jobs */
1621 if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(u)) && !u->meta.job)
1622 continue;
1623
1624 /* Is there already something listed for this? */
1625 if (hashmap_get(m->transaction_jobs, u))
1626 continue;
1627
1628 if ((r = transaction_add_job_and_dependencies(m, JOB_STOP, u, NULL, true, false, false, false, NULL, NULL)) < 0)
1629 log_warning("Cannot add isolate job for unit %s, ignoring: %s", u->meta.id, strerror(-r));
1630 }
1631
1632 return 0;
1633 }
1634
1635 int manager_add_job(Manager *m, JobType type, Unit *unit, JobMode mode, bool override, DBusError *e, Job **_ret) {
1636 int r;
1637 Job *ret;
1638
1639 assert(m);
1640 assert(type < _JOB_TYPE_MAX);
1641 assert(unit);
1642 assert(mode < _JOB_MODE_MAX);
1643
1644 if (mode == JOB_ISOLATE && type != JOB_START) {
1645 dbus_set_error(e, BUS_ERROR_INVALID_JOB_MODE, "Isolate is only valid for start.");
1646 return -EINVAL;
1647 }
1648
1649 if (mode == JOB_ISOLATE && !unit->meta.allow_isolate) {
1650 dbus_set_error(e, BUS_ERROR_NO_ISOLATION, "Operation refused, unit may not be isolated.");
1651 return -EPERM;
1652 }
1653
1654 log_debug("Trying to enqueue job %s/%s/%s", unit->meta.id, job_type_to_string(type), job_mode_to_string(mode));
1655
1656 if ((r = transaction_add_job_and_dependencies(m, type, unit, NULL, true, override, false, mode == JOB_IGNORE_DEPENDENCIES, e, &ret)) < 0) {
1657 transaction_abort(m);
1658 return r;
1659 }
1660
1661 if (mode == JOB_ISOLATE)
1662 if ((r = transaction_add_isolate_jobs(m)) < 0) {
1663 transaction_abort(m);
1664 return r;
1665 }
1666
1667 if ((r = transaction_activate(m, mode, e)) < 0)
1668 return r;
1669
1670 log_debug("Enqueued job %s/%s as %u", unit->meta.id, job_type_to_string(type), (unsigned) ret->id);
1671
1672 if (_ret)
1673 *_ret = ret;
1674
1675 return 0;
1676 }
1677
1678 int manager_add_job_by_name(Manager *m, JobType type, const char *name, JobMode mode, bool override, DBusError *e, Job **_ret) {
1679 Unit *unit;
1680 int r;
1681
1682 assert(m);
1683 assert(type < _JOB_TYPE_MAX);
1684 assert(name);
1685 assert(mode < _JOB_MODE_MAX);
1686
1687 if ((r = manager_load_unit(m, name, NULL, NULL, &unit)) < 0)
1688 return r;
1689
1690 return manager_add_job(m, type, unit, mode, override, e, _ret);
1691 }
1692
1693 Job *manager_get_job(Manager *m, uint32_t id) {
1694 assert(m);
1695
1696 return hashmap_get(m->jobs, UINT32_TO_PTR(id));
1697 }
1698
1699 Unit *manager_get_unit(Manager *m, const char *name) {
1700 assert(m);
1701 assert(name);
1702
1703 return hashmap_get(m->units, name);
1704 }
1705
1706 unsigned manager_dispatch_load_queue(Manager *m) {
1707 Meta *meta;
1708 unsigned n = 0;
1709
1710 assert(m);
1711
1712 /* Make sure we are not run recursively */
1713 if (m->dispatching_load_queue)
1714 return 0;
1715
1716 m->dispatching_load_queue = true;
1717
1718 /* Dispatches the load queue. Takes a unit from the queue and
1719 * tries to load its data until the queue is empty */
1720
1721 while ((meta = m->load_queue)) {
1722 assert(meta->in_load_queue);
1723
1724 unit_load((Unit*) meta);
1725 n++;
1726 }
1727
1728 m->dispatching_load_queue = false;
1729 return n;
1730 }
1731
1732 int manager_load_unit_prepare(Manager *m, const char *name, const char *path, DBusError *e, Unit **_ret) {
1733 Unit *ret;
1734 int r;
1735
1736 assert(m);
1737 assert(name || path);
1738
1739 /* This will prepare the unit for loading, but not actually
1740 * load anything from disk. */
1741
1742 if (path && !is_path(path)) {
1743 dbus_set_error(e, BUS_ERROR_INVALID_PATH, "Path %s is not absolute.", path);
1744 return -EINVAL;
1745 }
1746
1747 if (!name)
1748 name = file_name_from_path(path);
1749
1750 if (!unit_name_is_valid(name, false)) {
1751 dbus_set_error(e, BUS_ERROR_INVALID_NAME, "Unit name %s is not valid.", name);
1752 return -EINVAL;
1753 }
1754
1755 if ((ret = manager_get_unit(m, name))) {
1756 *_ret = ret;
1757 return 1;
1758 }
1759
1760 if (!(ret = unit_new(m)))
1761 return -ENOMEM;
1762
1763 if (path)
1764 if (!(ret->meta.fragment_path = strdup(path))) {
1765 unit_free(ret);
1766 return -ENOMEM;
1767 }
1768
1769 if ((r = unit_add_name(ret, name)) < 0) {
1770 unit_free(ret);
1771 return r;
1772 }
1773
1774 unit_add_to_load_queue(ret);
1775 unit_add_to_dbus_queue(ret);
1776 unit_add_to_gc_queue(ret);
1777
1778 if (_ret)
1779 *_ret = ret;
1780
1781 return 0;
1782 }
1783
1784 int manager_load_unit(Manager *m, const char *name, const char *path, DBusError *e, Unit **_ret) {
1785 int r;
1786
1787 assert(m);
1788
1789 /* This will load the service information files, but not actually
1790 * start any services or anything. */
1791
1792 if ((r = manager_load_unit_prepare(m, name, path, e, _ret)) != 0)
1793 return r;
1794
1795 manager_dispatch_load_queue(m);
1796
1797 if (_ret)
1798 *_ret = unit_follow_merge(*_ret);
1799
1800 return 0;
1801 }
1802
1803 void manager_dump_jobs(Manager *s, FILE *f, const char *prefix) {
1804 Iterator i;
1805 Job *j;
1806
1807 assert(s);
1808 assert(f);
1809
1810 HASHMAP_FOREACH(j, s->jobs, i)
1811 job_dump(j, f, prefix);
1812 }
1813
1814 void manager_dump_units(Manager *s, FILE *f, const char *prefix) {
1815 Iterator i;
1816 Unit *u;
1817 const char *t;
1818
1819 assert(s);
1820 assert(f);
1821
1822 HASHMAP_FOREACH_KEY(u, t, s->units, i)
1823 if (u->meta.id == t)
1824 unit_dump(u, f, prefix);
1825 }
1826
1827 void manager_clear_jobs(Manager *m) {
1828 Job *j;
1829
1830 assert(m);
1831
1832 transaction_abort(m);
1833
1834 while ((j = hashmap_first(m->jobs)))
1835 job_finish_and_invalidate(j, JOB_CANCELED);
1836 }
1837
1838 unsigned manager_dispatch_run_queue(Manager *m) {
1839 Job *j;
1840 unsigned n = 0;
1841
1842 if (m->dispatching_run_queue)
1843 return 0;
1844
1845 m->dispatching_run_queue = true;
1846
1847 while ((j = m->run_queue)) {
1848 assert(j->installed);
1849 assert(j->in_run_queue);
1850
1851 job_run_and_invalidate(j);
1852 n++;
1853 }
1854
1855 m->dispatching_run_queue = false;
1856 return n;
1857 }
1858
1859 unsigned manager_dispatch_dbus_queue(Manager *m) {
1860 Job *j;
1861 Meta *meta;
1862 unsigned n = 0;
1863
1864 assert(m);
1865
1866 if (m->dispatching_dbus_queue)
1867 return 0;
1868
1869 m->dispatching_dbus_queue = true;
1870
1871 while ((meta = m->dbus_unit_queue)) {
1872 assert(meta->in_dbus_queue);
1873
1874 bus_unit_send_change_signal((Unit*) meta);
1875 n++;
1876 }
1877
1878 while ((j = m->dbus_job_queue)) {
1879 assert(j->in_dbus_queue);
1880
1881 bus_job_send_change_signal(j);
1882 n++;
1883 }
1884
1885 m->dispatching_dbus_queue = false;
1886 return n;
1887 }
1888
1889 static int manager_process_notify_fd(Manager *m) {
1890 ssize_t n;
1891
1892 assert(m);
1893
1894 for (;;) {
1895 char buf[4096];
1896 struct msghdr msghdr;
1897 struct iovec iovec;
1898 struct ucred *ucred;
1899 union {
1900 struct cmsghdr cmsghdr;
1901 uint8_t buf[CMSG_SPACE(sizeof(struct ucred))];
1902 } control;
1903 Unit *u;
1904 char **tags;
1905
1906 zero(iovec);
1907 iovec.iov_base = buf;
1908 iovec.iov_len = sizeof(buf)-1;
1909
1910 zero(control);
1911 zero(msghdr);
1912 msghdr.msg_iov = &iovec;
1913 msghdr.msg_iovlen = 1;
1914 msghdr.msg_control = &control;
1915 msghdr.msg_controllen = sizeof(control);
1916
1917 if ((n = recvmsg(m->notify_watch.fd, &msghdr, MSG_DONTWAIT)) <= 0) {
1918 if (n >= 0)
1919 return -EIO;
1920
1921 if (errno == EAGAIN || errno == EINTR)
1922 break;
1923
1924 return -errno;
1925 }
1926
1927 if (msghdr.msg_controllen < CMSG_LEN(sizeof(struct ucred)) ||
1928 control.cmsghdr.cmsg_level != SOL_SOCKET ||
1929 control.cmsghdr.cmsg_type != SCM_CREDENTIALS ||
1930 control.cmsghdr.cmsg_len != CMSG_LEN(sizeof(struct ucred))) {
1931 log_warning("Received notify message without credentials. Ignoring.");
1932 continue;
1933 }
1934
1935 ucred = (struct ucred*) CMSG_DATA(&control.cmsghdr);
1936
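/* Map the sending PID to a unit: first via the PIDs we explicitly watch, then via the sender's control group. The credentials are available because SO_PASSCRED was enabled on this socket. */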
1937 if (!(u = hashmap_get(m->watch_pids, LONG_TO_PTR(ucred->pid))))
1938 if (!(u = cgroup_unit_by_pid(m, ucred->pid))) {
1939 log_warning("Cannot find unit for notify message of PID %lu.", (unsigned long) ucred->pid);
1940 continue;
1941 }
1942
1943 assert((size_t) n < sizeof(buf));
1944 buf[n] = 0;
1945 if (!(tags = strv_split(buf, "\n\r")))
1946 return -ENOMEM;
1947
1948 log_debug("Got notification message for unit %s", u->meta.id);
1949
1950 if (UNIT_VTABLE(u)->notify_message)
1951 UNIT_VTABLE(u)->notify_message(u, ucred->pid, tags);
1952
1953 strv_free(tags);
1954 }
1955
1956 return 0;
1957 }
1958
1959 static int manager_dispatch_sigchld(Manager *m) {
1960 assert(m);
1961
1962 for (;;) {
1963 siginfo_t si;
1964 Unit *u;
1965 int r;
1966
1967 zero(si);
1968
1969 /* First we call waitid() for a PID and do not reap the
1970 * zombie. That way we can still access /proc/$PID for
1971 * it while it is a zombie. */
1972 if (waitid(P_ALL, 0, &si, WEXITED|WNOHANG|WNOWAIT) < 0) {
1973
1974 if (errno == ECHILD)
1975 break;
1976
1977 if (errno == EINTR)
1978 continue;
1979
1980 return -errno;
1981 }
1982
1983 if (si.si_pid <= 0)
1984 break;
1985
1986 if (si.si_code == CLD_EXITED || si.si_code == CLD_KILLED || si.si_code == CLD_DUMPED) {
1987 char *name = NULL;
1988
1989 get_process_name(si.si_pid, &name);
1990 log_debug("Got SIGCHLD for process %lu (%s)", (unsigned long) si.si_pid, strna(name));
1991 free(name);
1992 }
1993
1994 /* Let's flush any message the dying child might still
1995 * have queued for us. This ensures that the process
1996 * still exists in /proc so that we can figure out
1997 * which cgroup and hence unit it belongs to. */
1998 if ((r = manager_process_notify_fd(m)) < 0)
1999 return r;
2000
2001 /* And now figure out the unit this belongs to */
2002 if (!(u = hashmap_get(m->watch_pids, LONG_TO_PTR(si.si_pid))))
2003 u = cgroup_unit_by_pid(m, si.si_pid);
2004
2005 /* And now, we actually reap the zombie. */
2006 if (waitid(P_PID, si.si_pid, &si, WEXITED) < 0) {
2007 if (errno == EINTR)
2008 continue;
2009
2010 return -errno;
2011 }
2012
2013 if (si.si_code != CLD_EXITED && si.si_code != CLD_KILLED && si.si_code != CLD_DUMPED)
2014 continue;
2015
2016 log_debug("Child %lu died (code=%s, status=%i/%s)",
2017 (long unsigned) si.si_pid,
2018 sigchld_code_to_string(si.si_code),
2019 si.si_status,
2020 strna(si.si_code == CLD_EXITED
2021 ? exit_status_to_string(si.si_status, EXIT_STATUS_FULL)
2022 : signal_to_string(si.si_status)));
2023
2024 if (!u)
2025 continue;
2026
2027 log_debug("Child %lu belongs to %s", (long unsigned) si.si_pid, u->meta.id);
2028
2029 hashmap_remove(m->watch_pids, LONG_TO_PTR(si.si_pid));
2030 UNIT_VTABLE(u)->sigchld_event(u, si.si_pid, si.si_code, si.si_status);
2031 }
2032
2033 return 0;
2034 }
2035
2036 static int manager_start_target(Manager *m, const char *name, JobMode mode) {
2037 int r;
2038 DBusError error;
2039
2040 dbus_error_init(&error);
2041
2042 log_debug("Activating special unit %s", name);
2043
2044 if ((r = manager_add_job_by_name(m, JOB_START, name, mode, true, &error, NULL)) < 0)
2045 log_error("Failed to enqueue %s job: %s", name, bus_error(&error, r));
2046
2047 dbus_error_free(&error);
2048
2049 return r;
2050 }
2051
2052 static int manager_process_signal_fd(Manager *m) {
2053 ssize_t n;
2054 struct signalfd_siginfo sfsi;
2055 bool sigchld = false;
2056
2057 assert(m);
2058
2059 for (;;) {
2060 char *p = NULL;
2061
2062 if ((n = read(m->signal_watch.fd, &sfsi, sizeof(sfsi))) != sizeof(sfsi)) {
2063
2064 if (n >= 0)
2065 return -EIO;
2066
2067 if (errno == EINTR || errno == EAGAIN)
2068 break;
2069
2070 return -errno;
2071 }
2072
2073 if (sfsi.ssi_pid > 0)
2074 get_process_name(sfsi.ssi_pid, &p);
2075
2076 log_debug("Received SIG%s from PID %lu (%s)",
2077 strna(signal_to_string(sfsi.ssi_signo)),
2078 (unsigned long) sfsi.ssi_pid, strna(p));
2079 free(p);
2080
2081 switch (sfsi.ssi_signo) {
2082
2083 case SIGCHLD:
2084 sigchld = true;
2085 break;
2086
2087 case SIGTERM:
2088 if (m->running_as == MANAGER_SYSTEM) {
2089 /* This is for compatibility with the
2090 * original sysvinit */
2091 m->exit_code = MANAGER_REEXECUTE;
2092 break;
2093 }
2094
2095 /* Fall through */
2096
2097 case SIGINT:
2098 if (m->running_as == MANAGER_SYSTEM) {
2099 manager_start_target(m, SPECIAL_CTRL_ALT_DEL_TARGET, JOB_REPLACE);
2100 break;
2101 }
2102
2103 /* Run the exit target if there is one, if not, just exit. */
2104 if (manager_start_target(m, SPECIAL_EXIT_TARGET, JOB_REPLACE) < 0) {
2105 m->exit_code = MANAGER_EXIT;
2106 return 0;
2107 }
2108
2109 break;
2110
2111 case SIGWINCH:
2112 if (m->running_as == MANAGER_SYSTEM)
2113 manager_start_target(m, SPECIAL_KBREQUEST_TARGET, JOB_REPLACE);
2114
2115 /* This is a nop on non-init */
2116 break;
2117
2118 case SIGPWR:
2119 if (m->running_as == MANAGER_SYSTEM)
2120 manager_start_target(m, SPECIAL_SIGPWR_TARGET, JOB_REPLACE);
2121
2122 /* This is a nop on non-init */
2123 break;
2124
2125 case SIGUSR1: {
2126 Unit *u;
2127
2128 u = manager_get_unit(m, SPECIAL_DBUS_SERVICE);
2129
2130 if (!u || UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u))) {
2131 log_info("Trying to reconnect to bus...");
2132 bus_init(m, true);
2133 }
2134
2135 if (!u || !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u))) {
2136 log_info("Loading D-Bus service...");
2137 manager_start_target(m, SPECIAL_DBUS_SERVICE, JOB_REPLACE);
2138 }
2139
2140 break;
2141 }
2142
2143 case SIGUSR2: {
2144 FILE *f;
2145 char *dump = NULL;
2146 size_t size;
2147
2148 if (!(f = open_memstream(&dump, &size))) {
2149 log_warning("Failed to allocate memory stream.");
2150 break;
2151 }
2152
2153 manager_dump_units(m, f, "\t");
2154 manager_dump_jobs(m, f, "\t");
2155
2156 if (ferror(f)) {
2157 fclose(f);
2158 free(dump);
2159 log_warning("Failed to write status stream");
2160 break;
2161 }
2162
2163 fclose(f);
2164 log_dump(LOG_INFO, dump);
2165 free(dump);
2166
2167 break;
2168 }
2169
2170 case SIGHUP:
2171 m->exit_code = MANAGER_RELOAD;
2172 break;
2173
2174 default: {
2175 /* Starting SIGRTMIN+0 */
2176 static const char * const target_table[] = {
2177 [0] = SPECIAL_DEFAULT_TARGET,
2178 [1] = SPECIAL_RESCUE_TARGET,
2179 [2] = SPECIAL_EMERGENCY_TARGET,
2180 [3] = SPECIAL_HALT_TARGET,
2181 [4] = SPECIAL_POWEROFF_TARGET,
2182 [5] = SPECIAL_REBOOT_TARGET,
2183 [6] = SPECIAL_KEXEC_TARGET
2184 };
2185
2186 /* Starting SIGRTMIN+13, so that target halt and system halt are 10 apart */
2187 static const ManagerExitCode code_table[] = {
2188 [0] = MANAGER_HALT,
2189 [1] = MANAGER_POWEROFF,
2190 [2] = MANAGER_REBOOT,
2191 [3] = MANAGER_KEXEC
2192 };
2193
2194 if ((int) sfsi.ssi_signo >= SIGRTMIN+0 &&
2195 (int) sfsi.ssi_signo < SIGRTMIN+(int) ELEMENTSOF(target_table)) {
2196 manager_start_target(m, target_table[sfsi.ssi_signo - SIGRTMIN],
2197 (sfsi.ssi_signo == SIGRTMIN+1 || sfsi.ssi_signo == SIGRTMIN+2) ? JOB_ISOLATE : JOB_REPLACE);
2198 break;
2199 }
2200
2201 if ((int) sfsi.ssi_signo >= SIGRTMIN+13 &&
2202 (int) sfsi.ssi_signo < SIGRTMIN+13+(int) ELEMENTSOF(code_table)) {
2203 m->exit_code = code_table[sfsi.ssi_signo - SIGRTMIN - 13];
2204 break;
2205 }
2206
2207 switch (sfsi.ssi_signo - SIGRTMIN) {
2208
2209 case 20:
2210 log_debug("Enabling showing of status.");
2211 m->show_status = true;
2212 break;
2213
2214 case 21:
2215 log_debug("Disabling showing of status.");
2216 m->show_status = false;
2217 break;
2218
2219 default:
2220 log_warning("Got unhandled signal <%s>.", strna(signal_to_string(sfsi.ssi_signo)));
2221 }
2222 }
2223 }
2224 }
2225
2226 if (sigchld)
2227 return manager_dispatch_sigchld(m);
2228
2229 return 0;
2230 }
2231
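/* Dispatch a single epoll event to the subsystem identified by the
 * Watch structure stored in its data pointer. */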
2232 static int process_event(Manager *m, struct epoll_event *ev) {
2233 int r;
2234 Watch *w;
2235
2236 assert(m);
2237 assert(ev);
2238
2239 assert_se(w = ev->data.ptr);
2240
2241 if (w->type == WATCH_INVALID)
2242 return 0;
2243
2244 switch (w->type) {
2245
2246 case WATCH_SIGNAL:
2247
2248 /* An incoming signal? */
2249 if (ev->events != EPOLLIN)
2250 return -EINVAL;
2251
2252 if ((r = manager_process_signal_fd(m)) < 0)
2253 return r;
2254
2255 break;
2256
2257 case WATCH_NOTIFY:
2258
2259 /* An incoming daemon notification event? */
2260 if (ev->events != EPOLLIN)
2261 return -EINVAL;
2262
2263 if ((r = manager_process_notify_fd(m)) < 0)
2264 return r;
2265
2266 break;
2267
2268 case WATCH_FD:
2269
2270 /* Some fd event, to be dispatched to the units */
2271 UNIT_VTABLE(w->data.unit)->fd_event(w->data.unit, w->fd, ev->events, w);
2272 break;
2273
2274 case WATCH_UNIT_TIMER:
2275 case WATCH_JOB_TIMER: {
2276 uint64_t v;
2277 ssize_t k;
2278
2279 /* Some timer event, to be dispatched to the units */
2280 if ((k = read(w->fd, &v, sizeof(v))) != sizeof(v)) {
2281
2282 if (k < 0 && (errno == EINTR || errno == EAGAIN))
2283 break;
2284
2285 return k < 0 ? -errno : -EIO;
2286 }
2287
2288 if (w->type == WATCH_UNIT_TIMER)
2289 UNIT_VTABLE(w->data.unit)->timer_event(w->data.unit, v, w);
2290 else
2291 job_timer_event(w->data.job, v, w);
2292 break;
2293 }
2294
2295 case WATCH_MOUNT:
2296 /* Some mount table change, intended for the mount subsystem */
2297 mount_fd_event(m, ev->events);
2298 break;
2299
2300 case WATCH_SWAP:
2301 /* Some swap table change, intended for the swap subsystem */
2302 swap_fd_event(m, ev->events);
2303 break;
2304
2305 case WATCH_UDEV:
2306 /* Some notification from udev, intended for the device subsystem */
2307 device_fd_event(m, ev->events);
2308 break;
2309
2310 case WATCH_DBUS_WATCH:
2311 bus_watch_event(m, w, ev->events);
2312 break;
2313
2314 case WATCH_DBUS_TIMEOUT:
2315 bus_timeout_event(m, w, ev->events);
2316 break;
2317
2318 default:
2319 log_error("event type=%i", w->type);
2320 assert_not_reached("Unknown epoll event type.");
2321 }
2322
2323 return 0;
2324 }
2325
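/* The main event loop: work through the internal queues first, then
 * block in epoll_wait() and dispatch one event per iteration, until
 * exit_code moves away from MANAGER_RUNNING. */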
2326 int manager_loop(Manager *m) {
2327 int r;
2328
2329 RATELIMIT_DEFINE(rl, 1*USEC_PER_SEC, 1000);
2330
2331 assert(m);
2332 m->exit_code = MANAGER_RUNNING;
2333
2334 /* Release the path cache */
2335 set_free_free(m->unit_path_cache);
2336 m->unit_path_cache = NULL;
2337
2338 manager_check_finished(m);
2339
2340 /* There might still be some zombies hanging around from
2341 * before we were exec()'ed. Let's reap them */
2342 if ((r = manager_dispatch_sigchld(m)) < 0)
2343 return r;
2344
2345 while (m->exit_code == MANAGER_RUNNING) {
2346 struct epoll_event event;
2347 int n;
2348
2349 if (!ratelimit_test(&rl)) {
2350 /* Yay, something is going seriously wrong, pause a little */
2351 log_warning("Looping too fast. Throttling execution a little.");
2352 sleep(1);
2353 }
2354
2355 if (manager_dispatch_load_queue(m) > 0)
2356 continue;
2357
2358 if (manager_dispatch_run_queue(m) > 0)
2359 continue;
2360
2361 if (bus_dispatch(m) > 0)
2362 continue;
2363
2364 if (manager_dispatch_cleanup_queue(m) > 0)
2365 continue;
2366
2367 if (manager_dispatch_gc_queue(m) > 0)
2368 continue;
2369
2370 if (manager_dispatch_dbus_queue(m) > 0)
2371 continue;
2372
2373 if (swap_dispatch_reload(m) > 0)
2374 continue;
2375
2376 if ((n = epoll_wait(m->epoll_fd, &event, 1, -1)) < 0) {
2377
2378 if (errno == EINTR)
2379 continue;
2380
2381 return -errno;
2382 }
2383
2384 assert(n == 1);
2385
2386 if ((r = process_event(m, &event)) < 0)
2387 return r;
2388 }
2389
2390 return m->exit_code;
2391 }
2392
2393 int manager_get_unit_from_dbus_path(Manager *m, const char *s, Unit **_u) {
2394 char *n;
2395 Unit *u;
2396
2397 assert(m);
2398 assert(s);
2399 assert(_u);
2400
2401 if (!startswith(s, "/org/freedesktop/systemd1/unit/"))
2402 return -EINVAL;
2403
2404 if (!(n = bus_path_unescape(s+31))) /* 31 = strlen("/org/freedesktop/systemd1/unit/") */
2405 return -ENOMEM;
2406
2407 u = manager_get_unit(m, n);
2408 free(n);
2409
2410 if (!u)
2411 return -ENOENT;
2412
2413 *_u = u;
2414
2415 return 0;
2416 }
2417
2418 int manager_get_job_from_dbus_path(Manager *m, const char *s, Job **_j) {
2419 Job *j;
2420 unsigned id;
2421 int r;
2422
2423 assert(m);
2424 assert(s);
2425 assert(_j);
2426
2427 if (!startswith(s, "/org/freedesktop/systemd1/job/"))
2428 return -EINVAL;
2429
2430 if ((r = safe_atou(s + 30, &id)) < 0) /* 30 = strlen("/org/freedesktop/systemd1/job/") */
2431 return r;
2432
2433 if (!(j = manager_get_job(m, id)))
2434 return -ENOENT;
2435
2436 *_j = j;
2437
2438 return 0;
2439 }
2440
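/* Emit an audit record for a service state change, if audit support
 * is compiled in and the audit fd is open. */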
2441 void manager_send_unit_audit(Manager *m, Unit *u, int type, bool success) {
2442
2443 #ifdef HAVE_AUDIT
2444 char *p;
2445
2446 if (m->audit_fd < 0)
2447 return;
2448
2449 /* Don't generate audit events if the service was already
2450 * started and we're just deserializing */
2451 if (m->n_deserializing > 0)
2452 return;
2453
2454 if (m->running_as != MANAGER_SYSTEM)
2455 return;
2456
2457 if (u->meta.type != UNIT_SERVICE)
2458 return;
2459
2460 if (!(p = unit_name_to_prefix_and_instance(u->meta.id))) {
2461 log_error("Failed to allocate unit name for audit message: %s", strerror(ENOMEM));
2462 return;
2463 }
2464
2465 if (audit_log_user_comm_message(m->audit_fd, type, "", p, NULL, NULL, NULL, success) < 0) {
2466 log_warning("Failed to send audit message: %m");
2467
2468 if (errno == EPERM) {
2469 /* We aren't allowed to send audit messages?
2470 * Then let's not retry again, to avoid
2471 * spamming the user with the same
2472 * messages over and over. */
2473
2474 audit_close(m->audit_fd);
2475 m->audit_fd = -1;
2476 }
2477 }
2478
2479 free(p);
2480 #endif
2481
2482 }
2483
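/* Send a status update for the given unit to Plymouth via its
 * abstract socket; since the socket is non-blocking the message is
 * simply dropped if Plymouth isn't around. */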
2484 void manager_send_unit_plymouth(Manager *m, Unit *u) {
2485 int fd = -1;
2486 union sockaddr_union sa;
2487 int n = 0;
2488 char *message = NULL;
2489
2490 /* Don't generate plymouth events if the service was already
2491 * started and we're just deserializing */
2492 if (m->n_deserializing > 0)
2493 return;
2494
2495 if (m->running_as != MANAGER_SYSTEM)
2496 return;
2497
2498 if (u->meta.type != UNIT_SERVICE &&
2499 u->meta.type != UNIT_MOUNT &&
2500 u->meta.type != UNIT_SWAP)
2501 return;
2502
2503 /* We set SOCK_NONBLOCK here so that we rather drop the
2504 * message than wait for plymouth */
2505 if ((fd = socket(AF_UNIX, SOCK_STREAM|SOCK_CLOEXEC|SOCK_NONBLOCK, 0)) < 0) {
2506 log_error("socket() failed: %m");
2507 return;
2508 }
2509
2510 zero(sa);
2511 sa.sa.sa_family = AF_UNIX;
2512 strncpy(sa.un.sun_path+1, "/org/freedesktop/plymouthd", sizeof(sa.un.sun_path)-1);
2513 if (connect(fd, &sa.sa, offsetof(struct sockaddr_un, sun_path) + 1 + strlen(sa.un.sun_path+1)) < 0) {
2514
2515 if (errno != EPIPE &&
2516 errno != EAGAIN &&
2517 errno != ENOENT &&
2518 errno != ECONNREFUSED &&
2519 errno != ECONNRESET &&
2520 errno != ECONNABORTED)
2521 log_error("connect() failed: %m");
2522
2523 goto finish;
2524 }
2525
2526 if (asprintf(&message, "U\002%c%s%n", (int) (strlen(u->meta.id) + 1), u->meta.id, &n) < 0) {
2527 log_error("Out of memory");
2528 goto finish;
2529 }
2530
2531 errno = 0;
2532 if (write(fd, message, n + 1) != n + 1) {
2533
2534 if (errno != EPIPE &&
2535 errno != EAGAIN &&
2536 errno != ENOENT &&
2537 errno != ECONNREFUSED &&
2538 errno != ECONNRESET &&
2539 errno != ECONNABORTED)
2540 log_error("Failed to write Plymouth message: %m");
2541
2542 goto finish;
2543 }
2544
2545 finish:
2546 if (fd >= 0)
2547 close_nointr_nofail(fd);
2548
2549 free(message);
2550 }
2551
2552 void manager_dispatch_bus_name_owner_changed(
2553 Manager *m,
2554 const char *name,
2555 const char* old_owner,
2556 const char *new_owner) {
2557
2558 Unit *u;
2559
2560 assert(m);
2561 assert(name);
2562
2563 if (!(u = hashmap_get(m->watch_bus, name)))
2564 return;
2565
2566 UNIT_VTABLE(u)->bus_name_owner_change(u, name, old_owner, new_owner);
2567 }
2568
2569 void manager_dispatch_bus_query_pid_done(
2570 Manager *m,
2571 const char *name,
2572 pid_t pid) {
2573
2574 Unit *u;
2575
2576 assert(m);
2577 assert(name);
2578 assert(pid >= 1);
2579
2580 if (!(u = hashmap_get(m->watch_bus, name)))
2581 return;
2582
2583 UNIT_VTABLE(u)->bus_query_pid_done(u, name, pid);
2584 }
2585
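/* Create an already-unlinked temporary file to hold the serialized
 * manager state; the caller only ever accesses it through the
 * returned FILE handle. */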
2586 int manager_open_serialization(Manager *m, FILE **_f) {
2587 char *path = NULL;
2588 mode_t saved_umask;
2589 int fd;
2590 FILE *f;
2591
2592 assert(_f);
2593
2594 if (m->running_as == MANAGER_SYSTEM)
2595 asprintf(&path, "/dev/.run/systemd/dump-%lu-XXXXXX", (unsigned long) getpid());
2596 else
2597 asprintf(&path, "/tmp/systemd-dump-%lu-XXXXXX", (unsigned long) getpid());
2598
2599 if (!path)
2600 return -ENOMEM;
2601
2602 saved_umask = umask(0077);
2603 fd = mkostemp(path, O_RDWR|O_CLOEXEC);
2604 umask(saved_umask);
2605
2606 if (fd < 0) {
2607 free(path);
2608 return -errno;
2609 }
2610
2611 unlink(path);
2612
2613 log_debug("Serializing state to %s", path);
2614 free(path);
2615
2616 if (!(f = fdopen(fd, "w+")))
2617 return -errno;
2618
2619 *_f = f;
2620
2621 return 0;
2622 }
2623
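/* Write the manager-level timestamps followed by one section per
 * serializable unit, each introduced by the unit's id as a start
 * marker. */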
2624 int manager_serialize(Manager *m, FILE *f, FDSet *fds) {
2625 Iterator i;
2626 Unit *u;
2627 const char *t;
2628 int r;
2629
2630 assert(m);
2631 assert(f);
2632 assert(fds);
2633
2634 dual_timestamp_serialize(f, "initrd-timestamp", &m->initrd_timestamp);
2635 dual_timestamp_serialize(f, "startup-timestamp", &m->startup_timestamp);
2636 dual_timestamp_serialize(f, "finish-timestamp", &m->finish_timestamp);
2637
2638 fputc('\n', f);
2639
2640 HASHMAP_FOREACH_KEY(u, t, m->units, i) {
2641 if (u->meta.id != t)
2642 continue;
2643
2644 if (!unit_can_serialize(u))
2645 continue;
2646
2647 /* Start marker */
2648 fputs(u->meta.id, f);
2649 fputc('\n', f);
2650
2651 if ((r = unit_serialize(u, f, fds)) < 0)
2652 return r;
2653 }
2654
2655 if (ferror(f))
2656 return -EIO;
2657
2658 return 0;
2659 }
2660
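/* Read back what manager_serialize() wrote: first the manager-level
 * items up to the empty line, then the per-unit sections. */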
2661 int manager_deserialize(Manager *m, FILE *f, FDSet *fds) {
2662 int r = 0;
2663
2664 assert(m);
2665 assert(f);
2666
2667 log_debug("Deserializing state...");
2668
2669 m->n_deserializing ++;
2670
2671 for (;;) {
2672 char line[1024], *l;
2673
2674 if (!fgets(line, sizeof(line), f)) {
2675 if (feof(f))
2676 r = 0;
2677 else
2678 r = -errno;
2679
2680 goto finish;
2681 }
2682
2683 char_array_0(line);
2684 l = strstrip(line);
2685
2686 if (l[0] == 0)
2687 break;
2688
2689 if (startswith(l, "initrd-timestamp="))
2690 dual_timestamp_deserialize(l+17, &m->initrd_timestamp);
2691 else if (startswith(l, "startup-timestamp="))
2692 dual_timestamp_deserialize(l+18, &m->startup_timestamp);
2693 else if (startswith(l, "finish-timestamp="))
2694 dual_timestamp_deserialize(l+17, &m->finish_timestamp);
2695 else
2696 log_debug("Unknown serialization item '%s'", l);
2697 }
2698
2699 for (;;) {
2700 Unit *u;
2701 char name[UNIT_NAME_MAX+2];
2702
2703 /* Start marker */
2704 if (!fgets(name, sizeof(name), f)) {
2705 if (feof(f))
2706 r = 0;
2707 else
2708 r = -errno;
2709
2710 goto finish;
2711 }
2712
2713 char_array_0(name);
2714
2715 if ((r = manager_load_unit(m, strstrip(name), NULL, NULL, &u)) < 0)
2716 goto finish;
2717
2718 if ((r = unit_deserialize(u, f, fds)) < 0)
2719 goto finish;
2720 }
2721
2722 finish:
2723 if (ferror(f)) {
2724 r = -EIO;
2725 /* fall through: still balance n_deserializing below */
2726 }
2727
2728 assert(m->n_deserializing > 0);
2729 m->n_deserializing --;
2730
2731 return r;
2732 }
2733
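/* Reload: serialize the current state to a temporary file, flush all
 * units and jobs, re-run the generators and re-enumerate, then
 * deserialize the saved state and coldplug the units again. */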
2734 int manager_reload(Manager *m) {
2735 int r, q;
2736 FILE *f;
2737 FDSet *fds;
2738
2739 assert(m);
2740
2741 if ((r = manager_open_serialization(m, &f)) < 0)
2742 return r;
2743
2744 if (!(fds = fdset_new())) {
2745 r = -ENOMEM;
2746 goto finish;
2747 }
2748
2749 if ((r = manager_serialize(m, f, fds)) < 0)
2750 goto finish;
2751
2752 if (fseeko(f, 0, SEEK_SET) < 0) {
2753 r = -errno;
2754 goto finish;
2755 }
2756
2757 /* From here on there is no way back. */
2758 manager_clear_jobs_and_units(m);
2759 manager_undo_generators(m);
2760
2761 /* Find new unit paths */
2762 lookup_paths_free(&m->lookup_paths);
2763 if ((q = lookup_paths_init(&m->lookup_paths, m->running_as)) < 0)
2764 r = q;
2765
2766 manager_run_generators(m);
2767
2768 manager_build_unit_path_cache(m);
2769
2770 m->n_deserializing ++;
2771
2772 /* First, enumerate what we can from all config files */
2773 if ((q = manager_enumerate(m)) < 0)
2774 r = q;
2775
2776 /* Second, deserialize our stored data */
2777 if ((q = manager_deserialize(m, f, fds)) < 0)
2778 r = q;
2779
2780 fclose(f);
2781 f = NULL;
2782
2783 /* Third, fire things up! */
2784 if ((q = manager_coldplug(m)) < 0)
2785 r = q;
2786
2787 assert(m->n_deserializing > 0);
2788 m->n_deserializing --;
2789
2790 finish:
2791 if (f)
2792 fclose(f);
2793
2794 if (fds)
2795 fdset_free(fds);
2796
2797 return r;
2798 }
2799
2800 bool manager_is_booting_or_shutting_down(Manager *m) {
2801 Unit *u;
2802
2803 assert(m);
2804
2805 /* Is the initial job still around? */
2806 if (manager_get_job(m, 1))
2807 return true;
2808
2809 /* Is there a job for the shutdown target? */
2810 if ((u = manager_get_unit(m, SPECIAL_SHUTDOWN_TARGET)))
2811 return !!u->meta.job;
2812
2813 return false;
2814 }
2815
2816 void manager_reset_failed(Manager *m) {
2817 Unit *u;
2818 Iterator i;
2819
2820 assert(m);
2821
2822 HASHMAP_FOREACH(u, m->units, i)
2823 unit_reset_failed(u);
2824 }
2825
2826 bool manager_unit_pending_inactive(Manager *m, const char *name) {
2827 Unit *u;
2828
2829 assert(m);
2830 assert(name);
2831
2832 /* Returns true if the unit is inactive or going down */
2833 if (!(u = manager_get_unit(m, name)))
2834 return true;
2835
2836 return unit_pending_inactive(u);
2837 }
2838
2839 void manager_check_finished(Manager *m) {
2840 char userspace[FORMAT_TIMESPAN_MAX], initrd[FORMAT_TIMESPAN_MAX], kernel[FORMAT_TIMESPAN_MAX], sum[FORMAT_TIMESPAN_MAX];
2841
2842 assert(m);
2843
2844 if (dual_timestamp_is_set(&m->finish_timestamp))
2845 return;
2846
2847 if (hashmap_size(m->jobs) > 0)
2848 return;
2849
2850 dual_timestamp_get(&m->finish_timestamp);
2851
2852 if (m->running_as == MANAGER_SYSTEM && detect_container(NULL) <= 0) {
2853
2854 if (dual_timestamp_is_set(&m->initrd_timestamp)) {
2855 log_info("Startup finished in %s (kernel) + %s (initrd) + %s (userspace) = %s.",
2856 format_timespan(kernel, sizeof(kernel),
2857 m->initrd_timestamp.monotonic),
2858 format_timespan(initrd, sizeof(initrd),
2859 m->startup_timestamp.monotonic - m->initrd_timestamp.monotonic),
2860 format_timespan(userspace, sizeof(userspace),
2861 m->finish_timestamp.monotonic - m->startup_timestamp.monotonic),
2862 format_timespan(sum, sizeof(sum),
2863 m->finish_timestamp.monotonic));
2864 } else
2865 log_info("Startup finished in %s (kernel) + %s (userspace) = %s.",
2866 format_timespan(kernel, sizeof(kernel),
2867 m->startup_timestamp.monotonic),
2868 format_timespan(userspace, sizeof(userspace),
2869 m->finish_timestamp.monotonic - m->startup_timestamp.monotonic),
2870 format_timespan(sum, sizeof(sum),
2871 m->finish_timestamp.monotonic));
2872 } else
2873 log_debug("Startup finished in %s.",
2874 format_timespan(userspace, sizeof(userspace),
2875 m->finish_timestamp.monotonic - m->startup_timestamp.monotonic));
2876
2877 }
2878
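/* Run all generator binaries found in the generator directory,
 * pointing them at a freshly created temporary unit directory which
 * is added to the unit search path unless it stayed empty. */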
2879 void manager_run_generators(Manager *m) {
2880 DIR *d = NULL;
2881 const char *generator_path;
2882 const char *argv[3];
2883
2884 assert(m);
2885
2886 generator_path = m->running_as == MANAGER_SYSTEM ? SYSTEM_GENERATOR_PATH : USER_GENERATOR_PATH;
2887 if (!(d = opendir(generator_path))) {
2888
2889 if (errno == ENOENT)
2890 return;
2891
2892 log_error("Failed to enumerate generator directory: %m");
2893 return;
2894 }
2895
2896 if (!m->generator_unit_path) {
2897 char *p;
2898 char system_path[] = "/dev/.run/systemd/generator-XXXXXX",
2899 user_path[] = "/tmp/systemd-generator-XXXXXX";
2900
2901 if (!(p = mkdtemp(m->running_as == MANAGER_SYSTEM ? system_path : user_path))) {
2902 log_error("Failed to generate generator directory: %m");
2903 goto finish;
2904 }
2905
2906 if (!(m->generator_unit_path = strdup(p))) {
2907 log_error("Failed to allocate generator unit path.");
2908 goto finish;
2909 }
2910 }
2911
2912 argv[0] = NULL; /* Leave this empty, execute_directory() will fill something in */
2913 argv[1] = m->generator_unit_path;
2914 argv[2] = NULL;
2915
2916 execute_directory(generator_path, d, (char**) argv);
2917
2918 if (rmdir(m->generator_unit_path) >= 0) {
2919 /* Uh? we were able to remove this dir? I guess that
2920 * means the directory was empty, hence let's shortcut
2921 * this */
2922
2923 free(m->generator_unit_path);
2924 m->generator_unit_path = NULL;
2925 goto finish;
2926 }
2927
2928 if (!strv_find(m->lookup_paths.unit_path, m->generator_unit_path)) {
2929 char **l;
2930
2931 if (!(l = strv_append(m->lookup_paths.unit_path, m->generator_unit_path))) {
2932 log_error("Failed to add generator directory to unit search path: %m");
2933 goto finish;
2934 }
2935
2936 strv_free(m->lookup_paths.unit_path);
2937 m->lookup_paths.unit_path = l;
2938
2939 log_debug("Added generator unit path %s to search path.", m->generator_unit_path);
2940 }
2941
2942 finish:
2943 if (d)
2944 closedir(d);
2945 }
2946
2947 void manager_undo_generators(Manager *m) {
2948 assert(m);
2949
2950 if (!m->generator_unit_path)
2951 return;
2952
2953 strv_remove(m->lookup_paths.unit_path, m->generator_unit_path);
2954 rm_rf(m->generator_unit_path, false, true);
2955
2956 free(m->generator_unit_path);
2957 m->generator_unit_path = NULL;
2958 }
2959
2960 int manager_set_default_controllers(Manager *m, char **controllers) {
2961 char **l;
2962
2963 assert(m);
2964
2965 if (!(l = strv_copy(controllers)))
2966 return -ENOMEM;
2967
2968 strv_free(m->default_controllers);
2969 m->default_controllers = l;
2970
2971 return 0;
2972 }
2973
2974 void manager_recheck_syslog(Manager *m) {
2975 Unit *u;
2976
2977 assert(m);
2978
2979 if (m->running_as != MANAGER_SYSTEM)
2980 return;
2981
2982 if ((u = manager_get_unit(m, SPECIAL_SYSLOG_SOCKET))) {
2983 SocketState state;
2984
2985 state = SOCKET(u)->state;
2986
2987 if (state != SOCKET_DEAD &&
2988 state != SOCKET_FAILED &&
2989 state != SOCKET_RUNNING) {
2990
2991 /* Hmm, the socket is not set up, or is still
2992 * listening, so we'd better not try to use
2993 * it. Note that we have no problem if the
2994 * socket is completely down, since there
2995 * might be a foreign /dev/log socket around
2996 * and we want to make use of that.
2997 */
2998
2999 log_close_syslog();
3000 return;
3001 }
3002 }
3003
3004 if ((u = manager_get_unit(m, SPECIAL_SYSLOG_TARGET)))
3005 if (TARGET(u)->state != TARGET_ACTIVE) {
3006 log_close_syslog();
3007 return;
3008 }
3009
3010 /* Hmm, OK, so the socket is either fully up, or fully down,
3011 * and the target is up, so let's make use of the socket */
3012 log_open();
3013 }
3014
3015 static const char* const manager_running_as_table[_MANAGER_RUNNING_AS_MAX] = {
3016 [MANAGER_SYSTEM] = "system",
3017 [MANAGER_USER] = "user"
3018 };
3019
3020 DEFINE_STRING_TABLE_LOOKUP(manager_running_as, ManagerRunningAs);