1 /*-*- Mode: C; c-basic-offset: 8 -*-*/
2
3 /***
4 This file is part of systemd.
5
6 Copyright 2010 Lennart Poettering
7
8 systemd is free software; you can redistribute it and/or modify it
9 under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2 of the License, or
11 (at your option) any later version.
12
13 systemd is distributed in the hope that it will be useful, but
14 WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with systemd; If not, see <http://www.gnu.org/licenses/>.
20 ***/
21
22 #include <assert.h>
23 #include <errno.h>
24 #include <string.h>
25 #include <sys/epoll.h>
26 #include <signal.h>
27 #include <sys/signalfd.h>
28 #include <sys/wait.h>
29 #include <unistd.h>
30 #include <sys/poll.h>
31 #include <sys/reboot.h>
32 #include <sys/ioctl.h>
33 #include <linux/kd.h>
34 #include <termios.h>
35 #include <fcntl.h>
36 #include <sys/types.h>
37 #include <sys/stat.h>
38 #include <dirent.h>
39
40 #ifdef HAVE_AUDIT
41 #include <libaudit.h>
42 #endif
43
44 #include "manager.h"
45 #include "hashmap.h"
46 #include "macro.h"
47 #include "strv.h"
48 #include "log.h"
49 #include "util.h"
50 #include "ratelimit.h"
51 #include "cgroup.h"
52 #include "mount-setup.h"
53 #include "unit-name.h"
54 #include "dbus-unit.h"
55 #include "dbus-job.h"
56 #include "missing.h"
57 #include "path-lookup.h"
58 #include "special.h"
59 #include "bus-errors.h"
60
61 /* As soon as 16 units are in our GC queue, make sure to run a gc sweep */
62 #define GC_QUEUE_ENTRIES_MAX 16
63
64 /* As soon as 10s have passed since a unit was added to our GC queue, make sure to run a gc sweep */
65 #define GC_QUEUE_USEC_MAX (10*USEC_PER_SEC)
66
67 /* Where clients shall send notification messages to */
68 #define NOTIFY_SOCKET "/org/freedesktop/systemd1/notify"
69
70 static int manager_setup_notify(Manager *m) {
71 union {
72 struct sockaddr sa;
73 struct sockaddr_un un;
74 } sa;
75 struct epoll_event ev;
76 int one = 1;
77
78 assert(m);
79
80 m->notify_watch.type = WATCH_NOTIFY;
81 if ((m->notify_watch.fd = socket(AF_UNIX, SOCK_DGRAM|SOCK_CLOEXEC|SOCK_NONBLOCK, 0)) < 0) {
82 log_error("Failed to allocate notification socket: %m");
83 return -errno;
84 }
85
86 zero(sa);
87 sa.sa.sa_family = AF_UNIX;
88
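                /* The socket name is written at sun_path+1, leaving the leading NUL
                 * byte in place, so the socket lives in the abstract namespace and
                 * needs no filesystem cleanup. A manager that is not PID 1 appends
                 * a random suffix so that per-user instances do not collide. */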
89 if (getpid() != 1)
90 snprintf(sa.un.sun_path+1, sizeof(sa.un.sun_path)-1, NOTIFY_SOCKET "/%llu", random_ull());
91 else
92 strncpy(sa.un.sun_path+1, NOTIFY_SOCKET, sizeof(sa.un.sun_path)-1);
93
94 if (bind(m->notify_watch.fd, &sa.sa, sizeof(sa_family_t) + 1 + strlen(sa.un.sun_path+1)) < 0) {
95 log_error("bind() failed: %m");
96 return -errno;
97 }
98
99 if (setsockopt(m->notify_watch.fd, SOL_SOCKET, SO_PASSCRED, &one, sizeof(one)) < 0) {
100 log_error("SO_PASSCRED failed: %m");
101 return -errno;
102 }
103
104 zero(ev);
105 ev.events = EPOLLIN;
106 ev.data.ptr = &m->notify_watch;
107
108 if (epoll_ctl(m->epoll_fd, EPOLL_CTL_ADD, m->notify_watch.fd, &ev) < 0)
109 return -errno;
110
111 if (!(m->notify_socket = strdup(sa.un.sun_path+1)))
112 return -ENOMEM;
113
114 return 0;
115 }
116
117 static int enable_special_signals(Manager *m) {
118         int fd;
119
120 assert(m);
121
122 /* Enable that we get SIGINT on control-alt-del */
123 if (reboot(RB_DISABLE_CAD) < 0)
124 log_warning("Failed to enable ctrl-alt-del handling: %m");
125
126 if ((fd = open_terminal("/dev/tty0", O_RDWR|O_NOCTTY)) < 0)
127 log_warning("Failed to open /dev/tty0: %m");
128 else {
129 /* Enable that we get SIGWINCH on kbrequest */
130 if (ioctl(fd, KDSIGACCEPT, SIGWINCH) < 0)
131 log_warning("Failed to enable kbrequest handling: %s", strerror(errno));
132
133 close_nointr_nofail(fd);
134 }
135
136 return 0;
137 }
138
139 static int manager_setup_signals(Manager *m) {
140 sigset_t mask;
141 struct epoll_event ev;
142 struct sigaction sa;
143
144 assert(m);
145
146 /* We are not interested in SIGSTOP and friends. */
147 zero(sa);
148 sa.sa_handler = SIG_DFL;
149 sa.sa_flags = SA_NOCLDSTOP|SA_RESTART;
150 assert_se(sigaction(SIGCHLD, &sa, NULL) == 0);
151
152 assert_se(sigemptyset(&mask) == 0);
153
154 sigset_add_many(&mask,
155 SIGCHLD, /* Child died */
156 SIGTERM, /* Reexecute daemon */
157 SIGHUP, /* Reload configuration */
158 SIGUSR1, /* systemd/upstart: reconnect to D-Bus */
159 SIGUSR2, /* systemd: dump status */
160 SIGINT, /* Kernel sends us this on control-alt-del */
161 SIGWINCH, /* Kernel sends us this on kbrequest (alt-arrowup) */
162 SIGPWR, /* Some kernel drivers and upsd send us this on power failure */
163 SIGRTMIN+0, /* systemd: start default.target */
164 SIGRTMIN+1, /* systemd: start rescue.target */
165 SIGRTMIN+2, /* systemd: isolate emergency.target */
166 SIGRTMIN+3, /* systemd: start halt.target */
167 SIGRTMIN+4, /* systemd: start poweroff.target */
168 SIGRTMIN+5, /* systemd: start reboot.target */
169 -1);
170 assert_se(sigprocmask(SIG_SETMASK, &mask, NULL) == 0);
171
172 m->signal_watch.type = WATCH_SIGNAL;
173 if ((m->signal_watch.fd = signalfd(-1, &mask, SFD_NONBLOCK|SFD_CLOEXEC)) < 0)
174 return -errno;
175
176 zero(ev);
177 ev.events = EPOLLIN;
178 ev.data.ptr = &m->signal_watch;
179
180 if (epoll_ctl(m->epoll_fd, EPOLL_CTL_ADD, m->signal_watch.fd, &ev) < 0)
181 return -errno;
182
183 if (m->running_as == MANAGER_SYSTEM)
184 return enable_special_signals(m);
185
186 return 0;
187 }
188
189 int manager_new(ManagerRunningAs running_as, Manager **_m) {
190 Manager *m;
191 int r = -ENOMEM;
192
193 assert(_m);
194 assert(running_as >= 0);
195 assert(running_as < _MANAGER_RUNNING_AS_MAX);
196
197 if (!(m = new0(Manager, 1)))
198 return -ENOMEM;
199
200 dual_timestamp_get(&m->startup_timestamp);
201
202 m->running_as = running_as;
203 m->name_data_slot = m->subscribed_data_slot = -1;
204 m->exit_code = _MANAGER_EXIT_CODE_INVALID;
205 m->pin_cgroupfs_fd = -1;
206
207 #ifdef HAVE_AUDIT
208 m->audit_fd = -1;
209 #endif
210
211 m->signal_watch.fd = m->mount_watch.fd = m->udev_watch.fd = m->epoll_fd = m->dev_autofs_fd = -1;
212 m->current_job_id = 1; /* start as id #1, so that we can leave #0 around as "null-like" value */
213
214 if (!(m->environment = strv_copy(environ)))
215 goto fail;
216
217 if (!(m->units = hashmap_new(string_hash_func, string_compare_func)))
218 goto fail;
219
220 if (!(m->jobs = hashmap_new(trivial_hash_func, trivial_compare_func)))
221 goto fail;
222
223 if (!(m->transaction_jobs = hashmap_new(trivial_hash_func, trivial_compare_func)))
224 goto fail;
225
226 if (!(m->watch_pids = hashmap_new(trivial_hash_func, trivial_compare_func)))
227 goto fail;
228
229 if (!(m->cgroup_bondings = hashmap_new(string_hash_func, string_compare_func)))
230 goto fail;
231
232 if (!(m->watch_bus = hashmap_new(string_hash_func, string_compare_func)))
233 goto fail;
234
235 if ((m->epoll_fd = epoll_create1(EPOLL_CLOEXEC)) < 0)
236 goto fail;
237
238 if ((r = lookup_paths_init(&m->lookup_paths, m->running_as)) < 0)
239 goto fail;
240
241 if ((r = manager_setup_signals(m)) < 0)
242 goto fail;
243
244 if ((r = manager_setup_cgroup(m)) < 0)
245 goto fail;
246
247 if ((r = manager_setup_notify(m)) < 0)
248 goto fail;
249
250 /* Try to connect to the busses, if possible. */
251 if ((r = bus_init(m)) < 0)
252 goto fail;
253
254 #ifdef HAVE_AUDIT
255 if ((m->audit_fd = audit_open()) < 0)
256 log_error("Failed to connect to audit log: %m");
257 #endif
258
259 *_m = m;
260 return 0;
261
262 fail:
263 manager_free(m);
264 return r;
265 }
266
267 static unsigned manager_dispatch_cleanup_queue(Manager *m) {
268 Meta *meta;
269 unsigned n = 0;
270
271 assert(m);
272
273 while ((meta = m->cleanup_queue)) {
274 assert(meta->in_cleanup_queue);
275
276 unit_free((Unit*) meta);
277 n++;
278 }
279
280 return n;
281 }
282
283 enum {
284 GC_OFFSET_IN_PATH, /* This one is on the path we were travelling */
285 GC_OFFSET_UNSURE, /* No clue */
286 GC_OFFSET_GOOD, /* We still need this unit */
287 GC_OFFSET_BAD, /* We don't need this unit anymore */
288 _GC_OFFSET_MAX
289 };
290
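/* Depth-first sweep over the units that reference 'u': a unit is marked GOOD if
 * it is still needed itself or is referenced by a GOOD unit, BAD if neither it
 * nor anything referencing it is needed, and UNSURE otherwise, in which case it
 * is re-queued for a later sweep. */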
291 static void unit_gc_sweep(Unit *u, unsigned gc_marker) {
292 Iterator i;
293 Unit *other;
294 bool is_bad;
295
296 assert(u);
297
298 if (u->meta.gc_marker == gc_marker + GC_OFFSET_GOOD ||
299 u->meta.gc_marker == gc_marker + GC_OFFSET_BAD ||
300 u->meta.gc_marker == gc_marker + GC_OFFSET_IN_PATH)
301 return;
302
303 if (u->meta.in_cleanup_queue)
304 goto bad;
305
306 if (unit_check_gc(u))
307 goto good;
308
309 u->meta.gc_marker = gc_marker + GC_OFFSET_IN_PATH;
310
311 is_bad = true;
312
313 SET_FOREACH(other, u->meta.dependencies[UNIT_REFERENCED_BY], i) {
314 unit_gc_sweep(other, gc_marker);
315
316 if (other->meta.gc_marker == gc_marker + GC_OFFSET_GOOD)
317 goto good;
318
319 if (other->meta.gc_marker != gc_marker + GC_OFFSET_BAD)
320 is_bad = false;
321 }
322
323 if (is_bad)
324 goto bad;
325
326 /* We were unable to find anything out about this entry, so
327 * let's investigate it later */
328 u->meta.gc_marker = gc_marker + GC_OFFSET_UNSURE;
329 unit_add_to_gc_queue(u);
330 return;
331
332 bad:
333 /* We definitely know that this one is not useful anymore, so
334 * let's mark it for deletion */
335 u->meta.gc_marker = gc_marker + GC_OFFSET_BAD;
336 unit_add_to_cleanup_queue(u);
337 return;
338
339 good:
340 u->meta.gc_marker = gc_marker + GC_OFFSET_GOOD;
341 }
342
343 static unsigned manager_dispatch_gc_queue(Manager *m) {
344 Meta *meta;
345 unsigned n = 0;
346 unsigned gc_marker;
347
348 assert(m);
349
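        /* Only run a sweep once the queue holds GC_QUEUE_ENTRIES_MAX entries
         * or the queue timestamp is more than GC_QUEUE_USEC_MAX in the past. */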
350 if ((m->n_in_gc_queue < GC_QUEUE_ENTRIES_MAX) &&
351 (m->gc_queue_timestamp <= 0 ||
352 (m->gc_queue_timestamp + GC_QUEUE_USEC_MAX) > now(CLOCK_MONOTONIC)))
353 return 0;
354
355 log_debug("Running GC...");
356
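        /* Bump the marker by a full GC_OFFSET range for this sweep so that
         * marks left over from earlier runs can never alias the current
         * GC_OFFSET_* values; restart at 1 if the unsigned counter is about
         * to wrap around. */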
357 m->gc_marker += _GC_OFFSET_MAX;
358 if (m->gc_marker + _GC_OFFSET_MAX <= _GC_OFFSET_MAX)
359 m->gc_marker = 1;
360
361 gc_marker = m->gc_marker;
362
363 while ((meta = m->gc_queue)) {
364 assert(meta->in_gc_queue);
365
366 unit_gc_sweep((Unit*) meta, gc_marker);
367
368 LIST_REMOVE(Meta, gc_queue, m->gc_queue, meta);
369 meta->in_gc_queue = false;
370
371 n++;
372
373 if (meta->gc_marker == gc_marker + GC_OFFSET_BAD ||
374 meta->gc_marker == gc_marker + GC_OFFSET_UNSURE) {
375 log_debug("Collecting %s", meta->id);
376 meta->gc_marker = gc_marker + GC_OFFSET_BAD;
377 unit_add_to_cleanup_queue((Unit*) meta);
378 }
379 }
380
381 m->n_in_gc_queue = 0;
382 m->gc_queue_timestamp = 0;
383
384 return n;
385 }
386
387 static void manager_clear_jobs_and_units(Manager *m) {
388 Job *j;
389 Unit *u;
390
391 assert(m);
392
393 while ((j = hashmap_first(m->transaction_jobs)))
394 job_free(j);
395
396 while ((u = hashmap_first(m->units)))
397 unit_free(u);
398
399 manager_dispatch_cleanup_queue(m);
400
401 assert(!m->load_queue);
402 assert(!m->run_queue);
403 assert(!m->dbus_unit_queue);
404 assert(!m->dbus_job_queue);
405 assert(!m->cleanup_queue);
406 assert(!m->gc_queue);
407
408 assert(hashmap_isempty(m->transaction_jobs));
409 assert(hashmap_isempty(m->jobs));
410 assert(hashmap_isempty(m->units));
411 }
412
413 void manager_free(Manager *m) {
414 UnitType c;
415
416 assert(m);
417
418 manager_clear_jobs_and_units(m);
419
420 for (c = 0; c < _UNIT_TYPE_MAX; c++)
421 if (unit_vtable[c]->shutdown)
422 unit_vtable[c]->shutdown(m);
423
424 /* If we reexecute ourselves, we keep the root cgroup
425 * around */
426 manager_shutdown_cgroup(m, m->exit_code != MANAGER_REEXECUTE);
427
428 bus_done(m);
429
430 hashmap_free(m->units);
431 hashmap_free(m->jobs);
432 hashmap_free(m->transaction_jobs);
433 hashmap_free(m->watch_pids);
434 hashmap_free(m->watch_bus);
435
436 if (m->epoll_fd >= 0)
437 close_nointr_nofail(m->epoll_fd);
438 if (m->signal_watch.fd >= 0)
439 close_nointr_nofail(m->signal_watch.fd);
440 if (m->notify_watch.fd >= 0)
441 close_nointr_nofail(m->notify_watch.fd);
442
443 #ifdef HAVE_AUDIT
444 if (m->audit_fd >= 0)
445 audit_close(m->audit_fd);
446 #endif
447
448 free(m->notify_socket);
449
450 lookup_paths_free(&m->lookup_paths);
451 strv_free(m->environment);
452
453 hashmap_free(m->cgroup_bondings);
454 set_free_free(m->unit_path_cache);
455
456 free(m);
457 }
458
459 int manager_enumerate(Manager *m) {
460 int r = 0, q;
461 UnitType c;
462
463 assert(m);
464
465 /* Let's ask every type to load all units from disk/kernel
466 * that it might know */
467 for (c = 0; c < _UNIT_TYPE_MAX; c++)
468 if (unit_vtable[c]->enumerate)
469 if ((q = unit_vtable[c]->enumerate(m)) < 0)
470 r = q;
471
472 manager_dispatch_load_queue(m);
473 return r;
474 }
475
476 int manager_coldplug(Manager *m) {
477 int r = 0, q;
478 Iterator i;
479 Unit *u;
480 char *k;
481
482 assert(m);
483
484 /* Then, let's set up their initial state. */
485 HASHMAP_FOREACH_KEY(u, k, m->units, i) {
486
487 /* ignore aliases */
488 if (u->meta.id != k)
489 continue;
490
491 if ((q = unit_coldplug(u)) < 0)
492 r = q;
493 }
494
495 return r;
496 }
497
498 static void manager_build_unit_path_cache(Manager *m) {
499 char **i;
500 DIR *d = NULL;
501 int r;
502
503 assert(m);
504
505 set_free_free(m->unit_path_cache);
506
507 if (!(m->unit_path_cache = set_new(string_hash_func, string_compare_func))) {
508 log_error("Failed to allocate unit path cache.");
509 return;
510 }
511
512 /* This simply builds a list of files we know exist, so that
513 * we don't always have to go to disk */
514
515 STRV_FOREACH(i, m->lookup_paths.unit_path) {
516 struct dirent *de;
517
518 if (!(d = opendir(*i))) {
519 log_error("Failed to open directory: %m");
520 continue;
521 }
522
523 while ((de = readdir(d))) {
524 char *p;
525
526 if (ignore_file(de->d_name))
527 continue;
528
529 if (asprintf(&p, "%s/%s", streq(*i, "/") ? "" : *i, de->d_name) < 0) {
530 r = -ENOMEM;
531 goto fail;
532 }
533
534 if ((r = set_put(m->unit_path_cache, p)) < 0) {
535 free(p);
536 goto fail;
537 }
538 }
539
540 closedir(d);
541 d = NULL;
542 }
543
544 return;
545
546 fail:
547 log_error("Failed to build unit path cache: %s", strerror(-r));
548
549 set_free_free(m->unit_path_cache);
550 m->unit_path_cache = NULL;
551
552 if (d)
553 closedir(d);
554 }
555
556 int manager_startup(Manager *m, FILE *serialization, FDSet *fds) {
557 int r, q;
558
559 assert(m);
560
561 manager_build_unit_path_cache(m);
562
563         /* If we are going to deserialize, make sure that this is
564          * already known during enumeration, so we increase the
565          * counter here already */
566 if (serialization)
567 m->n_deserializing ++;
568
569 /* First, enumerate what we can from all config files */
570 r = manager_enumerate(m);
571
572 /* Second, deserialize if there is something to deserialize */
573 if (serialization)
574 if ((q = manager_deserialize(m, serialization, fds)) < 0)
575 r = q;
576
577 /* Third, fire things up! */
578 if ((q = manager_coldplug(m)) < 0)
579 r = q;
580
581 if (serialization) {
582 assert(m->n_deserializing > 0);
583 m->n_deserializing --;
584 }
585
586 return r;
587 }
588
589 static void transaction_delete_job(Manager *m, Job *j, bool delete_dependencies) {
590 assert(m);
591 assert(j);
592
593 /* Deletes one job from the transaction */
594
595 manager_transaction_unlink_job(m, j, delete_dependencies);
596
597 if (!j->installed)
598 job_free(j);
599 }
600
601 static void transaction_delete_unit(Manager *m, Unit *u) {
602 Job *j;
603
604 /* Deletes all jobs associated with a certain unit from the
605 * transaction */
606
607 while ((j = hashmap_get(m->transaction_jobs, u)))
608 transaction_delete_job(m, j, true);
609 }
610
611 static void transaction_clean_dependencies(Manager *m) {
612 Iterator i;
613 Job *j;
614
615 assert(m);
616
617 /* Drops all dependencies of all installed jobs */
618
619 HASHMAP_FOREACH(j, m->jobs, i) {
620 while (j->subject_list)
621 job_dependency_free(j->subject_list);
622 while (j->object_list)
623 job_dependency_free(j->object_list);
624 }
625
626 assert(!m->transaction_anchor);
627 }
628
629 static void transaction_abort(Manager *m) {
630 Job *j;
631
632 assert(m);
633
634 while ((j = hashmap_first(m->transaction_jobs)))
635 if (j->installed)
636 transaction_delete_job(m, j, true);
637 else
638 job_free(j);
639
640 assert(hashmap_isempty(m->transaction_jobs));
641
642 transaction_clean_dependencies(m);
643 }
644
645 static void transaction_find_jobs_that_matter_to_anchor(Manager *m, Job *j, unsigned generation) {
646 JobDependency *l;
647
648 assert(m);
649
650 /* A recursive sweep through the graph that marks all units
651 * that matter to the anchor job, i.e. are directly or
652 * indirectly a dependency of the anchor job via paths that
653 * are fully marked as mattering. */
654
655 if (j)
656 l = j->subject_list;
657 else
658 l = m->transaction_anchor;
659
660 LIST_FOREACH(subject, l, l) {
661
662 /* This link does not matter */
663 if (!l->matters)
664 continue;
665
666 /* This unit has already been marked */
667 if (l->object->generation == generation)
668 continue;
669
670 l->object->matters_to_anchor = true;
671 l->object->generation = generation;
672
673 transaction_find_jobs_that_matter_to_anchor(m, l->object, generation);
674 }
675 }
676
677 static void transaction_merge_and_delete_job(Manager *m, Job *j, Job *other, JobType t) {
678 JobDependency *l, *last;
679
680 assert(j);
681 assert(other);
682 assert(j->unit == other->unit);
683 assert(!j->installed);
684
685         /* Merges 'other' into 'j' and then deletes 'other'. */
686
687 j->type = t;
688 j->state = JOB_WAITING;
689 j->override = j->override || other->override;
690
691 j->matters_to_anchor = j->matters_to_anchor || other->matters_to_anchor;
692
693 /* Patch us in as new owner of the JobDependency objects */
694 last = NULL;
695 LIST_FOREACH(subject, l, other->subject_list) {
696 assert(l->subject == other);
697 l->subject = j;
698 last = l;
699 }
700
701 /* Merge both lists */
702 if (last) {
703 last->subject_next = j->subject_list;
704 if (j->subject_list)
705 j->subject_list->subject_prev = last;
706 j->subject_list = other->subject_list;
707 }
708
709 /* Patch us in as new owner of the JobDependency objects */
710 last = NULL;
711 LIST_FOREACH(object, l, other->object_list) {
712 assert(l->object == other);
713 l->object = j;
714 last = l;
715 }
716
717 /* Merge both lists */
718 if (last) {
719 last->object_next = j->object_list;
720 if (j->object_list)
721 j->object_list->object_prev = last;
722 j->object_list = other->object_list;
723 }
724
725 /* Kill the other job */
726 other->subject_list = NULL;
727 other->object_list = NULL;
728 transaction_delete_job(m, other, true);
729 }

730 static bool job_is_conflicted_by(Job *j) {
731 JobDependency *l;
732
733 assert(j);
734
735         /* Returns true if this job is pulled in by at least one
736 * ConflictedBy dependency. */
737
738 LIST_FOREACH(object, l, j->object_list)
739 if (l->conflicts)
740 return true;
741
742 return false;
743 }
744
745 static int delete_one_unmergeable_job(Manager *m, Job *j) {
746 Job *k;
747
748 assert(j);
749
750 /* Tries to delete one item in the linked list
751 * j->transaction_next->transaction_next->... that conflicts
752          * with another one, in an attempt to make an inconsistent
753 * transaction work. */
754
755         /* We rely here on the fact that if a merged with b does not
756          * merge with c, then neither a nor b will merge with c either */
757 LIST_FOREACH(transaction, j, j)
758 LIST_FOREACH(transaction, k, j->transaction_next) {
759 Job *d;
760
761 /* Is this one mergeable? Then skip it */
762 if (job_type_is_mergeable(j->type, k->type))
763 continue;
764
765 /* Ok, we found two that conflict, let's see if we can
766 * drop one of them */
767 if (!j->matters_to_anchor && !k->matters_to_anchor) {
768
769                         /* Neither job matters, so let's
770                          * find the one that is smarter to
771                          * remove. Let's think positive and
772                          * rather remove stops than starts --
773                          * except if something is being
774                          * stopped because it is conflicted by
775                          * another unit, in which case we'd
776                          * rather remove the start. */
777
778 log_debug("Looking at job %s/%s conflicted_by=%s", j->unit->meta.id, job_type_to_string(j->type), yes_no(j->type == JOB_STOP && job_is_conflicted_by(j)));
779 log_debug("Looking at job %s/%s conflicted_by=%s", k->unit->meta.id, job_type_to_string(k->type), yes_no(k->type == JOB_STOP && job_is_conflicted_by(k)));
780
781 if (j->type == JOB_STOP) {
782
783 if (job_is_conflicted_by(j))
784 d = k;
785 else
786 d = j;
787
788 } else if (k->type == JOB_STOP) {
789
790 if (job_is_conflicted_by(k))
791 d = j;
792 else
793 d = k;
794 } else
795 d = j;
796
797 } else if (!j->matters_to_anchor)
798 d = j;
799 else if (!k->matters_to_anchor)
800 d = k;
801 else
802 return -ENOEXEC;
803
804 /* Ok, we can drop one, so let's do so. */
805 log_debug("Fixing conflicting jobs by deleting job %s/%s", d->unit->meta.id, job_type_to_string(d->type));
806 transaction_delete_job(m, d, true);
807 return 0;
808 }
809
810 return -EINVAL;
811 }
812
813 static int transaction_merge_jobs(Manager *m, DBusError *e) {
814 Job *j;
815 Iterator i;
816 int r;
817
818 assert(m);
819
820 /* First step, check whether any of the jobs for one specific
821 * task conflict. If so, try to drop one of them. */
822 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
823 JobType t;
824 Job *k;
825
826 t = j->type;
827 LIST_FOREACH(transaction, k, j->transaction_next) {
828 if (job_type_merge(&t, k->type) >= 0)
829 continue;
830
831 /* OK, we could not merge all jobs for this
832 * action. Let's see if we can get rid of one
833 * of them */
834
835 if ((r = delete_one_unmergeable_job(m, j)) >= 0)
836 /* Ok, we managed to drop one, now
837 * let's ask our callers to call us
838 * again after garbage collecting */
839 return -EAGAIN;
840
841 /* We couldn't merge anything. Failure */
842 dbus_set_error(e, BUS_ERROR_TRANSACTION_JOBS_CONFLICTING, "Transaction contains conflicting jobs '%s' and '%s' for %s. Probably contradicting requirement dependencies configured.",
843 job_type_to_string(t), job_type_to_string(k->type), k->unit->meta.id);
844 return r;
845 }
846 }
847
848 /* Second step, merge the jobs. */
849 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
850 JobType t = j->type;
851 Job *k;
852
853 /* Merge all transactions */
854 LIST_FOREACH(transaction, k, j->transaction_next)
855 assert_se(job_type_merge(&t, k->type) == 0);
856
857 /* If an active job is mergeable, merge it too */
858 if (j->unit->meta.job)
859 job_type_merge(&t, j->unit->meta.job->type); /* Might fail. Which is OK */
860
861 while ((k = j->transaction_next)) {
862 if (j->installed) {
863 transaction_merge_and_delete_job(m, k, j, t);
864 j = k;
865 } else
866 transaction_merge_and_delete_job(m, j, k, t);
867 }
868
869 assert(!j->transaction_next);
870 assert(!j->transaction_prev);
871 }
872
873 return 0;
874 }
875
876 static void transaction_drop_redundant(Manager *m) {
877 bool again;
878
879 assert(m);
880
881 /* Goes through the transaction and removes all jobs that are
882 * a noop */
883
884 do {
885 Job *j;
886 Iterator i;
887
888 again = false;
889
890 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
891 bool changes_something = false;
892 Job *k;
893
894 LIST_FOREACH(transaction, k, j) {
895
896 if (!job_is_anchor(k) &&
897 job_type_is_redundant(k->type, unit_active_state(k->unit)))
898 continue;
899
900 changes_something = true;
901 break;
902 }
903
904 if (changes_something)
905 continue;
906
907 log_debug("Found redundant job %s/%s, dropping.", j->unit->meta.id, job_type_to_string(j->type));
908 transaction_delete_job(m, j, false);
909 again = true;
910 break;
911 }
912
913 } while (again);
914 }
915
916 static bool unit_matters_to_anchor(Unit *u, Job *j) {
917 assert(u);
918 assert(!j->transaction_prev);
919
920 /* Checks whether at least one of the jobs for this unit
921 * matters to the anchor. */
922
923 LIST_FOREACH(transaction, j, j)
924 if (j->matters_to_anchor)
925 return true;
926
927 return false;
928 }
929
930 static int transaction_verify_order_one(Manager *m, Job *j, Job *from, unsigned generation, DBusError *e) {
931 Iterator i;
932 Unit *u;
933 int r;
934
935 assert(m);
936 assert(j);
937 assert(!j->transaction_prev);
938
939 /* Does a recursive sweep through the ordering graph, looking
940          * for a cycle. If we find a cycle, we try to break it. */
941
942 /* Have we seen this before? */
943 if (j->generation == generation) {
944 Job *k, *delete;
945
946 /* If the marker is NULL we have been here already and
947 * decided the job was loop-free from here. Hence
948 * shortcut things and return right-away. */
949 if (!j->marker)
950 return 0;
951
952 /* So, the marker is not NULL and we already have been
953 * here. We have a cycle. Let's try to break it. We go
954 * backwards in our path and try to find a suitable
955 * job to remove. We use the marker to find our way
956                  * back, since, smart as we are, we stored our way back
957 * in there. */
958 log_warning("Found ordering cycle on %s/%s", j->unit->meta.id, job_type_to_string(j->type));
959
960 delete = NULL;
961 for (k = from; k; k = ((k->generation == generation && k->marker != k) ? k->marker : NULL)) {
962
963 log_info("Walked on cycle path to %s/%s", k->unit->meta.id, job_type_to_string(k->type));
964
965 if (!delete &&
966 !k->installed &&
967 !unit_matters_to_anchor(k->unit, k)) {
968 /* Ok, we can drop this one, so let's
969 * do so. */
970 delete = k;
971 }
972
973 /* Check if this in fact was the beginning of
974 * the cycle */
975 if (k == j)
976 break;
977 }
978
979
980 if (delete) {
981                         log_warning("Breaking ordering cycle by deleting job %s/%s", delete->unit->meta.id, job_type_to_string(delete->type));
982 transaction_delete_unit(m, delete->unit);
983 return -EAGAIN;
984 }
985
986 log_error("Unable to break cycle");
987
988 dbus_set_error(e, BUS_ERROR_TRANSACTION_ORDER_IS_CYCLIC, "Transaction order is cyclic. See logs for details.");
989 return -ENOEXEC;
990 }
991
992 /* Make the marker point to where we come from, so that we can
993 * find our way backwards if we want to break a cycle. We use
994 * a special marker for the beginning: we point to
995 * ourselves. */
996 j->marker = from ? from : j;
997 j->generation = generation;
998
999         /* We assume that the dependencies are bidirectional, and
1000 * hence can ignore UNIT_AFTER */
1001 SET_FOREACH(u, j->unit->meta.dependencies[UNIT_BEFORE], i) {
1002 Job *o;
1003
1004 /* Is there a job for this unit? */
1005 if (!(o = hashmap_get(m->transaction_jobs, u)))
1006
1007 /* Ok, there is no job for this in the
1008 * transaction, but maybe there is already one
1009 * running? */
1010 if (!(o = u->meta.job))
1011 continue;
1012
1013 if ((r = transaction_verify_order_one(m, o, j, generation, e)) < 0)
1014 return r;
1015 }
1016
1017 /* Ok, let's backtrack, and remember that this entry is not on
1018 * our path anymore. */
1019 j->marker = NULL;
1020
1021 return 0;
1022 }
1023
1024 static int transaction_verify_order(Manager *m, unsigned *generation, DBusError *e) {
1025 Job *j;
1026 int r;
1027 Iterator i;
1028 unsigned g;
1029
1030 assert(m);
1031 assert(generation);
1032
1033 /* Check if the ordering graph is cyclic. If it is, try to fix
1034 * that up by dropping one of the jobs. */
1035
1036 g = (*generation)++;
1037
1038 HASHMAP_FOREACH(j, m->transaction_jobs, i)
1039 if ((r = transaction_verify_order_one(m, j, NULL, g, e)) < 0)
1040 return r;
1041
1042 return 0;
1043 }
1044
1045 static void transaction_collect_garbage(Manager *m) {
1046 bool again;
1047
1048 assert(m);
1049
1050 /* Drop jobs that are not required by any other job */
1051
1052 do {
1053 Iterator i;
1054 Job *j;
1055
1056 again = false;
1057
1058 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
1059 if (j->object_list)
1060 continue;
1061
1062 log_debug("Garbage collecting job %s/%s", j->unit->meta.id, job_type_to_string(j->type));
1063 transaction_delete_job(m, j, true);
1064 again = true;
1065 break;
1066 }
1067
1068 } while (again);
1069 }
1070
1071 static int transaction_is_destructive(Manager *m, DBusError *e) {
1072 Iterator i;
1073 Job *j;
1074
1075 assert(m);
1076
1077 /* Checks whether applying this transaction means that
1078 * existing jobs would be replaced */
1079
1080 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
1081
1082 /* Assume merged */
1083 assert(!j->transaction_prev);
1084 assert(!j->transaction_next);
1085
1086 if (j->unit->meta.job &&
1087 j->unit->meta.job != j &&
1088 !job_type_is_superset(j->type, j->unit->meta.job->type)) {
1089
1090 dbus_set_error(e, BUS_ERROR_TRANSACTION_IS_DESTRUCTIVE, "Transaction is destructive.");
1091 return -EEXIST;
1092 }
1093 }
1094
1095 return 0;
1096 }
1097
1098 static void transaction_minimize_impact(Manager *m) {
1099 bool again;
1100 assert(m);
1101
1102 /* Drops all unnecessary jobs that reverse already active jobs
1103 * or that stop a running service. */
1104
1105 do {
1106 Job *j;
1107 Iterator i;
1108
1109 again = false;
1110
1111 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
1112 LIST_FOREACH(transaction, j, j) {
1113 bool stops_running_service, changes_existing_job;
1114
1115 /* If it matters, we shouldn't drop it */
1116 if (j->matters_to_anchor)
1117 continue;
1118
1119 /* Would this stop a running service?
1120 * Would this change an existing job?
1121 * If so, let's drop this entry */
1122
1123 stops_running_service =
1124 j->type == JOB_STOP && UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(j->unit));
1125
1126 changes_existing_job =
1127 j->unit->meta.job && job_type_is_conflicting(j->type, j->unit->meta.job->type);
1128
1129 if (!stops_running_service && !changes_existing_job)
1130 continue;
1131
1132 if (stops_running_service)
1133 log_info("%s/%s would stop a running service.", j->unit->meta.id, job_type_to_string(j->type));
1134
1135 if (changes_existing_job)
1136 log_info("%s/%s would change existing job.", j->unit->meta.id, job_type_to_string(j->type));
1137
1138 /* Ok, let's get rid of this */
1139 log_info("Deleting %s/%s to minimize impact.", j->unit->meta.id, job_type_to_string(j->type));
1140
1141 transaction_delete_job(m, j, true);
1142 again = true;
1143 break;
1144 }
1145
1146 if (again)
1147 break;
1148 }
1149
1150 } while (again);
1151 }
1152
1153 static int transaction_apply(Manager *m) {
1154 Iterator i;
1155 Job *j;
1156 int r;
1157
1158 /* Moves the transaction jobs to the set of active jobs */
1159
1160 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
1161 /* Assume merged */
1162 assert(!j->transaction_prev);
1163 assert(!j->transaction_next);
1164
1165 if (j->installed)
1166 continue;
1167
1168 if ((r = hashmap_put(m->jobs, UINT32_TO_PTR(j->id), j)) < 0)
1169 goto rollback;
1170 }
1171
1172 while ((j = hashmap_steal_first(m->transaction_jobs))) {
1173 if (j->installed)
1174 continue;
1175
1176 if (j->unit->meta.job)
1177 job_free(j->unit->meta.job);
1178
1179 j->unit->meta.job = j;
1180 j->installed = true;
1181
1182 /* We're fully installed. Now let's free data we don't
1183 * need anymore. */
1184
1185 assert(!j->transaction_next);
1186 assert(!j->transaction_prev);
1187
1188 job_add_to_run_queue(j);
1189 job_add_to_dbus_queue(j);
1190 job_start_timer(j);
1191 }
1192
1193 /* As last step, kill all remaining job dependencies. */
1194 transaction_clean_dependencies(m);
1195
1196 return 0;
1197
1198 rollback:
1199
1200 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
1201 if (j->installed)
1202 continue;
1203
1204 hashmap_remove(m->jobs, UINT32_TO_PTR(j->id));
1205 }
1206
1207 return r;
1208 }
1209
1210 static int transaction_activate(Manager *m, JobMode mode, DBusError *e) {
1211 int r;
1212 unsigned generation = 1;
1213
1214 assert(m);
1215
1216 /* This applies the changes recorded in transaction_jobs to
1217 * the actual list of jobs, if possible. */
1218
1219 /* First step: figure out which jobs matter */
1220 transaction_find_jobs_that_matter_to_anchor(m, NULL, generation++);
1221
1222 /* Second step: Try not to stop any running services if
1223 * we don't have to. Don't try to reverse running
1224 * jobs if we don't have to. */
1225 transaction_minimize_impact(m);
1226
1227 /* Third step: Drop redundant jobs */
1228 transaction_drop_redundant(m);
1229
1230 for (;;) {
1231 /* Fourth step: Let's remove unneeded jobs that might
1232 * be lurking. */
1233 transaction_collect_garbage(m);
1234
1235 /* Fifth step: verify order makes sense and correct
1236 * cycles if necessary and possible */
1237 if ((r = transaction_verify_order(m, &generation, e)) >= 0)
1238 break;
1239
1240 if (r != -EAGAIN) {
1241 log_warning("Requested transaction contains an unfixable cyclic ordering dependency: %s", bus_error(e, r));
1242 goto rollback;
1243 }
1244
1245 /* Let's see if the resulting transaction ordering
1246 * graph is still cyclic... */
1247 }
1248
1249 for (;;) {
1250 /* Sixth step: let's drop unmergeable entries if
1251 * necessary and possible, merge entries we can
1252 * merge */
1253 if ((r = transaction_merge_jobs(m, e)) >= 0)
1254 break;
1255
1256 if (r != -EAGAIN) {
1257                         log_warning("Requested transaction contains unmergeable jobs: %s", bus_error(e, r));
1258 goto rollback;
1259 }
1260
1261 /* Seventh step: an entry got dropped, let's garbage
1262 * collect its dependencies. */
1263 transaction_collect_garbage(m);
1264
1265 /* Let's see if the resulting transaction still has
1266 * unmergeable entries ... */
1267 }
1268
1269         /* Eighth step: Drop redundant jobs again, if the merging now allows us to drop more. */
1270 transaction_drop_redundant(m);
1271
1272 /* Ninth step: check whether we can actually apply this */
1273 if (mode == JOB_FAIL)
1274 if ((r = transaction_is_destructive(m, e)) < 0) {
1275 log_notice("Requested transaction contradicts existing jobs: %s", bus_error(e, r));
1276 goto rollback;
1277 }
1278
1279 /* Tenth step: apply changes */
1280 if ((r = transaction_apply(m)) < 0) {
1281 log_warning("Failed to apply transaction: %s", strerror(-r));
1282 goto rollback;
1283 }
1284
1285 assert(hashmap_isempty(m->transaction_jobs));
1286 assert(!m->transaction_anchor);
1287
1288 return 0;
1289
1290 rollback:
1291 transaction_abort(m);
1292 return r;
1293 }
1294
1295 static Job* transaction_add_one_job(Manager *m, JobType type, Unit *unit, bool override, bool *is_new) {
1296 Job *j, *f;
1297
1298 assert(m);
1299 assert(unit);
1300
1301         /* Looks for an existing prospective job and returns that. If
1302 * it doesn't exist it is created and added to the prospective
1303 * jobs list. */
1304
1305 f = hashmap_get(m->transaction_jobs, unit);
1306
1307 LIST_FOREACH(transaction, j, f) {
1308 assert(j->unit == unit);
1309
1310 if (j->type == type) {
1311 if (is_new)
1312 *is_new = false;
1313 return j;
1314 }
1315 }
1316
1317 if (unit->meta.job && unit->meta.job->type == type)
1318 j = unit->meta.job;
1319 else if (!(j = job_new(m, type, unit)))
1320 return NULL;
1321
1322 j->generation = 0;
1323 j->marker = NULL;
1324 j->matters_to_anchor = false;
1325 j->override = override;
1326
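        /* Prepend the new job to the unit's transaction list and let the
         * hashmap entry point at the new list head. */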
1327 LIST_PREPEND(Job, transaction, f, j);
1328
1329 if (hashmap_replace(m->transaction_jobs, unit, f) < 0) {
1330 job_free(j);
1331 return NULL;
1332 }
1333
1334 if (is_new)
1335 *is_new = true;
1336
1337 log_debug("Added job %s/%s to transaction.", unit->meta.id, job_type_to_string(type));
1338
1339 return j;
1340 }
1341
1342 void manager_transaction_unlink_job(Manager *m, Job *j, bool delete_dependencies) {
1343 assert(m);
1344 assert(j);
1345
1346 if (j->transaction_prev)
1347 j->transaction_prev->transaction_next = j->transaction_next;
1348 else if (j->transaction_next)
1349 hashmap_replace(m->transaction_jobs, j->unit, j->transaction_next);
1350 else
1351 hashmap_remove_value(m->transaction_jobs, j->unit, j);
1352
1353 if (j->transaction_next)
1354 j->transaction_next->transaction_prev = j->transaction_prev;
1355
1356 j->transaction_prev = j->transaction_next = NULL;
1357
1358 while (j->subject_list)
1359 job_dependency_free(j->subject_list);
1360
1361 while (j->object_list) {
1362 Job *other = j->object_list->matters ? j->object_list->subject : NULL;
1363
1364 job_dependency_free(j->object_list);
1365
1366 if (other && delete_dependencies) {
1367 log_debug("Deleting job %s/%s as dependency of job %s/%s",
1368 other->unit->meta.id, job_type_to_string(other->type),
1369 j->unit->meta.id, job_type_to_string(j->type));
1370 transaction_delete_job(m, other, delete_dependencies);
1371 }
1372 }
1373 }
1374
1375 static int transaction_add_job_and_dependencies(
1376 Manager *m,
1377 JobType type,
1378 Unit *unit,
1379 Job *by,
1380 bool matters,
1381 bool override,
1382 bool conflicts,
1383 DBusError *e,
1384 Job **_ret) {
1385 Job *ret;
1386 Iterator i;
1387 Unit *dep;
1388 int r;
1389 bool is_new;
1390
1391 assert(m);
1392 assert(type < _JOB_TYPE_MAX);
1393 assert(unit);
1394
1395 if (unit->meta.load_state != UNIT_LOADED && unit->meta.load_state != UNIT_FAILED) {
1396 dbus_set_error(e, BUS_ERROR_LOAD_FAILED, "Unit %s is not loaded properly.", unit->meta.id);
1397 return -EINVAL;
1398 }
1399
1400 if (type != JOB_STOP && unit->meta.load_state == UNIT_FAILED) {
1401 dbus_set_error(e, BUS_ERROR_LOAD_FAILED, "Unit %s failed to load: %s. You might find more information in the logs.",
1402 unit->meta.id,
1403 strerror(-unit->meta.load_error));
1404 return -EINVAL;
1405 }
1406
1407 if (!unit_job_is_applicable(unit, type)) {
1408 dbus_set_error(e, BUS_ERROR_JOB_TYPE_NOT_APPLICABLE, "Job type %s is not applicable for unit %s.", job_type_to_string(type), unit->meta.id);
1409 return -EBADR;
1410 }
1411
1412 /* First add the job. */
1413 if (!(ret = transaction_add_one_job(m, type, unit, override, &is_new)))
1414 return -ENOMEM;
1415
1416 /* Then, add a link to the job. */
1417 if (!job_dependency_new(by, ret, matters, conflicts))
1418 return -ENOMEM;
1419
1420 if (is_new) {
1421 /* Finally, recursively add in all dependencies. */
1422 if (type == JOB_START || type == JOB_RELOAD_OR_START) {
1423 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_REQUIRES], i)
1424 if ((r = transaction_add_job_and_dependencies(m, JOB_START, dep, ret, true, override, false, e, NULL)) < 0 && r != -EBADR)
1425 goto fail;
1426
1427 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_REQUIRES_OVERRIDABLE], i)
1428 if ((r = transaction_add_job_and_dependencies(m, JOB_START, dep, ret, !override, override, false, e, NULL)) < 0 && r != -EBADR) {
1429 log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->meta.id, bus_error(e, r));
1430 dbus_error_free(e);
1431 }
1432
1433 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_WANTS], i)
1434 if ((r = transaction_add_job_and_dependencies(m, JOB_START, dep, ret, false, false, false, e, NULL)) < 0) {
1435 log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->meta.id, bus_error(e, r));
1436 dbus_error_free(e);
1437 }
1438
1439 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_REQUISITE], i)
1440 if ((r = transaction_add_job_and_dependencies(m, JOB_VERIFY_ACTIVE, dep, ret, true, override, false, e, NULL)) < 0 && r != -EBADR)
1441 goto fail;
1442
1443 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_REQUISITE_OVERRIDABLE], i)
1444 if ((r = transaction_add_job_and_dependencies(m, JOB_VERIFY_ACTIVE, dep, ret, !override, override, false, e, NULL)) < 0 && r != -EBADR) {
1445 log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->meta.id, bus_error(e, r));
1446 dbus_error_free(e);
1447 }
1448
1449 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_CONFLICTS], i)
1450 if ((r = transaction_add_job_and_dependencies(m, JOB_STOP, dep, ret, true, override, true, e, NULL)) < 0 && r != -EBADR)
1451 goto fail;
1452
1453 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_CONFLICTED_BY], i)
1454 if ((r = transaction_add_job_and_dependencies(m, JOB_STOP, dep, ret, true, override, false, e, NULL)) < 0 && r != -EBADR)
1455 goto fail;
1456
1457 } else if (type == JOB_STOP || type == JOB_RESTART || type == JOB_TRY_RESTART) {
1458
1459 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_REQUIRED_BY], i)
1460 if ((r = transaction_add_job_and_dependencies(m, type, dep, ret, true, override, false, e, NULL)) < 0 && r != -EBADR)
1461 goto fail;
1462 }
1463
1464 /* JOB_VERIFY_STARTED, JOB_RELOAD require no dependency handling */
1465 }
1466
1467 if (_ret)
1468 *_ret = ret;
1469
1470 return 0;
1471
1472 fail:
1473 return r;
1474 }
1475
1476 static int transaction_add_isolate_jobs(Manager *m) {
1477 Iterator i;
1478 Unit *u;
1479 char *k;
1480 int r;
1481
1482 assert(m);
1483
1484 HASHMAP_FOREACH_KEY(u, k, m->units, i) {
1485
1486 /* ignore aliases */
1487 if (u->meta.id != k)
1488 continue;
1489
1490 if (UNIT_VTABLE(u)->no_isolate)
1491 continue;
1492
1493                 /* No need to stop inactive units */
1494 if (UNIT_IS_INACTIVE_OR_MAINTENANCE(unit_active_state(u)))
1495 continue;
1496
1497 /* Is there already something listed for this? */
1498 if (hashmap_get(m->transaction_jobs, u))
1499 continue;
1500
1501 if ((r = transaction_add_job_and_dependencies(m, JOB_STOP, u, NULL, true, false, false, NULL, NULL)) < 0)
1502 log_warning("Cannot add isolate job for unit %s, ignoring: %s", u->meta.id, strerror(-r));
1503 }
1504
1505 return 0;
1506 }
1507
1508 int manager_add_job(Manager *m, JobType type, Unit *unit, JobMode mode, bool override, DBusError *e, Job **_ret) {
1509 int r;
1510 Job *ret;
1511
1512 assert(m);
1513 assert(type < _JOB_TYPE_MAX);
1514 assert(unit);
1515 assert(mode < _JOB_MODE_MAX);
1516
1517 if (mode == JOB_ISOLATE && type != JOB_START) {
1518 dbus_set_error(e, BUS_ERROR_INVALID_JOB_MODE, "Isolate is only valid for start.");
1519 return -EINVAL;
1520 }
1521
1522 log_debug("Trying to enqueue job %s/%s", unit->meta.id, job_type_to_string(type));
1523
1524 if ((r = transaction_add_job_and_dependencies(m, type, unit, NULL, true, override, false, e, &ret)) < 0) {
1525 transaction_abort(m);
1526 return r;
1527 }
1528
1529 if (mode == JOB_ISOLATE)
1530 if ((r = transaction_add_isolate_jobs(m)) < 0) {
1531 transaction_abort(m);
1532 return r;
1533 }
1534
1535 if ((r = transaction_activate(m, mode, e)) < 0)
1536 return r;
1537
1538 log_debug("Enqueued job %s/%s as %u", unit->meta.id, job_type_to_string(type), (unsigned) ret->id);
1539
1540 if (_ret)
1541 *_ret = ret;
1542
1543 return 0;
1544 }
1545
1546 int manager_add_job_by_name(Manager *m, JobType type, const char *name, JobMode mode, bool override, DBusError *e, Job **_ret) {
1547 Unit *unit;
1548 int r;
1549
1550 assert(m);
1551 assert(type < _JOB_TYPE_MAX);
1552 assert(name);
1553 assert(mode < _JOB_MODE_MAX);
1554
1555 if ((r = manager_load_unit(m, name, NULL, NULL, &unit)) < 0)
1556 return r;
1557
1558 return manager_add_job(m, type, unit, mode, override, e, _ret);
1559 }
1560
1561 Job *manager_get_job(Manager *m, uint32_t id) {
1562 assert(m);
1563
1564 return hashmap_get(m->jobs, UINT32_TO_PTR(id));
1565 }
1566
1567 Unit *manager_get_unit(Manager *m, const char *name) {
1568 assert(m);
1569 assert(name);
1570
1571 return hashmap_get(m->units, name);
1572 }
1573
1574 unsigned manager_dispatch_load_queue(Manager *m) {
1575 Meta *meta;
1576 unsigned n = 0;
1577
1578 assert(m);
1579
1580 /* Make sure we are not run recursively */
1581 if (m->dispatching_load_queue)
1582 return 0;
1583
1584 m->dispatching_load_queue = true;
1585
1586 /* Dispatches the load queue. Takes a unit from the queue and
1587 * tries to load its data until the queue is empty */
1588
1589 while ((meta = m->load_queue)) {
1590 assert(meta->in_load_queue);
1591
1592 unit_load((Unit*) meta);
1593 n++;
1594 }
1595
1596 m->dispatching_load_queue = false;
1597 return n;
1598 }
1599
1600 int manager_load_unit_prepare(Manager *m, const char *name, const char *path, DBusError *e, Unit **_ret) {
1601 Unit *ret;
1602 int r;
1603
1604 assert(m);
1605 assert(name || path);
1606
1607 /* This will prepare the unit for loading, but not actually
1608 * load anything from disk. */
1609
1610 if (path && !is_path(path)) {
1611 dbus_set_error(e, BUS_ERROR_INVALID_PATH, "Path %s is not absolute.", path);
1612 return -EINVAL;
1613 }
1614
1615 if (!name)
1616 name = file_name_from_path(path);
1617
1618 if (!unit_name_is_valid(name)) {
1619 dbus_set_error(e, BUS_ERROR_INVALID_NAME, "Unit name %s is not valid.", name);
1620 return -EINVAL;
1621 }
1622
1623 if ((ret = manager_get_unit(m, name))) {
1624 *_ret = ret;
1625 return 1;
1626 }
1627
1628 if (!(ret = unit_new(m)))
1629 return -ENOMEM;
1630
1631 if (path)
1632 if (!(ret->meta.fragment_path = strdup(path))) {
1633 unit_free(ret);
1634 return -ENOMEM;
1635 }
1636
1637 if ((r = unit_add_name(ret, name)) < 0) {
1638 unit_free(ret);
1639 return r;
1640 }
1641
1642 unit_add_to_load_queue(ret);
1643 unit_add_to_dbus_queue(ret);
1644 unit_add_to_gc_queue(ret);
1645
1646 if (_ret)
1647 *_ret = ret;
1648
1649 return 0;
1650 }
1651
1652 int manager_load_unit(Manager *m, const char *name, const char *path, DBusError *e, Unit **_ret) {
1653 int r;
1654
1655 assert(m);
1656
1657 /* This will load the service information files, but not actually
1658 * start any services or anything. */
1659
1660 if ((r = manager_load_unit_prepare(m, name, path, e, _ret)) != 0)
1661 return r;
1662
1663 manager_dispatch_load_queue(m);
1664
1665 if (_ret)
1666 *_ret = unit_follow_merge(*_ret);
1667
1668 return 0;
1669 }
1670
1671 void manager_dump_jobs(Manager *s, FILE *f, const char *prefix) {
1672 Iterator i;
1673 Job *j;
1674
1675 assert(s);
1676 assert(f);
1677
1678 HASHMAP_FOREACH(j, s->jobs, i)
1679 job_dump(j, f, prefix);
1680 }
1681
1682 void manager_dump_units(Manager *s, FILE *f, const char *prefix) {
1683 Iterator i;
1684 Unit *u;
1685 const char *t;
1686
1687 assert(s);
1688 assert(f);
1689
1690 HASHMAP_FOREACH_KEY(u, t, s->units, i)
1691 if (u->meta.id == t)
1692 unit_dump(u, f, prefix);
1693 }
1694
1695 void manager_clear_jobs(Manager *m) {
1696 Job *j;
1697
1698 assert(m);
1699
1700 transaction_abort(m);
1701
1702 while ((j = hashmap_first(m->jobs)))
1703 job_free(j);
1704 }
1705
1706 unsigned manager_dispatch_run_queue(Manager *m) {
1707 Job *j;
1708 unsigned n = 0;
1709
1710 if (m->dispatching_run_queue)
1711 return 0;
1712
1713 m->dispatching_run_queue = true;
1714
1715 while ((j = m->run_queue)) {
1716 assert(j->installed);
1717 assert(j->in_run_queue);
1718
1719 job_run_and_invalidate(j);
1720 n++;
1721 }
1722
1723 m->dispatching_run_queue = false;
1724 return n;
1725 }
1726
1727 unsigned manager_dispatch_dbus_queue(Manager *m) {
1728 Job *j;
1729 Meta *meta;
1730 unsigned n = 0;
1731
1732 assert(m);
1733
1734 if (m->dispatching_dbus_queue)
1735 return 0;
1736
1737 m->dispatching_dbus_queue = true;
1738
1739 while ((meta = m->dbus_unit_queue)) {
1740 assert(meta->in_dbus_queue);
1741
1742 bus_unit_send_change_signal((Unit*) meta);
1743 n++;
1744 }
1745
1746 while ((j = m->dbus_job_queue)) {
1747 assert(j->in_dbus_queue);
1748
1749 bus_job_send_change_signal(j);
1750 n++;
1751 }
1752
1753 m->dispatching_dbus_queue = false;
1754 return n;
1755 }
1756
1757 static int manager_process_notify_fd(Manager *m) {
1758 ssize_t n;
1759
1760 assert(m);
1761
1762 for (;;) {
1763 char buf[4096];
1764 struct msghdr msghdr;
1765 struct iovec iovec;
1766 struct ucred *ucred;
1767 union {
1768 struct cmsghdr cmsghdr;
1769 uint8_t buf[CMSG_SPACE(sizeof(struct ucred))];
1770 } control;
1771 Unit *u;
1772 char **tags;
1773
1774 zero(iovec);
1775 iovec.iov_base = buf;
1776 iovec.iov_len = sizeof(buf)-1;
1777
1778 zero(control);
1779 zero(msghdr);
1780 msghdr.msg_iov = &iovec;
1781 msghdr.msg_iovlen = 1;
1782 msghdr.msg_control = &control;
1783 msghdr.msg_controllen = sizeof(control);
1784
1785 if ((n = recvmsg(m->notify_watch.fd, &msghdr, MSG_DONTWAIT)) <= 0) {
1786 if (n >= 0)
1787 return -EIO;
1788
1789 if (errno == EAGAIN)
1790 break;
1791
1792 return -errno;
1793 }
1794
1795 if (msghdr.msg_controllen < CMSG_LEN(sizeof(struct ucred)) ||
1796 control.cmsghdr.cmsg_level != SOL_SOCKET ||
1797 control.cmsghdr.cmsg_type != SCM_CREDENTIALS ||
1798 control.cmsghdr.cmsg_len != CMSG_LEN(sizeof(struct ucred))) {
1799 log_warning("Received notify message without credentials. Ignoring.");
1800 continue;
1801 }
1802
1803 ucred = (struct ucred*) CMSG_DATA(&control.cmsghdr);
1804
1805 if (!(u = hashmap_get(m->watch_pids, LONG_TO_PTR(ucred->pid))))
1806 if (!(u = cgroup_unit_by_pid(m, ucred->pid))) {
1807 log_warning("Cannot find unit for notify message of PID %lu.", (unsigned long) ucred->pid);
1808 continue;
1809 }
1810
1811 assert((size_t) n < sizeof(buf));
1812 buf[n] = 0;
1813 if (!(tags = strv_split(buf, "\n\r")))
1814 return -ENOMEM;
1815
1816 log_debug("Got notification message for unit %s", u->meta.id);
1817
1818 if (UNIT_VTABLE(u)->notify_message)
1819 UNIT_VTABLE(u)->notify_message(u, ucred->pid, tags);
1820
1821 strv_free(tags);
1822 }
1823
1824 return 0;
1825 }
1826
1827 static int manager_dispatch_sigchld(Manager *m) {
1828 assert(m);
1829
1830 for (;;) {
1831 siginfo_t si;
1832 Unit *u;
1833 int r;
1834
1835 zero(si);
1836
1837                 /* First we call waitid() for a PID and do not reap the
1838 * zombie. That way we can still access /proc/$PID for
1839 * it while it is a zombie. */
1840 if (waitid(P_ALL, 0, &si, WEXITED|WNOHANG|WNOWAIT) < 0) {
1841
1842 if (errno == ECHILD)
1843 break;
1844
1845 if (errno == EINTR)
1846 continue;
1847
1848 return -errno;
1849 }
1850
1851 if (si.si_pid <= 0)
1852 break;
1853
1854 if (si.si_code == CLD_EXITED || si.si_code == CLD_KILLED || si.si_code == CLD_DUMPED) {
1855 char *name = NULL;
1856
1857 get_process_name(si.si_pid, &name);
1858 log_debug("Got SIGCHLD for process %lu (%s)", (unsigned long) si.si_pid, strna(name));
1859 free(name);
1860 }
1861
1862 /* Let's flush any message the dying child might still
1863 * have queued for us. This ensures that the process
1864 * still exists in /proc so that we can figure out
1865 * which cgroup and hence unit it belongs to. */
1866 if ((r = manager_process_notify_fd(m)) < 0)
1867 return r;
1868
1869 /* And now figure out the unit this belongs to */
1870 if (!(u = hashmap_get(m->watch_pids, LONG_TO_PTR(si.si_pid))))
1871 u = cgroup_unit_by_pid(m, si.si_pid);
1872
1873 /* And now, we actually reap the zombie. */
1874 if (waitid(P_PID, si.si_pid, &si, WEXITED) < 0) {
1875 if (errno == EINTR)
1876 continue;
1877
1878 return -errno;
1879 }
1880
1881 if (si.si_code != CLD_EXITED && si.si_code != CLD_KILLED && si.si_code != CLD_DUMPED)
1882 continue;
1883
1884 log_debug("Child %lu died (code=%s, status=%i/%s)",
1885 (long unsigned) si.si_pid,
1886 sigchld_code_to_string(si.si_code),
1887 si.si_status,
1888 strna(si.si_code == CLD_EXITED ? exit_status_to_string(si.si_status) : signal_to_string(si.si_status)));
1889
1890 if (!u)
1891 continue;
1892
1893 log_debug("Child %lu belongs to %s", (long unsigned) si.si_pid, u->meta.id);
1894
1895 hashmap_remove(m->watch_pids, LONG_TO_PTR(si.si_pid));
1896 UNIT_VTABLE(u)->sigchld_event(u, si.si_pid, si.si_code, si.si_status);
1897 }
1898
1899 return 0;
1900 }
1901
1902 static int manager_start_target(Manager *m, const char *name, JobMode mode) {
1903 int r;
1904 DBusError error;
1905
1906 dbus_error_init(&error);
1907
1908 log_info("Activating special unit %s", name);
1909
1910 if ((r = manager_add_job_by_name(m, JOB_START, name, mode, true, &error, NULL)) < 0)
1911 log_error("Failed to enqueue %s job: %s", name, bus_error(&error, r));
1912
1913 dbus_error_free(&error);
1914
1915 return r;
1916 }
1917
1918 static int manager_process_signal_fd(Manager *m) {
1919 ssize_t n;
1920 struct signalfd_siginfo sfsi;
1921 bool sigchld = false;
1922
1923 assert(m);
1924
1925 for (;;) {
1926 if ((n = read(m->signal_watch.fd, &sfsi, sizeof(sfsi))) != sizeof(sfsi)) {
1927
1928 if (n >= 0)
1929 return -EIO;
1930
1931 if (errno == EAGAIN)
1932 break;
1933
1934 return -errno;
1935 }
1936
1937 log_debug("Received SIG%s", strna(signal_to_string(sfsi.ssi_signo)));
1938
1939 switch (sfsi.ssi_signo) {
1940
1941 case SIGCHLD:
1942 sigchld = true;
1943 break;
1944
1945 case SIGTERM:
1946 if (m->running_as == MANAGER_SYSTEM) {
1947 /* This is for compatibility with the
1948 * original sysvinit */
1949 m->exit_code = MANAGER_REEXECUTE;
1950 break;
1951 }
1952
1953 /* Fall through */
1954
1955 case SIGINT:
1956 if (m->running_as == MANAGER_SYSTEM) {
1957 manager_start_target(m, SPECIAL_CTRL_ALT_DEL_TARGET, JOB_REPLACE);
1958 break;
1959 }
1960
1961                         /* Run the exit service if there is one; if not, just exit. */
1962 if (manager_start_target(m, SPECIAL_EXIT_SERVICE, JOB_REPLACE) < 0) {
1963 m->exit_code = MANAGER_EXIT;
1964 return 0;
1965 }
1966
1967 break;
1968
1969 case SIGWINCH:
1970 if (m->running_as == MANAGER_SYSTEM)
1971 manager_start_target(m, SPECIAL_KBREQUEST_TARGET, JOB_REPLACE);
1972
1973 /* This is a nop on non-init */
1974 break;
1975
1976 case SIGPWR:
1977 if (m->running_as == MANAGER_SYSTEM)
1978 manager_start_target(m, SPECIAL_SIGPWR_TARGET, JOB_REPLACE);
1979
1980 /* This is a nop on non-init */
1981 break;
1982
1983 case SIGUSR1: {
1984 Unit *u;
1985
1986 u = manager_get_unit(m, SPECIAL_DBUS_SERVICE);
1987
1988 if (!u || UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u))) {
1989 log_info("Trying to reconnect to bus...");
1990 bus_init(m);
1991 }
1992
1993 if (!u || !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u))) {
1994 log_info("Loading D-Bus service...");
1995 manager_start_target(m, SPECIAL_DBUS_SERVICE, JOB_REPLACE);
1996 }
1997
1998 break;
1999 }
2000
2001 case SIGUSR2: {
2002 FILE *f;
2003 char *dump = NULL;
2004 size_t size;
2005
2006 if (!(f = open_memstream(&dump, &size))) {
2007 log_warning("Failed to allocate memory stream.");
2008 break;
2009 }
2010
2011 manager_dump_units(m, f, "\t");
2012 manager_dump_jobs(m, f, "\t");
2013
2014 if (ferror(f)) {
2015 fclose(f);
2016 free(dump);
2017 log_warning("Failed to write status stream");
2018 break;
2019 }
2020
2021 fclose(f);
2022 log_dump(LOG_INFO, dump);
2023 free(dump);
2024
2025 break;
2026 }
2027
2028 case SIGHUP:
2029 m->exit_code = MANAGER_RELOAD;
2030 break;
2031
2032 default: {
2033 static const char * const table[] = {
2034 [0] = SPECIAL_DEFAULT_TARGET,
2035 [1] = SPECIAL_RESCUE_TARGET,
2036 [2] = SPECIAL_EMERGENCY_TARGET,
2037 [3] = SPECIAL_HALT_TARGET,
2038 [4] = SPECIAL_POWEROFF_TARGET,
2039 [5] = SPECIAL_REBOOT_TARGET
2040 };
2041
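                        /* Map SIGRTMIN+n to the corresponding special target from
                         * the table above; cf. the signal list in
                         * manager_setup_signals(). */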
2042 if ((int) sfsi.ssi_signo >= SIGRTMIN+0 &&
2043 (int) sfsi.ssi_signo < SIGRTMIN+(int) ELEMENTSOF(table)) {
2044 manager_start_target(m, table[sfsi.ssi_signo - SIGRTMIN],
2045                                                      ((int) sfsi.ssi_signo == SIGRTMIN+1 || (int) sfsi.ssi_signo == SIGRTMIN+2) ? JOB_ISOLATE : JOB_REPLACE);
2046 break;
2047 }
2048
2049 log_warning("Got unhandled signal <%s>.", strna(signal_to_string(sfsi.ssi_signo)));
2050 }
2051 }
2052 }
2053
2054 if (sigchld)
2055 return manager_dispatch_sigchld(m);
2056
2057 return 0;
2058 }
2059
2060 static int process_event(Manager *m, struct epoll_event *ev) {
2061 int r;
2062 Watch *w;
2063
2064 assert(m);
2065 assert(ev);
2066
2067         assert_se(w = ev->data.ptr);
2068
2069 switch (w->type) {
2070
2071 case WATCH_SIGNAL:
2072
2073 /* An incoming signal? */
2074 if (ev->events != EPOLLIN)
2075 return -EINVAL;
2076
2077 if ((r = manager_process_signal_fd(m)) < 0)
2078 return r;
2079
2080 break;
2081
2082 case WATCH_NOTIFY:
2083
2084 /* An incoming daemon notification event? */
2085 if (ev->events != EPOLLIN)
2086 return -EINVAL;
2087
2088 if ((r = manager_process_notify_fd(m)) < 0)
2089 return r;
2090
2091 break;
2092
2093 case WATCH_FD:
2094
2095 /* Some fd event, to be dispatched to the units */
2096 UNIT_VTABLE(w->data.unit)->fd_event(w->data.unit, w->fd, ev->events, w);
2097 break;
2098
2099 case WATCH_UNIT_TIMER:
2100 case WATCH_JOB_TIMER: {
2101 uint64_t v;
2102 ssize_t k;
2103
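                /* The watch fd is a timerfd set up elsewhere; each read() returns
                 * a 64-bit count of timer expirations since the last read. */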
2104 /* Some timer event, to be dispatched to the units */
2105 if ((k = read(w->fd, &v, sizeof(v))) != sizeof(v)) {
2106
2107 if (k < 0 && (errno == EINTR || errno == EAGAIN))
2108 break;
2109
2110 return k < 0 ? -errno : -EIO;
2111 }
2112
2113 if (w->type == WATCH_UNIT_TIMER)
2114 UNIT_VTABLE(w->data.unit)->timer_event(w->data.unit, v, w);
2115 else
2116 job_timer_event(w->data.job, v, w);
2117 break;
2118 }
2119
2120 case WATCH_MOUNT:
2121 /* Some mount table change, intended for the mount subsystem */
2122 mount_fd_event(m, ev->events);
2123 break;
2124
2125 case WATCH_UDEV:
2126 /* Some notification from udev, intended for the device subsystem */
2127 device_fd_event(m, ev->events);
2128 break;
2129
2130 case WATCH_DBUS_WATCH:
2131 bus_watch_event(m, w, ev->events);
2132 break;
2133
2134 case WATCH_DBUS_TIMEOUT:
2135 bus_timeout_event(m, w, ev->events);
2136 break;
2137
2138 default:
2139 assert_not_reached("Unknown epoll event type.");
2140 }
2141
2142 return 0;
2143 }
2144
2145 int manager_loop(Manager *m) {
2146 int r;
2147
2148 RATELIMIT_DEFINE(rl, 1*USEC_PER_SEC, 1000);
2149
2150 assert(m);
2151 m->exit_code = MANAGER_RUNNING;
2152
2153 /* Release the path cache */
2154 set_free_free(m->unit_path_cache);
2155 m->unit_path_cache = NULL;
2156
2157 /* There might still be some zombies hanging around from
2158          * before we were exec()'ed. Let's reap them */
2159 if ((r = manager_dispatch_sigchld(m)) < 0)
2160 return r;
2161
2162 while (m->exit_code == MANAGER_RUNNING) {
2163 struct epoll_event event;
2164 int n;
2165
2166 if (!ratelimit_test(&rl)) {
2167 /* Yay, something is going seriously wrong, pause a little */
2168 log_warning("Looping too fast. Throttling execution a little.");
2169 sleep(1);
2170 }
2171
2172 if (manager_dispatch_load_queue(m) > 0)
2173 continue;
2174
2175 if (manager_dispatch_run_queue(m) > 0)
2176 continue;
2177
2178 if (bus_dispatch(m) > 0)
2179 continue;
2180
2181 if (manager_dispatch_cleanup_queue(m) > 0)
2182 continue;
2183
2184 if (manager_dispatch_gc_queue(m) > 0)
2185 continue;
2186
2187 if (manager_dispatch_dbus_queue(m) > 0)
2188 continue;
2189
2190 if ((n = epoll_wait(m->epoll_fd, &event, 1, -1)) < 0) {
2191
2192 if (errno == EINTR)
2193 continue;
2194
2195 return -errno;
2196 }
2197
2198 assert(n == 1);
2199
2200 if ((r = process_event(m, &event)) < 0)
2201 return r;
2202 }
2203
2204 return m->exit_code;
2205 }
2206
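/* Maps a D-Bus object path back to a unit. The 31 characters skipped
 * below are the length of "/org/freedesktop/systemd1/unit/"; the rest is
 * the escaped unit name. Sketch, assuming the usual _XX hex escaping:
 * ".../unit/avahi_2ddaemon_2eservice" unescapes to "avahi-daemon.service". */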
2207 int manager_get_unit_from_dbus_path(Manager *m, const char *s, Unit **_u) {
2208 char *n;
2209 Unit *u;
2210
2211 assert(m);
2212 assert(s);
2213 assert(_u);
2214
2215 if (!startswith(s, "/org/freedesktop/systemd1/unit/"))
2216 return -EINVAL;
2217
2218 if (!(n = bus_path_unescape(s+31)))
2219 return -ENOMEM;
2220
2221 u = manager_get_unit(m, n);
2222 free(n);
2223
2224 if (!u)
2225 return -ENOENT;
2226
2227 *_u = u;
2228
2229 return 0;
2230 }
2231
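/* The job variant of the lookup above: the 30 characters skipped are
 * "/org/freedesktop/systemd1/job/", and the remaining suffix is the
 * numeric job id. */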
2232 int manager_get_job_from_dbus_path(Manager *m, const char *s, Job **_j) {
2233 Job *j;
2234 unsigned id;
2235 int r;
2236
2237 assert(m);
2238 assert(s);
2239 assert(_j);
2240
2241 if (!startswith(s, "/org/freedesktop/systemd1/job/"))
2242 return -EINVAL;
2243
2244 if ((r = safe_atou(s + 30, &id)) < 0)
2245 return r;
2246
2247 if (!(j = manager_get_job(m, id)))
2248 return -ENOENT;
2249
2250 *_j = j;
2251
2252 return 0;
2253 }
2254
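/* Sends an audit record for a unit state change. Nothing is emitted while
 * deserialization is in progress, so that reloading or re-executing the
 * manager does not report the same service starts a second time. */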
2255 void manager_send_unit_audit(Manager *m, Unit *u, int type, bool success) {
2256
2257 #ifdef HAVE_AUDIT
2258 char *p;
2259
2260 if (m->audit_fd < 0)
2261 return;
2262
2263 /* Don't generate audit events if the service was already
2264 * started and we're just deserializing */
2265 if (m->n_deserializing > 0)
2266 return;
2267
2268 if (!(p = unit_name_to_prefix_and_instance(u->meta.id))) {
2269 log_error("Failed to allocate unit name for audit message: %s", strerror(ENOMEM));
2270 return;
2271 }
2272
2273 if (audit_log_user_comm_message(m->audit_fd, type, "", p, NULL, NULL, NULL, success) < 0)
2274 log_error("Failed to send audit message: %m");
2275
2276 free(p);
2277 #endif
2278
2279 }
2280
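/* Units that watch a D-Bus name register themselves in m->watch_bus;
 * name owner changes and PID query replies are forwarded to the owning
 * unit by this function and the next one. */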
2281 void manager_dispatch_bus_name_owner_changed(
2282 Manager *m,
2283 const char *name,
2284 const char* old_owner,
2285 const char *new_owner) {
2286
2287 Unit *u;
2288
2289 assert(m);
2290 assert(name);
2291
2292 if (!(u = hashmap_get(m->watch_bus, name)))
2293 return;
2294
2295 UNIT_VTABLE(u)->bus_name_owner_change(u, name, old_owner, new_owner);
2296 }
2297
2298 void manager_dispatch_bus_query_pid_done(
2299 Manager *m,
2300 const char *name,
2301 pid_t pid) {
2302
2303 Unit *u;
2304
2305 assert(m);
2306 assert(name);
2307 assert(pid >= 1);
2308
2309 if (!(u = hashmap_get(m->watch_bus, name)))
2310 return;
2311
2312 UNIT_VTABLE(u)->bus_query_pid_done(u, name, pid);
2313 }
2314
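/* Creates the temporary file used to carry serialized state. The file is
 * unlinked right after creation, so only the open file descriptor keeps
 * the data alive and nothing is left behind on disk. */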
2315 int manager_open_serialization(Manager *m, FILE **_f) {
2316 char *path;
2317 mode_t saved_umask;
2318 int fd;
2319 FILE *f;
2320
2321 assert(_f);
2322
2323 if (m->running_as == MANAGER_SYSTEM) {
2324 mkdir_p("/dev/.systemd", 0755);
2325
2326 if (asprintf(&path, "/dev/.systemd/dump-%lu-XXXXXX", (unsigned long) getpid()) < 0)
2327 return -ENOMEM;
2328 } else {
2329 if (asprintf(&path, "/tmp/systemd-dump-%lu-XXXXXX", (unsigned long) getpid()) < 0)
2330 return -ENOMEM;
2331 }
2332
2333 saved_umask = umask(0077);
2334 fd = mkostemp(path, O_RDWR|O_CLOEXEC);
2335 umask(saved_umask);
2336
2337 if (fd < 0) {
2338 free(path);
2339 return -errno;
2340 }
2341
2342 unlink(path);
2343
2344 log_debug("Serializing state to %s", path);
2345 free(path);
2346
2347 if (!(f = fdopen(fd, "w+")))
2348 return -errno;
2349
2350 *_f = f;
2351
2352 return 0;
2353 }
2354
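/* Serialization format, as written below: a "startup-timestamp=REALTIME
 * MONOTONIC" header terminated by an empty line, followed by one section
 * per serializable unit, each introduced by the unit id on a line of its
 * own and filled in by unit_serialize(). */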
2355 int manager_serialize(Manager *m, FILE *f, FDSet *fds) {
2356 Iterator i;
2357 Unit *u;
2358 const char *t;
2359 int r;
2360
2361 assert(m);
2362 assert(f);
2363 assert(fds);
2364
2365 fprintf(f, "startup-timestamp=%llu %llu\n\n",
2366 (unsigned long long) m->startup_timestamp.realtime,
2367 (unsigned long long) m->startup_timestamp.monotonic);
2368
2369 HASHMAP_FOREACH_KEY(u, t, m->units, i) {
2370 if (u->meta.id != t)
2371 continue;
2372
2373 if (!unit_can_serialize(u))
2374 continue;
2375
2376 /* Start marker */
2377 fputs(u->meta.id, f);
2378 fputc('\n', f);
2379
2380 if ((r = unit_serialize(u, f, fds)) < 0)
2381 return r;
2382 }
2383
2384 if (ferror(f))
2385 return -EIO;
2386
2387 return 0;
2388 }
2389
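/* Reads back the format produced by manager_serialize(): first the
 * key=value header lines up to the empty separator line, then the
 * per-unit sections, where each unit is loaded by the name on its start
 * marker line before unit_deserialize() consumes its entries. */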
2390 int manager_deserialize(Manager *m, FILE *f, FDSet *fds) {
2391 int r = 0;
2392
2393 assert(m);
2394 assert(f);
2395
2396 log_debug("Deserializing state...");
2397
2398 m->n_deserializing ++;
2399
2400 for (;;) {
2401 char line[1024], *l;
2402
2403 if (!fgets(line, sizeof(line), f)) {
2404 if (feof(f))
2405 r = 0;
2406 else
2407 r = -errno;
2408
2409 goto finish;
2410 }
2411
2412 char_array_0(line);
2413 l = strstrip(line);
2414
2415 if (l[0] == 0)
2416 break;
2417
2418 if (startswith(l, "startup-timestamp=")) {
2419 unsigned long long a, b;
2420
2421 if (sscanf(l+18, "%llu %llu", &a, &b) != 2)
2422 log_debug("Failed to parse startup timestamp value %s", l+18);
2423 else {
2424 m->startup_timestamp.realtime = a;
2425 m->startup_timestamp.monotonic = b;
2426 }
2427 } else
2428 log_debug("Unknown serialization item '%s'", l);
2429 }
2430
2431 for (;;) {
2432 Unit *u;
2433 char name[UNIT_NAME_MAX+2];
2434
2435 /* Start marker */
2436 if (!fgets(name, sizeof(name), f)) {
2437 if (feof(f))
2438 r = 0;
2439 else
2440 r = -errno;
2441
2442 goto finish;
2443 }
2444
2445 char_array_0(name);
2446
2447 if ((r = manager_load_unit(m, strstrip(name), NULL, NULL, &u)) < 0)
2448 goto finish;
2449
2450 if ((r = unit_deserialize(u, f, fds)) < 0)
2451 goto finish;
2452 }
2453
2454 finish:
2455 if (ferror(f))
2456 r = -EIO;
2459
2460 assert(m->n_deserializing > 0);
2461 m->n_deserializing --;
2462
2463 return r;
2464 }
2465
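/* Reload sequence: serialize the current state to a temporary file, drop
 * all jobs and units, refresh the unit search paths, re-enumerate units
 * from configuration, deserialize the saved state on top of that, and
 * finally coldplug everything back into its previous state. */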
2466 int manager_reload(Manager *m) {
2467 int r, q;
2468 FILE *f;
2469 FDSet *fds;
2470
2471 assert(m);
2472
2473 if ((r = manager_open_serialization(m, &f)) < 0)
2474 return r;
2475
2476 if (!(fds = fdset_new())) {
2477 r = -ENOMEM;
2478 goto finish;
2479 }
2480
2481 if ((r = manager_serialize(m, f, fds)) < 0)
2482 goto finish;
2483
2484 if (fseeko(f, 0, SEEK_SET) < 0) {
2485 r = -errno;
2486 goto finish;
2487 }
2488
2489 /* From here on there is no way back. */
2490 manager_clear_jobs_and_units(m);
2491
2492 /* Find new unit paths */
2493 lookup_paths_free(&m->lookup_paths);
2494 if ((q = lookup_paths_init(&m->lookup_paths, m->running_as)) < 0)
2495 r = q;
2496
2497 m->n_deserializing ++;
2498
2499 /* First, enumerate what we can from all config files */
2500 if ((q = manager_enumerate(m)) < 0)
2501 r = q;
2502
2503 /* Second, deserialize our stored data */
2504 if ((q = manager_deserialize(m, f, fds)) < 0)
2505 r = q;
2506
2507 fclose(f);
2508 f = NULL;
2509
2510 /* Third, fire things up! */
2511 if ((q = manager_coldplug(m)) < 0)
2512 r = q;
2513
2514 assert(m->n_deserializing > 0);
2515 m->n_deserializing --;
2516
2517 finish:
2518 if (f)
2519 fclose(f);
2520
2521 if (fds)
2522 fdset_free(fds);
2523
2524 return r;
2525 }
2526
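/* A heuristic: the system counts as booting while the initial job (id 1)
 * is still around, and as shutting down while a job is queued for
 * shutdown.target. */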
2527 bool manager_is_booting_or_shutting_down(Manager *m) {
2528 Unit *u;
2529
2530 assert(m);
2531
2532 /* Is the initial job still around? */
2533 if (manager_get_job(m, 1))
2534 return true;
2535
2536 /* Is there a job for the shutdown target? */
2537 if ((u = manager_get_unit(m, SPECIAL_SHUTDOWN_TARGET)))
2538 return !!u->meta.job;
2539
2540 return false;
2541 }
2542
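/* Clears the maintenance (failure) state of all loaded units, e.g. after
 * the administrator has fixed the underlying problem. */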
2543 void manager_reset_maintenance(Manager *m) {
2544 Unit *u;
2545 Iterator i;
2546
2547 assert(m);
2548
2549 HASHMAP_FOREACH(u, m->units, i)
2550 unit_reset_maintenance(u);
2551 }
2552
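/* Presumably, as with the other string tables in this tree,
 * DEFINE_STRING_TABLE_LOOKUP() generates manager_running_as_to_string()
 * and manager_running_as_from_string() from the table below. */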
2553 static const char* const manager_running_as_table[_MANAGER_RUNNING_AS_MAX] = {
2554 [MANAGER_SYSTEM] = "system",
2555 [MANAGER_SESSION] = "session"
2556 };
2557
2558 DEFINE_STRING_TABLE_LOOKUP(manager_running_as, ManagerRunningAs);