1 /*-*- Mode: C; c-basic-offset: 8 -*-*/
2
3 /***
4 This file is part of systemd.
5
6 Copyright 2010 Lennart Poettering
7
8 systemd is free software; you can redistribute it and/or modify it
9 under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2 of the License, or
11 (at your option) any later version.
12
13 systemd is distributed in the hope that it will be useful, but
14 WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with systemd; If not, see <http://www.gnu.org/licenses/>.
20 ***/
21
22 #include <assert.h>
23 #include <errno.h>
24 #include <string.h>
25 #include <sys/epoll.h>
26 #include <signal.h>
27 #include <sys/signalfd.h>
28 #include <sys/wait.h>
29 #include <unistd.h>
30 #include <utmpx.h>
31 #include <sys/poll.h>
32 #include <sys/reboot.h>
33 #include <sys/ioctl.h>
34 #include <linux/kd.h>
35 #include <termios.h>
36 #include <fcntl.h>
37 #include <sys/types.h>
38 #include <sys/stat.h>
39 #include <dirent.h>
40
41 #include "manager.h"
42 #include "hashmap.h"
43 #include "macro.h"
44 #include "strv.h"
45 #include "log.h"
46 #include "util.h"
47 #include "ratelimit.h"
48 #include "cgroup.h"
49 #include "mount-setup.h"
50 #include "utmp-wtmp.h"
51 #include "unit-name.h"
52 #include "dbus-unit.h"
53 #include "dbus-job.h"
54 #include "missing.h"
55 #include "path-lookup.h"
56 #include "special.h"
57 #include "bus-errors.h"
58
59 /* As soon as 16 units are in our GC queue, make sure to run a gc sweep */
60 #define GC_QUEUE_ENTRIES_MAX 16
61
62 /* As soon as 10s have passed since a unit was added to our GC queue, make sure to run a gc sweep */
63 #define GC_QUEUE_USEC_MAX (10*USEC_PER_SEC)
64
65 /* Where clients shall send notification messages to */
66 #define NOTIFY_SOCKET "/org/freedesktop/systemd1/notify"
67
68 static int manager_setup_notify(Manager *m) {
69 union {
70 struct sockaddr sa;
71 struct sockaddr_un un;
72 } sa;
73 struct epoll_event ev;
74 int one = 1;
75
76 assert(m);
77
78 m->notify_watch.type = WATCH_NOTIFY;
79 if ((m->notify_watch.fd = socket(AF_UNIX, SOCK_DGRAM|SOCK_CLOEXEC|SOCK_NONBLOCK, 0)) < 0) {
80 log_error("Failed to allocate notification socket: %m");
81 return -errno;
82 }
83
84 zero(sa);
85 sa.sa.sa_family = AF_UNIX;
86
87 if (getpid() != 1)
88 snprintf(sa.un.sun_path+1, sizeof(sa.un.sun_path)-1, NOTIFY_SOCKET "/%llu", random_ull());
89 else
90 strncpy(sa.un.sun_path+1, NOTIFY_SOCKET, sizeof(sa.un.sun_path)-1);
91
92 if (bind(m->notify_watch.fd, &sa.sa, sizeof(sa_family_t) + 1 + strlen(sa.un.sun_path+1)) < 0) {
93 log_error("bind() failed: %m");
94 return -errno;
95 }
96
97 if (setsockopt(m->notify_watch.fd, SOL_SOCKET, SO_PASSCRED, &one, sizeof(one)) < 0) {
98 log_error("SO_PASSCRED failed: %m");
99 return -errno;
100 }
101
102 zero(ev);
103 ev.events = EPOLLIN;
104 ev.data.ptr = &m->notify_watch;
105
106 if (epoll_ctl(m->epoll_fd, EPOLL_CTL_ADD, m->notify_watch.fd, &ev) < 0)
107 return -errno;
108
109 if (!(m->notify_socket = strdup(sa.un.sun_path+1)))
110 return -ENOMEM;
111
112 return 0;
113 }
114
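/* A minimal sketch (for illustration only, not part of this file) of the
 * client side of the notification socket set up above: a daemon that was
 * handed the socket address in an environment variable would send a
 * datagram to the abstract-namespace name. The variable name
 * "NOTIFY_SOCKET", the '@' prefix convention and the "READY=1" tag are
 * assumptions here, not defined in this file:
 *
 *     static int notify_ready(void) {
 *             union {
 *                     struct sockaddr sa;
 *                     struct sockaddr_un un;
 *             } sa;
 *             const char *e, *msg = "READY=1";
 *             int fd;
 *
 *             if (!(e = getenv("NOTIFY_SOCKET")) || e[0] != '@')
 *                     return -EINVAL;
 *
 *             if ((fd = socket(AF_UNIX, SOCK_DGRAM|SOCK_CLOEXEC, 0)) < 0)
 *                     return -errno;
 *
 *             memset(&sa, 0, sizeof(sa));
 *             sa.sa.sa_family = AF_UNIX;
 *             strncpy(sa.un.sun_path, e, sizeof(sa.un.sun_path));
 *             sa.un.sun_path[0] = 0;   // '@' marks the abstract namespace
 *
 *             if (sendto(fd, msg, strlen(msg), 0, &sa.sa,
 *                        sizeof(sa_family_t) + strlen(e)) < 0) {
 *                     close(fd);
 *                     return -errno;
 *             }
 *
 *             close(fd);
 *             return 0;
 *     }
 */
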
115 static int enable_special_signals(Manager *m) {
116         int fd;
117
118 assert(m);
119
120 /* Make sure we get SIGINT on control-alt-del */
121 if (reboot(RB_DISABLE_CAD) < 0)
122 log_warning("Failed to enable ctrl-alt-del handling: %m");
123
124 if ((fd = open_terminal("/dev/tty0", O_RDWR|O_NOCTTY)) < 0)
125 log_warning("Failed to open /dev/tty0: %m");
126 else {
127 /* Make sure we get SIGWINCH on kbrequest */
128 if (ioctl(fd, KDSIGACCEPT, SIGWINCH) < 0)
129 log_warning("Failed to enable kbrequest handling: %s", strerror(errno));
130
131 close_nointr_nofail(fd);
132 }
133
134 return 0;
135 }
136
137 static int manager_setup_signals(Manager *m) {
138 sigset_t mask;
139 struct epoll_event ev;
140 struct sigaction sa;
141
142 assert(m);
143
144 /* We are not interested in SIGSTOP and friends. */
145 zero(sa);
146 sa.sa_handler = SIG_DFL;
147 sa.sa_flags = SA_NOCLDSTOP|SA_RESTART;
148 assert_se(sigaction(SIGCHLD, &sa, NULL) == 0);
149
150 assert_se(sigemptyset(&mask) == 0);
151
152 sigset_add_many(&mask,
153 SIGCHLD, /* Child died */
154 SIGTERM, /* Reexecute daemon */
155 SIGHUP, /* Reload configuration */
156 SIGUSR1, /* systemd/upstart: reconnect to D-Bus */
157 SIGUSR2, /* systemd: dump status */
158 SIGINT, /* Kernel sends us this on control-alt-del */
159 SIGWINCH, /* Kernel sends us this on kbrequest (alt-arrowup) */
160 SIGPWR, /* Some kernel drivers and upsd send us this on power failure */
161 SIGRTMIN+0, /* systemd: start default.target */
162 SIGRTMIN+1, /* systemd: start rescue.target */
163 SIGRTMIN+2, /* systemd: isolate emergency.target */
164 SIGRTMIN+3, /* systemd: start halt.target */
165 SIGRTMIN+4, /* systemd: start poweroff.target */
166 SIGRTMIN+5, /* systemd: start reboot.target */
167 -1);
168 assert_se(sigprocmask(SIG_SETMASK, &mask, NULL) == 0);
169
170 m->signal_watch.type = WATCH_SIGNAL;
171 if ((m->signal_watch.fd = signalfd(-1, &mask, SFD_NONBLOCK|SFD_CLOEXEC)) < 0)
172 return -errno;
173
174 zero(ev);
175 ev.events = EPOLLIN;
176 ev.data.ptr = &m->signal_watch;
177
178 if (epoll_ctl(m->epoll_fd, EPOLL_CTL_ADD, m->signal_watch.fd, &ev) < 0)
179 return -errno;
180
181 if (m->running_as == MANAGER_SYSTEM)
182 return enable_special_signals(m);
183
184 return 0;
185 }
186
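/* Usage note (an illustration, nothing below depends on it): when running
 * as the system manager, the real-time signals masked above can be sent
 * from a shell, e.g. "kill -s RTMIN+4 1" should request poweroff.target
 * and "kill -s RTMIN+1 1" rescue.target, assuming PID 1 is this manager
 * and the shell's kill understands the RTMIN+n notation. */
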
187 int manager_new(ManagerRunningAs running_as, Manager **_m) {
188 Manager *m;
189 int r = -ENOMEM;
190
191 assert(_m);
192 assert(running_as >= 0);
193 assert(running_as < _MANAGER_RUNNING_AS_MAX);
194
195 if (!(m = new0(Manager, 1)))
196 return -ENOMEM;
197
198 dual_timestamp_get(&m->startup_timestamp);
199
200 m->running_as = running_as;
201 m->name_data_slot = m->subscribed_data_slot = -1;
202 m->exit_code = _MANAGER_EXIT_CODE_INVALID;
203 m->pin_cgroupfs_fd = -1;
204
205 m->signal_watch.fd = m->mount_watch.fd = m->udev_watch.fd = m->epoll_fd = m->dev_autofs_fd = -1;
206 m->current_job_id = 1; /* start as id #1, so that we can leave #0 around as "null-like" value */
207
208 if (!(m->environment = strv_copy(environ)))
209 goto fail;
210
211 if (!(m->units = hashmap_new(string_hash_func, string_compare_func)))
212 goto fail;
213
214 if (!(m->jobs = hashmap_new(trivial_hash_func, trivial_compare_func)))
215 goto fail;
216
217 if (!(m->transaction_jobs = hashmap_new(trivial_hash_func, trivial_compare_func)))
218 goto fail;
219
220 if (!(m->watch_pids = hashmap_new(trivial_hash_func, trivial_compare_func)))
221 goto fail;
222
223 if (!(m->cgroup_bondings = hashmap_new(string_hash_func, string_compare_func)))
224 goto fail;
225
226 if (!(m->watch_bus = hashmap_new(string_hash_func, string_compare_func)))
227 goto fail;
228
229 if ((m->epoll_fd = epoll_create1(EPOLL_CLOEXEC)) < 0)
230 goto fail;
231
232 if ((r = lookup_paths_init(&m->lookup_paths, m->running_as)) < 0)
233 goto fail;
234
235 if ((r = manager_setup_signals(m)) < 0)
236 goto fail;
237
238 if ((r = manager_setup_cgroup(m)) < 0)
239 goto fail;
240
241 if ((r = manager_setup_notify(m)) < 0)
242 goto fail;
243
244 /* Try to connect to the busses, if possible. */
245 if ((r = bus_init(m)) < 0)
246 goto fail;
247
248 *_m = m;
249 return 0;
250
251 fail:
252 manager_free(m);
253 return r;
254 }
255
256 static unsigned manager_dispatch_cleanup_queue(Manager *m) {
257 Meta *meta;
258 unsigned n = 0;
259
260 assert(m);
261
262 while ((meta = m->cleanup_queue)) {
263 assert(meta->in_cleanup_queue);
264
265 unit_free((Unit*) meta);
266 n++;
267 }
268
269 return n;
270 }
271
272 enum {
273 GC_OFFSET_IN_PATH, /* This one is on the path we were travelling */
274 GC_OFFSET_UNSURE, /* No clue */
275 GC_OFFSET_GOOD, /* We still need this unit */
276 GC_OFFSET_BAD, /* We don't need this unit anymore */
277 _GC_OFFSET_MAX
278 };
279
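/* A small worked example of how these offsets are used below: the manager
 * keeps a monotonically increasing gc_marker and bumps it by _GC_OFFSET_MAX
 * before every sweep. If the current base is, say, 40, then a unit marked
 * 40+GC_OFFSET_GOOD was proven reachable in this sweep, 40+GC_OFFSET_BAD was
 * proven collectable, and any marker below 40 stems from an earlier sweep
 * and counts as "not visited yet". That way no per-unit state needs to be
 * reset between GC runs. */
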
280 static void unit_gc_sweep(Unit *u, unsigned gc_marker) {
281 Iterator i;
282 Unit *other;
283 bool is_bad;
284
285 assert(u);
286
287 if (u->meta.gc_marker == gc_marker + GC_OFFSET_GOOD ||
288 u->meta.gc_marker == gc_marker + GC_OFFSET_BAD ||
289 u->meta.gc_marker == gc_marker + GC_OFFSET_IN_PATH)
290 return;
291
292 if (u->meta.in_cleanup_queue)
293 goto bad;
294
295 if (unit_check_gc(u))
296 goto good;
297
298 u->meta.gc_marker = gc_marker + GC_OFFSET_IN_PATH;
299
300 is_bad = true;
301
302 SET_FOREACH(other, u->meta.dependencies[UNIT_REFERENCED_BY], i) {
303 unit_gc_sweep(other, gc_marker);
304
305 if (other->meta.gc_marker == gc_marker + GC_OFFSET_GOOD)
306 goto good;
307
308 if (other->meta.gc_marker != gc_marker + GC_OFFSET_BAD)
309 is_bad = false;
310 }
311
312 if (is_bad)
313 goto bad;
314
315 /* We were unable to find anything out about this entry, so
316 * let's investigate it later */
317 u->meta.gc_marker = gc_marker + GC_OFFSET_UNSURE;
318 unit_add_to_gc_queue(u);
319 return;
320
321 bad:
322 /* We definitely know that this one is not useful anymore, so
323 * let's mark it for deletion */
324 u->meta.gc_marker = gc_marker + GC_OFFSET_BAD;
325 unit_add_to_cleanup_queue(u);
326 return;
327
328 good:
329 u->meta.gc_marker = gc_marker + GC_OFFSET_GOOD;
330 }
331
332 static unsigned manager_dispatch_gc_queue(Manager *m) {
333 Meta *meta;
334 unsigned n = 0;
335 unsigned gc_marker;
336
337 assert(m);
338
339 if ((m->n_in_gc_queue < GC_QUEUE_ENTRIES_MAX) &&
340 (m->gc_queue_timestamp <= 0 ||
341 (m->gc_queue_timestamp + GC_QUEUE_USEC_MAX) > now(CLOCK_MONOTONIC)))
342 return 0;
343
344 log_debug("Running GC...");
345
346 m->gc_marker += _GC_OFFSET_MAX;
347 if (m->gc_marker + _GC_OFFSET_MAX <= _GC_OFFSET_MAX)
348 m->gc_marker = 1;
349
350 gc_marker = m->gc_marker;
351
352 while ((meta = m->gc_queue)) {
353 assert(meta->in_gc_queue);
354
355 unit_gc_sweep((Unit*) meta, gc_marker);
356
357 LIST_REMOVE(Meta, gc_queue, m->gc_queue, meta);
358 meta->in_gc_queue = false;
359
360 n++;
361
362 if (meta->gc_marker == gc_marker + GC_OFFSET_BAD ||
363 meta->gc_marker == gc_marker + GC_OFFSET_UNSURE) {
364 log_debug("Collecting %s", meta->id);
365 meta->gc_marker = gc_marker + GC_OFFSET_BAD;
366 unit_add_to_cleanup_queue((Unit*) meta);
367 }
368 }
369
370 m->n_in_gc_queue = 0;
371 m->gc_queue_timestamp = 0;
372
373 return n;
374 }
375
376 static void manager_clear_jobs_and_units(Manager *m) {
377 Job *j;
378 Unit *u;
379
380 assert(m);
381
382 while ((j = hashmap_first(m->transaction_jobs)))
383 job_free(j);
384
385 while ((u = hashmap_first(m->units)))
386 unit_free(u);
387
388 manager_dispatch_cleanup_queue(m);
389
390 assert(!m->load_queue);
391 assert(!m->run_queue);
392 assert(!m->dbus_unit_queue);
393 assert(!m->dbus_job_queue);
394 assert(!m->cleanup_queue);
395 assert(!m->gc_queue);
396
397 assert(hashmap_isempty(m->transaction_jobs));
398 assert(hashmap_isempty(m->jobs));
399 assert(hashmap_isempty(m->units));
400 }
401
402 void manager_free(Manager *m) {
403 UnitType c;
404
405 assert(m);
406
407 manager_clear_jobs_and_units(m);
408
409 for (c = 0; c < _UNIT_TYPE_MAX; c++)
410 if (unit_vtable[c]->shutdown)
411 unit_vtable[c]->shutdown(m);
412
413 /* If we reexecute ourselves, we keep the root cgroup
414 * around */
415 manager_shutdown_cgroup(m, m->exit_code != MANAGER_REEXECUTE);
416
417 bus_done(m);
418
419 hashmap_free(m->units);
420 hashmap_free(m->jobs);
421 hashmap_free(m->transaction_jobs);
422 hashmap_free(m->watch_pids);
423 hashmap_free(m->watch_bus);
424
425 if (m->epoll_fd >= 0)
426 close_nointr_nofail(m->epoll_fd);
427 if (m->signal_watch.fd >= 0)
428 close_nointr_nofail(m->signal_watch.fd);
429 if (m->notify_watch.fd >= 0)
430 close_nointr_nofail(m->notify_watch.fd);
431
432 free(m->notify_socket);
433
434 lookup_paths_free(&m->lookup_paths);
435 strv_free(m->environment);
436
437 hashmap_free(m->cgroup_bondings);
438 set_free_free(m->unit_path_cache);
439
440 free(m);
441 }
442
443 int manager_enumerate(Manager *m) {
444 int r = 0, q;
445 UnitType c;
446
447 assert(m);
448
449 /* Let's ask every type to load all units from disk/kernel
450 * that it might know */
451 for (c = 0; c < _UNIT_TYPE_MAX; c++)
452 if (unit_vtable[c]->enumerate)
453 if ((q = unit_vtable[c]->enumerate(m)) < 0)
454 r = q;
455
456 manager_dispatch_load_queue(m);
457 return r;
458 }
459
460 int manager_coldplug(Manager *m) {
461 int r = 0, q;
462 Iterator i;
463 Unit *u;
464 char *k;
465
466 assert(m);
467
468 /* Then, let's set up their initial state. */
469 HASHMAP_FOREACH_KEY(u, k, m->units, i) {
470
471 /* ignore aliases */
472 if (u->meta.id != k)
473 continue;
474
475 if ((q = unit_coldplug(u)) < 0)
476 r = q;
477 }
478
479 return r;
480 }
481
482 static void manager_build_unit_path_cache(Manager *m) {
483 char **i;
484 DIR *d = NULL;
485 int r;
486
487 assert(m);
488
489 set_free_free(m->unit_path_cache);
490
491 if (!(m->unit_path_cache = set_new(string_hash_func, string_compare_func))) {
492 log_error("Failed to allocate unit path cache.");
493 return;
494 }
495
496 /* This simply builds a list of files we know exist, so that
497 * we don't always have to go to disk */
498
499 STRV_FOREACH(i, m->lookup_paths.unit_path) {
500 struct dirent *de;
501
502 if (!(d = opendir(*i))) {
503 log_error("Failed to open directory: %m");
504 continue;
505 }
506
507 while ((de = readdir(d))) {
508 char *p;
509
510 if (ignore_file(de->d_name))
511 continue;
512
513 if (asprintf(&p, "%s/%s", streq(*i, "/") ? "" : *i, de->d_name) < 0) {
514 r = -ENOMEM;
515 goto fail;
516 }
517
518 if ((r = set_put(m->unit_path_cache, p)) < 0) {
519 free(p);
520 goto fail;
521 }
522 }
523
524 closedir(d);
525 d = NULL;
526 }
527
528 return;
529
530 fail:
531 log_error("Failed to build unit path cache: %s", strerror(-r));
532
533 set_free_free(m->unit_path_cache);
534 m->unit_path_cache = NULL;
535
536 if (d)
537 closedir(d);
538 }
539
540 int manager_startup(Manager *m, FILE *serialization, FDSet *fds) {
541 int r, q;
542
543 assert(m);
544
545 manager_build_unit_path_cache(m);
546
547 /* First, enumerate what we can from all config files */
548 r = manager_enumerate(m);
549
550 /* Second, deserialize if there is something to deserialize */
551 if (serialization)
552 if ((q = manager_deserialize(m, serialization, fds)) < 0)
553 r = q;
554
555 /* Third, fire things up! */
556 if ((q = manager_coldplug(m)) < 0)
557 r = q;
558
559 /* Now that the initial devices are available, let's see if we
560 * can write the utmp file */
561 manager_write_utmp_reboot(m);
562
563 return r;
564 }
565
566 static void transaction_delete_job(Manager *m, Job *j, bool delete_dependencies) {
567 assert(m);
568 assert(j);
569
570 /* Deletes one job from the transaction */
571
572 manager_transaction_unlink_job(m, j, delete_dependencies);
573
574 if (!j->installed)
575 job_free(j);
576 }
577
578 static void transaction_delete_unit(Manager *m, Unit *u) {
579 Job *j;
580
581 /* Deletes all jobs associated with a certain unit from the
582 * transaction */
583
584 while ((j = hashmap_get(m->transaction_jobs, u)))
585 transaction_delete_job(m, j, true);
586 }
587
588 static void transaction_clean_dependencies(Manager *m) {
589 Iterator i;
590 Job *j;
591
592 assert(m);
593
594 /* Drops all dependencies of all installed jobs */
595
596 HASHMAP_FOREACH(j, m->jobs, i) {
597 while (j->subject_list)
598 job_dependency_free(j->subject_list);
599 while (j->object_list)
600 job_dependency_free(j->object_list);
601 }
602
603 assert(!m->transaction_anchor);
604 }
605
606 static void transaction_abort(Manager *m) {
607 Job *j;
608
609 assert(m);
610
611 while ((j = hashmap_first(m->transaction_jobs)))
612 if (j->installed)
613 transaction_delete_job(m, j, true);
614 else
615 job_free(j);
616
617 assert(hashmap_isempty(m->transaction_jobs));
618
619 transaction_clean_dependencies(m);
620 }
621
622 static void transaction_find_jobs_that_matter_to_anchor(Manager *m, Job *j, unsigned generation) {
623 JobDependency *l;
624
625 assert(m);
626
627 /* A recursive sweep through the graph that marks all units
628 * that matter to the anchor job, i.e. are directly or
629 * indirectly a dependency of the anchor job via paths that
630 * are fully marked as mattering. */
631
632 if (j)
633 l = j->subject_list;
634 else
635 l = m->transaction_anchor;
636
637 LIST_FOREACH(subject, l, l) {
638
639 /* This link does not matter */
640 if (!l->matters)
641 continue;
642
643 /* This unit has already been marked */
644 if (l->object->generation == generation)
645 continue;
646
647 l->object->matters_to_anchor = true;
648 l->object->generation = generation;
649
650 transaction_find_jobs_that_matter_to_anchor(m, l->object, generation);
651 }
652 }
653
654 static void transaction_merge_and_delete_job(Manager *m, Job *j, Job *other, JobType t) {
655 JobDependency *l, *last;
656
657 assert(j);
658 assert(other);
659 assert(j->unit == other->unit);
660 assert(!j->installed);
661
662 /* Merges 'other' into 'j' and then deletes j. */
663
664 j->type = t;
665 j->state = JOB_WAITING;
666 j->override = j->override || other->override;
667
668 j->matters_to_anchor = j->matters_to_anchor || other->matters_to_anchor;
669
670 /* Patch us in as new owner of the JobDependency objects */
671 last = NULL;
672 LIST_FOREACH(subject, l, other->subject_list) {
673 assert(l->subject == other);
674 l->subject = j;
675 last = l;
676 }
677
678 /* Merge both lists */
679 if (last) {
680 last->subject_next = j->subject_list;
681 if (j->subject_list)
682 j->subject_list->subject_prev = last;
683 j->subject_list = other->subject_list;
684 }
685
686 /* Patch us in as new owner of the JobDependency objects */
687 last = NULL;
688 LIST_FOREACH(object, l, other->object_list) {
689 assert(l->object == other);
690 l->object = j;
691 last = l;
692 }
693
694 /* Merge both lists */
695 if (last) {
696 last->object_next = j->object_list;
697 if (j->object_list)
698 j->object_list->object_prev = last;
699 j->object_list = other->object_list;
700 }
701
702 /* Kill the other job */
703 other->subject_list = NULL;
704 other->object_list = NULL;
705 transaction_delete_job(m, other, true);
706 }
707
708 static int delete_one_unmergeable_job(Manager *m, Job *j) {
709 Job *k;
710
711 assert(j);
712
713 /* Tries to delete one item in the linked list
714 * j->transaction_next->transaction_next->... that conflicts
715 * with another one, in an attempt to make an inconsistent
716 * transaction work. */
717
718 /* We rely here on the fact that if a merged with b does not merge
719 * with c, then at least one of a and b will not merge with c either. */
720 LIST_FOREACH(transaction, j, j)
721 LIST_FOREACH(transaction, k, j->transaction_next) {
722 Job *d;
723
724 /* Is this one mergeable? Then skip it */
725 if (job_type_is_mergeable(j->type, k->type))
726 continue;
727
728 /* Ok, we found two that conflict, let's see if we can
729 * drop one of them */
730 if (!j->matters_to_anchor)
731 d = j;
732 else if (!k->matters_to_anchor)
733 d = k;
734 else
735 return -ENOEXEC;
736
737 /* Ok, we can drop one, so let's do so. */
738 log_notice("Trying to fix job merging by deleting job %s/%s", d->unit->meta.id, job_type_to_string(d->type));
739 transaction_delete_job(m, d, true);
740 return 0;
741 }
742
743 return -EINVAL;
744 }
745
746 static int transaction_merge_jobs(Manager *m, DBusError *e) {
747 Job *j;
748 Iterator i;
749 int r;
750
751 assert(m);
752
753 /* First step, check whether any of the jobs for one specific
754 * task conflict. If so, try to drop one of them. */
755 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
756 JobType t;
757 Job *k;
758
759 t = j->type;
760 LIST_FOREACH(transaction, k, j->transaction_next) {
761 if ((r = job_type_merge(&t, k->type)) >= 0)
762 continue;
763
764 /* OK, we could not merge all jobs for this
765 * action. Let's see if we can get rid of one
766 * of them */
767
768 if ((r = delete_one_unmergeable_job(m, j)) >= 0)
769 /* Ok, we managed to drop one, now
770 * let's ask our callers to call us
771 * again after garbage collecting */
772 return -EAGAIN;
773
774 /* We couldn't merge anything. Failure */
775 dbus_set_error(e, BUS_ERROR_TRANSACTION_JOBS_CONFLICTING, "Transaction contains conflicting jobs '%s' and '%s' for %s. Probably contradicting requirement dependencies configured.",
776 job_type_to_string(t), job_type_to_string(k->type), k->unit->meta.id);
777 return r;
778 }
779 }
780
781 /* Second step, merge the jobs. */
782 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
783 JobType t = j->type;
784 Job *k;
785
786 /* Merge all transactions */
787 LIST_FOREACH(transaction, k, j->transaction_next)
788 assert_se(job_type_merge(&t, k->type) == 0);
789
790 /* If an active job is mergeable, merge it too */
791 if (j->unit->meta.job)
792 job_type_merge(&t, j->unit->meta.job->type); /* Might fail. Which is OK */
793
794 while ((k = j->transaction_next)) {
795 if (j->installed) {
796 transaction_merge_and_delete_job(m, k, j, t);
797 j = k;
798 } else
799 transaction_merge_and_delete_job(m, j, k, t);
800 }
801
802 assert(!j->transaction_next);
803 assert(!j->transaction_prev);
804 }
805
806 return 0;
807 }
808
809 static void transaction_drop_redundant(Manager *m) {
810 bool again;
811
812 assert(m);
813
814 /* Goes through the transaction and removes all jobs that are
815 * a noop */
816
817 do {
818 Job *j;
819 Iterator i;
820
821 again = false;
822
823 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
824 bool changes_something = false;
825 Job *k;
826
827 LIST_FOREACH(transaction, k, j) {
828
829 if (!job_is_anchor(k) &&
830 job_type_is_redundant(k->type, unit_active_state(k->unit)))
831 continue;
832
833 changes_something = true;
834 break;
835 }
836
837 if (changes_something)
838 continue;
839
840 log_debug("Found redundant job %s/%s, dropping.", j->unit->meta.id, job_type_to_string(j->type));
841 transaction_delete_job(m, j, false);
842 again = true;
843 break;
844 }
845
846 } while (again);
847 }
848
849 static bool unit_matters_to_anchor(Unit *u, Job *j) {
850 assert(u);
851 assert(!j->transaction_prev);
852
853 /* Checks whether at least one of the jobs for this unit
854 * matters to the anchor. */
855
856 LIST_FOREACH(transaction, j, j)
857 if (j->matters_to_anchor)
858 return true;
859
860 return false;
861 }
862
863 static int transaction_verify_order_one(Manager *m, Job *j, Job *from, unsigned generation, DBusError *e) {
864 Iterator i;
865 Unit *u;
866 int r;
867
868 assert(m);
869 assert(j);
870 assert(!j->transaction_prev);
871
872 /* Does a recursive sweep through the ordering graph, looking
873 * for a cycle. If we find a cycle we try to break it. */
874
875 /* Have we seen this before? */
876 if (j->generation == generation) {
877 Job *k;
878
879 /* If the marker is NULL we have been here already and
880 * decided the job was loop-free from here. Hence
881 * shortcut things and return right away. */
882 if (!j->marker)
883 return 0;
884
885 /* So, the marker is not NULL and we already have been
886 * here. We have a cycle. Let's try to break it. We go
887 * backwards in our path and try to find a suitable
888 * job to remove. We use the marker to find our way
889 * back, since, smart as we are, we stored our way back
890 * in there. */
891 log_warning("Found ordering cycle on %s/%s", j->unit->meta.id, job_type_to_string(j->type));
892
893 for (k = from; k; k = ((k->generation == generation && k->marker != k) ? k->marker : NULL)) {
894
895 log_info("Walked on cycle path to %s/%s", k->unit->meta.id, job_type_to_string(k->type));
896
897 if (!k->installed &&
898 !unit_matters_to_anchor(k->unit, k)) {
899 /* Ok, we can drop this one, so let's
900 * do so. */
901 log_warning("Breaking order cycle by deleting job %s/%s", k->unit->meta.id, job_type_to_string(k->type));
902 transaction_delete_unit(m, k->unit);
903 return -EAGAIN;
904 }
905
906 /* Check if this in fact was the beginning of
907 * the cycle */
908 if (k == j)
909 break;
910 }
911
912 log_error("Unable to break cycle");
913
914 dbus_set_error(e, BUS_ERROR_TRANSACTION_ORDER_IS_CYCLIC, "Transaction order is cyclic. See logs for details.");
915 return -ENOEXEC;
916 }
917
918 /* Make the marker point to where we come from, so that we can
919 * find our way backwards if we want to break a cycle. We use
920 * a special marker for the beginning: we point to
921 * ourselves. */
922 j->marker = from ? from : j;
923 j->generation = generation;
924
925 /* We assume that the dependencies are bidirectional, and
926 * hence can ignore UNIT_AFTER */
927 SET_FOREACH(u, j->unit->meta.dependencies[UNIT_BEFORE], i) {
928 Job *o;
929
930 /* Is there a job for this unit? */
931 if (!(o = hashmap_get(m->transaction_jobs, u)))
932
933 /* Ok, there is no job for this in the
934 * transaction, but maybe there is already one
935 * running? */
936 if (!(o = u->meta.job))
937 continue;
938
939 if ((r = transaction_verify_order_one(m, o, j, generation, e)) < 0)
940 return r;
941 }
942
943 /* Ok, let's backtrack, and remember that this entry is not on
944 * our path anymore. */
945 j->marker = NULL;
946
947 return 0;
948 }
949
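/* To make the cycle handling above concrete: consider a transaction with
 * jobs A, B and C whose units are ordered A before B, B before C and C
 * before A. The sweep enters A and sets A->marker = A (the special "start"
 * marker), descends into B (B->marker = A), then C (C->marker = B), and
 * finally reaches A again with its generation already set and its marker
 * non-NULL: a cycle. It then walks back C -> B -> A via the markers, drops
 * the jobs of the first unit on that path whose job is neither installed
 * nor needed by the anchor, and returns -EAGAIN so the caller can retry
 * with the now hopefully acyclic graph. */
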
950 static int transaction_verify_order(Manager *m, unsigned *generation, DBusError *e) {
951 Job *j;
952 int r;
953 Iterator i;
954 unsigned g;
955
956 assert(m);
957 assert(generation);
958
959 /* Check if the ordering graph is cyclic. If it is, try to fix
960 * that up by dropping one of the jobs. */
961
962 g = (*generation)++;
963
964 HASHMAP_FOREACH(j, m->transaction_jobs, i)
965 if ((r = transaction_verify_order_one(m, j, NULL, g, e)) < 0)
966 return r;
967
968 return 0;
969 }
970
971 static void transaction_collect_garbage(Manager *m) {
972 bool again;
973
974 assert(m);
975
976 /* Drop jobs that are not required by any other job */
977
978 do {
979 Iterator i;
980 Job *j;
981
982 again = false;
983
984 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
985 if (j->object_list)
986 continue;
987
988 log_debug("Garbage collecting job %s/%s", j->unit->meta.id, job_type_to_string(j->type));
989 transaction_delete_job(m, j, true);
990 again = true;
991 break;
992 }
993
994 } while (again);
995 }
996
997 static int transaction_is_destructive(Manager *m, DBusError *e) {
998 Iterator i;
999 Job *j;
1000
1001 assert(m);
1002
1003 /* Checks whether applying this transaction means that
1004 * existing jobs would be replaced */
1005
1006 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
1007
1008 /* Assume merged */
1009 assert(!j->transaction_prev);
1010 assert(!j->transaction_next);
1011
1012 if (j->unit->meta.job &&
1013 j->unit->meta.job != j &&
1014 !job_type_is_superset(j->type, j->unit->meta.job->type)) {
1015
1016 dbus_set_error(e, BUS_ERROR_TRANSACTION_IS_DESTRUCTIVE, "Transaction is destructive.");
1017 return -EEXIST;
1018 }
1019 }
1020
1021 return 0;
1022 }
1023
1024 static void transaction_minimize_impact(Manager *m) {
1025 bool again;
1026 assert(m);
1027
1028 /* Drops all unnecessary jobs that reverse already active jobs
1029 * or that stop a running service. */
1030
1031 do {
1032 Job *j;
1033 Iterator i;
1034
1035 again = false;
1036
1037 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
1038 LIST_FOREACH(transaction, j, j) {
1039 bool stops_running_service, changes_existing_job;
1040
1041 /* If it matters, we shouldn't drop it */
1042 if (j->matters_to_anchor)
1043 continue;
1044
1045 /* Would this stop a running service?
1046 * Would this change an existing job?
1047 * If so, let's drop this entry */
1048
1049 stops_running_service =
1050 j->type == JOB_STOP && UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(j->unit));
1051
1052 changes_existing_job =
1053 j->unit->meta.job && job_type_is_conflicting(j->type, j->unit->meta.job->type);
1054
1055 if (!stops_running_service && !changes_existing_job)
1056 continue;
1057
1058 if (stops_running_service)
1059 log_info("%s/%s would stop a running service.", j->unit->meta.id, job_type_to_string(j->type));
1060
1061 if (changes_existing_job)
1062 log_info("%s/%s would change existing job.", j->unit->meta.id, job_type_to_string(j->type));
1063
1064 /* Ok, let's get rid of this */
1065 log_info("Deleting %s/%s to minimize impact.", j->unit->meta.id, job_type_to_string(j->type));
1066
1067 transaction_delete_job(m, j, true);
1068 again = true;
1069 break;
1070 }
1071
1072 if (again)
1073 break;
1074 }
1075
1076 } while (again);
1077 }
1078
1079 static int transaction_apply(Manager *m) {
1080 Iterator i;
1081 Job *j;
1082 int r;
1083
1084 /* Moves the transaction jobs to the set of active jobs */
1085
1086 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
1087 /* Assume merged */
1088 assert(!j->transaction_prev);
1089 assert(!j->transaction_next);
1090
1091 if (j->installed)
1092 continue;
1093
1094 if ((r = hashmap_put(m->jobs, UINT32_TO_PTR(j->id), j)) < 0)
1095 goto rollback;
1096 }
1097
1098 while ((j = hashmap_steal_first(m->transaction_jobs))) {
1099 if (j->installed)
1100 continue;
1101
1102 if (j->unit->meta.job)
1103 job_free(j->unit->meta.job);
1104
1105 j->unit->meta.job = j;
1106 j->installed = true;
1107
1108 /* We're fully installed. Now let's free data we don't
1109 * need anymore. */
1110
1111 assert(!j->transaction_next);
1112 assert(!j->transaction_prev);
1113
1114 job_add_to_run_queue(j);
1115 job_add_to_dbus_queue(j);
1116 }
1117
1118 /* As last step, kill all remaining job dependencies. */
1119 transaction_clean_dependencies(m);
1120
1121 return 0;
1122
1123 rollback:
1124
1125 HASHMAP_FOREACH(j, m->transaction_jobs, i) {
1126 if (j->installed)
1127 continue;
1128
1129 hashmap_remove(m->jobs, UINT32_TO_PTR(j->id));
1130 }
1131
1132 return r;
1133 }
1134
1135 static int transaction_activate(Manager *m, JobMode mode, DBusError *e) {
1136 int r;
1137 unsigned generation = 1;
1138
1139 assert(m);
1140
1141 /* This applies the changes recorded in transaction_jobs to
1142 * the actual list of jobs, if possible. */
1143
1144 /* First step: figure out which jobs matter */
1145 transaction_find_jobs_that_matter_to_anchor(m, NULL, generation++);
1146
1147 /* Second step: Try not to stop any running services if
1148 * we don't have to. Don't try to reverse running
1149 * jobs if we don't have to. */
1150 transaction_minimize_impact(m);
1151
1152 /* Third step: Drop redundant jobs */
1153 transaction_drop_redundant(m);
1154
1155 for (;;) {
1156 /* Fourth step: Let's remove unneeded jobs that might
1157 * be lurking. */
1158 transaction_collect_garbage(m);
1159
1160 /* Fifth step: verify order makes sense and correct
1161 * cycles if necessary and possible */
1162 if ((r = transaction_verify_order(m, &generation, e)) >= 0)
1163 break;
1164
1165 if (r != -EAGAIN) {
1166 log_warning("Requested transaction contains an unfixable cyclic ordering dependency: %s", bus_error(e, r));
1167 goto rollback;
1168 }
1169
1170 /* Let's see if the resulting transaction ordering
1171 * graph is still cyclic... */
1172 }
1173
1174 for (;;) {
1175 /* Sixth step: let's drop unmergeable entries if
1176 * necessary and possible, merge entries we can
1177 * merge */
1178 if ((r = transaction_merge_jobs(m, e)) >= 0)
1179 break;
1180
1181 if (r != -EAGAIN) {
1182 log_warning("Requested transaction contains unmergeable jobs: %s", bus_error(e, r));
1183 goto rollback;
1184 }
1185
1186 /* Seventh step: an entry got dropped, let's garbage
1187 * collect its dependencies. */
1188 transaction_collect_garbage(m);
1189
1190 /* Let's see if the resulting transaction still has
1191 * unmergeable entries ... */
1192 }
1193
1194 /* Eighth step: Drop redundant jobs again, if the merging now allows us to drop more. */
1195 transaction_drop_redundant(m);
1196
1197 /* Ninth step: check whether we can actually apply this */
1198 if (mode == JOB_FAIL)
1199 if ((r = transaction_is_destructive(m, e)) < 0) {
1200 log_notice("Requested transaction contradicts existing jobs: %s", bus_error(e, r));
1201 goto rollback;
1202 }
1203
1204 /* Tenth step: apply changes */
1205 if ((r = transaction_apply(m)) < 0) {
1206 log_warning("Failed to apply transaction: %s", strerror(-r));
1207 goto rollback;
1208 }
1209
1210 assert(hashmap_isempty(m->transaction_jobs));
1211 assert(!m->transaction_anchor);
1212
1213 return 0;
1214
1215 rollback:
1216 transaction_abort(m);
1217 return r;
1218 }
1219
1220 static Job* transaction_add_one_job(Manager *m, JobType type, Unit *unit, bool override, bool *is_new) {
1221 Job *j, *f;
1222 int r;
1223
1224 assert(m);
1225 assert(unit);
1226
1227 /* Looks for an existing prospective job and returns that. If
1228 * it doesn't exist it is created and added to the prospective
1229 * jobs list. */
1230
1231 f = hashmap_get(m->transaction_jobs, unit);
1232
1233 LIST_FOREACH(transaction, j, f) {
1234 assert(j->unit == unit);
1235
1236 if (j->type == type) {
1237 if (is_new)
1238 *is_new = false;
1239 return j;
1240 }
1241 }
1242
1243 if (unit->meta.job && unit->meta.job->type == type)
1244 j = unit->meta.job;
1245 else if (!(j = job_new(m, type, unit)))
1246 return NULL;
1247
1248 j->generation = 0;
1249 j->marker = NULL;
1250 j->matters_to_anchor = false;
1251 j->override = override;
1252
1253 LIST_PREPEND(Job, transaction, f, j);
1254
1255 if ((r = hashmap_replace(m->transaction_jobs, unit, f)) < 0) {
1256 job_free(j);
1257 return NULL;
1258 }
1259
1260 if (is_new)
1261 *is_new = true;
1262
1263 log_debug("Added job %s/%s to transaction.", unit->meta.id, job_type_to_string(type));
1264
1265 return j;
1266 }
1267
1268 void manager_transaction_unlink_job(Manager *m, Job *j, bool delete_dependencies) {
1269 assert(m);
1270 assert(j);
1271
1272 if (j->transaction_prev)
1273 j->transaction_prev->transaction_next = j->transaction_next;
1274 else if (j->transaction_next)
1275 hashmap_replace(m->transaction_jobs, j->unit, j->transaction_next);
1276 else
1277 hashmap_remove_value(m->transaction_jobs, j->unit, j);
1278
1279 if (j->transaction_next)
1280 j->transaction_next->transaction_prev = j->transaction_prev;
1281
1282 j->transaction_prev = j->transaction_next = NULL;
1283
1284 while (j->subject_list)
1285 job_dependency_free(j->subject_list);
1286
1287 while (j->object_list) {
1288 Job *other = j->object_list->matters ? j->object_list->subject : NULL;
1289
1290 job_dependency_free(j->object_list);
1291
1292 if (other && delete_dependencies) {
1293 log_info("Deleting job %s/%s as dependency of job %s/%s",
1294 other->unit->meta.id, job_type_to_string(other->type),
1295 j->unit->meta.id, job_type_to_string(j->type));
1296 transaction_delete_job(m, other, delete_dependencies);
1297 }
1298 }
1299 }
1300
1301 static int transaction_add_job_and_dependencies(
1302 Manager *m,
1303 JobType type,
1304 Unit *unit,
1305 Job *by,
1306 bool matters,
1307 bool override,
1308 DBusError *e,
1309 Job **_ret) {
1310 Job *ret;
1311 Iterator i;
1312 Unit *dep;
1313 int r;
1314 bool is_new;
1315
1316 assert(m);
1317 assert(type < _JOB_TYPE_MAX);
1318 assert(unit);
1319
1320 if (unit->meta.load_state != UNIT_LOADED) {
1321 dbus_set_error(e, BUS_ERROR_LOAD_FAILED, "Unit %s failed to load. See logs for details.", unit->meta.id);
1322 return -EINVAL;
1323 }
1324
1325 if (!unit_job_is_applicable(unit, type)) {
1326 dbus_set_error(e, BUS_ERROR_JOB_TYPE_NOT_APPLICABLE, "Job type %s is not applicable for unit %s.", job_type_to_string(type), unit->meta.id);
1327 return -EBADR;
1328 }
1329
1330 /* First add the job. */
1331 if (!(ret = transaction_add_one_job(m, type, unit, override, &is_new)))
1332 return -ENOMEM;
1333
1334 /* Then, add a link to the job. */
1335 if (!job_dependency_new(by, ret, matters))
1336 return -ENOMEM;
1337
1338 if (is_new) {
1339 /* Finally, recursively add in all dependencies. */
1340 if (type == JOB_START || type == JOB_RELOAD_OR_START) {
1341 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_REQUIRES], i)
1342 if ((r = transaction_add_job_and_dependencies(m, JOB_START, dep, ret, true, override, e, NULL)) < 0 && r != -EBADR)
1343 goto fail;
1344
1345 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_REQUIRES_OVERRIDABLE], i)
1346 if ((r = transaction_add_job_and_dependencies(m, JOB_START, dep, ret, !override, override, e, NULL)) < 0 && r != -EBADR) {
1347 log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->meta.id, bus_error(e, r));
1348 dbus_error_free(e);
1349 }
1350
1351 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_WANTS], i)
1352 if ((r = transaction_add_job_and_dependencies(m, JOB_START, dep, ret, false, false, e, NULL)) < 0) {
1353 log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->meta.id, bus_error(e, r));
1354 dbus_error_free(e);
1355 }
1356
1357 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_REQUISITE], i)
1358 if ((r = transaction_add_job_and_dependencies(m, JOB_VERIFY_ACTIVE, dep, ret, true, override, e, NULL)) < 0 && r != -EBADR)
1359 goto fail;
1360
1361 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_REQUISITE_OVERRIDABLE], i)
1362 if ((r = transaction_add_job_and_dependencies(m, JOB_VERIFY_ACTIVE, dep, ret, !override, override, e, NULL)) < 0 && r != -EBADR) {
1363 log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->meta.id, bus_error(e, r));
1364 dbus_error_free(e);
1365 }
1366
1367 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_CONFLICTS], i)
1368 if ((r = transaction_add_job_and_dependencies(m, JOB_STOP, dep, ret, true, override, e, NULL)) < 0 && r != -EBADR)
1369 goto fail;
1370
1371 } else if (type == JOB_STOP || type == JOB_RESTART || type == JOB_TRY_RESTART) {
1372
1373 SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_REQUIRED_BY], i)
1374 if ((r = transaction_add_job_and_dependencies(m, type, dep, ret, true, override, e, NULL)) < 0 && r != -EBADR)
1375 goto fail;
1376 }
1377
1378 /* JOB_VERIFY_ACTIVE and JOB_RELOAD require no dependency handling */
1379 }
1380
1381 if (_ret)
1382 *_ret = ret;
1383
1384 return 0;
1385
1386 fail:
1387 return r;
1388 }
1389
1390 static int transaction_add_isolate_jobs(Manager *m) {
1391 Iterator i;
1392 Unit *u;
1393 char *k;
1394 int r;
1395
1396 assert(m);
1397
1398 HASHMAP_FOREACH_KEY(u, k, m->units, i) {
1399
1400 /* ignore aliases */
1401 if (u->meta.id != k)
1402 continue;
1403
1404 if (UNIT_VTABLE(u)->no_isolate)
1405 continue;
1406
1407 /* No need to stop inactive units */
1408 if (UNIT_IS_INACTIVE_OR_MAINTENANCE(unit_active_state(u)))
1409 continue;
1410
1411 /* Is there already something listed for this? */
1412 if (hashmap_get(m->transaction_jobs, u))
1413 continue;
1414
1415 if ((r = transaction_add_job_and_dependencies(m, JOB_STOP, u, NULL, true, false, NULL, NULL)) < 0)
1416 log_warning("Cannot add isolate job for unit %s, ignoring: %s", u->meta.id, strerror(-r));
1417 }
1418
1419 return 0;
1420 }
1421
1422 int manager_add_job(Manager *m, JobType type, Unit *unit, JobMode mode, bool override, DBusError *e, Job **_ret) {
1423 int r;
1424 Job *ret;
1425
1426 assert(m);
1427 assert(type < _JOB_TYPE_MAX);
1428 assert(unit);
1429 assert(mode < _JOB_MODE_MAX);
1430
1431 if (mode == JOB_ISOLATE && type != JOB_START) {
1432 dbus_set_error(e, BUS_ERROR_INVALID_JOB_MODE, "Isolate is only valid for start.");
1433 return -EINVAL;
1434 }
1435
1436 log_debug("Trying to enqueue job %s/%s", unit->meta.id, job_type_to_string(type));
1437
1438 if ((r = transaction_add_job_and_dependencies(m, type, unit, NULL, true, override, e, &ret)) < 0) {
1439 transaction_abort(m);
1440 return r;
1441 }
1442
1443 if (mode == JOB_ISOLATE)
1444 if ((r = transaction_add_isolate_jobs(m)) < 0) {
1445 transaction_abort(m);
1446 return r;
1447 }
1448
1449 if ((r = transaction_activate(m, mode, e)) < 0)
1450 return r;
1451
1452 log_debug("Enqueued job %s/%s as %u", unit->meta.id, job_type_to_string(type), (unsigned) ret->id);
1453
1454 if (_ret)
1455 *_ret = ret;
1456
1457 return 0;
1458 }
1459
1460 int manager_add_job_by_name(Manager *m, JobType type, const char *name, JobMode mode, bool override, DBusError *e, Job **_ret) {
1461 Unit *unit;
1462 int r;
1463
1464 assert(m);
1465 assert(type < _JOB_TYPE_MAX);
1466 assert(name);
1467 assert(mode < _JOB_MODE_MAX);
1468
1469 if ((r = manager_load_unit(m, name, NULL, NULL, &unit)) < 0)
1470 return r;
1471
1472 return manager_add_job(m, type, unit, mode, override, e, _ret);
1473 }
1474
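/* A hypothetical caller (e.g. a D-Bus method handler) would use the two
 * entry points above roughly like this ("foo.service" is a made-up unit
 * name, purely for illustration):
 *
 *     DBusError error;
 *     Job *job = NULL;
 *     int r;
 *
 *     dbus_error_init(&error);
 *     r = manager_add_job_by_name(m, JOB_START, "foo.service",
 *                                 JOB_REPLACE, false, &error, &job);
 *     if (r < 0)
 *             log_error("Failed to enqueue job: %s", bus_error(&error, r));
 *     dbus_error_free(&error);
 *
 * On success the returned job has been installed and is already queued for
 * execution by manager_dispatch_run_queue(). */
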
1475 Job *manager_get_job(Manager *m, uint32_t id) {
1476 assert(m);
1477
1478 return hashmap_get(m->jobs, UINT32_TO_PTR(id));
1479 }
1480
1481 Unit *manager_get_unit(Manager *m, const char *name) {
1482 assert(m);
1483 assert(name);
1484
1485 return hashmap_get(m->units, name);
1486 }
1487
1488 unsigned manager_dispatch_load_queue(Manager *m) {
1489 Meta *meta;
1490 unsigned n = 0;
1491
1492 assert(m);
1493
1494 /* Make sure we are not run recursively */
1495 if (m->dispatching_load_queue)
1496 return 0;
1497
1498 m->dispatching_load_queue = true;
1499
1500 /* Dispatches the load queue. Takes a unit from the queue and
1501 * tries to load its data until the queue is empty */
1502
1503 while ((meta = m->load_queue)) {
1504 assert(meta->in_load_queue);
1505
1506 unit_load((Unit*) meta);
1507 n++;
1508 }
1509
1510 m->dispatching_load_queue = false;
1511 return n;
1512 }
1513
1514 int manager_load_unit_prepare(Manager *m, const char *name, const char *path, DBusError *e, Unit **_ret) {
1515 Unit *ret;
1516 int r;
1517
1518 assert(m);
1519 assert(name || path);
1520
1521 /* This will prepare the unit for loading, but not actually
1522 * load anything from disk. */
1523
1524 if (path && !is_path(path)) {
1525 dbus_set_error(e, BUS_ERROR_INVALID_PATH, "Path %s is not absolute.", path);
1526 return -EINVAL;
1527 }
1528
1529 if (!name)
1530 name = file_name_from_path(path);
1531
1532 if (!unit_name_is_valid(name)) {
1533 dbus_set_error(e, BUS_ERROR_INVALID_NAME, "Unit name %s is not valid.", name);
1534 return -EINVAL;
1535 }
1536
1537 if ((ret = manager_get_unit(m, name))) {
1538 *_ret = ret;
1539 return 1;
1540 }
1541
1542 if (!(ret = unit_new(m)))
1543 return -ENOMEM;
1544
1545 if (path)
1546 if (!(ret->meta.fragment_path = strdup(path))) {
1547 unit_free(ret);
1548 return -ENOMEM;
1549 }
1550
1551 if ((r = unit_add_name(ret, name)) < 0) {
1552 unit_free(ret);
1553 return r;
1554 }
1555
1556 unit_add_to_load_queue(ret);
1557 unit_add_to_dbus_queue(ret);
1558 unit_add_to_gc_queue(ret);
1559
1560 if (_ret)
1561 *_ret = ret;
1562
1563 return 0;
1564 }
1565
1566 int manager_load_unit(Manager *m, const char *name, const char *path, DBusError *e, Unit **_ret) {
1567 int r;
1568
1569 assert(m);
1570
1571 /* This will load the service information files, but not actually
1572 * start any services or anything. */
1573
1574 if ((r = manager_load_unit_prepare(m, name, path, e, _ret)) != 0)
1575 return r;
1576
1577 manager_dispatch_load_queue(m);
1578
1579 if (_ret)
1580 *_ret = unit_follow_merge(*_ret);
1581
1582 return 0;
1583 }
1584
1585 void manager_dump_jobs(Manager *s, FILE *f, const char *prefix) {
1586 Iterator i;
1587 Job *j;
1588
1589 assert(s);
1590 assert(f);
1591
1592 HASHMAP_FOREACH(j, s->jobs, i)
1593 job_dump(j, f, prefix);
1594 }
1595
1596 void manager_dump_units(Manager *s, FILE *f, const char *prefix) {
1597 Iterator i;
1598 Unit *u;
1599 const char *t;
1600
1601 assert(s);
1602 assert(f);
1603
1604 HASHMAP_FOREACH_KEY(u, t, s->units, i)
1605 if (u->meta.id == t)
1606 unit_dump(u, f, prefix);
1607 }
1608
1609 void manager_clear_jobs(Manager *m) {
1610 Job *j;
1611
1612 assert(m);
1613
1614 transaction_abort(m);
1615
1616 while ((j = hashmap_first(m->jobs)))
1617 job_free(j);
1618 }
1619
1620 unsigned manager_dispatch_run_queue(Manager *m) {
1621 Job *j;
1622 unsigned n = 0;
1623
1624 if (m->dispatching_run_queue)
1625 return 0;
1626
1627 m->dispatching_run_queue = true;
1628
1629 while ((j = m->run_queue)) {
1630 assert(j->installed);
1631 assert(j->in_run_queue);
1632
1633 job_run_and_invalidate(j);
1634 n++;
1635 }
1636
1637 m->dispatching_run_queue = false;
1638 return n;
1639 }
1640
1641 unsigned manager_dispatch_dbus_queue(Manager *m) {
1642 Job *j;
1643 Meta *meta;
1644 unsigned n = 0;
1645
1646 assert(m);
1647
1648 if (m->dispatching_dbus_queue)
1649 return 0;
1650
1651 m->dispatching_dbus_queue = true;
1652
1653 while ((meta = m->dbus_unit_queue)) {
1654 assert(meta->in_dbus_queue);
1655
1656 bus_unit_send_change_signal((Unit*) meta);
1657 n++;
1658 }
1659
1660 while ((j = m->dbus_job_queue)) {
1661 assert(j->in_dbus_queue);
1662
1663 bus_job_send_change_signal(j);
1664 n++;
1665 }
1666
1667 m->dispatching_dbus_queue = false;
1668 return n;
1669 }
1670
1671 static int manager_process_notify_fd(Manager *m) {
1672 ssize_t n;
1673
1674 assert(m);
1675
1676 for (;;) {
1677 char buf[4096];
1678 struct msghdr msghdr;
1679 struct iovec iovec;
1680 struct ucred *ucred;
1681 union {
1682 struct cmsghdr cmsghdr;
1683 uint8_t buf[CMSG_SPACE(sizeof(struct ucred))];
1684 } control;
1685 Unit *u;
1686 char **tags;
1687
1688 zero(iovec);
1689 iovec.iov_base = buf;
1690 iovec.iov_len = sizeof(buf)-1;
1691
1692 zero(control);
1693 zero(msghdr);
1694 msghdr.msg_iov = &iovec;
1695 msghdr.msg_iovlen = 1;
1696 msghdr.msg_control = &control;
1697 msghdr.msg_controllen = sizeof(control);
1698
1699 if ((n = recvmsg(m->notify_watch.fd, &msghdr, MSG_DONTWAIT)) <= 0) {
1700 if (n >= 0)
1701 return -EIO;
1702
1703 if (errno == EAGAIN)
1704 break;
1705
1706 return -errno;
1707 }
1708
1709 if (msghdr.msg_controllen < CMSG_LEN(sizeof(struct ucred)) ||
1710 control.cmsghdr.cmsg_level != SOL_SOCKET ||
1711 control.cmsghdr.cmsg_type != SCM_CREDENTIALS ||
1712 control.cmsghdr.cmsg_len != CMSG_LEN(sizeof(struct ucred))) {
1713 log_warning("Received notify message without credentials. Ignoring.");
1714 continue;
1715 }
1716
1717 ucred = (struct ucred*) CMSG_DATA(&control.cmsghdr);
1718
1719 if (!(u = hashmap_get(m->watch_pids, LONG_TO_PTR(ucred->pid))))
1720 if (!(u = cgroup_unit_by_pid(m, ucred->pid))) {
1721 log_warning("Cannot find unit for notify message of PID %lu.", (unsigned long) ucred->pid);
1722 continue;
1723 }
1724
1725 assert((size_t) n < sizeof(buf));
1726 buf[n] = 0;
1727 if (!(tags = strv_split(buf, "\n\r")))
1728 return -ENOMEM;
1729
1730 log_debug("Got notification message for unit %s", u->meta.id);
1731
1732 if (UNIT_VTABLE(u)->notify_message)
1733 UNIT_VTABLE(u)->notify_message(u, ucred->pid, tags);
1734
1735 strv_free(tags);
1736 }
1737
1738 return 0;
1739 }
1740
1741 static int manager_dispatch_sigchld(Manager *m) {
1742 assert(m);
1743
1744 for (;;) {
1745 siginfo_t si;
1746 Unit *u;
1747 int r;
1748
1749 zero(si);
1750
1751 /* First we call waitid() for a PID and do not reap the
1752 * zombie. That way we can still access /proc/$PID for
1753 * it while it is a zombie. */
1754 if (waitid(P_ALL, 0, &si, WEXITED|WNOHANG|WNOWAIT) < 0) {
1755
1756 if (errno == ECHILD)
1757 break;
1758
1759 if (errno == EINTR)
1760 continue;
1761
1762 return -errno;
1763 }
1764
1765 if (si.si_pid <= 0)
1766 break;
1767
1768 if (si.si_code == CLD_EXITED || si.si_code == CLD_KILLED || si.si_code == CLD_DUMPED) {
1769 char *name = NULL;
1770
1771 get_process_name(si.si_pid, &name);
1772 log_debug("Got SIGCHLD for process %lu (%s)", (unsigned long) si.si_pid, strna(name));
1773 free(name);
1774 }
1775
1776 /* Let's flush any message the dying child might still
1777 * have queued for us. This ensures that the process
1778 * still exists in /proc so that we can figure out
1779 * which cgroup and hence unit it belongs to. */
1780 if ((r = manager_process_notify_fd(m)) < 0)
1781 return r;
1782
1783 /* And now figure out the unit this belongs to */
1784 if (!(u = hashmap_get(m->watch_pids, LONG_TO_PTR(si.si_pid))))
1785 u = cgroup_unit_by_pid(m, si.si_pid);
1786
1787 /* And now, we actually reap the zombie. */
1788 if (waitid(P_PID, si.si_pid, &si, WEXITED) < 0) {
1789 if (errno == EINTR)
1790 continue;
1791
1792 return -errno;
1793 }
1794
1795 if (si.si_code != CLD_EXITED && si.si_code != CLD_KILLED && si.si_code != CLD_DUMPED)
1796 continue;
1797
1798 log_debug("Child %lu died (code=%s, status=%i/%s)",
1799 (long unsigned) si.si_pid,
1800 sigchld_code_to_string(si.si_code),
1801 si.si_status,
1802 strna(si.si_code == CLD_EXITED ? exit_status_to_string(si.si_status) : signal_to_string(si.si_status)));
1803
1804 if (!u)
1805 continue;
1806
1807 log_debug("Child %lu belongs to %s", (long unsigned) si.si_pid, u->meta.id);
1808
1809 hashmap_remove(m->watch_pids, LONG_TO_PTR(si.si_pid));
1810 UNIT_VTABLE(u)->sigchld_event(u, si.si_pid, si.si_code, si.si_status);
1811 }
1812
1813 return 0;
1814 }
1815
1816 static int manager_start_target(Manager *m, const char *name, JobMode mode) {
1817 int r;
1818 DBusError error;
1819
1820 dbus_error_init(&error);
1821
1822 log_info("Activating special unit %s", name);
1823
1824 if ((r = manager_add_job_by_name(m, JOB_START, name, mode, true, &error, NULL)) < 0)
1825 log_error("Failed to enqueue %s job: %s", name, bus_error(&error, r));
1826
1827 dbus_error_free(&error);
1828
1829 return r;
1830 }
1831
1832 static int manager_process_signal_fd(Manager *m) {
1833 ssize_t n;
1834 struct signalfd_siginfo sfsi;
1835 bool sigchld = false;
1836
1837 assert(m);
1838
1839 for (;;) {
1840 if ((n = read(m->signal_watch.fd, &sfsi, sizeof(sfsi))) != sizeof(sfsi)) {
1841
1842 if (n >= 0)
1843 return -EIO;
1844
1845 if (errno == EAGAIN)
1846 break;
1847
1848 return -errno;
1849 }
1850
1851 log_debug("Received SIG%s", strna(signal_to_string(sfsi.ssi_signo)));
1852
1853 switch (sfsi.ssi_signo) {
1854
1855 case SIGCHLD:
1856 sigchld = true;
1857 break;
1858
1859 case SIGTERM:
1860 if (m->running_as == MANAGER_SYSTEM) {
1861 /* This is for compatibility with the
1862 * original sysvinit */
1863 m->exit_code = MANAGER_REEXECUTE;
1864 break;
1865 }
1866
1867 /* Fall through */
1868
1869 case SIGINT:
1870 if (m->running_as == MANAGER_SYSTEM) {
1871 manager_start_target(m, SPECIAL_CTRL_ALT_DEL_TARGET, JOB_REPLACE);
1872 break;
1873 }
1874
1875 /* Run the exit service if there is one; if not, just exit. */
1876 if (manager_start_target(m, SPECIAL_EXIT_SERVICE, JOB_REPLACE) < 0) {
1877 m->exit_code = MANAGER_EXIT;
1878 return 0;
1879 }
1880
1881 break;
1882
1883 case SIGWINCH:
1884 if (m->running_as == MANAGER_SYSTEM)
1885 manager_start_target(m, SPECIAL_KBREQUEST_TARGET, JOB_REPLACE);
1886
1887 /* This is a nop on non-init */
1888 break;
1889
1890 case SIGPWR:
1891 if (m->running_as == MANAGER_SYSTEM)
1892 manager_start_target(m, SPECIAL_SIGPWR_TARGET, JOB_REPLACE);
1893
1894 /* This is a nop on non-init */
1895 break;
1896
1897 case SIGUSR1: {
1898 Unit *u;
1899
1900 u = manager_get_unit(m, SPECIAL_DBUS_SERVICE);
1901
1902 if (!u || UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u))) {
1903 log_info("Trying to reconnect to bus...");
1904 bus_init(m);
1905 }
1906
1907 if (!u || !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u))) {
1908 log_info("Loading D-Bus service...");
1909 manager_start_target(m, SPECIAL_DBUS_SERVICE, JOB_REPLACE);
1910 }
1911
1912 break;
1913 }
1914
1915 case SIGUSR2: {
1916 FILE *f;
1917 char *dump = NULL;
1918 size_t size;
1919
1920 if (!(f = open_memstream(&dump, &size))) {
1921 log_warning("Failed to allocate memory stream.");
1922 break;
1923 }
1924
1925 manager_dump_units(m, f, "\t");
1926 manager_dump_jobs(m, f, "\t");
1927
1928 if (ferror(f)) {
1929 fclose(f);
1930 free(dump);
1931 log_warning("Failed to write status stream");
1932 break;
1933 }
1934
1935 fclose(f);
1936 log_dump(LOG_INFO, dump);
1937 free(dump);
1938
1939 break;
1940 }
1941
1942 case SIGHUP:
1943 m->exit_code = MANAGER_RELOAD;
1944 break;
1945
1946 default: {
1947 static const char * const table[] = {
1948 [0] = SPECIAL_DEFAULT_TARGET,
1949 [1] = SPECIAL_RESCUE_TARGET,
1950 [2] = SPECIAL_EMERGENCY_SERVICE,
1951 [3] = SPECIAL_HALT_TARGET,
1952 [4] = SPECIAL_POWEROFF_TARGET,
1953 [5] = SPECIAL_REBOOT_TARGET
1954 };
1955
1956 if ((int) sfsi.ssi_signo >= SIGRTMIN+0 &&
1957 (int) sfsi.ssi_signo < SIGRTMIN+(int) ELEMENTSOF(table)) {
1958 manager_start_target(m, table[sfsi.ssi_signo - SIGRTMIN],
1959 (sfsi.ssi_signo == SIGRTMIN+1 || sfsi.ssi_signo == SIGRTMIN+2) ? JOB_ISOLATE : JOB_REPLACE);
1960 break;
1961 }
1962
1963 log_warning("Got unhandled signal <%s>.", strna(signal_to_string(sfsi.ssi_signo)));
1964 }
1965 }
1966 }
1967
1968 if (sigchld)
1969 return manager_dispatch_sigchld(m);
1970
1971 return 0;
1972 }
1973
1974 static int process_event(Manager *m, struct epoll_event *ev) {
1975 int r;
1976 Watch *w;
1977
1978 assert(m);
1979 assert(ev);
1980
1981 assert_se(w = ev->data.ptr);
1982
1983 switch (w->type) {
1984
1985 case WATCH_SIGNAL:
1986
1987 /* An incoming signal? */
1988 if (ev->events != EPOLLIN)
1989 return -EINVAL;
1990
1991 if ((r = manager_process_signal_fd(m)) < 0)
1992 return r;
1993
1994 break;
1995
1996 case WATCH_NOTIFY:
1997
1998 /* An incoming daemon notification event? */
1999 if (ev->events != EPOLLIN)
2000 return -EINVAL;
2001
2002 if ((r = manager_process_notify_fd(m)) < 0)
2003 return r;
2004
2005 break;
2006
2007 case WATCH_FD:
2008
2009 /* Some fd event, to be dispatched to the units */
2010 UNIT_VTABLE(w->data.unit)->fd_event(w->data.unit, w->fd, ev->events, w);
2011 break;
2012
2013 case WATCH_TIMER: {
2014 uint64_t v;
2015 ssize_t k;
2016
2017 /* Some timer event, to be dispatched to the units */
2018 if ((k = read(w->fd, &v, sizeof(v))) != sizeof(v)) {
2019
2020 if (k < 0 && (errno == EINTR || errno == EAGAIN))
2021 break;
2022
2023 return k < 0 ? -errno : -EIO;
2024 }
2025
2026 UNIT_VTABLE(w->data.unit)->timer_event(w->data.unit, v, w);
2027 break;
2028 }
2029
2030 case WATCH_MOUNT:
2031 /* Some mount table change, intended for the mount subsystem */
2032 mount_fd_event(m, ev->events);
2033 break;
2034
2035 case WATCH_UDEV:
2036 /* Some notification from udev, intended for the device subsystem */
2037 device_fd_event(m, ev->events);
2038 break;
2039
2040 case WATCH_DBUS_WATCH:
2041 bus_watch_event(m, w, ev->events);
2042 break;
2043
2044 case WATCH_DBUS_TIMEOUT:
2045 bus_timeout_event(m, w, ev->events);
2046 break;
2047
2048 default:
2049 assert_not_reached("Unknown epoll event type.");
2050 }
2051
2052 return 0;
2053 }
2054
2055 int manager_loop(Manager *m) {
2056 int r;
2057
2058 RATELIMIT_DEFINE(rl, 1*USEC_PER_SEC, 1000);
2059
2060 assert(m);
2061 m->exit_code = MANAGER_RUNNING;
2062
2063 /* Release the path cache */
2064 set_free_free(m->unit_path_cache);
2065 m->unit_path_cache = NULL;
2066
2067 /* There might still be some zombies hanging around from
2068 * before we were exec()'ed. Let's reap them. */
2069 if ((r = manager_dispatch_sigchld(m)) < 0)
2070 return r;
2071
2072 while (m->exit_code == MANAGER_RUNNING) {
2073 struct epoll_event event;
2074 int n;
2075
2076 if (!ratelimit_test(&rl)) {
2077 /* Yay, something is going seriously wrong, pause a little */
2078 log_warning("Looping too fast. Throttling execution a little.");
2079 sleep(1);
2080 }
2081
2082 if (manager_dispatch_load_queue(m) > 0)
2083 continue;
2084
2085 if (manager_dispatch_run_queue(m) > 0)
2086 continue;
2087
2088 if (bus_dispatch(m) > 0)
2089 continue;
2090
2091 if (manager_dispatch_cleanup_queue(m) > 0)
2092 continue;
2093
2094 if (manager_dispatch_gc_queue(m) > 0)
2095 continue;
2096
2097 if (manager_dispatch_dbus_queue(m) > 0)
2098 continue;
2099
2100 if ((n = epoll_wait(m->epoll_fd, &event, 1, -1)) < 0) {
2101
2102 if (errno == EINTR)
2103 continue;
2104
2105 return -errno;
2106 }
2107
2108 assert(n == 1);
2109
2110 if ((r = process_event(m, &event)) < 0)
2111 return r;
2112 }
2113
2114 return m->exit_code;
2115 }
2116
2117 int manager_get_unit_from_dbus_path(Manager *m, const char *s, Unit **_u) {
2118 char *n;
2119 Unit *u;
2120
2121 assert(m);
2122 assert(s);
2123 assert(_u);
2124
2125 if (!startswith(s, "/org/freedesktop/systemd1/unit/"))
2126 return -EINVAL;
2127
2128 if (!(n = bus_path_unescape(s+31)))
2129 return -ENOMEM;
2130
2131 u = manager_get_unit(m, n);
2132 free(n);
2133
2134 if (!u)
2135 return -ENOENT;
2136
2137 *_u = u;
2138
2139 return 0;
2140 }
2141
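/* For example, assuming the escaping scheme implemented by
 * bus_path_unescape() (defined elsewhere, where non-alphanumeric bytes are
 * written as "_" followed by two hex digits), an object path such as
 * /org/freedesktop/systemd1/unit/getty_40tty1_2eservice would resolve to
 * the unit "getty@tty1.service". */
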
2142 int manager_get_job_from_dbus_path(Manager *m, const char *s, Job **_j) {
2143 Job *j;
2144 unsigned id;
2145 int r;
2146
2147 assert(m);
2148 assert(s);
2149 assert(_j);
2150
2151 if (!startswith(s, "/org/freedesktop/systemd1/job/"))
2152 return -EINVAL;
2153
2154 if ((r = safe_atou(s + 30, &id)) < 0)
2155 return r;
2156
2157 if (!(j = manager_get_job(m, id)))
2158 return -ENOENT;
2159
2160 *_j = j;
2161
2162 return 0;
2163 }
2164
2165 static bool manager_utmp_good(Manager *m) {
2166 int r;
2167
2168 assert(m);
2169
2170 if ((r = mount_path_is_mounted(m, _PATH_UTMPX)) <= 0) {
2171
2172 if (r < 0)
2173 log_warning("Failed to determine whether " _PATH_UTMPX " is mounted: %s", strerror(-r));
2174
2175 return false;
2176 }
2177
2178 return true;
2179 }
2180
2181 void manager_write_utmp_reboot(Manager *m) {
2182 int r;
2183
2184 assert(m);
2185
2186 if (m->utmp_reboot_written)
2187 return;
2188
2189 if (m->running_as != MANAGER_SYSTEM)
2190 return;
2191
2192 if (!manager_utmp_good(m))
2193 return;
2194
2195 if ((r = utmp_put_reboot(m->startup_timestamp.realtime)) < 0) {
2196
2197 if (r != -ENOENT && r != -EROFS)
2198 log_warning("Failed to write utmp/wtmp: %s", strerror(-r));
2199
2200 return;
2201 }
2202
2203 m->utmp_reboot_written = true;
2204 }
2205
2206 void manager_write_utmp_runlevel(Manager *m, Unit *u) {
2207 int runlevel, r;
2208
2209 assert(m);
2210 assert(u);
2211
2212 if (u->meta.type != UNIT_TARGET)
2213 return;
2214
2215 if (m->running_as != MANAGER_SYSTEM)
2216 return;
2217
2218 if (!manager_utmp_good(m))
2219 return;
2220
2221 if ((runlevel = target_get_runlevel(TARGET(u))) <= 0)
2222 return;
2223
2224 if ((r = utmp_put_runlevel(0, runlevel, 0)) < 0) {
2225
2226 if (r != -ENOENT && r != -EROFS)
2227 log_warning("Failed to write utmp/wtmp: %s", strerror(-r));
2228 }
2229 }
2230
2231 void manager_dispatch_bus_name_owner_changed(
2232 Manager *m,
2233 const char *name,
2234 const char* old_owner,
2235 const char *new_owner) {
2236
2237 Unit *u;
2238
2239 assert(m);
2240 assert(name);
2241
2242 if (!(u = hashmap_get(m->watch_bus, name)))
2243 return;
2244
2245 UNIT_VTABLE(u)->bus_name_owner_change(u, name, old_owner, new_owner);
2246 }
2247
2248 void manager_dispatch_bus_query_pid_done(
2249 Manager *m,
2250 const char *name,
2251 pid_t pid) {
2252
2253 Unit *u;
2254
2255 assert(m);
2256 assert(name);
2257 assert(pid >= 1);
2258
2259 if (!(u = hashmap_get(m->watch_bus, name)))
2260 return;
2261
2262 UNIT_VTABLE(u)->bus_query_pid_done(u, name, pid);
2263 }
2264
2265 int manager_open_serialization(FILE **_f) {
2266 char *path;
2267 mode_t saved_umask;
2268 int fd;
2269 FILE *f;
2270
2271 assert(_f);
2272
2273 if (asprintf(&path, "/dev/shm/systemd-%u.dump-XXXXXX", (unsigned) getpid()) < 0)
2274 return -ENOMEM;
2275
2276 saved_umask = umask(0077);
2277 fd = mkostemp(path, O_RDWR|O_CLOEXEC);
2278 umask(saved_umask);
2279
2280 if (fd < 0) {
2281 free(path);
2282 return -errno;
2283 }
2284
2285 unlink(path);
2286
2287 log_debug("Serializing state to %s", path);
2288 free(path);
2289
2290 if (!(f = fdopen(fd, "w+")))
2291 return -errno;
2292
2293 *_f = f;
2294
2295 return 0;
2296 }
2297
2298 int manager_serialize(Manager *m, FILE *f, FDSet *fds) {
2299 Iterator i;
2300 Unit *u;
2301 const char *t;
2302 int r;
2303
2304 assert(m);
2305 assert(f);
2306 assert(fds);
2307
2308 HASHMAP_FOREACH_KEY(u, t, m->units, i) {
2309 if (u->meta.id != t)
2310 continue;
2311
2312 if (!unit_can_serialize(u))
2313 continue;
2314
2315 /* Start marker */
2316 fputs(u->meta.id, f);
2317 fputc('\n', f);
2318
2319 if ((r = unit_serialize(u, f, fds)) < 0)
2320 return r;
2321 }
2322
2323 if (ferror(f))
2324 return -EIO;
2325
2326 return 0;
2327 }
2328
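/* The resulting stream is deliberately simple: for every serializable unit
 * its primary id on a line of its own acts as a start marker, followed by
 * whatever unit_serialize() emits for that unit (the per-unit payload and
 * its terminator are defined by unit_serialize()/unit_deserialize(), not
 * here). A dump for two units might therefore look roughly like this,
 * purely as an illustration:
 *
 *     foo.service
 *     ...per-unit data written by unit_serialize()...
 *     bar.socket
 *     ...per-unit data written by unit_serialize()...
 */
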
2329 int manager_deserialize(Manager *m, FILE *f, FDSet *fds) {
2330 int r = 0;
2331
2332 assert(m);
2333 assert(f);
2334
2335 log_debug("Deserializing state...");
2336
2337 m->deserializing = true;
2338
2339 for (;;) {
2340 Unit *u;
2341 char name[UNIT_NAME_MAX+2];
2342
2343 /* Start marker */
2344 if (!fgets(name, sizeof(name), f)) {
2345 if (feof(f))
2346 break;
2347
2348 r = -errno;
2349 goto finish;
2350 }
2351
2352 char_array_0(name);
2353
2354 if ((r = manager_load_unit(m, strstrip(name), NULL, NULL, &u)) < 0)
2355 goto finish;
2356
2357 if ((r = unit_deserialize(u, f, fds)) < 0)
2358 goto finish;
2359 }
2360
2361 if (ferror(f)) {
2362 r = -EIO;
2363 goto finish;
2364 }
2365
2366 r = 0;
2367
2368 finish:
2369 m->deserializing = false;
2370
2371 return r;
2372 }
2373
2374 int manager_reload(Manager *m) {
2375 int r, q;
2376 FILE *f;
2377 FDSet *fds;
2378
2379 assert(m);
2380
2381 if ((r = manager_open_serialization(&f)) < 0)
2382 return r;
2383
2384 if (!(fds = fdset_new())) {
2385 r = -ENOMEM;
2386 goto finish;
2387 }
2388
2389 if ((r = manager_serialize(m, f, fds)) < 0)
2390 goto finish;
2391
2392 if (fseeko(f, 0, SEEK_SET) < 0) {
2393 r = -errno;
2394 goto finish;
2395 }
2396
2397 /* From here on there is no way back. */
2398 manager_clear_jobs_and_units(m);
2399
2400 /* Find new unit paths */
2401 lookup_paths_free(&m->lookup_paths);
2402 if ((q = lookup_paths_init(&m->lookup_paths, m->running_as)) < 0)
2403 r = q;
2404
2405 /* First, enumerate what we can from all config files */
2406 if ((q = manager_enumerate(m)) < 0)
2407 r = q;
2408
2409 /* Second, deserialize our stored data */
2410 if ((q = manager_deserialize(m, f, fds)) < 0)
2411 r = q;
2412
2413 fclose(f);
2414 f = NULL;
2415
2416 /* Third, fire things up! */
2417 if ((q = manager_coldplug(m)) < 0)
2418 r = q;
2419
2420 finish:
2421 if (f)
2422 fclose(f);
2423
2424 if (fds)
2425 fdset_free(fds);
2426
2427 return r;
2428 }
2429
2430 bool manager_is_booting_or_shutting_down(Manager *m) {
2431 Unit *u;
2432
2433 assert(m);
2434
2435 /* Is the initial job still around? */
2436 if (manager_get_job(m, 1))
2437 return true;
2438
2439 /* Is there a job for the shutdown target? */
2440 if (((u = manager_get_unit(m, SPECIAL_SHUTDOWN_TARGET))))
2441 return !!u->meta.job;
2442
2443 return false;
2444 }
2445
2446 static const char* const manager_running_as_table[_MANAGER_RUNNING_AS_MAX] = {
2447 [MANAGER_SYSTEM] = "system",
2448 [MANAGER_SESSION] = "session"
2449 };
2450
2451 DEFINE_STRING_TABLE_LOOKUP(manager_running_as, ManagerRunningAs);