/*-*- Mode: C; c-basic-offset: 8 -*-*/

/***
  This file is part of systemd.

  Copyright 2010 Lennart Poettering

  systemd is free software; you can redistribute it and/or modify it
  under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  systemd is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with systemd; If not, see <http://www.gnu.org/licenses/>.
***/
#include <assert.h>
#include <errno.h>
#include <string.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <signal.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/wait.h>
#include <sys/epoll.h>
#include <sys/signalfd.h>
#include <sys/reboot.h>
#include <sys/ioctl.h>
#include <linux/kd.h>
#include <libcgroup.h>

#include "manager.h"
#include "ratelimit.h"
#include "mount-setup.h"
#include "utmp-wtmp.h"
#include "unit-name.h"
#include "dbus-unit.h"
/* As soon as 16 units are in our GC queue, make sure to run a gc sweep */
#define GC_QUEUE_ENTRIES_MAX 16

/* As soon as 5s have passed since a unit was added to our GC queue, make sure to run a gc sweep */
#define GC_QUEUE_USEC_MAX (5*USEC_PER_SEC)
static int enable_special_signals(Manager *m) {
        int fd;

        assert(m);

        /* Make sure we get SIGINT on control-alt-del */
        if (reboot(RB_DISABLE_CAD) < 0)
                log_warning("Failed to enable ctrl-alt-del handling: %m");

        if ((fd = open_terminal("/dev/tty0", O_RDWR)) < 0)
                log_warning("Failed to open /dev/tty0: %m");
        else {
                /* Make sure we get SIGWINCH on kbrequest */
                if (ioctl(fd, KDSIGACCEPT, SIGWINCH) < 0)
                        log_warning("Failed to enable kbrequest handling: %s", strerror(errno));

                close_nointr_nofail(fd);
        }

        return 0;
}
static int manager_setup_signals(Manager *m) {
        sigset_t mask;
        struct epoll_event ev;
        struct sigaction sa;

        assert(m);

        /* We are not interested in SIGSTOP and friends. */
        zero(sa);
        sa.sa_handler = SIG_DFL;
        sa.sa_flags = SA_NOCLDSTOP|SA_RESTART;
        assert_se(sigaction(SIGCHLD, &sa, NULL) == 0);

        assert_se(sigemptyset(&mask) == 0);
        assert_se(sigaddset(&mask, SIGCHLD) == 0);
        assert_se(sigaddset(&mask, SIGTERM) == 0);
        assert_se(sigaddset(&mask, SIGHUP) == 0);
        assert_se(sigaddset(&mask, SIGUSR1) == 0);
        assert_se(sigaddset(&mask, SIGUSR2) == 0);
        assert_se(sigaddset(&mask, SIGINT) == 0);   /* Kernel sends us this on control-alt-del */
        assert_se(sigaddset(&mask, SIGWINCH) == 0); /* Kernel sends us this on kbrequest (alt-arrowup) */
        assert_se(sigaddset(&mask, SIGPWR) == 0);   /* Some kernel drivers and upsd send us this on power failure */
        assert_se(sigprocmask(SIG_SETMASK, &mask, NULL) == 0);

        m->signal_watch.type = WATCH_SIGNAL;
        if ((m->signal_watch.fd = signalfd(-1, &mask, SFD_NONBLOCK|SFD_CLOEXEC)) < 0)
                return -errno;

        zero(ev);
        ev.events = EPOLLIN;
        ev.data.ptr = &m->signal_watch;

        if (epoll_ctl(m->epoll_fd, EPOLL_CTL_ADD, m->signal_watch.fd, &ev) < 0)
                return -errno;

        if (m->running_as == MANAGER_INIT)
                return enable_special_signals(m);

        return 0;
}
static char** session_dirs(void) {
        const char *home, *e;
        char *config_home = NULL, *data_home = NULL;
        char **config_dirs = NULL, **data_dirs = NULL;
        char **r = NULL, **t;

        /* Implement the mechanisms defined in
         *
         * http://standards.freedesktop.org/basedir-spec/basedir-spec-0.6.html
         *
         * We look in both the config and the data dirs because we
         * want to encourage that distributors ship their unit files
         * as data, and allow overriding as configuration. */

        home = getenv("HOME");

        if ((e = getenv("XDG_CONFIG_HOME"))) {
                if (asprintf(&config_home, "%s/systemd/session", e) < 0)
                        goto fail;
        } else if (home) {
                if (asprintf(&config_home, "%s/.config/systemd/session", home) < 0)
                        goto fail;
        }

        if ((e = getenv("XDG_CONFIG_DIRS")))
                config_dirs = strv_split(e, ":");
        else
                config_dirs = strv_new("/etc/xdg", NULL);

        if ((e = getenv("XDG_DATA_HOME"))) {
                if (asprintf(&data_home, "%s/systemd/session", e) < 0)
                        goto fail;
        } else if (home) {
                if (asprintf(&data_home, "%s/.local/share/systemd/session", home) < 0)
                        goto fail;
        }

        if ((e = getenv("XDG_DATA_DIRS")))
                data_dirs = strv_split(e, ":");
        else
                data_dirs = strv_new("/usr/local/share", "/usr/share", NULL);

        /* Now merge everything we found. */
        if (config_home) {
                if (!(t = strv_append(r, config_home)))
                        goto fail;
                strv_free(r);
                r = t;
        }

        if (!(t = strv_merge_concat(r, config_dirs, "/systemd/session")))
                goto fail;
        strv_free(r);
        r = t;

        if (!(t = strv_append(r, SESSION_CONFIG_UNIT_PATH)))
                goto fail;
        strv_free(r);
        r = t;

        if (data_home) {
                if (!(t = strv_append(r, data_home)))
                        goto fail;
                strv_free(r);
                r = t;
        }

        if (!(t = strv_merge_concat(r, data_dirs, "/systemd/session")))
                goto fail;
        strv_free(r);
        r = t;

        if (!(t = strv_append(r, SESSION_DATA_UNIT_PATH)))
                goto fail;
        strv_free(r);
        r = t;

        if (!strv_path_make_absolute_cwd(r))
                goto fail;

finish:
        free(config_home);
        strv_free(config_dirs);
        free(data_home);
        strv_free(data_dirs);

        return r;

fail:
        strv_free(r);
        r = NULL;
        goto finish;
}
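/* Illustrative note (not part of the original source): with, for example,
 * XDG_CONFIG_HOME and XDG_DATA_HOME unset and HOME=/home/alice (a made-up
 * value), the function above would yield a search path roughly like
 *
 *         /home/alice/.config/systemd/session
 *         /etc/xdg/systemd/session
 *         <SESSION_CONFIG_UNIT_PATH>
 *         /home/alice/.local/share/systemd/session
 *         /usr/local/share/systemd/session
 *         /usr/share/systemd/session
 *         <SESSION_DATA_UNIT_PATH>
 *
 * The exact values of the two compile-time constants depend on the build
 * configuration. */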
static int manager_find_paths(Manager *m) {
        const char *e;
        char *t;

        assert(m);

        /* First priority is whatever has been passed to us via env vars */
        if ((e = getenv("SYSTEMD_UNIT_PATH")))
                if (!(m->unit_path = split_path_and_make_absolute(e)))
                        return -ENOMEM;

        if (strv_isempty(m->unit_path)) {

                /* Nothing is set, so let's figure something out. */
                strv_free(m->unit_path);

                if (m->running_as == MANAGER_SESSION) {
                        if (!(m->unit_path = session_dirs()))
                                return -ENOMEM;
                } else
                        if (!(m->unit_path = strv_new(
                                              SYSTEM_CONFIG_UNIT_PATH,  /* /etc/systemd/system/ */
                                              SYSTEM_DATA_UNIT_PATH,    /* /lib/systemd/system/ */
                                              NULL)))
                                return -ENOMEM;
        }

        if (m->running_as == MANAGER_INIT) {
                /* /etc/init.d/ compatibility does not matter to users */

                if ((e = getenv("SYSTEMD_SYSVINIT_PATH")))
                        if (!(m->sysvinit_path = split_path_and_make_absolute(e)))
                                return -ENOMEM;

                if (strv_isempty(m->sysvinit_path)) {
                        strv_free(m->sysvinit_path);

                        if (!(m->sysvinit_path = strv_new(
                                              SYSTEM_SYSVINIT_PATH,     /* /etc/init.d/ */
                                              NULL)))
                                return -ENOMEM;
                }

                if ((e = getenv("SYSTEMD_SYSVRCND_PATH")))
                        if (!(m->sysvrcnd_path = split_path_and_make_absolute(e)))
                                return -ENOMEM;

                if (strv_isempty(m->sysvrcnd_path)) {
                        strv_free(m->sysvrcnd_path);

                        if (!(m->sysvrcnd_path = strv_new(
                                              SYSTEM_SYSVRCND_PATH,     /* /etc/rcN.d/ */
                                              NULL)))
                                return -ENOMEM;
                }
        }

        strv_uniq(m->unit_path);
        strv_uniq(m->sysvinit_path);
        strv_uniq(m->sysvrcnd_path);

        assert(!strv_isempty(m->unit_path));
        if (!(t = strv_join(m->unit_path, "\n\t")))
                return -ENOMEM;
        log_debug("Looking for unit files in:\n\t%s", t);
        free(t);

        if (!strv_isempty(m->sysvinit_path)) {
                if (!(t = strv_join(m->sysvinit_path, "\n\t")))
                        return -ENOMEM;
                log_debug("Looking for SysV init scripts in:\n\t%s", t);
                free(t);
        } else
                log_debug("Ignoring SysV init scripts.");

        if (!strv_isempty(m->sysvrcnd_path)) {
                if (!(t = strv_join(m->sysvrcnd_path, "\n\t")))
                        return -ENOMEM;
                log_debug("Looking for SysV rcN.d links in:\n\t%s", t);
                free(t);
        } else
                log_debug("Ignoring SysV rcN.d links.");

        return 0;
}
int manager_new(ManagerRunningAs running_as, bool confirm_spawn, Manager **_m) {
        Manager *m;
        int r = -ENOMEM;

        assert(_m);
        assert(running_as >= 0);
        assert(running_as < _MANAGER_RUNNING_AS_MAX);

        if (!(m = new0(Manager, 1)))
                return -ENOMEM;

        m->boot_timestamp = now(CLOCK_REALTIME);

        m->running_as = running_as;
        m->confirm_spawn = confirm_spawn;
        m->name_data_slot = -1;
        m->exit_code = _MANAGER_EXIT_CODE_INVALID;

        m->signal_watch.fd = m->mount_watch.fd = m->udev_watch.fd = m->epoll_fd = m->dev_autofs_fd = -1;
        m->current_job_id = 1; /* start as id #1, so that we can leave #0 around as "null-like" value */

        if (!(m->units = hashmap_new(string_hash_func, string_compare_func)))
                goto fail;

        if (!(m->jobs = hashmap_new(trivial_hash_func, trivial_compare_func)))
                goto fail;

        if (!(m->transaction_jobs = hashmap_new(trivial_hash_func, trivial_compare_func)))
                goto fail;

        if (!(m->watch_pids = hashmap_new(trivial_hash_func, trivial_compare_func)))
                goto fail;

        if (!(m->cgroup_bondings = hashmap_new(string_hash_func, string_compare_func)))
                goto fail;

        if (!(m->watch_bus = hashmap_new(string_hash_func, string_compare_func)))
                goto fail;

        if ((m->epoll_fd = epoll_create1(EPOLL_CLOEXEC)) < 0) {
                r = -errno;
                goto fail;
        }

        if ((r = manager_find_paths(m)) < 0)
                goto fail;

        if ((r = manager_setup_signals(m)) < 0)
                goto fail;

        if ((r = manager_setup_cgroup(m)) < 0)
                goto fail;

        /* Try to connect to the busses, if possible. */
        if ((r = bus_init_system(m)) < 0 ||
            (r = bus_init_api(m)) < 0)
                goto fail;

        *_m = m;
        return 0;

fail:
        manager_free(m);
        return r;
}
static unsigned manager_dispatch_cleanup_queue(Manager *m) {
        Meta *meta;
        unsigned n = 0;

        assert(m);

        while ((meta = m->cleanup_queue)) {
                assert(meta->in_cleanup_queue);

                unit_free(UNIT(meta));
                n++;
        }

        return n;
}

enum {
        GC_OFFSET_IN_PATH,  /* This one is on the path we were travelling */
        GC_OFFSET_UNSURE,   /* No clue */
        GC_OFFSET_GOOD,     /* We still need this unit */
        GC_OFFSET_BAD,      /* We don't need this unit anymore */
        _GC_OFFSET_MAX
};
static void unit_gc_sweep(Unit *u, unsigned gc_marker) {
        Iterator i;
        Unit *other;
        bool is_bad;

        assert(u);

        if (u->meta.gc_marker == gc_marker + GC_OFFSET_GOOD ||
            u->meta.gc_marker == gc_marker + GC_OFFSET_BAD ||
            u->meta.gc_marker == gc_marker + GC_OFFSET_IN_PATH)
                return;

        if (u->meta.in_cleanup_queue)
                goto bad;

        if (unit_check_gc(u))
                goto good;

        u->meta.gc_marker = gc_marker + GC_OFFSET_IN_PATH;

        is_bad = true;

        SET_FOREACH(other, u->meta.dependencies[UNIT_REFERENCED_BY], i) {
                unit_gc_sweep(other, gc_marker);

                if (other->meta.gc_marker == gc_marker + GC_OFFSET_GOOD)
                        goto good;

                if (other->meta.gc_marker != gc_marker + GC_OFFSET_BAD)
                        is_bad = false;
        }

        if (is_bad)
                goto bad;

        /* We were unable to find anything out about this entry, so
         * let's investigate it later */
        u->meta.gc_marker = gc_marker + GC_OFFSET_UNSURE;
        unit_add_to_gc_queue(u);

        return;

bad:
        /* We definitely know that this one is not useful anymore, so
         * let's mark it for deletion */
        u->meta.gc_marker = gc_marker + GC_OFFSET_BAD;
        unit_add_to_cleanup_queue(u);
        return;

good:
        u->meta.gc_marker = gc_marker + GC_OFFSET_GOOD;
}
static unsigned manager_dispatch_gc_queue(Manager *m) {
        Meta *meta;
        unsigned n = 0;
        unsigned gc_marker;

        assert(m);

        if ((m->n_in_gc_queue < GC_QUEUE_ENTRIES_MAX) &&
            (m->gc_queue_timestamp <= 0 ||
             (m->gc_queue_timestamp + GC_QUEUE_USEC_MAX) > now(CLOCK_MONOTONIC)))
                return 0;

        log_debug("Running GC...");

        m->gc_marker += _GC_OFFSET_MAX;
        if (m->gc_marker + _GC_OFFSET_MAX <= _GC_OFFSET_MAX)
                m->gc_marker = 1;

        gc_marker = m->gc_marker;

        while ((meta = m->gc_queue)) {
                assert(meta->in_gc_queue);

                unit_gc_sweep(UNIT(meta), gc_marker);

                LIST_REMOVE(Meta, gc_queue, m->gc_queue, meta);
                meta->in_gc_queue = false;

                n++;

                if (meta->gc_marker == gc_marker + GC_OFFSET_BAD ||
                    meta->gc_marker == gc_marker + GC_OFFSET_UNSURE) {
                        log_debug("Collecting %s", meta->id);
                        meta->gc_marker = gc_marker + GC_OFFSET_BAD;
                        unit_add_to_cleanup_queue(UNIT(meta));
                }
        }

        m->n_in_gc_queue = 0;
        m->gc_queue_timestamp = 0;

        return n;
}
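/* Illustrative note (not part of the original source): advancing gc_marker by
 * _GC_OFFSET_MAX each sweep makes the absolute values
 * gc_marker + GC_OFFSET_{IN_PATH,UNSURE,GOOD,BAD} unique to that sweep, so a
 * unit whose meta.gc_marker still carries a value from an older sweep simply
 * counts as "not yet visited". Assuming the enum above numbers the offsets
 * 0..3, a sweep with gc_marker == 40 marks units with 40..43 and the next
 * sweep starts at 44. */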
static void manager_clear_jobs_and_units(Manager *m) {
        Job *j;
        Unit *u;

        assert(m);

        while ((j = hashmap_first(m->transaction_jobs)))
                job_free(j);

        while ((u = hashmap_first(m->units)))
                unit_free(u);
}
void manager_free(Manager *m) {
        UnitType c;

        assert(m);

        manager_clear_jobs_and_units(m);

        for (c = 0; c < _UNIT_TYPE_MAX; c++)
                if (unit_vtable[c]->shutdown)
                        unit_vtable[c]->shutdown(m);

        /* If we reexecute ourselves, we keep the root cgroup
         * around */
        manager_shutdown_cgroup(m, m->exit_code != MANAGER_REEXECUTE);

        hashmap_free(m->units);
        hashmap_free(m->jobs);
        hashmap_free(m->transaction_jobs);
        hashmap_free(m->watch_pids);
        hashmap_free(m->watch_bus);

        if (m->epoll_fd >= 0)
                close_nointr_nofail(m->epoll_fd);
        if (m->signal_watch.fd >= 0)
                close_nointr_nofail(m->signal_watch.fd);

        strv_free(m->unit_path);
        strv_free(m->sysvinit_path);
        strv_free(m->sysvrcnd_path);

        free(m->cgroup_controller);
        free(m->cgroup_hierarchy);

        hashmap_free(m->cgroup_bondings);

        free(m);
}
int manager_enumerate(Manager *m) {
        int r = 0, q;
        UnitType c;

        assert(m);

        /* Let's ask every type to load all units from disk/kernel
         * that it might know */
        for (c = 0; c < _UNIT_TYPE_MAX; c++)
                if (unit_vtable[c]->enumerate)
                        if ((q = unit_vtable[c]->enumerate(m)) < 0)
                                r = q;

        manager_dispatch_load_queue(m);
        return r;
}
int manager_coldplug(Manager *m) {
        int r = 0, q;
        Iterator i;
        Unit *u;
        char *k;

        assert(m);

        /* Then, let's set up their initial state. */
        HASHMAP_FOREACH_KEY(u, k, m->units, i) {

                /* ignore aliases */
                if (u->meta.id != k)
                        continue;

                if (UNIT_VTABLE(u)->coldplug)
                        if ((q = UNIT_VTABLE(u)->coldplug(u)) < 0)
                                r = q;
        }

        return r;
}
int manager_startup(Manager *m, FILE *serialization, FDSet *fds) {
        int r, q;

        assert(m);

        /* First, enumerate what we can from all config files */
        r = manager_enumerate(m);

        /* Second, deserialize if there is something to deserialize */
        if (serialization)
                if ((q = manager_deserialize(m, serialization, fds)) < 0)
                        r = q;

        /* Third, fire things up! */
        if ((q = manager_coldplug(m)) < 0)
                r = q;

        /* Now that the initial devices are available, let's see if we
         * can write the utmp file */
        manager_write_utmp_reboot(m);

        return r;
}
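/* Illustrative sketch (not part of the original file): at first boot there is
 * nothing to deserialize, so a caller would typically invoke
 *
 *         r = manager_startup(m, NULL, NULL);
 *
 * whereas after a re-execution it would pass the FILE and FDSet previously
 * produced via manager_open_serialization()/manager_serialize() further down
 * in this file. The NULL arguments are an assumption about how main() drives
 * this API. */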
static void transaction_delete_job(Manager *m, Job *j, bool delete_dependencies) {
        assert(m);
        assert(j);

        /* Deletes one job from the transaction */

        manager_transaction_unlink_job(m, j, delete_dependencies);

        if (!j->installed)
                job_free(j);
}
static void transaction_delete_unit(Manager *m, Unit *u) {
        Job *j;

        /* Deletes all jobs associated with a certain unit from the
         * transaction */

        while ((j = hashmap_get(m->transaction_jobs, u)))
                transaction_delete_job(m, j, true);
}
static void transaction_clean_dependencies(Manager *m) {
        Iterator i;
        Job *j;

        assert(m);

        /* Drops all dependencies of all installed jobs */

        HASHMAP_FOREACH(j, m->jobs, i) {
                while (j->subject_list)
                        job_dependency_free(j->subject_list);
                while (j->object_list)
                        job_dependency_free(j->object_list);
        }

        assert(!m->transaction_anchor);
}
static void transaction_abort(Manager *m) {
        Job *j;

        assert(m);

        while ((j = hashmap_first(m->transaction_jobs)))
                transaction_delete_job(m, j, true);

        assert(hashmap_isempty(m->transaction_jobs));

        transaction_clean_dependencies(m);
}
static void transaction_find_jobs_that_matter_to_anchor(Manager *m, Job *j, unsigned generation) {
        JobDependency *l;

        assert(m);

        /* A recursive sweep through the graph that marks all units
         * that matter to the anchor job, i.e. are directly or
         * indirectly a dependency of the anchor job via paths that
         * are fully marked as mattering. */

        if (j)
                l = j->subject_list;
        else
                l = m->transaction_anchor;

        LIST_FOREACH(subject, l, l) {

                /* This link does not matter */
                if (!l->matters)
                        continue;

                /* This unit has already been marked */
                if (l->object->generation == generation)
                        continue;

                l->object->matters_to_anchor = true;
                l->object->generation = generation;

                transaction_find_jobs_that_matter_to_anchor(m, l->object, generation);
        }
}
static void transaction_merge_and_delete_job(Manager *m, Job *j, Job *other, JobType t) {
        JobDependency *l, *last;

        assert(j);
        assert(other);
        assert(j->unit == other->unit);
        assert(!j->installed);

        /* Merges 'other' into 'j' and then deletes 'other'. */

        j->type = t;
        j->state = JOB_WAITING;
        j->override = j->override || other->override;

        j->matters_to_anchor = j->matters_to_anchor || other->matters_to_anchor;

        /* Patch us in as new owner of the JobDependency objects */
        last = NULL;
        LIST_FOREACH(subject, l, other->subject_list) {
                assert(l->subject == other);
                l->subject = j;
                last = l;
        }

        /* Merge both lists */
        if (last) {
                last->subject_next = j->subject_list;
                if (j->subject_list)
                        j->subject_list->subject_prev = last;
                j->subject_list = other->subject_list;
        }

        /* Patch us in as new owner of the JobDependency objects */
        last = NULL;
        LIST_FOREACH(object, l, other->object_list) {
                assert(l->object == other);
                l->object = j;
                last = l;
        }

        /* Merge both lists */
        if (last) {
                last->object_next = j->object_list;
                if (j->object_list)
                        j->object_list->object_prev = last;
                j->object_list = other->object_list;
        }

        /* Kill the other job */
        other->subject_list = NULL;
        other->object_list = NULL;
        transaction_delete_job(m, other, true);
}
static int delete_one_unmergeable_job(Manager *m, Job *j) {
        Job *k;

        assert(j);

        /* Tries to delete one item in the linked list
         * j->transaction_next->transaction_next->... that conflicts
         * with another one, in an attempt to make an inconsistent
         * transaction work. */

        /* We rely here on the fact that if a, once merged with b,
         * does not merge with c, then neither a nor b merges with c
         * on its own. */
        LIST_FOREACH(transaction, j, j)
                LIST_FOREACH(transaction, k, j->transaction_next) {
                        Job *d;

                        /* Is this one mergeable? Then skip it */
                        if (job_type_is_mergeable(j->type, k->type))
                                continue;

                        /* Ok, we found two that conflict, let's see if we can
                         * drop one of them */
                        if (!j->matters_to_anchor)
                                d = j;
                        else if (!k->matters_to_anchor)
                                d = k;
                        else
                                return -ENOEXEC;

                        /* Ok, we can drop one, so let's do so. */
                        log_debug("Trying to fix job merging by deleting job %s/%s", d->unit->meta.id, job_type_to_string(d->type));
                        transaction_delete_job(m, d, true);
                        return 0;
                }

        return -EINVAL;
}
static int transaction_merge_jobs(Manager *m) {
        Job *j;
        Iterator i;
        int r;

        assert(m);

        /* First step, check whether any of the jobs for one specific
         * task conflict. If so, try to drop one of them. */
        HASHMAP_FOREACH(j, m->transaction_jobs, i) {
                JobType t;
                Job *k;

                t = j->type;
                LIST_FOREACH(transaction, k, j->transaction_next) {
                        if ((r = job_type_merge(&t, k->type)) >= 0)
                                continue;

                        /* OK, we could not merge all jobs for this
                         * action. Let's see if we can get rid of one
                         * of them */

                        if ((r = delete_one_unmergeable_job(m, j)) >= 0)
                                /* Ok, we managed to drop one, now
                                 * let's ask our callers to call us
                                 * again after garbage collecting */
                                return -EAGAIN;

                        /* We couldn't merge anything. Failure */
                        return r;
                }
        }

        /* Second step, merge the jobs. */
        HASHMAP_FOREACH(j, m->transaction_jobs, i) {
                JobType t = j->type;
                Job *k;

                /* Merge all transactions */
                LIST_FOREACH(transaction, k, j->transaction_next)
                        assert_se(job_type_merge(&t, k->type) == 0);

                /* If an active job is mergeable, merge it too */
                if (j->unit->meta.job)
                        job_type_merge(&t, j->unit->meta.job->type); /* Might fail. Which is OK */

                while ((k = j->transaction_next)) {
                        if (j->installed) {
                                transaction_merge_and_delete_job(m, k, j, t);
                                j = k;
                        } else
                                transaction_merge_and_delete_job(m, j, k, t);
                }

                assert(!j->transaction_next);
                assert(!j->transaction_prev);
        }

        return 0;
}
static void transaction_drop_redundant(Manager *m) {
        bool again;

        assert(m);

        /* Goes through the transaction and removes all jobs that are
         * a noop */

        do {
                Job *j;
                Iterator i;

                again = false;

                HASHMAP_FOREACH(j, m->transaction_jobs, i) {
                        bool changes_something = false;
                        Job *k;

                        LIST_FOREACH(transaction, k, j) {

                                if (!job_is_anchor(k) &&
                                    job_type_is_redundant(k->type, unit_active_state(k->unit)))
                                        continue;

                                changes_something = true;
                                break;
                        }

                        if (changes_something)
                                continue;

                        log_debug("Found redundant job %s/%s, dropping.", j->unit->meta.id, job_type_to_string(j->type));
                        transaction_delete_job(m, j, false);
                        again = true;
                        break;
                }

        } while (again);
}
static bool unit_matters_to_anchor(Unit *u, Job *j) {
        assert(u);
        assert(!j->transaction_prev);

        /* Checks whether at least one of the jobs for this unit
         * matters to the anchor. */

        LIST_FOREACH(transaction, j, j)
                if (j->matters_to_anchor)
                        return true;

        return false;
}
static int transaction_verify_order_one(Manager *m, Job *j, Job *from, unsigned generation) {
        Iterator i;
        Unit *u;
        int r;

        assert(m);
        assert(j);
        assert(!j->transaction_prev);

        /* Does a recursive sweep through the ordering graph, looking
         * for a cycle. If we find a cycle we try to break it. */

        /* Did we find a cycle? */
        if (j->marker && j->generation == generation) {
                Job *k;

                /* So, we already have been here. We have a
                 * cycle. Let's try to break it. We go backwards in
                 * our path and try to find a suitable job to
                 * remove. We use the marker to find our way back,
                 * since, clever as we are, we stored our way back
                 * in the marker. */

                log_debug("Found ordering cycle on %s/%s", j->unit->meta.id, job_type_to_string(j->type));

                for (k = from; k; k = (k->generation == generation ? k->marker : NULL)) {

                        log_debug("Walked on cycle path to %s/%s", k->unit->meta.id, job_type_to_string(k->type));

                        if (!k->installed &&
                            !unit_matters_to_anchor(k->unit, k)) {
                                /* Ok, we can drop this one, so let's
                                 * do so. */
                                log_debug("Breaking order cycle by deleting job %s/%s", k->unit->meta.id, job_type_to_string(k->type));
                                transaction_delete_unit(m, k->unit);
                                return -EAGAIN;
                        }

                        /* Check if this in fact was the beginning of
                         * the cycle */
                        if (k == j)
                                break;
                }

                log_debug("Unable to break cycle");

                return -ENOEXEC;
        }

        /* Make the marker point to where we come from, so that we can
         * find our way backwards if we want to break a cycle */
        j->marker = from ? from : j;
        j->generation = generation;

        /* We assume that the dependencies are bidirectional, and
         * hence can ignore UNIT_AFTER */
        SET_FOREACH(u, j->unit->meta.dependencies[UNIT_BEFORE], i) {
                Job *o;

                /* Is there a job for this unit? */
                if (!(o = hashmap_get(m->transaction_jobs, u)))

                        /* Ok, there is no job for this in the
                         * transaction, but maybe there is already one
                         * running? */
                        if (!(o = u->meta.job))
                                continue;

                if ((r = transaction_verify_order_one(m, o, j, generation)) < 0)
                        return r;
        }

        /* Ok, let's backtrack, and remember that this entry is not on
         * our path anymore. */
        j->marker = NULL;

        return 0;
}
static int transaction_verify_order(Manager *m, unsigned *generation) {
        Job *j;
        int r;
        Iterator i;

        assert(m);
        assert(generation);

        /* Check if the ordering graph is cyclic. If it is, try to fix
         * that up by dropping one of the jobs. */

        HASHMAP_FOREACH(j, m->transaction_jobs, i)
                if ((r = transaction_verify_order_one(m, j, NULL, (*generation)++)) < 0)
                        return r;

        return 0;
}
static void transaction_collect_garbage(Manager *m) {
        bool again;

        assert(m);

        /* Drop jobs that are not required by any other job */

        do {
                Iterator i;
                Job *j;

                again = false;

                HASHMAP_FOREACH(j, m->transaction_jobs, i) {
                        if (j->object_list)
                                continue;

                        log_debug("Garbage collecting job %s/%s", j->unit->meta.id, job_type_to_string(j->type));
                        transaction_delete_job(m, j, true);
                        again = true;
                        break;
                }

        } while (again);
}
static int transaction_is_destructive(Manager *m) {
        Iterator i;
        Job *j;

        assert(m);

        /* Checks whether applying this transaction means that
         * existing jobs would be replaced */

        HASHMAP_FOREACH(j, m->transaction_jobs, i) {

                assert(!j->transaction_prev);
                assert(!j->transaction_next);

                if (j->unit->meta.job &&
                    j->unit->meta.job != j &&
                    !job_type_is_superset(j->type, j->unit->meta.job->type))
                        return -EEXIST;
        }

        return 0;
}
static void transaction_minimize_impact(Manager *m) {
        bool again;

        assert(m);

        /* Drops all unnecessary jobs that reverse already active jobs
         * or that stop a running service. */

        do {
                Job *j;
                Iterator i;

                again = false;

                HASHMAP_FOREACH(j, m->transaction_jobs, i) {
                        LIST_FOREACH(transaction, j, j) {
                                bool stops_running_service, changes_existing_job;

                                /* If it matters, we shouldn't drop it */
                                if (j->matters_to_anchor)
                                        continue;

                                /* Would this stop a running service?
                                 * Would this change an existing job?
                                 * If so, let's drop this entry */

                                stops_running_service =
                                        j->type == JOB_STOP && UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(j->unit));

                                changes_existing_job =
                                        j->unit->meta.job && job_type_is_conflicting(j->type, j->unit->meta.job->type);

                                if (!stops_running_service && !changes_existing_job)
                                        continue;

                                if (stops_running_service)
                                        log_debug("%s/%s would stop a running service.", j->unit->meta.id, job_type_to_string(j->type));

                                if (changes_existing_job)
                                        log_debug("%s/%s would change existing job.", j->unit->meta.id, job_type_to_string(j->type));

                                /* Ok, let's get rid of this */
                                log_debug("Deleting %s/%s to minimize impact.", j->unit->meta.id, job_type_to_string(j->type));

                                transaction_delete_job(m, j, true);
                                again = true;
                                break;
                        }

                        if (again)
                                break;
                }

        } while (again);
}
static int transaction_apply(Manager *m) {
        Iterator i;
        Job *j;
        int r;

        /* Moves the transaction jobs to the set of active jobs */

        HASHMAP_FOREACH(j, m->transaction_jobs, i) {

                assert(!j->transaction_prev);
                assert(!j->transaction_next);

                if (j->installed)
                        continue;

                if ((r = hashmap_put(m->jobs, UINT32_TO_PTR(j->id), j)) < 0)
                        goto rollback;
        }

        while ((j = hashmap_steal_first(m->transaction_jobs))) {
                if (j->installed)
                        continue;

                if (j->unit->meta.job)
                        job_free(j->unit->meta.job);

                j->unit->meta.job = j;
                j->installed = true;

                /* We're fully installed. Now let's free data we don't
                 * need anymore. */

                assert(!j->transaction_next);
                assert(!j->transaction_prev);

                job_add_to_run_queue(j);
                job_add_to_dbus_queue(j);
        }

        /* As last step, kill all remaining job dependencies. */
        transaction_clean_dependencies(m);

        return 0;

rollback:

        HASHMAP_FOREACH(j, m->transaction_jobs, i) {
                if (j->installed)
                        continue;

                hashmap_remove(m->jobs, UINT32_TO_PTR(j->id));
        }

        return r;
}
static int transaction_activate(Manager *m, JobMode mode) {
        int r;
        unsigned generation = 1;

        assert(m);

        /* This applies the changes recorded in transaction_jobs to
         * the actual list of jobs, if possible. */

        /* First step: figure out which jobs matter */
        transaction_find_jobs_that_matter_to_anchor(m, NULL, generation++);

        /* Second step: Try not to stop any running services if
         * we don't have to. Don't try to reverse running
         * jobs if we don't have to. */
        transaction_minimize_impact(m);

        /* Third step: Drop redundant jobs */
        transaction_drop_redundant(m);

        for (;;) {
                /* Fourth step: Let's remove unneeded jobs that might
                 * be lurking. */
                transaction_collect_garbage(m);

                /* Fifth step: verify order makes sense and correct
                 * cycles if necessary and possible */
                if ((r = transaction_verify_order(m, &generation)) >= 0)
                        break;

                if (r != -EAGAIN) {
                        log_debug("Requested transaction contains an unfixable cyclic ordering dependency: %s", strerror(-r));
                        goto rollback;
                }

                /* Let's see if the resulting transaction ordering
                 * graph is still cyclic... */
        }

        for (;;) {
                /* Sixth step: let's drop unmergeable entries if
                 * necessary and possible, merge entries we can
                 * merge */
                if ((r = transaction_merge_jobs(m)) >= 0)
                        break;

                if (r != -EAGAIN) {
                        log_debug("Requested transaction contains unmergeable jobs: %s", strerror(-r));
                        goto rollback;
                }

                /* Seventh step: an entry got dropped, let's garbage
                 * collect its dependencies. */
                transaction_collect_garbage(m);

                /* Let's see if the resulting transaction still has
                 * unmergeable entries ... */
        }

        /* Eighth step: Drop redundant jobs again, if the merging now allows us to drop more. */
        transaction_drop_redundant(m);

        /* Ninth step: check whether we can actually apply this */
        if (mode == JOB_FAIL)
                if ((r = transaction_is_destructive(m)) < 0) {
                        log_debug("Requested transaction contradicts existing jobs: %s", strerror(-r));
                        goto rollback;
                }

        /* Tenth step: apply changes */
        if ((r = transaction_apply(m)) < 0) {
                log_debug("Failed to apply transaction: %s", strerror(-r));
                goto rollback;
        }

        assert(hashmap_isempty(m->transaction_jobs));
        assert(!m->transaction_anchor);

        return 0;

rollback:

        transaction_abort(m);
        return r;
}
static Job* transaction_add_one_job(Manager *m, JobType type, Unit *unit, bool override, bool *is_new) {
        Job *j, *f;
        int r;

        assert(m);
        assert(unit);

        /* Looks for an existing prospective job and returns that. If
         * it doesn't exist it is created and added to the prospective
         * jobs list. */

        f = hashmap_get(m->transaction_jobs, unit);

        LIST_FOREACH(transaction, j, f) {
                assert(j->unit == unit);

                if (j->type == type) {
                        if (is_new)
                                *is_new = false;
                        return j;
                }
        }

        if (unit->meta.job && unit->meta.job->type == type)
                j = unit->meta.job;
        else if (!(j = job_new(m, type, unit)))
                return NULL;

        j->generation = 0;
        j->marker = NULL;
        j->matters_to_anchor = false;
        j->override = override;

        LIST_PREPEND(Job, transaction, f, j);

        if ((r = hashmap_replace(m->transaction_jobs, unit, f)) < 0) {
                job_free(j);
                return NULL;
        }

        if (is_new)
                *is_new = true;

        log_debug("Added job %s/%s to transaction.", unit->meta.id, job_type_to_string(type));

        return j;
}
void manager_transaction_unlink_job(Manager *m, Job *j, bool delete_dependencies) {
        assert(m);
        assert(j);

        if (j->transaction_prev)
                j->transaction_prev->transaction_next = j->transaction_next;
        else if (j->transaction_next)
                hashmap_replace(m->transaction_jobs, j->unit, j->transaction_next);
        else
                hashmap_remove_value(m->transaction_jobs, j->unit, j);

        if (j->transaction_next)
                j->transaction_next->transaction_prev = j->transaction_prev;

        j->transaction_prev = j->transaction_next = NULL;

        while (j->subject_list)
                job_dependency_free(j->subject_list);

        while (j->object_list) {
                Job *other = j->object_list->matters ? j->object_list->subject : NULL;

                job_dependency_free(j->object_list);

                if (other && delete_dependencies) {
                        log_debug("Deleting job %s/%s as dependency of job %s/%s",
                                  other->unit->meta.id, job_type_to_string(other->type),
                                  j->unit->meta.id, job_type_to_string(j->type));
                        transaction_delete_job(m, other, delete_dependencies);
                }
        }
}
static int transaction_add_job_and_dependencies(
                Manager *m,
                JobType type,
                Unit *unit,
                Job *by,
                bool matters,
                bool override,
                Job **_ret) {

        Job *ret;
        Iterator i;
        Unit *dep;
        int r;
        bool is_new;

        assert(m);
        assert(type < _JOB_TYPE_MAX);
        assert(unit);

        if (unit->meta.load_state != UNIT_LOADED)
                return -EINVAL;

        if (!unit_job_is_applicable(unit, type))
                return -EBADR;

        /* First add the job. */
        if (!(ret = transaction_add_one_job(m, type, unit, override, &is_new)))
                return -ENOMEM;

        /* Then, add a link to the job. */
        if (!job_dependency_new(by, ret, matters))
                return -ENOMEM;

        if (is_new) {
                /* Finally, recursively add in all dependencies. */
                if (type == JOB_START || type == JOB_RELOAD_OR_START) {

                        SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_REQUIRES], i)
                                if ((r = transaction_add_job_and_dependencies(m, JOB_START, dep, ret, true, override, NULL)) < 0 && r != -EBADR)
                                        goto fail;

                        SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_REQUIRES_OVERRIDABLE], i)
                                if ((r = transaction_add_job_and_dependencies(m, JOB_START, dep, ret, !override, override, NULL)) < 0 && r != -EBADR)
                                        log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->meta.id, strerror(-r));

                        SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_WANTS], i)
                                if ((r = transaction_add_job_and_dependencies(m, JOB_START, dep, ret, false, false, NULL)) < 0)
                                        log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->meta.id, strerror(-r));

                        SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_REQUISITE], i)
                                if ((r = transaction_add_job_and_dependencies(m, JOB_VERIFY_ACTIVE, dep, ret, true, override, NULL)) < 0 && r != -EBADR)
                                        goto fail;

                        SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_REQUISITE_OVERRIDABLE], i)
                                if ((r = transaction_add_job_and_dependencies(m, JOB_VERIFY_ACTIVE, dep, ret, !override, override, NULL)) < 0 && r != -EBADR)
                                        log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->meta.id, strerror(-r));

                        SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_CONFLICTS], i)
                                if ((r = transaction_add_job_and_dependencies(m, JOB_STOP, dep, ret, true, override, NULL)) < 0 && r != -EBADR)
                                        goto fail;

                } else if (type == JOB_STOP || type == JOB_RESTART || type == JOB_TRY_RESTART) {

                        SET_FOREACH(dep, ret->unit->meta.dependencies[UNIT_REQUIRED_BY], i)
                                if ((r = transaction_add_job_and_dependencies(m, type, dep, ret, true, override, NULL)) < 0 && r != -EBADR)
                                        goto fail;
                }

                /* JOB_VERIFY_STARTED, JOB_RELOAD require no dependency handling */
        }

        if (_ret)
                *_ret = ret;

        return 0;

fail:
        return r;
}
static int transaction_add_isolate_jobs(Manager *m) {
        Iterator i;
        Unit *u;
        char *k;
        int r;

        assert(m);

        HASHMAP_FOREACH_KEY(u, k, m->units, i) {

                /* ignore aliases */
                if (u->meta.id != k)
                        continue;

                if (UNIT_VTABLE(u)->no_isolate)
                        continue;

                /* No need to stop inactive jobs */
                if (unit_active_state(u) == UNIT_INACTIVE)
                        continue;

                /* Is there already something listed for this? */
                if (hashmap_get(m->transaction_jobs, u))
                        continue;

                if ((r = transaction_add_job_and_dependencies(m, JOB_STOP, u, NULL, true, false, NULL)) < 0)
                        log_warning("Cannot add isolate job for unit %s, ignoring: %s", u->meta.id, strerror(-r));
        }

        return 0;
}
int manager_add_job(Manager *m, JobType type, Unit *unit, JobMode mode, bool override, Job **_ret) {
        int r;
        Job *ret;

        assert(m);
        assert(type < _JOB_TYPE_MAX);
        assert(unit);
        assert(mode < _JOB_MODE_MAX);

        if (mode == JOB_ISOLATE && type != JOB_START)
                return -EINVAL;

        log_debug("Trying to enqueue job %s/%s", unit->meta.id, job_type_to_string(type));

        if ((r = transaction_add_job_and_dependencies(m, type, unit, NULL, true, override, &ret)) < 0) {
                transaction_abort(m);
                return r;
        }

        if (mode == JOB_ISOLATE)
                if ((r = transaction_add_isolate_jobs(m)) < 0) {
                        transaction_abort(m);
                        return r;
                }

        if ((r = transaction_activate(m, mode)) < 0)
                return r;

        log_debug("Enqueued job %s/%s as %u", unit->meta.id, job_type_to_string(type), (unsigned) ret->id);

        if (_ret)
                *_ret = ret;

        return 0;
}
int manager_add_job_by_name(Manager *m, JobType type, const char *name, JobMode mode, bool override, Job **_ret) {
        Unit *unit;
        int r;

        assert(m);
        assert(type < _JOB_TYPE_MAX);
        assert(name);
        assert(mode < _JOB_MODE_MAX);

        if ((r = manager_load_unit(m, name, NULL, &unit)) < 0)
                return r;

        return manager_add_job(m, type, unit, mode, override, _ret);
}
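/* Illustrative sketch (not from the original source): enqueueing a start job
 * for a unit by name, replacing conflicting jobs. The unit name is made up.
 *
 *         Job *job = NULL;
 *         int r;
 *
 *         r = manager_add_job_by_name(m, JOB_START, "foo.service",
 *                                     JOB_REPLACE, true, &job);
 *         if (r < 0)
 *                 log_error("Failed to enqueue job: %s", strerror(-r));
 */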
Job *manager_get_job(Manager *m, uint32_t id) {
        assert(m);

        return hashmap_get(m->jobs, UINT32_TO_PTR(id));
}

Unit *manager_get_unit(Manager *m, const char *name) {
        assert(m);
        assert(name);

        return hashmap_get(m->units, name);
}
unsigned manager_dispatch_load_queue(Manager *m) {
        Meta *meta;
        unsigned n = 0;

        assert(m);

        /* Make sure we are not run recursively */
        if (m->dispatching_load_queue)
                return 0;

        m->dispatching_load_queue = true;

        /* Dispatches the load queue. Takes a unit from the queue and
         * tries to load its data until the queue is empty */

        while ((meta = m->load_queue)) {
                assert(meta->in_load_queue);

                unit_load(UNIT(meta));
                n++;
        }

        m->dispatching_load_queue = false;

        return n;
}
int manager_load_unit(Manager *m, const char *name, const char *path, Unit **_ret) {
        Unit *ret;
        int r;

        assert(m);
        assert(name || path);
        assert(_ret);

        /* This will load the service information files, but not actually
         * start any services or anything. */

        if (path && !is_path(path))
                return -EINVAL;

        if (!name)
                name = file_name_from_path(path);

        if (!unit_name_is_valid(name))
                return -EINVAL;

        if ((ret = manager_get_unit(m, name))) {
                *_ret = ret;
                return 0;
        }

        if (!(ret = unit_new(m)))
                return -ENOMEM;

        if (path)
                if (!(ret->meta.fragment_path = strdup(path))) {
                        unit_free(ret);
                        return -ENOMEM;
                }

        if ((r = unit_add_name(ret, name)) < 0) {
                unit_free(ret);
                return r;
        }

        unit_add_to_load_queue(ret);
        unit_add_to_dbus_queue(ret);

        manager_dispatch_load_queue(m);

        *_ret = unit_follow_merge(ret);
        return 0;
}
void manager_dump_jobs(Manager *s, FILE *f, const char *prefix) {
        Iterator i;
        Job *j;

        assert(s);
        assert(f);

        HASHMAP_FOREACH(j, s->jobs, i)
                job_dump(j, f, prefix);
}

void manager_dump_units(Manager *s, FILE *f, const char *prefix) {
        Iterator i;
        Unit *u;
        const char *t;

        assert(s);
        assert(f);

        HASHMAP_FOREACH_KEY(u, t, s->units, i)
                if (u->meta.id == t)
                        unit_dump(u, f, prefix);
}

void manager_clear_jobs(Manager *m) {
        Job *j;

        assert(m);

        transaction_abort(m);

        while ((j = hashmap_first(m->jobs)))
                job_free(j);
}

unsigned manager_dispatch_run_queue(Manager *m) {
        Job *j;
        unsigned n = 0;

        if (m->dispatching_run_queue)
                return 0;

        m->dispatching_run_queue = true;

        while ((j = m->run_queue)) {
                assert(j->installed);
                assert(j->in_run_queue);

                job_run_and_invalidate(j);
                n++;
        }

        m->dispatching_run_queue = false;
        return n;
}

unsigned manager_dispatch_dbus_queue(Manager *m) {
        Job *j;
        Meta *meta;
        unsigned n = 0;

        assert(m);

        if (m->dispatching_dbus_queue)
                return 0;

        m->dispatching_dbus_queue = true;

        while ((meta = m->dbus_unit_queue)) {
                assert(meta->in_dbus_queue);

                bus_unit_send_change_signal(UNIT(meta));
                n++;
        }

        while ((j = m->dbus_job_queue)) {
                assert(j->in_dbus_queue);

                bus_job_send_change_signal(j);
                n++;
        }

        m->dispatching_dbus_queue = false;
        return n;
}
static int manager_dispatch_sigchld(Manager *m) {
        assert(m);

        for (;;) {
                siginfo_t si;
                Unit *u;

                zero(si);

                /* First we call waitid() for a PID and do not reap the
                 * zombie. That way we can still access /proc/$PID for
                 * it while it is a zombie. */
                if (waitid(P_ALL, 0, &si, WEXITED|WNOHANG|WNOWAIT) < 0) {

                        if (errno == ECHILD)
                                break;

                        return -errno;
                }

                if (si.si_pid <= 0)
                        break;

                if (si.si_code == CLD_EXITED || si.si_code == CLD_KILLED || si.si_code == CLD_DUMPED) {
                        char *name = NULL;

                        get_process_name(si.si_pid, &name);
                        log_debug("Got SIGCHLD for process %llu (%s)", (unsigned long long) si.si_pid, strna(name));
                        free(name);
                }

                /* And now, we actually reap the zombie. */
                if (waitid(P_PID, si.si_pid, &si, WEXITED) < 0)
                        return -errno;

                if (si.si_code != CLD_EXITED && si.si_code != CLD_KILLED && si.si_code != CLD_DUMPED)
                        continue;

                log_debug("Child %llu died (code=%s, status=%i/%s)",
                          (long long unsigned) si.si_pid,
                          sigchld_code_to_string(si.si_code),
                          si.si_status,
                          strna(si.si_code == CLD_EXITED ? exit_status_to_string(si.si_status) : strsignal(si.si_status)));

                if (!(u = hashmap_remove(m->watch_pids, UINT32_TO_PTR(si.si_pid))))
                        continue;

                log_debug("Child %llu belongs to %s", (long long unsigned) si.si_pid, u->meta.id);

                UNIT_VTABLE(u)->sigchld_event(u, si.si_pid, si.si_code, si.si_status);
        }

        return 0;
}
static void manager_start_target(Manager *m, const char *name) {
        int r;

        if ((r = manager_add_job_by_name(m, JOB_START, name, JOB_REPLACE, true, NULL)) < 0)
                log_error("Failed to enqueue %s job: %s", name, strerror(-r));
}
static int manager_process_signal_fd(Manager *m) {
        ssize_t n;
        struct signalfd_siginfo sfsi;
        bool sigchld = false;

        assert(m);

        for (;;) {
                if ((n = read(m->signal_watch.fd, &sfsi, sizeof(sfsi))) != sizeof(sfsi)) {

                        if (n >= 0)
                                return -EIO;

                        if (errno == EAGAIN)
                                break;

                        return -errno;
                }

                switch (sfsi.ssi_signo) {

                case SIGCHLD:
                        sigchld = true;
                        break;

                case SIGTERM:
                case SIGINT:

                        if (m->running_as == MANAGER_INIT) {
                                manager_start_target(m, SPECIAL_CTRL_ALT_DEL_TARGET);
                                break;
                        }

                        m->exit_code = MANAGER_EXIT;
                        break;

                case SIGWINCH:

                        if (m->running_as == MANAGER_INIT)
                                manager_start_target(m, SPECIAL_KBREQUEST_TARGET);

                        /* This is a nop on non-init */
                        break;

                case SIGPWR:

                        if (m->running_as == MANAGER_INIT)
                                manager_start_target(m, SPECIAL_SIGPWR_TARGET);

                        /* This is a nop on non-init */
                        break;

                case SIGUSR2:
                        manager_dump_units(m, stdout, "\t");
                        manager_dump_jobs(m, stdout, "\t");
                        break;

                case SIGUSR1: {
                        Unit *u;

                        u = manager_get_unit(m, SPECIAL_DBUS_SERVICE);

                        if (!u || UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u))) {
                                log_info("Trying to reconnect to bus...");
                                bus_init_system(m);
                                bus_init_api(m);
                        }

                        if (!u || !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u))) {
                                log_info("Loading D-Bus service...");
                                manager_start_target(m, SPECIAL_DBUS_SERVICE);
                        }

                        break;
                }

                case SIGHUP:
                        m->exit_code = MANAGER_RELOAD;
                        break;

                default:
                        log_info("Got unhandled signal <%s>.", strsignal(sfsi.ssi_signo));
                }
        }

        if (sigchld)
                return manager_dispatch_sigchld(m);

        return 0;
}
static int process_event(Manager *m, struct epoll_event *ev) {
        int r;
        Watch *w;

        assert(m);
        assert(ev);

        assert_se(w = ev->data.ptr);

        switch (w->type) {

        case WATCH_SIGNAL:

                /* An incoming signal? */
                if (ev->events != EPOLLIN)
                        return -EINVAL;

                if ((r = manager_process_signal_fd(m)) < 0)
                        return r;

                break;

        case WATCH_FD:

                /* Some fd event, to be dispatched to the units */
                UNIT_VTABLE(w->data.unit)->fd_event(w->data.unit, w->fd, ev->events, w);
                break;

        case WATCH_TIMER: {
                uint64_t v;
                ssize_t k;

                /* Some timer event, to be dispatched to the units */
                if ((k = read(w->fd, &v, sizeof(v))) != sizeof(v)) {

                        if (k < 0 && (errno == EINTR || errno == EAGAIN))
                                break;

                        return k < 0 ? -errno : -EIO;
                }

                UNIT_VTABLE(w->data.unit)->timer_event(w->data.unit, v, w);
                break;
        }

        case WATCH_MOUNT:
                /* Some mount table change, intended for the mount subsystem */
                mount_fd_event(m, ev->events);
                break;

        case WATCH_UDEV:
                /* Some notification from udev, intended for the device subsystem */
                device_fd_event(m, ev->events);
                break;

        case WATCH_DBUS_WATCH:
                bus_watch_event(m, w, ev->events);
                break;

        case WATCH_DBUS_TIMEOUT:
                bus_timeout_event(m, w, ev->events);
                break;

        default:
                assert_not_reached("Unknown epoll event type.");
        }

        return 0;
}
int manager_loop(Manager *m) {
        int r;

        RATELIMIT_DEFINE(rl, 1*USEC_PER_SEC, 1000);

        assert(m);
        m->exit_code = MANAGER_RUNNING;

        while (m->exit_code == MANAGER_RUNNING) {
                struct epoll_event event;
                int n;

                if (!ratelimit_test(&rl)) {
                        /* Yay, something is going seriously wrong, pause a little */
                        log_warning("Looping too fast. Throttling execution a little.");
                        sleep(1);
                }

                if (manager_dispatch_cleanup_queue(m) > 0)
                        continue;

                if (manager_dispatch_gc_queue(m) > 0)
                        continue;

                if (manager_dispatch_load_queue(m) > 0)
                        continue;

                if (manager_dispatch_run_queue(m) > 0)
                        continue;

                if (bus_dispatch(m) > 0)
                        continue;

                if (manager_dispatch_dbus_queue(m) > 0)
                        continue;

                if ((n = epoll_wait(m->epoll_fd, &event, 1, -1)) < 0) {

                        if (errno == EINTR)
                                continue;

                        return -errno;
                }

                if ((r = process_event(m, &event)) < 0)
                        return r;
        }

        return m->exit_code;
}
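/* Illustrative sketch (not part of the original file): the expected lifecycle
 * wiring of the calls defined in this file, assuming manager.h declares them
 * with the signatures used above.
 *
 *         Manager *m = NULL;
 *         int r;
 *
 *         if ((r = manager_new(MANAGER_SYSTEM, false, &m)) < 0)
 *                 return r;
 *
 *         if ((r = manager_startup(m, NULL, NULL)) < 0)
 *                 log_error("Failed to start up: %s", strerror(-r));
 *
 *         r = manager_loop(m);
 *
 *         manager_free(m);
 *         return r;
 */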
int manager_get_unit_from_dbus_path(Manager *m, const char *s, Unit **_u) {
        char *n;
        Unit *u;

        assert(m);
        assert(s);
        assert(_u);

        if (!startswith(s, "/org/freedesktop/systemd1/unit/"))
                return -EINVAL;

        if (!(n = bus_path_unescape(s+31)))
                return -ENOMEM;

        u = manager_get_unit(m, n);
        free(n);

        if (!u)
                return -ENOENT;

        *_u = u;

        return 0;
}

int manager_get_job_from_dbus_path(Manager *m, const char *s, Job **_j) {
        Job *j;
        unsigned id;
        int r;

        assert(m);
        assert(s);
        assert(_j);

        if (!startswith(s, "/org/freedesktop/systemd1/job/"))
                return -EINVAL;

        if ((r = safe_atou(s + 30, &id)) < 0)
                return r;

        if (!(j = manager_get_job(m, id)))
                return -ENOENT;

        *_j = j;

        return 0;
}
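/* Illustrative note (not from the original source): unit and job objects are
 * addressed on the bus with paths like
 *
 *         /org/freedesktop/systemd1/unit/<escaped unit name>
 *         /org/freedesktop/systemd1/job/<numeric job id>
 *
 * The two helpers above strip the fixed prefixes (31 and 30 characters long,
 * respectively) and resolve the remainder via manager_get_unit() and
 * manager_get_job(). */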
static bool manager_utmp_good(Manager *m) {
        int r;

        assert(m);

        if ((r = mount_path_is_mounted(m, _PATH_UTMPX)) <= 0) {

                if (r < 0)
                        log_warning("Failed to determine whether " _PATH_UTMPX " is mounted: %s", strerror(-r));

                return false;
        }

        return true;
}

void manager_write_utmp_reboot(Manager *m) {
        int r;

        assert(m);

        if (m->utmp_reboot_written)
                return;

        if (m->running_as != MANAGER_INIT)
                return;

        if (!manager_utmp_good(m))
                return;

        if ((r = utmp_put_reboot(m->boot_timestamp)) < 0) {

                if (r != -ENOENT && r != -EROFS)
                        log_warning("Failed to write utmp/wtmp: %s", strerror(-r));

                return;
        }

        m->utmp_reboot_written = true;
}

void manager_write_utmp_runlevel(Manager *m, Unit *u) {
        int runlevel, r;

        assert(m);
        assert(u);

        if (u->meta.type != UNIT_TARGET)
                return;

        if (m->running_as != MANAGER_INIT)
                return;

        if (!manager_utmp_good(m))
                return;

        if ((runlevel = target_get_runlevel(TARGET(u))) <= 0)
                return;

        if ((r = utmp_put_runlevel(0, runlevel, 0)) < 0) {

                if (r != -ENOENT && r != -EROFS)
                        log_warning("Failed to write utmp/wtmp: %s", strerror(-r));
        }
}

void manager_dispatch_bus_name_owner_changed(
                Manager *m,
                const char *name,
                const char *old_owner,
                const char *new_owner) {

        Unit *u;

        assert(m);
        assert(name);

        if (!(u = hashmap_get(m->watch_bus, name)))
                return;

        UNIT_VTABLE(u)->bus_name_owner_change(u, name, old_owner, new_owner);
}

void manager_dispatch_bus_query_pid_done(
                Manager *m,
                const char *name,
                pid_t pid) {

        Unit *u;

        assert(m);
        assert(name);

        if (!(u = hashmap_get(m->watch_bus, name)))
                return;

        UNIT_VTABLE(u)->bus_query_pid_done(u, name, pid);
}
int manager_open_serialization(FILE **_f) {
        char *path;
        mode_t saved_umask;
        int fd;
        FILE *f;

        assert(_f);

        if (asprintf(&path, "/dev/shm/systemd-%u.dump-XXXXXX", (unsigned) getpid()) < 0)
                return -ENOMEM;

        saved_umask = umask(0077);
        fd = mkostemp(path, O_RDWR|O_CLOEXEC);
        umask(saved_umask);

        if (fd < 0) {
                free(path);
                return -errno;
        }

        unlink(path);

        log_debug("Serializing state to %s", path);
        free(path);

        if (!(f = fdopen(fd, "w+")))
                return -errno;

        *_f = f;

        return 0;
}

int manager_serialize(Manager *m, FILE *f, FDSet *fds) {
        Iterator i;
        Unit *u;
        const char *t;
        int r;

        assert(m);
        assert(f);
        assert(fds);

        HASHMAP_FOREACH_KEY(u, t, m->units, i) {
                if (u->meta.id != t)
                        continue;

                if (!unit_can_serialize(u))
                        continue;

                /* Start marker */
                fputs(u->meta.id, f);
                fputc('\n', f);

                if ((r = unit_serialize(u, f, fds)) < 0)
                        return r;
        }

        return 0;
}

int manager_deserialize(Manager *m, FILE *f, FDSet *fds) {
        int r;

        assert(m);
        assert(f);
        assert(fds);

        log_debug("Deserializing state...");

        for (;;) {
                Unit *u;
                char name[UNIT_NAME_MAX+2];

                /* Start marker */
                if (!fgets(name, sizeof(name), f)) {
                        if (feof(f))
                                break;

                        return -errno;
                }

                if ((r = manager_load_unit(m, strstrip(name), NULL, &u)) < 0)
                        return r;

                if ((r = unit_deserialize(u, f, fds)) < 0)
                        return r;
        }

        return 0;
}

int manager_reload(Manager *m) {
        int r, q;
        FILE *f = NULL;
        FDSet *fds = NULL;

        assert(m);

        if ((r = manager_open_serialization(&f)) < 0)
                return r;

        if (!(fds = fdset_new())) {
                r = -ENOMEM;
                goto finish;
        }

        if ((r = manager_serialize(m, f, fds)) < 0)
                goto finish;

        if (fseeko(f, 0, SEEK_SET) < 0) {
                r = -errno;
                goto finish;
        }

        /* From here on there is no way back. */
        manager_clear_jobs_and_units(m);

        /* First, enumerate what we can from all config files */
        if ((q = manager_enumerate(m)) < 0)
                r = q;

        /* Second, deserialize our stored data */
        if ((q = manager_deserialize(m, f, fds)) < 0)
                r = q;

        fclose(f);
        f = NULL;

        /* Third, fire things up! */
        if ((q = manager_coldplug(m)) < 0)
                r = q;

finish:
        if (f)
                fclose(f);

        if (fds)
                fdset_free(fds);

        return r;
}
static const char* const manager_running_as_table[_MANAGER_RUNNING_AS_MAX] = {
        [MANAGER_INIT] = "init",
        [MANAGER_SYSTEM] = "system",
        [MANAGER_SESSION] = "session"
};

DEFINE_STRING_TABLE_LOOKUP(manager_running_as, ManagerRunningAs);
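/* Illustrative note (not from the original source): DEFINE_STRING_TABLE_LOOKUP
 * is assumed to generate manager_running_as_to_string() and
 * manager_running_as_from_string() on top of the table above, e.g.
 *
 *         log_debug("Running as: %s", manager_running_as_to_string(m->running_as));
 */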