/* SPDX-License-Identifier: LGPL-2.1-or-later */

#include "sd-messages.h"

#include "alloc-util.h"
#include "ansi-color.h"
#include "bus-common-errors.h"
#include "dbus-unit.h"
#include "string-util.h"
#include "transaction.h"
static void transaction_unlink_job(Transaction *tr, Job *j, bool delete_dependencies);
static void transaction_delete_job(Transaction *tr, Job *j, bool delete_dependencies) {

        /* Deletes one job from the transaction */

        transaction_unlink_job(tr, j, delete_dependencies);
static void transaction_delete_unit(Transaction *tr, Unit *u) {

        /* Deletes all jobs associated with a certain unit from the transaction */

        while ((j = hashmap_get(tr->jobs, u)))
                transaction_delete_job(tr, j, true);
static void transaction_abort(Transaction *tr) {

        while ((j = hashmap_first(tr->jobs)))
                transaction_delete_job(tr, j, false);

        assert(hashmap_isempty(tr->jobs));
static void transaction_find_jobs_that_matter_to_anchor(Job *j, unsigned generation) {

        /* A recursive sweep through the graph that marks all units
         * that matter to the anchor job, i.e. are directly or
         * indirectly a dependency of the anchor job via paths that
         * are fully marked as mattering. */

        j->matters_to_anchor = true;
        j->generation = generation;

        LIST_FOREACH(subject, l, j->subject_list) {

                /* This link does not matter */

                /* This unit has already been marked */
                if (l->object->generation == generation)
                        continue;

                transaction_find_jobs_that_matter_to_anchor(l->object, generation);
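/* Note: in transaction_find_jobs_that_matter_to_anchor() above, the per-job generation counter doubles
 * as the "visited" mark, so each new sweep of the graph only needs to bump the generation instead of
 * clearing flags on every job first (transaction_activate() below resets the generation of installed
 * jobs for the same reason). */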
static void transaction_merge_and_delete_job(Transaction *tr, Job *j, Job *other, JobType t) {

        assert(j->unit == other->unit);
        assert(!j->installed);

        /* Merges 'other' into 'j' and then deletes 'other'. */

        j->state = JOB_WAITING;
        j->irreversible = j->irreversible || other->irreversible;
        j->matters_to_anchor = j->matters_to_anchor || other->matters_to_anchor;

        /* Patch us in as new owner of the JobDependency objects */
        LIST_FOREACH(subject, l, other->subject_list) {
                assert(l->subject == other);

        /* Merge both lists */
        last->subject_next = j->subject_list;
        j->subject_list->subject_prev = last;
        j->subject_list = other->subject_list;

        /* Patch us in as new owner of the JobDependency objects */
        LIST_FOREACH(object, l, other->object_list) {
                assert(l->object == other);

        /* Merge both lists */
        last->object_next = j->object_list;
        j->object_list->object_prev = last;
        j->object_list = other->object_list;

        /* Kill the other job */
        other->subject_list = NULL;
        other->object_list = NULL;
        transaction_delete_job(tr, other, true);
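/* Note: transaction_merge_and_delete_job() only re-owns and splices the JobDependency link lists of
 * 'other' onto 'j'; the dependency objects themselves are reused, so the requirement information that
 * was recorded while the transaction was built survives the merge. */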
static bool job_is_conflicted_by(Job *j) {

        /* Returns true if this job is pulled in by at least one
         * ConflictedBy dependency. */

        LIST_FOREACH(object, l, j->object_list)
static int delete_one_unmergeable_job(Transaction *tr, Job *job) {

        /* Tries to delete one item in the linked list
         * j->transaction_next->transaction_next->... that conflicts
         * with another one, in an attempt to make an inconsistent
         * transaction work. */

        /* We rely here on the fact that if a merged with b does not
         * merge with c, then neither a nor b merges with c either. */
        LIST_FOREACH(transaction, j, job)
                LIST_FOREACH(transaction, k, j->transaction_next) {

                        /* Is this one mergeable? Then skip it */
                        if (job_type_is_mergeable(j->type, k->type))
                                continue;

                        /* Ok, we found two that conflict, let's see if we can
                         * drop one of them */
                        if (!j->matters_to_anchor && !k->matters_to_anchor) {

                                /* Neither job matters, so let's find
                                 * the one that is smarter to remove.
                                 * Let's think positive and rather
                                 * remove stops than starts -- except
                                 * if something is being stopped
                                 * because it is conflicted by another
                                 * unit, in which case we rather
                                 * remove the start. */

                                log_unit_debug(j->unit,
                                               "Looking at job %s/%s conflicted_by=%s",
                                               j->unit->id, job_type_to_string(j->type),
                                               yes_no(j->type == JOB_STOP && job_is_conflicted_by(j)));
                                log_unit_debug(k->unit,
                                               "Looking at job %s/%s conflicted_by=%s",
                                               k->unit->id, job_type_to_string(k->type),
                                               yes_no(k->type == JOB_STOP && job_is_conflicted_by(k)));

                                if (j->type == JOB_STOP) {

                                        if (job_is_conflicted_by(j))
                                                d = k;
                                        else
                                                d = j;

                                } else if (k->type == JOB_STOP) {

                                        if (job_is_conflicted_by(k))
                                                d = j;
                                        else
                                                d = k;
                                }

                        } else if (!j->matters_to_anchor)
                                d = j;
                        else if (!k->matters_to_anchor)
                                d = k;

                        /* Ok, we can drop one, so let's do so. */
                        log_unit_debug(d->unit,
                                       "Fixing conflicting jobs %s/%s,%s/%s by deleting job %s/%s",
                                       j->unit->id, job_type_to_string(j->type),
                                       k->unit->id, job_type_to_string(k->type),
                                       d->unit->id, job_type_to_string(d->type));
                        transaction_delete_job(tr, d, true);
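/* Example (illustrative): if a transaction ends up holding both a start job and a stop job for the
 * same unit and neither matters to the anchor, the stop is the one that gets dropped -- unless that
 * stop was pulled in via a ConflictedBy= dependency, in which case the conflicting start goes instead. */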
static int transaction_merge_jobs(Transaction *tr, sd_bus_error *e) {

        /* First step, check whether any of the jobs for one specific
         * task conflict. If so, try to drop one of them. */
        HASHMAP_FOREACH(j, tr->jobs) {

                LIST_FOREACH(transaction, k, j->transaction_next) {
                        if (job_type_merge_and_collapse(&t, k->type, j->unit) >= 0)
                                continue;

                        /* OK, we could not merge all jobs for this
                         * action. Let's see if we can get rid of one
                         * of them. */

                        r = delete_one_unmergeable_job(tr, j);
                        if (r >= 0)
                                /* Ok, we managed to drop one, now
                                 * let's ask our callers to call us
                                 * again after garbage collecting */
                                return -EAGAIN;

                        /* We couldn't merge anything. Failure */
                        return sd_bus_error_setf(e, BUS_ERROR_TRANSACTION_JOBS_CONFLICTING,
                                                 "Transaction contains conflicting jobs '%s' and '%s' for %s. "
                                                 "Probably contradicting requirement dependencies configured.",
                                                 job_type_to_string(t),
                                                 job_type_to_string(k->type),
                                                 j->unit->id);
                }
        }

        /* Second step, merge the jobs. */
        HASHMAP_FOREACH(j, tr->jobs) {

                /* Merge all transaction jobs for j->unit */
                LIST_FOREACH(transaction, k, j->transaction_next)
                        assert_se(job_type_merge_and_collapse(&t, k->type, j->unit) == 0);

                while ((k = j->transaction_next)) {
                        if (tr->anchor_job == k) {
                                transaction_merge_and_delete_job(tr, k, j, t);
                                j = k;
                        } else
                                transaction_merge_and_delete_job(tr, j, k, t);
                }

                assert(!j->transaction_next);
                assert(!j->transaction_prev);
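/* Worked example (illustrative): if foo.service ends up with both JOB_START and JOB_RESTART in one
 * transaction, job_type_merge_and_collapse() collapses the pair into a single JOB_RESTART and
 * transaction_merge_and_delete_job() folds the losing job's dependency links into the survivor. The
 * exact merge results are defined by the job type merging table in job.c. */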
static void transaction_drop_redundant(Transaction *tr) {

        /* Goes through the transaction and removes all jobs of the units whose jobs are all noops. If not
         * all of a unit's jobs are redundant, they are kept. */

        HASHMAP_FOREACH(j, tr->jobs) {

                LIST_FOREACH(transaction, k, j)
                        if (tr->anchor_job == k ||
                            !job_type_is_redundant(k->type, unit_active_state(k->unit)) ||
                            (k->unit->job && job_type_is_conflicting(k->type, k->unit->job->type))) {
                                keep = true;
                                break;
                        }

                if (!keep) {
                        log_trace("Found redundant job %s/%s, dropping from transaction.",
                                  j->unit->id, job_type_to_string(j->type));
                        transaction_delete_job(tr, j, false);
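/* Example (illustrative): a start job queued for a unit that is already active is a noop and gets
 * dropped here -- unless it is the anchor job itself, or some other non-redundant job on the same
 * unit's list keeps the whole list alive. */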
static bool job_matters_to_anchor(Job *job) {

        assert(!job->transaction_prev);

        /* Checks whether at least one of the jobs for this transaction matters to the anchor. */

        LIST_FOREACH(transaction, j, job)
                if (j->matters_to_anchor)
                        return true;

        return false;
static int transaction_verify_order_one(Transaction *tr, Job *j, Job *from, unsigned generation, sd_bus_error *e) {

        static const UnitDependencyAtom directions[] = {

        assert(!j->transaction_prev);

        /* Does a recursive sweep through the ordering graph, looking for a cycle. If we find a cycle we try
         * to break it. */

        /* Have we seen this before? */
        if (j->generation == generation) {
                _cleanup_free_ char **array = NULL;

                /* If the marker is NULL we have been here already and decided the job was loop-free from
                 * here. Hence shortcut things and return right-away. */

                /* So, the marker is not NULL and we already have been here. We have a cycle. Let's try to
                 * break it. We go backwards in our path and try to find a suitable job to remove. We use the
                 * marker to find our way back, since, smart as we are, we stored our way back in there. */
                for (Job *k = from; k; k = (k->generation == generation && k->marker != k) ? k->marker : NULL) {

                        /* For logging below */
                        if (strv_push_pair(&array, k->unit->id, (char*) job_type_to_string(k->type)) < 0)
                                (void) log_oom_warning();

                        if (!delete && hashmap_contains(tr->jobs, k->unit) && !job_matters_to_anchor(k))
                                /* Ok, we can drop this one, so let's do so. */
                                delete = k;

                        /* Check if this in fact was the beginning of the cycle */
                }

                _cleanup_free_ char *unit_ids = NULL;
                STRV_FOREACH_PAIR(unit_id, job_type, array)
                        (void) strextendf_with_separator(&unit_ids, "\n", "%s%s", unit_log_field(j->unit), *unit_id);

                _cleanup_free_ char *cycle_path_text = strdup("Found ordering cycle");
                if (!strv_isempty(array)) {
                        (void) strextendf(&cycle_path_text, ": %s/%s", array[0], array[1]);

                        STRV_FOREACH_PAIR(unit_id, job_type, strv_skip(array, 2))
                                (void) strextendf(&cycle_path_text, " after %s/%s", *unit_id, *job_type);

                        (void) strextendf(&cycle_path_text, " - after %s", array[0]);
                }

                /* logging for j not k here to provide a consistent narrative */
                           LOG_UNIT_MESSAGE(j->unit, "%s", cycle_path_text),
                           LOG_MESSAGE_ID(SD_MESSAGE_UNIT_ORDERING_CYCLE_STR),
                           LOG_ITEM("%s", strempty(unit_ids)));

                /* logging for j not k here to provide a consistent narrative */
                log_struct(LOG_WARNING,
                           LOG_UNIT_MESSAGE(j->unit,
                                            "Job %s/%s deleted to break ordering cycle starting with %s/%s",
                                            delete->unit->id, job_type_to_string(delete->type),
                                            j->unit->id, job_type_to_string(j->type)),
                           LOG_MESSAGE_ID(SD_MESSAGE_DELETING_JOB_BECAUSE_ORDERING_CYCLE_STR),
                           LOG_ITEM("DELETED_UNIT=%s", delete->unit->id),
                           LOG_ITEM("DELETED_TYPE=%s", job_type_to_string(delete->type)),
                           LOG_ITEM("%s", strempty(unit_ids)));

                if (log_get_show_color())
                        status = ANSI_HIGHLIGHT_RED " SKIP " ANSI_NORMAL;

                unit_status_printf(delete->unit,
                                   "Ordering cycle found, skipping %s",
                                   unit_status_string(delete->unit, NULL));
                transaction_delete_unit(tr, delete->unit);

                           LOG_UNIT_MESSAGE(j->unit, "Unable to break cycle starting with %s/%s",
                                            j->unit->id, job_type_to_string(j->type)),
                           LOG_MESSAGE_ID(SD_MESSAGE_CANT_BREAK_ORDERING_CYCLE_STR),
                           LOG_ITEM("%s", strempty(unit_ids)));

                return sd_bus_error_setf(e, BUS_ERROR_TRANSACTION_ORDER_IS_CYCLIC,
                                         "Transaction order is cyclic. See system logs for details.");
        /* Make the marker point to where we come from, so that we can
         * find our way backwards if we want to break a cycle. We use
         * a special marker for the beginning: we point to ourselves. */
        j->marker = from ?: j;
        j->generation = generation;

        /* Actual ordering of jobs depends on the unit ordering dependency and job types. We need to traverse
         * the graph over 'before' edges in the actual job execution order. We traverse over both unit
         * ordering dependencies and we test with job_compare() whether it is the 'before' edge in the job
         * execution ordering. */
        FOREACH_ELEMENT(d, directions) {

                UNIT_FOREACH_DEPENDENCY(u, j->unit, *d) {

                        /* Is there a job for this unit? */
                        o = hashmap_get(tr->jobs, u);

                        /* Ok, there is no job for this in the transaction, but maybe there is
                         * already one running? */

                        /* Cut traversing if the job j is not really *before* o. */
                        if (job_compare(j, o, *d) >= 0)
                                continue;

                        r = transaction_verify_order_one(tr, o, j, generation, e);
                }
        }

        /* Ok, let's backtrack, and remember that this entry is not on
         * our path anymore. */
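/* Note: the back-pointer chain built via j->marker above is exactly what the cycle-breaking loop walks
 * in reverse; once a subtree has been checked the marker is cleared again (the "backtrack" step), so a
 * later visit within the same generation can shortcut immediately. */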
static int transaction_verify_order(Transaction *tr, unsigned *generation, sd_bus_error *e) {

        /* Check if the ordering graph is cyclic. If it is, try to fix
         * that up by dropping one of the jobs. */

        HASHMAP_FOREACH(j, tr->jobs) {
                r = transaction_verify_order_one(tr, j, NULL, g, e);
static void transaction_collect_garbage(Transaction *tr) {

        /* Drop jobs that are not required by any other job */

        HASHMAP_FOREACH(j, tr->jobs) {
                if (tr->anchor_job == j)
                        continue;

                if (!j->object_list) {
                        log_trace("Garbage collecting job %s/%s", j->unit->id, job_type_to_string(j->type));
                        transaction_delete_job(tr, j, true);
                }

                log_trace("Keeping job %s/%s because of %s/%s",
                          j->unit->id, job_type_to_string(j->type),
                          j->object_list->subject ? j->object_list->subject->unit->id : "root",
                          j->object_list->subject ? job_type_to_string(j->object_list->subject->type) : "root");
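/* Note: a job's object_list holds the JobDependency links that point at it, i.e. the jobs that pulled
 * it in. An empty object_list means no other job in the transaction requires it any more, so (unless it
 * is the anchor, handled above) it can be garbage-collected. */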
static int transaction_is_destructive(Transaction *tr, JobMode mode, sd_bus_error *e) {

        /* Checks whether applying this transaction means that
         * existing jobs would be replaced */

        HASHMAP_FOREACH(j, tr->jobs) {

                assert(!j->transaction_prev);
                assert(!j->transaction_next);

                if (j->unit->job && (IN_SET(mode, JOB_FAIL, JOB_LENIENT) || j->unit->job->irreversible) &&
                    job_type_is_conflicting(j->unit->job->type, j->type))
                        return sd_bus_error_setf(e, BUS_ERROR_TRANSACTION_IS_DESTRUCTIVE,
                                                 "Transaction for %s/%s is destructive (%s has '%s' job queued, but '%s' is included in transaction).",
                                                 tr->anchor_job->unit->id, job_type_to_string(tr->anchor_job->type),
                                                 j->unit->id, job_type_to_string(j->unit->job->type), job_type_to_string(j->type));
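/* Example (illustrative): with mode=fail, enqueuing a start job for a unit that already has a stop job
 * queued is rejected here; with mode=replace the conflicting queued job would instead be superseded
 * when the transaction is applied, unless it was marked irreversible. */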
static int transaction_minimize_impact(Transaction *tr, JobMode mode, sd_bus_error *e) {

        /* Drops all unnecessary jobs that reverse already active jobs
         * or that stop a running service. */

        if (!IN_SET(mode, JOB_FAIL, JOB_LENIENT))
                return 0;

        HASHMAP_FOREACH(head, tr->jobs) {
                LIST_FOREACH(transaction, j, head) {
                        bool stops_running_service, changes_existing_job;

                        /* If it matters, we shouldn't drop it */
                        if (j->matters_to_anchor && mode != JOB_LENIENT)
                                continue;

                        /* Would this stop a running service?
                         * Would this change an existing job?
                         * If so, let's drop this entry */

                        stops_running_service =
                                j->type == JOB_STOP && UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(j->unit));

                        changes_existing_job =
                                j->unit->job &&
                                job_type_is_conflicting(j->type, j->unit->job->type);

                        if (!stops_running_service && !changes_existing_job)
                                continue;

                        if (j->matters_to_anchor) {
                                assert(mode == JOB_LENIENT);
                                return sd_bus_error_setf(e, BUS_ERROR_TRANSACTION_IS_DESTRUCTIVE,
                                                         "%s/%s would stop a running unit or change existing job, bailing",
                                                         j->unit->id, job_type_to_string(j->type));
                        }

                        if (stops_running_service)
                                log_unit_debug(j->unit,
                                               "%s/%s would stop a running service.",
                                               j->unit->id, job_type_to_string(j->type));

                        if (changes_existing_job)
                                log_unit_debug(j->unit,
                                               "%s/%s would change existing job.",
                                               j->unit->id, job_type_to_string(j->type));

                        /* Ok, let's get rid of this */
                        log_unit_debug(j->unit,
                                       "Deleting %s/%s to minimize impact.",
                                       j->unit->id, job_type_to_string(j->type));

                        transaction_delete_job(tr, j, true);
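/* Note: as the IN_SET() check at the top of transaction_minimize_impact() implies, this pass only has
 * an effect for the "fail" and "lenient" job modes; all other modes keep the full job set. */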
static int transaction_apply(
                Transaction *tr,
                Manager *m,
                JobMode mode,
                Set *affected_jobs) {

        /* Moves the transaction jobs to the set of active jobs */

        if (IN_SET(mode, JOB_ISOLATE, JOB_FLUSH)) {

                /* When isolating first kill all installed jobs which
                 * aren't part of the new transaction */
                HASHMAP_FOREACH(j, m->jobs) {
                        assert(j->installed);

                        if (j->unit->ignore_on_isolate)
                                continue;

                        if (hashmap_contains(tr->jobs, j->unit))
                                continue;

                        /* Not invalidating recursively. Avoids triggering
                         * OnFailure= actions of dependent jobs. Also avoids
                         * invalidating our iterator. */
                        job_finish_and_invalidate(j, JOB_CANCELED, false, false);
                }
        }

        HASHMAP_FOREACH(j, tr->jobs) {

                assert(!j->transaction_prev);
                assert(!j->transaction_next);

                r = hashmap_ensure_put(&m->jobs, NULL, UINT32_TO_PTR(j->id), j);
        }

        while ((j = hashmap_steal_first(tr->jobs))) {

                /* Clean the job dependencies */
                transaction_unlink_job(tr, j, false);

                installed_job = job_install(j);
                if (installed_job != j) {
                        /* j has been merged into a previously installed job */
                        if (tr->anchor_job == j)
                                tr->anchor_job = installed_job;

                        hashmap_remove_value(m->jobs, UINT32_TO_PTR(j->id), j);
                        free_and_replace_full(j, installed_job, job_free);
                }

                job_add_to_run_queue(j);
                job_add_to_dbus_queue(j);
                job_start_timer(j, false);
                job_shutdown_magic(j);

                /* When 'affected' is specified, let's track in it all jobs that were touched because of
                 * this transaction. */
                (void) set_put(affected_jobs, j);
        }

        HASHMAP_FOREACH(j, tr->jobs)
                hashmap_remove_value(m->jobs, UINT32_TO_PTR(j->id), j);
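/* Note: the trailing HASHMAP_FOREACH above is the rollback path of transaction_apply() -- if
 * registering the jobs in m->jobs fails midway, everything already added is removed again before the
 * error is propagated to the caller. */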
int transaction_activate(
                Transaction *tr,
                Manager *m,
                JobMode mode,
                Set *affected_jobs,
                sd_bus_error *e) {

        unsigned generation = 1;

        /* This applies the changes recorded in tr->jobs to the actual list of jobs, if possible. */

        /* Reset the generation counter of all installed jobs. The detection of cycles
         * looks at installed jobs. If they had a non-zero generation from some previous
         * walk of the graph, the algorithm would break. */
        HASHMAP_FOREACH(j, m->jobs)
                j->generation = 0;

        /* First step: figure out which jobs matter */
        transaction_find_jobs_that_matter_to_anchor(tr->anchor_job, generation++);

        /* Second step: Try not to stop any running services if we don't have to. Don't try to reverse
         * running jobs if we don't have to. */
        r = transaction_minimize_impact(tr, mode, e);
        if (r < 0)
                return r; /* Note that we don't log here, because for JOB_LENIENT conflicts are very much expected
                             and shouldn't appear to be fatal for the unit. Only inform the caller via bus error. */

        /* Third step: Drop redundant jobs */
        transaction_drop_redundant(tr);

        /* Fourth step: Let's remove unneeded jobs that might
         * be lurking. */
        if (mode != JOB_ISOLATE)
                transaction_collect_garbage(tr);

        /* Fifth step: verify order makes sense and correct
         * cycles if necessary and possible */
        r = transaction_verify_order(tr, &generation, e);
                return log_warning_errno(r, "Requested transaction contains an unfixable cyclic ordering dependency: %s", bus_error_message(e, r));

        /* Let's see if the resulting transaction ordering
         * graph is still cyclic... */

        /* Sixth step: let's drop unmergeable entries if
         * necessary and possible, merge entries we can
         * merge. */
        r = transaction_merge_jobs(tr, e);
                return log_warning_errno(r, "Requested transaction contains unmergeable jobs: %s", bus_error_message(e, r));

        /* Seventh step: an entry got dropped, let's garbage
         * collect its dependencies. */
        if (mode != JOB_ISOLATE)
                transaction_collect_garbage(tr);

        /* Let's see if the resulting transaction still has
         * unmergeable entries ... */

        /* Eighth step: Drop redundant jobs again, if the merging now allows us to drop more. */
        transaction_drop_redundant(tr);

        /* Ninth step: check whether we can actually apply this */
        r = transaction_is_destructive(tr, mode, e);
        if (r < 0)
                return log_notice_errno(r, "Requested transaction contradicts existing jobs: %s", bus_error_message(e, r));

        /* Tenth step: apply changes */
        r = transaction_apply(tr, m, mode, affected_jobs);
        if (r < 0)
                return log_warning_errno(r, "Failed to apply transaction: %m");

        assert(hashmap_isempty(tr->jobs));

        /* Are there any jobs now? Then make sure we have the idle pipe around. We don't really care too much
         * whether this works or not, as the idle pipe is a feature for cosmetics, not actually useful for
         * anything beyond that. */
        if (!hashmap_isempty(m->jobs))
                (void) manager_allocate_idle_pipe(m);
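/*
 * Typical call pattern (illustrative sketch only -- the parameter lists follow from the definitions in
 * this file, and the real caller is e.g. manager_add_job()):
 *
 *     Transaction *tr = transaction_new(false);
 *     if (!tr)
 *             return -ENOMEM;
 *
 *     r = transaction_add_job_and_dependencies(tr, JOB_START, unit, NULL, TRANSACTION_MATTERS, &error);
 *     if (r >= 0)
 *             r = transaction_activate(tr, m, JOB_REPLACE, NULL, &error);
 *     if (r < 0) {
 *             transaction_abort_and_free(tr);
 *             return r;
 *     }
 *
 *     transaction_free(tr);
 */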
static Job* transaction_add_one_job(Transaction *tr, JobType type, Unit *unit, bool *is_new) {

        /* Looks for an existing prospective job and returns that. If
         * it doesn't exist it is created and added to the prospective
         * jobs list. */

        f = hashmap_get(tr->jobs, unit);

        LIST_FOREACH(transaction, i, f) {
                assert(i->unit == unit);

                if (i->type == type) {
                        if (is_new)
                                *is_new = false;
                        return i;
                }
        }

        j = job_new(unit, type);

        j->irreversible = tr->irreversible;

        LIST_PREPEND(transaction, f, j);

        if (hashmap_replace(tr->jobs, unit, f) < 0) {
                LIST_REMOVE(transaction, f, j);
                job_free(j);
                return NULL;
        }

        log_trace("Added job %s/%s to transaction.", unit->id, job_type_to_string(type));
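/* Note: tr->jobs maps each Unit to the head of a linked list of prospective jobs (chained through the
 * "transaction" list fields), which is why several job types can coexist for one unit here until
 * transaction_merge_jobs() collapses them. */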
static void transaction_unlink_job(Transaction *tr, Job *j, bool delete_dependencies) {

        if (j->transaction_prev)
                j->transaction_prev->transaction_next = j->transaction_next;
        else if (j->transaction_next)
                hashmap_replace(tr->jobs, j->unit, j->transaction_next);
        else
                hashmap_remove_value(tr->jobs, j->unit, j);

        if (j->transaction_next)
                j->transaction_next->transaction_prev = j->transaction_prev;

        j->transaction_prev = j->transaction_next = NULL;

        while (j->subject_list)
                job_dependency_free(j->subject_list);

        while (j->object_list) {
                Job *other = j->object_list->matters ? j->object_list->subject : NULL;

                job_dependency_free(j->object_list);

                if (other && delete_dependencies) {
                        log_unit_debug(other->unit,
                                       "Deleting job %s/%s as dependency of job %s/%s",
                                       other->unit->id, job_type_to_string(other->type),
                                       j->unit->id, job_type_to_string(j->type));
                        transaction_delete_job(tr, other, delete_dependencies);
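/* Note: with delete_dependencies=true, unlinking a job also deletes the jobs that required it (the
 * "matters" subjects recorded on its object_list); this is what makes transaction_delete_job(tr, j, true)
 * cascade through requirement dependencies. */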
void transaction_add_propagate_reload_jobs(
                Transaction *tr,
                Unit *unit,
                Job *by,
                TransactionAddFlags flags) {

        UNIT_FOREACH_DEPENDENCY_SAFE(dep, unit, UNIT_ATOM_PROPAGATES_RELOAD_TO) {
                _cleanup_(sd_bus_error_free) sd_bus_error e = SD_BUS_ERROR_NULL;

                nt = job_type_collapse(JOB_TRY_RELOAD, dep);

                r = transaction_add_job_and_dependencies(tr, nt, dep, by, flags, &e);
                if (r < 0)
                        log_unit_warning(dep,
                                         "Cannot add dependency reload job, ignoring: %s",
                                         bus_error_message(&e, r));
static JobType job_type_propagate_stop_graceful(Job *j) {

        LIST_FOREACH(transaction, i, j)

                        /* Nothing to worry about, an appropriate job is in-place */

                        /* This unit is pulled in by other dependency types in this transaction. We will run
                         * into job type conflict if we enqueue a stop job, so let's enqueue a restart job
                         * instead. */

                default: /* We don't care about others */
int transaction_add_job_and_dependencies(
                Transaction *tr,
                JobType type,
                Unit *unit,
                Job *by,
                TransactionAddFlags flags,
                sd_bus_error *e) {

        assert(type < _JOB_TYPE_MAX);
        assert(type < _JOB_TYPE_MAX_IN_TRANSACTION);

        /* Before adding jobs for this unit, let's ensure that its state has been loaded. This matters when
         * jobs are spawned as part of coldplugging itself (see e. g. path_coldplug()). This way, we
         * "recursively" coldplug units, ensuring that we do not look at state of not-yet-coldplugged
         * units. */
        if (MANAGER_IS_RELOADING(unit->manager))

        log_trace("Pulling in %s/%s from %s/%s", unit->id, job_type_to_string(type), by->unit->id, job_type_to_string(by->type));

        /* Safety check that the unit is a valid state, i.e. not in UNIT_STUB or UNIT_MERGED which should only be set
         * temporarily. */
        if (!UNIT_IS_LOAD_COMPLETE(unit->load_state))
                return sd_bus_error_setf(e, BUS_ERROR_LOAD_FAILED, "Unit %s is not loaded properly.", unit->id);

        if (type != JOB_STOP) {
                /* The time-based cache allows new units to be started without daemon-reload, but if they are
                 * already referenced (because of dependencies or ordering) then we have to force a load of
                 * the fragment. As an optimization, check first if anything in the usual paths was modified
                 * since the last time the cache was loaded. Also check if the last time an attempt to load
                 * the unit was made was before the most recent cache refresh, so that we know we need to try
                 * again — even if the cache is current, it might have been updated in a different context
                 * before we had a chance to retry loading this particular unit.
                 *
                 * Given building up the transaction is a synchronous operation, attempt
                 * to load the unit immediately. */
                if (manager_unit_cache_should_retry_load(unit)) {
                        assert(unit->load_state == UNIT_NOT_FOUND);
                        unit->load_state = UNIT_STUB;
                        unit->load_error = 0;
                        (void) unit_load(unit);
                        assert(unit->load_state != UNIT_STUB);
                }

                r = bus_unit_validate_load_state(unit, e);
        }

        if (!unit_job_is_applicable(unit, type))
                return sd_bus_error_setf(e, BUS_ERROR_JOB_TYPE_NOT_APPLICABLE,
                                         "Job type %s is not applicable for unit %s.",
                                         job_type_to_string(type), unit->id);
        if (type == JOB_START) {
                /* The hard concurrency limit for slice units we already enforce when a job is enqueued */
                Slice *slice = SLICE(UNIT_GET_SLICE(unit));
                if (slice && slice_concurrency_hard_max_reached(slice, unit))
                        return sd_bus_error_setf(
                                        e, BUS_ERROR_CONCURRENCY_LIMIT_REACHED,
                                        "Concurrency limit of the slice unit '%s' (or any of its parents) the unit '%s' is contained in has been reached, refusing start job.",
                                        UNIT(slice)->id, unit->id);
        }

        /* First add the job. */
        job = transaction_add_one_job(tr, type, unit, &is_new);

        if (FLAGS_SET(flags, TRANSACTION_IGNORE_ORDER))
                job->ignore_order = true;

        /* Then, add a link to the job. */
        if (by) {
                if (!job_dependency_new(by, job, FLAGS_SET(flags, TRANSACTION_MATTERS), FLAGS_SET(flags, TRANSACTION_CONFLICTS)))
                        return -ENOMEM;
        } else {
                /* If the job has no parent job, it is the anchor job. */
                assert(!tr->anchor_job);
                tr->anchor_job = job;

                if (FLAGS_SET(flags, TRANSACTION_REENQUEUE_ANCHOR))
                        job->refuse_late_merge = true;
        }

        if (!is_new || FLAGS_SET(flags, TRANSACTION_IGNORE_REQUIREMENTS) || type == JOB_NOP)
                return 0;
        _cleanup_set_free_ Set *following = NULL;

        /* If we are following some other unit, make sure we add all dependencies of everybody following. */
        if (unit_following_set(job->unit, &following) > 0)
                SET_FOREACH(dep, following) {
                        r = transaction_add_job_and_dependencies(tr, type, dep, job, flags & TRANSACTION_IGNORE_ORDER, e);
                        if (r < 0) {
                                log_unit_full_errno(dep, r == -ERFKILL ? LOG_INFO : LOG_WARNING, r,
                                                    "Cannot add dependency job, ignoring: %s",
                                                    bus_error_message(e, r));
                                sd_bus_error_free(e);
                        }
                }
1043 if (IN_SET(type
, JOB_START
, JOB_RESTART
)) {
1044 UNIT_FOREACH_DEPENDENCY_SAFE(dep
, job
->unit
, UNIT_ATOM_PULL_IN_START
) {
1045 r
= transaction_add_job_and_dependencies(tr
, JOB_START
, dep
, job
, TRANSACTION_MATTERS
| (flags
& TRANSACTION_IGNORE_ORDER
), e
);
1047 if (r
!= -EBADR
) /* job type not applicable */
1050 sd_bus_error_free(e
);
1054 UNIT_FOREACH_DEPENDENCY_SAFE(dep
, job
->unit
, UNIT_ATOM_PULL_IN_START_IGNORED
) {
1055 r
= transaction_add_job_and_dependencies(tr
, JOB_START
, dep
, job
, flags
& TRANSACTION_IGNORE_ORDER
, e
);
1057 /* unit masked, job type not applicable and unit not found are not considered
1059 log_unit_full_errno(dep
,
1060 IN_SET(r
, -ERFKILL
, -EBADR
, -ENOENT
) ? LOG_DEBUG
: LOG_WARNING
,
1061 r
, "Cannot add dependency job, ignoring: %s",
1062 bus_error_message(e
, r
));
1063 sd_bus_error_free(e
);
1067 UNIT_FOREACH_DEPENDENCY_SAFE(dep
, job
->unit
, UNIT_ATOM_PULL_IN_VERIFY
) {
1068 r
= transaction_add_job_and_dependencies(tr
, JOB_VERIFY_ACTIVE
, dep
, job
, TRANSACTION_MATTERS
| (flags
& TRANSACTION_IGNORE_ORDER
), e
);
1070 if (r
!= -EBADR
) /* job type not applicable */
1073 sd_bus_error_free(e
);
1077 UNIT_FOREACH_DEPENDENCY_SAFE(dep
, job
->unit
, UNIT_ATOM_PULL_IN_STOP
) {
1078 r
= transaction_add_job_and_dependencies(tr
, JOB_STOP
, dep
, job
, TRANSACTION_MATTERS
| TRANSACTION_CONFLICTS
| (flags
& TRANSACTION_IGNORE_ORDER
), e
);
1080 if (r
!= -EBADR
) /* job type not applicable */
1083 sd_bus_error_free(e
);
1087 UNIT_FOREACH_DEPENDENCY_SAFE(dep
, job
->unit
, UNIT_ATOM_PULL_IN_STOP_IGNORED
) {
1088 r
= transaction_add_job_and_dependencies(tr
, JOB_STOP
, dep
, job
, flags
& TRANSACTION_IGNORE_ORDER
, e
);
1090 log_unit_warning(dep
,
1091 "Cannot add dependency job, ignoring: %s",
1092 bus_error_message(e
, r
));
1093 sd_bus_error_free(e
);
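        /* Rough mapping (illustrative -- see the UnitDependencyAtom definitions for the authoritative
         * list): UNIT_ATOM_PULL_IN_START covers hard requirements such as Requires= and BindsTo=, whose
         * failure aborts the whole transaction; UNIT_ATOM_PULL_IN_START_IGNORED covers Wants=-style
         * dependencies whose failure is merely logged above; UNIT_ATOM_PULL_IN_VERIFY corresponds to
         * Requisite=, and UNIT_ATOM_PULL_IN_STOP to Conflicts=. */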
        if (IN_SET(type, JOB_RESTART, JOB_STOP) || (type == JOB_START && FLAGS_SET(flags, TRANSACTION_PROPAGATE_START_AS_RESTART))) {
                bool is_stop = type == JOB_STOP;

                UNIT_FOREACH_DEPENDENCY_SAFE(dep, job->unit, UNIT_ATOM_PROPAGATE_STOP) {
                        /* We propagate RESTART only as TRY_RESTART, in order not to start dependencies that
                         * are not around. */

                        nt = job_type_collapse(is_stop ? JOB_STOP : JOB_TRY_RESTART, dep);

                        r = transaction_add_job_and_dependencies(tr, nt, dep, job, TRANSACTION_MATTERS | (flags & TRANSACTION_IGNORE_ORDER), e);
                        if (r < 0) {
                                if (r != -EBADR) /* job type not applicable */

                                sd_bus_error_free(e);
                        }
                }

                /* Process UNIT_ATOM_PROPAGATE_STOP_GRACEFUL (PropagatesStopTo=) units. We need to wait until
                 * all other dependencies are processed, i.e. we're the anchor job or already in the recursion
                 * that handles it. */
                if (!by || FLAGS_SET(flags, TRANSACTION_PROCESS_PROPAGATE_STOP_GRACEFUL))
                        UNIT_FOREACH_DEPENDENCY_SAFE(dep, job->unit, UNIT_ATOM_PROPAGATE_STOP_GRACEFUL) {

                                j = hashmap_get(tr->jobs, dep);
                                nt = job_type_propagate_stop_graceful(j);

                                r = transaction_add_job_and_dependencies(tr, nt, dep, job, TRANSACTION_MATTERS | (flags & TRANSACTION_IGNORE_ORDER) | TRANSACTION_PROCESS_PROPAGATE_STOP_GRACEFUL, e);
                                if (r < 0) {
                                        if (r != -EBADR) /* job type not applicable */

                                        sd_bus_error_free(e);
                                }
                        }
        }
        if (type == JOB_RELOAD)
                transaction_add_propagate_reload_jobs(tr, job->unit, job, flags & TRANSACTION_IGNORE_ORDER);

        /* JOB_VERIFY_ACTIVE requires no dependency handling */

        /* Recursive call failed to add required jobs so let's drop top level job as well. */
        log_unit_debug_errno(unit, r, "Cannot add dependency job to transaction, deleting job %s/%s again: %s",
                             unit->id, job_type_to_string(type), bus_error_message(e, r));

        transaction_delete_job(tr, job, /* delete_dependencies= */ false);
static bool shall_stop_on_isolate(Transaction *tr, Unit *u) {

        if (u->ignore_on_isolate)
                return false;

        /* Is there already something listed for this? */
        if (hashmap_contains(tr->jobs, u))
                return false;

        return true;
int transaction_add_isolate_jobs(Transaction *tr, Manager *m) {

        HASHMAP_FOREACH_KEY(u, k, m->units) {
                _cleanup_(sd_bus_error_free) sd_bus_error e = SD_BUS_ERROR_NULL;

                /* Ignore aliases */

                /* No need to stop inactive units */
                if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(u)) && !u->job)
                        continue;

                if (!shall_stop_on_isolate(tr, u))
                        continue;

                /* Keep units that are triggered by units we want to keep around. */
                UNIT_FOREACH_DEPENDENCY(o, u, UNIT_ATOM_TRIGGERED_BY)
                        if (!shall_stop_on_isolate(tr, o)) {

                r = transaction_add_job_and_dependencies(tr, JOB_STOP, u, tr->anchor_job, TRANSACTION_MATTERS, &e);
                if (r < 0)
                        log_unit_warning_errno(u, r, "Cannot add isolate job, ignoring: %s", bus_error_message(&e, r));
int transaction_add_triggering_jobs(Transaction *tr, Unit *u) {

        UNIT_FOREACH_DEPENDENCY_SAFE(trigger, u, UNIT_ATOM_TRIGGERED_BY) {
                _cleanup_(sd_bus_error_free) sd_bus_error e = SD_BUS_ERROR_NULL;

                /* No need to stop inactive jobs */
                if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(trigger)) && !trigger->job)
                        continue;

                /* Is there already something listed for this? */
                if (hashmap_contains(tr->jobs, trigger))
                        continue;

                r = transaction_add_job_and_dependencies(tr, JOB_STOP, trigger, tr->anchor_job, TRANSACTION_MATTERS, &e);
                if (r < 0)
                        log_unit_warning_errno(u, r, "Cannot add triggered by job, ignoring: %s", bus_error_message(&e, r));
Transaction *transaction_new(bool irreversible) {

        tr = new0(Transaction, 1);

        tr->jobs = hashmap_new(NULL);

        tr->irreversible = irreversible;
Transaction *transaction_free(Transaction *tr) {

        assert(hashmap_isempty(tr->jobs));
        hashmap_free(tr->jobs);
Transaction *transaction_abort_and_free(Transaction *tr) {

        transaction_abort(tr);

        return transaction_free(tr);