/* (extraction artifact removed: stray "]>"/git-blame table header from the HTML source this file was recovered from) */
1 | /* SPDX-License-Identifier: LGPL-2.1-or-later */ | |
2 | ||
3 | #include <fcntl.h> | |
4 | #include <unistd.h> | |
5 | ||
6 | #include "alloc-util.h" | |
7 | #include "bus-common-errors.h" | |
8 | #include "bus-error.h" | |
9 | #include "dbus-unit.h" | |
10 | #include "strv.h" | |
11 | #include "terminal-util.h" | |
12 | #include "transaction.h" | |
13 | ||
14 | static void transaction_unlink_job(Transaction *tr, Job *j, bool delete_dependencies); | |
15 | ||
static void transaction_delete_job(Transaction *tr, Job *j, bool delete_dependencies) {
        assert(tr);
        assert(j);

        /* Deletes one job from the transaction: first unlink it from the
         * per-unit job list and all JobDependency links (recursively deleting
         * jobs that were only pulled in by this one, if delete_dependencies
         * is true), then free the job object itself. */

        transaction_unlink_job(tr, j, delete_dependencies);

        job_free(j);
}
26 | ||
static void transaction_delete_unit(Transaction *tr, Unit *u) {
        Job *j;

        /* Deletes all jobs associated with a certain unit from the
         * transaction. hashmap_get() returns the head of the unit's job
         * list; deleting it unlinks it from tr->jobs (possibly promoting the
         * next list entry), so we loop until no job for the unit remains.
         * Dependent jobs are dropped as well (delete_dependencies=true). */

        while ((j = hashmap_get(tr->jobs, u)))
                transaction_delete_job(tr, j, true);
}
36 | ||
static void transaction_abort(Transaction *tr) {
        Job *j;

        assert(tr);

        /* Drops every job from the transaction. We do not follow
         * dependencies here (delete_dependencies=false) since
         * hashmap_first() will visit each remaining job anyway. */

        while ((j = hashmap_first(tr->jobs)))
                transaction_delete_job(tr, j, false);

        assert(hashmap_isempty(tr->jobs));
}
47 | ||
static void transaction_find_jobs_that_matter_to_anchor(Job *j, unsigned generation) {
        assert(j);

        /* A recursive sweep through the graph that marks all units
         * that matter to the anchor job, i.e. are directly or
         * indirectly a dependency of the anchor job via paths that
         * are fully marked as mattering. The generation counter serves as
         * the "already visited" marker, so previous sweeps need not be
         * cleaned up. */

        j->matters_to_anchor = true;
        j->generation = generation;

        /* Walk all dependency links where j is the subject (i.e. jobs j
         * pulled in). */
        LIST_FOREACH(subject, l, j->subject_list) {

                /* This link does not matter */
                if (!l->matters)
                        continue;

                /* This unit has already been marked */
                if (l->object->generation == generation)
                        continue;

                transaction_find_jobs_that_matter_to_anchor(l->object, generation);
        }
}
72 | ||
static void transaction_merge_and_delete_job(Transaction *tr, Job *j, Job *other, JobType t) {
        JobDependency *last;

        assert(j);
        assert(other);
        assert(j->unit == other->unit);
        assert(!j->installed);

        /* Merges 'other' into 'j' and then deletes 'other': 'j' takes the
         * merged job type 't', inherits 'other's irreversible/mattering
         * flags, and takes over all of 'other's JobDependency links. */

        j->type = t;
        j->state = JOB_WAITING;
        j->irreversible = j->irreversible || other->irreversible;
        j->matters_to_anchor = j->matters_to_anchor || other->matters_to_anchor;

        /* Patch us in as new owner of the JobDependency objects */
        last = NULL;
        LIST_FOREACH(subject, l, other->subject_list) {
                assert(l->subject == other);
                l->subject = j;
                last = l;
        }

        /* Merge both lists: prepend 'other's subject list in front of
         * 'j's. 'last' is the tail of 'other's list after the walk above. */
        if (last) {
                last->subject_next = j->subject_list;
                if (j->subject_list)
                        j->subject_list->subject_prev = last;
                j->subject_list = other->subject_list;
        }

        /* Patch us in as new owner of the JobDependency objects */
        last = NULL;
        LIST_FOREACH(object, l, other->object_list) {
                assert(l->object == other);
                l->object = j;
                last = l;
        }

        /* Merge both lists (same prepend scheme as for the subject list) */
        if (last) {
                last->object_next = j->object_list;
                if (j->object_list)
                        j->object_list->object_prev = last;
                j->object_list = other->object_list;
        }

        /* Kill the other job: clear its list heads first so that
         * transaction_unlink_job() (called via transaction_delete_job())
         * does not free the JobDependency objects we just took over. */
        other->subject_list = NULL;
        other->object_list = NULL;
        transaction_delete_job(tr, other, true);
}
125 | ||
126 | _pure_ static bool job_is_conflicted_by(Job *j) { | |
127 | assert(j); | |
128 | ||
129 | /* Returns true if this job is pulled in by a least one | |
130 | * ConflictedBy dependency. */ | |
131 | ||
132 | LIST_FOREACH(object, l, j->object_list) | |
133 | if (l->conflicts) | |
134 | return true; | |
135 | ||
136 | return false; | |
137 | } | |
138 | ||
static int delete_one_unmergeable_job(Transaction *tr, Job *job) {
        assert(job);

        /* Tries to delete one item in the linked list
         * j->transaction_next->transaction_next->... that conflicts
         * with another one, in an attempt to make an inconsistent
         * transaction work.
         *
         * Returns 0 if one job could be dropped, -ENOEXEC if two
         * conflicting jobs both matter to the anchor (unfixable), and
         * -EINVAL if no conflicting pair was found at all. */

        /* We rely here on the fact that if a merged with b does not
         * merge with c, either a or b merge with c neither */
        LIST_FOREACH(transaction, j, job)
                LIST_FOREACH(transaction, k, j->transaction_next) {
                        Job *d;

                        /* Is this one mergeable? Then skip it */
                        if (job_type_is_mergeable(j->type, k->type))
                                continue;

                        /* Ok, we found two that conflict, let's see if we can
                         * drop one of them */
                        if (!j->matters_to_anchor && !k->matters_to_anchor) {

                                /* Both jobs don't matter, so let's
                                 * find the one that is smarter to
                                 * remove. Let's think positive and
                                 * rather remove stops then starts --
                                 * except if something is being
                                 * stopped because it is conflicted by
                                 * another unit in which case we
                                 * rather remove the start. */

                                log_unit_debug(j->unit,
                                               "Looking at job %s/%s conflicted_by=%s",
                                               j->unit->id, job_type_to_string(j->type),
                                               yes_no(j->type == JOB_STOP && job_is_conflicted_by(j)));
                                log_unit_debug(k->unit,
                                               "Looking at job %s/%s conflicted_by=%s",
                                               k->unit->id, job_type_to_string(k->type),
                                               yes_no(k->type == JOB_STOP && job_is_conflicted_by(k)));

                                if (j->type == JOB_STOP) {

                                        if (job_is_conflicted_by(j))
                                                d = k;
                                        else
                                                d = j;

                                } else if (k->type == JOB_STOP) {

                                        if (job_is_conflicted_by(k))
                                                d = j;
                                        else
                                                d = k;
                                } else
                                        d = j;

                        } else if (!j->matters_to_anchor)
                                d = j;
                        else if (!k->matters_to_anchor)
                                d = k;
                        else
                                /* Both matter to the anchor: we cannot fix
                                 * this transaction by dropping either. */
                                return -ENOEXEC;

                        /* Ok, we can drop one, so let's do so. */
                        log_unit_debug(d->unit,
                                       "Fixing conflicting jobs %s/%s,%s/%s by deleting job %s/%s",
                                       j->unit->id, job_type_to_string(j->type),
                                       k->unit->id, job_type_to_string(k->type),
                                       d->unit->id, job_type_to_string(d->type));
                        transaction_delete_job(tr, d, true);
                        return 0;
                }

        return -EINVAL;
}
214 | ||
static int transaction_merge_jobs(Transaction *tr, sd_bus_error *e) {
        Job *j;
        int r;

        assert(tr);

        /* Merges all jobs queued per unit into one job per unit.
         *
         * Returns 0 on success, -EAGAIN if an unmergeable job was dropped
         * and the caller should garbage-collect and retry, or a bus error
         * (via e) if the conflict cannot be resolved. */

        /* First step, check whether any of the jobs for one specific
         * task conflict. If so, try to drop one of them. */
        HASHMAP_FOREACH(j, tr->jobs) {
                JobType t;

                t = j->type;
                LIST_FOREACH(transaction, k, j->transaction_next) {
                        if (job_type_merge_and_collapse(&t, k->type, j->unit) >= 0)
                                continue;

                        /* OK, we could not merge all jobs for this
                         * action. Let's see if we can get rid of one
                         * of them */

                        r = delete_one_unmergeable_job(tr, j);
                        if (r >= 0)
                                /* Ok, we managed to drop one, now
                                 * let's ask our callers to call us
                                 * again after garbage collecting */
                                return -EAGAIN;

                        /* We couldn't merge anything. Failure */
                        return sd_bus_error_setf(e, BUS_ERROR_TRANSACTION_JOBS_CONFLICTING,
                                                 "Transaction contains conflicting jobs '%s' and '%s' for %s. "
                                                 "Probably contradicting requirement dependencies configured.",
                                                 job_type_to_string(t),
                                                 job_type_to_string(k->type),
                                                 k->unit->id);
                }
        }

        /* Second step, merge the jobs. The first pass guaranteed this
         * cannot fail anymore, hence the assert_se() below. */
        HASHMAP_FOREACH(j, tr->jobs) {
                JobType t = j->type;

                /* Merge all transaction jobs for j->unit */
                LIST_FOREACH(transaction, k, j->transaction_next)
                        assert_se(job_type_merge_and_collapse(&t, k->type, j->unit) == 0);

                /* Collapse the list into a single job. If the anchor job is
                 * in the list, merge into it (so the anchor Job object
                 * survives); otherwise merge everything into the head. */
                Job *k;
                while ((k = j->transaction_next)) {
                        if (tr->anchor_job == k) {
                                transaction_merge_and_delete_job(tr, k, j, t);
                                j = k;
                        } else
                                transaction_merge_and_delete_job(tr, j, k, t);
                }

                assert(!j->transaction_next);
                assert(!j->transaction_prev);
        }

        return 0;
}
275 | ||
static void transaction_drop_redundant(Transaction *tr) {
        bool again;

        /* Goes through the transaction and removes all jobs of the units whose jobs are all noops. If not
         * all of a unit's jobs are redundant, they are kept. */

        assert(tr);

        /* Deleting a job invalidates the hashmap iterator, hence restart the
         * outer scan after every deletion until a full pass makes no
         * change. */
        do {
                Job *j;

                again = false;

                HASHMAP_FOREACH(j, tr->jobs) {
                        bool keep = false;

                        /* Keep the unit's jobs if any of them is the anchor,
                         * is not a noop for the unit's current state, or
                         * conflicts with an already-installed job. */
                        LIST_FOREACH(transaction, k, j)
                                if (tr->anchor_job == k ||
                                    !job_type_is_redundant(k->type, unit_active_state(k->unit)) ||
                                    (k->unit->job && job_type_is_conflicting(k->type, k->unit->job->type))) {
                                        keep = true;
                                        break;
                                }

                        if (!keep) {
                                log_trace("Found redundant job %s/%s, dropping from transaction.",
                                          j->unit->id, job_type_to_string(j->type));
                                transaction_delete_job(tr, j, false);
                                again = true;
                                break;
                        }
                }
        } while (again);
}
310 | ||
311 | _pure_ static bool job_matters_to_anchor(Job *job) { | |
312 | assert(job); | |
313 | assert(!job->transaction_prev); | |
314 | ||
315 | /* Checks whether at least one of the jobs for this transaction matters to the anchor. */ | |
316 | ||
317 | LIST_FOREACH(transaction, j, job) | |
318 | if (j->matters_to_anchor) | |
319 | return true; | |
320 | ||
321 | return false; | |
322 | } | |
323 | ||
static char* merge_unit_ids(const char* unit_log_field, char * const* pairs) {
        _cleanup_free_ char *ans = NULL;
        size_t size = 0;

        assert(unit_log_field);

        /* Builds a newline-separated string of "<unit_log_field><unit_id>"
         * entries from the (unit_id, job_type) pairs in 'pairs'. The job
         * type half of each pair is ignored here. Returns a freshly
         * allocated string (possibly ""), or NULL on allocation failure. */

        STRV_FOREACH_PAIR(unit_id, job_type, pairs) {
                size_t next;

                /* Overwrite the previous entry's NUL terminator with a
                 * newline; 'size' always includes that terminator. */
                if (size > 0)
                        ans[size - 1] = '\n';

                next = strlen(unit_log_field) + strlen(*unit_id);
                if (!GREEDY_REALLOC(ans, size + next + 1))
                        return NULL;

                sprintf(ans + size, "%s%s", unit_log_field, *unit_id);
                size += next + 1;
        }

        /* Empty input: still return an allocated (empty) string so the
         * caller can treat NULL purely as OOM. */
        if (!ans)
                return strdup("");

        return TAKE_PTR(ans);
}
349 | ||
static int transaction_verify_order_one(Transaction *tr, Job *j, Job *from, unsigned generation, sd_bus_error *e) {

        static const UnitDependencyAtom directions[] = {
                UNIT_ATOM_BEFORE,
                UNIT_ATOM_AFTER,
        };

        int r;

        assert(tr);
        assert(j);
        assert(!j->transaction_prev);

        /* Does a recursive sweep through the ordering graph, looking for a cycle. If we find a cycle we try
         * to break it. 'from' is the job we came from (NULL at the root);
         * 'generation' marks nodes visited in this sweep. Returns 0 on
         * success, -EAGAIN after deleting a job to break a cycle (caller
         * should retry), or a bus error if the cycle is unbreakable. */

        /* Have we seen this before? */
        if (j->generation == generation) {
                Job *k, *delete = NULL;
                _cleanup_free_ char **array = NULL, *unit_ids = NULL;

                /* If the marker is NULL we have been here already and decided the job was loop-free from
                 * here. Hence shortcut things and return right-away. */
                if (!j->marker)
                        return 0;

                /* So, the marker is not NULL and we already have been here. We have a cycle. Let's try to
                 * break it. We go backwards in our path and try to find a suitable job to remove. We use the
                 * marker to find our way back, since smart how we are we stored our way back in there. */
                for (k = from; k; k = ((k->generation == generation && k->marker != k) ? k->marker : NULL)) {

                        /* For logging below */
                        if (strv_push_pair(&array, k->unit->id, (char*) job_type_to_string(k->type)) < 0)
                                log_oom();

                        /* Candidate for deletion: must be part of this
                         * transaction (not merely an installed job) and must
                         * not matter to the anchor. */
                        if (!delete && hashmap_contains(tr->jobs, k->unit) && !job_matters_to_anchor(k))
                                /* Ok, we can drop this one, so let's do so. */
                                delete = k;

                        /* Check if this in fact was the beginning of the cycle */
                        if (k == j)
                                break;
                }

                unit_ids = merge_unit_ids(j->manager->unit_log_field, array); /* ignore error */

                STRV_FOREACH_PAIR(unit_id, job_type, array)
                        /* logging for j not k here to provide a consistent narrative */
                        log_struct(LOG_WARNING,
                                   LOG_UNIT_MESSAGE(j->unit,
                                                    "Found %s on %s/%s",
                                                    unit_id == array ? "ordering cycle" : "dependency",
                                                    *unit_id, *job_type),
                                   "%s", strna(unit_ids));

                if (delete) {
                        const char *status;
                        /* logging for j not k here to provide a consistent narrative */
                        log_struct(LOG_ERR,
                                   LOG_UNIT_MESSAGE(j->unit,
                                                    "Job %s/%s deleted to break ordering cycle starting with %s/%s",
                                                    delete->unit->id, job_type_to_string(delete->type),
                                                    j->unit->id, job_type_to_string(j->type)),
                                   "%s", strna(unit_ids));

                        if (log_get_show_color())
                                status = ANSI_HIGHLIGHT_RED " SKIP " ANSI_NORMAL;
                        else
                                status = " SKIP ";

                        unit_status_printf(delete->unit,
                                           STATUS_TYPE_NOTICE,
                                           status,
                                           "Ordering cycle found, skipping %s",
                                           unit_status_string(delete->unit, NULL));
                        transaction_delete_unit(tr, delete->unit);
                        return -EAGAIN;
                }

                log_struct(LOG_ERR,
                           LOG_UNIT_MESSAGE(j->unit, "Unable to break cycle starting with %s/%s",
                                            j->unit->id, job_type_to_string(j->type)),
                           "%s", strna(unit_ids));

                return sd_bus_error_setf(e, BUS_ERROR_TRANSACTION_ORDER_IS_CYCLIC,
                                         "Transaction order is cyclic. See system logs for details.");
        }

        /* Make the marker point to where we come from, so that we can
         * find our way backwards if we want to break a cycle. We use
         * a special marker for the beginning: we point to
         * ourselves. */
        j->marker = from ?: j;
        j->generation = generation;

        /* Actual ordering of jobs depends on the unit ordering dependency and job types. We need to traverse
         * the graph over 'before' edges in the actual job execution order. We traverse over both unit
         * ordering dependencies and we test with job_compare() whether it is the 'before' edge in the job
         * execution ordering. */
        for (size_t d = 0; d < ELEMENTSOF(directions); d++) {
                Unit *u;

                UNIT_FOREACH_DEPENDENCY(u, j->unit, directions[d]) {
                        Job *o;

                        /* Is there a job for this unit? */
                        o = hashmap_get(tr->jobs, u);
                        if (!o) {
                                /* Ok, there is no job for this in the transaction, but maybe there is
                                 * already one running? */
                                o = u->job;
                                if (!o)
                                        continue;
                        }

                        /* Cut traversing if the job j is not really *before* o. */
                        if (job_compare(j, o, directions[d]) >= 0)
                                continue;

                        r = transaction_verify_order_one(tr, o, j, generation, e);
                        if (r < 0)
                                return r;
                }
        }

        /* Ok, let's backtrack, and remember that this entry is not on
         * our path anymore. */
        j->marker = NULL;

        return 0;
}
481 | ||
482 | static int transaction_verify_order(Transaction *tr, unsigned *generation, sd_bus_error *e) { | |
483 | Job *j; | |
484 | int r; | |
485 | unsigned g; | |
486 | ||
487 | assert(tr); | |
488 | assert(generation); | |
489 | ||
490 | /* Check if the ordering graph is cyclic. If it is, try to fix | |
491 | * that up by dropping one of the jobs. */ | |
492 | ||
493 | g = (*generation)++; | |
494 | ||
495 | HASHMAP_FOREACH(j, tr->jobs) { | |
496 | r = transaction_verify_order_one(tr, j, NULL, g, e); | |
497 | if (r < 0) | |
498 | return r; | |
499 | } | |
500 | ||
501 | return 0; | |
502 | } | |
503 | ||
static void transaction_collect_garbage(Transaction *tr) {
        bool again;

        assert(tr);

        /* Drops jobs that are not required by any other job, i.e. whose
         * object_list (inbound dependency links) is empty. The anchor job is
         * always kept. Each deletion invalidates the hashmap iterator, so we
         * restart the scan until a full pass removes nothing. */

        do {
                Job *j;

                again = false;

                HASHMAP_FOREACH(j, tr->jobs) {
                        if (tr->anchor_job == j)
                                continue;

                        if (!j->object_list) {
                                log_trace("Garbage collecting job %s/%s", j->unit->id, job_type_to_string(j->type));
                                transaction_delete_job(tr, j, true);
                                again = true;
                                break;
                        }

                        /* "root" means the link has no subject job, i.e. it
                         * anchors the transaction itself. */
                        log_trace("Keeping job %s/%s because of %s/%s",
                                  j->unit->id, job_type_to_string(j->type),
                                  j->object_list->subject ? j->object_list->subject->unit->id : "root",
                                  j->object_list->subject ? job_type_to_string(j->object_list->subject->type) : "root");
                }

        } while (again);
}
535 | ||
static int transaction_is_destructive(Transaction *tr, JobMode mode, sd_bus_error *e) {
        Job *j;

        assert(tr);

        /* Checks whether applying this transaction means that existing jobs
         * would be replaced. Returns 0 if applying is fine, or a
         * BUS_ERROR_TRANSACTION_IS_DESTRUCTIVE bus error if an installed job
         * conflicts and either the mode is JOB_FAIL or the installed job is
         * marked irreversible. */

        HASHMAP_FOREACH(j, tr->jobs) {

                /* Assume merged: transaction_merge_jobs() has already
                 * collapsed each unit's job list to a single entry. */
                assert(!j->transaction_prev);
                assert(!j->transaction_next);

                if (j->unit->job && (mode == JOB_FAIL || j->unit->job->irreversible) &&
                    job_type_is_conflicting(j->unit->job->type, j->type))
                        return sd_bus_error_setf(e, BUS_ERROR_TRANSACTION_IS_DESTRUCTIVE,
                                                 "Transaction for %s/%s is destructive (%s has '%s' job queued, but '%s' is included in transaction).",
                                                 tr->anchor_job->unit->id, job_type_to_string(tr->anchor_job->type),
                                                 j->unit->id, job_type_to_string(j->unit->job->type), job_type_to_string(j->type));
        }

        return 0;
}
560 | ||
static void transaction_minimize_impact(Transaction *tr) {
        Job *head;

        assert(tr);

        /* Drops all unnecessary jobs that reverse already active jobs
         * or that stop a running service. Only jobs that do not matter to
         * the anchor are eligible. Deleting a job invalidates both the
         * hashmap iterator and the transaction list we are walking, hence
         * the goto-based full rescan after each deletion. */

rescan:
        HASHMAP_FOREACH(head, tr->jobs) {
                LIST_FOREACH(transaction, j, head) {
                        bool stops_running_service, changes_existing_job;

                        /* If it matters, we shouldn't drop it */
                        if (j->matters_to_anchor)
                                continue;

                        /* Would this stop a running service?
                         * Would this change an existing job?
                         * If so, let's drop this entry */

                        stops_running_service =
                                j->type == JOB_STOP && UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(j->unit));

                        changes_existing_job =
                                j->unit->job &&
                                job_type_is_conflicting(j->type, j->unit->job->type);

                        if (!stops_running_service && !changes_existing_job)
                                continue;

                        if (stops_running_service)
                                log_unit_debug(j->unit,
                                               "%s/%s would stop a running service.",
                                               j->unit->id, job_type_to_string(j->type));

                        if (changes_existing_job)
                                log_unit_debug(j->unit,
                                               "%s/%s would change existing job.",
                                               j->unit->id, job_type_to_string(j->type));

                        /* Ok, let's get rid of this */
                        log_unit_debug(j->unit,
                                       "Deleting %s/%s to minimize impact.",
                                       j->unit->id, job_type_to_string(j->type));

                        transaction_delete_job(tr, j, true);
                        goto rescan;
                }
        }
}
612 | ||
static int transaction_apply(
                Transaction *tr,
                Manager *m,
                JobMode mode,
                Set *affected_jobs) {

        Job *j;
        int r;

        /* Moves the transaction jobs to the manager's set of active jobs.
         * Returns 0 on success; on allocation failure all partially
         * registered jobs are rolled back out of m->jobs and a negative
         * errno is returned. */

        if (IN_SET(mode, JOB_ISOLATE, JOB_FLUSH)) {

                /* When isolating first kill all installed jobs which
                 * aren't part of the new transaction */
                HASHMAP_FOREACH(j, m->jobs) {
                        assert(j->installed);

                        if (j->unit->ignore_on_isolate)
                                continue;

                        if (hashmap_contains(tr->jobs, j->unit))
                                continue;

                        /* Not invalidating recursively. Avoids triggering
                         * OnFailure= actions of dependent jobs. Also avoids
                         * invalidating our iterator. */
                        job_finish_and_invalidate(j, JOB_CANCELED, false, false);
                }
        }

        /* Register all transaction jobs in the manager's job map, keyed by
         * job id, before installing any of them, so failure can be rolled
         * back cleanly. */
        HASHMAP_FOREACH(j, tr->jobs) {
                /* Assume merged */
                assert(!j->transaction_prev);
                assert(!j->transaction_next);

                r = hashmap_ensure_put(&m->jobs, NULL, UINT32_TO_PTR(j->id), j);
                if (r < 0)
                        goto rollback;
        }

        /* Now hand the jobs over: steal each from the transaction, drop its
         * transaction-internal dependency links, and install it. */
        while ((j = hashmap_steal_first(tr->jobs))) {
                Job *installed_job;

                /* Clean the job dependencies */
                transaction_unlink_job(tr, j, false);

                installed_job = job_install(j);
                if (installed_job != j) {
                        /* j has been merged into a previously installed job */
                        if (tr->anchor_job == j)
                                tr->anchor_job = installed_job;
                        hashmap_remove_value(m->jobs, UINT32_TO_PTR(j->id), j);
                        job_free(j);
                        j = installed_job;
                }

                job_add_to_run_queue(j);
                job_add_to_dbus_queue(j);
                job_start_timer(j, false);
                job_shutdown_magic(j);

                /* When 'affected' is specified, let's track all in it all jobs that were touched because of
                 * this transaction. */
                if (affected_jobs)
                        (void) set_put(affected_jobs, j);
        }

        return 0;

rollback:

        /* Undo the registrations made above; tr->jobs still owns the Job
         * objects at this point, so only the map entries are removed. */
        HASHMAP_FOREACH(j, tr->jobs)
                hashmap_remove_value(m->jobs, UINT32_TO_PTR(j->id), j);

        return r;
}
690 | ||
int transaction_activate(
                Transaction *tr,
                Manager *m,
                JobMode mode,
                Set *affected_jobs,
                sd_bus_error *e) {

        Job *j;
        int r;
        unsigned generation = 1;

        assert(tr);

        /* This applies the changes recorded in tr->jobs to
         * the actual list of jobs, if possible. On success the transaction
         * is emptied and 0 is returned; on failure a negative errno is
         * returned and 'e' may carry a bus error with details. */

        /* Reset the generation counter of all installed jobs. The detection of cycles
         * looks at installed jobs. If they had a non-zero generation from some previous
         * walk of the graph, the algorithm would break. */
        HASHMAP_FOREACH(j, m->jobs)
                j->generation = 0;

        /* First step: figure out which jobs matter */
        transaction_find_jobs_that_matter_to_anchor(tr->anchor_job, generation++);

        /* Second step: Try not to stop any running services if
         * we don't have to. Don't try to reverse running
         * jobs if we don't have to. */
        if (mode == JOB_FAIL)
                transaction_minimize_impact(tr);

        /* Third step: Drop redundant jobs */
        transaction_drop_redundant(tr);

        for (;;) {
                /* Fourth step: Let's remove unneeded jobs that might
                 * be lurking. */
                if (mode != JOB_ISOLATE)
                        transaction_collect_garbage(tr);

                /* Fifth step: verify order makes sense and correct
                 * cycles if necessary and possible. -EAGAIN means a job was
                 * dropped to break a cycle, so re-check from the top. */
                r = transaction_verify_order(tr, &generation, e);
                if (r >= 0)
                        break;

                if (r != -EAGAIN)
                        return log_warning_errno(r, "Requested transaction contains an unfixable cyclic ordering dependency: %s", bus_error_message(e, r));

                /* Let's see if the resulting transaction ordering
                 * graph is still cyclic... */
        }

        for (;;) {
                /* Sixth step: let's drop unmergeable entries if
                 * necessary and possible, merge entries we can
                 * merge */
                r = transaction_merge_jobs(tr, e);
                if (r >= 0)
                        break;

                if (r != -EAGAIN)
                        return log_warning_errno(r, "Requested transaction contains unmergeable jobs: %s", bus_error_message(e, r));

                /* Seventh step: an entry got dropped, let's garbage
                 * collect its dependencies. */
                if (mode != JOB_ISOLATE)
                        transaction_collect_garbage(tr);

                /* Let's see if the resulting transaction still has
                 * unmergeable entries ... */
        }

        /* Eights step: Drop redundant jobs again, if the merging now allows us to drop more. */
        transaction_drop_redundant(tr);

        /* Ninth step: check whether we can actually apply this */
        r = transaction_is_destructive(tr, mode, e);
        if (r < 0)
                return log_notice_errno(r, "Requested transaction contradicts existing jobs: %s", bus_error_message(e, r));

        /* Tenth step: apply changes */
        r = transaction_apply(tr, m, mode, affected_jobs);
        if (r < 0)
                return log_warning_errno(r, "Failed to apply transaction: %m");

        assert(hashmap_isempty(tr->jobs));

        if (!hashmap_isempty(m->jobs)) {
                /* Are there any jobs now? Then make sure we have the
                 * idle pipe around. We don't really care too much
                 * whether this works or not, as the idle pipe is a
                 * feature for cosmetics, not actually useful for
                 * anything beyond that. */

                if (m->idle_pipe[0] < 0 && m->idle_pipe[1] < 0 &&
                    m->idle_pipe[2] < 0 && m->idle_pipe[3] < 0) {
                        (void) pipe2(m->idle_pipe, O_NONBLOCK|O_CLOEXEC);
                        (void) pipe2(m->idle_pipe + 2, O_NONBLOCK|O_CLOEXEC);
                }
        }

        return 0;
}
795 | ||
static Job* transaction_add_one_job(Transaction *tr, JobType type, Unit *unit, bool *is_new) {
        Job *j, *f;

        assert(tr);
        assert(unit);

        /* Looks for an existing prospective job and returns that. If
         * it doesn't exist it is created and added to the prospective
         * jobs list. Returns NULL on allocation failure. If is_new is
         * non-NULL, it is set to whether a new job was created. */

        f = hashmap_get(tr->jobs, unit);

        /* Reuse an existing job of the same type for this unit, if any. */
        LIST_FOREACH(transaction, i, f) {
                assert(i->unit == unit);

                if (i->type == type) {
                        if (is_new)
                                *is_new = false;
                        return i;
                }
        }

        j = job_new(unit, type);
        if (!j)
                return NULL;

        j->generation = 0;
        j->marker = NULL;
        j->matters_to_anchor = false;
        j->irreversible = tr->irreversible;

        /* Prepend to the unit's job list and make the new head the hashmap
         * value; on hashmap failure undo the list change and free. */
        LIST_PREPEND(transaction, f, j);

        if (hashmap_replace(tr->jobs, unit, f) < 0) {
                LIST_REMOVE(transaction, f, j);
                job_free(j);
                return NULL;
        }

        if (is_new)
                *is_new = true;

        log_trace("Added job %s/%s to transaction.", unit->id, job_type_to_string(type));

        return j;
}
842 | ||
static void transaction_unlink_job(Transaction *tr, Job *j, bool delete_dependencies) {
        assert(tr);
        assert(j);

        /* Removes j from the transaction bookkeeping: the per-unit job list
         * (fixing up the tr->jobs hashmap entry if j was the list head) and
         * all JobDependency links. Jobs that were only pulled in by j via
         * mattering links are deleted too if delete_dependencies is true.
         * Does NOT free j itself. */

        if (j->transaction_prev)
                j->transaction_prev->transaction_next = j->transaction_next;
        else if (j->transaction_next)
                /* j was the list head: promote its successor in the map. */
                hashmap_replace(tr->jobs, j->unit, j->transaction_next);
        else
                /* j was the only job for its unit. */
                hashmap_remove_value(tr->jobs, j->unit, j);

        if (j->transaction_next)
                j->transaction_next->transaction_prev = j->transaction_prev;

        j->transaction_prev = j->transaction_next = NULL;

        /* Drop outbound links (jobs j pulled in). */
        while (j->subject_list)
                job_dependency_free(j->subject_list);

        /* Drop inbound links; remember the subject of each mattering link so
         * the dependent job can be deleted if requested. */
        while (j->object_list) {
                Job *other = j->object_list->matters ? j->object_list->subject : NULL;

                job_dependency_free(j->object_list);

                if (other && delete_dependencies) {
                        log_unit_debug(other->unit,
                                       "Deleting job %s/%s as dependency of job %s/%s",
                                       other->unit->id, job_type_to_string(other->type),
                                       j->unit->id, job_type_to_string(j->type));
                        transaction_delete_job(tr, other, delete_dependencies);
                }
        }
}
876 | ||
void transaction_add_propagate_reload_jobs(
                Transaction *tr,
                Unit *unit,
                Job *by,
                TransactionAddFlags flags,
                sd_bus_error *e) {

        JobType nt;
        Unit *dep;
        int r;

        assert(tr);
        assert(unit);

        /* Queues try-reload jobs for every unit that 'unit' propagates
         * reloads to. Failures to add a dependency job are logged and
         * ignored (best effort); 'e' is cleared after use so it can be
         * reused for the next dependency. */

        UNIT_FOREACH_DEPENDENCY(dep, unit, UNIT_ATOM_PROPAGATES_RELOAD_TO) {
                /* Collapse JOB_TRY_RELOAD per target unit; JOB_NOP means
                 * there is nothing to do for this one. */
                nt = job_type_collapse(JOB_TRY_RELOAD, dep);
                if (nt == JOB_NOP)
                        continue;

                r = transaction_add_job_and_dependencies(tr, nt, dep, by, flags, e);
                if (r < 0) {
                        log_unit_warning(dep,
                                         "Cannot add dependency reload job, ignoring: %s",
                                         bus_error_message(e, r));
                        sd_bus_error_free(e);
                }
        }
}
905 | ||
/* Adds a job of the given type for 'unit' to the transaction 'tr', then recursively pulls in jobs for all
 * units the new job depends on (requirements, conflicts, propagated stops/restarts/reloads).
 *
 * Parameters:
 *   tr    — transaction being built up
 *   type  — job type to enqueue (must be < _JOB_TYPE_MAX_IN_TRANSACTION)
 *   unit  — unit the job operates on
 *   by    — parent job that pulls this one in, or NULL for the anchor (top-level) job
 *   flags — TRANSACTION_MATTERS/TRANSACTION_CONFLICTS/TRANSACTION_IGNORE_ORDER/
 *           TRANSACTION_IGNORE_REQUIREMENTS controlling link semantics and recursion
 *   e     — bus error to fill in on failure (also reused/cleared across recursive calls)
 *
 * Returns 0 on success, a negative errno-style code on failure. On hard failure of a required
 * dependency the freshly added top-level job is deleted again (see the 'fail' label). */
int transaction_add_job_and_dependencies(
                Transaction *tr,
                JobType type,
                Unit *unit,
                Job *by,
                TransactionAddFlags flags,
                sd_bus_error *e) {

        bool is_new;
        Unit *dep;
        Job *ret;
        int r;

        assert(tr);
        assert(type < _JOB_TYPE_MAX);
        assert(type < _JOB_TYPE_MAX_IN_TRANSACTION);
        assert(unit);

        /* Before adding jobs for this unit, let's ensure that its state has been loaded. This matters when
         * jobs are spawned as part of coldplugging itself (see e.g. path_coldplug()). This way, we
         * "recursively" coldplug units, ensuring that we do not look at state of not-yet-coldplugged
         * units. */
        if (MANAGER_IS_RELOADING(unit->manager))
                unit_coldplug(unit);

        if (by)
                log_trace("Pulling in %s/%s from %s/%s", unit->id, job_type_to_string(type), by->unit->id, job_type_to_string(by->type));

        /* Safety check that the unit is a valid state, i.e. not in UNIT_STUB or UNIT_MERGED which should only be set
         * temporarily. */
        if (!UNIT_IS_LOAD_COMPLETE(unit->load_state))
                return sd_bus_error_setf(e, BUS_ERROR_LOAD_FAILED, "Unit %s is not loaded properly.", unit->id);

        /* Stop jobs may target units that failed to load; everything else requires a clean load state. */
        if (type != JOB_STOP) {
                r = bus_unit_validate_load_state(unit, e);
                /* The time-based cache allows to start new units without daemon-reload, but if they are
                 * already referenced (because of dependencies or ordering) then we have to force a load of
                 * the fragment. As an optimization, check first if anything in the usual paths was modified
                 * since the last time the cache was loaded. Also check if the last time an attempt to load
                 * the unit was made was before the most recent cache refresh, so that we know we need to try
                 * again — even if the cache is current, it might have been updated in a different context
                 * before we had a chance to retry loading this particular unit.
                 *
                 * Given building up the transaction is a synchronous operation, attempt
                 * to load the unit immediately. */
                if (r < 0 && manager_unit_cache_should_retry_load(unit)) {
                        sd_bus_error_free(e);
                        unit->load_state = UNIT_STUB;
                        r = unit_load(unit);
                        if (r < 0 || unit->load_state == UNIT_STUB)
                                unit->load_state = UNIT_NOT_FOUND;
                        r = bus_unit_validate_load_state(unit, e);
                }
                if (r < 0)
                        return r;
        }

        if (!unit_job_is_applicable(unit, type))
                return sd_bus_error_setf(e, BUS_ERROR_JOB_TYPE_NOT_APPLICABLE,
                                         "Job type %s is not applicable for unit %s.",
                                         job_type_to_string(type), unit->id);

        /* First add the job. */
        ret = transaction_add_one_job(tr, type, unit, &is_new);
        if (!ret)
                return -ENOMEM;

        if (FLAGS_SET(flags, TRANSACTION_IGNORE_ORDER))
                ret->ignore_order = true;

        /* Then, add a link to the job. */
        if (by) {
                if (!job_dependency_new(by, ret, FLAGS_SET(flags, TRANSACTION_MATTERS), FLAGS_SET(flags, TRANSACTION_CONFLICTS)))
                        return -ENOMEM;
        } else {
                /* If the job has no parent job, it is the anchor job. */
                assert(!tr->anchor_job);
                tr->anchor_job = ret;
        }

        /* Only a job we just created needs its dependencies pulled in; a merged pre-existing job already
         * has them. JOB_NOP jobs never pull in anything. */
        if (is_new && !FLAGS_SET(flags, TRANSACTION_IGNORE_REQUIREMENTS) && type != JOB_NOP) {
                _cleanup_set_free_ Set *following = NULL;

                /* If we are following some other unit, make sure we add all dependencies of everybody
                 * following. */
                if (unit_following_set(ret->unit, &following) > 0)
                        SET_FOREACH(dep, following) {
                                /* Only TRANSACTION_IGNORE_ORDER is inherited here — followers' jobs do not
                                 * "matter" to the anchor. */
                                r = transaction_add_job_and_dependencies(tr, type, dep, ret, flags & TRANSACTION_IGNORE_ORDER, e);
                                if (r < 0) {
                                        log_unit_full_errno(dep, r == -ERFKILL ? LOG_INFO : LOG_WARNING, r,
                                                            "Cannot add dependency job, ignoring: %s",
                                                            bus_error_message(e, r));
                                        sd_bus_error_free(e);
                                }
                        }

                /* Finally, recursively add in all dependencies. */
                if (IN_SET(type, JOB_START, JOB_RESTART)) {
                        /* Hard requirements: failure (other than "job type not applicable") kills the
                         * top-level job too. */
                        UNIT_FOREACH_DEPENDENCY(dep, ret->unit, UNIT_ATOM_PULL_IN_START) {
                                r = transaction_add_job_and_dependencies(tr, JOB_START, dep, ret, TRANSACTION_MATTERS | (flags & TRANSACTION_IGNORE_ORDER), e);
                                if (r < 0) {
                                        if (r != -EBADR) /* job type not applicable */
                                                goto fail;

                                        sd_bus_error_free(e);
                                }
                        }

                        /* Soft requirements (e.g. Wants=): failures are logged but never fatal. */
                        UNIT_FOREACH_DEPENDENCY(dep, ret->unit, UNIT_ATOM_PULL_IN_START_IGNORED) {
                                r = transaction_add_job_and_dependencies(tr, JOB_START, dep, ret, flags & TRANSACTION_IGNORE_ORDER, e);
                                if (r < 0) {
                                        /* unit masked, job type not applicable and unit not found are not
                                         * considered as errors. */
                                        log_unit_full_errno(dep,
                                                            IN_SET(r, -ERFKILL, -EBADR, -ENOENT) ? LOG_DEBUG : LOG_WARNING,
                                                            r, "Cannot add dependency job, ignoring: %s",
                                                            bus_error_message(e, r));
                                        sd_bus_error_free(e);
                                }
                        }

                        /* Requisite-style deps: verify the dependency is active rather than starting it. */
                        UNIT_FOREACH_DEPENDENCY(dep, ret->unit, UNIT_ATOM_PULL_IN_VERIFY) {
                                r = transaction_add_job_and_dependencies(tr, JOB_VERIFY_ACTIVE, dep, ret, TRANSACTION_MATTERS | (flags & TRANSACTION_IGNORE_ORDER), e);
                                if (r < 0) {
                                        if (r != -EBADR) /* job type not applicable */
                                                goto fail;

                                        sd_bus_error_free(e);
                                }
                        }

                        /* Conflicts=: pull in stop jobs, marked as conflicting links. */
                        UNIT_FOREACH_DEPENDENCY(dep, ret->unit, UNIT_ATOM_PULL_IN_STOP) {
                                r = transaction_add_job_and_dependencies(tr, JOB_STOP, dep, ret, TRANSACTION_MATTERS | TRANSACTION_CONFLICTS | (flags & TRANSACTION_IGNORE_ORDER), e);
                                if (r < 0) {
                                        if (r != -EBADR) /* job type not applicable */
                                                goto fail;

                                        sd_bus_error_free(e);
                                }
                        }

                        /* ConflictedBy=-style stops: best-effort only. */
                        UNIT_FOREACH_DEPENDENCY(dep, ret->unit, UNIT_ATOM_PULL_IN_STOP_IGNORED) {
                                r = transaction_add_job_and_dependencies(tr, JOB_STOP, dep, ret, flags & TRANSACTION_IGNORE_ORDER, e);
                                if (r < 0) {
                                        log_unit_warning(dep,
                                                         "Cannot add dependency job, ignoring: %s",
                                                         bus_error_message(e, r));
                                        sd_bus_error_free(e);
                                }
                        }
                }

                if (IN_SET(type, JOB_STOP, JOB_RESTART)) {
                        _cleanup_set_free_ Set *propagated_restart = NULL;
                        /* We propagate STOP as STOP, but RESTART only as TRY_RESTART, in order not to start
                         * dependencies that are not around. */

                        if (type == JOB_RESTART)
                                UNIT_FOREACH_DEPENDENCY(dep, ret->unit, UNIT_ATOM_PROPAGATE_RESTART) {
                                        JobType nt;

                                        /* Remember who already got a restart so the stop propagation below
                                         * doesn't queue a second (stop) job for them. */
                                        r = set_ensure_put(&propagated_restart, NULL, dep);
                                        if (r < 0)
                                                return r;

                                        nt = job_type_collapse(JOB_TRY_RESTART, dep);
                                        if (nt == JOB_NOP)
                                                continue;

                                        r = transaction_add_job_and_dependencies(tr, nt, dep, ret, TRANSACTION_MATTERS | (flags & TRANSACTION_IGNORE_ORDER), e);
                                        if (r < 0) {
                                                if (r != -EBADR) /* job type not applicable */
                                                        return r;

                                                sd_bus_error_free(e);
                                        }
                                }

                        /* The 'stop' part of a restart job is also propagated to units with
                         * UNIT_ATOM_PROPAGATE_STOP */
                        UNIT_FOREACH_DEPENDENCY(dep, ret->unit, UNIT_ATOM_PROPAGATE_STOP) {
                                /* Units experienced restart propagation are skipped */
                                if (set_contains(propagated_restart, dep))
                                        continue;

                                r = transaction_add_job_and_dependencies(tr, JOB_STOP, dep, ret, TRANSACTION_MATTERS | (flags & TRANSACTION_IGNORE_ORDER), e);
                                if (r < 0) {
                                        if (r != -EBADR) /* job type not applicable */
                                                return r;

                                        sd_bus_error_free(e);
                                }
                        }
                }

                /* Reload propagation is entirely best-effort; errors are logged inside the helper. */
                if (type == JOB_RELOAD)
                        transaction_add_propagate_reload_jobs(tr, ret->unit, ret, flags & TRANSACTION_IGNORE_ORDER, e);

                /* JOB_VERIFY_ACTIVE requires no dependency handling */
        }

        return 0;
fail:
        /* Recursive call failed to add required jobs so let's drop top level job as well. */
        log_unit_debug_errno(unit, r, "Cannot add dependency job to transaction, deleting job %s/%s again: %s",
                             unit->id, job_type_to_string(type), bus_error_message(e, r));
        transaction_delete_job(tr, ret, /* delete_dependencies= */ false);
        return r;

}
1116 | ||
1117 | static bool shall_stop_on_isolate(Transaction *tr, Unit *u) { | |
1118 | assert(tr); | |
1119 | assert(u); | |
1120 | ||
1121 | if (u->ignore_on_isolate) | |
1122 | return false; | |
1123 | ||
1124 | /* Is there already something listed for this? */ | |
1125 | if (hashmap_contains(tr->jobs, u)) | |
1126 | return false; | |
1127 | ||
1128 | return true; | |
1129 | } | |
1130 | ||
1131 | int transaction_add_isolate_jobs(Transaction *tr, Manager *m) { | |
1132 | Unit *u; | |
1133 | char *k; | |
1134 | int r; | |
1135 | ||
1136 | assert(tr); | |
1137 | assert(m); | |
1138 | ||
1139 | HASHMAP_FOREACH_KEY(u, k, m->units) { | |
1140 | _cleanup_(sd_bus_error_free) sd_bus_error e = SD_BUS_ERROR_NULL; | |
1141 | Unit *o; | |
1142 | ||
1143 | /* Ignore aliases */ | |
1144 | if (u->id != k) | |
1145 | continue; | |
1146 | ||
1147 | /* No need to stop inactive units */ | |
1148 | if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(u)) && !u->job) | |
1149 | continue; | |
1150 | ||
1151 | if (!shall_stop_on_isolate(tr, u)) | |
1152 | continue; | |
1153 | ||
1154 | /* Keep units that are triggered by units we want to keep around. */ | |
1155 | bool keep = false; | |
1156 | UNIT_FOREACH_DEPENDENCY(o, u, UNIT_ATOM_TRIGGERED_BY) | |
1157 | if (!shall_stop_on_isolate(tr, o)) { | |
1158 | keep = true; | |
1159 | break; | |
1160 | } | |
1161 | if (keep) | |
1162 | continue; | |
1163 | ||
1164 | r = transaction_add_job_and_dependencies(tr, JOB_STOP, u, tr->anchor_job, TRANSACTION_MATTERS, &e); | |
1165 | if (r < 0) | |
1166 | log_unit_warning_errno(u, r, "Cannot add isolate job, ignoring: %s", bus_error_message(&e, r)); | |
1167 | } | |
1168 | ||
1169 | return 0; | |
1170 | } | |
1171 | ||
1172 | int transaction_add_triggering_jobs(Transaction *tr, Unit *u) { | |
1173 | Unit *trigger; | |
1174 | int r; | |
1175 | ||
1176 | assert(tr); | |
1177 | assert(u); | |
1178 | ||
1179 | UNIT_FOREACH_DEPENDENCY(trigger, u, UNIT_ATOM_TRIGGERED_BY) { | |
1180 | _cleanup_(sd_bus_error_free) sd_bus_error e = SD_BUS_ERROR_NULL; | |
1181 | ||
1182 | /* No need to stop inactive jobs */ | |
1183 | if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(trigger)) && !trigger->job) | |
1184 | continue; | |
1185 | ||
1186 | /* Is there already something listed for this? */ | |
1187 | if (hashmap_contains(tr->jobs, trigger)) | |
1188 | continue; | |
1189 | ||
1190 | r = transaction_add_job_and_dependencies(tr, JOB_STOP, trigger, tr->anchor_job, TRANSACTION_MATTERS, &e); | |
1191 | if (r < 0) | |
1192 | log_unit_warning_errno(u, r, "Cannot add triggered by job, ignoring: %s", bus_error_message(&e, r)); | |
1193 | } | |
1194 | ||
1195 | return 0; | |
1196 | } | |
1197 | ||
1198 | Transaction *transaction_new(bool irreversible) { | |
1199 | Transaction *tr; | |
1200 | ||
1201 | tr = new0(Transaction, 1); | |
1202 | if (!tr) | |
1203 | return NULL; | |
1204 | ||
1205 | tr->jobs = hashmap_new(NULL); | |
1206 | if (!tr->jobs) | |
1207 | return mfree(tr); | |
1208 | ||
1209 | tr->irreversible = irreversible; | |
1210 | ||
1211 | return tr; | |
1212 | } | |
1213 | ||
1214 | Transaction *transaction_free(Transaction *tr) { | |
1215 | if (!tr) | |
1216 | return NULL; | |
1217 | ||
1218 | assert(hashmap_isempty(tr->jobs)); | |
1219 | hashmap_free(tr->jobs); | |
1220 | ||
1221 | return mfree(tr); | |
1222 | } | |
1223 | ||
1224 | Transaction *transaction_abort_and_free(Transaction *tr) { | |
1225 | if (!tr) | |
1226 | return NULL; | |
1227 | ||
1228 | transaction_abort(tr); | |
1229 | ||
1230 | return transaction_free(tr); | |
1231 | } |