/* (git-blame table residue removed) */
1 | /* SPDX-License-Identifier: LGPL-2.1-or-later */ | |
2 | ||
3 | #include <fcntl.h> | |
4 | #include <unistd.h> | |
5 | ||
6 | #include "alloc-util.h" | |
7 | #include "bus-common-errors.h" | |
8 | #include "bus-error.h" | |
9 | #include "dbus-unit.h" | |
10 | #include "strv.h" | |
11 | #include "terminal-util.h" | |
12 | #include "transaction.h" | |
13 | ||
14 | static void transaction_unlink_job(Transaction *tr, Job *j, bool delete_dependencies); | |
15 | ||
16 | static void transaction_delete_job(Transaction *tr, Job *j, bool delete_dependencies) { | |
17 | assert(tr); | |
18 | assert(j); | |
19 | ||
20 | /* Deletes one job from the transaction */ | |
21 | ||
22 | transaction_unlink_job(tr, j, delete_dependencies); | |
23 | ||
24 | job_free(j); | |
25 | } | |
26 | ||
27 | static void transaction_delete_unit(Transaction *tr, Unit *u) { | |
28 | Job *j; | |
29 | ||
30 | /* Deletes all jobs associated with a certain unit from the | |
31 | * transaction */ | |
32 | ||
33 | while ((j = hashmap_get(tr->jobs, u))) | |
34 | transaction_delete_job(tr, j, true); | |
35 | } | |
36 | ||
37 | static void transaction_abort(Transaction *tr) { | |
38 | Job *j; | |
39 | ||
40 | assert(tr); | |
41 | ||
42 | while ((j = hashmap_first(tr->jobs))) | |
43 | transaction_delete_job(tr, j, false); | |
44 | ||
45 | assert(hashmap_isempty(tr->jobs)); | |
46 | } | |
47 | ||
static void transaction_find_jobs_that_matter_to_anchor(Job *j, unsigned generation) {
        assert(j);

        /* A recursive sweep through the graph that marks all units
         * that matter to the anchor job, i.e. are directly or
         * indirectly a dependency of the anchor job via paths that
         * are fully marked as mattering. */

        j->matters_to_anchor = true;
        /* Stamp with the current generation, so each job is visited at most
         * once per sweep and cycles terminate. */
        j->generation = generation;

        LIST_FOREACH(subject, l, j->subject_list) {

                /* This link does not matter */
                if (!l->matters)
                        continue;

                /* This unit has already been marked */
                if (l->object->generation == generation)
                        continue;

                transaction_find_jobs_that_matter_to_anchor(l->object, generation);
        }
}
72 | ||
static void transaction_merge_and_delete_job(Transaction *tr, Job *j, Job *other, JobType t) {
        JobDependency *last;

        assert(j);
        assert(other);
        assert(j->unit == other->unit);
        assert(!j->installed);

        /* Merges 'other' into 'j' and then deletes 'other'. The merged job
         * takes the combined type 't' and inherits the stronger of the two
         * jobs' irreversible/matters_to_anchor flags. */

        j->type = t;
        j->state = JOB_WAITING;
        j->irreversible = j->irreversible || other->irreversible;
        j->matters_to_anchor = j->matters_to_anchor || other->matters_to_anchor;

        /* Patch us in as new owner of the JobDependency objects */
        last = NULL;
        LIST_FOREACH(subject, l, other->subject_list) {
                assert(l->subject == other);
                l->subject = j;
                /* Remember the tail of other's list so we can splice below. */
                last = l;
        }

        /* Merge both lists: prepend other's entire subject list in front of
         * j's, by linking other's tail to j's head. */
        if (last) {
                last->subject_next = j->subject_list;
                if (j->subject_list)
                        j->subject_list->subject_prev = last;
                j->subject_list = other->subject_list;
        }

        /* Patch us in as new owner of the JobDependency objects */
        last = NULL;
        LIST_FOREACH(object, l, other->object_list) {
                assert(l->object == other);
                l->object = j;
                last = l;
        }

        /* Merge both lists (same splice as above, for the object side). */
        if (last) {
                last->object_next = j->object_list;
                if (j->object_list)
                        j->object_list->object_prev = last;
                j->object_list = other->object_list;
        }

        /* Kill the other job: clear its list heads first so that deleting it
         * does not free the JobDependency objects we just transferred to j. */
        other->subject_list = NULL;
        other->object_list = NULL;
        transaction_delete_job(tr, other, true);
}
125 | ||
126 | _pure_ static bool job_is_conflicted_by(Job *j) { | |
127 | assert(j); | |
128 | ||
129 | /* Returns true if this job is pulled in by a least one | |
130 | * ConflictedBy dependency. */ | |
131 | ||
132 | LIST_FOREACH(object, l, j->object_list) | |
133 | if (l->conflicts) | |
134 | return true; | |
135 | ||
136 | return false; | |
137 | } | |
138 | ||
static int delete_one_unmergeable_job(Transaction *tr, Job *job) {
        assert(job);

        /* Tries to delete one item in the linked list
         * j->transaction_next->transaction_next->... that conflicts
         * with another one, in an attempt to make an inconsistent
         * transaction work.
         *
         * Returns 0 if a job was dropped, -ENOEXEC if both conflicting jobs
         * matter to the anchor (nothing droppable), -EINVAL if no conflicting
         * pair was found at all. */

        /* We rely here on the fact that if a merged with b does not
         * merge with c, either a or b merge with c neither */
        LIST_FOREACH(transaction, j, job)
                LIST_FOREACH(transaction, k, j->transaction_next) {
                        Job *d;

                        /* Is this one mergeable? Then skip it */
                        if (job_type_is_mergeable(j->type, k->type))
                                continue;

                        /* Ok, we found two that conflict, let's see if we can
                         * drop one of them */
                        if (!j->matters_to_anchor && !k->matters_to_anchor) {

                                /* Both jobs don't matter, so let's
                                 * find the one that is smarter to
                                 * remove. Let's think positive and
                                 * rather remove stops then starts --
                                 * except if something is being
                                 * stopped because it is conflicted by
                                 * another unit in which case we
                                 * rather remove the start. */

                                log_unit_debug(j->unit,
                                               "Looking at job %s/%s conflicted_by=%s",
                                               j->unit->id, job_type_to_string(j->type),
                                               yes_no(j->type == JOB_STOP && job_is_conflicted_by(j)));
                                log_unit_debug(k->unit,
                                               "Looking at job %s/%s conflicted_by=%s",
                                               k->unit->id, job_type_to_string(k->type),
                                               yes_no(k->type == JOB_STOP && job_is_conflicted_by(k)));

                                if (j->type == JOB_STOP) {

                                        if (job_is_conflicted_by(j))
                                                d = k;
                                        else
                                                d = j;

                                } else if (k->type == JOB_STOP) {

                                        if (job_is_conflicted_by(k))
                                                d = j;
                                        else
                                                d = k;
                                } else
                                        d = j;

                        } else if (!j->matters_to_anchor)
                                d = j;
                        else if (!k->matters_to_anchor)
                                d = k;
                        else
                                return -ENOEXEC;

                        /* Ok, we can drop one, so let's do so. */
                        log_unit_debug(d->unit,
                                       "Fixing conflicting jobs %s/%s,%s/%s by deleting job %s/%s",
                                       j->unit->id, job_type_to_string(j->type),
                                       k->unit->id, job_type_to_string(k->type),
                                       d->unit->id, job_type_to_string(d->type));
                        transaction_delete_job(tr, d, true);
                        return 0;
                }

        return -EINVAL;
}
214 | ||
static int transaction_merge_jobs(Transaction *tr, sd_bus_error *e) {
        Job *j;
        int r;

        assert(tr);

        /* Collapses each unit's per-transaction job list into a single job.
         * Returns 0 on success, -EAGAIN if a conflicting job was dropped and
         * the caller should garbage collect and retry, or a bus error if the
         * conflict cannot be resolved. */

        /* First step, check whether any of the jobs for one specific
         * task conflict. If so, try to drop one of them. */
        HASHMAP_FOREACH(j, tr->jobs) {
                JobType t;

                t = j->type;
                LIST_FOREACH(transaction, k, j->transaction_next) {
                        if (job_type_merge_and_collapse(&t, k->type, j->unit) >= 0)
                                continue;

                        /* OK, we could not merge all jobs for this
                         * action. Let's see if we can get rid of one
                         * of them */

                        r = delete_one_unmergeable_job(tr, j);
                        if (r >= 0)
                                /* Ok, we managed to drop one, now
                                 * let's ask our callers to call us
                                 * again after garbage collecting */
                                return -EAGAIN;

                        /* We couldn't merge anything. Failure */
                        return sd_bus_error_setf(e, BUS_ERROR_TRANSACTION_JOBS_CONFLICTING,
                                                 "Transaction contains conflicting jobs '%s' and '%s' for %s. "
                                                 "Probably contradicting requirement dependencies configured.",
                                                 job_type_to_string(t),
                                                 job_type_to_string(k->type),
                                                 k->unit->id);
                }
        }

        /* Second step, merge the jobs. At this point merging is known to
         * succeed for every unit (checked above), hence the assert_se. */
        HASHMAP_FOREACH(j, tr->jobs) {
                JobType t = j->type;

                /* Merge all transaction jobs for j->unit */
                LIST_FOREACH(transaction, k, j->transaction_next)
                        assert_se(job_type_merge_and_collapse(&t, k->type, j->unit) == 0);

                /* Fold the list into one job. If the anchor job is in the
                 * list it must survive the merge, so merge into it instead
                 * of into the list head. */
                Job *k;
                while ((k = j->transaction_next)) {
                        if (tr->anchor_job == k) {
                                transaction_merge_and_delete_job(tr, k, j, t);
                                j = k;
                        } else
                                transaction_merge_and_delete_job(tr, j, k, t);
                }

                assert(!j->transaction_next);
                assert(!j->transaction_prev);
        }

        return 0;
}
275 | ||
static void transaction_drop_redundant(Transaction *tr) {
        bool again;

        /* Goes through the transaction and removes all jobs of the units whose jobs are all noops. If not
         * all of a unit's jobs are redundant, they are kept. */

        assert(tr);

        /* Deleting a job invalidates the hashmap iterator, hence restart the
         * scan from scratch after every deletion until a full pass makes no
         * changes. */
        do {
                Job *j;

                again = false;

                HASHMAP_FOREACH(j, tr->jobs) {
                        bool keep = false;

                        /* Keep the unit's jobs if any of them is the anchor,
                         * is not a no-op for the unit's current state, or
                         * conflicts with an already-installed job. */
                        LIST_FOREACH(transaction, k, j)
                                if (tr->anchor_job == k ||
                                    !job_type_is_redundant(k->type, unit_active_state(k->unit)) ||
                                    (k->unit->job && job_type_is_conflicting(k->type, k->unit->job->type))) {
                                        keep = true;
                                        break;
                                }

                        if (!keep) {
                                log_trace("Found redundant job %s/%s, dropping from transaction.",
                                          j->unit->id, job_type_to_string(j->type));
                                transaction_delete_job(tr, j, false);
                                again = true;
                                break;
                        }
                }
        } while (again);
}
310 | ||
311 | _pure_ static bool job_matters_to_anchor(Job *job) { | |
312 | assert(job); | |
313 | assert(!job->transaction_prev); | |
314 | ||
315 | /* Checks whether at least one of the jobs for this transaction matters to the anchor. */ | |
316 | ||
317 | LIST_FOREACH(transaction, j, job) | |
318 | if (j->matters_to_anchor) | |
319 | return true; | |
320 | ||
321 | return false; | |
322 | } | |
323 | ||
static char* merge_unit_ids(const char* unit_log_field, char * const* pairs) {
        _cleanup_free_ char *ans = NULL;
        size_t size = 0;

        assert(unit_log_field);

        /* Builds a newline-separated string of "<unit_log_field><unit_id>"
         * entries from the (unit_id, job_type) pairs. The job_type halves of
         * the pairs are ignored here. Returns NULL on allocation failure. */

        STRV_FOREACH_PAIR(unit_id, job_type, pairs) {
                size_t next;

                /* Overwrite the previous entry's NUL terminator with a
                 * newline separator; the new entry's NUL is written below. */
                if (size > 0)
                        ans[size - 1] = '\n';

                next = strlen(unit_log_field) + strlen(*unit_id);
                if (!GREEDY_REALLOC(ans, size + next + 1))
                        return NULL;

                sprintf(ans + size, "%s%s", unit_log_field, *unit_id);
                size += next + 1;
        }

        /* Empty input yields an empty string rather than NULL, so callers can
         * distinguish "nothing to report" from OOM. */
        if (!ans)
                return strdup("");

        return TAKE_PTR(ans);
}
349 | ||
static int transaction_verify_order_one(Transaction *tr, Job *j, Job *from, unsigned generation, sd_bus_error *e) {

        static const UnitDependencyAtom directions[] = {
                UNIT_ATOM_BEFORE,
                UNIT_ATOM_AFTER,
        };

        int r;

        assert(tr);
        assert(j);
        assert(!j->transaction_prev);

        /* Does a recursive sweep through the ordering graph, looking for a cycle. If we find a cycle we try
         * to break it.
         *
         * Returns 0 if the graph below j is loop-free, -EAGAIN if a cycle was
         * broken by deleting a job (caller should re-verify), or a bus error
         * if an unbreakable cycle was found. */

        /* Have we seen this before? */
        if (j->generation == generation) {
                Job *k, *delete = NULL;
                _cleanup_free_ char **array = NULL, *unit_ids = NULL;

                /* If the marker is NULL we have been here already and decided the job was loop-free from
                 * here. Hence shortcut things and return right-away. */
                if (!j->marker)
                        return 0;

                /* So, the marker is not NULL and we already have been here. We have a cycle. Let's try to
                 * break it. We go backwards in our path and try to find a suitable job to remove. We use the
                 * marker to find our way back, since smart how we are we stored our way back in there. */
                for (k = from; k; k = ((k->generation == generation && k->marker != k) ? k->marker : NULL)) {

                        /* For logging below */
                        if (strv_push_pair(&array, k->unit->id, (char*) job_type_to_string(k->type)) < 0)
                                log_oom();

                        /* Prefer the first job on the path that is part of
                         * this transaction and doesn't matter to the anchor. */
                        if (!delete && hashmap_contains(tr->jobs, k->unit) && !job_matters_to_anchor(k))
                                /* Ok, we can drop this one, so let's do so. */
                                delete = k;

                        /* Check if this in fact was the beginning of the cycle */
                        if (k == j)
                                break;
                }

                unit_ids = merge_unit_ids(j->manager->unit_log_field, array); /* ignore error */

                STRV_FOREACH_PAIR(unit_id, job_type, array)
                        /* logging for j not k here to provide a consistent narrative */
                        log_struct(LOG_WARNING,
                                   LOG_UNIT_MESSAGE(j->unit,
                                                    "Found %s on %s/%s",
                                                    unit_id == array ? "ordering cycle" : "dependency",
                                                    *unit_id, *job_type),
                                   "%s", strna(unit_ids));

                if (delete) {
                        const char *status;
                        /* logging for j not k here to provide a consistent narrative */
                        log_struct(LOG_ERR,
                                   LOG_UNIT_MESSAGE(j->unit,
                                                    "Job %s/%s deleted to break ordering cycle starting with %s/%s",
                                                    delete->unit->id, job_type_to_string(delete->type),
                                                    j->unit->id, job_type_to_string(j->type)),
                                   "%s", strna(unit_ids));

                        if (log_get_show_color())
                                status = ANSI_HIGHLIGHT_RED " SKIP " ANSI_NORMAL;
                        else
                                status = " SKIP ";

                        unit_status_printf(delete->unit,
                                           STATUS_TYPE_NOTICE,
                                           status,
                                           "Ordering cycle found, skipping %s",
                                           unit_status_string(delete->unit, NULL));
                        transaction_delete_unit(tr, delete->unit);
                        return -EAGAIN;
                }

                log_struct(LOG_ERR,
                           LOG_UNIT_MESSAGE(j->unit, "Unable to break cycle starting with %s/%s",
                                            j->unit->id, job_type_to_string(j->type)),
                           "%s", strna(unit_ids));

                return sd_bus_error_setf(e, BUS_ERROR_TRANSACTION_ORDER_IS_CYCLIC,
                                         "Transaction order is cyclic. See system logs for details.");
        }

        /* Make the marker point to where we come from, so that we can
         * find our way backwards if we want to break a cycle. We use
         * a special marker for the beginning: we point to
         * ourselves. */
        j->marker = from ?: j;
        j->generation = generation;

        /* Actual ordering of jobs depends on the unit ordering dependency and job types. We need to traverse
         * the graph over 'before' edges in the actual job execution order. We traverse over both unit
         * ordering dependencies and we test with job_compare() whether it is the 'before' edge in the job
         * execution ordering. */
        for (size_t d = 0; d < ELEMENTSOF(directions); d++) {
                Unit *u;

                UNIT_FOREACH_DEPENDENCY(u, j->unit, directions[d]) {
                        Job *o;

                        /* Is there a job for this unit? */
                        o = hashmap_get(tr->jobs, u);
                        if (!o) {
                                /* Ok, there is no job for this in the transaction, but maybe there is
                                 * already one running? */
                                o = u->job;
                                if (!o)
                                        continue;
                        }

                        /* Cut traversing if the job j is not really *before* o. */
                        if (job_compare(j, o, directions[d]) >= 0)
                                continue;

                        r = transaction_verify_order_one(tr, o, j, generation, e);
                        if (r < 0)
                                return r;
                }
        }

        /* Ok, let's backtrack, and remember that this entry is not on
         * our path anymore. */
        j->marker = NULL;

        return 0;
}
481 | ||
static int transaction_verify_order(Transaction *tr, unsigned *generation, sd_bus_error *e) {
        Job *j;
        int r;
        unsigned g;

        assert(tr);
        assert(generation);

        /* Check if the ordering graph is cyclic. If it is, try to fix
         * that up by dropping one of the jobs. Returns 0 on success, or the
         * first error from transaction_verify_order_one() (-EAGAIN means a
         * cycle was broken and the caller should retry). */

        /* Consume one generation number so markers from previous sweeps are
         * never mistaken for this one. */
        g = (*generation)++;

        HASHMAP_FOREACH(j, tr->jobs) {
                r = transaction_verify_order_one(tr, j, NULL, g, e);
                if (r < 0)
                        return r;
        }

        return 0;
}
503 | ||
static void transaction_collect_garbage(Transaction *tr) {
        bool again;

        assert(tr);

        /* Drop jobs that are not required by any other job, i.e. jobs whose
         * object_list is empty. The anchor job is always kept. Deleting a job
         * invalidates the hashmap iterator, hence restart the scan after each
         * deletion until a full pass deletes nothing. */

        do {
                Job *j;

                again = false;

                HASHMAP_FOREACH(j, tr->jobs) {
                        if (tr->anchor_job == j)
                                continue;

                        if (!j->object_list) {
                                log_trace("Garbage collecting job %s/%s", j->unit->id, job_type_to_string(j->type));
                                transaction_delete_job(tr, j, true);
                                again = true;
                                break;
                        }

                        log_trace("Keeping job %s/%s because of %s/%s",
                                  j->unit->id, job_type_to_string(j->type),
                                  j->object_list->subject ? j->object_list->subject->unit->id : "root",
                                  j->object_list->subject ? job_type_to_string(j->object_list->subject->type) : "root");
                }

        } while (again);
}
535 | ||
static int transaction_is_destructive(Transaction *tr, JobMode mode, sd_bus_error *e) {
        Job *j;

        assert(tr);

        /* Checks whether applying this transaction means that
         * existing jobs would be replaced. Returns 0 if the transaction is
         * safe to apply, or a BUS_ERROR_TRANSACTION_IS_DESTRUCTIVE bus error
         * if an installed job would be overridden in JOB_FAIL mode or is
         * marked irreversible. */

        HASHMAP_FOREACH(j, tr->jobs) {

                /* Assume merged */
                assert(!j->transaction_prev);
                assert(!j->transaction_next);

                if (j->unit->job && (mode == JOB_FAIL || j->unit->job->irreversible) &&
                    job_type_is_conflicting(j->unit->job->type, j->type))
                        return sd_bus_error_setf(e, BUS_ERROR_TRANSACTION_IS_DESTRUCTIVE,
                                                 "Transaction for %s/%s is destructive (%s has '%s' job queued, but '%s' is included in transaction).",
                                                 tr->anchor_job->unit->id, job_type_to_string(tr->anchor_job->type),
                                                 j->unit->id, job_type_to_string(j->unit->job->type), job_type_to_string(j->type));
        }

        return 0;
}
560 | ||
static void transaction_minimize_impact(Transaction *tr) {
        Job *head;

        assert(tr);

        /* Drops all unnecessary jobs that reverse already active jobs
         * or that stop a running service. Only jobs that do not matter to the
         * anchor may be dropped. */

rescan:
        /* Deleting a job invalidates both iterators, hence restart the whole
         * scan after every deletion. */
        HASHMAP_FOREACH(head, tr->jobs) {
                LIST_FOREACH(transaction, j, head) {
                        bool stops_running_service, changes_existing_job;

                        /* If it matters, we shouldn't drop it */
                        if (j->matters_to_anchor)
                                continue;

                        /* Would this stop a running service?
                         * Would this change an existing job?
                         * If so, let's drop this entry */

                        stops_running_service =
                                j->type == JOB_STOP && UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(j->unit));

                        changes_existing_job =
                                j->unit->job &&
                                job_type_is_conflicting(j->type, j->unit->job->type);

                        if (!stops_running_service && !changes_existing_job)
                                continue;

                        if (stops_running_service)
                                log_unit_debug(j->unit,
                                               "%s/%s would stop a running service.",
                                               j->unit->id, job_type_to_string(j->type));

                        if (changes_existing_job)
                                log_unit_debug(j->unit,
                                               "%s/%s would change existing job.",
                                               j->unit->id, job_type_to_string(j->type));

                        /* Ok, let's get rid of this */
                        log_unit_debug(j->unit,
                                       "Deleting %s/%s to minimize impact.",
                                       j->unit->id, job_type_to_string(j->type));

                        transaction_delete_job(tr, j, true);
                        goto rescan;
                }
        }
}
612 | ||
static int transaction_apply(
                Transaction *tr,
                Manager *m,
                JobMode mode,
                Set *affected_jobs) {

        Job *j;
        int r;

        /* Moves the transaction jobs to the set of active jobs managed by m.
         * On success the transaction's job map is left empty; on allocation
         * failure everything added to m->jobs is rolled back and a negative
         * errno is returned. */

        if (IN_SET(mode, JOB_ISOLATE, JOB_FLUSH)) {

                /* When isolating first kill all installed jobs which
                 * aren't part of the new transaction */
                HASHMAP_FOREACH(j, m->jobs) {
                        assert(j->installed);

                        if (j->unit->ignore_on_isolate)
                                continue;

                        if (hashmap_contains(tr->jobs, j->unit))
                                continue;

                        /* Not invalidating recursively. Avoids triggering
                         * OnFailure= actions of dependent jobs. Also avoids
                         * invalidating our iterator. */
                        job_finish_and_invalidate(j, JOB_CANCELED, false, false);
                }
        }

        /* Register every transaction job in the manager's job table first, so
         * that a mid-way allocation failure can be rolled back cleanly. */
        HASHMAP_FOREACH(j, tr->jobs) {
                /* Assume merged */
                assert(!j->transaction_prev);
                assert(!j->transaction_next);

                r = hashmap_ensure_put(&m->jobs, NULL, UINT32_TO_PTR(j->id), j);
                if (r < 0)
                        goto rollback;
        }

        /* Now hand ownership of each job over to the manager. */
        while ((j = hashmap_steal_first(tr->jobs))) {
                Job *installed_job;

                /* Clean the job dependencies */
                transaction_unlink_job(tr, j, false);

                /* When RestartMode=direct is used, the service being restarted don't enter the inactive/failed
                 * state, i.e. unit_process_job -> job_finish_and_invalidate is never called, and the previous
                 * job might still be running (especially for Type=oneshot services). We need to refuse
                 * late merge and re-enqueue the anchor job. */
                installed_job = job_install(j,
                                            /* refuse_late_merge = */ mode == JOB_RESTART_DEPENDENCIES && j == tr->anchor_job);
                if (installed_job != j) {
                        /* j has been merged into a previously installed job */
                        if (tr->anchor_job == j)
                                tr->anchor_job = installed_job;

                        hashmap_remove_value(m->jobs, UINT32_TO_PTR(j->id), j);
                        /* Frees j and continues with the surviving job. */
                        free_and_replace_full(j, installed_job, job_free);
                }

                job_add_to_run_queue(j);
                job_add_to_dbus_queue(j);
                job_start_timer(j, false);
                job_shutdown_magic(j);

                /* When 'affected' is specified, let's track all in it all jobs that were touched because of
                 * this transaction. */
                if (affected_jobs)
                        (void) set_put(affected_jobs, j);
        }

        return 0;

rollback:

        /* Undo the registrations done above; the jobs themselves remain owned
         * by the transaction. */
        HASHMAP_FOREACH(j, tr->jobs)
                hashmap_remove_value(m->jobs, UINT32_TO_PTR(j->id), j);

        return r;
}
695 | ||
int transaction_activate(
                Transaction *tr,
                Manager *m,
                JobMode mode,
                Set *affected_jobs,
                sd_bus_error *e) {

        Job *j;
        int r;
        unsigned generation = 1;

        assert(tr);

        /* This applies the changes recorded in tr->jobs to
         * the actual list of jobs, if possible. Returns 0 on success, a
         * negative errno (with 'e' set where applicable) on failure. */

        /* Reset the generation counter of all installed jobs. The detection of cycles
         * looks at installed jobs. If they had a non-zero generation from some previous
         * walk of the graph, the algorithm would break. */
        HASHMAP_FOREACH(j, m->jobs)
                j->generation = 0;

        /* First step: figure out which jobs matter */
        transaction_find_jobs_that_matter_to_anchor(tr->anchor_job, generation++);

        /* Second step: Try not to stop any running services if
         * we don't have to. Don't try to reverse running
         * jobs if we don't have to. */
        if (mode == JOB_FAIL)
                transaction_minimize_impact(tr);

        /* Third step: Drop redundant jobs */
        transaction_drop_redundant(tr);

        for (;;) {
                /* Fourth step: Let's remove unneeded jobs that might
                 * be lurking. */
                if (mode != JOB_ISOLATE)
                        transaction_collect_garbage(tr);

                /* Fifth step: verify order makes sense and correct
                 * cycles if necessary and possible */
                r = transaction_verify_order(tr, &generation, e);
                if (r >= 0)
                        break;

                if (r != -EAGAIN)
                        return log_warning_errno(r, "Requested transaction contains an unfixable cyclic ordering dependency: %s", bus_error_message(e, r));

                /* Let's see if the resulting transaction ordering
                 * graph is still cyclic... (-EAGAIN means a cycle was broken
                 * by deleting a job, so re-verify from scratch.) */
        }

        for (;;) {
                /* Sixth step: let's drop unmergeable entries if
                 * necessary and possible, merge entries we can
                 * merge */
                r = transaction_merge_jobs(tr, e);
                if (r >= 0)
                        break;

                if (r != -EAGAIN)
                        return log_warning_errno(r, "Requested transaction contains unmergeable jobs: %s", bus_error_message(e, r));

                /* Seventh step: an entry got dropped, let's garbage
                 * collect its dependencies. */
                if (mode != JOB_ISOLATE)
                        transaction_collect_garbage(tr);

                /* Let's see if the resulting transaction still has
                 * unmergeable entries ... */
        }

        /* Eights step: Drop redundant jobs again, if the merging now allows us to drop more. */
        transaction_drop_redundant(tr);

        /* Ninth step: check whether we can actually apply this */
        r = transaction_is_destructive(tr, mode, e);
        if (r < 0)
                return log_notice_errno(r, "Requested transaction contradicts existing jobs: %s", bus_error_message(e, r));

        /* Tenth step: apply changes */
        r = transaction_apply(tr, m, mode, affected_jobs);
        if (r < 0)
                return log_warning_errno(r, "Failed to apply transaction: %m");

        assert(hashmap_isempty(tr->jobs));

        if (!hashmap_isempty(m->jobs)) {
                /* Are there any jobs now? Then make sure we have the
                 * idle pipe around. We don't really care too much
                 * whether this works or not, as the idle pipe is a
                 * feature for cosmetics, not actually useful for
                 * anything beyond that. */

                if (m->idle_pipe[0] < 0 && m->idle_pipe[1] < 0 &&
                    m->idle_pipe[2] < 0 && m->idle_pipe[3] < 0) {
                        (void) pipe2(m->idle_pipe, O_NONBLOCK|O_CLOEXEC);
                        (void) pipe2(m->idle_pipe + 2, O_NONBLOCK|O_CLOEXEC);
                }
        }

        return 0;
}
800 | ||
static Job* transaction_add_one_job(Transaction *tr, JobType type, Unit *unit, bool *is_new) {
        Job *j, *f;

        assert(tr);
        assert(unit);

        /* Looks for an existing prospective job and returns that. If
         * it doesn't exist it is created and added to the prospective
         * jobs list. Returns NULL on allocation failure. If is_new is
         * non-NULL it is set to whether a new job had to be created. */

        f = hashmap_get(tr->jobs, unit);

        /* Reuse an existing job of the same type for this unit, if any. */
        LIST_FOREACH(transaction, i, f) {
                assert(i->unit == unit);

                if (i->type == type) {
                        if (is_new)
                                *is_new = false;
                        return i;
                }
        }

        j = job_new(unit, type);
        if (!j)
                return NULL;

        j->generation = 0;
        j->marker = NULL;
        j->matters_to_anchor = false;
        j->irreversible = tr->irreversible;

        /* The new job becomes the head of the unit's per-transaction list,
         * which the hashmap stores keyed by unit. */
        LIST_PREPEND(transaction, f, j);

        if (hashmap_replace(tr->jobs, unit, f) < 0) {
                /* Roll back the list manipulation before freeing. */
                LIST_REMOVE(transaction, f, j);
                job_free(j);
                return NULL;
        }

        if (is_new)
                *is_new = true;

        log_trace("Added job %s/%s to transaction.", unit->id, job_type_to_string(type));

        return j;
}
847 | ||
static void transaction_unlink_job(Transaction *tr, Job *j, bool delete_dependencies) {
        assert(tr);
        assert(j);

        /* Detaches job j from the transaction: removes it from its unit's
         * per-transaction list (updating the hashmap if j was the head) and
         * frees all its dependency links. If delete_dependencies is true,
         * jobs that were pulled in only via a 'matters' link from j are
         * deleted recursively. */

        if (j->transaction_prev)
                j->transaction_prev->transaction_next = j->transaction_next;
        else if (j->transaction_next)
                /* j was the list head: promote its successor in the hashmap. */
                hashmap_replace(tr->jobs, j->unit, j->transaction_next);
        else
                hashmap_remove_value(tr->jobs, j->unit, j);

        if (j->transaction_next)
                j->transaction_next->transaction_prev = j->transaction_prev;

        j->transaction_prev = j->transaction_next = NULL;

        while (j->subject_list)
                job_dependency_free(j->subject_list);

        while (j->object_list) {
                /* Remember the job that required us via this link, before the
                 * link itself is freed. */
                Job *other = j->object_list->matters ? j->object_list->subject : NULL;

                job_dependency_free(j->object_list);

                if (other && delete_dependencies) {
                        log_unit_debug(other->unit,
                                       "Deleting job %s/%s as dependency of job %s/%s",
                                       other->unit->id, job_type_to_string(other->type),
                                       j->unit->id, job_type_to_string(j->type));
                        transaction_delete_job(tr, other, delete_dependencies);
                }
        }
}
881 | ||
void transaction_add_propagate_reload_jobs(
                Transaction *tr,
                Unit *unit,
                Job *by,
                TransactionAddFlags flags) {

        JobType nt;
        Unit *dep;
        int r;

        assert(tr);
        assert(unit);

        /* Enqueues try-reload jobs for every unit that 'unit' propagates
         * reloads to. Failures to add a dependent job are logged and
         * ignored, i.e. best effort. */

        UNIT_FOREACH_DEPENDENCY(dep, unit, UNIT_ATOM_PROPAGATES_RELOAD_TO) {
                _cleanup_(sd_bus_error_free) sd_bus_error e = SD_BUS_ERROR_NULL;

                /* Collapse JOB_TRY_RELOAD to the concrete job type for this
                 * unit; JOB_NOP means a reload is not applicable. */
                nt = job_type_collapse(JOB_TRY_RELOAD, dep);
                if (nt == JOB_NOP)
                        continue;

                r = transaction_add_job_and_dependencies(tr, nt, dep, by, flags, &e);
                if (r < 0)
                        log_unit_warning(dep,
                                         "Cannot add dependency reload job, ignoring: %s",
                                         bus_error_message(&e, r));
        }
}
909 | ||
910 | static JobType job_type_propagate_stop_graceful(Job *j) { | |
911 | JobType type; | |
912 | ||
913 | if (!j) | |
914 | return JOB_STOP; | |
915 | ||
916 | type = JOB_STOP; | |
917 | ||
918 | LIST_FOREACH(transaction, i, j) | |
919 | switch (i->type) { | |
920 | ||
921 | case JOB_STOP: | |
922 | case JOB_RESTART: | |
923 | /* Nothing to worry about, an appropriate job is in-place */ | |
924 | return JOB_NOP; | |
925 | ||
926 | case JOB_START: | |
927 | /* This unit is pulled in by other dependency types in this transaction. We will run | |
928 | * into job type conflict if we enqueue a stop job, so let's enqueue a restart job | |
929 | * instead. */ | |
930 | type = JOB_RESTART; | |
931 | break; | |
932 | ||
933 | default: /* We don't care about others */ | |
934 | ; | |
935 | ||
936 | } | |
937 | ||
938 | return type; | |
939 | } | |
940 | ||
/* Adds a job of @type for @unit to transaction @tr, then recursively adds jobs for the unit's
 * requirement and propagation dependencies. @by is the requesting ("parent") job, or NULL when this
 * is the anchor job of the transaction. @flags controls linkage (whether the dependency "matters",
 * conflicts, ignores ordering, …). Returns 0 on success, a negative errno-style error otherwise;
 * @e may carry a D-Bus error with details. */
int transaction_add_job_and_dependencies(
                Transaction *tr,
                JobType type,
                Unit *unit,
                Job *by,
                TransactionAddFlags flags,
                sd_bus_error *e) {

        bool is_new;
        Job *ret;
        int r;

        assert(tr);
        assert(type < _JOB_TYPE_MAX);
        assert(type < _JOB_TYPE_MAX_IN_TRANSACTION);
        assert(unit);

        /* Before adding jobs for this unit, let's ensure that its state has been loaded. This matters when
         * jobs are spawned as part of coldplugging itself (see e.g. path_coldplug()). This way, we
         * "recursively" coldplug units, ensuring that we do not look at state of not-yet-coldplugged
         * units. */
        if (MANAGER_IS_RELOADING(unit->manager))
                unit_coldplug(unit);

        if (by)
                log_trace("Pulling in %s/%s from %s/%s", unit->id, job_type_to_string(type), by->unit->id, job_type_to_string(by->type));

        /* Safety check that the unit is a valid state, i.e. not in UNIT_STUB or UNIT_MERGED which should only be set
         * temporarily. */
        if (!UNIT_IS_LOAD_COMPLETE(unit->load_state))
                return sd_bus_error_setf(e, BUS_ERROR_LOAD_FAILED, "Unit %s is not loaded properly.", unit->id);

        /* Stop jobs are allowed even for units that failed to load — everything else needs a valid load
         * state. */
        if (type != JOB_STOP) {
                r = bus_unit_validate_load_state(unit, e);
                /* The time-based cache allows to start new units without daemon-reload, but if they are
                 * already referenced (because of dependencies or ordering) then we have to force a load of
                 * the fragment. As an optimization, check first if anything in the usual paths was modified
                 * since the last time the cache was loaded. Also check if the last time an attempt to load
                 * the unit was made was before the most recent cache refresh, so that we know we need to try
                 * again — even if the cache is current, it might have been updated in a different context
                 * before we had a chance to retry loading this particular unit.
                 *
                 * Given building up the transaction is a synchronous operation, attempt
                 * to load the unit immediately. */
                if (r < 0 && manager_unit_cache_should_retry_load(unit)) {
                        sd_bus_error_free(e);
                        /* Reset to UNIT_STUB so unit_load() actually reloads from disk. */
                        unit->load_state = UNIT_STUB;
                        r = unit_load(unit);
                        if (r < 0 || unit->load_state == UNIT_STUB)
                                unit->load_state = UNIT_NOT_FOUND;
                        r = bus_unit_validate_load_state(unit, e);
                }
                if (r < 0)
                        return r;
        }

        if (!unit_job_is_applicable(unit, type))
                return sd_bus_error_setf(e, BUS_ERROR_JOB_TYPE_NOT_APPLICABLE,
                                         "Job type %s is not applicable for unit %s.",
                                         job_type_to_string(type), unit->id);

        /* First add the job. */
        ret = transaction_add_one_job(tr, type, unit, &is_new);
        if (!ret)
                return -ENOMEM;

        if (FLAGS_SET(flags, TRANSACTION_IGNORE_ORDER))
                ret->ignore_order = true;

        /* Then, add a link to the job. */
        if (by) {
                if (!job_dependency_new(by, ret, FLAGS_SET(flags, TRANSACTION_MATTERS), FLAGS_SET(flags, TRANSACTION_CONFLICTS)))
                        return -ENOMEM;
        } else {
                /* If the job has no parent job, it is the anchor job. */
                assert(!tr->anchor_job);
                tr->anchor_job = ret;
        }

        /* If the job already existed in the transaction, or the caller asked us to skip dependency
         * handling, we are done — the recursion below would only repeat work (or loop). */
        if (!is_new || FLAGS_SET(flags, TRANSACTION_IGNORE_REQUIREMENTS) || type == JOB_NOP)
                return 0;

        _cleanup_set_free_ Set *following = NULL;
        Unit *dep;

        /* If we are following some other unit, make sure we add all dependencies of everybody following. */
        if (unit_following_set(ret->unit, &following) > 0)
                SET_FOREACH(dep, following) {
                        /* Note: only the IGNORE_ORDER bit of @flags is forwarded here. */
                        r = transaction_add_job_and_dependencies(tr, type, dep, ret, flags & TRANSACTION_IGNORE_ORDER, e);
                        if (r < 0) {
                                log_unit_full_errno(dep, r == -ERFKILL ? LOG_INFO : LOG_WARNING, r,
                                                    "Cannot add dependency job, ignoring: %s",
                                                    bus_error_message(e, r));
                                sd_bus_error_free(e);
                        }
                }

        /* Finally, recursively add in all dependencies. */
        if (IN_SET(type, JOB_START, JOB_RESTART)) {
                /* Hard requirements: failure to add these (other than "type not applicable", -EBADR)
                 * aborts the whole job via the fail path below. */
                UNIT_FOREACH_DEPENDENCY(dep, ret->unit, UNIT_ATOM_PULL_IN_START) {
                        r = transaction_add_job_and_dependencies(tr, JOB_START, dep, ret, TRANSACTION_MATTERS | (flags & TRANSACTION_IGNORE_ORDER), e);
                        if (r < 0) {
                                if (r != -EBADR) /* job type not applicable */
                                        goto fail;

                                sd_bus_error_free(e);
                        }
                }

                /* Soft requirements (e.g. Wants=): failures are logged and ignored. */
                UNIT_FOREACH_DEPENDENCY(dep, ret->unit, UNIT_ATOM_PULL_IN_START_IGNORED) {
                        r = transaction_add_job_and_dependencies(tr, JOB_START, dep, ret, flags & TRANSACTION_IGNORE_ORDER, e);
                        if (r < 0) {
                                /* unit masked, job type not applicable and unit not found are not considered
                                 * as errors. */
                                log_unit_full_errno(dep,
                                                    IN_SET(r, -ERFKILL, -EBADR, -ENOENT) ? LOG_DEBUG : LOG_WARNING,
                                                    r, "Cannot add dependency job, ignoring: %s",
                                                    bus_error_message(e, r));
                                sd_bus_error_free(e);
                        }
                }

                UNIT_FOREACH_DEPENDENCY(dep, ret->unit, UNIT_ATOM_PULL_IN_VERIFY) {
                        r = transaction_add_job_and_dependencies(tr, JOB_VERIFY_ACTIVE, dep, ret, TRANSACTION_MATTERS | (flags & TRANSACTION_IGNORE_ORDER), e);
                        if (r < 0) {
                                if (r != -EBADR) /* job type not applicable */
                                        goto fail;

                                sd_bus_error_free(e);
                        }
                }

                /* Conflicting units must be stopped; this also records the conflict link. */
                UNIT_FOREACH_DEPENDENCY(dep, ret->unit, UNIT_ATOM_PULL_IN_STOP) {
                        r = transaction_add_job_and_dependencies(tr, JOB_STOP, dep, ret, TRANSACTION_MATTERS | TRANSACTION_CONFLICTS | (flags & TRANSACTION_IGNORE_ORDER), e);
                        if (r < 0) {
                                if (r != -EBADR) /* job type not applicable */
                                        goto fail;

                                sd_bus_error_free(e);
                        }
                }

                UNIT_FOREACH_DEPENDENCY(dep, ret->unit, UNIT_ATOM_PULL_IN_STOP_IGNORED) {
                        r = transaction_add_job_and_dependencies(tr, JOB_STOP, dep, ret, flags & TRANSACTION_IGNORE_ORDER, e);
                        if (r < 0) {
                                log_unit_warning(dep,
                                                 "Cannot add dependency job, ignoring: %s",
                                                 bus_error_message(e, r));
                                sd_bus_error_free(e);
                        }
                }
        }

        if (IN_SET(type, JOB_RESTART, JOB_STOP) || (type == JOB_START && FLAGS_SET(flags, TRANSACTION_PROPAGATE_START_AS_RESTART))) {
                bool is_stop = type == JOB_STOP;

                UNIT_FOREACH_DEPENDENCY(dep, ret->unit, is_stop ? UNIT_ATOM_PROPAGATE_STOP : UNIT_ATOM_PROPAGATE_RESTART) {
                        /* We propagate RESTART only as TRY_RESTART, in order not to start dependencies that
                         * are not around. */
                        JobType nt;

                        nt = job_type_collapse(is_stop ? JOB_STOP : JOB_TRY_RESTART, dep);
                        if (nt == JOB_NOP)
                                continue;

                        r = transaction_add_job_and_dependencies(tr, nt, dep, ret, TRANSACTION_MATTERS | (flags & TRANSACTION_IGNORE_ORDER), e);
                        if (r < 0) {
                                if (r != -EBADR) /* job type not applicable */
                                        return r;

                                sd_bus_error_free(e);
                        }
                }

                /* Process UNIT_ATOM_PROPAGATE_STOP_GRACEFUL (PropagatesStopTo=) units. We need to wait until
                 * all other dependencies are processed, i.e. we're the anchor job or already in the recursion
                 * that handles it. */
                if (!by || FLAGS_SET(flags, TRANSACTION_PROCESS_PROPAGATE_STOP_GRACEFUL))
                        UNIT_FOREACH_DEPENDENCY(dep, ret->unit, UNIT_ATOM_PROPAGATE_STOP_GRACEFUL) {
                                JobType nt;
                                Job *j;

                                /* Pick the propagated type based on what's already queued for the target. */
                                j = hashmap_get(tr->jobs, dep);
                                nt = job_type_propagate_stop_graceful(j);

                                if (nt == JOB_NOP)
                                        continue;

                                r = transaction_add_job_and_dependencies(tr, nt, dep, ret, TRANSACTION_MATTERS | (flags & TRANSACTION_IGNORE_ORDER) | TRANSACTION_PROCESS_PROPAGATE_STOP_GRACEFUL, e);
                                if (r < 0) {
                                        if (r != -EBADR) /* job type not applicable */
                                                return r;

                                        sd_bus_error_free(e);
                                }
                        }
        }

        if (type == JOB_RELOAD)
                transaction_add_propagate_reload_jobs(tr, ret->unit, ret, flags & TRANSACTION_IGNORE_ORDER);

        /* JOB_VERIFY_ACTIVE requires no dependency handling */

        return 0;

fail:
        /* Recursive call failed to add required jobs so let's drop top level job as well. */
        log_unit_debug_errno(unit, r, "Cannot add dependency job to transaction, deleting job %s/%s again: %s",
                             unit->id, job_type_to_string(type), bus_error_message(e, r));

        transaction_delete_job(tr, ret, /* delete_dependencies= */ false);
        return r;
}
1154 | ||
1155 | static bool shall_stop_on_isolate(Transaction *tr, Unit *u) { | |
1156 | assert(tr); | |
1157 | assert(u); | |
1158 | ||
1159 | if (u->ignore_on_isolate) | |
1160 | return false; | |
1161 | ||
1162 | /* Is there already something listed for this? */ | |
1163 | if (hashmap_contains(tr->jobs, u)) | |
1164 | return false; | |
1165 | ||
1166 | return true; | |
1167 | } | |
1168 | ||
1169 | int transaction_add_isolate_jobs(Transaction *tr, Manager *m) { | |
1170 | Unit *u; | |
1171 | char *k; | |
1172 | int r; | |
1173 | ||
1174 | assert(tr); | |
1175 | assert(m); | |
1176 | ||
1177 | HASHMAP_FOREACH_KEY(u, k, m->units) { | |
1178 | _cleanup_(sd_bus_error_free) sd_bus_error e = SD_BUS_ERROR_NULL; | |
1179 | Unit *o; | |
1180 | ||
1181 | /* Ignore aliases */ | |
1182 | if (u->id != k) | |
1183 | continue; | |
1184 | ||
1185 | /* No need to stop inactive units */ | |
1186 | if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(u)) && !u->job) | |
1187 | continue; | |
1188 | ||
1189 | if (!shall_stop_on_isolate(tr, u)) | |
1190 | continue; | |
1191 | ||
1192 | /* Keep units that are triggered by units we want to keep around. */ | |
1193 | bool keep = false; | |
1194 | UNIT_FOREACH_DEPENDENCY(o, u, UNIT_ATOM_TRIGGERED_BY) | |
1195 | if (!shall_stop_on_isolate(tr, o)) { | |
1196 | keep = true; | |
1197 | break; | |
1198 | } | |
1199 | if (keep) | |
1200 | continue; | |
1201 | ||
1202 | r = transaction_add_job_and_dependencies(tr, JOB_STOP, u, tr->anchor_job, TRANSACTION_MATTERS, &e); | |
1203 | if (r < 0) | |
1204 | log_unit_warning_errno(u, r, "Cannot add isolate job, ignoring: %s", bus_error_message(&e, r)); | |
1205 | } | |
1206 | ||
1207 | return 0; | |
1208 | } | |
1209 | ||
1210 | int transaction_add_triggering_jobs(Transaction *tr, Unit *u) { | |
1211 | Unit *trigger; | |
1212 | int r; | |
1213 | ||
1214 | assert(tr); | |
1215 | assert(u); | |
1216 | ||
1217 | UNIT_FOREACH_DEPENDENCY(trigger, u, UNIT_ATOM_TRIGGERED_BY) { | |
1218 | _cleanup_(sd_bus_error_free) sd_bus_error e = SD_BUS_ERROR_NULL; | |
1219 | ||
1220 | /* No need to stop inactive jobs */ | |
1221 | if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(trigger)) && !trigger->job) | |
1222 | continue; | |
1223 | ||
1224 | /* Is there already something listed for this? */ | |
1225 | if (hashmap_contains(tr->jobs, trigger)) | |
1226 | continue; | |
1227 | ||
1228 | r = transaction_add_job_and_dependencies(tr, JOB_STOP, trigger, tr->anchor_job, TRANSACTION_MATTERS, &e); | |
1229 | if (r < 0) | |
1230 | log_unit_warning_errno(u, r, "Cannot add triggered by job, ignoring: %s", bus_error_message(&e, r)); | |
1231 | } | |
1232 | ||
1233 | return 0; | |
1234 | } | |
1235 | ||
1236 | Transaction *transaction_new(bool irreversible) { | |
1237 | Transaction *tr; | |
1238 | ||
1239 | tr = new0(Transaction, 1); | |
1240 | if (!tr) | |
1241 | return NULL; | |
1242 | ||
1243 | tr->jobs = hashmap_new(NULL); | |
1244 | if (!tr->jobs) | |
1245 | return mfree(tr); | |
1246 | ||
1247 | tr->irreversible = irreversible; | |
1248 | ||
1249 | return tr; | |
1250 | } | |
1251 | ||
1252 | Transaction *transaction_free(Transaction *tr) { | |
1253 | if (!tr) | |
1254 | return NULL; | |
1255 | ||
1256 | assert(hashmap_isempty(tr->jobs)); | |
1257 | hashmap_free(tr->jobs); | |
1258 | ||
1259 | return mfree(tr); | |
1260 | } | |
1261 | ||
1262 | Transaction *transaction_abort_and_free(Transaction *tr) { | |
1263 | if (!tr) | |
1264 | return NULL; | |
1265 | ||
1266 | transaction_abort(tr); | |
1267 | ||
1268 | return transaction_free(tr); | |
1269 | } |