| Line | Code | |
|---|---|---|
| 1 | /* SPDX-License-Identifier: LGPL-2.1-or-later */ | |
| 2 | ||
| 3 | #include <fnmatch.h> | |
| 4 | #include <linux/capability.h> | |
| 5 | #include <unistd.h> | |
| 6 | ||
| 7 | #include "sd-bus.h" | |
| 8 | #include "sd-id128.h" | |
| 9 | #include "sd-messages.h" | |
| 10 | ||
| 11 | #include "all-units.h" | |
| 12 | #include "alloc-util.h" | |
| 13 | #include "ansi-color.h" | |
| 14 | #include "bpf-firewall.h" | |
| 15 | #include "bpf-restrict-fs.h" | |
| 16 | #include "bus-common-errors.h" | |
| 17 | #include "bus-internal.h" | |
| 18 | #include "bus-util.h" | |
| 19 | #include "cgroup-setup.h" | |
| 20 | #include "cgroup-util.h" | |
| 21 | #include "chase.h" | |
| 22 | #include "chattr-util.h" | |
| 23 | #include "condition.h" | |
| 24 | #include "dbus-unit.h" | |
| 25 | #include "dropin.h" | |
| 26 | #include "dynamic-user.h" | |
| 27 | #include "env-util.h" | |
| 28 | #include "escape.h" | |
| 29 | #include "exec-credential.h" | |
| 30 | #include "execute.h" | |
| 31 | #include "fd-util.h" | |
| 32 | #include "fileio.h" | |
| 33 | #include "format-util.h" | |
| 34 | #include "fs-util.h" | |
| 35 | #include "id128-util.h" | |
| 36 | #include "install.h" | |
| 37 | #include "iovec-util.h" | |
| 38 | #include "load-dropin.h" | |
| 39 | #include "load-fragment.h" | |
| 40 | #include "log.h" | |
| 41 | #include "logarithm.h" | |
| 42 | #include "mkdir-label.h" | |
| 43 | #include "manager.h" | |
| 44 | #include "mount-util.h" | |
| 45 | #include "mountpoint-util.h" | |
| 46 | #include "netlink-internal.h" | |
| 47 | #include "path-util.h" | |
| 48 | #include "process-util.h" | |
| 49 | #include "quota-util.h" | |
| 50 | #include "rm-rf.h" | |
| 51 | #include "serialize.h" | |
| 52 | #include "set.h" | |
| 53 | #include "signal-util.h" | |
| 54 | #include "siphash24.h" | |
| 55 | #include "sparse-endian.h" | |
| 56 | #include "special.h" | |
| 57 | #include "specifier.h" | |
| 58 | #include "stat-util.h" | |
| 59 | #include "string-table.h" | |
| 60 | #include "string-util.h" | |
| 61 | #include "strv.h" | |
| 62 | #include "tmpfile-util.h" | |
| 63 | #include "umask-util.h" | |
| 64 | #include "unit.h" | |
| 65 | #include "unit-name.h" | |
| 66 | #include "user-util.h" | |
| 67 | #include "varlink.h" | |
| 68 | ||
| 69 | /* Thresholds for logging at INFO level about resource consumption */ | |
| 70 | #define MENTIONWORTHY_CPU_NSEC (1 * NSEC_PER_SEC) | |
| 71 | #define MENTIONWORTHY_MEMORY_BYTES (64 * U64_MB) | |
| 72 | #define MENTIONWORTHY_IO_BYTES (1 * U64_MB) | |
| 73 | #define MENTIONWORTHY_IP_BYTES UINT64_C(0) | |
| 74 | ||
| 75 | /* Thresholds for logging at NOTICE level about resource consumption */ | |
| 76 | #define NOTICEWORTHY_CPU_NSEC (10 * NSEC_PER_MINUTE) | |
| 77 | #define NOTICEWORTHY_MEMORY_BYTES (512 * U64_MB) | |
| 78 | #define NOTICEWORTHY_IO_BYTES (10 * U64_MB) | |
| 79 | #define NOTICEWORTHY_IP_BYTES (128 * U64_MB) | |
| 80 | ||
| 81 | const UnitVTable * const unit_vtable[_UNIT_TYPE_MAX] = { | |
| 82 | [UNIT_SERVICE] = &service_vtable, | |
| 83 | [UNIT_SOCKET] = &socket_vtable, | |
| 84 | [UNIT_TARGET] = &target_vtable, | |
| 85 | [UNIT_DEVICE] = &device_vtable, | |
| 86 | [UNIT_MOUNT] = &mount_vtable, | |
| 87 | [UNIT_AUTOMOUNT] = &automount_vtable, | |
| 88 | [UNIT_SWAP] = &swap_vtable, | |
| 89 | [UNIT_TIMER] = &timer_vtable, | |
| 90 | [UNIT_PATH] = &path_vtable, | |
| 91 | [UNIT_SLICE] = &slice_vtable, | |
| 92 | [UNIT_SCOPE] = &scope_vtable, | |
| 93 | }; | |
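
A note on the pattern: `unit_vtable[]` is plain-C virtual dispatch. Each unit type supplies a table of function pointers, and generic code in this file calls through `UNIT_VTABLE(u)->method(u)` without knowing the concrete type. Below is a minimal, self-contained sketch of that idiom; the `Animal`/`AnimalVTable` names are hypothetical stand-ins, not systemd API:

```c
#include <stdio.h>

typedef struct Animal Animal;

typedef struct AnimalVTable {
        const char* (*sound)(Animal *a); /* one slot per overridable operation */
} AnimalVTable;

struct Animal {
        const AnimalVTable *vtable; /* every instance points at its type's table */
};

static const char* dog_sound(Animal *a) {
        (void) a;
        return "woof";
}

static const AnimalVTable dog_vtable = { .sound = dog_sound };

int main(void) {
        Animal d = { .vtable = &dog_vtable };

        /* Dispatch through the table, as UNIT_VTABLE(u)->init(u) does above. */
        printf("%s\n", d.vtable->sound(&d));
        return 0;
}
```
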
| 94 | ||
| 95 | Unit* unit_new(Manager *m, size_t size) { | |
| 96 | Unit *u; | |
| 97 | ||
| 98 | assert(m); | |
| 99 | assert(size >= sizeof(Unit)); | |
| 100 | ||
| 101 | u = malloc0(size); | |
| 102 | if (!u) | |
| 103 | return NULL; | |
| 104 | ||
| 105 | u->manager = m; | |
| 106 | u->type = _UNIT_TYPE_INVALID; | |
| 107 | u->default_dependencies = true; | |
| 108 | u->unit_file_state = _UNIT_FILE_STATE_INVALID; | |
| 109 | u->unit_file_preset = _PRESET_ACTION_INVALID; | |
| 110 | u->on_failure_job_mode = JOB_REPLACE; | |
| 111 | u->on_success_job_mode = JOB_FAIL; | |
| 112 | u->job_timeout = USEC_INFINITY; | |
| 113 | u->job_running_timeout = USEC_INFINITY; | |
| 114 | u->ref_uid = UID_INVALID; | |
| 115 | u->ref_gid = GID_INVALID; | |
| 116 | ||
| 117 | u->failure_action_exit_status = u->success_action_exit_status = -1; | |
| 118 | ||
| 119 | u->last_section_private = -1; | |
| 120 | ||
| 121 | u->start_ratelimit = m->defaults.start_limit; | |
| 122 | ||
| 123 | u->auto_start_stop_ratelimit = (const RateLimit) { | |
| 124 | .interval = 10 * USEC_PER_SEC, | |
| 125 | .burst = 16 | |
| 126 | }; | |
| 127 | ||
| 128 | return u; | |
| 129 | } | |
| 130 | ||
| 131 | int unit_new_for_name(Manager *m, size_t size, const char *name, Unit **ret) { | |
| 132 | _cleanup_(unit_freep) Unit *u = NULL; | |
| 133 | int r; | |
| 134 | ||
| 135 | u = unit_new(m, size); | |
| 136 | if (!u) | |
| 137 | return -ENOMEM; | |
| 138 | ||
| 139 | r = unit_add_name(u, name); | |
| 140 | if (r < 0) | |
| 141 | return r; | |
| 142 | ||
| 143 | *ret = TAKE_PTR(u); | |
| 144 | ||
| 145 | return r; | |
| 146 | } | |
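
`unit_new_for_name()` shows the ownership idiom used throughout this file: a `_cleanup_(unit_freep)` pointer frees the half-built object on any early return, and `TAKE_PTR()` disarms that cleanup once ownership passes to the caller. Here is a compilable sketch of the same idiom under a hypothetical `Resource` type; the real `_cleanup_`/`TAKE_PTR` macros live in systemd's alloc-util.h and macro.h:

```c
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct Resource {
        int id;
} Resource;

static void resource_freep(Resource **r) {
        free(*r); /* free(NULL) is a no-op, so early error returns stay safe */
}

/* TAKE_PTR() equivalent: hand ownership out and disarm the cleanup handler. */
#define TAKE_PTR(p) ({ typeof(p) _q = (p); (p) = NULL; _q; })

static int resource_new(int id, Resource **ret) {
        __attribute__((cleanup(resource_freep))) Resource *r = NULL;

        r = calloc(1, sizeof(Resource));
        if (!r)
                return -ENOMEM;

        r->id = id;

        *ret = TAKE_PTR(r); /* cleanup handler now sees NULL and does nothing */
        return 0;
}

int main(void) {
        Resource *res = NULL;

        if (resource_new(7, &res) < 0)
                return 1;

        printf("id=%d\n", res->id);
        free(res);
        return 0;
}
```
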
| 147 | ||
| 148 | bool unit_has_name(const Unit *u, const char *name) { | |
| 149 | assert(u); | |
| 150 | assert(name); | |
| 151 | ||
| 152 | return streq_ptr(name, u->id) || | |
| 153 | set_contains(u->aliases, name); | |
| 154 | } | |
| 155 | ||
| 156 | static void unit_init(Unit *u) { | |
| 157 | CGroupContext *cc; | |
| 158 | ExecContext *ec; | |
| 159 | KillContext *kc; | |
| 160 | ||
| 161 | assert(u); | |
| 162 | assert(u->manager); | |
| 163 | assert(u->type >= 0); | |
| 164 | ||
| 165 | cc = unit_get_cgroup_context(u); | |
| 166 | if (cc) { | |
| 167 | cgroup_context_init(cc); | |
| 168 | ||
| 169 | /* Copy the manager defaults into the cgroup | |
| 170 | * context, _before_ the rest of the settings have | |
| 171 | * been initialized */ | |
| 172 | ||
| 173 | cc->io_accounting = u->manager->defaults.io_accounting; | |
| 174 | cc->memory_accounting = u->manager->defaults.memory_accounting; | |
| 175 | cc->tasks_accounting = u->manager->defaults.tasks_accounting; | |
| 176 | cc->ip_accounting = u->manager->defaults.ip_accounting; | |
| 177 | ||
| 178 | if (u->type != UNIT_SLICE) | |
| 179 | cc->tasks_max = u->manager->defaults.tasks_max; | |
| 180 | ||
| 181 | cc->memory_pressure_watch = u->manager->defaults.memory_pressure_watch; | |
| 182 | cc->memory_pressure_threshold_usec = u->manager->defaults.memory_pressure_threshold_usec; | |
| 183 | } | |
| 184 | ||
| 185 | ec = unit_get_exec_context(u); | |
| 186 | if (ec) { | |
| 187 | exec_context_init(ec); | |
| 188 | ||
| 189 | if (u->manager->defaults.oom_score_adjust_set) { | |
| 190 | ec->oom_score_adjust = u->manager->defaults.oom_score_adjust; | |
| 191 | ec->oom_score_adjust_set = true; | |
| 192 | } | |
| 193 | ||
| 194 | ec->restrict_suid_sgid = u->manager->defaults.restrict_suid_sgid; | |
| 195 | ||
| 196 | if (MANAGER_IS_SYSTEM(u->manager)) | |
| 197 | ec->keyring_mode = EXEC_KEYRING_SHARED; | |
| 198 | else { | |
| 199 | ec->keyring_mode = EXEC_KEYRING_INHERIT; | |
| 200 | ||
| 201 | /* User manager might have its umask redefined by PAM or UMask=. In this | |
| 202 | * case let the units it manages inherit this value by default. They can | |
| 203 | * still tune this value through their own unit file */ | |
| 204 | (void) get_process_umask(0, &ec->umask); | |
| 205 | } | |
| 206 | } | |
| 207 | ||
| 208 | kc = unit_get_kill_context(u); | |
| 209 | if (kc) | |
| 210 | kill_context_init(kc); | |
| 211 | ||
| 212 | if (UNIT_VTABLE(u)->init) | |
| 213 | UNIT_VTABLE(u)->init(u); | |
| 214 | } | |
| 215 | ||
| 216 | static int unit_add_alias(Unit *u, char *donated_name) { | |
| 217 | int r; | |
| 218 | ||
| 219 | /* Make sure that u->aliases is allocated. We may leave it | |
| 220 | * empty if we fail later, but this is not a problem. */ | |
| 221 | r = set_ensure_put(&u->aliases, &string_hash_ops_free, donated_name); | |
| 222 | if (r < 0) | |
| 223 | return r; | |
| 224 | assert(r > 0); | |
| 225 | ||
| 226 | return 0; | |
| 227 | } | |
| 228 | ||
| 229 | int unit_add_name(Unit *u, const char *text) { | |
| 230 | _cleanup_free_ char *name = NULL, *instance = NULL; | |
| 231 | UnitType t; | |
| 232 | int r; | |
| 233 | ||
| 234 | assert(u); | |
| 235 | assert(text); | |
| 236 | ||
| 237 | if (unit_name_is_valid(text, UNIT_NAME_TEMPLATE)) { | |
| 238 | if (!u->instance) | |
| 239 | return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL), | |
| 240 | "Instance is not set when adding name '%s'.", text); | |
| 241 | ||
| 242 | r = unit_name_replace_instance(text, u->instance, &name); | |
| 243 | if (r < 0) | |
| 244 | return log_unit_debug_errno(u, r, | |
| 245 | "Failed to build instance name from '%s': %m", text); | |
| 246 | } else { | |
| 247 | name = strdup(text); | |
| 248 | if (!name) | |
| 249 | return -ENOMEM; | |
| 250 | } | |
| 251 | ||
| 252 | if (unit_has_name(u, name)) | |
| 253 | return 0; | |
| 254 | ||
| 255 | if (hashmap_contains(u->manager->units, name)) | |
| 256 | return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EEXIST), | |
| 257 | "Unit already exist when adding name '%s'.", name); | |
| 258 | ||
| 259 | if (!unit_name_is_valid(name, UNIT_NAME_PLAIN|UNIT_NAME_INSTANCE)) | |
| 260 | return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL), | |
| 261 | "Name '%s' is invalid.", name); | |
| 262 | ||
| 263 | t = unit_name_to_type(name); | |
| 264 | if (t < 0) | |
| 265 | return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL), | |
| 266 | "failed to derive unit type from name '%s'.", name); | |
| 267 | ||
| 268 | if (u->type != _UNIT_TYPE_INVALID && t != u->type) | |
| 269 | return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL), | |
| 270 | "Unit type is illegal: u->type(%d) and t(%d) for name '%s'.", | |
| 271 | u->type, t, name); | |
| 272 | ||
| 273 | r = unit_name_to_instance(name, &instance); | |
| 274 | if (r < 0) | |
| 275 | return log_unit_debug_errno(u, r, "Failed to extract instance from name '%s': %m", name); | |
| 276 | ||
| 277 | if (instance && !unit_type_may_template(t)) | |
| 278 | return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL), "Templates are not allowed for name '%s'.", name); | |
| 279 | ||
| 280 | /* Ensure that this unit either has no instance, or that the instance matches. */ | |
| 281 | if (u->type != _UNIT_TYPE_INVALID && !streq_ptr(u->instance, instance)) | |
| 282 | return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL), | |
| 283 | "Cannot add name %s, the instances don't match (\"%s\" != \"%s\").", | |
| 284 | name, instance, u->instance); | |
| 285 | ||
| 286 | if (u->id && !unit_type_may_alias(t)) | |
| 287 | return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EEXIST), | |
| 288 | "Cannot add name %s, aliases are not allowed for %s units.", | |
| 289 | name, unit_type_to_string(t)); | |
| 290 | ||
| 291 | if (hashmap_size(u->manager->units) >= MANAGER_MAX_NAMES) | |
| 292 | return log_unit_warning_errno(u, SYNTHETIC_ERRNO(E2BIG), "Cannot add name, manager has too many units."); | |
| 293 | ||
| 294 | /* Add name to the global hashmap first, because that's easier to undo */ | |
| 295 | r = hashmap_put(u->manager->units, name, u); | |
| 296 | if (r < 0) | |
| 297 | return log_unit_debug_errno(u, r, "Add unit to hashmap failed for name '%s': %m", text); | |
| 298 | ||
| 299 | if (u->id) { | |
| 300 | r = unit_add_alias(u, name); /* unit_add_alias() takes ownership of the name on success */ | |
| 301 | if (r < 0) { | |
| 302 | hashmap_remove(u->manager->units, name); | |
| 303 | return r; | |
| 304 | } | |
| 305 | TAKE_PTR(name); | |
| 306 | ||
| 307 | } else { | |
| 308 | /* A new name, we don't need the set yet. */ | |
| 309 | assert(u->type == _UNIT_TYPE_INVALID); | |
| 310 | assert(!u->instance); | |
| 311 | ||
| 312 | u->type = t; | |
| 313 | u->id = TAKE_PTR(name); | |
| 314 | u->instance = TAKE_PTR(instance); | |
| 315 | ||
| 316 | LIST_PREPEND(units_by_type, u->manager->units_by_type[t], u); | |
| 317 | unit_init(u); | |
| 318 | } | |
| 319 | ||
| 320 | unit_add_to_dbus_queue(u); | |
| 321 | return 0; | |
| 322 | } | |
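
For template names, `unit_add_name()` relies on `unit_name_replace_instance()` to splice the unit's instance between the `@` and the type suffix, e.g. turning `getty@.service` plus `tty1` into `getty@tty1.service`. The sketch below is a simplified, hypothetical stand-in for illustration only; the real implementation in unit-name.c also validates and escapes its inputs:

```c
#define _GNU_SOURCE /* for asprintf() */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Simplified stand-in: splice an instance between '@' and the type suffix. */
static char* replace_instance(const char *template, const char *instance) {
        const char *at = strchr(template, '@');
        const char *dot = strrchr(template, '.');
        char *result;

        if (!at || !dot || dot < at)
                return NULL;

        if (asprintf(&result, "%.*s%s%s",
                     (int) (at - template + 1), template, instance, dot) < 0)
                return NULL;

        return result;
}

int main(void) {
        char *name = replace_instance("getty@.service", "tty1");
        if (!name)
                return 1;

        printf("%s\n", name); /* prints "getty@tty1.service" */
        free(name);
        return 0;
}
```
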
| 323 | ||
| 324 | int unit_choose_id(Unit *u, const char *name) { | |
| 325 | _cleanup_free_ char *t = NULL; | |
| 326 | char *s; | |
| 327 | int r; | |
| 328 | ||
| 329 | assert(u); | |
| 330 | assert(name); | |
| 331 | ||
| 332 | if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) { | |
| 333 | if (!u->instance) | |
| 334 | return -EINVAL; | |
| 335 | ||
| 336 | r = unit_name_replace_instance(name, u->instance, &t); | |
| 337 | if (r < 0) | |
| 338 | return r; | |
| 339 | ||
| 340 | name = t; | |
| 341 | } | |
| 342 | ||
| 343 | if (streq_ptr(u->id, name)) | |
| 344 | return 0; /* Nothing to do. */ | |
| 345 | ||
| 346 | /* Selects one of the aliases of this unit as the id */ | |
| 347 | s = set_get(u->aliases, (char*) name); | |
| 348 | if (!s) | |
| 349 | return -ENOENT; | |
| 350 | ||
| 351 | if (u->id) { | |
| 352 | r = set_remove_and_put(u->aliases, name, u->id); | |
| 353 | if (r < 0) | |
| 354 | return r; | |
| 355 | } else | |
| 356 | assert_se(set_remove(u->aliases, name)); /* see set_get() above… */ | |
| 357 | ||
| 358 | u->id = s; /* Old u->id is now stored in the set, and s is not stored anywhere */ | |
| 359 | unit_add_to_dbus_queue(u); | |
| 360 | ||
| 361 | return 0; | |
| 362 | } | |
| 363 | ||
| 364 | int unit_set_description(Unit *u, const char *description) { | |
| 365 | int r; | |
| 366 | ||
| 367 | assert(u); | |
| 368 | ||
| 369 | r = free_and_strdup(&u->description, empty_to_null(description)); | |
| 370 | if (r < 0) | |
| 371 | return r; | |
| 372 | if (r > 0) | |
| 373 | unit_add_to_dbus_queue(u); | |
| 374 | ||
| 375 | return 0; | |
| 376 | } | |
| 377 | ||
| 378 | static bool unit_success_failure_handler_has_jobs(Unit *unit) { | |
| 379 | Unit *other; | |
| 380 | ||
| 381 | UNIT_FOREACH_DEPENDENCY(other, unit, UNIT_ATOM_ON_SUCCESS) | |
| 382 | if (other->job || other->nop_job) | |
| 383 | return true; | |
| 384 | ||
| 385 | UNIT_FOREACH_DEPENDENCY(other, unit, UNIT_ATOM_ON_FAILURE) | |
| 386 | if (other->job || other->nop_job) | |
| 387 | return true; | |
| 388 | ||
| 389 | return false; | |
| 390 | } | |
| 391 | ||
| 392 | void unit_release_resources(Unit *u) { | |
| 393 | UnitActiveState state; | |
| 394 | ExecContext *ec; | |
| 395 | ||
| 396 | assert(u); | |
| 397 | ||
| 398 | if (u->job || u->nop_job) | |
| 399 | return; | |
| 400 | ||
| 401 | if (u->perpetual) | |
| 402 | return; | |
| 403 | ||
| 404 | state = unit_active_state(u); | |
| 405 | if (!UNIT_IS_INACTIVE_OR_FAILED(state)) | |
| 406 | return; | |
| 407 | ||
| 408 | if (unit_will_restart(u)) | |
| 409 | return; | |
| 410 | ||
| 411 | ec = unit_get_exec_context(u); | |
| 412 | if (ec && ec->runtime_directory_preserve_mode == EXEC_PRESERVE_RESTART) | |
| 413 | exec_context_destroy_runtime_directory(ec, u->manager->prefix[EXEC_DIRECTORY_RUNTIME]); | |
| 414 | ||
| 415 | if (UNIT_VTABLE(u)->release_resources) | |
| 416 | UNIT_VTABLE(u)->release_resources(u); | |
| 417 | } | |
| 418 | ||
| 419 | bool unit_may_gc(Unit *u) { | |
| 420 | UnitActiveState state; | |
| 421 | int r; | |
| 422 | ||
| 423 | assert(u); | |
| 424 | ||
| 425 | /* Checks whether the unit is ready to be unloaded for garbage collection. Returns true when the | |
| 426 | * unit may be collected, and false if there's some reason to keep it loaded. | |
| 427 | * | |
| 428 | * References from other units are *not* checked here. Instead, this is done in unit_gc_sweep(), but | |
| 429 | * using markers to properly collect dependency loops. | |
| 430 | */ | |
| 431 | ||
| 432 | if (u->job || u->nop_job) | |
| 433 | return false; | |
| 434 | ||
| 435 | if (u->perpetual) | |
| 436 | return false; | |
| 437 | ||
| 438 | /* If we saw a cgroup empty event for this unit, stay around until we have processed it, so that | |
| 439 | * we can remove the empty cgroup if possible. Similarly, process any pending OOM events if they | |
| 440 | * are already queued before we release the unit. */ | |
| 441 | if (u->in_cgroup_empty_queue || u->in_cgroup_oom_queue) | |
| 442 | return false; | |
| 443 | ||
| 444 | /* Make sure to send out D-Bus events before we unload the unit */ | |
| 445 | if (u->in_dbus_queue) | |
| 446 | return false; | |
| 447 | ||
| 448 | if (sd_bus_track_count(u->bus_track) > 0) | |
| 449 | return false; | |
| 450 | ||
| 451 | state = unit_active_state(u); | |
| 452 | ||
| 453 | /* But we keep the unit object around for longer when it is referenced or configured to not be | |
| 454 | * gc'ed */ | |
| 455 | switch (u->collect_mode) { | |
| 456 | ||
| 457 | case COLLECT_INACTIVE: | |
| 458 | if (state != UNIT_INACTIVE) | |
| 459 | return false; | |
| 460 | ||
| 461 | break; | |
| 462 | ||
| 463 | case COLLECT_INACTIVE_OR_FAILED: | |
| 464 | if (!UNIT_IS_INACTIVE_OR_FAILED(state)) | |
| 465 | return false; | |
| 466 | ||
| 467 | break; | |
| 468 | ||
| 469 | default: | |
| 470 | assert_not_reached(); | |
| 471 | } | |
| 472 | ||
| 473 | /* Check if any OnFailure= or OnSuccess= jobs may be pending */ | |
| 474 | if (unit_success_failure_handler_has_jobs(u)) | |
| 475 | return false; | |
| 476 | ||
| 477 | /* If the unit has a cgroup, then check whether there's anything in it. If so, we should stay | |
| 478 | * around. Units with active processes should never be collected. */ | |
| 479 | r = unit_cgroup_is_empty(u); | |
| 480 | if (r <= 0 && !IN_SET(r, -ENXIO, -EOWNERDEAD)) | |
| 481 | return false; /* ENXIO/EOWNERDEAD means: currently not realized */ | |
| 482 | ||
| 483 | if (!UNIT_VTABLE(u)->may_gc) | |
| 484 | return true; | |
| 485 | ||
| 486 | return UNIT_VTABLE(u)->may_gc(u); | |
| 487 | } | |
| 488 | ||
| 489 | void unit_add_to_load_queue(Unit *u) { | |
| 490 | assert(u); | |
| 491 | assert(u->type != _UNIT_TYPE_INVALID); | |
| 492 | ||
| 493 | if (u->load_state != UNIT_STUB || u->in_load_queue) | |
| 494 | return; | |
| 495 | ||
| 496 | LIST_PREPEND(load_queue, u->manager->load_queue, u); | |
| 497 | u->in_load_queue = true; | |
| 498 | } | |
| 499 | ||
| 500 | void unit_add_to_cleanup_queue(Unit *u) { | |
| 501 | assert(u); | |
| 502 | ||
| 503 | if (u->in_cleanup_queue) | |
| 504 | return; | |
| 505 | ||
| 506 | LIST_PREPEND(cleanup_queue, u->manager->cleanup_queue, u); | |
| 507 | u->in_cleanup_queue = true; | |
| 508 | } | |
| 509 | ||
| 510 | void unit_add_to_gc_queue(Unit *u) { | |
| 511 | assert(u); | |
| 512 | ||
| 513 | if (u->in_gc_queue || u->in_cleanup_queue) | |
| 514 | return; | |
| 515 | ||
| 516 | if (!unit_may_gc(u)) | |
| 517 | return; | |
| 518 | ||
| 519 | LIST_PREPEND(gc_queue, u->manager->gc_unit_queue, u); | |
| 520 | u->in_gc_queue = true; | |
| 521 | } | |
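
All the `unit_add_to_*_queue()` helpers in this file share one shape: an intrusive singly-linked list (`LIST_PREPEND`) guarded by a per-unit membership flag, which makes enqueueing idempotent and O(1) with no allocation. A minimal sketch of the idiom, with hypothetical `Item`/`queue_push` names:

```c
#include <stdbool.h>
#include <stdio.h>

typedef struct Item {
        int value;
        bool in_queue;           /* membership flag, like u->in_gc_queue */
        struct Item *queue_next; /* intrusive link, like LIST_FIELDS */
} Item;

static void queue_push(Item **head, Item *i) {
        if (i->in_queue) /* already queued: nothing to do, stays O(1) */
                return;

        i->queue_next = *head; /* LIST_PREPEND equivalent */
        *head = i;
        i->in_queue = true;
}

int main(void) {
        Item a = { .value = 1 }, b = { .value = 2 };
        Item *queue = NULL;

        queue_push(&queue, &a);
        queue_push(&queue, &b);
        queue_push(&queue, &a); /* idempotent: 'a' is not enqueued twice */

        for (Item *i = queue; i; i = i->queue_next)
                printf("%d\n", i->value); /* prints 2 then 1 (prepend order) */
        return 0;
}
```
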
| 522 | ||
| 523 | void unit_add_to_dbus_queue(Unit *u) { | |
| 524 | assert(u); | |
| 525 | assert(u->type != _UNIT_TYPE_INVALID); | |
| 526 | ||
| 527 | if (u->load_state == UNIT_STUB || u->in_dbus_queue) | |
| 528 | return; | |
| 529 | ||
| 530 | /* Shortcut things if nobody cares */ | |
| 531 | if (sd_bus_track_count(u->manager->subscribed) <= 0 && | |
| 532 | sd_bus_track_count(u->bus_track) <= 0 && | |
| 533 | set_isempty(u->manager->private_buses)) { | |
| 534 | u->sent_dbus_new_signal = true; | |
| 535 | return; | |
| 536 | } | |
| 537 | ||
| 538 | LIST_PREPEND(dbus_queue, u->manager->dbus_unit_queue, u); | |
| 539 | u->in_dbus_queue = true; | |
| 540 | } | |
| 541 | ||
| 542 | void unit_submit_to_stop_when_unneeded_queue(Unit *u) { | |
| 543 | assert(u); | |
| 544 | ||
| 545 | if (u->in_stop_when_unneeded_queue) | |
| 546 | return; | |
| 547 | ||
| 548 | if (!u->stop_when_unneeded) | |
| 549 | return; | |
| 550 | ||
| 551 | if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u))) | |
| 552 | return; | |
| 553 | ||
| 554 | LIST_PREPEND(stop_when_unneeded_queue, u->manager->stop_when_unneeded_queue, u); | |
| 555 | u->in_stop_when_unneeded_queue = true; | |
| 556 | } | |
| 557 | ||
| 558 | void unit_submit_to_start_when_upheld_queue(Unit *u) { | |
| 559 | assert(u); | |
| 560 | ||
| 561 | if (u->in_start_when_upheld_queue) | |
| 562 | return; | |
| 563 | ||
| 564 | if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(u))) | |
| 565 | return; | |
| 566 | ||
| 567 | if (!unit_has_dependency(u, UNIT_ATOM_START_STEADILY, NULL)) | |
| 568 | return; | |
| 569 | ||
| 570 | LIST_PREPEND(start_when_upheld_queue, u->manager->start_when_upheld_queue, u); | |
| 571 | u->in_start_when_upheld_queue = true; | |
| 572 | } | |
| 573 | ||
| 574 | void unit_submit_to_stop_when_bound_queue(Unit *u) { | |
| 575 | assert(u); | |
| 576 | ||
| 577 | if (u->in_stop_when_bound_queue) | |
| 578 | return; | |
| 579 | ||
| 580 | if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u))) | |
| 581 | return; | |
| 582 | ||
| 583 | if (!unit_has_dependency(u, UNIT_ATOM_CANNOT_BE_ACTIVE_WITHOUT, NULL)) | |
| 584 | return; | |
| 585 | ||
| 586 | LIST_PREPEND(stop_when_bound_queue, u->manager->stop_when_bound_queue, u); | |
| 587 | u->in_stop_when_bound_queue = true; | |
| 588 | } | |
| 589 | ||
| 590 | static bool unit_can_release_resources(Unit *u) { | |
| 591 | ExecContext *ec; | |
| 592 | ||
| 593 | assert(u); | |
| 594 | ||
| 595 | if (UNIT_VTABLE(u)->release_resources) | |
| 596 | return true; | |
| 597 | ||
| 598 | ec = unit_get_exec_context(u); | |
| 599 | if (ec && ec->runtime_directory_preserve_mode == EXEC_PRESERVE_RESTART) | |
| 600 | return true; | |
| 601 | ||
| 602 | return false; | |
| 603 | } | |
| 604 | ||
| 605 | void unit_submit_to_release_resources_queue(Unit *u) { | |
| 606 | assert(u); | |
| 607 | ||
| 608 | if (u->in_release_resources_queue) | |
| 609 | return; | |
| 610 | ||
| 611 | if (u->job || u->nop_job) | |
| 612 | return; | |
| 613 | ||
| 614 | if (u->perpetual) | |
| 615 | return; | |
| 616 | ||
| 617 | if (!unit_can_release_resources(u)) | |
| 618 | return; | |
| 619 | ||
| 620 | LIST_PREPEND(release_resources_queue, u->manager->release_resources_queue, u); | |
| 621 | u->in_release_resources_queue = true; | |
| 622 | } | |
| 623 | ||
| 624 | void unit_add_to_stop_notify_queue(Unit *u) { | |
| 625 | assert(u); | |
| 626 | ||
| 627 | if (u->in_stop_notify_queue) | |
| 628 | return; | |
| 629 | ||
| 630 | assert(UNIT_VTABLE(u)->stop_notify); | |
| 631 | ||
| 632 | LIST_PREPEND(stop_notify_queue, u->manager->stop_notify_queue, u); | |
| 633 | u->in_stop_notify_queue = true; | |
| 634 | } | |
| 635 | ||
| 636 | void unit_remove_from_stop_notify_queue(Unit *u) { | |
| 637 | assert(u); | |
| 638 | ||
| 639 | if (!u->in_stop_notify_queue) | |
| 640 | return; | |
| 641 | ||
| 642 | LIST_REMOVE(stop_notify_queue, u->manager->stop_notify_queue, u); | |
| 643 | u->in_stop_notify_queue = false; | |
| 644 | } | |
| 645 | ||
| 646 | static void unit_clear_dependencies(Unit *u) { | |
| 647 | assert(u); | |
| 648 | ||
| 649 | /* Removes all dependencies configured on u and their reverse dependencies. */ | |
| 650 | ||
| 651 | for (Hashmap *deps; (deps = hashmap_steal_first(u->dependencies));) { | |
| 652 | ||
| 653 | for (Unit *other; (other = hashmap_steal_first_key(deps));) { | |
| 654 | Hashmap *other_deps; | |
| 655 | ||
| 656 | HASHMAP_FOREACH(other_deps, other->dependencies) | |
| 657 | hashmap_remove(other_deps, u); | |
| 658 | ||
| 659 | unit_add_to_gc_queue(other); | |
| 660 | other->dependency_generation++; | |
| 661 | } | |
| 662 | ||
| 663 | hashmap_free(deps); | |
| 664 | } | |
| 665 | ||
| 666 | u->dependencies = hashmap_free(u->dependencies); | |
| 667 | u->dependency_generation++; | |
| 668 | } | |
| 669 | ||
| 670 | static void unit_remove_transient(Unit *u) { | |
| 671 | assert(u); | |
| 672 | assert(u->manager); | |
| 673 | ||
| 674 | if (!u->transient) | |
| 675 | return; | |
| 676 | ||
| 677 | STRV_FOREACH(i, u->dropin_paths) { | |
| 678 | _cleanup_free_ char *p = NULL, *pp = NULL; | |
| 679 | ||
| 680 | if (path_extract_directory(*i, &p) < 0) /* Get the drop-in directory from the drop-in file */ | |
| 681 | continue; | |
| 682 | ||
| 683 | if (path_extract_directory(p, &pp) < 0) /* Get the config directory from the drop-in directory */ | |
| 684 | continue; | |
| 685 | ||
| 686 | /* Only drop transient drop-ins */ | |
| 687 | if (!path_equal(u->manager->lookup_paths.transient, pp)) | |
| 688 | continue; | |
| 689 | ||
| 690 | (void) unlink(*i); | |
| 691 | (void) rmdir(p); | |
| 692 | } | |
| 693 | ||
| 694 | if (u->fragment_path) { | |
| 695 | (void) unlink(u->fragment_path); | |
| 696 | (void) unit_file_remove_from_name_map( | |
| 697 | &u->manager->lookup_paths, | |
| 698 | &u->manager->unit_cache_timestamp_hash, | |
| 699 | &u->manager->unit_id_map, | |
| 700 | &u->manager->unit_name_map, | |
| 701 | &u->manager->unit_path_cache, | |
| 702 | u->fragment_path); | |
| 703 | } | |
| 704 | } | |
| 705 | ||
| 706 | static void unit_free_mounts_for(Unit *u) { | |
| 707 | assert(u); | |
| 708 | ||
| 709 | for (UnitMountDependencyType t = 0; t < _UNIT_MOUNT_DEPENDENCY_TYPE_MAX; ++t) { | |
| 710 | for (;;) { | |
| 711 | _cleanup_free_ char *path = NULL; | |
| 712 | ||
| 713 | path = hashmap_steal_first_key(u->mounts_for[t]); | |
| 714 | if (!path) | |
| 715 | break; | |
| 716 | ||
| 717 | char s[strlen(path) + 1]; | |
| 718 | ||
| 719 | PATH_FOREACH_PREFIX_MORE(s, path) { | |
| 720 | char *y; | |
| 721 | Set *x; | |
| 722 | ||
| 723 | x = hashmap_get2(u->manager->units_needing_mounts_for[t], s, (void**) &y); | |
| 724 | if (!x) | |
| 725 | continue; | |
| 726 | ||
| 727 | (void) set_remove(x, u); | |
| 728 | ||
| 729 | if (set_isempty(x)) { | |
| 730 | assert_se(hashmap_remove(u->manager->units_needing_mounts_for[t], y)); | |
| 731 | free(y); | |
| 732 | set_free(x); | |
| 733 | } | |
| 734 | } | |
| 735 | } | |
| 736 | ||
| 737 | u->mounts_for[t] = hashmap_free(u->mounts_for[t]); | |
| 738 | } | |
| 739 | } | |
| 740 | ||
| 741 | static void unit_done(Unit *u) { | |
| 742 | ExecContext *ec; | |
| 743 | CGroupContext *cc; | |
| 744 | ||
| 745 | assert(u); | |
| 746 | ||
| 747 | if (u->type < 0) | |
| 748 | return; | |
| 749 | ||
| 750 | if (UNIT_VTABLE(u)->done) | |
| 751 | UNIT_VTABLE(u)->done(u); | |
| 752 | ||
| 753 | ec = unit_get_exec_context(u); | |
| 754 | if (ec) | |
| 755 | exec_context_done(ec); | |
| 756 | ||
| 757 | cc = unit_get_cgroup_context(u); | |
| 758 | if (cc) | |
| 759 | cgroup_context_done(cc); | |
| 760 | } | |
| 761 | ||
| 762 | Unit* unit_free(Unit *u) { | |
| 763 | Unit *slice; | |
| 764 | char *t; | |
| 765 | ||
| 766 | if (!u) | |
| 767 | return NULL; | |
| 768 | ||
| 769 | sd_event_source_disable_unref(u->auto_start_stop_event_source); | |
| 770 | ||
| 771 | u->transient_file = safe_fclose(u->transient_file); | |
| 772 | ||
| 773 | if (!MANAGER_IS_RELOADING(u->manager)) | |
| 774 | unit_remove_transient(u); | |
| 775 | ||
| 776 | bus_unit_send_removed_signal(u); | |
| 777 | ||
| 778 | unit_done(u); | |
| 779 | ||
| 780 | u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot); | |
| 781 | u->bus_track = sd_bus_track_unref(u->bus_track); | |
| 782 | u->deserialized_refs = strv_free(u->deserialized_refs); | |
| 783 | u->pending_freezer_invocation = sd_bus_message_unref(u->pending_freezer_invocation); | |
| 784 | ||
| 785 | unit_free_mounts_for(u); | |
| 786 | ||
| 787 | SET_FOREACH(t, u->aliases) | |
| 788 | hashmap_remove_value(u->manager->units, t, u); | |
| 789 | if (u->id) | |
| 790 | hashmap_remove_value(u->manager->units, u->id, u); | |
| 791 | ||
| 792 | if (!sd_id128_is_null(u->invocation_id)) | |
| 793 | hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u); | |
| 794 | ||
| 795 | if (u->job) { | |
| 796 | Job *j = u->job; | |
| 797 | job_uninstall(j); | |
| 798 | job_free(j); | |
| 799 | } | |
| 800 | ||
| 801 | if (u->nop_job) { | |
| 802 | Job *j = u->nop_job; | |
| 803 | job_uninstall(j); | |
| 804 | job_free(j); | |
| 805 | } | |
| 806 | ||
| 807 | /* A unit is being dropped from the tree, make sure our family is realized properly. Do this after we | |
| 808 | * detach the unit from slice tree in order to eliminate its effect on controller masks. */ | |
| 809 | slice = UNIT_GET_SLICE(u); | |
| 810 | unit_clear_dependencies(u); | |
| 811 | if (slice) | |
| 812 | unit_add_family_to_cgroup_realize_queue(slice); | |
| 813 | ||
| 814 | if (u->on_console) | |
| 815 | manager_unref_console(u->manager); | |
| 816 | ||
| 817 | unit_release_cgroup(u, /* drop_cgroup_runtime = */ true); | |
| 818 | ||
| 819 | if (!MANAGER_IS_RELOADING(u->manager)) | |
| 820 | unit_unlink_state_files(u); | |
| 821 | ||
| 822 | unit_unref_uid_gid(u, false); | |
| 823 | ||
| 824 | (void) manager_update_failed_units(u->manager, u, false); | |
| 825 | set_remove(u->manager->startup_units, u); | |
| 826 | ||
| 827 | unit_unwatch_all_pids(u); | |
| 828 | ||
| 829 | while (u->refs_by_target) | |
| 830 | unit_ref_unset(u->refs_by_target); | |
| 831 | ||
| 832 | if (u->type != _UNIT_TYPE_INVALID) | |
| 833 | LIST_REMOVE(units_by_type, u->manager->units_by_type[u->type], u); | |
| 834 | ||
| 835 | if (u->in_load_queue) | |
| 836 | LIST_REMOVE(load_queue, u->manager->load_queue, u); | |
| 837 | ||
| 838 | if (u->in_dbus_queue) | |
| 839 | LIST_REMOVE(dbus_queue, u->manager->dbus_unit_queue, u); | |
| 840 | ||
| 841 | if (u->in_cleanup_queue) | |
| 842 | LIST_REMOVE(cleanup_queue, u->manager->cleanup_queue, u); | |
| 843 | ||
| 844 | if (u->in_gc_queue) | |
| 845 | LIST_REMOVE(gc_queue, u->manager->gc_unit_queue, u); | |
| 846 | ||
| 847 | if (u->in_cgroup_realize_queue) | |
| 848 | LIST_REMOVE(cgroup_realize_queue, u->manager->cgroup_realize_queue, u); | |
| 849 | ||
| 850 | if (u->in_cgroup_empty_queue) | |
| 851 | LIST_REMOVE(cgroup_empty_queue, u->manager->cgroup_empty_queue, u); | |
| 852 | ||
| 853 | if (u->in_cgroup_oom_queue) | |
| 854 | LIST_REMOVE(cgroup_oom_queue, u->manager->cgroup_oom_queue, u); | |
| 855 | ||
| 856 | if (u->in_target_deps_queue) | |
| 857 | LIST_REMOVE(target_deps_queue, u->manager->target_deps_queue, u); | |
| 858 | ||
| 859 | if (u->in_stop_when_unneeded_queue) | |
| 860 | LIST_REMOVE(stop_when_unneeded_queue, u->manager->stop_when_unneeded_queue, u); | |
| 861 | ||
| 862 | if (u->in_start_when_upheld_queue) | |
| 863 | LIST_REMOVE(start_when_upheld_queue, u->manager->start_when_upheld_queue, u); | |
| 864 | ||
| 865 | if (u->in_stop_when_bound_queue) | |
| 866 | LIST_REMOVE(stop_when_bound_queue, u->manager->stop_when_bound_queue, u); | |
| 867 | ||
| 868 | if (u->in_release_resources_queue) | |
| 869 | LIST_REMOVE(release_resources_queue, u->manager->release_resources_queue, u); | |
| 870 | ||
| 871 | unit_remove_from_stop_notify_queue(u); | |
| 872 | ||
| 873 | condition_free_list(u->conditions); | |
| 874 | condition_free_list(u->asserts); | |
| 875 | ||
| 876 | free(u->description); | |
| 877 | strv_free(u->documentation); | |
| 878 | free(u->fragment_path); | |
| 879 | free(u->source_path); | |
| 880 | strv_free(u->dropin_paths); | |
| 881 | free(u->instance); | |
| 882 | ||
| 883 | free(u->job_timeout_reboot_arg); | |
| 884 | free(u->reboot_arg); | |
| 885 | ||
| 886 | free(u->access_selinux_context); | |
| 887 | ||
| 888 | set_free(u->aliases); | |
| 889 | free(u->id); | |
| 890 | ||
| 891 | activation_details_unref(u->activation_details); | |
| 892 | ||
| 893 | return mfree(u); | |
| 894 | } | |
| 895 | ||
| 896 | UnitActiveState unit_active_state(Unit *u) { | |
| 897 | assert(u); | |
| 898 | ||
| 899 | if (u->load_state == UNIT_MERGED) | |
| 900 | return unit_active_state(unit_follow_merge(u)); | |
| 901 | ||
| 902 | /* After a reload it might happen that a unit is not correctly | |
| 903 | * loaded but still has a process around. That's why we won't | |
| 904 | * shortcut failed loading to UNIT_INACTIVE_FAILED. */ | |
| 905 | ||
| 906 | return UNIT_VTABLE(u)->active_state(u); | |
| 907 | } | |
| 908 | ||
| 909 | const char* unit_sub_state_to_string(Unit *u) { | |
| 910 | assert(u); | |
| 911 | ||
| 912 | return UNIT_VTABLE(u)->sub_state_to_string(u); | |
| 913 | } | |
| 914 | ||
| 915 | static int unit_merge_names(Unit *u, Unit *other) { | |
| 916 | char *name; | |
| 917 | int r; | |
| 918 | ||
| 919 | assert(u); | |
| 920 | assert(other); | |
| 921 | ||
| 922 | r = unit_add_alias(u, other->id); | |
| 923 | if (r < 0) | |
| 924 | return r; | |
| 925 | ||
| 926 | r = set_move(u->aliases, other->aliases); | |
| 927 | if (r < 0) { | |
| 928 | set_remove(u->aliases, other->id); | |
| 929 | return r; | |
| 930 | } | |
| 931 | ||
| 932 | TAKE_PTR(other->id); | |
| 933 | other->aliases = set_free(other->aliases); | |
| 934 | ||
| 935 | SET_FOREACH(name, u->aliases) | |
| 936 | assert_se(hashmap_replace(u->manager->units, name, u) == 0); | |
| 937 | ||
| 938 | return 0; | |
| 939 | } | |
| 940 | ||
| 941 | static int unit_reserve_dependencies(Unit *u, Unit *other) { | |
| 942 | size_t n_reserve; | |
| 943 | Hashmap* deps; | |
| 944 | void *d; | |
| 945 | int r; | |
| 946 | ||
| 947 | assert(u); | |
| 948 | assert(other); | |
| 949 | ||
| 950 | /* Let's reserve some space in the dependency hashmaps so that later on merging the units cannot | |
| 951 | * fail. | |
| 952 | * | |
| 953 | * First make some room in the per dependency type hashmaps. Using the summed size of both units' | |
| 954 | * hashmaps is an estimate that is likely too high since they probably use some of the same | |
| 955 | * types. But it's never too low, and that's all we need. */ | |
| 956 | ||
| 957 | n_reserve = MIN(hashmap_size(other->dependencies), LESS_BY((size_t) _UNIT_DEPENDENCY_MAX, hashmap_size(u->dependencies))); | |
| 958 | if (n_reserve > 0) { | |
| 959 | r = hashmap_ensure_allocated(&u->dependencies, NULL); | |
| 960 | if (r < 0) | |
| 961 | return r; | |
| 962 | ||
| 963 | r = hashmap_reserve(u->dependencies, n_reserve); | |
| 964 | if (r < 0) | |
| 965 | return r; | |
| 966 | } | |
| 967 | ||
| 968 | /* Now, enlarge our per dependency type hashmaps by the number of entries in the same hashmap of the | |
| 969 | * other unit's dependencies. | |
| 970 | * | |
| 971 | * NB: If u does not have a dependency set allocated for some dependency type, there is no need to | |
| 972 | * reserve anything for it. In that case other's set will be transferred as a whole to u by | |
| 973 | * complete_move(). */ | |
| 974 | ||
| 975 | HASHMAP_FOREACH_KEY(deps, d, u->dependencies) { | |
| 976 | Hashmap *other_deps; | |
| 977 | ||
| 978 | other_deps = hashmap_get(other->dependencies, d); | |
| 979 | ||
| 980 | r = hashmap_reserve(deps, hashmap_size(other_deps)); | |
| 981 | if (r < 0) | |
| 982 | return r; | |
| 983 | } | |
| 984 | ||
| 985 | return 0; | |
| 986 | } | |
| 987 | ||
| 988 | static bool unit_should_warn_about_dependency(UnitDependency dependency) { | |
| 989 | /* Only warn about some dependency types */ | |
| 990 | return IN_SET(dependency, | |
| 991 | UNIT_CONFLICTS, | |
| 992 | UNIT_CONFLICTED_BY, | |
| 993 | UNIT_BEFORE, | |
| 994 | UNIT_AFTER, | |
| 995 | UNIT_ON_SUCCESS, | |
| 996 | UNIT_ON_FAILURE, | |
| 997 | UNIT_TRIGGERS, | |
| 998 | UNIT_TRIGGERED_BY); | |
| 999 | } | |
| 1000 | ||
| 1001 | static int unit_per_dependency_type_hashmap_update( | |
| 1002 | Hashmap *per_type, | |
| 1003 | Unit *other, | |
| 1004 | UnitDependencyMask origin_mask, | |
| 1005 | UnitDependencyMask destination_mask) { | |
| 1006 | ||
| 1007 | UnitDependencyInfo info; | |
| 1008 | int r; | |
| 1009 | ||
| 1010 | assert(other); | |
| 1011 | assert_cc(sizeof(void*) == sizeof(info)); | |
| 1012 | ||
| 1013 | /* Acquire the UnitDependencyInfo entry for the Unit* we are interested in, and update it if it | |
| 1014 | * exists, or insert it anew if not. */ | |
| 1015 | ||
| 1016 | info.data = hashmap_get(per_type, other); | |
| 1017 | if (info.data) { | |
| 1018 | /* Entry already exists. Add in our mask. */ | |
| 1019 | ||
| 1020 | if (FLAGS_SET(origin_mask, info.origin_mask) && | |
| 1021 | FLAGS_SET(destination_mask, info.destination_mask)) | |
| 1022 | return 0; /* NOP */ | |
| 1023 | ||
| 1024 | info.origin_mask |= origin_mask; | |
| 1025 | info.destination_mask |= destination_mask; | |
| 1026 | ||
| 1027 | r = hashmap_update(per_type, other, info.data); | |
| 1028 | } else { | |
| 1029 | info = (UnitDependencyInfo) { | |
| 1030 | .origin_mask = origin_mask, | |
| 1031 | .destination_mask = destination_mask, | |
| 1032 | }; | |
| 1033 | ||
| 1034 | r = hashmap_put(per_type, other, info.data); | |
| 1035 | } | |
| 1036 | if (r < 0) | |
| 1037 | return r; | |
| 1038 | ||
| 1039 | return 1; | |
| 1040 | } | |
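
This helper depends on `UnitDependencyInfo` fitting in a pointer (that is what the `assert_cc()` above verifies), so both dependency masks can be stored directly as a hashmap value with no extra allocation. A self-contained sketch of the packing trick, with a hypothetical `Info` union and 16-bit masks so the struct fits a pointer on 32-bit targets too:

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef union Info {
        void *data;                   /* what is actually stored in the hashmap */
        struct {
                uint16_t origin_mask; /* 16-bit so the struct fits any pointer */
                uint16_t destination_mask;
        };
} Info;

int main(void) {
        /* The whole union must not exceed a pointer, as assert_cc() checks above. */
        static_assert(sizeof(Info) == sizeof(void*), "Info must stay pointer-sized");

        Info info = { .origin_mask = 0x1, .destination_mask = 0x4 };

        /* Round-trip through a plain void*, exactly as a hashmap value would. */
        void *stored = info.data;
        Info loaded = { .data = stored };

        printf("origin=%#x destination=%#x\n",
               (unsigned) loaded.origin_mask, (unsigned) loaded.destination_mask);
        return 0;
}
```
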
| 1041 | ||
| 1042 | static void unit_merge_dependencies(Unit *u, Unit *other) { | |
| 1043 | Hashmap *deps; | |
| 1044 | void *dt; /* Actually of type UnitDependency, except that we don't bother casting it here, | |
| 1045 | * since the hashmaps all want it as void pointer. */ | |
| 1046 | ||
| 1047 | assert(u); | |
| 1048 | assert(other); | |
| 1049 | ||
| 1050 | if (u == other) | |
| 1051 | return; | |
| 1052 | ||
| 1053 | /* First, remove the dependency on 'other'. */ | |
| 1054 | HASHMAP_FOREACH_KEY(deps, dt, u->dependencies) { | |
| 1055 | if (hashmap_remove(deps, other) && unit_should_warn_about_dependency(UNIT_DEPENDENCY_FROM_PTR(dt))) | |
| 1056 | log_unit_warning(u, "Dependency %s=%s is dropped, as %s is merged into %s.", | |
| 1057 | unit_dependency_to_string(UNIT_DEPENDENCY_FROM_PTR(dt)), | |
| 1058 | other->id, other->id, u->id); | |
| 1059 | ||
| 1060 | if (hashmap_isempty(deps)) | |
| 1061 | hashmap_free(hashmap_remove(u->dependencies, dt)); | |
| 1062 | } | |
| 1063 | ||
| 1064 | for (;;) { | |
| 1065 | _cleanup_hashmap_free_ Hashmap *other_deps = NULL; | |
| 1066 | UnitDependencyInfo di_back; | |
| 1067 | Unit *back; | |
| 1068 | ||
| 1069 | /* Let's focus on one dependency type at a time, that 'other' has defined. */ | |
| 1070 | other_deps = hashmap_steal_first_key_and_value(other->dependencies, &dt); | |
| 1071 | if (!other_deps) | |
| 1072 | break; /* done! */ | |
| 1073 | ||
| 1074 | deps = hashmap_get(u->dependencies, dt); | |
| 1075 | ||
| 1076 | /* Now iterate through all dependencies of this dependency type, of 'other'. We refer to the | |
| 1077 | * referenced units as 'back'. */ | |
| 1078 | HASHMAP_FOREACH_KEY(di_back.data, back, other_deps) { | |
| 1079 | Hashmap *back_deps; | |
| 1080 | void *back_dt; | |
| 1081 | ||
| 1082 | if (back == u) { | |
| 1083 | /* This is a dependency pointing back to the unit we want to merge with? | |
| 1084 | * Suppress it (but warn) */ | |
| 1085 | if (unit_should_warn_about_dependency(UNIT_DEPENDENCY_FROM_PTR(dt))) | |
| 1086 | log_unit_warning(u, "Dependency %s=%s in %s is dropped, as %s is merged into %s.", | |
| 1087 | unit_dependency_to_string(UNIT_DEPENDENCY_FROM_PTR(dt)), | |
| 1088 | u->id, other->id, other->id, u->id); | |
| 1089 | ||
| 1090 | hashmap_remove(other_deps, back); | |
| 1091 | continue; | |
| 1092 | } | |
| 1093 | ||
| 1094 | /* Now iterate through all deps of 'back', and fix the ones pointing to 'other' to | |
| 1095 | * point to 'u' instead. */ | |
| 1096 | HASHMAP_FOREACH_KEY(back_deps, back_dt, back->dependencies) { | |
| 1097 | UnitDependencyInfo di_move; | |
| 1098 | ||
| 1099 | di_move.data = hashmap_remove(back_deps, other); | |
| 1100 | if (!di_move.data) | |
| 1101 | continue; | |
| 1102 | ||
| 1103 | assert_se(unit_per_dependency_type_hashmap_update( | |
| 1104 | back_deps, | |
| 1105 | u, | |
| 1106 | di_move.origin_mask, | |
| 1107 | di_move.destination_mask) >= 0); | |
| 1108 | } | |
| 1109 | ||
| 1110 | /* If the target unit already has dependencies of this type, merge each entry individually. */ | |
| 1111 | if (deps) | |
| 1112 | assert_se(unit_per_dependency_type_hashmap_update( | |
| 1113 | deps, | |
| 1114 | back, | |
| 1115 | di_back.origin_mask, | |
| 1116 | di_back.destination_mask) >= 0); | |
| 1117 | } | |
| 1118 | ||
| 1119 | /* Now all references towards 'other' of the current type 'dt' are corrected to point to 'u'. | |
| 1120 | * Let's now move the deps of type 'dt' from 'other' to 'u'. If 'u' does not have | |
| 1121 | * dependencies of this type yet, move them over per type wholesale. */ | |
| 1122 | if (!deps) | |
| 1123 | assert_se(hashmap_put(u->dependencies, dt, TAKE_PTR(other_deps)) >= 0); | |
| 1124 | } | |
| 1125 | ||
| 1126 | other->dependencies = hashmap_free(other->dependencies); | |
| 1127 | ||
| 1128 | u->dependency_generation++; | |
| 1129 | other->dependency_generation++; | |
| 1130 | } | |
| 1131 | ||
| 1132 | int unit_merge(Unit *u, Unit *other) { | |
| 1133 | int r; | |
| 1134 | ||
| 1135 | assert(u); | |
| 1136 | assert(other); | |
| 1137 | assert(u->manager == other->manager); | |
| 1138 | assert(u->type != _UNIT_TYPE_INVALID); | |
| 1139 | ||
| 1140 | other = unit_follow_merge(other); | |
| 1141 | ||
| 1142 | if (other == u) | |
| 1143 | return 0; | |
| 1144 | ||
| 1145 | if (u->type != other->type) | |
| 1146 | return -EINVAL; | |
| 1147 | ||
| 1148 | if (!unit_type_may_alias(u->type)) /* Merging only applies to unit names that support aliases */ | |
| 1149 | return -EEXIST; | |
| 1150 | ||
| 1151 | if (!IN_SET(other->load_state, UNIT_STUB, UNIT_NOT_FOUND)) | |
| 1152 | return -EEXIST; | |
| 1153 | ||
| 1154 | if (!streq_ptr(u->instance, other->instance)) | |
| 1155 | return -EINVAL; | |
| 1156 | ||
| 1157 | if (other->job) | |
| 1158 | return -EEXIST; | |
| 1159 | ||
| 1160 | if (other->nop_job) | |
| 1161 | return -EEXIST; | |
| 1162 | ||
| 1163 | if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other))) | |
| 1164 | return -EEXIST; | |
| 1165 | ||
| 1166 | /* Make reservations to ensure merge_dependencies() won't fail. We don't roll back reservations | |
| 1167 | * on failure, since we have no way to undo them. But a reservation is not a leak. */ | |
| 1168 | r = unit_reserve_dependencies(u, other); | |
| 1169 | if (r < 0) | |
| 1170 | return r; | |
| 1171 | ||
| 1172 | /* Redirect all references */ | |
| 1173 | while (other->refs_by_target) | |
| 1174 | unit_ref_set(other->refs_by_target, other->refs_by_target->source, u); | |
| 1175 | ||
| 1176 | /* Merge dependencies */ | |
| 1177 | unit_merge_dependencies(u, other); | |
| 1178 | ||
| 1179 | /* Merge names. It is better to do that after merging deps, otherwise the log message contains n/a. */ | |
| 1180 | r = unit_merge_names(u, other); | |
| 1181 | if (r < 0) | |
| 1182 | return r; | |
| 1183 | ||
| 1184 | other->load_state = UNIT_MERGED; | |
| 1185 | other->merged_into = u; | |
| 1186 | ||
| 1187 | if (!u->activation_details) | |
| 1188 | u->activation_details = activation_details_ref(other->activation_details); | |
| 1189 | ||
| 1190 | /* If there is still some data attached to the other node, we | |
| 1191 | * don't need it anymore, and can free it. */ | |
| 1192 | if (other->load_state != UNIT_STUB) | |
| 1193 | if (UNIT_VTABLE(other)->done) | |
| 1194 | UNIT_VTABLE(other)->done(other); | |
| 1195 | ||
| 1196 | unit_add_to_dbus_queue(u); | |
| 1197 | unit_add_to_cleanup_queue(other); | |
| 1198 | ||
| 1199 | return 0; | |
| 1200 | } | |
| 1201 | ||
| 1202 | int unit_merge_by_name(Unit *u, const char *name) { | |
| 1203 | _cleanup_free_ char *s = NULL; | |
| 1204 | Unit *other; | |
| 1205 | int r; | |
| 1206 | ||
| 1207 | /* Either add name to u, or if a unit with name already exists, merge it with u. | |
| 1208 | * If name is a template, do the same for name@instance, where instance is u's instance. */ | |
| 1209 | ||
| 1210 | assert(u); | |
| 1211 | assert(name); | |
| 1212 | ||
| 1213 | if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) { | |
| 1214 | if (!u->instance) | |
| 1215 | return -EINVAL; | |
| 1216 | ||
| 1217 | r = unit_name_replace_instance(name, u->instance, &s); | |
| 1218 | if (r < 0) | |
| 1219 | return r; | |
| 1220 | ||
| 1221 | name = s; | |
| 1222 | } | |
| 1223 | ||
| 1224 | other = manager_get_unit(u->manager, name); | |
| 1225 | if (other) | |
| 1226 | return unit_merge(u, other); | |
| 1227 | ||
| 1228 | return unit_add_name(u, name); | |
| 1229 | } | |
| 1230 | ||
| 1231 | Unit* unit_follow_merge(Unit *u) { | |
| 1232 | assert(u); | |
| 1233 | ||
| 1234 | while (u->load_state == UNIT_MERGED) | |
| 1235 | assert_se(u = u->merged_into); | |
| 1236 | ||
| 1237 | return u; | |
| 1238 | } | |
| 1239 | ||
| 1240 | int unit_add_exec_dependencies(Unit *u, ExecContext *c) { | |
| 1241 | int r; | |
| 1242 | ||
| 1243 | assert(u); | |
| 1244 | assert(c); | |
| 1245 | ||
| 1246 | /* Unlike unit_add_dependency() or friends, this always returns 0 on success. */ | |
| 1247 | ||
| 1248 | if (c->working_directory) { | |
| 1249 | r = unit_add_mounts_for( | |
| 1250 | u, | |
| 1251 | c->working_directory, | |
| 1252 | UNIT_DEPENDENCY_FILE, | |
| 1253 | c->working_directory_missing_ok ? UNIT_MOUNT_WANTS : UNIT_MOUNT_REQUIRES); | |
| 1254 | if (r < 0) | |
| 1255 | return r; | |
| 1256 | } | |
| 1257 | ||
| 1258 | if (c->root_directory) { | |
| 1259 | r = unit_add_mounts_for(u, c->root_directory, UNIT_DEPENDENCY_FILE, UNIT_MOUNT_WANTS); | |
| 1260 | if (r < 0) | |
| 1261 | return r; | |
| 1262 | } | |
| 1263 | ||
| 1264 | if (c->root_image) { | |
| 1265 | r = unit_add_mounts_for(u, c->root_image, UNIT_DEPENDENCY_FILE, UNIT_MOUNT_WANTS); | |
| 1266 | if (r < 0) | |
| 1267 | return r; | |
| 1268 | } | |
| 1269 | ||
| 1270 | for (ExecDirectoryType dt = 0; dt < _EXEC_DIRECTORY_TYPE_MAX; dt++) { | |
| 1271 | if (!u->manager->prefix[dt]) | |
| 1272 | continue; | |
| 1273 | ||
| 1274 | FOREACH_ARRAY(i, c->directories[dt].items, c->directories[dt].n_items) { | |
| 1275 | _cleanup_free_ char *p = NULL; | |
| 1276 | ||
| 1277 | p = path_join(u->manager->prefix[dt], i->path); | |
| 1278 | if (!p) | |
| 1279 | return -ENOMEM; | |
| 1280 | ||
| 1281 | r = unit_add_mounts_for(u, p, UNIT_DEPENDENCY_FILE, UNIT_MOUNT_REQUIRES); | |
| 1282 | if (r < 0) | |
| 1283 | return r; | |
| 1284 | } | |
| 1285 | } | |
| 1286 | ||
| 1287 | if (!MANAGER_IS_SYSTEM(u->manager)) | |
| 1288 | return 0; | |
| 1289 | ||
| 1290 | /* For the following three directory types we need write access, and /var/ is possibly on the root | |
| 1291 | * fs. Hence order after systemd-remount-fs.service, to ensure things are writable. */ | |
| 1292 | if (c->directories[EXEC_DIRECTORY_STATE].n_items > 0 || | |
| 1293 | c->directories[EXEC_DIRECTORY_CACHE].n_items > 0 || | |
| 1294 | c->directories[EXEC_DIRECTORY_LOGS].n_items > 0) { | |
| 1295 | r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_REMOUNT_FS_SERVICE, true, UNIT_DEPENDENCY_FILE); | |
| 1296 | if (r < 0) | |
| 1297 | return r; | |
| 1298 | } | |
| 1299 | ||
| 1300 | /* This must already be set in unit_patch_contexts(). */ | |
| 1301 | assert(c->private_var_tmp >= 0 && c->private_var_tmp < _PRIVATE_TMP_MAX); | |
| 1302 | ||
| 1303 | if (c->private_tmp == PRIVATE_TMP_CONNECTED) { | |
| 1304 | assert(c->private_var_tmp == PRIVATE_TMP_CONNECTED); | |
| 1305 | ||
| 1306 | r = unit_add_mounts_for(u, "/tmp/", UNIT_DEPENDENCY_FILE, UNIT_MOUNT_WANTS); | |
| 1307 | if (r < 0) | |
| 1308 | return r; | |
| 1309 | ||
| 1310 | r = unit_add_mounts_for(u, "/var/tmp/", UNIT_DEPENDENCY_FILE, UNIT_MOUNT_WANTS); | |
| 1311 | if (r < 0) | |
| 1312 | return r; | |
| 1313 | ||
| 1314 | r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_TMPFILES_SETUP_SERVICE, true, UNIT_DEPENDENCY_FILE); | |
| 1315 | if (r < 0) | |
| 1316 | return r; | |
| 1317 | ||
| 1318 | } else if (c->private_var_tmp == PRIVATE_TMP_DISCONNECTED && !exec_context_with_rootfs(c)) { | |
| 1319 | /* Even if PrivateTmp=disconnected, we still require the /var/tmp/ mountpoint to be present, | |
| 1320 | * i.e. /var/ needs to be mounted. See comments in unit_patch_contexts(). */ | |
| 1321 | r = unit_add_mounts_for(u, "/var/", UNIT_DEPENDENCY_FILE, UNIT_MOUNT_WANTS); | |
| 1322 | if (r < 0) | |
| 1323 | return r; | |
| 1324 | } | |
| 1325 | ||
| 1326 | if (c->root_image) { | |
| 1327 | /* We need to wait for /dev/loopX to appear when doing RootImage=, hence let's add an | |
| 1328 | * implicit dependency on udev */ | |
| 1329 | ||
| 1330 | r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_UDEVD_SERVICE, true, UNIT_DEPENDENCY_FILE); | |
| 1331 | if (r < 0) | |
| 1332 | return r; | |
| 1333 | } | |
| 1334 | ||
| 1335 | /* If syslog or kernel logging is requested (or log namespacing is), make sure our own logging daemon | |
| 1336 | * is run first. */ | |
| 1337 | if (c->log_namespace) { | |
| 1338 | static const struct { | |
| 1339 | const char *template; | |
| 1340 | UnitType type; | |
| 1341 | } deps[] = { | |
| 1342 | { "systemd-journald", UNIT_SOCKET, }, | |
| 1343 | { "systemd-journald-varlink", UNIT_SOCKET, }, | |
| 1344 | { "systemd-journald-sync", UNIT_SERVICE, }, | |
| 1345 | }; | |
| 1346 | ||
| 1347 | FOREACH_ELEMENT(i, deps) { | |
| 1348 | _cleanup_free_ char *unit = NULL; | |
| 1349 | ||
| 1350 | r = unit_name_build_from_type(i->template, c->log_namespace, i->type, &unit); | |
| 1351 | if (r < 0) | |
| 1352 | return r; | |
| 1353 | ||
| 1354 | r = unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_REQUIRES, unit, true, UNIT_DEPENDENCY_FILE); | |
| 1355 | if (r < 0) | |
| 1356 | return r; | |
| 1357 | } | |
| 1358 | } else if (IN_SET(c->std_output, EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE, | |
| 1359 | EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE) || | |
| 1360 | IN_SET(c->std_error, EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE, | |
| 1361 | EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE)) { | |
| 1362 | ||
| 1363 | r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_JOURNALD_SOCKET, true, UNIT_DEPENDENCY_FILE); | |
| 1364 | if (r < 0) | |
| 1365 | return r; | |
| 1366 | } | |
| 1367 | ||
| 1368 | return 0; | |
| 1369 | } | |
| 1370 | ||
| 1371 | const char* unit_description(Unit *u) { | |
| 1372 | assert(u); | |
| 1373 | ||
| 1374 | if (u->description) | |
| 1375 | return u->description; | |
| 1376 | ||
| 1377 | return strna(u->id); | |
| 1378 | } | |
| 1379 | ||
| 1380 | const char* unit_status_string(Unit *u, char **ret_combined_buffer) { | |
| 1381 | assert(u); | |
| 1382 | assert(u->id); | |
| 1383 | ||
| 1384 | /* Return u->id, u->description, or "{u->id} - {u->description}". | |
| 1385 | * Versions with u->description are only used if it is set. | |
| 1386 | * The last option is used if configured and the caller provided the 'ret_combined_buffer' | |
| 1387 | * pointer. | |
| 1388 | * | |
| 1389 | * Note that *ret_combined_buffer may be set to NULL. */ | |
| 1390 | ||
| 1391 | if (!u->description || | |
| 1392 | u->manager->status_unit_format == STATUS_UNIT_FORMAT_NAME || | |
| 1393 | (u->manager->status_unit_format == STATUS_UNIT_FORMAT_COMBINED && !ret_combined_buffer) || | |
| 1394 | streq(u->description, u->id)) { | |
| 1395 | ||
| 1396 | if (ret_combined_buffer) | |
| 1397 | *ret_combined_buffer = NULL; | |
| 1398 | return u->id; | |
| 1399 | } | |
| 1400 | ||
| 1401 | if (ret_combined_buffer) { | |
| 1402 | if (u->manager->status_unit_format == STATUS_UNIT_FORMAT_COMBINED) { | |
| 1403 | *ret_combined_buffer = strjoin(u->id, " - ", u->description); | |
| 1404 | if (*ret_combined_buffer) | |
| 1405 | return *ret_combined_buffer; | |
| 1406 | log_oom(); /* Fall back to ->description */ | |
| 1407 | } else | |
| 1408 | *ret_combined_buffer = NULL; | |
| 1409 | } | |
| 1410 | ||
| 1411 | return u->description; | |
| 1412 | } | |
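
The contract here is that the returned pointer may alias `*ret_combined_buffer`, so callers must keep that buffer alive (and free it) for as long as they use the result. A self-contained analogue of this borrowed-or-owned pattern, with a hypothetical `describe()` helper:

```c
#define _GNU_SOURCE /* for asprintf() */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Returns either a borrowed string or a freshly allocated one; *ret_buf tells
 * the caller what (if anything) to free, mirroring ret_combined_buffer. */
static const char* describe(const char *id, const char *desc, char **ret_buf) {
        *ret_buf = NULL;

        if (!desc || strcmp(id, desc) == 0)
                return id;       /* borrowed: nothing to free */

        if (asprintf(ret_buf, "%s - %s", id, desc) >= 0)
                return *ret_buf; /* owned: freed by the caller via *ret_buf */

        return desc;             /* OOM fallback: borrowed again */
}

int main(void) {
        char *buf;
        const char *s = describe("ssh.service", "OpenSSH server", &buf);

        printf("%s\n", s); /* "ssh.service - OpenSSH server" */
        free(buf);         /* safe: buf is NULL when a borrowed string was returned */
        return 0;
}
```
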
| 1413 | ||
| 1414 | /* Common implementation for multiple backends */ | |
| 1415 | int unit_load_fragment_and_dropin(Unit *u, bool fragment_required) { | |
| 1416 | int r; | |
| 1417 | ||
| 1418 | assert(u); | |
| 1419 | ||
| 1420 | /* Load a .{service,socket,...} file */ | |
| 1421 | r = unit_load_fragment(u); | |
| 1422 | if (r < 0) | |
| 1423 | return r; | |
| 1424 | ||
| 1425 | if (u->load_state == UNIT_MASKED) | |
| 1426 | return 0; | |
| 1427 | ||
| 1428 | if (u->load_state == UNIT_STUB) { | |
| 1429 | if (fragment_required) | |
| 1430 | return -ENOENT; | |
| 1431 | ||
| 1432 | u->load_state = UNIT_LOADED; | |
| 1433 | } | |
| 1434 | ||
| 1435 | u = unit_follow_merge(u); | |
| 1436 | ||
| 1437 | /* Load drop-in directory data. If u is an alias, we might be reloading the | |
| 1438 | * target unit needlessly. But we cannot be sure which drop-ins have already | |
| 1439 | * been loaded and which not, at least without doing complicated book-keeping, | |
| 1440 | * so let's always reread all drop-ins. */ | |
| 1441 | r = unit_load_dropin(u); | |
| 1442 | if (r < 0) | |
| 1443 | return r; | |
| 1444 | ||
| 1445 | if (u->source_path) { | |
| 1446 | struct stat st; | |
| 1447 | ||
| 1448 | if (stat(u->source_path, &st) >= 0) | |
| 1449 | u->source_mtime = timespec_load(&st.st_mtim); | |
| 1450 | else | |
| 1451 | u->source_mtime = 0; | |
| 1452 | } | |
| 1453 | ||
| 1454 | return 0; | |
| 1455 | } | |
| 1456 | ||
| 1457 | void unit_add_to_target_deps_queue(Unit *u) { | |
| 1458 | Manager *m = ASSERT_PTR(ASSERT_PTR(u)->manager); | |
| 1459 | ||
| 1460 | if (u->in_target_deps_queue) | |
| 1461 | return; | |
| 1462 | ||
| 1463 | LIST_PREPEND(target_deps_queue, m->target_deps_queue, u); | |
| 1464 | u->in_target_deps_queue = true; | |
| 1465 | } | |
| 1466 | ||
| 1467 | int unit_add_default_target_dependency(Unit *u, Unit *target) { | |
| 1468 | assert(u); | |
| 1469 | assert(target); | |
| 1470 | ||
| 1471 | if (target->type != UNIT_TARGET) | |
| 1472 | return 0; | |
| 1473 | ||
| 1474 | /* Only add the dependency if both units are loaded, so that | |
| 1475 | * the loop check below is reliable */ | |
| 1476 | if (u->load_state != UNIT_LOADED || | |
| 1477 | target->load_state != UNIT_LOADED) | |
| 1478 | return 0; | |
| 1479 | ||
| 1480 | /* If either side wants no automatic dependencies, then let's | |
| 1481 | * skip this */ | |
| 1482 | if (!u->default_dependencies || | |
| 1483 | !target->default_dependencies) | |
| 1484 | return 0; | |
| 1485 | ||
| 1486 | /* Don't create loops */ | |
| 1487 | if (unit_has_dependency(target, UNIT_ATOM_BEFORE, u)) | |
| 1488 | return 0; | |
| 1489 | ||
| 1490 | return unit_add_dependency(target, UNIT_AFTER, u, true, UNIT_DEPENDENCY_DEFAULT); | |
| 1491 | } | |
| 1492 | ||
| 1493 | static int unit_add_slice_dependencies(Unit *u) { | |
| 1494 | Unit *slice; | |
| 1495 | ||
| 1496 | assert(u); | |
| 1497 | ||
| 1498 | if (!UNIT_HAS_CGROUP_CONTEXT(u)) | |
| 1499 | return 0; | |
| 1500 | ||
| 1501 | /* Slice units are implicitly ordered against their parent slices (as this relationship is encoded in the | |
| 1502 | name), while all other units are ordered based on configuration (as in their case Slice= configures the | |
| 1503 | relationship). */ | |
| 1504 | UnitDependencyMask mask = u->type == UNIT_SLICE ? UNIT_DEPENDENCY_IMPLICIT : UNIT_DEPENDENCY_FILE; | |
| 1505 | ||
| 1506 | slice = UNIT_GET_SLICE(u); | |
| 1507 | if (slice) { | |
| 1508 | if (!IN_SET(slice->freezer_state, FREEZER_RUNNING, FREEZER_THAWING)) | |
| 1509 | u->freezer_state = FREEZER_FROZEN_BY_PARENT; | |
| 1510 | ||
| 1511 | return unit_add_two_dependencies(u, UNIT_AFTER, UNIT_REQUIRES, slice, true, mask); | |
| 1512 | } | |
| 1513 | ||
| 1514 | if (unit_has_name(u, SPECIAL_ROOT_SLICE)) | |
| 1515 | return 0; | |
| 1516 | ||
| 1517 | return unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_REQUIRES, SPECIAL_ROOT_SLICE, true, mask); | |
| 1518 | } | |
| 1519 | ||
| 1520 | static int unit_add_mount_dependencies(Unit *u) { | |
| 1521 | bool changed = false; | |
| 1522 | int r; | |
| 1523 | ||
| 1524 | assert(u); | |
| 1525 | ||
| 1526 | for (UnitMountDependencyType t = 0; t < _UNIT_MOUNT_DEPENDENCY_TYPE_MAX; ++t) { | |
| 1527 | UnitDependencyInfo di; | |
| 1528 | const char *path; | |
| 1529 | ||
| 1530 | HASHMAP_FOREACH_KEY(di.data, path, u->mounts_for[t]) { | |
| 1531 | ||
| 1532 | char prefix[strlen(ASSERT_PTR(path)) + 1]; | |
| 1533 | ||
| 1534 | PATH_FOREACH_PREFIX_MORE(prefix, path) { | |
| 1535 | _cleanup_free_ char *p = NULL; | |
| 1536 | Unit *m; | |
| 1537 | ||
| 1538 | r = unit_name_from_path(prefix, ".mount", &p); | |
| 1539 | if (r == -EINVAL) | |
| 1540 | continue; /* If the path cannot be converted to a mount unit name, | |
| 1541 | * then it's not manageable as a unit by systemd, and | |
| 1542 | * hence we don't need a dependency on it. Let's thus | |
| 1543 | * silently ignore the issue. */ | |
| 1544 | if (r < 0) | |
| 1545 | return r; | |
| 1546 | ||
| 1547 | m = manager_get_unit(u->manager, p); | |
| 1548 | if (!m) { | |
| 1549 | /* Make sure to load the mount unit if it exists. If so the | |
| 1550 | * dependencies on this unit will be added later during the loading | |
| 1551 | * of the mount unit. */ | |
| 1552 | (void) manager_load_unit_prepare( | |
| 1553 | u->manager, | |
| 1554 | p, | |
| 1555 | /* path= */NULL, | |
| 1556 | /* e= */NULL, | |
| 1557 | &m); | |
| 1558 | continue; | |
| 1559 | } | |
| 1560 | if (m == u) | |
| 1561 | continue; | |
| 1562 | ||
| 1563 | if (m->load_state != UNIT_LOADED) | |
| 1564 | continue; | |
| 1565 | ||
| 1566 | r = unit_add_dependency( | |
| 1567 | u, | |
| 1568 | UNIT_AFTER, | |
| 1569 | m, | |
| 1570 | /* add_reference= */ true, | |
| 1571 | di.origin_mask); | |
| 1572 | if (r < 0) | |
| 1573 | return r; | |
| 1574 | changed = changed || r > 0; | |
| 1575 | ||
| 1576 | if (m->fragment_path) { | |
| 1577 | r = unit_add_dependency( | |
| 1578 | u, | |
| 1579 | unit_mount_dependency_type_to_dependency_type(t), | |
| 1580 | m, | |
| 1581 | /* add_reference= */ true, | |
| 1582 | di.origin_mask); | |
| 1583 | if (r < 0) | |
| 1584 | return r; | |
| 1585 | changed = changed || r > 0; | |
| 1586 | } | |
| 1587 | } | |
| 1588 | } | |
| 1589 | } | |
| 1590 | ||
| 1591 | return changed; | |
| 1592 | } | |
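| ||
| | /* Editor's note, an illustrative walk-through of the prefix loop above: for a unit with | |
| | * RequiresMountsFor=/var/lib/foo, PATH_FOREACH_PREFIX_MORE visits /, /var, /var/lib and | |
| | * /var/lib/foo, which unit_name_from_path() turns into -.mount, var.mount, var-lib.mount and | |
| | * var-lib-foo.mount. Each of these that exists and is loaded gains an After= dependency, plus a | |
| | * dependency of the type matching the mount dependency kind if it has a fragment file. */ | |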
| 1593 | ||
| 1594 | static int unit_add_oomd_dependencies(Unit *u) { | |
| 1595 | CGroupContext *c; | |
| 1596 | CGroupMask mask; | |
| 1597 | int r; | |
| 1598 | ||
| 1599 | assert(u); | |
| 1600 | ||
| 1601 | if (!u->default_dependencies) | |
| 1602 | return 0; | |
| 1603 | ||
| 1604 | c = unit_get_cgroup_context(u); | |
| 1605 | if (!c) | |
| 1606 | return 0; | |
| 1607 | ||
| 1608 | bool wants_oomd = c->moom_swap == MANAGED_OOM_KILL || c->moom_mem_pressure == MANAGED_OOM_KILL; | |
| 1609 | if (!wants_oomd) | |
| 1610 | return 0; | |
| 1611 | ||
| 1612 | r = cg_mask_supported(&mask); | |
| 1613 | if (r < 0) | |
| 1614 | return log_debug_errno(r, "Failed to determine supported controllers: %m"); | |
| 1615 | ||
| 1616 | if (!FLAGS_SET(mask, CGROUP_MASK_MEMORY)) | |
| 1617 | return 0; | |
| 1618 | ||
| 1619 | return unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_WANTS, "systemd-oomd.service", true, UNIT_DEPENDENCY_FILE); | |
| 1620 | } | |
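| ||
| | /* Editor's note, a hypothetical configuration that triggers the dependency above: | |
| | * | |
| | *     [Service] | |
| | *     ManagedOOMMemoryPressure=kill | |
| | * | |
| | * With default dependencies enabled and the memory controller available, such a unit gains | |
| | * After= and Wants= on systemd-oomd.service. */ | |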
| 1621 | ||
| 1622 | static int unit_add_startup_units(Unit *u) { | |
| 1623 | if (!unit_has_startup_cgroup_constraints(u)) | |
| 1624 | return 0; | |
| 1625 | ||
| 1626 | return set_ensure_put(&u->manager->startup_units, NULL, u); | |
| 1627 | } | |
| 1628 | ||
| 1629 | static const struct { | |
| 1630 | UnitDependencyAtom atom; | |
| 1631 | size_t job_mode_offset; | |
| 1632 | const char *dependency_name; | |
| 1633 | const char *job_mode_setting_name; | |
| 1634 | } on_termination_settings[] = { | |
| 1635 | { UNIT_ATOM_ON_SUCCESS, offsetof(Unit, on_success_job_mode), "OnSuccess=", "OnSuccessJobMode=" }, | |
| 1636 | { UNIT_ATOM_ON_FAILURE, offsetof(Unit, on_failure_job_mode), "OnFailure=", "OnFailureJobMode=" }, | |
| 1637 | }; | |
| 1638 | ||
| 1639 | static int unit_validate_on_termination_job_modes(Unit *u) { | |
| 1640 | assert(u); | |
| 1641 | ||
| 1642 | /* Verify that if On{Success,Failure}JobMode=isolate, only one unit gets specified. */ | |
| 1643 | ||
| 1644 | FOREACH_ELEMENT(setting, on_termination_settings) { | |
| 1645 | JobMode job_mode = *(JobMode*) ((uint8_t*) u + setting->job_mode_offset); | |
| 1646 | ||
| 1647 | if (job_mode != JOB_ISOLATE) | |
| 1648 | continue; | |
| 1649 | ||
| 1650 | Unit *other, *found = NULL; | |
| 1651 | UNIT_FOREACH_DEPENDENCY(other, u, setting->atom) { | |
| 1652 | if (!found) | |
| 1653 | found = other; | |
| 1654 | else if (found != other) | |
| 1655 | return log_unit_error_errno(u, SYNTHETIC_ERRNO(ENOEXEC), | |
| 1656 | "More than one %s dependencies specified but %sisolate set. Refusing.", | |
| 1657 | setting->dependency_name, setting->job_mode_setting_name); | |
| 1658 | } | |
| 1659 | } | |
| 1660 | ||
| 1661 | return 0; | |
| 1662 | } | |
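| ||
| | /* Editor's note, a hypothetical configuration rejected by the check above: | |
| | * | |
| | *     [Unit] | |
| | *     OnFailure=rescue.target emergency.target | |
| | *     OnFailureJobMode=isolate | |
| | * | |
| | * An isolate job replaces all other activity, so listing more than one OnFailure= unit with this | |
| | * job mode is ambiguous and refused at load time. */ | |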
| 1663 | ||
| 1664 | int unit_load(Unit *u) { | |
| 1665 | int r; | |
| 1666 | ||
| 1667 | assert(u); | |
| 1668 | ||
| 1669 | if (u->in_load_queue) { | |
| 1670 | LIST_REMOVE(load_queue, u->manager->load_queue, u); | |
| 1671 | u->in_load_queue = false; | |
| 1672 | } | |
| 1673 | ||
| 1674 | if (u->type == _UNIT_TYPE_INVALID) | |
| 1675 | return -EINVAL; | |
| 1676 | ||
| 1677 | if (u->load_state != UNIT_STUB) | |
| 1678 | return 0; | |
| 1679 | ||
| 1680 | if (u->transient_file) { | |
| 1681 | /* Finalize transient file: if this is a transient unit file, as soon as we reach unit_load() the setup | |
| 1682 | * is complete, hence let's synchronize the unit file we just wrote to disk. */ | |
| 1683 | ||
| 1684 | r = fflush_and_check(u->transient_file); | |
| 1685 | if (r < 0) | |
| 1686 | goto fail; | |
| 1687 | ||
| 1688 | u->transient_file = safe_fclose(u->transient_file); | |
| 1689 | u->fragment_mtime = now(CLOCK_REALTIME); | |
| 1690 | } | |
| 1691 | ||
| 1692 | r = UNIT_VTABLE(u)->load(u); | |
| 1693 | if (r < 0) | |
| 1694 | goto fail; | |
| 1695 | ||
| 1696 | assert(u->load_state != UNIT_STUB); | |
| 1697 | ||
| 1698 | if (u->load_state == UNIT_LOADED) { | |
| 1699 | unit_add_to_target_deps_queue(u); | |
| 1700 | ||
| 1701 | r = unit_add_slice_dependencies(u); | |
| 1702 | if (r < 0) | |
| 1703 | goto fail; | |
| 1704 | ||
| 1705 | r = unit_add_mount_dependencies(u); | |
| 1706 | if (r < 0) | |
| 1707 | goto fail; | |
| 1708 | ||
| 1709 | r = unit_add_oomd_dependencies(u); | |
| 1710 | if (r < 0) | |
| 1711 | goto fail; | |
| 1712 | ||
| 1713 | r = unit_add_startup_units(u); | |
| 1714 | if (r < 0) | |
| 1715 | goto fail; | |
| 1716 | ||
| 1717 | r = unit_validate_on_termination_job_modes(u); | |
| 1718 | if (r < 0) | |
| 1719 | goto fail; | |
| 1720 | ||
| 1721 | if (u->job_running_timeout != USEC_INFINITY && u->job_running_timeout > u->job_timeout) | |
| 1722 | log_unit_warning(u, "JobRunningTimeoutSec= is greater than JobTimeoutSec=, it has no effect."); | |
| 1723 | ||
| 1724 | /* We finished loading, let's ensure our parents recalculate the members mask */ | |
| 1725 | unit_invalidate_cgroup_members_masks(u); | |
| 1726 | } | |
| 1727 | ||
| 1728 | assert((u->load_state != UNIT_MERGED) == !u->merged_into); | |
| 1729 | ||
| 1730 | unit_add_to_dbus_queue(unit_follow_merge(u)); | |
| 1731 | unit_add_to_gc_queue(u); | |
| 1732 | (void) manager_varlink_send_managed_oom_update(u); | |
| 1733 | ||
| 1734 | return 0; | |
| 1735 | ||
| 1736 | fail: | |
| 1737 | /* We convert ENOEXEC errors to the UNIT_BAD_SETTING load state here. Configuration parsing code | |
| 1738 | * should hence return ENOEXEC to ensure units are placed in this state after loading. */ | |
| 1739 | ||
| 1740 | u->load_state = u->load_state == UNIT_STUB ? UNIT_NOT_FOUND : | |
| 1741 | r == -ENOEXEC ? UNIT_BAD_SETTING : | |
| 1742 | UNIT_ERROR; | |
| 1743 | u->load_error = r; | |
| 1744 | ||
| 1745 | /* Record the timestamp on the cache, so that if the cache gets updated between now and the next time | |
| 1746 | * an attempt is made to load this unit, we know we need to check again. */ | |
| 1747 | if (u->load_state == UNIT_NOT_FOUND) | |
| 1748 | u->fragment_not_found_timestamp_hash = u->manager->unit_cache_timestamp_hash; | |
| 1749 | ||
| 1750 | unit_add_to_dbus_queue(u); | |
| 1751 | unit_add_to_gc_queue(u); | |
| 1752 | ||
| 1753 | return log_unit_debug_errno(u, r, "Failed to load configuration: %m"); | |
| 1754 | } | |
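| ||
| | /* Editor's note, how the fail path above maps errors to load states: if the unit never left | |
| | * UNIT_STUB (e.g. no fragment was found) it becomes UNIT_NOT_FOUND; otherwise -ENOEXEC marks it | |
| | * UNIT_BAD_SETTING; any other error yields UNIT_ERROR. */ | |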
| 1755 | ||
| 1756 | _printf_(7, 8) | |
| 1757 | static int log_unit_internal(void *userdata, int level, int error, const char *file, int line, const char *func, const char *format, ...) { | |
| 1758 | Unit *u = userdata; | |
| 1759 | va_list ap; | |
| 1760 | int r; | |
| 1761 | ||
| 1762 | if (u && !unit_log_level_test(u, level)) | |
| 1763 | return -ERRNO_VALUE(error); | |
| 1764 | ||
| 1765 | va_start(ap, format); | |
| 1766 | if (u) | |
| 1767 | r = log_object_internalv(level, error, file, line, func, | |
| 1768 | unit_log_field(u), | |
| 1769 | u->id, | |
| 1770 | unit_invocation_log_field(u), | |
| 1771 | u->invocation_id_string, | |
| 1772 | format, ap); | |
| 1773 | else | |
| 1774 | r = log_internalv(level, error, file, line, func, format, ap); | |
| 1775 | va_end(ap); | |
| 1776 | ||
| 1777 | return r; | |
| 1778 | } | |
| 1779 | ||
| 1780 | static bool unit_test_condition(Unit *u) { | |
| 1781 | _cleanup_strv_free_ char **env = NULL; | |
| 1782 | int r; | |
| 1783 | ||
| 1784 | assert(u); | |
| 1785 | ||
| 1786 | dual_timestamp_now(&u->condition_timestamp); | |
| 1787 | ||
| 1788 | r = manager_get_effective_environment(u->manager, &env); | |
| 1789 | if (r < 0) { | |
| 1790 | log_unit_error_errno(u, r, "Failed to determine effective environment: %m"); | |
| 1791 | u->condition_result = true; | |
| 1792 | } else | |
| 1793 | u->condition_result = condition_test_list( | |
| 1794 | u->conditions, | |
| 1795 | env, | |
| 1796 | condition_type_to_string, | |
| 1797 | log_unit_internal, | |
| 1798 | u); | |
| 1799 | ||
| 1800 | unit_add_to_dbus_queue(u); | |
| 1801 | return u->condition_result; | |
| 1802 | } | |
| 1803 | ||
| 1804 | static bool unit_test_assert(Unit *u) { | |
| 1805 | _cleanup_strv_free_ char **env = NULL; | |
| 1806 | int r; | |
| 1807 | ||
| 1808 | assert(u); | |
| 1809 | ||
| 1810 | dual_timestamp_now(&u->assert_timestamp); | |
| 1811 | ||
| 1812 | r = manager_get_effective_environment(u->manager, &env); | |
| 1813 | if (r < 0) { | |
| 1814 | log_unit_error_errno(u, r, "Failed to determine effective environment: %m"); | |
| 1815 | u->assert_result = CONDITION_ERROR; | |
| 1816 | } else | |
| 1817 | u->assert_result = condition_test_list( | |
| 1818 | u->asserts, | |
| 1819 | env, | |
| 1820 | assert_type_to_string, | |
| 1821 | log_unit_internal, | |
| 1822 | u); | |
| 1823 | ||
| 1824 | unit_add_to_dbus_queue(u); | |
| 1825 | return u->assert_result; | |
| 1826 | } | |
| 1827 | ||
| 1828 | void unit_status_printf( | |
| 1829 | Unit *u, | |
| 1830 | StatusType status_type, | |
| 1831 | const char *status, | |
| 1832 | const char *format, | |
| 1833 | const char *ident) { | |
| 1834 | ||
| 1835 | assert(u); | |
| 1836 | ||
| 1837 | if (log_get_show_color()) { | |
| 1838 | if (u->manager->status_unit_format == STATUS_UNIT_FORMAT_COMBINED && strchr(ident, ' ')) | |
| 1839 | ident = strjoina(ANSI_HIGHLIGHT, u->id, ANSI_NORMAL, " - ", u->description); | |
| 1840 | else | |
| 1841 | ident = strjoina(ANSI_HIGHLIGHT, ident, ANSI_NORMAL); | |
| 1842 | } | |
| 1843 | ||
| 1844 | DISABLE_WARNING_FORMAT_NONLITERAL; | |
| 1845 | manager_status_printf(u->manager, status_type, status, format, ident); | |
| 1846 | REENABLE_WARNING; | |
| 1847 | } | |
| 1848 | ||
| 1849 | int unit_test_start_limit(Unit *u) { | |
| 1850 | const char *reason; | |
| 1851 | ||
| 1852 | assert(u); | |
| 1853 | ||
| 1854 | if (ratelimit_below(&u->start_ratelimit)) { | |
| 1855 | u->start_limit_hit = false; | |
| 1856 | return 0; | |
| 1857 | } | |
| 1858 | ||
| 1859 | log_unit_warning(u, "Start request repeated too quickly."); | |
| 1860 | u->start_limit_hit = true; | |
| 1861 | ||
| 1862 | reason = strjoina("unit ", u->id, " failed"); | |
| 1863 | ||
| 1864 | emergency_action( | |
| 1865 | u->manager, | |
| 1866 | u->start_limit_action, | |
| 1867 | EMERGENCY_ACTION_IS_WATCHDOG|EMERGENCY_ACTION_WARN|EMERGENCY_ACTION_SLEEP_5S, | |
| 1868 | u->reboot_arg, | |
| 1869 | /* exit_status= */ -1, | |
| 1870 | reason); | |
| 1871 | ||
| 1872 | return -ECANCELED; | |
| 1873 | } | |
| 1874 | ||
| 1875 | static bool unit_verify_deps(Unit *u) { | |
| 1876 | Unit *other; | |
| 1877 | ||
| 1878 | assert(u); | |
| 1879 | ||
| 1880 | /* Checks whether all BindsTo= dependencies of this unit are fulfilled, but only those that are | |
| 1881 | * also combined with After=. We do not check Requires= or Requisite= here, as they should only | |
| 1882 | * have an effect on job processing, but none afterwards. We don't check BindsTo= dependencies | |
| 1883 | * that are not used in conjunction with After=, as for them any such check would make things | |
| 1884 | * entirely racy. */ | |
| 1885 | ||
| 1886 | UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_CANNOT_BE_ACTIVE_WITHOUT) { | |
| 1887 | ||
| 1888 | if (!unit_has_dependency(u, UNIT_ATOM_AFTER, other)) | |
| 1889 | continue; | |
| 1890 | ||
| 1891 | if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(other))) { | |
| 1892 | log_unit_notice(u, "Bound to unit %s, but unit isn't active.", other->id); | |
| 1893 | return false; | |
| 1894 | } | |
| 1895 | } | |
| 1896 | ||
| 1897 | return true; | |
| 1898 | } | |
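| ||
| | /* Editor's note, an illustrative case for the check above (hypothetical units): a.service with | |
| | * both BindsTo=b.service and After=b.service is refused here while b.service is not active or | |
| | * reloading; with BindsTo= alone (no After=), the check is skipped as it would be racy. */ | |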
| 1899 | ||
| 1900 | /* Errors that aren't really errors: | |
| 1901 | * -EALREADY: Unit is already started. | |
| 1902 | * -ECOMM: Condition failed. | |
| 1903 | * -EAGAIN: An operation is already in progress. Retry later. | |
| 1904 | * | |
| 1905 | * Errors that are real errors: | |
| 1906 | * -EBADR: This unit type does not support starting. | |
| 1907 | * -ECANCELED: Start limit hit, too many requests for now. | |
| 1908 | * -EPROTO: Assert failed. | |
| 1909 | * -EINVAL: Unit not loaded. | |
| 1910 | * -EOPNOTSUPP: Unit type not supported. | |
| 1911 | * -ENOLINK: The necessary dependencies are not fulfilled. | |
| 1912 | * -ESTALE: This unit has been started before and can't be started a second time. | |
| 1913 | * -EDEADLK: This unit is frozen. | |
| 1914 | * -ENOENT: This is a triggering unit and the unit to trigger is not loaded. | |
| 1915 | * -ETOOMANYREFS: The hard concurrency limit of at least one of the slices the unit is contained in has been reached. | |
| 1916 | */ | |
| 1917 | int unit_start(Unit *u, ActivationDetails *details) { | |
| 1918 | UnitActiveState state; | |
| 1919 | Unit *following; | |
| 1920 | int r; | |
| 1921 | ||
| 1922 | assert(u); | |
| 1923 | ||
| 1924 | /* Let's hold off running start jobs for mount units when /proc/self/mountinfo monitor is ratelimited. */ | |
| 1925 | if (UNIT_VTABLE(u)->subsystem_ratelimited) { | |
| 1926 | r = UNIT_VTABLE(u)->subsystem_ratelimited(u->manager); | |
| 1927 | if (r < 0) | |
| 1928 | return r; | |
| 1929 | if (r > 0) | |
| 1930 | return -EAGAIN; | |
| 1931 | } | |
| 1932 | ||
| 1933 | /* If this is already started, then this will succeed. Note that this will even succeed if this unit | |
| 1934 | * is not startable by the user. This is relied on to detect when we need to wait for units and when | |
| 1935 | * waiting is finished. */ | |
| 1936 | state = unit_active_state(u); | |
| 1937 | if (UNIT_IS_ACTIVE_OR_RELOADING(state)) | |
| 1938 | return -EALREADY; | |
| 1939 | if (IN_SET(state, UNIT_DEACTIVATING, UNIT_MAINTENANCE)) | |
| 1940 | return -EAGAIN; | |
| 1941 | ||
| 1942 | /* Units that aren't loaded cannot be started */ | |
| 1943 | if (u->load_state != UNIT_LOADED) | |
| 1944 | return -EINVAL; | |
| 1945 | ||
| 1946 | /* Refuse starting scope units more than once */ | |
| 1947 | if (UNIT_VTABLE(u)->once_only && dual_timestamp_is_set(&u->inactive_enter_timestamp)) | |
| 1948 | return -ESTALE; | |
| 1949 | ||
| 1950 | /* If the conditions were unmet, don't do anything at all. If we already are activating this call might | |
| 1951 | * still be useful to speed up activation in case there is some hold-off time, but we don't want to | |
| 1952 | * recheck the condition in that case. */ | |
| 1953 | if (state != UNIT_ACTIVATING && | |
| 1954 | !unit_test_condition(u)) | |
| 1955 | return log_unit_debug_errno(u, SYNTHETIC_ERRNO(ECOMM), "Starting requested but condition not met. Not starting unit."); | |
| 1956 | ||
| 1957 | /* If the asserts failed, fail the entire job */ | |
| 1958 | if (state != UNIT_ACTIVATING && | |
| 1959 | !unit_test_assert(u)) | |
| 1960 | return log_unit_notice_errno(u, SYNTHETIC_ERRNO(EPROTO), "Starting requested but asserts failed."); | |
| 1961 | ||
| 1962 | /* Units of types that aren't supported cannot be started. Note that we do this test only after the | |
| 1963 | * condition checks, so that we return condition check errors (which are usually not | |
| 1964 | * considered a true failure) rather than "not supported" errors (which are considered a failure). | |
| 1965 | */ | |
| 1966 | if (!unit_type_supported(u->type)) | |
| 1967 | return -EOPNOTSUPP; | |
| 1968 | ||
| 1969 | /* Let's make sure that the deps really are in order before we start this. Normally the job engine | |
| 1970 | * should have taken care of this already, but let's check this here again. After all, our | |
| 1971 | * dependencies might not be in effect anymore, due to a reload or due to an unmet condition. */ | |
| 1972 | if (!unit_verify_deps(u)) | |
| 1973 | return -ENOLINK; | |
| 1974 | ||
| 1975 | /* Forward to the main object, if we aren't it. */ | |
| 1976 | following = unit_following(u); | |
| 1977 | if (following) { | |
| 1978 | log_unit_debug(u, "Redirecting start request from %s to %s.", u->id, following->id); | |
| 1979 | return unit_start(following, details); | |
| 1980 | } | |
| 1981 | ||
| 1982 | /* Check to make sure the unit isn't frozen */ | |
| 1983 | if (u->freezer_state != FREEZER_RUNNING) | |
| 1984 | return -EDEADLK; | |
| 1985 | ||
| 1986 | /* Check our ability to start early so that ratelimited or already starting/started units don't | |
| 1987 | * cause us to enter a busy loop. */ | |
| 1988 | if (UNIT_VTABLE(u)->test_startable) { | |
| 1989 | r = UNIT_VTABLE(u)->test_startable(u); | |
| 1990 | if (r <= 0) | |
| 1991 | return r; | |
| 1992 | } | |
| 1993 | ||
| 1994 | /* If it is stopped, but we cannot start it, then fail */ | |
| 1995 | if (!UNIT_VTABLE(u)->start) | |
| 1996 | return -EBADR; | |
| 1997 | ||
| 1998 | if (UNIT_IS_INACTIVE_OR_FAILED(state)) { | |
| 1999 | Slice *slice = SLICE(UNIT_GET_SLICE(u)); | |
| 2000 | ||
| 2001 | if (slice) { | |
| 2002 | /* Check hard concurrency limit. Note this is partially redundant, we already checked | |
| 2003 | * this when enqueuing jobs. However, between the time when we enqueued this and the | |
| 2004 | * time we are dispatching the queue the configuration might have changed, hence | |
| 2005 | * check here again */ | |
| 2006 | if (slice_concurrency_hard_max_reached(slice, u)) | |
| 2007 | return -ETOOMANYREFS; | |
| 2008 | ||
| 2009 | /* Also check the soft concurrency limit, and return EAGAIN so that the job is kept in | |
| 2010 | * the queue */ | |
| 2011 | if (slice_concurrency_soft_max_reached(slice, u)) | |
| 2012 | return -EAGAIN; /* Try again, keep in queue */ | |
| 2013 | } | |
| 2014 | } | |
| 2015 | ||
| 2016 | /* We don't suppress calls to ->start() here when we are already starting, to allow this request to | |
| 2017 | * be used as a "hurry up" call, for example when the unit is in some "auto restart" state where it | |
| 2018 | * waits for a holdoff timer to elapse before it will start again. */ | |
| 2019 | ||
| 2020 | unit_add_to_dbus_queue(u); | |
| 2021 | ||
| 2022 | if (!u->activation_details) /* Older details object wins */ | |
| 2023 | u->activation_details = activation_details_ref(details); | |
| 2024 | ||
| 2025 | return UNIT_VTABLE(u)->start(u); | |
| 2026 | } | |
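| ||
| | /* Editor's note: a minimal, hypothetical caller sketch (not part of this file's API), showing how | |
| | * the benign return codes documented above are commonly filtered out. Compiled out via #if 0. */ | |
| | #if 0 | |
| | static int example_start_ignoring_benign(Unit *u) { | |
| | int r; | |
| ||
| | r = unit_start(u, /* details= */ NULL); | |
| | if (IN_SET(r, -EALREADY, -ECOMM)) /* already active, or condition not met */ | |
| | return 0; | |
| | if (r == -EAGAIN) /* an operation is in flight, caller should retry later */ | |
| | return 0; | |
| ||
| | return r; /* everything else is a real error */ | |
| | } | |
| | #endif | |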
| 2027 | ||
| 2028 | bool unit_can_start(Unit *u) { | |
| 2029 | assert(u); | |
| 2030 | ||
| 2031 | if (u->load_state != UNIT_LOADED) | |
| 2032 | return false; | |
| 2033 | ||
| 2034 | if (!unit_type_supported(u->type)) | |
| 2035 | return false; | |
| 2036 | ||
| 2037 | /* Scope units may be started only once */ | |
| 2038 | if (UNIT_VTABLE(u)->once_only && dual_timestamp_is_set(&u->inactive_exit_timestamp)) | |
| 2039 | return false; | |
| 2040 | ||
| 2041 | return !!UNIT_VTABLE(u)->start; | |
| 2042 | } | |
| 2043 | ||
| 2044 | bool unit_can_isolate(Unit *u) { | |
| 2045 | assert(u); | |
| 2046 | ||
| 2047 | return unit_can_start(u) && | |
| 2048 | u->allow_isolate; | |
| 2049 | } | |
| 2050 | ||
| 2051 | /* Errors: | |
| 2052 | * -EBADR: This unit type does not support stopping. | |
| 2053 | * -EALREADY: Unit is already stopped. | |
| 2054 | * -EAGAIN: An operation is already in progress. Retry later. | |
| 2055 | * -EDEADLK: Unit is frozen | |
| 2056 | */ | |
| 2057 | int unit_stop(Unit *u) { | |
| 2058 | UnitActiveState state; | |
| 2059 | Unit *following; | |
| 2060 | ||
| 2061 | assert(u); | |
| 2062 | ||
| 2063 | state = unit_active_state(u); | |
| 2064 | if (UNIT_IS_INACTIVE_OR_FAILED(state)) | |
| 2065 | return -EALREADY; | |
| 2066 | ||
| 2067 | following = unit_following(u); | |
| 2068 | if (following) { | |
| 2069 | log_unit_debug(u, "Redirecting stop request from %s to %s.", u->id, following->id); | |
| 2070 | return unit_stop(following); | |
| 2071 | } | |
| 2072 | ||
| 2073 | /* Check to make sure the unit isn't frozen */ | |
| 2074 | if (u->freezer_state != FREEZER_RUNNING) | |
| 2075 | return -EDEADLK; | |
| 2076 | ||
| 2077 | if (!UNIT_VTABLE(u)->stop) | |
| 2078 | return -EBADR; | |
| 2079 | ||
| 2080 | unit_add_to_dbus_queue(u); | |
| 2081 | ||
| 2082 | return UNIT_VTABLE(u)->stop(u); | |
| 2083 | } | |
| 2084 | ||
| 2085 | bool unit_can_stop(Unit *u) { | |
| 2086 | assert(u); | |
| 2087 | ||
| 2088 | /* Note: if we return true here, it does not mean that the unit may be successfully stopped. | |
| 2089 | * Extrinsic units follow external state and they may stop following external state changes | |
| 2090 | * (hence we return true here), but an attempt to do this through the manager will fail. */ | |
| 2091 | ||
| 2092 | if (!unit_type_supported(u->type)) | |
| 2093 | return false; | |
| 2094 | ||
| 2095 | if (u->perpetual) | |
| 2096 | return false; | |
| 2097 | ||
| 2098 | return !!UNIT_VTABLE(u)->stop; | |
| 2099 | } | |
| 2100 | ||
| 2101 | /* Errors: | |
| 2102 | * -EBADR: This unit type does not support reloading. | |
| 2103 | * -ENOEXEC: Unit is not started. | |
| 2104 | * -EAGAIN: An operation is already in progress. Retry later. | |
| 2105 | * -EDEADLK: Unit is frozen. | |
| 2106 | */ | |
| 2107 | int unit_reload(Unit *u) { | |
| 2108 | UnitActiveState state; | |
| 2109 | Unit *following; | |
| 2110 | ||
| 2111 | assert(u); | |
| 2112 | ||
| 2113 | if (u->load_state != UNIT_LOADED) | |
| 2114 | return -EINVAL; | |
| 2115 | ||
| 2116 | if (!unit_can_reload(u)) | |
| 2117 | return -EBADR; | |
| 2118 | ||
| 2119 | state = unit_active_state(u); | |
| 2120 | if (IN_SET(state, UNIT_RELOADING, UNIT_REFRESHING)) | |
| 2121 | /* "refreshing" means some resources in the unit namespace is being updated. Unlike reload, | |
| 2122 | * the unit processes aren't made aware of refresh. Let's put the job back to queue | |
| 2123 | * in both cases, as refresh typically takes place before reload and it's better to wait | |
| 2124 | * for it rather than failing. */ | |
| 2125 | return -EAGAIN; | |
| 2126 | ||
| 2127 | if (state != UNIT_ACTIVE) | |
| 2128 | return log_unit_warning_errno(u, SYNTHETIC_ERRNO(ENOEXEC), "Unit cannot be reloaded because it is inactive."); | |
| 2129 | ||
| 2130 | following = unit_following(u); | |
| 2131 | if (following) { | |
| 2132 | log_unit_debug(u, "Redirecting reload request from %s to %s.", u->id, following->id); | |
| 2133 | return unit_reload(following); | |
| 2134 | } | |
| 2135 | ||
| 2136 | /* Check to make sure the unit isn't frozen */ | |
| 2137 | if (u->freezer_state != FREEZER_RUNNING) | |
| 2138 | return -EDEADLK; | |
| 2139 | ||
| 2140 | unit_add_to_dbus_queue(u); | |
| 2141 | ||
| 2142 | if (!UNIT_VTABLE(u)->reload) { | |
| 2143 | /* Unit doesn't have a reload function, but we need to propagate the reload anyway */ | |
| 2144 | unit_notify(u, unit_active_state(u), unit_active_state(u), /* reload_success = */ true); | |
| 2145 | return 0; | |
| 2146 | } | |
| 2147 | ||
| 2148 | return UNIT_VTABLE(u)->reload(u); | |
| 2149 | } | |
| 2150 | ||
| 2151 | bool unit_can_reload(Unit *u) { | |
| 2152 | assert(u); | |
| 2153 | ||
| 2154 | if (UNIT_VTABLE(u)->can_reload) | |
| 2155 | return UNIT_VTABLE(u)->can_reload(u); | |
| 2156 | ||
| 2157 | if (unit_has_dependency(u, UNIT_ATOM_PROPAGATES_RELOAD_TO, NULL)) | |
| 2158 | return true; | |
| 2159 | ||
| 2160 | return UNIT_VTABLE(u)->reload; | |
| 2161 | } | |
| 2162 | ||
| 2163 | bool unit_is_unneeded(Unit *u) { | |
| 2164 | Unit *other; | |
| 2165 | assert(u); | |
| 2166 | ||
| 2167 | if (!u->stop_when_unneeded) | |
| 2168 | return false; | |
| 2169 | ||
| 2170 | /* Don't clean up while the unit is transitioning or is even inactive. */ | |
| 2171 | if (unit_active_state(u) != UNIT_ACTIVE) | |
| 2172 | return false; | |
| 2173 | if (u->job) | |
| 2174 | return false; | |
| 2175 | ||
| 2176 | UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_PINS_STOP_WHEN_UNNEEDED) { | |
| 2177 | /* If a dependent unit has a job queued, is active or transitioning, or is marked for | |
| 2178 | * restart, then don't clean this one up. */ | |
| 2179 | ||
| 2180 | if (other->job) | |
| 2181 | return false; | |
| 2182 | ||
| 2183 | if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other))) | |
| 2184 | return false; | |
| 2185 | ||
| 2186 | if (unit_will_restart(other)) | |
| 2187 | return false; | |
| 2188 | } | |
| 2189 | ||
| 2190 | return true; | |
| 2191 | } | |
| 2192 | ||
| 2193 | bool unit_is_upheld_by_active(Unit *u, Unit **ret_culprit) { | |
| 2194 | Unit *other; | |
| 2195 | ||
| 2196 | assert(u); | |
| 2197 | ||
| 2198 | /* Checks if the unit needs to be started because it currently is not running, but some other unit | |
| 2199 | * that is active declared an Uphold= dependency on it. */ | |
| 2200 | ||
| 2201 | if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(u)) || u->job) { | |
| 2202 | if (ret_culprit) | |
| 2203 | *ret_culprit = NULL; | |
| 2204 | return false; | |
| 2205 | } | |
| 2206 | ||
| 2207 | UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_START_STEADILY) { | |
| 2208 | if (other->job) | |
| 2209 | continue; | |
| 2210 | ||
| 2211 | if (UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(other))) { | |
| 2212 | if (ret_culprit) | |
| 2213 | *ret_culprit = other; | |
| 2214 | return true; | |
| 2215 | } | |
| 2216 | } | |
| 2217 | ||
| 2218 | if (ret_culprit) | |
| 2219 | *ret_culprit = NULL; | |
| 2220 | return false; | |
| 2221 | } | |
| 2222 | ||
| 2223 | bool unit_is_bound_by_inactive(Unit *u, Unit **ret_culprit) { | |
| 2224 | Unit *other; | |
| 2225 | ||
| 2226 | assert(u); | |
| 2227 | ||
| 2228 | /* Checks whether this unit is bound to another unit that is inactive, i.e. whether we should stop | |
| 2229 | * because the other unit is down. */ | |
| 2230 | ||
| 2231 | if (unit_active_state(u) != UNIT_ACTIVE || u->job) { | |
| 2232 | /* Don't clean up while the unit is transitioning or is even inactive. */ | |
| 2233 | if (ret_culprit) | |
| 2234 | *ret_culprit = NULL; | |
| 2235 | return false; | |
| 2236 | } | |
| 2237 | ||
| 2238 | UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_CANNOT_BE_ACTIVE_WITHOUT) { | |
| 2239 | if (other->job) | |
| 2240 | continue; | |
| 2241 | ||
| 2242 | if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other))) { | |
| 2243 | if (ret_culprit) | |
| 2244 | *ret_culprit = other; | |
| 2245 | ||
| 2246 | return true; | |
| 2247 | } | |
| 2248 | } | |
| 2249 | ||
| 2250 | if (ret_culprit) | |
| 2251 | *ret_culprit = NULL; | |
| 2252 | return false; | |
| 2253 | } | |
| 2254 | ||
| 2255 | static void check_unneeded_dependencies(Unit *u) { | |
| 2256 | Unit *other; | |
| 2257 | assert(u); | |
| 2258 | ||
| 2259 | /* Add all units this unit depends on to the queue that processes StopWhenUnneeded= behaviour. */ | |
| 2260 | ||
| 2261 | UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_ADD_STOP_WHEN_UNNEEDED_QUEUE) | |
| 2262 | unit_submit_to_stop_when_unneeded_queue(other); | |
| 2263 | } | |
| 2264 | ||
| 2265 | static void check_uphold_dependencies(Unit *u) { | |
| 2266 | Unit *other; | |
| 2267 | assert(u); | |
| 2268 | ||
| 2269 | /* Add all units this unit depends on to the queue that processes Uphold= behaviour. */ | |
| 2270 | ||
| 2271 | UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_ADD_START_WHEN_UPHELD_QUEUE) | |
| 2272 | unit_submit_to_start_when_upheld_queue(other); | |
| 2273 | } | |
| 2274 | ||
| 2275 | static void check_bound_by_dependencies(Unit *u) { | |
| 2276 | Unit *other; | |
| 2277 | assert(u); | |
| 2278 | ||
| 2279 | /* Add all units this unit depends on to the queue that processes BindsTo= stop behaviour. */ | |
| 2280 | ||
| 2281 | UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_ADD_CANNOT_BE_ACTIVE_WITHOUT_QUEUE) | |
| 2282 | unit_submit_to_stop_when_bound_queue(other); | |
| 2283 | } | |
| 2284 | ||
| 2285 | static void retroactively_start_dependencies(Unit *u) { | |
| 2286 | Unit *other; | |
| 2287 | ||
| 2288 | assert(u); | |
| 2289 | assert(UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u))); | |
| 2290 | ||
| 2291 | UNIT_FOREACH_DEPENDENCY_SAFE(other, u, UNIT_ATOM_RETROACTIVE_START_REPLACE) /* Requires= + BindsTo= */ | |
| 2292 | if (!unit_has_dependency(u, UNIT_ATOM_AFTER, other) && | |
| 2293 | !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other))) | |
| 2294 | (void) manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, /* error = */ NULL, /* ret = */ NULL); | |
| 2295 | ||
| 2296 | UNIT_FOREACH_DEPENDENCY_SAFE(other, u, UNIT_ATOM_RETROACTIVE_START_FAIL) /* Wants= */ | |
| 2297 | if (!unit_has_dependency(u, UNIT_ATOM_AFTER, other) && | |
| 2298 | !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other))) | |
| 2299 | (void) manager_add_job(u->manager, JOB_START, other, JOB_FAIL, /* error = */ NULL, /* ret = */ NULL); | |
| 2300 | ||
| 2301 | UNIT_FOREACH_DEPENDENCY_SAFE(other, u, UNIT_ATOM_RETROACTIVE_STOP_ON_START) /* Conflicts= (and inverse) */ | |
| 2302 | if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other))) | |
| 2303 | (void) manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, /* error = */ NULL, /* ret = */ NULL); | |
| 2304 | } | |
| 2305 | ||
| 2306 | static void retroactively_stop_dependencies(Unit *u) { | |
| 2307 | Unit *other; | |
| 2308 | ||
| 2309 | assert(u); | |
| 2310 | assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u))); | |
| 2311 | ||
| 2312 | /* Pull down units which are bound to us recursively if enabled */ | |
| 2313 | UNIT_FOREACH_DEPENDENCY_SAFE(other, u, UNIT_ATOM_RETROACTIVE_STOP_ON_STOP) /* BoundBy= */ | |
| 2314 | if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other))) | |
| 2315 | (void) manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, /* error = */ NULL, /* ret = */ NULL); | |
| 2316 | } | |
| 2317 | ||
| 2318 | void unit_start_on_termination_deps(Unit *u, UnitDependencyAtom atom) { | |
| 2319 | const char *dependency_name = NULL; | |
| 2320 | JobMode job_mode; | |
| 2321 | unsigned n_jobs = 0; | |
| 2322 | int r; | |
| 2323 | ||
| 2324 | /* Act on OnFailure= and OnSuccess= dependencies */ | |
| 2325 | ||
| 2326 | assert(u); | |
| 2327 | assert(u->manager); | |
| 2328 | assert(IN_SET(atom, UNIT_ATOM_ON_SUCCESS, UNIT_ATOM_ON_FAILURE)); | |
| 2329 | ||
| 2330 | FOREACH_ELEMENT(setting, on_termination_settings) | |
| 2331 | if (atom == setting->atom) { | |
| 2332 | job_mode = *(JobMode*) ((uint8_t*) u + setting->job_mode_offset); | |
| 2333 | dependency_name = setting->dependency_name; | |
| 2334 | break; | |
| 2335 | } | |
| 2336 | ||
| 2337 | assert(dependency_name); | |
| 2338 | ||
| 2339 | Unit *other; | |
| 2340 | UNIT_FOREACH_DEPENDENCY_SAFE(other, u, atom) { | |
| 2341 | _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL; | |
| 2342 | ||
| 2343 | if (n_jobs == 0) | |
| 2344 | log_unit_info(u, "Triggering %s dependencies.", dependency_name); | |
| 2345 | ||
| 2346 | r = manager_add_job(u->manager, JOB_START, other, job_mode, &error, /* ret = */ NULL); | |
| 2347 | if (r < 0) | |
| 2348 | log_unit_warning_errno(u, r, "Failed to enqueue %s%s job, ignoring: %s", | |
| 2349 | dependency_name, other->id, bus_error_message(&error, r)); | |
| 2350 | n_jobs++; | |
| 2351 | } | |
| 2352 | ||
| 2353 | if (n_jobs > 0) | |
| 2354 | log_unit_debug(u, "Triggering %s dependencies done (%u %s).", | |
| 2355 | dependency_name, n_jobs, n_jobs == 1 ? "job" : "jobs"); | |
| 2356 | } | |
| 2357 | ||
| 2358 | void unit_trigger_notify(Unit *u) { | |
| 2359 | Unit *other; | |
| 2360 | ||
| 2361 | assert(u); | |
| 2362 | ||
| 2363 | UNIT_FOREACH_DEPENDENCY_SAFE(other, u, UNIT_ATOM_TRIGGERED_BY) | |
| 2364 | if (UNIT_VTABLE(other)->trigger_notify) | |
| 2365 | UNIT_VTABLE(other)->trigger_notify(other, u); | |
| 2366 | } | |
| 2367 | ||
| 2368 | static int raise_level(int log_level, bool condition_info, bool condition_notice) { | |
| 2369 | if (condition_notice && log_level > LOG_NOTICE) | |
| 2370 | return LOG_NOTICE; | |
| 2371 | if (condition_info && log_level > LOG_INFO) | |
| 2372 | return LOG_INFO; | |
| 2373 | return log_level; | |
| 2374 | } | |
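| ||
| | /* Editor's note, worked example: syslog levels decrease numerically as severity increases | |
| | * (LOG_DEBUG=7, LOG_INFO=6, LOG_NOTICE=5), hence the '>' comparisons above. A unit that consumed | |
| | * 2s of CPU time passes MENTIONWORTHY_CPU_NSEC (1s) but not NOTICEWORTHY_CPU_NSEC (10min), so | |
| | * raise_level(LOG_DEBUG, true, false) lifts the level to LOG_INFO. */ | |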
| 2375 | ||
| 2376 | static int unit_log_resources(Unit *u) { | |
| 2377 | ||
| 2378 | static const struct { | |
| 2379 | const char *journal_field; | |
| 2380 | const char *message_suffix; | |
| 2381 | } memory_fields[_CGROUP_MEMORY_ACCOUNTING_METRIC_CACHED_LAST + 1] = { | |
| 2382 | [CGROUP_MEMORY_PEAK] = { "MEMORY_PEAK", "memory peak" }, | |
| 2383 | [CGROUP_MEMORY_SWAP_PEAK] = { "MEMORY_SWAP_PEAK", "memory swap peak" }, | |
| 2384 | }, ip_fields[_CGROUP_IP_ACCOUNTING_METRIC_MAX] = { | |
| 2385 | [CGROUP_IP_INGRESS_BYTES] = { "IP_METRIC_INGRESS_BYTES", "incoming IP traffic" }, | |
| 2386 | [CGROUP_IP_EGRESS_BYTES] = { "IP_METRIC_EGRESS_BYTES", "outgoing IP traffic" }, | |
| 2387 | [CGROUP_IP_INGRESS_PACKETS] = { "IP_METRIC_INGRESS_PACKETS", NULL }, | |
| 2388 | [CGROUP_IP_EGRESS_PACKETS] = { "IP_METRIC_EGRESS_PACKETS", NULL }, | |
| 2389 | }, io_fields[_CGROUP_IO_ACCOUNTING_METRIC_MAX] = { | |
| 2390 | [CGROUP_IO_READ_BYTES] = { "IO_METRIC_READ_BYTES", "read from disk" }, | |
| 2391 | [CGROUP_IO_WRITE_BYTES] = { "IO_METRIC_WRITE_BYTES", "written to disk" }, | |
| 2392 | [CGROUP_IO_READ_OPERATIONS] = { "IO_METRIC_READ_OPERATIONS", NULL }, | |
| 2393 | [CGROUP_IO_WRITE_OPERATIONS] = { "IO_METRIC_WRITE_OPERATIONS", NULL }, | |
| 2394 | }; | |
| 2395 | ||
| 2396 | struct iovec *iovec = NULL; | |
| 2397 | size_t n_iovec = 0; | |
| 2398 | _cleanup_free_ char *message = NULL, *t = NULL; | |
| 2399 | nsec_t cpu_nsec = NSEC_INFINITY; | |
| 2400 | int log_level = LOG_DEBUG; /* May be raised if resources consumed over a threshold */ | |
| 2401 | ||
| 2402 | assert(u); | |
| 2403 | ||
| 2404 | CLEANUP_ARRAY(iovec, n_iovec, iovec_array_free); | |
| 2405 | ||
| 2406 | iovec = new(struct iovec, 1 + (_CGROUP_MEMORY_ACCOUNTING_METRIC_CACHED_LAST + 1) + | |
| 2407 | _CGROUP_IP_ACCOUNTING_METRIC_MAX + _CGROUP_IO_ACCOUNTING_METRIC_MAX + 4); | |
| 2408 | if (!iovec) | |
| 2409 | return log_oom(); | |
| 2410 | ||
| 2411 | /* Invoked whenever a unit enters failed or dead state. Logs information about consumed resources if resource | |
| 2412 | * accounting was enabled for a unit. It does this in two ways: a friendly human-readable string with reduced | |
| 2413 | * information and the complete data in structured fields. */ | |
| 2414 | ||
| 2415 | (void) unit_get_cpu_usage(u, &cpu_nsec); | |
| 2416 | if (cpu_nsec != NSEC_INFINITY) { | |
| 2417 | /* Format the CPU time for inclusion in the structured log message */ | |
| 2418 | if (asprintf(&t, "CPU_USAGE_NSEC=%" PRIu64, cpu_nsec) < 0) | |
| 2419 | return log_oom(); | |
| 2420 | iovec[n_iovec++] = IOVEC_MAKE_STRING(TAKE_PTR(t)); | |
| 2421 | ||
| 2422 | /* Format the CPU time for inclusion in the human language message string */ | |
| 2423 | if (dual_timestamp_is_set(&u->inactive_exit_timestamp) && | |
| 2424 | dual_timestamp_is_set(&u->inactive_enter_timestamp)) { | |
| 2425 | usec_t wall_clock_usec = usec_sub_unsigned(u->inactive_enter_timestamp.monotonic, u->inactive_exit_timestamp.monotonic); | |
| 2426 | if (strextendf_with_separator(&message, ", ", | |
| 2427 | "Consumed %s CPU time over %s wall clock time", | |
| 2428 | FORMAT_TIMESPAN(cpu_nsec / NSEC_PER_USEC, USEC_PER_MSEC), | |
| 2429 | FORMAT_TIMESPAN(wall_clock_usec, USEC_PER_MSEC)) < 0) | |
| 2430 | return log_oom(); | |
| 2431 | } else { | |
| 2432 | if (strextendf_with_separator(&message, ", ", | |
| 2433 | "Consumed %s CPU time", | |
| 2434 | FORMAT_TIMESPAN(cpu_nsec / NSEC_PER_USEC, USEC_PER_MSEC)) < 0) | |
| 2435 | return log_oom(); | |
| 2436 | } | |
| 2437 | ||
| 2438 | log_level = raise_level(log_level, | |
| 2439 | cpu_nsec > MENTIONWORTHY_CPU_NSEC, | |
| 2440 | cpu_nsec > NOTICEWORTHY_CPU_NSEC); | |
| 2441 | } | |
| 2442 | ||
| 2443 | for (CGroupMemoryAccountingMetric metric = 0; metric <= _CGROUP_MEMORY_ACCOUNTING_METRIC_CACHED_LAST; metric++) { | |
| 2444 | uint64_t value = UINT64_MAX; | |
| 2445 | ||
| 2446 | assert(memory_fields[metric].journal_field); | |
| 2447 | assert(memory_fields[metric].message_suffix); | |
| 2448 | ||
| 2449 | (void) unit_get_memory_accounting(u, metric, &value); | |
| 2450 | if (value == UINT64_MAX) | |
| 2451 | continue; | |
| 2452 | ||
| 2453 | if (asprintf(&t, "%s=%" PRIu64, memory_fields[metric].journal_field, value) < 0) | |
| 2454 | return log_oom(); | |
| 2455 | iovec[n_iovec++] = IOVEC_MAKE_STRING(TAKE_PTR(t)); | |
| 2456 | ||
| 2457 | /* If value is 0, we don't log it in the MESSAGE= field. */ | |
| 2458 | if (value == 0) | |
| 2459 | continue; | |
| 2460 | ||
| 2461 | if (strextendf_with_separator(&message, ", ", "%s %s", | |
| 2462 | FORMAT_BYTES(value), memory_fields[metric].message_suffix) < 0) | |
| 2463 | return log_oom(); | |
| 2464 | ||
| 2465 | log_level = raise_level(log_level, | |
| 2466 | value > MENTIONWORTHY_MEMORY_BYTES, | |
| 2467 | value > NOTICEWORTHY_MEMORY_BYTES); | |
| 2468 | } | |
| 2469 | ||
| 2470 | for (CGroupIOAccountingMetric k = 0; k < _CGROUP_IO_ACCOUNTING_METRIC_MAX; k++) { | |
| 2471 | uint64_t value = UINT64_MAX; | |
| 2472 | ||
| 2473 | assert(io_fields[k].journal_field); | |
| 2474 | ||
| 2475 | (void) unit_get_io_accounting(u, k, &value); | |
| 2476 | if (value == UINT64_MAX) | |
| 2477 | continue; | |
| 2478 | ||
| 2479 | /* Format IO accounting data for inclusion in the structured log message */ | |
| 2480 | if (asprintf(&t, "%s=%" PRIu64, io_fields[k].journal_field, value) < 0) | |
| 2481 | return log_oom(); | |
| 2482 | iovec[n_iovec++] = IOVEC_MAKE_STRING(TAKE_PTR(t)); | |
| 2483 | ||
| 2484 | /* If value is 0, we don't log it in the MESSAGE= field. */ | |
| 2485 | if (value == 0) | |
| 2486 | continue; | |
| 2487 | ||
| 2488 | /* Format the IO accounting data for inclusion in the human language message string, but only | |
| 2489 | * for the bytes counters (and not for the operations counters) */ | |
| 2490 | if (io_fields[k].message_suffix) { | |
| 2491 | if (strextendf_with_separator(&message, ", ", "%s %s", | |
| 2492 | FORMAT_BYTES(value), io_fields[k].message_suffix) < 0) | |
| 2493 | return log_oom(); | |
| 2494 | ||
| 2495 | log_level = raise_level(log_level, | |
| 2496 | value > MENTIONWORTHY_IO_BYTES, | |
| 2497 | value > NOTICEWORTHY_IO_BYTES); | |
| 2498 | } | |
| 2499 | } | |
| 2500 | ||
| 2501 | for (CGroupIPAccountingMetric m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++) { | |
| 2502 | uint64_t value = UINT64_MAX; | |
| 2503 | ||
| 2504 | assert(ip_fields[m].journal_field); | |
| 2505 | ||
| 2506 | (void) unit_get_ip_accounting(u, m, &value); | |
| 2507 | if (value == UINT64_MAX) | |
| 2508 | continue; | |
| 2509 | ||
| 2510 | /* Format IP accounting data for inclusion in the structured log message */ | |
| 2511 | if (asprintf(&t, "%s=%" PRIu64, ip_fields[m].journal_field, value) < 0) | |
| 2512 | return log_oom(); | |
| 2513 | iovec[n_iovec++] = IOVEC_MAKE_STRING(TAKE_PTR(t)); | |
| 2514 | ||
| 2515 | /* If value is 0, we don't log it in the MESSAGE= field. */ | |
| 2516 | if (value == 0) | |
| 2517 | continue; | |
| 2518 | ||
| 2519 | /* Format the IP accounting data for inclusion in the human language message string, but only | |
| 2520 | * for the bytes counters (and not for the packets counters) */ | |
| 2521 | if (ip_fields[m].message_suffix) { | |
| 2522 | if (strextendf_with_separator(&message, ", ", "%s %s", | |
| 2523 | FORMAT_BYTES(value), ip_fields[m].message_suffix) < 0) | |
| 2524 | return log_oom(); | |
| 2525 | ||
| 2526 | log_level = raise_level(log_level, | |
| 2527 | value > MENTIONWORTHY_IP_BYTES, | |
| 2528 | value > NOTICEWORTHY_IP_BYTES); | |
| 2529 | } | |
| 2530 | } | |
| 2531 | ||
| 2532 | /* This check is here because it is the earliest point following all possible log_level assignments. | |
| 2533 | * (If log_level is assigned anywhere after this point, move this check.) */ | |
| 2534 | if (!unit_log_level_test(u, log_level)) | |
| 2535 | return 0; | |
| 2536 | ||
| 2537 | /* Is there any accounting data available at all? */ | |
| 2538 | if (n_iovec == 0) { | |
| 2539 | assert(!message); | |
| 2540 | return 0; | |
| 2541 | } | |
| 2542 | ||
| 2543 | t = strjoin("MESSAGE=", u->id, ": ", message ?: "Completed", "."); | |
| 2544 | if (!t) | |
| 2545 | return log_oom(); | |
| 2546 | iovec[n_iovec++] = IOVEC_MAKE_STRING(TAKE_PTR(t)); | |
| 2547 | ||
| 2548 | if (!set_iovec_string_field(iovec, &n_iovec, "MESSAGE_ID=", SD_MESSAGE_UNIT_RESOURCES_STR)) | |
| 2549 | return log_oom(); | |
| 2550 | ||
| 2551 | if (!set_iovec_string_field(iovec, &n_iovec, unit_log_field(u), u->id)) | |
| 2552 | return log_oom(); | |
| 2553 | ||
| 2554 | if (!set_iovec_string_field(iovec, &n_iovec, unit_invocation_log_field(u), u->invocation_id_string)) | |
| 2555 | return log_oom(); | |
| 2556 | ||
| 2557 | log_unit_struct_iovec(u, log_level, iovec, n_iovec); | |
| 2558 | ||
| 2559 | return 0; | |
| 2560 | } | |
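| ||
| | /* Editor's note, an illustrative (made-up) journal entry emitted above for a unit with CPU, | |
| | * memory and IO accounting enabled: | |
| | * | |
| | *     MESSAGE=foo.service: Consumed 2.5s CPU time over 1min 3s wall clock time, 64M memory peak, 1M read from disk. | |
| | *     CPU_USAGE_NSEC=2500000000 | |
| | *     MEMORY_PEAK=67108864 | |
| | *     IO_METRIC_READ_BYTES=1048576 | |
| | * | |
| | * Zero-valued counters are kept out of MESSAGE= but still emitted as structured fields. */ | |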
| 2561 | ||
| 2562 | static void unit_update_on_console(Unit *u) { | |
| 2563 | bool b; | |
| 2564 | ||
| 2565 | assert(u); | |
| 2566 | ||
| 2567 | b = unit_needs_console(u); | |
| 2568 | if (u->on_console == b) | |
| 2569 | return; | |
| 2570 | ||
| 2571 | u->on_console = b; | |
| 2572 | if (b) | |
| 2573 | manager_ref_console(u->manager); | |
| 2574 | else | |
| 2575 | manager_unref_console(u->manager); | |
| 2576 | } | |
| 2577 | ||
| 2578 | static void unit_emit_audit_start(Unit *u) { | |
| 2579 | assert(u); | |
| 2580 | ||
| 2581 | if (UNIT_VTABLE(u)->audit_start_message_type <= 0) | |
| 2582 | return; | |
| 2583 | ||
| 2584 | /* Write audit record if we have just finished starting up */ | |
| 2585 | manager_send_unit_audit(u->manager, u, UNIT_VTABLE(u)->audit_start_message_type, /* success= */ true); | |
| 2586 | u->in_audit = true; | |
| 2587 | } | |
| 2588 | ||
| 2589 | static void unit_emit_audit_stop(Unit *u, UnitActiveState state) { | |
| 2590 | assert(u); | |
| 2591 | ||
| 2592 | if (UNIT_VTABLE(u)->audit_start_message_type <= 0) | |
| 2593 | return; | |
| 2594 | ||
| 2595 | if (u->in_audit) { | |
| 2596 | /* Write audit record if we have just finished shutting down */ | |
| 2597 | manager_send_unit_audit(u->manager, u, UNIT_VTABLE(u)->audit_stop_message_type, /* success= */ state == UNIT_INACTIVE); | |
| 2598 | u->in_audit = false; | |
| 2599 | } else { | |
| 2600 | /* Hmm, if there was no start record written, write it now, so that we always have a nice pair */ | |
| 2601 | manager_send_unit_audit(u->manager, u, UNIT_VTABLE(u)->audit_start_message_type, /* success= */ state == UNIT_INACTIVE); | |
| 2602 | ||
| 2603 | if (state == UNIT_INACTIVE) | |
| 2604 | manager_send_unit_audit(u->manager, u, UNIT_VTABLE(u)->audit_stop_message_type, /* success= */ true); | |
| 2605 | } | |
| 2606 | } | |
| 2607 | ||
| 2608 | static bool unit_process_job(Job *j, UnitActiveState ns, bool reload_success) { | |
| 2609 | bool unexpected = false; | |
| 2610 | JobResult result; | |
| 2611 | ||
| 2612 | assert(j); | |
| 2613 | ||
| 2614 | if (j->state == JOB_WAITING) | |
| 2615 | /* So we reached a different state for this job. Let's see if we can run it now if it failed previously | |
| 2616 | * due to EAGAIN. */ | |
| 2617 | job_add_to_run_queue(j); | |
| 2618 | ||
| 2619 | /* Let's check whether the unit's new state constitutes a finished job, or maybe contradicts a running job and | |
| 2620 | * hence needs to invalidate jobs. */ | |
| 2621 | ||
| 2622 | switch (j->type) { | |
| 2623 | ||
| 2624 | case JOB_START: | |
| 2625 | case JOB_VERIFY_ACTIVE: | |
| 2626 | ||
| 2627 | if (UNIT_IS_ACTIVE_OR_RELOADING(ns)) | |
| 2628 | job_finish_and_invalidate(j, JOB_DONE, true, false); | |
| 2629 | else if (j->state == JOB_RUNNING && ns != UNIT_ACTIVATING) { | |
| 2630 | unexpected = true; | |
| 2631 | ||
| 2632 | if (UNIT_IS_INACTIVE_OR_FAILED(ns)) { | |
| 2633 | if (ns == UNIT_FAILED) | |
| 2634 | result = JOB_FAILED; | |
| 2635 | else | |
| 2636 | result = JOB_DONE; | |
| 2637 | ||
| 2638 | job_finish_and_invalidate(j, result, true, false); | |
| 2639 | } | |
| 2640 | } | |
| 2641 | ||
| 2642 | break; | |
| 2643 | ||
| 2644 | case JOB_RELOAD: | |
| 2645 | case JOB_RELOAD_OR_START: | |
| 2646 | case JOB_TRY_RELOAD: | |
| 2647 | ||
| 2648 | if (j->state == JOB_RUNNING) { | |
| 2649 | if (ns == UNIT_ACTIVE) | |
| 2650 | job_finish_and_invalidate(j, reload_success ? JOB_DONE : JOB_FAILED, true, false); | |
| 2651 | else if (!IN_SET(ns, UNIT_ACTIVATING, UNIT_RELOADING, UNIT_REFRESHING)) { | |
| 2652 | unexpected = true; | |
| 2653 | ||
| 2654 | if (UNIT_IS_INACTIVE_OR_FAILED(ns)) | |
| 2655 | job_finish_and_invalidate(j, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true, false); | |
| 2656 | } | |
| 2657 | } | |
| 2658 | ||
| 2659 | break; | |
| 2660 | ||
| 2661 | case JOB_STOP: | |
| 2662 | case JOB_RESTART: | |
| 2663 | case JOB_TRY_RESTART: | |
| 2664 | ||
| 2665 | if (UNIT_IS_INACTIVE_OR_FAILED(ns)) | |
| 2666 | job_finish_and_invalidate(j, JOB_DONE, true, false); | |
| 2667 | else if (j->state == JOB_RUNNING && ns != UNIT_DEACTIVATING) { | |
| 2668 | unexpected = true; | |
| 2669 | job_finish_and_invalidate(j, JOB_FAILED, true, false); | |
| 2670 | } | |
| 2671 | ||
| 2672 | break; | |
| 2673 | ||
| 2674 | default: | |
| 2675 | assert_not_reached(); | |
| 2676 | } | |
| 2677 | ||
| 2678 | return unexpected; | |
| 2679 | } | |
| 2680 | ||
| 2681 | static void unit_recursive_add_to_run_queue(Unit *u) { | |
| 2682 | assert(u); | |
| 2683 | ||
| 2684 | if (u->job) | |
| 2685 | job_add_to_run_queue(u->job); | |
| 2686 | ||
| 2687 | Unit *child; | |
| 2688 | UNIT_FOREACH_DEPENDENCY(child, u, UNIT_ATOM_SLICE_OF) { | |
| 2689 | ||
| 2690 | if (!child->job) | |
| 2691 | continue; | |
| 2692 | ||
| 2693 | unit_recursive_add_to_run_queue(child); | |
| 2694 | } | |
| 2695 | } | |
| 2696 | ||
| 2697 | static void unit_check_concurrency_limit(Unit *u) { | |
| 2698 | assert(u); | |
| 2699 | ||
| 2700 | Unit *slice = UNIT_GET_SLICE(u); | |
| 2701 | if (!slice) | |
| 2702 | return; | |
| 2703 | ||
| 2704 | /* If a unit was stopped, maybe it has pending siblings (or children thereof) that can be started now */ | |
| 2705 | ||
| 2706 | if (SLICE(slice)->concurrency_soft_max != UINT_MAX) { | |
| 2707 | Unit *sibling; | |
| 2708 | UNIT_FOREACH_DEPENDENCY(sibling, slice, UNIT_ATOM_SLICE_OF) { | |
| 2709 | if (sibling == u) | |
| 2710 | continue; | |
| 2711 | ||
| 2712 | unit_recursive_add_to_run_queue(sibling); | |
| 2713 | } | |
| 2714 | } | |
| 2715 | ||
| 2716 | /* Also go up the tree. */ | |
| 2717 | unit_check_concurrency_limit(slice); | |
| 2718 | } | |
| 2719 | ||
| 2720 | void unit_notify(Unit *u, UnitActiveState os, UnitActiveState ns, bool reload_success) { | |
| 2721 | assert(u); | |
| 2722 | assert(os < _UNIT_ACTIVE_STATE_MAX); | |
| 2723 | assert(ns < _UNIT_ACTIVE_STATE_MAX); | |
| 2724 | ||
| 2725 | /* Note that this is called for all low-level state changes, even if they might map to the same high-level | |
| 2726 | * UnitActiveState! That means that ns == os is an expected behavior here. For example: if a mount point is | |
| 2727 | * remounted this function will be called too! */ | |
| 2728 | ||
| 2729 | Manager *m = ASSERT_PTR(u->manager); | |
| 2730 | ||
| 2731 | /* Let's enqueue the change signal early. In case this unit has a job associated we want that this unit is in | |
| 2732 | * the bus queue, so that any job change signal queued will force out the unit change signal first. */ | |
| 2733 | unit_add_to_dbus_queue(u); | |
| 2734 | ||
| 2735 | /* Update systemd-oomd on the property/state change. | |
| 2736 | * | |
| 2737 | * Always send an update if the unit is going into an inactive state so systemd-oomd knows to | |
| 2738 | * stop monitoring. | |
| 2739 | * Also send an update whenever the unit goes active; this is to handle a case where an override file | |
| 2740 | * sets one of the ManagedOOM*= properties to "kill", then later removes it. systemd-oomd needs to | |
| 2741 | * know to stop monitoring when the unit changes from "kill" -> "auto" on daemon-reload, but we don't | |
| 2742 | * have the information on the property. Thus, indiscriminately send an update. */ | |
| 2743 | if (os != ns && (UNIT_IS_INACTIVE_OR_FAILED(ns) || UNIT_IS_ACTIVE_OR_RELOADING(ns))) | |
| 2744 | (void) manager_varlink_send_managed_oom_update(u); | |
| 2745 | ||
| 2746 | /* Update timestamps for state changes */ | |
| 2747 | if (!MANAGER_IS_RELOADING(m)) { | |
| 2748 | dual_timestamp_now(&u->state_change_timestamp); | |
| 2749 | ||
| 2750 | if (UNIT_IS_INACTIVE_OR_FAILED(os) && !UNIT_IS_INACTIVE_OR_FAILED(ns)) | |
| 2751 | u->inactive_exit_timestamp = u->state_change_timestamp; | |
| 2752 | else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_INACTIVE_OR_FAILED(ns)) | |
| 2753 | u->inactive_enter_timestamp = u->state_change_timestamp; | |
| 2754 | ||
| 2755 | if (!UNIT_IS_ACTIVE_OR_RELOADING(os) && UNIT_IS_ACTIVE_OR_RELOADING(ns)) | |
| 2756 | u->active_enter_timestamp = u->state_change_timestamp; | |
| 2757 | else if (UNIT_IS_ACTIVE_OR_RELOADING(os) && !UNIT_IS_ACTIVE_OR_RELOADING(ns)) | |
| 2758 | u->active_exit_timestamp = u->state_change_timestamp; | |
| 2759 | } | |
| 2760 | ||
| 2761 | /* Keep track of failed units */ | |
| 2762 | (void) manager_update_failed_units(m, u, ns == UNIT_FAILED); | |
| 2763 | ||
| 2764 | /* Make sure the cgroup and state files are always removed when we become inactive */ | |
| 2765 | if (UNIT_IS_INACTIVE_OR_FAILED(ns)) { | |
| 2766 | SET_FLAG(u->markers, | |
| 2767 | (1u << UNIT_MARKER_NEEDS_RELOAD)|(1u << UNIT_MARKER_NEEDS_RESTART), | |
| 2768 | false); | |
| 2769 | unit_prune_cgroup(u); | |
| 2770 | unit_unlink_state_files(u); | |
| 2771 | } else if (ns != os && ns == UNIT_RELOADING) | |
| 2772 | SET_FLAG(u->markers, 1u << UNIT_MARKER_NEEDS_RELOAD, false); | |
| 2773 | ||
| 2774 | unit_update_on_console(u); | |
| 2775 | ||
| 2776 | if (!MANAGER_IS_RELOADING(m)) { | |
| 2777 | bool unexpected; | |
| 2778 | ||
| 2779 | /* Let's propagate state changes to the job */ | |
| 2780 | if (u->job) | |
| 2781 | unexpected = unit_process_job(u->job, ns, reload_success); | |
| 2782 | else | |
| 2783 | unexpected = true; | |
| 2784 | ||
| 2785 | /* If this state change happened without being requested by a job, then let's retroactively start or | |
| 2786 | * stop dependencies. We skip that step when deserializing, since we don't want to create any | |
| 2787 | * additional jobs just because something is already activated. */ | |
| 2788 | ||
| 2789 | if (unexpected) { | |
| 2790 | if (UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_ACTIVE_OR_ACTIVATING(ns)) | |
| 2791 | retroactively_start_dependencies(u); | |
| 2792 | else if (UNIT_IS_ACTIVE_OR_ACTIVATING(os) && UNIT_IS_INACTIVE_OR_DEACTIVATING(ns)) | |
| 2793 | retroactively_stop_dependencies(u); | |
| 2794 | } | |
| 2795 | ||
| 2796 | if (UNIT_IS_ACTIVE_OR_RELOADING(ns) && !UNIT_IS_ACTIVE_OR_RELOADING(os)) { | |
| 2797 | /* This unit just finished starting up */ | |
| 2798 | ||
| 2799 | unit_emit_audit_start(u); | |
| 2800 | manager_send_unit_plymouth(m, u); | |
| 2801 | manager_send_unit_supervisor(m, u, /* active= */ true); | |
| 2802 | ||
| 2803 | } else if (UNIT_IS_INACTIVE_OR_FAILED(ns) && !UNIT_IS_INACTIVE_OR_FAILED(os)) { | |
| 2804 | /* This unit just stopped/failed. */ | |
| 2805 | ||
| 2806 | unit_emit_audit_stop(u, ns); | |
| 2807 | manager_send_unit_supervisor(m, u, /* active= */ false); | |
| 2808 | unit_log_resources(u); | |
| 2809 | } | |
| 2810 | ||
| 2811 | if (ns == UNIT_INACTIVE && !IN_SET(os, UNIT_FAILED, UNIT_INACTIVE, UNIT_MAINTENANCE)) | |
| 2812 | unit_start_on_termination_deps(u, UNIT_ATOM_ON_SUCCESS); | |
| 2813 | else if (ns != os && ns == UNIT_FAILED) | |
| 2814 | unit_start_on_termination_deps(u, UNIT_ATOM_ON_FAILURE); | |
| 2815 | } | |
| 2816 | ||
| 2817 | manager_recheck_journal(m); | |
| 2818 | manager_recheck_dbus(m); | |
| 2819 | ||
| 2820 | unit_trigger_notify(u); | |
| 2821 | ||
| 2822 | if (!MANAGER_IS_RELOADING(m)) { | |
| 2823 | const char *reason; | |
| 2824 | ||
| 2825 | if (os != UNIT_FAILED && ns == UNIT_FAILED) { | |
| 2826 | reason = strjoina("unit ", u->id, " failed"); | |
| 2827 | emergency_action(m, u->failure_action, EMERGENCY_ACTION_WARN|EMERGENCY_ACTION_SLEEP_5S, u->reboot_arg, unit_failure_action_exit_status(u), reason); | |
| 2828 | } else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && ns == UNIT_INACTIVE) { | |
| 2829 | reason = strjoina("unit ", u->id, " succeeded"); | |
| 2830 | emergency_action(m, u->success_action, /* flags= */ 0, u->reboot_arg, unit_success_action_exit_status(u), reason); | |
| 2831 | } | |
| 2832 | } | |
| 2833 | ||
| 2834 | /* And now, add the unit or depending units to various queues that will act on the new situation if | |
| 2835 | * needed. These queues generally check for continuous state changes rather than events (like most of | |
| 2836 | * the state propagation above), and defer their work instead of acting instantly, since they typically | |
| 2837 | * don't want to run during reloading, and usually involve checking combined state of multiple units | |
| 2838 | * at once. */ | |
| 2839 | ||
| 2840 | if (UNIT_IS_INACTIVE_OR_FAILED(ns)) { | |
| 2841 | /* Stop unneeded units and bound-by units regardless of whether going down was expected or not */ | |
| 2842 | check_unneeded_dependencies(u); | |
| 2843 | check_bound_by_dependencies(u); | |
| 2844 | ||
| 2845 | /* Maybe someone wants us to remain up? */ | |
| 2846 | unit_submit_to_start_when_upheld_queue(u); | |
| 2847 | ||
| 2848 | /* Maybe the unit should be GC'ed now? */ | |
| 2849 | unit_add_to_gc_queue(u); | |
| 2850 | ||
| 2851 | /* Maybe we can release some resources now? */ | |
| 2852 | unit_submit_to_release_resources_queue(u); | |
| 2853 | ||
| 2854 | /* Maybe the concurrency limits now allow dispatching of another start job in this slice? */ | |
| 2855 | unit_check_concurrency_limit(u); | |
| 2856 | ||
| 2857 | /* Maybe someone else has been waiting for us to stop? */ | |
| 2858 | m->may_dispatch_stop_notify_queue = true; | |
| 2859 | ||
| 2860 | } else if (UNIT_IS_ACTIVE_OR_RELOADING(ns)) { | |
| 2861 | /* Start uphold units regardless of whether going up was expected or not */ | |
| 2862 | check_uphold_dependencies(u); | |
| 2863 | ||
| 2864 | /* Maybe we finished startup and are now ready for being stopped because unneeded? */ | |
| 2865 | unit_submit_to_stop_when_unneeded_queue(u); | |
| 2866 | ||
| 2867 | /* Maybe we finished startup, but something we needed has vanished? Let's die then. (This happens | |
| 2868 | * when something sets BindsTo= on a Type=oneshot unit, as these units go directly from starting to | |
| 2869 | * inactive, without ever entering started.) */ | |
| 2870 | unit_submit_to_stop_when_bound_queue(u); | |
| 2871 | } | |
| 2872 | } | |
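| ||
| | /* Editor's note, an illustrative pass through unit_notify(): when a service's start job completes, | |
| | * os=UNIT_ACTIVATING and ns=UNIT_ACTIVE, so active_enter_timestamp is updated, the JOB_START job | |
| | * finishes as JOB_DONE (the change is not "unexpected"), an audit start record and supervisor | |
| | * notification go out, its Uphold= dependencies are queued for starting, and the unit itself is | |
| | * submitted to the stop-when-unneeded and stop-when-bound queues. */ | |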
| 2873 | ||
| 2874 | int unit_watch_pidref(Unit *u, const PidRef *pid, bool exclusive) { | |
| 2875 | _cleanup_(pidref_freep) PidRef *pid_dup = NULL; | |
| 2876 | int r; | |
| 2877 | ||
| 2878 | /* Adds a specific PID to the set of PIDs this unit watches. */ | |
| 2879 | ||
| 2880 | assert(u); | |
| 2881 | assert(pidref_is_set(pid)); | |
| 2882 | ||
| 2883 | /* Caller might be sure that this PID belongs to this unit only. Let's take this | |
| 2884 | * opportunity to remove any stale references to this PID, as they can be created | |
| 2885 | * easily (when watching a process which is not our direct child). */ | |
| 2886 | if (exclusive) | |
| 2887 | manager_unwatch_pidref(u->manager, pid); | |
| 2888 | ||
| 2889 | if (set_contains(u->pids, pid)) { /* early exit if already being watched */ | |
| 2890 | assert(!exclusive); | |
| 2891 | return 0; | |
| 2892 | } | |
| 2893 | ||
| 2894 | r = pidref_dup(pid, &pid_dup); | |
| 2895 | if (r < 0) | |
| 2896 | return r; | |
| 2897 | ||
| 2898 | /* First, insert into the set of PIDs maintained by the unit */ | |
| 2899 | r = set_ensure_put(&u->pids, &pidref_hash_ops_free, pid_dup); | |
| 2900 | if (r < 0) | |
| 2901 | return r; | |
| 2902 | ||
| 2903 | pid = TAKE_PTR(pid_dup); /* continue with our copy now that we have installed it properly in our set */ | |
| 2904 | ||
| 2905 | /* Second, insert it into the simple global table, see if that works */ | |
| 2906 | r = hashmap_ensure_put(&u->manager->watch_pids, &pidref_hash_ops, pid, u); | |
| 2907 | if (r != -EEXIST) | |
| 2908 | return r; | |
| 2909 | ||
| 2910 | /* OK, the key is already assigned to a different unit. That's fine, then add us via the second | |
| 2911 | * hashmap that points to an array. */ | |
| 2912 | ||
| 2913 | PidRef *old_pid = NULL; | |
| 2914 | Unit **array = hashmap_get2(u->manager->watch_pids_more, pid, (void**) &old_pid); | |
| 2915 | ||
| 2916 | /* Count entries in array */ | |
| 2917 | size_t n = 0; | |
| 2918 | for (; array && array[n]; n++) | |
| 2919 | ; | |
| 2920 | ||
| 2921 | /* Allocate a new array */ | |
| 2922 | _cleanup_free_ Unit **new_array = new(Unit*, n + 2); | |
| 2923 | if (!new_array) | |
| 2924 | return -ENOMEM; | |
| 2925 | ||
| 2926 | /* Append us to the end */ | |
| 2927 | memcpy_safe(new_array, array, sizeof(Unit*) * n); | |
| 2928 | new_array[n] = u; | |
| 2929 | new_array[n+1] = NULL; | |
| 2930 | ||
| 2931 | /* Add or replace the old array */ | |
| 2932 | r = hashmap_ensure_replace(&u->manager->watch_pids_more, &pidref_hash_ops, old_pid ?: pid, new_array); | |
| 2933 | if (r < 0) | |
| 2934 | return r; | |
| 2935 | ||
| 2936 | TAKE_PTR(new_array); /* Now part of the hash table */ | |
| 2937 | free(array); /* Which means we can now delete the old version */ | |
| 2938 | return 0; | |
| 2939 | } | |
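||
| | /* Illustrative sketch, not part of the original source: the bookkeeping above spreads each | |
| | * watched PID over three structures. A hypothetical caller that just forked off a helper | |
| | * might do (with exclusive=false, since others may watch the same PID): | |
| | * | |
| | *     r = unit_watch_pidref(u, &pidref, false); | |
| | *     if (r < 0) | |
| | *             return log_unit_error_errno(u, r, "Failed to watch PID: %m"); | |
| | * | |
| | * The PID then lives in u->pids (a Set of PidRef*) and in manager->watch_pids (PidRef* → | |
| | * Unit*, the first watcher); if another unit already watches the same PID it is recorded | |
| | * in manager->watch_pids_more instead (PidRef* → NULL-terminated Unit* array). */ | |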
| 2940 | ||
| 2941 | void unit_unwatch_pidref(Unit *u, const PidRef *pid) { | |
| 2942 | assert(u); | |
| 2943 | assert(pidref_is_set(pid)); | |
| 2944 | ||
| 2945 | /* Remove from the set we maintain for this unit. (And eventually destroy the returned PidRef.) */ | |
| 2946 | _cleanup_(pidref_freep) PidRef *pid1 = set_remove(u->pids, pid); | |
| 2947 | if (!pid1) | |
| 2948 | return; /* Early exit if this PID was never watched by us */ | |
| 2949 | ||
| 2950 | /* First let's drop the unit from the simple hash table, if it is included there */ | |
| 2951 | PidRef *pid2 = NULL; | |
| 2952 | Unit *uu = hashmap_get2(u->manager->watch_pids, pid, (void**) &pid2); | |
| 2953 | ||
| 2954 | /* Quick validation: iff we are in the watch_pids table then the PidRef object must be the same as in our local pids set */ | |
| 2955 | assert((uu == u) == (pid1 == pid2)); | |
| 2956 | ||
| 2957 | if (uu == u) | |
| 2958 | /* OK, we are in the first table. Let's remove it there then, and we are done already. */ | |
| 2959 | assert_se(hashmap_remove_value(u->manager->watch_pids, pid2, uu)); | |
| 2960 | else { | |
| 2961 | /* We weren't in the first table, so let's consult the second table, which points to an array */ | |
| 2962 | PidRef *pid3 = NULL; | |
| 2963 | Unit **array = hashmap_get2(u->manager->watch_pids_more, pid, (void**) &pid3); | |
| 2964 | ||
| 2965 | /* Let's iterate through the array, dropping our own entry */ | |
| 2966 | size_t m = 0, n = 0; | |
| 2967 | for (; array && array[n]; n++) | |
| 2968 | if (array[n] != u) | |
| 2969 | array[m++] = array[n]; | |
| 2970 | if (n == m) | |
| 2971 | return; /* Not there */ | |
| 2972 | ||
| 2973 | array[m] = NULL; /* set trailing NULL marker on the new end */ | |
| 2974 | ||
| 2975 | if (m == 0) { | |
| 2976 | /* The array is now empty, remove the entire entry */ | |
| 2977 | assert_se(hashmap_remove_value(u->manager->watch_pids_more, pid3, array)); | |
| 2978 | free(array); | |
| 2979 | } else { | |
| 2980 | /* The array is not empty, but let's make sure the entry is not keyed by the PidRef | |
| 2981 | * we will delete, but by the PidRef object of the Unit that is now first in the | |
| 2982 | * array. */ | |
| 2983 | ||
| 2984 | PidRef *new_pid3 = ASSERT_PTR(set_get(array[0]->pids, pid)); | |
| 2985 | assert_se(hashmap_replace(u->manager->watch_pids_more, new_pid3, array) >= 0); | |
| 2986 | } | |
| 2987 | } | |
| 2988 | } | |
| 2989 | ||
| 2990 | void unit_unwatch_all_pids(Unit *u) { | |
| 2991 | assert(u); | |
| 2992 | ||
| 2993 | while (!set_isempty(u->pids)) | |
| 2994 | unit_unwatch_pidref(u, set_first(u->pids)); | |
| 2995 | ||
| 2996 | u->pids = set_free(u->pids); | |
| 2997 | } | |
| 2998 | ||
| 2999 | void unit_unwatch_pidref_done(Unit *u, PidRef *pidref) { | |
| 3000 | assert(u); | |
| 3001 | ||
| 3002 | if (!pidref_is_set(pidref)) | |
| 3003 | return; | |
| 3004 | ||
| 3005 | unit_unwatch_pidref(u, pidref); | |
| 3006 | pidref_done(pidref); | |
| 3007 | } | |
| 3008 | ||
| 3009 | bool unit_job_is_applicable(Unit *u, JobType j) { | |
| 3010 | assert(u); | |
| 3011 | assert(j >= 0 && j < _JOB_TYPE_MAX); | |
| 3012 | ||
| 3013 | switch (j) { | |
| 3014 | ||
| 3015 | case JOB_VERIFY_ACTIVE: | |
| 3016 | case JOB_START: | |
| 3017 | case JOB_NOP: | |
| 3018 | /* Note that we don't check unit_can_start() here. That's because .device units and suchlike are not | |
| 3019 | * startable by us but may appear due to external events, and it thus makes sense to permit enqueuing | |
| 3020 | * jobs for them. */ | |
| 3021 | return true; | |
| 3022 | ||
| 3023 | case JOB_STOP: | |
| 3024 | /* Similar to the above. However, perpetual units can never be stopped (neither explicitly nor due to | |
| 3025 | * external events), hence it makes no sense to permit enqueuing such a request either. */ | |
| 3026 | return !u->perpetual; | |
| 3027 | ||
| 3028 | case JOB_RESTART: | |
| 3029 | case JOB_TRY_RESTART: | |
| 3030 | return unit_can_stop(u) && unit_can_start(u); | |
| 3031 | ||
| 3032 | case JOB_RELOAD: | |
| 3033 | case JOB_TRY_RELOAD: | |
| 3034 | return unit_can_reload(u); | |
| 3035 | ||
| 3036 | case JOB_RELOAD_OR_START: | |
| 3037 | return unit_can_reload(u) && unit_can_start(u); | |
| 3038 | ||
| 3039 | default: | |
| 3040 | assert_not_reached(); | |
| 3041 | } | |
| 3042 | } | |
| 3043 | ||
| 3044 | static Hashmap *unit_get_dependency_hashmap_per_type(Unit *u, UnitDependency d) { | |
| 3045 | Hashmap *deps; | |
| 3046 | ||
| 3047 | assert(u); | |
| 3048 | assert(d >= 0 && d < _UNIT_DEPENDENCY_MAX); | |
| 3049 | ||
| 3050 | deps = hashmap_get(u->dependencies, UNIT_DEPENDENCY_TO_PTR(d)); | |
| 3051 | if (!deps) { | |
| 3052 | _cleanup_hashmap_free_ Hashmap *h = NULL; | |
| 3053 | ||
| 3054 | h = hashmap_new(NULL); | |
| 3055 | if (!h) | |
| 3056 | return NULL; | |
| 3057 | ||
| 3058 | if (hashmap_ensure_put(&u->dependencies, NULL, UNIT_DEPENDENCY_TO_PTR(d), h) < 0) | |
| 3059 | return NULL; | |
| 3060 | ||
| 3061 | deps = TAKE_PTR(h); | |
| 3062 | } | |
| 3063 | ||
| 3064 | return deps; | |
| 3065 | } | |
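||
| | /* Illustrative sketch, not part of the original source: u->dependencies is a two-level map, | |
| | * UnitDependency → (Unit* → UnitDependencyInfo). Enumerating e.g. all Wants= dependencies of | |
| | * a unit therefore looks roughly like: | |
| | * | |
| | *     Hashmap *deps = hashmap_get(u->dependencies, UNIT_DEPENDENCY_TO_PTR(UNIT_WANTS)); | |
| | *     UnitDependencyInfo di; | |
| | *     Unit *other; | |
| | *     HASHMAP_FOREACH_KEY(di.data, other, deps) | |
| | *             log_unit_debug(u, "Wants %s", other->id); */ | |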
| 3066 | ||
| 3067 | typedef enum NotifyDependencyFlags { | |
| 3068 | NOTIFY_DEPENDENCY_UPDATE_FROM = 1 << 0, | |
| 3069 | NOTIFY_DEPENDENCY_UPDATE_TO = 1 << 1, | |
| 3070 | } NotifyDependencyFlags; | |
| 3071 | ||
| 3072 | static int unit_add_dependency_impl( | |
| 3073 | Unit *u, | |
| 3074 | UnitDependency d, | |
| 3075 | Unit *other, | |
| 3076 | UnitDependencyMask mask) { | |
| 3077 | ||
| 3078 | static const UnitDependency inverse_table[_UNIT_DEPENDENCY_MAX] = { | |
| 3079 | [UNIT_REQUIRES] = UNIT_REQUIRED_BY, | |
| 3080 | [UNIT_REQUISITE] = UNIT_REQUISITE_OF, | |
| 3081 | [UNIT_WANTS] = UNIT_WANTED_BY, | |
| 3082 | [UNIT_BINDS_TO] = UNIT_BOUND_BY, | |
| 3083 | [UNIT_PART_OF] = UNIT_CONSISTS_OF, | |
| 3084 | [UNIT_UPHOLDS] = UNIT_UPHELD_BY, | |
| 3085 | [UNIT_REQUIRED_BY] = UNIT_REQUIRES, | |
| 3086 | [UNIT_REQUISITE_OF] = UNIT_REQUISITE, | |
| 3087 | [UNIT_WANTED_BY] = UNIT_WANTS, | |
| 3088 | [UNIT_BOUND_BY] = UNIT_BINDS_TO, | |
| 3089 | [UNIT_CONSISTS_OF] = UNIT_PART_OF, | |
| 3090 | [UNIT_UPHELD_BY] = UNIT_UPHOLDS, | |
| 3091 | [UNIT_CONFLICTS] = UNIT_CONFLICTED_BY, | |
| 3092 | [UNIT_CONFLICTED_BY] = UNIT_CONFLICTS, | |
| 3093 | [UNIT_BEFORE] = UNIT_AFTER, | |
| 3094 | [UNIT_AFTER] = UNIT_BEFORE, | |
| 3095 | [UNIT_ON_SUCCESS] = UNIT_ON_SUCCESS_OF, | |
| 3096 | [UNIT_ON_SUCCESS_OF] = UNIT_ON_SUCCESS, | |
| 3097 | [UNIT_ON_FAILURE] = UNIT_ON_FAILURE_OF, | |
| 3098 | [UNIT_ON_FAILURE_OF] = UNIT_ON_FAILURE, | |
| 3099 | [UNIT_TRIGGERS] = UNIT_TRIGGERED_BY, | |
| 3100 | [UNIT_TRIGGERED_BY] = UNIT_TRIGGERS, | |
| 3101 | [UNIT_PROPAGATES_RELOAD_TO] = UNIT_RELOAD_PROPAGATED_FROM, | |
| 3102 | [UNIT_RELOAD_PROPAGATED_FROM] = UNIT_PROPAGATES_RELOAD_TO, | |
| 3103 | [UNIT_PROPAGATES_STOP_TO] = UNIT_STOP_PROPAGATED_FROM, | |
| 3104 | [UNIT_STOP_PROPAGATED_FROM] = UNIT_PROPAGATES_STOP_TO, | |
| 3105 | [UNIT_JOINS_NAMESPACE_OF] = UNIT_JOINS_NAMESPACE_OF, /* symmetric! 👓 */ | |
| 3106 | [UNIT_REFERENCES] = UNIT_REFERENCED_BY, | |
| 3107 | [UNIT_REFERENCED_BY] = UNIT_REFERENCES, | |
| 3108 | [UNIT_IN_SLICE] = UNIT_SLICE_OF, | |
| 3109 | [UNIT_SLICE_OF] = UNIT_IN_SLICE, | |
| 3110 | }; | |
| 3111 | ||
| 3112 | Hashmap *u_deps, *other_deps; | |
| 3113 | UnitDependencyInfo u_info, u_info_old, other_info, other_info_old; | |
| 3114 | NotifyDependencyFlags flags = 0; | |
| 3115 | int r; | |
| 3116 | ||
| 3117 | assert(u); | |
| 3118 | assert(other); | |
| 3119 | assert(d >= 0 && d < _UNIT_DEPENDENCY_MAX); | |
| 3120 | assert(inverse_table[d] >= 0 && inverse_table[d] < _UNIT_DEPENDENCY_MAX); | |
| 3121 | assert(mask > 0 && mask < _UNIT_DEPENDENCY_MASK_FULL); | |
| 3122 | ||
| 3123 | /* Ensure the following two hashmaps for each unit exist: | |
| 3124 | * - the top-level dependency hashmap that maps UnitDependency → Hashmap(Unit* → UnitDependencyInfo), | |
| 3125 | * - the inner hashmap, that maps Unit* → UnitDependencyInfo, for the specified dependency type. */ | |
| 3126 | u_deps = unit_get_dependency_hashmap_per_type(u, d); | |
| 3127 | if (!u_deps) | |
| 3128 | return -ENOMEM; | |
| 3129 | ||
| 3130 | other_deps = unit_get_dependency_hashmap_per_type(other, inverse_table[d]); | |
| 3131 | if (!other_deps) | |
| 3132 | return -ENOMEM; | |
| 3133 | ||
| 3134 | /* Save the original dependency info. */ | |
| 3135 | u_info.data = u_info_old.data = hashmap_get(u_deps, other); | |
| 3136 | other_info.data = other_info_old.data = hashmap_get(other_deps, u); | |
| 3137 | ||
| 3138 | /* Update dependency info. */ | |
| 3139 | u_info.origin_mask |= mask; | |
| 3140 | other_info.destination_mask |= mask; | |
| 3141 | ||
| 3142 | /* Save updated dependency info. */ | |
| 3143 | if (u_info.data != u_info_old.data) { | |
| 3144 | r = hashmap_replace(u_deps, other, u_info.data); | |
| 3145 | if (r < 0) | |
| 3146 | return r; | |
| 3147 | ||
| 3148 | flags = NOTIFY_DEPENDENCY_UPDATE_FROM; | |
| 3149 | u->dependency_generation++; | |
| 3150 | } | |
| 3151 | ||
| 3152 | if (other_info.data != other_info_old.data) { | |
| 3153 | r = hashmap_replace(other_deps, u, other_info.data); | |
| 3154 | if (r < 0) { | |
| 3155 | if (u_info.data != u_info_old.data) { | |
| 3156 | /* Restore the old dependency. */ | |
| 3157 | if (u_info_old.data) | |
| 3158 | (void) hashmap_update(u_deps, other, u_info_old.data); | |
| 3159 | else | |
| 3160 | hashmap_remove(u_deps, other); | |
| 3161 | } | |
| 3162 | return r; | |
| 3163 | } | |
| 3164 | ||
| 3165 | flags |= NOTIFY_DEPENDENCY_UPDATE_TO; | |
| 3166 | other->dependency_generation++; | |
| 3167 | } | |
| 3168 | ||
| 3169 | return flags; | |
| 3170 | } | |
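||
| | /* Worked example, illustrative: adding UNIT_REQUIRES from a.service on b.service records the | |
| | * dependency on both sides, so either unit can be queried cheaply: the mask is ORed into | |
| | * a's UNIT_REQUIRES entry for b (origin_mask) and into b's UNIT_REQUIRED_BY entry for a | |
| | * (destination_mask). The inverse type comes from inverse_table[] above; note that | |
| | * UNIT_JOINS_NAMESPACE_OF is its own inverse. */ | |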
| 3171 | ||
| 3172 | int unit_add_dependency( | |
| 3173 | Unit *u, | |
| 3174 | UnitDependency d, | |
| 3175 | Unit *other, | |
| 3176 | bool add_reference, | |
| 3177 | UnitDependencyMask mask) { | |
| 3178 | ||
| 3179 | UnitDependencyAtom a; | |
| 3180 | int r; | |
| 3181 | ||
| 3182 | /* Helper to know whether sending a notification is necessary or not: if the dependency is already | |
| 3183 | * there, no need to notify! */ | |
| 3184 | NotifyDependencyFlags notify_flags; | |
| 3185 | ||
| 3186 | assert(u); | |
| 3187 | assert(d >= 0 && d < _UNIT_DEPENDENCY_MAX); | |
| 3188 | assert(other); | |
| 3189 | ||
| 3190 | u = unit_follow_merge(u); | |
| 3191 | other = unit_follow_merge(other); | |
| 3192 | a = unit_dependency_to_atom(d); | |
| 3193 | assert(a >= 0); | |
| 3194 | ||
| 3195 | /* We won't allow dependencies on ourselves. However, we will not consider them an error. */ | |
| 3196 | if (u == other) { | |
| 3197 | if (unit_should_warn_about_dependency(d)) | |
| 3198 | log_unit_warning(u, "Dependency %s=%s is dropped.", | |
| 3199 | unit_dependency_to_string(d), u->id); | |
| 3200 | return 0; | |
| 3201 | } | |
| 3202 | ||
| 3203 | if (u->manager && FLAGS_SET(u->manager->test_run_flags, MANAGER_TEST_RUN_IGNORE_DEPENDENCIES)) | |
| 3204 | return 0; | |
| 3205 | ||
| 3206 | /* Note that ordering a device unit after a unit is permitted since it allows its job running | |
| 3207 | * timeout to be started at a specific time. */ | |
| 3208 | if (FLAGS_SET(a, UNIT_ATOM_BEFORE) && other->type == UNIT_DEVICE) { | |
| 3209 | log_unit_warning(u, "Dependency Before=%s ignored (.device units cannot be delayed).", other->id); | |
| 3210 | return 0; | |
| 3211 | } | |
| 3212 | ||
| 3213 | if (FLAGS_SET(a, UNIT_ATOM_ON_FAILURE) && !UNIT_VTABLE(u)->can_fail) { | |
| 3214 | log_unit_warning(u, "Requested dependency OnFailure=%s ignored (%s units cannot fail).", other->id, unit_type_to_string(u->type)); | |
| 3215 | return 0; | |
| 3216 | } | |
| 3217 | ||
| 3218 | if (FLAGS_SET(a, UNIT_ATOM_TRIGGERS) && !UNIT_VTABLE(u)->can_trigger) | |
| 3219 | return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL), | |
| 3220 | "Requested dependency Triggers=%s refused (%s units cannot trigger other units).", other->id, unit_type_to_string(u->type)); | |
| 3221 | if (FLAGS_SET(a, UNIT_ATOM_TRIGGERED_BY) && !UNIT_VTABLE(other)->can_trigger) | |
| 3222 | return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL), | |
| 3223 | "Requested dependency TriggeredBy=%s refused (%s units cannot trigger other units).", other->id, unit_type_to_string(other->type)); | |
| 3224 | ||
| 3225 | if (FLAGS_SET(a, UNIT_ATOM_IN_SLICE) && other->type != UNIT_SLICE) | |
| 3226 | return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL), | |
| 3227 | "Requested dependency Slice=%s refused (%s is not a slice unit).", other->id, other->id); | |
| 3228 | if (FLAGS_SET(a, UNIT_ATOM_SLICE_OF) && u->type != UNIT_SLICE) | |
| 3229 | return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL), | |
| 3230 | "Requested dependency SliceOf=%s refused (%s is not a slice unit).", other->id, u->id); | |
| 3231 | ||
| 3232 | if (FLAGS_SET(a, UNIT_ATOM_IN_SLICE) && !UNIT_HAS_CGROUP_CONTEXT(u)) | |
| 3233 | return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL), | |
| 3234 | "Requested dependency Slice=%s refused (%s is not a cgroup unit).", other->id, u->id); | |
| 3235 | ||
| 3236 | if (FLAGS_SET(a, UNIT_ATOM_SLICE_OF) && !UNIT_HAS_CGROUP_CONTEXT(other)) | |
| 3237 | return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL), | |
| 3238 | "Requested dependency SliceOf=%s refused (%s is not a cgroup unit).", other->id, other->id); | |
| 3239 | ||
| 3240 | r = unit_add_dependency_impl(u, d, other, mask); | |
| 3241 | if (r < 0) | |
| 3242 | return r; | |
| 3243 | notify_flags = r; | |
| 3244 | ||
| 3245 | if (add_reference) { | |
| 3246 | r = unit_add_dependency_impl(u, UNIT_REFERENCES, other, mask); | |
| 3247 | if (r < 0) | |
| 3248 | return r; | |
| 3249 | notify_flags |= r; | |
| 3250 | } | |
| 3251 | ||
| 3252 | if (FLAGS_SET(notify_flags, NOTIFY_DEPENDENCY_UPDATE_FROM)) | |
| 3253 | unit_add_to_dbus_queue(u); | |
| 3254 | if (FLAGS_SET(notify_flags, NOTIFY_DEPENDENCY_UPDATE_TO)) | |
| 3255 | unit_add_to_dbus_queue(other); | |
| 3256 | ||
| 3257 | return notify_flags != 0; | |
| 3258 | } | |
| 3259 | ||
| 3260 | int unit_add_two_dependencies(Unit *u, UnitDependency d, UnitDependency e, Unit *other, bool add_reference, UnitDependencyMask mask) { | |
| 3261 | int r = 0, s = 0; | |
| 3262 | ||
| 3263 | assert(u); | |
| 3264 | assert(d >= 0 || e >= 0); | |
| 3265 | ||
| 3266 | if (d >= 0) { | |
| 3267 | r = unit_add_dependency(u, d, other, add_reference, mask); | |
| 3268 | if (r < 0) | |
| 3269 | return r; | |
| 3270 | } | |
| 3271 | ||
| 3272 | if (e >= 0) { | |
| 3273 | s = unit_add_dependency(u, e, other, add_reference, mask); | |
| 3274 | if (s < 0) | |
| 3275 | return s; | |
| 3276 | } | |
| 3277 | ||
| 3278 | return r > 0 || s > 0; | |
| 3279 | } | |
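||
| | /* Typical use, illustrative: add ordering and requirement in one call, as | |
| | * unit_add_node_dependency() below does for device units: | |
| | * | |
| | *     r = unit_add_two_dependencies(u, UNIT_AFTER, UNIT_REQUIRES, device, true, mask); | |
| | * | |
| | * The result is > 0 if at least one of the two dependencies was newly added. */ | |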
| 3280 | ||
| 3281 | static int resolve_template(Unit *u, const char *name, char **buf, const char **ret) { | |
| 3282 | int r; | |
| 3283 | ||
| 3284 | assert(u); | |
| 3285 | assert(name); | |
| 3286 | assert(buf); | |
| 3287 | assert(ret); | |
| 3288 | ||
| 3289 | if (!unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) { | |
| 3290 | *buf = NULL; | |
| 3291 | *ret = name; | |
| 3292 | return 0; | |
| 3293 | } | |
| 3294 | ||
| 3295 | if (u->instance) | |
| 3296 | r = unit_name_replace_instance(name, u->instance, buf); | |
| 3297 | else { | |
| 3298 | _cleanup_free_ char *i = NULL; | |
| 3299 | ||
| 3300 | r = unit_name_to_prefix(u->id, &i); | |
| 3301 | if (r < 0) | |
| 3302 | return r; | |
| 3303 | ||
| 3304 | r = unit_name_replace_instance(name, i, buf); | |
| 3305 | } | |
| 3306 | if (r < 0) | |
| 3307 | return r; | |
| 3308 | ||
| 3309 | *ret = *buf; | |
| 3310 | return 0; | |
| 3311 | } | |
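||
| | /* Worked example, illustrative: with u = "getty@tty1.service" (instance "tty1") and | |
| | * name = "serial-getty@.service", this resolves to "serial-getty@tty1.service", allocated | |
| | * into *buf. For a non-template name, the input is passed through and *buf stays NULL. */ | |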
| 3312 | ||
| 3313 | int unit_add_dependency_by_name(Unit *u, UnitDependency d, const char *name, bool add_reference, UnitDependencyMask mask) { | |
| 3314 | _cleanup_free_ char *buf = NULL; | |
| 3315 | Unit *other; | |
| 3316 | int r; | |
| 3317 | ||
| 3318 | assert(u); | |
| 3319 | assert(name); | |
| 3320 | ||
| 3321 | r = resolve_template(u, name, &buf, &name); | |
| 3322 | if (r < 0) | |
| 3323 | return r; | |
| 3324 | ||
| 3325 | if (u->manager && FLAGS_SET(u->manager->test_run_flags, MANAGER_TEST_RUN_IGNORE_DEPENDENCIES)) | |
| 3326 | return 0; | |
| 3327 | ||
| 3328 | r = manager_load_unit(u->manager, name, NULL, NULL, &other); | |
| 3329 | if (r < 0) | |
| 3330 | return r; | |
| 3331 | ||
| 3332 | return unit_add_dependency(u, d, other, add_reference, mask); | |
| 3333 | } | |
| 3334 | ||
| 3335 | int unit_add_two_dependencies_by_name(Unit *u, UnitDependency d, UnitDependency e, const char *name, bool add_reference, UnitDependencyMask mask) { | |
| 3336 | _cleanup_free_ char *buf = NULL; | |
| 3337 | Unit *other; | |
| 3338 | int r; | |
| 3339 | ||
| 3340 | assert(u); | |
| 3341 | assert(name); | |
| 3342 | ||
| 3343 | r = resolve_template(u, name, &buf, &name); | |
| 3344 | if (r < 0) | |
| 3345 | return r; | |
| 3346 | ||
| 3347 | if (u->manager && FLAGS_SET(u->manager->test_run_flags, MANAGER_TEST_RUN_IGNORE_DEPENDENCIES)) | |
| 3348 | return 0; | |
| 3349 | ||
| 3350 | r = manager_load_unit(u->manager, name, NULL, NULL, &other); | |
| 3351 | if (r < 0) | |
| 3352 | return r; | |
| 3353 | ||
| 3354 | return unit_add_two_dependencies(u, d, e, other, add_reference, mask); | |
| 3355 | } | |
| 3356 | ||
| 3357 | int setenv_unit_path(const char *p) { | |
| 3358 | assert(p); | |
| 3359 | ||
| 3360 | /* This is mostly for debugging purposes */ | |
| 3361 | return RET_NERRNO(setenv("SYSTEMD_UNIT_PATH", p, /* overwrite = */ true)); | |
| 3362 | } | |
| 3363 | ||
| 3364 | char* unit_dbus_path(Unit *u) { | |
| 3365 | assert(u); | |
| 3366 | ||
| 3367 | if (!u->id) | |
| 3368 | return NULL; | |
| 3369 | ||
| 3370 | return unit_dbus_path_from_name(u->id); | |
| 3371 | } | |
| 3372 | ||
| 3373 | char* unit_dbus_path_invocation_id(Unit *u) { | |
| 3374 | assert(u); | |
| 3375 | ||
| 3376 | if (sd_id128_is_null(u->invocation_id)) | |
| 3377 | return NULL; | |
| 3378 | ||
| 3379 | return unit_dbus_path_from_name(u->invocation_id_string); | |
| 3380 | } | |
| 3381 | ||
| 3382 | int unit_set_invocation_id(Unit *u, sd_id128_t id) { | |
| 3383 | int r; | |
| 3384 | ||
| 3385 | assert(u); | |
| 3386 | ||
| 3387 | /* Set the invocation ID for this unit. If this fails, we will not roll back, but reset the whole thing. */ | |
| 3388 | ||
| 3389 | if (sd_id128_equal(u->invocation_id, id)) | |
| 3390 | return 0; | |
| 3391 | ||
| 3392 | if (!sd_id128_is_null(u->invocation_id)) | |
| 3393 | (void) hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u); | |
| 3394 | ||
| 3395 | if (sd_id128_is_null(id)) { | |
| 3396 | r = 0; | |
| 3397 | goto reset; | |
| 3398 | } | |
| 3399 | ||
| 3400 | r = hashmap_ensure_allocated(&u->manager->units_by_invocation_id, &id128_hash_ops); | |
| 3401 | if (r < 0) | |
| 3402 | goto reset; | |
| 3403 | ||
| 3404 | u->invocation_id = id; | |
| 3405 | sd_id128_to_string(id, u->invocation_id_string); | |
| 3406 | ||
| 3407 | r = hashmap_put(u->manager->units_by_invocation_id, &u->invocation_id, u); | |
| 3408 | if (r < 0) | |
| 3409 | goto reset; | |
| 3410 | ||
| 3411 | return 0; | |
| 3412 | ||
| 3413 | reset: | |
| 3414 | u->invocation_id = SD_ID128_NULL; | |
| 3415 | u->invocation_id_string[0] = 0; | |
| 3416 | return r; | |
| 3417 | } | |
| 3418 | ||
| 3419 | int unit_set_slice(Unit *u, Unit *slice) { | |
| 3420 | int r; | |
| 3421 | ||
| 3422 | assert(u); | |
| 3423 | assert(slice); | |
| 3424 | ||
| 3425 | /* Sets the unit slice if it has not been set before. Is extra careful to only allow this for units | |
| 3426 | * that actually have a cgroup context. Also, we don't allow setting this for slices (since the parent | |
| 3427 | * slice is derived from the name). Make sure the unit we set is actually a slice. */ | |
| 3428 | ||
| 3429 | if (!UNIT_HAS_CGROUP_CONTEXT(u)) | |
| 3430 | return -EOPNOTSUPP; | |
| 3431 | ||
| 3432 | if (u->type == UNIT_SLICE) | |
| 3433 | return -EINVAL; | |
| 3434 | ||
| 3435 | if (unit_active_state(u) != UNIT_INACTIVE) | |
| 3436 | return -EBUSY; | |
| 3437 | ||
| 3438 | if (slice->type != UNIT_SLICE) | |
| 3439 | return -EINVAL; | |
| 3440 | ||
| 3441 | if (unit_has_name(u, SPECIAL_INIT_SCOPE) && | |
| 3442 | !unit_has_name(slice, SPECIAL_ROOT_SLICE)) | |
| 3443 | return -EPERM; | |
| 3444 | ||
| 3445 | if (UNIT_GET_SLICE(u) == slice) | |
| 3446 | return 0; | |
| 3447 | ||
| 3448 | /* Disallow slice changes if @u is already bound to cgroups */ | |
| 3449 | if (UNIT_GET_SLICE(u)) { | |
| 3450 | CGroupRuntime *crt = unit_get_cgroup_runtime(u); | |
| 3451 | if (crt && crt->cgroup_path) | |
| 3452 | return -EBUSY; | |
| 3453 | } | |
| 3454 | ||
| 3455 | /* Remove any slices assigned prior; we should only have one UNIT_IN_SLICE dependency */ | |
| 3456 | if (UNIT_GET_SLICE(u)) | |
| 3457 | unit_remove_dependencies(u, UNIT_DEPENDENCY_SLICE_PROPERTY); | |
| 3458 | ||
| 3459 | r = unit_add_dependency(u, UNIT_IN_SLICE, slice, true, UNIT_DEPENDENCY_SLICE_PROPERTY); | |
| 3460 | if (r < 0) | |
| 3461 | return r; | |
| 3462 | ||
| 3463 | return 1; | |
| 3464 | } | |
| 3465 | ||
| 3466 | int unit_set_default_slice(Unit *u) { | |
| 3467 | const char *slice_name; | |
| 3468 | Unit *slice; | |
| 3469 | int r; | |
| 3470 | ||
| 3471 | assert(u); | |
| 3472 | ||
| 3473 | if (u->manager && FLAGS_SET(u->manager->test_run_flags, MANAGER_TEST_RUN_IGNORE_DEPENDENCIES)) | |
| 3474 | return 0; | |
| 3475 | ||
| 3476 | if (UNIT_GET_SLICE(u)) | |
| 3477 | return 0; | |
| 3478 | ||
| 3479 | if (u->instance) { | |
| 3480 | _cleanup_free_ char *prefix = NULL, *escaped = NULL; | |
| 3481 | ||
| 3482 | /* Implicitly place all instantiated units in their | |
| 3483 | * own per-template slice */ | |
| 3484 | ||
| 3485 | r = unit_name_to_prefix(u->id, &prefix); | |
| 3486 | if (r < 0) | |
| 3487 | return r; | |
| 3488 | ||
| 3489 | /* The prefix is already escaped, but it might include | |
| 3490 | * "-" which has a special meaning for slice units, | |
| 3491 | * hence escape it once more here. */ | |
| 3492 | escaped = unit_name_escape(prefix); | |
| 3493 | if (!escaped) | |
| 3494 | return -ENOMEM; | |
| 3495 | ||
| 3496 | if (MANAGER_IS_SYSTEM(u->manager)) | |
| 3497 | slice_name = strjoina("system-", escaped, ".slice"); | |
| 3498 | else | |
| 3499 | slice_name = strjoina("app-", escaped, ".slice"); | |
| 3500 | ||
| 3501 | } else if (unit_is_extrinsic(u)) | |
| 3502 | /* Keep all extrinsic units (e.g. perpetual units and swap and mount units in user mode) in | |
| 3503 | * the root slice. They don't really belong in one of the subslices. */ | |
| 3504 | slice_name = SPECIAL_ROOT_SLICE; | |
| 3505 | ||
| 3506 | else if (MANAGER_IS_SYSTEM(u->manager)) | |
| 3507 | slice_name = SPECIAL_SYSTEM_SLICE; | |
| 3508 | else | |
| 3509 | slice_name = SPECIAL_APP_SLICE; | |
| 3510 | ||
| 3511 | r = manager_load_unit(u->manager, slice_name, NULL, NULL, &slice); | |
| 3512 | if (r < 0) | |
| 3513 | return r; | |
| 3514 | ||
| 3515 | return unit_set_slice(u, slice); | |
| 3516 | } | |
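||
| | /* Worked example, illustrative: an instantiated system unit "foo-daemon@bar.service" has | |
| | * prefix "foo-daemon", which is escaped once more to "foo\x2ddaemon", yielding the slice | |
| | * "system-foo\x2ddaemon.slice". A non-instantiated system service lands in "system.slice", | |
| | * a user service in "app.slice", and extrinsic units stay in the root slice "-.slice". */ | |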
| 3517 | ||
| 3518 | const char* unit_slice_name(Unit *u) { | |
| 3519 | Unit *slice; | |
| 3520 | assert(u); | |
| 3521 | ||
| 3522 | slice = UNIT_GET_SLICE(u); | |
| 3523 | if (!slice) | |
| 3524 | return NULL; | |
| 3525 | ||
| 3526 | return slice->id; | |
| 3527 | } | |
| 3528 | ||
| 3529 | int unit_load_related_unit(Unit *u, const char *type, Unit **_found) { | |
| 3530 | _cleanup_free_ char *t = NULL; | |
| 3531 | int r; | |
| 3532 | ||
| 3533 | assert(u); | |
| 3534 | assert(type); | |
| 3535 | assert(_found); | |
| 3536 | ||
| 3537 | r = unit_name_change_suffix(u->id, type, &t); | |
| 3538 | if (r < 0) | |
| 3539 | return r; | |
| 3540 | if (unit_has_name(u, t)) | |
| 3541 | return -EINVAL; | |
| 3542 | ||
| 3543 | r = manager_load_unit(u->manager, t, NULL, NULL, _found); | |
| 3544 | assert(r < 0 || *_found != u); | |
| 3545 | return r; | |
| 3546 | } | |
| 3547 | ||
| 3548 | static int signal_name_owner_changed_install_handler(sd_bus_message *message, void *userdata, sd_bus_error *error) { | |
| 3549 | Unit *u = ASSERT_PTR(userdata); | |
| 3550 | const sd_bus_error *e; | |
| 3551 | int r; | |
| 3552 | ||
| 3553 | e = sd_bus_message_get_error(message); | |
| 3554 | if (!e) { | |
| 3555 | log_unit_trace(u, "Successfully installed NameOwnerChanged signal match."); | |
| 3556 | return 0; | |
| 3557 | } | |
| 3558 | ||
| 3559 | r = sd_bus_error_get_errno(e); | |
| 3560 | log_unit_error_errno(u, r, | |
| 3561 | "Unexpected error response on installing NameOwnerChanged signal match: %s", | |
| 3562 | bus_error_message(e, r)); | |
| 3563 | ||
| 3564 | /* If we failed to install the NameOwnerChanged signal match, also unref the bus slot of GetNameOwner(). */ | |
| 3565 | u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot); | |
| 3566 | u->get_name_owner_slot = sd_bus_slot_unref(u->get_name_owner_slot); | |
| 3567 | ||
| 3568 | if (UNIT_VTABLE(u)->bus_name_owner_change) | |
| 3569 | UNIT_VTABLE(u)->bus_name_owner_change(u, NULL); | |
| 3570 | ||
| 3571 | return 0; | |
| 3572 | } | |
| 3573 | ||
| 3574 | static int signal_name_owner_changed(sd_bus_message *message, void *userdata, sd_bus_error *error) { | |
| 3575 | const char *new_owner; | |
| 3576 | Unit *u = ASSERT_PTR(userdata); | |
| 3577 | int r; | |
| 3578 | ||
| 3579 | assert(message); | |
| 3580 | ||
| 3581 | r = sd_bus_message_read(message, "sss", NULL, NULL, &new_owner); | |
| 3582 | if (r < 0) { | |
| 3583 | bus_log_parse_error(r); | |
| 3584 | return 0; | |
| 3585 | } | |
| 3586 | ||
| 3587 | if (UNIT_VTABLE(u)->bus_name_owner_change) | |
| 3588 | UNIT_VTABLE(u)->bus_name_owner_change(u, empty_to_null(new_owner)); | |
| 3589 | ||
| 3590 | return 0; | |
| 3591 | } | |
| 3592 | ||
| 3593 | static int get_name_owner_handler(sd_bus_message *message, void *userdata, sd_bus_error *error) { | |
| 3594 | const sd_bus_error *e; | |
| 3595 | const char *new_owner; | |
| 3596 | Unit *u = ASSERT_PTR(userdata); | |
| 3597 | int r; | |
| 3598 | ||
| 3599 | assert(message); | |
| 3600 | ||
| 3601 | u->get_name_owner_slot = sd_bus_slot_unref(u->get_name_owner_slot); | |
| 3602 | ||
| 3603 | e = sd_bus_message_get_error(message); | |
| 3604 | if (e) { | |
| 3605 | if (!sd_bus_error_has_name(e, SD_BUS_ERROR_NAME_HAS_NO_OWNER)) { | |
| 3606 | r = sd_bus_error_get_errno(e); | |
| 3607 | log_unit_error_errno(u, r, | |
| 3608 | "Unexpected error response from GetNameOwner(): %s", | |
| 3609 | bus_error_message(e, r)); | |
| 3610 | } | |
| 3611 | ||
| 3612 | new_owner = NULL; | |
| 3613 | } else { | |
| 3614 | r = sd_bus_message_read(message, "s", &new_owner); | |
| 3615 | if (r < 0) | |
| 3616 | return bus_log_parse_error(r); | |
| 3617 | ||
| 3618 | assert(!isempty(new_owner)); | |
| 3619 | } | |
| 3620 | ||
| 3621 | if (UNIT_VTABLE(u)->bus_name_owner_change) | |
| 3622 | UNIT_VTABLE(u)->bus_name_owner_change(u, new_owner); | |
| 3623 | ||
| 3624 | return 0; | |
| 3625 | } | |
| 3626 | ||
| 3627 | int unit_install_bus_match(Unit *u, sd_bus *bus, const char *name) { | |
| 3628 | _cleanup_(sd_bus_message_unrefp) sd_bus_message *m = NULL; | |
| 3629 | const char *match; | |
| 3630 | usec_t timeout_usec = 0; | |
| 3631 | int r; | |
| 3632 | ||
| 3633 | assert(u); | |
| 3634 | assert(bus); | |
| 3635 | assert(name); | |
| 3636 | ||
| 3637 | if (u->match_bus_slot || u->get_name_owner_slot) | |
| 3638 | return -EBUSY; | |
| 3639 | ||
| 3640 | /* NameOwnerChanged and GetNameOwner are used to detect when a service has finished starting up. The D-Bus | |
| 3641 | * call timeout shouldn't be earlier than that. If we couldn't get the start timeout, use the default | |
| 3642 | * value defined above. */ | |
| 3643 | if (UNIT_VTABLE(u)->get_timeout_start_usec) | |
| 3644 | timeout_usec = UNIT_VTABLE(u)->get_timeout_start_usec(u); | |
| 3645 | ||
| 3646 | match = strjoina("type='signal'," | |
| 3647 | "sender='org.freedesktop.DBus'," | |
| 3648 | "path='/org/freedesktop/DBus'," | |
| 3649 | "interface='org.freedesktop.DBus'," | |
| 3650 | "member='NameOwnerChanged'," | |
| 3651 | "arg0='", name, "'"); | |
| 3652 | ||
| 3653 | r = bus_add_match_full( | |
| 3654 | bus, | |
| 3655 | &u->match_bus_slot, | |
| 3656 | /* asynchronous = */ true, | |
| 3657 | match, | |
| 3658 | signal_name_owner_changed, | |
| 3659 | signal_name_owner_changed_install_handler, | |
| 3660 | u, | |
| 3661 | timeout_usec); | |
| 3662 | if (r < 0) | |
| 3663 | return r; | |
| 3664 | ||
| 3665 | r = sd_bus_message_new_method_call( | |
| 3666 | bus, | |
| 3667 | &m, | |
| 3668 | "org.freedesktop.DBus", | |
| 3669 | "/org/freedesktop/DBus", | |
| 3670 | "org.freedesktop.DBus", | |
| 3671 | "GetNameOwner"); | |
| 3672 | if (r < 0) | |
| 3673 | return r; | |
| 3674 | ||
| 3675 | r = sd_bus_message_append(m, "s", name); | |
| 3676 | if (r < 0) | |
| 3677 | return r; | |
| 3678 | ||
| 3679 | r = sd_bus_call_async( | |
| 3680 | bus, | |
| 3681 | &u->get_name_owner_slot, | |
| 3682 | m, | |
| 3683 | get_name_owner_handler, | |
| 3684 | u, | |
| 3685 | timeout_usec); | |
| 3686 | if (r < 0) { | |
| 3687 | u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot); | |
| 3688 | return r; | |
| 3689 | } | |
| 3690 | ||
| 3691 | log_unit_debug(u, "Watching D-Bus name '%s'.", name); | |
| 3692 | return 0; | |
| 3693 | } | |
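||
| | /* Illustrative: for name = "org.example.Daemon" (hypothetical), the match installed above is | |
| | * | |
| | *     type='signal',sender='org.freedesktop.DBus',path='/org/freedesktop/DBus', | |
| | *     interface='org.freedesktop.DBus',member='NameOwnerChanged',arg0='org.example.Daemon' | |
| | * | |
| | * while the asynchronous GetNameOwner("org.example.Daemon") call covers the case where the | |
| | * name was already owned before the match took effect. */ | |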
| 3694 | ||
| 3695 | int unit_watch_bus_name(Unit *u, const char *name) { | |
| 3696 | int r; | |
| 3697 | ||
| 3698 | assert(u); | |
| 3699 | assert(name); | |
| 3700 | ||
| 3701 | /* Watch a specific name on the bus. We only support one unit | |
| 3702 | * watching each name for now. */ | |
| 3703 | ||
| 3704 | if (u->manager->api_bus) { | |
| 3705 | /* If the bus is already available, install the match directly. | |
| 3706 | * Otherwise, just put the name in the list. bus_setup_api() will take care later. */ | |
| 3707 | r = unit_install_bus_match(u, u->manager->api_bus, name); | |
| 3708 | if (r < 0) | |
| 3709 | return log_warning_errno(r, "Failed to subscribe to NameOwnerChanged signal for '%s': %m", name); | |
| 3710 | } | |
| 3711 | ||
| 3712 | r = hashmap_put(u->manager->watch_bus, name, u); | |
| 3713 | if (r < 0) { | |
| 3714 | u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot); | |
| 3715 | u->get_name_owner_slot = sd_bus_slot_unref(u->get_name_owner_slot); | |
| 3716 | return log_warning_errno(r, "Failed to add bus name to hashmap: %m"); | |
| 3717 | } | |
| 3718 | ||
| 3719 | return 0; | |
| 3720 | } | |
| 3721 | ||
| 3722 | void unit_unwatch_bus_name(Unit *u, const char *name) { | |
| 3723 | assert(u); | |
| 3724 | assert(name); | |
| 3725 | ||
| 3726 | (void) hashmap_remove_value(u->manager->watch_bus, name, u); | |
| 3727 | u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot); | |
| 3728 | u->get_name_owner_slot = sd_bus_slot_unref(u->get_name_owner_slot); | |
| 3729 | } | |
| 3730 | ||
| 3731 | int unit_add_node_dependency(Unit *u, const char *what, UnitDependency dep, UnitDependencyMask mask) { | |
| 3732 | _cleanup_free_ char *e = NULL; | |
| 3733 | Unit *device; | |
| 3734 | int r; | |
| 3735 | ||
| 3736 | assert(u); | |
| 3737 | ||
| 3738 | /* Adds in links to the device node that this unit is based on */ | |
| 3739 | if (isempty(what)) | |
| 3740 | return 0; | |
| 3741 | ||
| 3742 | if (!is_device_path(what)) | |
| 3743 | return 0; | |
| 3744 | ||
| 3745 | /* When device units aren't supported (such as in a container), don't create dependencies on them. */ | |
| 3746 | if (!unit_type_supported(UNIT_DEVICE)) | |
| 3747 | return 0; | |
| 3748 | ||
| 3749 | r = unit_name_from_path(what, ".device", &e); | |
| 3750 | if (r < 0) | |
| 3751 | return r; | |
| 3752 | ||
| 3753 | r = manager_load_unit(u->manager, e, NULL, NULL, &device); | |
| 3754 | if (r < 0) | |
| 3755 | return r; | |
| 3756 | ||
| 3757 | if (dep == UNIT_REQUIRES && device_shall_be_bound_by(device, u)) | |
| 3758 | dep = UNIT_BINDS_TO; | |
| 3759 | ||
| 3760 | return unit_add_two_dependencies(u, UNIT_AFTER, | |
| 3761 | MANAGER_IS_SYSTEM(u->manager) ? dep : UNIT_WANTS, | |
| 3762 | device, true, mask); | |
| 3763 | } | |
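||
| | /* Worked example, illustrative: for what = "/dev/sda1" this loads "dev-sda1.device" and adds | |
| | * After= plus Requires= on it (upgraded to BindsTo= if the device is to be bound to us) in a | |
| | * system manager, or After= plus Wants= in a user manager. */ | |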
| 3764 | ||
| 3765 | int unit_add_blockdev_dependency(Unit *u, const char *what, UnitDependencyMask mask) { | |
| 3766 | _cleanup_free_ char *escaped = NULL, *target = NULL; | |
| 3767 | int r; | |
| 3768 | ||
| 3769 | assert(u); | |
| 3770 | ||
| 3771 | if (isempty(what)) | |
| 3772 | return 0; | |
| 3773 | ||
| 3774 | if (!path_startswith(what, "/dev/")) | |
| 3775 | return 0; | |
| 3776 | ||
| 3777 | /* If we don't support devices, then also don't bother with blockdev@.target */ | |
| 3778 | if (!unit_type_supported(UNIT_DEVICE)) | |
| 3779 | return 0; | |
| 3780 | ||
| 3781 | r = unit_name_path_escape(what, &escaped); | |
| 3782 | if (r < 0) | |
| 3783 | return r; | |
| 3784 | ||
| 3785 | r = unit_name_build("blockdev", escaped, ".target", &target); | |
| 3786 | if (r < 0) | |
| 3787 | return r; | |
| 3788 | ||
| 3789 | return unit_add_dependency_by_name(u, UNIT_AFTER, target, true, mask); | |
| 3790 | } | |
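||
| | /* Worked example, illustrative: what = "/dev/mapper/root" escapes to "dev-mapper-root", so | |
| | * the dependency added is After=blockdev@dev-mapper-root.target (plus a reference). */ | |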
| 3791 | ||
| 3792 | int unit_coldplug(Unit *u) { | |
| 3793 | int r = 0; | |
| 3794 | ||
| 3795 | assert(u); | |
| 3796 | ||
| 3797 | /* Make sure we don't enter a loop when coldplugging recursively. */ | |
| 3798 | if (u->coldplugged) | |
| 3799 | return 0; | |
| 3800 | ||
| 3801 | u->coldplugged = true; | |
| 3802 | ||
| 3803 | STRV_FOREACH(i, u->deserialized_refs) | |
| 3804 | RET_GATHER(r, bus_unit_track_add_name(u, *i)); | |
| 3805 | ||
| 3806 | u->deserialized_refs = strv_free(u->deserialized_refs); | |
| 3807 | ||
| 3808 | if (UNIT_VTABLE(u)->coldplug) | |
| 3809 | RET_GATHER(r, UNIT_VTABLE(u)->coldplug(u)); | |
| 3810 | ||
| 3811 | if (u->job) | |
| 3812 | RET_GATHER(r, job_coldplug(u->job)); | |
| 3813 | if (u->nop_job) | |
| 3814 | RET_GATHER(r, job_coldplug(u->nop_job)); | |
| 3815 | ||
| 3816 | unit_modify_nft_set(u, /* add = */ true); | |
| 3817 | return r; | |
| 3818 | } | |
| 3819 | ||
| 3820 | void unit_catchup(Unit *u) { | |
| 3821 | assert(u); | |
| 3822 | ||
| 3823 | if (UNIT_VTABLE(u)->catchup) | |
| 3824 | UNIT_VTABLE(u)->catchup(u); | |
| 3825 | ||
| 3826 | unit_cgroup_catchup(u); | |
| 3827 | } | |
| 3828 | ||
| 3829 | static bool fragment_mtime_newer(const char *path, usec_t mtime, bool path_masked) { | |
| 3830 | struct stat st; | |
| 3831 | ||
| 3832 | if (!path) | |
| 3833 | return false; | |
| 3834 | ||
| 3835 | /* If the source is some virtual kernel file system, then we assume we watch it anyway, and hence pretend we | |
| 3836 | * are never out-of-date. */ | |
| 3837 | if (PATH_STARTSWITH_SET(path, "/proc", "/sys")) | |
| 3838 | return false; | |
| 3839 | ||
| 3840 | if (stat(path, &st) < 0) | |
| 3841 | /* What, cannot access this anymore? */ | |
| 3842 | return true; | |
| 3843 | ||
| 3844 | if (path_masked) | |
| 3845 | /* For masked files check if they are still so */ | |
| 3846 | return !null_or_empty(&st); | |
| 3847 | else | |
| 3848 | /* For non-empty files check the mtime */ | |
| 3849 | return timespec_load(&st.st_mtim) > mtime; | |
| 3852 | } | |
| 3853 | ||
| 3854 | bool unit_need_daemon_reload(Unit *u) { | |
| 3855 | assert(u); | |
| 3856 | assert(u->manager); | |
| 3857 | ||
| 3858 | if (u->manager->unit_file_state_outdated) | |
| 3859 | return true; | |
| 3860 | ||
| 3861 | /* For unit files, we allow masking… */ | |
| 3862 | if (fragment_mtime_newer(u->fragment_path, u->fragment_mtime, | |
| 3863 | u->load_state == UNIT_MASKED)) | |
| 3864 | return true; | |
| 3865 | ||
| 3866 | /* Source paths should not be masked… */ | |
| 3867 | if (fragment_mtime_newer(u->source_path, u->source_mtime, false)) | |
| 3868 | return true; | |
| 3869 | ||
| 3870 | if (u->load_state == UNIT_LOADED) { | |
| 3871 | _cleanup_strv_free_ char **dropins = NULL; | |
| 3872 | ||
| 3873 | (void) unit_find_dropin_paths(u, /* use_unit_path_cache = */ false, &dropins); | |
| 3874 | ||
| 3875 | if (!strv_equal(u->dropin_paths, dropins)) | |
| 3876 | return true; | |
| 3877 | ||
| 3878 | /* … any drop-ins that are masked are simply omitted from the list. */ | |
| 3879 | STRV_FOREACH(path, u->dropin_paths) | |
| 3880 | if (fragment_mtime_newer(*path, u->dropin_mtime, false)) | |
| 3881 | return true; | |
| 3882 | } | |
| 3883 | ||
| 3884 | return false; | |
| 3885 | } | |
| 3886 | ||
| 3887 | void unit_reset_failed(Unit *u) { | |
| 3888 | assert(u); | |
| 3889 | ||
| 3890 | if (UNIT_VTABLE(u)->reset_failed) | |
| 3891 | UNIT_VTABLE(u)->reset_failed(u); | |
| 3892 | ||
| 3893 | ratelimit_reset(&u->start_ratelimit); | |
| 3894 | u->start_limit_hit = false; | |
| 3895 | ||
| 3896 | (void) unit_set_debug_invocation(u, /* enable= */ false); | |
| 3897 | } | |
| 3898 | ||
| 3899 | Unit *unit_following(Unit *u) { | |
| 3900 | assert(u); | |
| 3901 | ||
| 3902 | if (UNIT_VTABLE(u)->following) | |
| 3903 | return UNIT_VTABLE(u)->following(u); | |
| 3904 | ||
| 3905 | return NULL; | |
| 3906 | } | |
| 3907 | ||
| 3908 | bool unit_stop_pending(Unit *u) { | |
| 3909 | assert(u); | |
| 3910 | ||
| 3911 | /* This call does not check the current state of the unit. It's | |
| 3912 | * hence useful to be called from state change calls of the | |
| 3913 | * unit itself, where the state isn't updated yet. This is | |
| 3914 | * different from unit_inactive_or_pending() which checks both | |
| 3915 | * the current state and for a queued job. */ | |
| 3916 | ||
| 3917 | return unit_has_job_type(u, JOB_STOP); | |
| 3918 | } | |
| 3919 | ||
| 3920 | bool unit_inactive_or_pending(Unit *u) { | |
| 3921 | assert(u); | |
| 3922 | ||
| 3923 | /* Returns true if the unit is inactive or going down */ | |
| 3924 | ||
| 3925 | if (UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u))) | |
| 3926 | return true; | |
| 3927 | ||
| 3928 | if (unit_stop_pending(u)) | |
| 3929 | return true; | |
| 3930 | ||
| 3931 | return false; | |
| 3932 | } | |
| 3933 | ||
| 3934 | bool unit_active_or_pending(Unit *u) { | |
| 3935 | assert(u); | |
| 3936 | ||
| 3937 | /* Returns true if the unit is active or going up */ | |
| 3938 | ||
| 3939 | if (UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u))) | |
| 3940 | return true; | |
| 3941 | ||
| 3942 | if (u->job && | |
| 3943 | IN_SET(u->job->type, JOB_START, JOB_RELOAD_OR_START, JOB_RESTART)) | |
| 3944 | return true; | |
| 3945 | ||
| 3946 | return false; | |
| 3947 | } | |
| 3948 | ||
| 3949 | bool unit_will_restart_default(Unit *u) { | |
| 3950 | assert(u); | |
| 3951 | ||
| 3952 | return unit_has_job_type(u, JOB_START); | |
| 3953 | } | |
| 3954 | ||
| 3955 | bool unit_will_restart(Unit *u) { | |
| 3956 | assert(u); | |
| 3957 | ||
| 3958 | if (!UNIT_VTABLE(u)->will_restart) | |
| 3959 | return false; | |
| 3960 | ||
| 3961 | return UNIT_VTABLE(u)->will_restart(u); | |
| 3962 | } | |
| 3963 | ||
| 3964 | void unit_notify_cgroup_oom(Unit *u, bool managed_oom) { | |
| 3965 | assert(u); | |
| 3966 | ||
| 3967 | if (UNIT_VTABLE(u)->notify_cgroup_oom) | |
| 3968 | UNIT_VTABLE(u)->notify_cgroup_oom(u, managed_oom); | |
| 3969 | } | |
| 3970 | ||
| 3971 | static int unit_pid_set(Unit *u, Set **pid_set) { | |
| 3972 | int r; | |
| 3973 | ||
| 3974 | assert(u); | |
| 3975 | assert(pid_set); | |
| 3976 | ||
| 3977 | set_clear(*pid_set); /* This updates input. */ | |
| 3978 | ||
| 3979 | /* Exclude the main/control pids from being killed via the cgroup */ | |
| 3980 | ||
| 3981 | PidRef *pid; | |
| 3982 | FOREACH_ARGUMENT(pid, unit_main_pid(u), unit_control_pid(u)) | |
| 3983 | if (pidref_is_set(pid)) { | |
| 3984 | r = set_ensure_put(pid_set, NULL, PID_TO_PTR(pid->pid)); | |
| 3985 | if (r < 0) | |
| 3986 | return r; | |
| 3987 | } | |
| 3988 | ||
| 3989 | return 0; | |
| 3990 | } | |
| 3991 | ||
| 3992 | static int kill_common_log(const PidRef *pid, int signo, void *userdata) { | |
| 3993 | _cleanup_free_ char *comm = NULL; | |
| 3994 | Unit *u = ASSERT_PTR(userdata); | |
| 3995 | ||
| 3996 | (void) pidref_get_comm(pid, &comm); | |
| 3997 | ||
| 3998 | log_unit_info(u, "Sending signal SIG%s to process " PID_FMT " (%s) on client request.", | |
| 3999 | signal_to_string(signo), pid->pid, strna(comm)); | |
| 4000 | ||
| 4001 | return 1; | |
| 4002 | } | |
| 4003 | ||
| 4004 | static int kill_or_sigqueue(PidRef *pidref, int signo, int code, int value) { | |
| 4005 | assert(pidref_is_set(pidref)); | |
| 4006 | assert(SIGNAL_VALID(signo)); | |
| 4007 | ||
| 4008 | switch (code) { | |
| 4009 | ||
| 4010 | case SI_USER: | |
| 4011 | log_debug("Killing " PID_FMT " with signal SIG%s.", pidref->pid, signal_to_string(signo)); | |
| 4012 | return pidref_kill(pidref, signo); | |
| 4013 | ||
| 4014 | case SI_QUEUE: | |
| 4015 | log_debug("Enqueuing value %i to " PID_FMT " on signal SIG%s.", value, pidref->pid, signal_to_string(signo)); | |
| 4016 | return pidref_sigqueue(pidref, signo, value); | |
| 4017 | ||
| 4018 | default: | |
| 4019 | assert_not_reached(); | |
| 4020 | } | |
| 4021 | } | |
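||
| | /* Illustrative: SI_USER requests boil down to a plain kill(), while SI_QUEUE uses sigqueue(), | |
| | * which attaches a caller-supplied integer that the target can read from siginfo_t's | |
| | * si_value.sival_int. E.g. (signal number and value hypothetical): | |
| | * | |
| | *     r = kill_or_sigqueue(&pidref, SIGRTMIN+7, SI_QUEUE, 42); */ | |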
| 4022 | ||
| 4023 | static int unit_kill_one( | |
| 4024 | Unit *u, | |
| 4025 | PidRef *pidref, | |
| 4026 | const char *type, | |
| 4027 | int signo, | |
| 4028 | int code, | |
| 4029 | int value, | |
| 4030 | sd_bus_error *ret_error) { | |
| 4031 | ||
| 4032 | int r; | |
| 4033 | ||
| 4034 | assert(u); | |
| 4035 | assert(type); | |
| 4036 | ||
| 4037 | if (!pidref_is_set(pidref)) | |
| 4038 | return 0; | |
| 4039 | ||
| 4040 | _cleanup_free_ char *comm = NULL; | |
| 4041 | (void) pidref_get_comm(pidref, &comm); | |
| 4042 | ||
| 4043 | r = kill_or_sigqueue(pidref, signo, code, value); | |
| 4044 | if (r == -ESRCH) | |
| 4045 | return 0; | |
| 4046 | if (r < 0) { | |
| 4047 | /* Report this failure both to the logs and to the client */ | |
| 4048 | if (ret_error) | |
| 4049 | sd_bus_error_set_errnof( | |
| 4050 | ret_error, r, | |
| 4051 | "Failed to send signal SIG%s to %s process " PID_FMT " (%s): %m", | |
| 4052 | signal_to_string(signo), type, pidref->pid, strna(comm)); | |
| 4053 | ||
| 4054 | return log_unit_warning_errno( | |
| 4055 | u, r, | |
| 4056 | "Failed to send signal SIG%s to %s process " PID_FMT " (%s) on client request: %m", | |
| 4057 | signal_to_string(signo), type, pidref->pid, strna(comm)); | |
| 4058 | } | |
| 4059 | ||
| 4060 | log_unit_info(u, "Sent signal SIG%s to %s process " PID_FMT " (%s) on client request.", | |
| 4061 | signal_to_string(signo), type, pidref->pid, strna(comm)); | |
| 4062 | return 1; /* killed */ | |
| 4063 | } | |
| 4064 | ||
| 4065 | int unit_kill( | |
| 4066 | Unit *u, | |
| 4067 | KillWhom whom, | |
| 4068 | const char *subgroup, | |
| 4069 | int signo, | |
| 4070 | int code, | |
| 4071 | int value, | |
| 4072 | sd_bus_error *ret_error) { | |
| 4073 | ||
| 4074 | PidRef *main_pid, *control_pid; | |
| 4075 | bool killed = false; | |
| 4076 | int ret = 0, r; | |
| 4077 | ||
| 4078 | /* This is the common implementation for explicit user-requested killing of unit processes, shared by | |
| 4079 | * various unit types. Do not confuse with unit_kill_context(), which is what we use when we want to | |
| 4080 | * stop a service ourselves. */ | |
| 4081 | ||
| 4082 | assert(u); | |
| 4083 | assert(whom >= 0); | |
| 4084 | assert(whom < _KILL_WHOM_MAX); | |
| 4085 | assert(SIGNAL_VALID(signo)); | |
| 4086 | assert(IN_SET(code, SI_USER, SI_QUEUE)); | |
| 4087 | ||
| 4088 | if (subgroup) { | |
| 4089 | if (!IN_SET(whom, KILL_CGROUP, KILL_CGROUP_FAIL)) | |
| 4090 | return sd_bus_error_set(ret_error, SD_BUS_ERROR_NOT_SUPPORTED, | |
| 4091 | "Killing by subgroup is only supported for 'cgroup' or 'cgroup-kill' modes."); | |
| 4092 | ||
| 4093 | if (!unit_cgroup_delegate(u)) | |
| 4094 | return sd_bus_error_set(ret_error, SD_BUS_ERROR_NOT_SUPPORTED, | |
| 4095 | "Killing by subgroup is only available for units with control group delegation enabled."); | |
| 4096 | } | |
| 4097 | ||
| 4098 | main_pid = unit_main_pid(u); | |
| 4099 | control_pid = unit_control_pid(u); | |
| 4100 | ||
| 4101 | if (!UNIT_HAS_CGROUP_CONTEXT(u) && !main_pid && !control_pid) | |
| 4102 | return sd_bus_error_set(ret_error, SD_BUS_ERROR_NOT_SUPPORTED, "Unit type does not support process killing."); | |
| 4103 | ||
| 4104 | if (IN_SET(whom, KILL_MAIN, KILL_MAIN_FAIL)) { | |
| 4105 | if (!main_pid) | |
| 4106 | return sd_bus_error_setf(ret_error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no main processes", unit_type_to_string(u->type)); | |
| 4107 | if (!pidref_is_set(main_pid)) | |
| 4108 | return sd_bus_error_set_const(ret_error, BUS_ERROR_NO_SUCH_PROCESS, "No main process to kill"); | |
| 4109 | } | |
| 4110 | ||
| 4111 | if (IN_SET(whom, KILL_CONTROL, KILL_CONTROL_FAIL)) { | |
| 4112 | if (!control_pid) | |
| 4113 | return sd_bus_error_setf(ret_error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no control processes", unit_type_to_string(u->type)); | |
| 4114 | if (!pidref_is_set(control_pid)) | |
| 4115 | return sd_bus_error_set_const(ret_error, BUS_ERROR_NO_SUCH_PROCESS, "No control process to kill"); | |
| 4116 | } | |
| 4117 | ||
| 4118 | if (IN_SET(whom, KILL_CONTROL, KILL_CONTROL_FAIL, KILL_ALL, KILL_ALL_FAIL)) { | |
| 4119 | r = unit_kill_one(u, control_pid, "control", signo, code, value, ret_error); | |
| 4120 | RET_GATHER(ret, r); | |
| 4121 | killed = killed || r > 0; | |
| 4122 | } | |
| 4123 | ||
| 4124 | if (IN_SET(whom, KILL_MAIN, KILL_MAIN_FAIL, KILL_ALL, KILL_ALL_FAIL)) { | |
| 4125 | r = unit_kill_one(u, main_pid, "main", signo, code, value, ret >= 0 ? ret_error : NULL); | |
| 4126 | RET_GATHER(ret, r); | |
| 4127 | killed = killed || r > 0; | |
| 4128 | } | |
| 4129 | ||
| 4130 | /* Note: if we shall enqueue rather than kill we won't do this via the cgroup mechanism, since it | |
| 4131 | * doesn't really make much sense (and given that enqueued values are a relatively expensive | |
| 4132 | * resource, and we shouldn't allow us to be subjects for such allocation sprees) */ | |
| 4133 | if (IN_SET(whom, KILL_ALL, KILL_ALL_FAIL, KILL_CGROUP, KILL_CGROUP_FAIL) && code == SI_USER) { | |
| 4134 | CGroupRuntime *crt = unit_get_cgroup_runtime(u); | |
| 4135 | if (crt && crt->cgroup_path) { | |
| 4136 | _cleanup_set_free_ Set *pid_set = NULL; | |
| 4137 | _cleanup_free_ char *joined = NULL; | |
| 4138 | const char *p; | |
| 4139 | ||
| 4140 | if (empty_or_root(subgroup)) | |
| 4141 | p = crt->cgroup_path; | |
| 4142 | else { | |
| 4143 | joined = path_join(crt->cgroup_path, subgroup); | |
| 4144 | if (!joined) | |
| 4145 | return -ENOMEM; | |
| 4146 | ||
| 4147 | p = joined; | |
| 4148 | } | |
| 4149 | ||
| 4150 | if (signo == SIGKILL) { | |
| 4151 | r = cg_kill_kernel_sigkill(p); | |
| 4152 | if (r >= 0) { | |
| 4153 | killed = true; | |
| 4154 | log_unit_info(u, "Killed unit cgroup '%s' with SIGKILL on client request.", p); | |
| 4155 | goto finish; | |
| 4156 | } | |
| 4157 | if (r != -EOPNOTSUPP) { | |
| 4158 | if (ret >= 0) | |
| 4159 | sd_bus_error_set_errnof(ret_error, r, | |
| 4160 | "Failed to kill unit cgroup: %m"); | |
| 4161 | RET_GATHER(ret, log_unit_warning_errno(u, r, "Failed to kill unit cgroup '%s': %m", p)); | |
| 4162 | goto finish; | |
| 4163 | } | |
| 4164 | /* Fall back to manual enumeration */ | |
| 4165 | } else if (IN_SET(whom, KILL_ALL, KILL_ALL_FAIL)) { | |
| 4166 | /* Exclude the main/control pids from being killed via the cgroup if not | |
| 4167 | * SIGKILL */ | |
| 4168 | r = unit_pid_set(u, &pid_set); | |
| 4169 | if (r < 0) | |
| 4170 | return log_oom(); | |
| 4171 | } | |
| 4172 | ||
| 4173 | r = cg_kill_recursive(p, signo, /* flags= */ 0, pid_set, kill_common_log, u); | |
| 4174 | if (r < 0 && !IN_SET(r, -ESRCH, -ENOENT)) { | |
| 4175 | if (ret >= 0) | |
| 4176 | sd_bus_error_set_errnof( | |
| 4177 | ret_error, r, | |
| 4178 | "Failed to send signal SIG%s to processes in unit cgroup '%s': %m", | |
| 4179 | signal_to_string(signo), p); | |
| 4180 | ||
| 4181 | RET_GATHER(ret, log_unit_warning_errno( | |
| 4182 | u, r, | |
| 4183 | "Failed to send signal SIG%s to processes in unit cgroup '%s' on client request: %m", | |
| 4184 | signal_to_string(signo), p)); | |
| 4185 | } | |
| 4186 | killed = killed || r > 0; | |
| 4187 | } | |
| 4188 | } | |
| 4189 | ||
| 4190 | finish: | |
| 4191 | /* If the "fail" versions of the operation are requested, then complain if the set of processes we killed is empty */ | |
| 4192 | if (ret >= 0 && !killed && IN_SET(whom, KILL_ALL_FAIL, KILL_CONTROL_FAIL, KILL_MAIN_FAIL, KILL_CGROUP_FAIL)) | |
| 4193 | return sd_bus_error_set_const(ret_error, BUS_ERROR_NO_SUCH_PROCESS, "No matching processes to kill"); | |
| 4194 | ||
| 4195 | return ret; | |
| 4196 | } | |
| 4197 | ||
| 4198 | int unit_following_set(Unit *u, Set **s) { | |
| 4199 | assert(u); | |
| 4200 | assert(s); | |
| 4201 | ||
| 4202 | if (UNIT_VTABLE(u)->following_set) | |
| 4203 | return UNIT_VTABLE(u)->following_set(u, s); | |
| 4204 | ||
| 4205 | *s = NULL; | |
| 4206 | return 0; | |
| 4207 | } | |
| 4208 | ||
| 4209 | UnitFileState unit_get_unit_file_state(Unit *u) { | |
| 4210 | int r; | |
| 4211 | ||
| 4212 | assert(u); | |
| 4213 | ||
| 4214 | if (u->unit_file_state >= 0 || !u->fragment_path) | |
| 4215 | return u->unit_file_state; | |
| 4216 | ||
| 4217 | /* If we know this is a transient unit, there is no need to ask the unit file state for details. Let's bypass | |
| 4218 | * the more expensive on-disk check. */ | |
| 4219 | if (u->transient) | |
| 4220 | return (u->unit_file_state = UNIT_FILE_TRANSIENT); | |
| 4221 | ||
| 4222 | r = unit_file_get_state( | |
| 4223 | u->manager->runtime_scope, | |
| 4224 | /* root_dir= */ NULL, | |
| 4225 | u->id, | |
| 4226 | &u->unit_file_state); | |
| 4227 | if (r < 0) | |
| 4228 | u->unit_file_state = UNIT_FILE_BAD; | |
| 4229 | ||
| 4230 | return u->unit_file_state; | |
| 4231 | } | |
| 4232 | ||
| 4233 | PresetAction unit_get_unit_file_preset(Unit *u) { | |
| 4234 | int r; | |
| 4235 | ||
| 4236 | assert(u); | |
| 4237 | ||
| 4238 | if (u->unit_file_preset >= 0) | |
| 4239 | return u->unit_file_preset; | |
| 4240 | ||
| 4241 | /* If this is a transient or perpetual unit file it doesn't make much sense to ask the preset | |
| 4242 | * database about this, because enabling/disabling makes no sense for either. Hence don't. */ | |
| 4243 | if (!u->fragment_path || u->transient || u->perpetual) | |
| 4244 | return (u->unit_file_preset = -ENOEXEC); | |
| 4245 | ||
| 4246 | _cleanup_free_ char *bn = NULL; | |
| 4247 | r = path_extract_filename(u->fragment_path, &bn); | |
| 4248 | if (r < 0) | |
| 4249 | return (u->unit_file_preset = r); | |
| 4250 | if (r == O_DIRECTORY) | |
| 4251 | return (u->unit_file_preset = -EISDIR); | |
| 4252 | ||
| 4253 | return (u->unit_file_preset = unit_file_query_preset( | |
| 4254 | u->manager->runtime_scope, | |
| 4255 | /* root_dir= */ NULL, | |
| 4256 | bn, | |
| 4257 | /* cached= */ NULL)); | |
| 4258 | } | |
| 4259 | ||
| 4260 | Unit* unit_ref_set(UnitRef *ref, Unit *source, Unit *target) { | |
| 4261 | assert(ref); | |
| 4262 | assert(source); | |
| 4263 | assert(target); | |
| 4264 | ||
| 4265 | if (ref->target) | |
| 4266 | unit_ref_unset(ref); | |
| 4267 | ||
| 4268 | ref->source = source; | |
| 4269 | ref->target = target; | |
| 4270 | LIST_PREPEND(refs_by_target, target->refs_by_target, ref); | |
| 4271 | return target; | |
| 4272 | } | |
| 4273 | ||
| 4274 | void unit_ref_unset(UnitRef *ref) { | |
| 4275 | assert(ref); | |
| 4276 | ||
| 4277 | if (!ref->target) | |
| 4278 | return; | |
| 4279 | ||
| 4280 | /* We are about to drop a reference to the unit, make sure the garbage collection has a look at it as it might | |
| 4281 | * be unreferenced now. */ | |
| 4282 | unit_add_to_gc_queue(ref->target); | |
| 4283 | ||
| 4284 | LIST_REMOVE(refs_by_target, ref->target->refs_by_target, ref); | |
| 4285 | ref->source = ref->target = NULL; | |
| 4286 | } | |
| 4287 | ||
| 4288 | static int user_from_unit_name(Unit *u, char **ret) { | |
| 4289 | ||
| 4290 | static const uint8_t hash_key[] = { | |
| 4291 | 0x58, 0x1a, 0xaf, 0xe6, 0x28, 0x58, 0x4e, 0x96, | |
| 4292 | 0xb4, 0x4e, 0xf5, 0x3b, 0x8c, 0x92, 0x07, 0xec | |
| 4293 | }; | |
| 4294 | ||
| 4295 | _cleanup_free_ char *n = NULL; | |
| 4296 | int r; | |
| 4297 | ||
| 4298 | r = unit_name_to_prefix(u->id, &n); | |
| 4299 | if (r < 0) | |
| 4300 | return r; | |
| 4301 | ||
| 4302 | if (valid_user_group_name(n, 0)) { | |
| 4303 | *ret = TAKE_PTR(n); | |
| 4304 | return 0; | |
| 4305 | } | |
| 4306 | ||
| 4307 | /* If we can't use the unit name as a user name, then let's hash it and use that */ | |
| 4308 | if (asprintf(ret, "_du%016" PRIx64, siphash24(n, strlen(n), hash_key)) < 0) | |
| 4309 | return -ENOMEM; | |
| 4310 | ||
| 4311 | return 0; | |
| 4312 | } | |
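||
| | /* Worked example, illustrative: "mydaemon.service" yields the user name "mydaemon" directly, | |
| | * while a prefix that is not a valid user name (say "foo.bar" from "foo.bar.service") maps to | |
| | * a stable synthetic name of the form "_du" plus 16 hex digits, derived via siphash24() over | |
| | * the prefix with the fixed key above, so the same unit always gets the same dynamic user. */ | |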
| 4313 | ||
| 4314 | static int unit_verify_contexts(const Unit *u) { | |
| 4315 | assert(u); | |
| 4316 | ||
| 4317 | const ExecContext *ec = unit_get_exec_context(u); | |
| 4318 | if (!ec) | |
| 4319 | return 0; | |
| 4320 | ||
| 4321 | if (MANAGER_IS_USER(u->manager) && ec->dynamic_user) | |
| 4322 | return log_unit_error_errno(u, SYNTHETIC_ERRNO(ENOEXEC), "DynamicUser= enabled for user unit, which is not supported. Refusing."); | |
| 4323 | ||
| 4324 | if (ec->dynamic_user && ec->working_directory_home) | |
| 4325 | return log_unit_error_errno(u, SYNTHETIC_ERRNO(ENOEXEC), "WorkingDirectory=~ is not allowed under DynamicUser=yes. Refusing."); | |
| 4326 | ||
| 4327 | if (ec->working_directory && path_below_api_vfs(ec->working_directory) && | |
| 4328 | exec_needs_mount_namespace(ec, /* params = */ NULL, /* runtime = */ NULL)) | |
| 4329 | return log_unit_error_errno(u, SYNTHETIC_ERRNO(ENOEXEC), "WorkingDirectory= may not be below /proc/, /sys/ or /dev/ when using mount namespacing. Refusing."); | |
| 4330 | ||
| 4331 | if (exec_needs_pid_namespace(ec, /* params= */ NULL) && !UNIT_VTABLE(u)->notify_pidref) | |
| 4332 | return log_unit_error_errno(u, SYNTHETIC_ERRNO(ENOEXEC), "PrivatePIDs= setting is only supported for service units. Refusing."); | |
| 4333 | ||
| 4334 | const KillContext *kc = unit_get_kill_context(u); | |
| 4335 | ||
| 4336 | if (ec->pam_name && kc && !IN_SET(kc->kill_mode, KILL_CONTROL_GROUP, KILL_MIXED)) | |
| 4337 | return log_unit_error_errno(u, SYNTHETIC_ERRNO(ENOEXEC), "Unit has PAM enabled. Kill mode must be set to 'control-group' or 'mixed'. Refusing."); | |
| 4338 | ||
| 4339 | return 0; | |
| 4340 | } | |
| 4341 | ||
| 4342 | static PrivateTmp unit_get_private_var_tmp(const Unit *u, const ExecContext *c) { | |
| 4343 | assert(u); | |
| 4344 | assert(c); | |
| 4345 | assert(c->private_tmp >= 0 && c->private_tmp < _PRIVATE_TMP_MAX); | |
| 4346 | ||
| 4347 | /* Disable disconnected private tmpfs on /var/tmp/ when DefaultDependencies=no and | |
| 4348 | * RootImage=/RootDirectory= are not set, as /var/ may be a separate partition. | |
| 4349 | * See issue #37258. */ | |
| 4350 | ||
| 4351 | /* PrivateTmp=yes/no also enables/disables private tmpfs on /var/tmp/. */ | |
| 4352 | if (c->private_tmp != PRIVATE_TMP_DISCONNECTED) | |
| 4353 | return c->private_tmp; | |
| 4354 | ||
| 4355 | /* When DefaultDependencies=yes, disconnected tmpfs is also enabled on /var/tmp/, and an explicit | |
| 4356 | * dependency to the mount on /var/ will be added in unit_add_exec_dependencies(). */ | |
| 4357 | if (u->default_dependencies) | |
| 4358 | return PRIVATE_TMP_DISCONNECTED; | |
| 4359 | ||
| 4360 | /* When RootImage=/RootDirectory= is enabled, /var/ should be prepared by the image or directory, | |
| 4361 | * hence we can mount a disconnected tmpfs on /var/tmp/. */ | |
| 4362 | if (exec_context_with_rootfs(c)) | |
| 4363 | return PRIVATE_TMP_DISCONNECTED; | |
| 4364 | ||
| 4365 | /* Even if DefaultDependencies=no, enable disconnected tmpfs when | |
| 4366 | * RequiresMountsFor=/WantsMountsFor=/var/ is explicitly set. */ | |
| 4367 | for (UnitMountDependencyType t = 0; t < _UNIT_MOUNT_DEPENDENCY_TYPE_MAX; t++) | |
| 4368 | if (hashmap_contains(u->mounts_for[t], "/var/")) | |
| 4369 | return PRIVATE_TMP_DISCONNECTED; | |
| 4370 | ||
| 4371 | /* Check the same but for After= with Requires=/Requisite=/Wants= or friends. */ | |
| 4372 | Unit *m = manager_get_unit(u->manager, "var.mount"); | |
| 4373 | if (!m) | |
| 4374 | return PRIVATE_TMP_NO; | |
| 4375 | ||
| 4376 | if (!unit_has_dependency(u, UNIT_ATOM_AFTER, m)) | |
| 4377 | return PRIVATE_TMP_NO; | |
| 4378 | ||
| 4379 | if (unit_has_dependency(u, UNIT_ATOM_PULL_IN_START, m) || | |
| 4380 | unit_has_dependency(u, UNIT_ATOM_PULL_IN_VERIFY, m) || | |
| 4381 | unit_has_dependency(u, UNIT_ATOM_PULL_IN_START_IGNORED, m)) | |
| 4382 | return PRIVATE_TMP_DISCONNECTED; | |
| 4383 | ||
| 4384 | return PRIVATE_TMP_NO; | |
| 4385 | } | |
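||
| | /* Decision summary, illustrative, for whether the disconnected tmpfs stays enabled on | |
| | * /var/tmp/ when PrivateTmp=disconnected was requested: | |
| | *   - DefaultDependencies=yes                   → disconnected (mount dep on /var/ added later) | |
| | *   - RootImage=/RootDirectory= set             → disconnected (/var/ comes from the image) | |
| | *   - RequiresMountsFor=/WantsMountsFor=/var/   → disconnected | |
| | *   - After=var.mount plus a pull-in dep on it  → disconnected | |
| | *   - otherwise                                 → PRIVATE_TMP_NO for /var/tmp/ */ | |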
| 4386 | ||
| 4387 | int unit_patch_contexts(Unit *u) { | |
| 4388 | CGroupContext *cc; | |
| 4389 | ExecContext *ec; | |
| 4390 | int r; | |
| 4391 | ||
| 4392 | assert(u); | |
| 4393 | ||
| 4394 | /* Patch the manager defaults into the exec and cgroup | |
| 4395 | * contexts, _after_ the rest of the settings have been | |
| 4396 | * initialized */ | |
| 4397 | ||
| 4398 | ec = unit_get_exec_context(u); | |
| 4399 | if (ec) { | |
| 4400 | /* This only copies in the ones that need memory */ | |
| 4401 | for (unsigned i = 0; i < _RLIMIT_MAX; i++) | |
| 4402 | if (u->manager->defaults.rlimit[i] && !ec->rlimit[i]) { | |
| 4403 | ec->rlimit[i] = newdup(struct rlimit, u->manager->defaults.rlimit[i], 1); | |
| 4404 | if (!ec->rlimit[i]) | |
| 4405 | return -ENOMEM; | |
| 4406 | } | |
| 4407 | ||
| 4408 | if (MANAGER_IS_USER(u->manager) && !ec->working_directory) { | |
| 4409 | r = get_home_dir(&ec->working_directory); | |
| 4410 | if (r < 0) | |
| 4411 | return r; | |
| 4412 | ||
| 4413 | if (!ec->working_directory_home) | |
| 4414 | /* If home directory is implied by us, allow it to be missing. */ | |
| 4415 | ec->working_directory_missing_ok = true; | |
| 4416 | } | |
| 4417 | ||
| 4418 | if (ec->private_devices) | |
| 4419 | ec->capability_bounding_set &= ~((UINT64_C(1) << CAP_MKNOD) | (UINT64_C(1) << CAP_SYS_RAWIO)); | |
| 4420 | ||
| 4421 | if (ec->protect_kernel_modules) | |
| 4422 | ec->capability_bounding_set &= ~(UINT64_C(1) << CAP_SYS_MODULE); | |
| 4423 | ||
| 4424 | if (ec->protect_kernel_logs) | |
| 4425 | ec->capability_bounding_set &= ~(UINT64_C(1) << CAP_SYSLOG); | |
| 4426 | ||
| 4427 | if (ec->protect_clock) | |
| 4428 | ec->capability_bounding_set &= ~((UINT64_C(1) << CAP_SYS_TIME) | (UINT64_C(1) << CAP_WAKE_ALARM)); | |
| 4429 | ||
| 4430 | if (ec->dynamic_user) { | |
| 4431 | if (!ec->user) { | |
| 4432 | r = user_from_unit_name(u, &ec->user); | |
| 4433 | if (r < 0) | |
| 4434 | return r; | |
| 4435 | } | |
| 4436 | ||
| 4437 | if (!ec->group) { | |
| 4438 | ec->group = strdup(ec->user); | |
| 4439 | if (!ec->group) | |
| 4440 | return -ENOMEM; | |
| 4441 | } | |
| 4442 | ||
| 4443 | /* If the dynamic user option is on, let's make sure that the unit can't leave its | |
| 4444 | * UID/GID around in the file system or on IPC objects. Hence enforce a strict | |
| 4445 | * sandbox. */ | |
| 4446 | ||
| 4447 | /* With DynamicUser= we want private directories, so if the user hasn't manually | |
| 4448 | * selected PrivateTmp=, enable it, but to a fully private (disconnected) tmpfs | |
| 4449 | * instance. */ | |
| 4450 | if (ec->private_tmp == PRIVATE_TMP_NO) | |
| 4451 | ec->private_tmp = PRIVATE_TMP_DISCONNECTED; | |
| 4452 | ec->remove_ipc = true; | |
| 4453 | ec->protect_system = PROTECT_SYSTEM_STRICT; | |
| 4454 | if (ec->protect_home == PROTECT_HOME_NO) | |
| 4455 | ec->protect_home = PROTECT_HOME_READ_ONLY; | |
| 4456 | ||
| 4457 | /* Make sure this service can neither benefit from SUID/SGID binaries nor create | |
| 4458 | * them. */ | |
| 4459 | ec->no_new_privileges = true; | |
| 4460 | ec->restrict_suid_sgid = true; | |
| 4461 | } | |
| 4462 | ||
| 4463 | ec->private_var_tmp = unit_get_private_var_tmp(u, ec); | |
| 4464 | ||
| 4465 | FOREACH_ARRAY(d, ec->directories, _EXEC_DIRECTORY_TYPE_MAX) | |
| 4466 | exec_directory_sort(d); | |
| 4467 | } | |
| 4468 | ||
| 4469 | cc = unit_get_cgroup_context(u); | |
| 4470 | if (cc && ec) { | |
| 4471 | ||
| 4472 | if (ec->private_devices && | |
| 4473 | cc->device_policy == CGROUP_DEVICE_POLICY_AUTO) | |
| 4474 | cc->device_policy = CGROUP_DEVICE_POLICY_CLOSED; | |
| 4475 | ||
| 4476 | /* Only add these if needed, as they imply that everything else is blocked. */ | |
| 4477 | if (cgroup_context_has_device_policy(cc)) { | |
| 4478 | if (ec->root_image || ec->mount_images) { | |
| 4479 | ||
| 4480 | /* When RootImage= or MountImages= is specified, the following devices are touched. */ | |
| 4481 | FOREACH_STRING(p, "/dev/loop-control", "/dev/mapper/control") { | |
| 4482 | r = cgroup_context_add_device_allow(cc, p, CGROUP_DEVICE_READ|CGROUP_DEVICE_WRITE); | |
| 4483 | if (r < 0) | |
| 4484 | return r; | |
| 4485 | } | |
| 4486 | FOREACH_STRING(p, "block-loop", "block-blkext", "block-device-mapper") { | |
| 4487 | r = cgroup_context_add_device_allow(cc, p, CGROUP_DEVICE_READ|CGROUP_DEVICE_WRITE|CGROUP_DEVICE_MKNOD); | |
| 4488 | if (r < 0) | |
| 4489 | return r; | |
| 4490 | } | |
| 4491 | ||
| 4492 | /* Make sure "block-loop" can be resolved, i.e. make sure "loop" shows up in /proc/devices. | |
| 4493 | * Same for mapper and verity. */ | |
| 4494 | FOREACH_STRING(p, "modprobe@loop.service", "modprobe@dm_mod.service", "modprobe@dm_verity.service") { | |
| 4495 | r = unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_WANTS, p, true, UNIT_DEPENDENCY_FILE); | |
| 4496 | if (r < 0) | |
| 4497 | return r; | |
| 4498 | } | |
| 4499 | } | |
| 4500 | ||
| 4501 | if (ec->protect_clock) { | |
| 4502 | r = cgroup_context_add_device_allow(cc, "char-rtc", CGROUP_DEVICE_READ); | |
| 4503 | if (r < 0) | |
| 4504 | return r; | |
| 4505 | } | |
| 4506 | } | |
| 4507 | } | |
| 4508 | ||
| 4509 | return unit_verify_contexts(u); | |
| 4510 | } | |
| 4511 | ||
| 4512 | ExecContext *unit_get_exec_context(const Unit *u) { | |
| 4513 | size_t offset; | |
| 4514 | assert(u); | |
| 4515 | ||
| 4516 | if (u->type < 0) | |
| 4517 | return NULL; | |
| 4518 | ||
| 4519 | offset = UNIT_VTABLE(u)->exec_context_offset; | |
| 4520 | if (offset <= 0) | |
| 4521 | return NULL; | |
| 4522 | ||
| 4523 | return (ExecContext*) ((uint8_t*) u + offset); | |
| 4524 | } | |
| 4525 | ||
| 4526 | KillContext *unit_get_kill_context(const Unit *u) { | |
| 4527 | size_t offset; | |
| 4528 | assert(u); | |
| 4529 | ||
| 4530 | if (u->type < 0) | |
| 4531 | return NULL; | |
| 4532 | ||
| 4533 | offset = UNIT_VTABLE(u)->kill_context_offset; | |
| 4534 | if (offset <= 0) | |
| 4535 | return NULL; | |
| 4536 | ||
| 4537 | return (KillContext*) ((uint8_t*) u + offset); | |
| 4538 | } | |
| 4539 | ||
| 4540 | CGroupContext *unit_get_cgroup_context(const Unit *u) { | |
| 4541 | size_t offset; | |
| 4542 | ||
| 4543 | if (u->type < 0) | |
| 4544 | return NULL; | |
| 4545 | ||
| 4546 | offset = UNIT_VTABLE(u)->cgroup_context_offset; | |
| 4547 | if (offset <= 0) | |
| 4548 | return NULL; | |
| 4549 | ||
| 4550 | return (CGroupContext*) ((uint8_t*) u + offset); | |
| 4551 | } | |
| 4552 | ||
| 4553 | ExecRuntime *unit_get_exec_runtime(const Unit *u) { | |
| 4554 | size_t offset; | |
| 4555 | ||
| 4556 | if (u->type < 0) | |
| 4557 | return NULL; | |
| 4558 | ||
| 4559 | offset = UNIT_VTABLE(u)->exec_runtime_offset; | |
| 4560 | if (offset <= 0) | |
| 4561 | return NULL; | |
| 4562 | ||
| 4563 | return *(ExecRuntime**) ((uint8_t*) u + offset); | |
| 4564 | } | |
| 4565 | ||
| 4566 | CGroupRuntime *unit_get_cgroup_runtime(const Unit *u) { | |
| 4567 | size_t offset; | |
| 4568 | ||
| 4569 | if (u->type < 0) | |
| 4570 | return NULL; | |
| 4571 | ||
| 4572 | offset = UNIT_VTABLE(u)->cgroup_runtime_offset; | |
| 4573 | if (offset <= 0) | |
| 4574 | return NULL; | |
| 4575 | ||
| 4576 | return *(CGroupRuntime**) ((uint8_t*) u + offset); | |
| 4577 | } | |
| 4578 | ||
| 4579 | static const char* unit_drop_in_dir(Unit *u, UnitWriteFlags flags) { | |
| 4580 | assert(u); | |
| 4581 | ||
| 4582 | if (UNIT_WRITE_FLAGS_NOOP(flags)) | |
| 4583 | return NULL; | |
| 4584 | ||
| 4585 | if (u->transient) /* Redirect drop-ins for transient units always into the transient directory. */ | |
| 4586 | return u->manager->lookup_paths.transient; | |
| 4587 | ||
| 4588 | if (flags & UNIT_PERSISTENT) | |
| 4589 | return u->manager->lookup_paths.persistent_control; | |
| 4590 | ||
| 4591 | if (flags & UNIT_RUNTIME) | |
| 4592 | return u->manager->lookup_paths.runtime_control; | |
| 4593 | ||
| 4594 | return NULL; | |
| 4595 | } | |
| 4596 | ||
| 4597 | const char* unit_escape_setting(const char *s, UnitWriteFlags flags, char **buf) { | |
| 4598 | assert(s); | |
| 4599 | assert(popcount(flags & (UNIT_ESCAPE_EXEC_SYNTAX_ENV | UNIT_ESCAPE_EXEC_SYNTAX | UNIT_ESCAPE_C)) <= 1); | |
| 4600 | assert(buf); | |
| 4601 | ||
| 4602 | _cleanup_free_ char *t = NULL; | |
| 4603 | ||
| 4604 | /* Returns a string with any escaping done. If no escaping was necessary, *buf is set to NULL, and | |
| 4605 | * the input pointer is returned as-is. If an allocation was needed, the return buffer pointer is | |
| 4606 | * written to *buf. This means the return value always contains a properly escaped version, but *buf | |
| 4607 | * only contains a pointer if an allocation was made. Callers can use this to optimize memory | |
| 4608 | * allocations. */ | |
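| | * | |
| | * A hypothetical caller might use this as: | |
| | * | |
| | * _cleanup_free_ char *buf = NULL; | |
| | * const char *e = unit_escape_setting(s, flags, &buf); | |
| | * if (!e) | |
| | * return -ENOMEM; | |
| | * | |
| | * where 'e' then points either at 's' itself (buf stays NULL) or at the allocation owned by 'buf'. */ | |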
| 4609 | ||
| 4610 | if (flags & UNIT_ESCAPE_SPECIFIERS) { | |
| 4611 | t = specifier_escape(s); | |
| 4612 | if (!t) | |
| 4613 | return NULL; | |
| 4614 | ||
| 4615 | s = t; | |
| 4616 | } | |
| 4617 | ||
| 4618 | /* We either do C-escaping or shell-escaping, to additionally escape characters that we parse for | |
| 4619 | * ExecStart= and friends, i.e. '$' and quotes. */ | |
| 4620 | ||
| 4621 | if (flags & (UNIT_ESCAPE_EXEC_SYNTAX_ENV | UNIT_ESCAPE_EXEC_SYNTAX)) { | |
| 4622 | char *t2; | |
| 4623 | ||
| 4624 | if (flags & UNIT_ESCAPE_EXEC_SYNTAX_ENV) { | |
| 4625 | t2 = strreplace(s, "$", "$$"); | |
| 4626 | if (!t2) | |
| 4627 | return NULL; | |
| 4628 | free_and_replace(t, t2); | |
| 4629 | } | |
| 4630 | ||
| 4631 | t2 = shell_escape(t ?: s, "\""); | |
| 4632 | if (!t2) | |
| 4633 | return NULL; | |
| 4634 | free_and_replace(t, t2); | |
| 4635 | ||
| 4636 | s = t; | |
| 4637 | ||
| 4638 | } else if (flags & UNIT_ESCAPE_C) { | |
| 4639 | char *t2; | |
| 4640 | ||
| 4641 | t2 = cescape(s); | |
| 4642 | if (!t2) | |
| 4643 | return NULL; | |
| 4644 | free_and_replace(t, t2); | |
| 4645 | ||
| 4646 | s = t; | |
| 4647 | } | |
| 4648 | ||
| 4649 | *buf = TAKE_PTR(t); | |
| 4650 | return s; | |
| 4651 | } | |
| 4652 | ||
| 4653 | char* unit_concat_strv(char **l, UnitWriteFlags flags) { | |
| 4654 | _cleanup_free_ char *result = NULL; | |
| 4655 | size_t n = 0; | |
| 4656 | ||
| 4657 | /* Takes a list of strings, escapes them, and concatenates them. This may be used to format command | |
| 4658 | * lines in a way suitable for ExecStart= stanzas. */ | |
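| | /* For example, the list { "echo", "a b" } yields the single string: "echo" "a b" (each entry | |
| | * escaped and double-quoted, entries separated by spaces). */ | |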
| 4659 | ||
| 4660 | STRV_FOREACH(i, l) { | |
| 4661 | _cleanup_free_ char *buf = NULL; | |
| 4662 | const char *p; | |
| 4663 | size_t a; | |
| 4664 | char *q; | |
| 4665 | ||
| 4666 | p = unit_escape_setting(*i, flags, &buf); | |
| 4667 | if (!p) | |
| 4668 | return NULL; | |
| 4669 | ||
| 4670 | a = (n > 0) + 1 + strlen(p) + 1; /* separating space + " + entry + " */ | |
| 4671 | if (!GREEDY_REALLOC(result, n + a + 1)) | |
| 4672 | return NULL; | |
| 4673 | ||
| 4674 | q = result + n; | |
| 4675 | if (n > 0) | |
| 4676 | *(q++) = ' '; | |
| 4677 | ||
| 4678 | *(q++) = '"'; | |
| 4679 | q = stpcpy(q, p); | |
| 4680 | *(q++) = '"'; | |
| 4681 | ||
| 4682 | n += a; | |
| 4683 | } | |
| 4684 | ||
| 4685 | if (!GREEDY_REALLOC(result, n + 1)) | |
| 4686 | return NULL; | |
| 4687 | ||
| 4688 | result[n] = 0; | |
| 4689 | ||
| 4690 | return TAKE_PTR(result); | |
| 4691 | } | |
| 4692 | ||
| 4693 | int unit_write_setting(Unit *u, UnitWriteFlags flags, const char *name, const char *data) { | |
| 4694 | _cleanup_free_ char *p = NULL, *q = NULL, *escaped = NULL; | |
| 4695 | const char *dir, *wrapped; | |
| 4696 | int r; | |
| 4697 | ||
| 4698 | assert(u); | |
| 4699 | assert(name); | |
| 4700 | assert(data); | |
| 4701 | ||
| 4702 | if (UNIT_WRITE_FLAGS_NOOP(flags)) | |
| 4703 | return 0; | |
| 4704 | ||
| 4705 | data = unit_escape_setting(data, flags, &escaped); | |
| 4706 | if (!data) | |
| 4707 | return -ENOMEM; | |
| 4708 | ||
| 4709 | /* Prefix the section header. If we are writing this out as a transient file, then let's suppress this if the | |
| 4710 | * previous section header is the same */ | |
| 4711 | ||
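| | /* u->last_section_private is a tri-state: < 0 means no section header has been written to the | |
| | * transient file yet, 0 means the last header written was [Unit], > 0 means it was the private | |
| | * section. */ | |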
| 4712 | if (flags & UNIT_PRIVATE) { | |
| 4713 | if (!UNIT_VTABLE(u)->private_section) | |
| 4714 | return -EINVAL; | |
| 4715 | ||
| 4716 | if (!u->transient_file || u->last_section_private < 0) | |
| 4717 | data = strjoina("[", UNIT_VTABLE(u)->private_section, "]\n", data); | |
| 4718 | else if (u->last_section_private == 0) | |
| 4719 | data = strjoina("\n[", UNIT_VTABLE(u)->private_section, "]\n", data); | |
| 4720 | } else { | |
| 4721 | if (!u->transient_file || u->last_section_private < 0) | |
| 4722 | data = strjoina("[Unit]\n", data); | |
| 4723 | else if (u->last_section_private > 0) | |
| 4724 | data = strjoina("\n[Unit]\n", data); | |
| 4725 | } | |
| 4726 | ||
| 4727 | if (u->transient_file) { | |
| 4728 | /* When this is a transient unit file still being created, let's not create a new drop-in, | |
| 4729 | * but instead write to the transient unit file. */ | |
| 4730 | fputs_with_newline(u->transient_file, data); | |
| 4731 | ||
| 4732 | /* Remember which section we wrote this entry to */ | |
| 4733 | u->last_section_private = !!(flags & UNIT_PRIVATE); | |
| 4734 | return 0; | |
| 4735 | } | |
| 4736 | ||
| 4737 | dir = unit_drop_in_dir(u, flags); | |
| 4738 | if (!dir) | |
| 4739 | return -EINVAL; | |
| 4740 | ||
| 4741 | wrapped = strjoina("# This is a drop-in unit file extension, created via \"systemctl set-property\"\n" | |
| 4742 | "# or an equivalent operation. Do not edit.\n", | |
| 4743 | data, | |
| 4744 | "\n"); | |
| 4745 | ||
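| | /* The "50" becomes the numeric prefix of the drop-in, i.e. q ends up as | |
| | * <dir>/<unit>.d/50-<name>.conf, with p set to the containing directory. */ | |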
| 4746 | r = drop_in_file(dir, u->id, 50, name, &p, &q); | |
| 4747 | if (r < 0) | |
| 4748 | return r; | |
| 4749 | ||
| 4750 | (void) mkdir_p_label(p, 0755); | |
| 4751 | ||
| 4752 | /* Make sure the drop-in dir is registered in our path cache. This way we don't need to stupidly | |
| 4753 | * recreate the cache after every drop-in we write. */ | |
| 4754 | if (u->manager->unit_path_cache) { | |
| 4755 | r = set_put_strdup_full(&u->manager->unit_path_cache, &path_hash_ops_free, p); | |
| 4756 | if (r < 0) | |
| 4757 | return r; | |
| 4758 | } | |
| 4759 | ||
| 4760 | r = write_string_file(q, wrapped, WRITE_STRING_FILE_CREATE|WRITE_STRING_FILE_ATOMIC|WRITE_STRING_FILE_LABEL); | |
| 4761 | if (r < 0) | |
| 4762 | return r; | |
| 4763 | ||
| 4764 | r = strv_push(&u->dropin_paths, q); | |
| 4765 | if (r < 0) | |
| 4766 | return r; | |
| 4767 | q = NULL; | |
| 4768 | ||
| 4769 | strv_uniq(u->dropin_paths); | |
| 4770 | ||
| 4771 | u->dropin_mtime = now(CLOCK_REALTIME); | |
| 4772 | ||
| 4773 | return 0; | |
| 4774 | } | |
| 4775 | ||
| 4776 | int unit_write_settingf(Unit *u, UnitWriteFlags flags, const char *name, const char *format, ...) { | |
| 4777 | _cleanup_free_ char *p = NULL; | |
| 4778 | va_list ap; | |
| 4779 | int r; | |
| 4780 | ||
| 4781 | assert(u); | |
| 4782 | assert(name); | |
| 4783 | assert(format); | |
| 4784 | ||
| 4785 | if (UNIT_WRITE_FLAGS_NOOP(flags)) | |
| 4786 | return 0; | |
| 4787 | ||
| 4788 | va_start(ap, format); | |
| 4789 | r = vasprintf(&p, format, ap); | |
| 4790 | va_end(ap); | |
| 4791 | ||
| 4792 | if (r < 0) | |
| 4793 | return -ENOMEM; | |
| 4794 | ||
| 4795 | return unit_write_setting(u, flags, name, p); | |
| 4796 | } | |
| 4797 | ||
| 4798 | int unit_make_transient(Unit *u) { | |
| 4799 | _cleanup_free_ char *path = NULL; | |
| 4800 | FILE *f; | |
| 4801 | ||
| 4802 | assert(u); | |
| 4803 | ||
| 4804 | if (!UNIT_VTABLE(u)->can_transient) | |
| 4805 | return -EOPNOTSUPP; | |
| 4806 | ||
| 4807 | (void) mkdir_p_label(u->manager->lookup_paths.transient, 0755); | |
| 4808 | ||
| 4809 | path = path_join(u->manager->lookup_paths.transient, u->id); | |
| 4810 | if (!path) | |
| 4811 | return -ENOMEM; | |
| 4812 | ||
| 4813 | /* Let's open the file we'll write the transient settings into. This file is kept open as long as we are | |
| 4814 | * creating the transient, and is closed in unit_load(), as soon as we start loading the file. */ | |
| 4815 | ||
| 4816 | WITH_UMASK(0022) { | |
| 4817 | f = fopen(path, "we"); | |
| 4818 | if (!f) | |
| 4819 | return -errno; | |
| 4820 | } | |
| 4821 | ||
| 4822 | safe_fclose(u->transient_file); | |
| 4823 | u->transient_file = f; | |
| 4824 | ||
| 4825 | free_and_replace(u->fragment_path, path); | |
| 4826 | ||
| 4827 | u->source_path = mfree(u->source_path); | |
| 4828 | u->dropin_paths = strv_free(u->dropin_paths); | |
| 4829 | u->fragment_mtime = u->source_mtime = u->dropin_mtime = 0; | |
| 4830 | ||
| 4831 | u->load_state = UNIT_STUB; | |
| 4832 | u->load_error = 0; | |
| 4833 | u->transient = true; | |
| 4834 | ||
| 4835 | unit_add_to_dbus_queue(u); | |
| 4836 | unit_add_to_gc_queue(u); | |
| 4837 | ||
| 4838 | fputs("# This is a transient unit file, created programmatically via the systemd API. Do not edit.\n", | |
| 4839 | u->transient_file); | |
| 4840 | ||
| 4841 | return 0; | |
| 4842 | } | |
| 4843 | ||
| 4844 | static bool ignore_leftover_process(const char *comm) { | |
| 4845 | return comm && comm[0] == '('; /* Most likely our own helper process (PAM?), ignore */ | |
| 4846 | } | |
| 4847 | ||
| 4848 | static int log_kill(const PidRef *pid, int sig, void *userdata) { | |
| 4849 | const Unit *u = ASSERT_PTR(userdata); | |
| 4850 | _cleanup_free_ char *comm = NULL; | |
| 4851 | ||
| 4852 | assert(pidref_is_set(pid)); | |
| 4853 | ||
| 4854 | (void) pidref_get_comm(pid, &comm); | |
| 4855 | ||
| 4856 | if (ignore_leftover_process(comm)) | |
| 4857 | /* Even though we didn't log anything, this callback is used in unit_kill_context(), hence we must return 1 | |
| 4858 | * here to let the manager know that a process was killed. */ | |
| 4859 | return 1; | |
| 4860 | ||
| 4861 | log_unit_notice(u, | |
| 4862 | "Killing process " PID_FMT " (%s) with signal SIG%s.", | |
| 4863 | pid->pid, | |
| 4864 | strna(comm), | |
| 4865 | signal_to_string(sig)); | |
| 4866 | ||
| 4867 | return 1; | |
| 4868 | } | |
| 4869 | ||
| 4870 | static int operation_to_signal( | |
| 4871 | const KillContext *c, | |
| 4872 | KillOperation k, | |
| 4873 | bool *ret_noteworthy) { | |
| 4874 | ||
| 4875 | assert(c); | |
| 4876 | assert(ret_noteworthy); | |
| 4877 | ||
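| | /* Maps the operation to the configured signal: KillSignal= (SIGTERM by default) for termination, | |
| | * RestartKillSignal= for restarts, FinalKillSignal= (SIGKILL by default) for the final kill, and | |
| | * WatchdogSignal= (SIGABRT by default) for watchdog timeouts. Also reports via ret_noteworthy | |
| | * whether the kill is worth logging. */ | |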
| 4878 | switch (k) { | |
| 4879 | ||
| 4880 | case KILL_TERMINATE: | |
| 4881 | case KILL_TERMINATE_AND_LOG: | |
| 4882 | *ret_noteworthy = k == KILL_TERMINATE_AND_LOG; | |
| 4883 | return c->kill_signal; | |
| 4884 | ||
| 4885 | case KILL_RESTART: | |
| 4886 | *ret_noteworthy = false; | |
| 4887 | return restart_kill_signal(c); | |
| 4888 | ||
| 4889 | case KILL_KILL: | |
| 4890 | *ret_noteworthy = true; | |
| 4891 | return c->final_kill_signal; | |
| 4892 | ||
| 4893 | case KILL_WATCHDOG: | |
| 4894 | *ret_noteworthy = true; | |
| 4895 | return c->watchdog_signal; | |
| 4896 | ||
| 4897 | default: | |
| 4898 | assert_not_reached(); | |
| 4899 | } | |
| 4900 | } | |
| 4901 | ||
| 4902 | static int unit_kill_context_one( | |
| 4903 | Unit *u, | |
| 4904 | const PidRef *pidref, | |
| 4905 | const char *type, | |
| 4906 | bool is_alien, | |
| 4907 | int sig, | |
| 4908 | bool send_sighup, | |
| 4909 | cg_kill_log_func_t log_func) { | |
| 4910 | ||
| 4911 | int r; | |
| 4912 | ||
| 4913 | assert(u); | |
| 4914 | assert(type); | |
| 4915 | ||
| 4916 | /* This returns > 0 if it makes sense to wait for SIGCHLD for the process, == 0 if not. */ | |
| 4917 | ||
| 4918 | if (!pidref_is_set(pidref)) | |
| 4919 | return 0; | |
| 4920 | ||
| 4921 | if (log_func) | |
| 4922 | log_func(pidref, sig, u); | |
| 4923 | ||
| 4924 | r = pidref_kill_and_sigcont(pidref, sig); | |
| 4925 | if (r == -ESRCH) | |
| 4926 | return !is_alien; | |
| 4927 | if (r < 0) { | |
| 4928 | _cleanup_free_ char *comm = NULL; | |
| 4929 | ||
| 4930 | (void) pidref_get_comm(pidref, &comm); | |
| 4931 | return log_unit_warning_errno(u, r, "Failed to kill %s process " PID_FMT " (%s), ignoring: %m", type, pidref->pid, strna(comm)); | |
| 4932 | } | |
| 4933 | ||
| 4934 | if (send_sighup) | |
| 4935 | (void) pidref_kill(pidref, SIGHUP); | |
| 4936 | ||
| 4937 | return !is_alien; | |
| 4938 | } | |
| 4939 | ||
| 4940 | int unit_kill_context(Unit *u, KillOperation k) { | |
| 4941 | bool wait_for_exit = false, send_sighup; | |
| 4942 | cg_kill_log_func_t log_func = NULL; | |
| 4943 | int sig, r; | |
| 4944 | ||
| 4945 | assert(u); | |
| 4946 | ||
| 4947 | /* Kill the processes belonging to this unit, in preparation for shutting the unit down. Returns > 0 | |
| 4948 | * if we killed something worth waiting for, 0 otherwise. Do not confuse with unit_kill_common() | |
| 4949 | * which is used for user-requested killing of unit processes. */ | |
| 4950 | ||
| 4951 | KillContext *c = unit_get_kill_context(u); | |
| 4952 | if (!c || c->kill_mode == KILL_NONE) | |
| 4953 | return 0; | |
| 4954 | ||
| 4955 | bool noteworthy; | |
| 4956 | sig = operation_to_signal(c, k, ¬eworthy); | |
| 4957 | if (noteworthy) | |
| 4958 | log_func = log_kill; | |
| 4959 | ||
| 4960 | send_sighup = | |
| 4961 | c->send_sighup && | |
| 4962 | IN_SET(k, KILL_TERMINATE, KILL_TERMINATE_AND_LOG) && | |
| 4963 | sig != SIGHUP; | |
| 4964 | ||
| 4965 | bool is_alien; | |
| 4966 | PidRef *main_pid = unit_main_pid_full(u, &is_alien); | |
| 4967 | r = unit_kill_context_one(u, main_pid, "main", is_alien, sig, send_sighup, log_func); | |
| 4968 | wait_for_exit = wait_for_exit || r > 0; | |
| 4969 | ||
| 4970 | r = unit_kill_context_one(u, unit_control_pid(u), "control", /* is_alien = */ false, sig, send_sighup, log_func); | |
| 4971 | wait_for_exit = wait_for_exit || r > 0; | |
| 4972 | ||
| 4973 | CGroupRuntime *crt = unit_get_cgroup_runtime(u); | |
| 4974 | if (crt && crt->cgroup_path && | |
| 4975 | (c->kill_mode == KILL_CONTROL_GROUP || (c->kill_mode == KILL_MIXED && k == KILL_KILL))) { | |
| 4976 | _cleanup_set_free_ Set *pid_set = NULL; | |
| 4977 | ||
| 4978 | /* Exclude the main/control pids from being killed via the cgroup */ | |
| 4979 | r = unit_pid_set(u, &pid_set); | |
| 4980 | if (r < 0) | |
| 4981 | return r; | |
| 4982 | ||
| 4983 | r = cg_kill_recursive( | |
| 4984 | crt->cgroup_path, | |
| 4985 | sig, | |
| 4986 | CGROUP_SIGCONT|CGROUP_IGNORE_SELF, | |
| 4987 | pid_set, | |
| 4988 | log_func, u); | |
| 4989 | if (r < 0) { | |
| 4990 | if (!IN_SET(r, -EAGAIN, -ESRCH, -ENOENT)) | |
| 4991 | log_unit_warning_errno(u, r, "Failed to kill control group %s, ignoring: %m", empty_to_root(crt->cgroup_path)); | |
| 4992 | ||
| 4993 | } else if (r > 0) { | |
| 4994 | ||
| 4995 | wait_for_exit = true; | |
| 4996 | ||
| 4997 | if (send_sighup) { | |
| 4998 | r = unit_pid_set(u, &pid_set); | |
| 4999 | if (r < 0) | |
| 5000 | return r; | |
| 5001 | ||
| 5002 | (void) cg_kill_recursive( | |
| 5003 | crt->cgroup_path, | |
| 5004 | SIGHUP, | |
| 5005 | CGROUP_IGNORE_SELF, | |
| 5006 | pid_set, | |
| 5007 | /* log_kill= */ NULL, | |
| 5008 | /* userdata= */ NULL); | |
| 5009 | } | |
| 5010 | } | |
| 5011 | } | |
| 5012 | ||
| 5013 | return wait_for_exit; | |
| 5014 | } | |
| 5015 | ||
| 5016 | int unit_add_mounts_for(Unit *u, const char *path, UnitDependencyMask mask, UnitMountDependencyType type) { | |
| 5017 | Hashmap **unit_map, **manager_map; | |
| 5018 | int r; | |
| 5019 | ||
| 5020 | assert(u); | |
| 5021 | assert(path); | |
| 5022 | assert(type >= 0 && type < _UNIT_MOUNT_DEPENDENCY_TYPE_MAX); | |
| 5023 | ||
| 5024 | unit_map = &u->mounts_for[type]; | |
| 5025 | manager_map = &u->manager->units_needing_mounts_for[type]; | |
| 5026 | ||
| 5027 | /* Registers a unit for requiring a certain path and all its prefixes. We keep a hashtable of these | |
| 5028 | * paths in the unit (from the path to the UnitDependencyInfo structure indicating how the | |
| 5029 | * dependency came to be). However, we build a prefix table for all possible prefixes so that newly | |
| 5030 | * appearing mount units can easily determine which units to make themselves a dependency of. */ | |
| 5031 | ||
| 5032 | if (!path_is_absolute(path)) | |
| 5033 | return -EINVAL; | |
| 5034 | ||
| 5035 | if (hashmap_contains(*unit_map, path)) /* Exit quickly if the path is already covered. */ | |
| 5036 | return 0; | |
| 5037 | ||
| 5038 | /* Use the canonical form of the path as the stored key. We call path_is_normalized() | |
| 5039 | * only after simplification, since path_is_normalized() rejects paths with '.'. | |
| 5040 | * path_is_normalized() also verifies that the path fits in PATH_MAX. */ | |
| 5041 | _cleanup_free_ char *p = NULL; | |
| 5042 | r = path_simplify_alloc(path, &p); | |
| 5043 | if (r < 0) | |
| 5044 | return r; | |
| 5045 | path = p; | |
| 5046 | ||
| 5047 | if (!path_is_normalized(path)) | |
| 5048 | return -EPERM; | |
| 5049 | ||
| 5050 | UnitDependencyInfo di = { | |
| 5051 | .origin_mask = mask | |
| 5052 | }; | |
| 5053 | ||
| 5054 | r = hashmap_ensure_put(unit_map, &path_hash_ops, p, di.data); | |
| 5055 | if (r < 0) | |
| 5056 | return r; | |
| 5057 | assert(r > 0); | |
| 5058 | TAKE_PTR(p); /* path remains a valid pointer to the string stored in the hashmap */ | |
| 5059 | ||
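| | /* PATH_FOREACH_PREFIX_MORE() visits the path itself and each of its ancestor prefixes (e.g. for | |
| | * "/var/lib/foo" also "/var/lib" and "/var"), so every level gets an entry in the prefix table. */ | |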
| 5060 | char prefix[strlen(path) + 1]; | |
| 5061 | PATH_FOREACH_PREFIX_MORE(prefix, path) { | |
| 5062 | Set *x; | |
| 5063 | ||
| 5064 | x = hashmap_get(*manager_map, prefix); | |
| 5065 | if (!x) { | |
| 5066 | _cleanup_free_ char *q = NULL; | |
| 5067 | ||
| 5068 | r = hashmap_ensure_allocated(manager_map, &path_hash_ops); | |
| 5069 | if (r < 0) | |
| 5070 | return r; | |
| 5071 | ||
| 5072 | q = strdup(prefix); | |
| 5073 | if (!q) | |
| 5074 | return -ENOMEM; | |
| 5075 | ||
| 5076 | x = set_new(NULL); | |
| 5077 | if (!x) | |
| 5078 | return -ENOMEM; | |
| 5079 | ||
| 5080 | r = hashmap_put(*manager_map, q, x); | |
| 5081 | if (r < 0) { | |
| 5082 | set_free(x); | |
| 5083 | return r; | |
| 5084 | } | |
| 5085 | q = NULL; | |
| 5086 | } | |
| 5087 | ||
| 5088 | r = set_put(x, u); | |
| 5089 | if (r < 0) | |
| 5090 | return r; | |
| 5091 | } | |
| 5092 | ||
| 5093 | return 0; | |
| 5094 | } | |
| 5095 | ||
| 5096 | int unit_setup_exec_runtime(Unit *u) { | |
| 5097 | _cleanup_(exec_shared_runtime_unrefp) ExecSharedRuntime *esr = NULL; | |
| 5098 | _cleanup_(dynamic_creds_unrefp) DynamicCreds *dcreds = NULL; | |
| 5099 | _cleanup_set_free_ Set *units = NULL; | |
| 5100 | ExecRuntime **rt; | |
| 5101 | ExecContext *ec; | |
| 5102 | size_t offset; | |
| 5103 | Unit *other; | |
| 5104 | int r; | |
| 5105 | ||
| 5106 | offset = UNIT_VTABLE(u)->exec_runtime_offset; | |
| 5107 | assert(offset > 0); | |
| 5108 | ||
| 5109 | /* Check if there already is an ExecRuntime for this unit. */ | |
| 5110 | rt = (ExecRuntime**) ((uint8_t*) u + offset); | |
| 5111 | if (*rt) | |
| 5112 | return 0; | |
| 5113 | ||
| 5114 | ec = ASSERT_PTR(unit_get_exec_context(u)); | |
| 5115 | ||
| 5116 | r = unit_get_transitive_dependency_set(u, UNIT_ATOM_JOINS_NAMESPACE_OF, &units); | |
| 5117 | if (r < 0) | |
| 5118 | return r; | |
| 5119 | ||
| 5120 | /* Try to get it from somebody else */ | |
| 5121 | SET_FOREACH(other, units) { | |
| 5122 | r = exec_shared_runtime_acquire(u->manager, NULL, other->id, false, &esr); | |
| 5123 | if (r < 0) | |
| 5124 | return r; | |
| 5125 | if (r > 0) | |
| 5126 | break; | |
| 5127 | } | |
| 5128 | ||
| 5129 | if (!esr) { | |
| 5130 | r = exec_shared_runtime_acquire(u->manager, ec, u->id, true, &esr); | |
| 5131 | if (r < 0) | |
| 5132 | return r; | |
| 5133 | } | |
| 5134 | ||
| 5135 | if (ec->dynamic_user) { | |
| 5136 | r = dynamic_creds_make(u->manager, ec->user, ec->group, &dcreds); | |
| 5137 | if (r < 0) | |
| 5138 | return r; | |
| 5139 | } | |
| 5140 | ||
| 5141 | r = exec_runtime_make(u, ec, esr, dcreds, rt); | |
| 5142 | if (r < 0) | |
| 5143 | return r; | |
| 5144 | ||
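| | /* On success exec_runtime_make() took ownership of both, hence disarm our cleanup handlers. */ | |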
| 5145 | TAKE_PTR(esr); | |
| 5146 | TAKE_PTR(dcreds); | |
| 5147 | ||
| 5148 | return r; | |
| 5149 | } | |
| 5150 | ||
| 5151 | CGroupRuntime *unit_setup_cgroup_runtime(Unit *u) { | |
| 5152 | size_t offset; | |
| 5153 | ||
| 5154 | assert(u); | |
| 5155 | ||
| 5156 | offset = UNIT_VTABLE(u)->cgroup_runtime_offset; | |
| 5157 | assert(offset > 0); | |
| 5158 | ||
| 5159 | CGroupRuntime **rt = (CGroupRuntime**) ((uint8_t*) u + offset); | |
| 5160 | if (*rt) | |
| 5161 | return *rt; | |
| 5162 | ||
| 5163 | return (*rt = cgroup_runtime_new()); | |
| 5164 | } | |
| 5165 | ||
| 5166 | bool unit_type_supported(UnitType t) { | |
| 5167 | static int8_t cache[_UNIT_TYPE_MAX] = {}; /* -1: disabled, 1: enabled, 0: don't know */ | |
| 5168 | int r; | |
| 5169 | ||
| 5170 | assert(t >= 0 && t < _UNIT_TYPE_MAX); | |
| 5171 | ||
| 5172 | if (cache[t] == 0) { | |
| 5173 | char *e; | |
| 5174 | ||
| 5175 | e = strjoina("SYSTEMD_SUPPORT_", unit_type_to_string(t)); | |
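| | /* i.e. for UNIT_SERVICE this checks $SYSTEMD_SUPPORT_SERVICE; setting it to 0 makes the type | |
| | * appear unsupported, which is handy e.g. for testing. */ | |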
| 5176 | ||
| 5177 | r = getenv_bool(ascii_strupper(e)); | |
| 5178 | if (r < 0 && r != -ENXIO) | |
| 5179 | log_debug_errno(r, "Failed to parse $%s, ignoring: %m", e); | |
| 5180 | ||
| 5181 | cache[t] = r == 0 ? -1 : 1; | |
| 5182 | } | |
| 5183 | if (cache[t] < 0) | |
| 5184 | return false; | |
| 5185 | ||
| 5186 | if (!unit_vtable[t]->supported) | |
| 5187 | return true; | |
| 5188 | ||
| 5189 | return unit_vtable[t]->supported(); | |
| 5190 | } | |
| 5191 | ||
| 5192 | void unit_warn_if_dir_nonempty(Unit *u, const char* where) { | |
| 5193 | int r; | |
| 5194 | ||
| 5195 | assert(u); | |
| 5196 | assert(where); | |
| 5197 | ||
| 5198 | if (!unit_log_level_test(u, LOG_NOTICE)) | |
| 5199 | return; | |
| 5200 | ||
| 5201 | r = dir_is_empty(where, /* ignore_hidden_or_backup= */ false); | |
| 5202 | if (r > 0 || r == -ENOTDIR) | |
| 5203 | return; | |
| 5204 | if (r < 0) { | |
| 5205 | log_unit_warning_errno(u, r, "Failed to check directory %s: %m", where); | |
| 5206 | return; | |
| 5207 | } | |
| 5208 | ||
| 5209 | log_unit_struct(u, LOG_NOTICE, | |
| 5210 | LOG_MESSAGE_ID(SD_MESSAGE_OVERMOUNTING_STR), | |
| 5211 | LOG_UNIT_INVOCATION_ID(u), | |
| 5212 | LOG_UNIT_MESSAGE(u, "Directory %s to mount over is not empty, mounting anyway.", where), | |
| 5213 | LOG_ITEM("WHERE=%s", where)); | |
| 5214 | } | |
| 5215 | ||
| 5216 | int unit_log_noncanonical_mount_path(Unit *u, const char *where) { | |
| 5217 | assert(u); | |
| 5218 | assert(where); | |
| 5219 | ||
| 5220 | /* No need to mention "." or "..", they would already have been rejected by unit_name_from_path() */ | |
| 5221 | log_unit_struct(u, LOG_ERR, | |
| 5222 | LOG_MESSAGE_ID(SD_MESSAGE_NON_CANONICAL_MOUNT_STR), | |
| 5223 | LOG_UNIT_INVOCATION_ID(u), | |
| 5224 | LOG_UNIT_MESSAGE(u, "Mount path %s is not canonical (contains a symlink).", where), | |
| 5225 | LOG_ITEM("WHERE=%s", where)); | |
| 5226 | ||
| 5227 | return -ELOOP; | |
| 5228 | } | |
| 5229 | ||
| 5230 | int unit_fail_if_noncanonical_mount_path(Unit *u, const char* where) { | |
| 5231 | int r; | |
| 5232 | ||
| 5233 | assert(u); | |
| 5234 | assert(where); | |
| 5235 | ||
| 5236 | _cleanup_free_ char *canonical_where = NULL; | |
| 5237 | r = chase(where, /* root= */ NULL, CHASE_NONEXISTENT, &canonical_where, /* ret_fd= */ NULL); | |
| 5238 | if (r < 0) { | |
| 5239 | log_unit_debug_errno(u, r, "Failed to check %s for symlinks, ignoring: %m", where); | |
| 5240 | return 0; | |
| 5241 | } | |
| 5242 | ||
| 5243 | /* We will happily ignore a trailing slash (or any redundant slashes) */ | |
| 5244 | if (path_equal(where, canonical_where)) | |
| 5245 | return 0; | |
| 5246 | ||
| 5247 | return unit_log_noncanonical_mount_path(u, where); | |
| 5248 | } | |
| 5249 | ||
| 5250 | bool unit_is_pristine(Unit *u) { | |
| 5251 | assert(u); | |
| 5252 | ||
| 5253 | /* Check if the unit already exists or is already around, in a number of different ways. Note that to | |
| 5254 | * cater for unit types such as slice, we are generally fine with units that are marked UNIT_LOADED | |
| 5255 | * even though nothing was actually loaded, as those unit types don't require a file on disk. | |
| 5256 | * | |
| 5257 | * Note that we don't check for drop-ins here, because we allow drop-ins for transient units | |
| 5258 | * identically to non-transient units, both unit-specific and hierarchical. E.g. for a-b-c.service: | |
| 5259 | * service.d/….conf, a-.service.d/….conf, a-b-.service.d/….conf, a-b-c.service.d/….conf. | |
| 5260 | */ | |
| 5261 | ||
| 5262 | return IN_SET(u->load_state, UNIT_NOT_FOUND, UNIT_LOADED) && | |
| 5263 | !u->fragment_path && | |
| 5264 | !u->source_path && | |
| 5265 | !u->job && | |
| 5266 | !u->merged_into; | |
| 5267 | } | |
| 5268 | ||
| 5269 | PidRef* unit_control_pid(Unit *u) { | |
| 5270 | assert(u); | |
| 5271 | ||
| 5272 | if (UNIT_VTABLE(u)->control_pid) | |
| 5273 | return UNIT_VTABLE(u)->control_pid(u); | |
| 5274 | ||
| 5275 | return NULL; | |
| 5276 | } | |
| 5277 | ||
| 5278 | PidRef* unit_main_pid_full(Unit *u, bool *ret_is_alien) { | |
| 5279 | assert(u); | |
| 5280 | ||
| 5281 | if (UNIT_VTABLE(u)->main_pid) | |
| 5282 | return UNIT_VTABLE(u)->main_pid(u, ret_is_alien); | |
| 5283 | ||
| 5284 | if (ret_is_alien) | |
| 5285 | *ret_is_alien = false; | |
| 5286 | return NULL; | |
| 5287 | } | |
| 5288 | ||
| 5289 | static void unit_modify_user_nft_set(Unit *u, bool add, NFTSetSource source, uint32_t element) { | |
| 5290 | int r; | |
| 5291 | ||
| 5292 | assert(u); | |
| 5293 | ||
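| | /* Adds/removes the unit's UID or GID to/from the nftables sets configured via NFTSet=, so that | |
| | * packet filter rules can match on traffic of this user or group. */ | |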
| 5294 | if (!MANAGER_IS_SYSTEM(u->manager)) | |
| 5295 | return; | |
| 5296 | ||
| 5297 | CGroupContext *c; | |
| 5298 | c = unit_get_cgroup_context(u); | |
| 5299 | if (!c) | |
| 5300 | return; | |
| 5301 | ||
| 5302 | if (!u->manager->nfnl) { | |
| 5303 | r = sd_nfnl_socket_open(&u->manager->nfnl); | |
| 5304 | if (r < 0) | |
| 5305 | return; | |
| 5306 | } | |
| 5307 | ||
| 5308 | FOREACH_ARRAY(nft_set, c->nft_set_context.sets, c->nft_set_context.n_sets) { | |
| 5309 | if (nft_set->source != source) | |
| 5310 | continue; | |
| 5311 | ||
| 5312 | r = nft_set_element_modify_any(u->manager->nfnl, add, nft_set->nfproto, nft_set->table, nft_set->set, &element, sizeof(element)); | |
| 5313 | if (r < 0) | |
| 5314 | log_warning_errno(r, "Failed to %s NFT set entry: family %s, table %s, set %s, ID %u, ignoring: %m", | |
| 5315 | add? "add" : "delete", nfproto_to_string(nft_set->nfproto), nft_set->table, nft_set->set, element); | |
| 5316 | else | |
| 5317 | log_debug("%s NFT set entry: family %s, table %s, set %s, ID %u", | |
| 5318 | add? "Added" : "Deleted", nfproto_to_string(nft_set->nfproto), nft_set->table, nft_set->set, element); | |
| 5319 | } | |
| 5320 | } | |
| 5321 | ||
| 5322 | static void unit_unref_uid_internal( | |
| 5323 | Unit *u, | |
| 5324 | uid_t *ref_uid, | |
| 5325 | bool destroy_now, | |
| 5326 | void (*_manager_unref_uid)(Manager *m, uid_t uid, bool destroy_now)) { | |
| 5327 | ||
| 5328 | assert(u); | |
| 5329 | assert(ref_uid); | |
| 5330 | assert(_manager_unref_uid); | |
| 5331 | ||
| 5332 | /* Generic implementation of both unit_unref_uid() and unit_unref_gid(), under the assumption that uid_t and | |
| 5333 | * gid_t are actually the same type, with the same validity rules. | |
| 5334 | * | |
| 5335 | * Drops a reference to UID/GID from a unit. */ | |
| 5336 | ||
| 5337 | assert_cc(sizeof(uid_t) == sizeof(gid_t)); | |
| 5338 | assert_cc(UID_INVALID == (uid_t) GID_INVALID); | |
| 5339 | ||
| 5340 | if (!uid_is_valid(*ref_uid)) | |
| 5341 | return; | |
| 5342 | ||
| 5343 | _manager_unref_uid(u->manager, *ref_uid, destroy_now); | |
| 5344 | *ref_uid = UID_INVALID; | |
| 5345 | } | |
| 5346 | ||
| 5347 | static void unit_unref_uid(Unit *u, bool destroy_now) { | |
| 5348 | assert(u); | |
| 5349 | ||
| 5350 | unit_modify_user_nft_set(u, /* add = */ false, NFT_SET_SOURCE_USER, u->ref_uid); | |
| 5351 | ||
| 5352 | unit_unref_uid_internal(u, &u->ref_uid, destroy_now, manager_unref_uid); | |
| 5353 | } | |
| 5354 | ||
| 5355 | static void unit_unref_gid(Unit *u, bool destroy_now) { | |
| 5356 | assert(u); | |
| 5357 | ||
| 5358 | unit_modify_user_nft_set(u, /* add = */ false, NFT_SET_SOURCE_GROUP, u->ref_gid); | |
| 5359 | ||
| 5360 | unit_unref_uid_internal(u, (uid_t*) &u->ref_gid, destroy_now, manager_unref_gid); | |
| 5361 | } | |
| 5362 | ||
| 5363 | void unit_unref_uid_gid(Unit *u, bool destroy_now) { | |
| 5364 | assert(u); | |
| 5365 | ||
| 5366 | unit_unref_uid(u, destroy_now); | |
| 5367 | unit_unref_gid(u, destroy_now); | |
| 5368 | } | |
| 5369 | ||
| 5370 | static int unit_ref_uid_internal( | |
| 5371 | Unit *u, | |
| 5372 | uid_t *ref_uid, | |
| 5373 | uid_t uid, | |
| 5374 | bool clean_ipc, | |
| 5375 | int (*_manager_ref_uid)(Manager *m, uid_t uid, bool clean_ipc)) { | |
| 5376 | ||
| 5377 | int r; | |
| 5378 | ||
| 5379 | assert(u); | |
| 5380 | assert(ref_uid); | |
| 5381 | assert(uid_is_valid(uid)); | |
| 5382 | assert(_manager_ref_uid); | |
| 5383 | ||
| 5384 | /* Generic implementation of both unit_ref_uid() and unit_ref_gid(), under the assumption that uid_t and gid_t | |
| 5385 | * are actually the same type, and have the same validity rules. | |
| 5386 | * | |
| 5387 | * Adds a reference on a specific UID/GID to this unit. Each unit referencing the same UID/GID maintains a | |
| 5388 | * reference so that we can destroy the UID/GID's IPC resources as soon as this is requested and the counter | |
| 5389 | * drops to zero. */ | |
| 5390 | ||
| 5391 | assert_cc(sizeof(uid_t) == sizeof(gid_t)); | |
| 5392 | assert_cc(UID_INVALID == (uid_t) GID_INVALID); | |
| 5393 | ||
| 5394 | if (*ref_uid == uid) | |
| 5395 | return 0; | |
| 5396 | ||
| 5397 | if (uid_is_valid(*ref_uid)) /* Already set? */ | |
| 5398 | return -EBUSY; | |
| 5399 | ||
| 5400 | r = _manager_ref_uid(u->manager, uid, clean_ipc); | |
| 5401 | if (r < 0) | |
| 5402 | return r; | |
| 5403 | ||
| 5404 | *ref_uid = uid; | |
| 5405 | return 1; | |
| 5406 | } | |
| 5407 | ||
| 5408 | static int unit_ref_uid(Unit *u, uid_t uid, bool clean_ipc) { | |
| 5409 | return unit_ref_uid_internal(u, &u->ref_uid, uid, clean_ipc, manager_ref_uid); | |
| 5410 | } | |
| 5411 | ||
| 5412 | static int unit_ref_gid(Unit *u, gid_t gid, bool clean_ipc) { | |
| 5413 | return unit_ref_uid_internal(u, (uid_t*) &u->ref_gid, (uid_t) gid, clean_ipc, manager_ref_gid); | |
| 5414 | } | |
| 5415 | ||
| 5416 | static int unit_ref_uid_gid_internal(Unit *u, uid_t uid, gid_t gid, bool clean_ipc) { | |
| 5417 | int r = 0, q = 0; | |
| 5418 | ||
| 5419 | assert(u); | |
| 5420 | ||
| 5421 | /* Reference both a UID and a GID in one go. Either references both, or neither. */ | |
| 5422 | ||
| 5423 | if (uid_is_valid(uid)) { | |
| 5424 | r = unit_ref_uid(u, uid, clean_ipc); | |
| 5425 | if (r < 0) | |
| 5426 | return r; | |
| 5427 | } | |
| 5428 | ||
| 5429 | if (gid_is_valid(gid)) { | |
| 5430 | q = unit_ref_gid(u, gid, clean_ipc); | |
| 5431 | if (q < 0) { | |
| 5432 | if (r > 0) | |
| 5433 | unit_unref_uid(u, false); | |
| 5434 | ||
| 5435 | return q; | |
| 5436 | } | |
| 5437 | } | |
| 5438 | ||
| 5439 | return r > 0 || q > 0; | |
| 5440 | } | |
| 5441 | ||
| 5442 | int unit_ref_uid_gid(Unit *u, uid_t uid, gid_t gid) { | |
| 5443 | ExecContext *c; | |
| 5444 | int r; | |
| 5445 | ||
| 5446 | assert(u); | |
| 5447 | ||
| 5448 | c = unit_get_exec_context(u); | |
| 5449 | ||
| 5450 | r = unit_ref_uid_gid_internal(u, uid, gid, c ? c->remove_ipc : false); | |
| 5451 | if (r < 0) | |
| 5452 | return log_unit_warning_errno(u, r, "Couldn't add UID/GID reference to unit, proceeding without: %m"); | |
| 5453 | ||
| 5454 | unit_modify_user_nft_set(u, /* add = */ true, NFT_SET_SOURCE_USER, uid); | |
| 5455 | unit_modify_user_nft_set(u, /* add = */ true, NFT_SET_SOURCE_GROUP, gid); | |
| 5456 | ||
| 5457 | return r; | |
| 5458 | } | |
| 5459 | ||
| 5460 | void unit_notify_user_lookup(Unit *u, uid_t uid, gid_t gid) { | |
| 5461 | int r; | |
| 5462 | ||
| 5463 | assert(u); | |
| 5464 | ||
| 5465 | /* This is invoked whenever one of the forked-off processes lets us know the UID/GID that its user/group names | |
| 5466 | * resolved to. We keep track of which UID/GID is currently assigned in order to be able to destroy its IPC | |
| 5467 | * objects when no service references the UID/GID anymore. */ | |
| 5468 | ||
| 5469 | r = unit_ref_uid_gid(u, uid, gid); | |
| 5470 | if (r > 0) | |
| 5471 | unit_add_to_dbus_queue(u); | |
| 5472 | } | |
| 5473 | ||
| 5474 | int unit_acquire_invocation_id(Unit *u) { | |
| 5475 | sd_id128_t id; | |
| 5476 | int r; | |
| 5477 | ||
| 5478 | assert(u); | |
| 5479 | ||
| 5480 | r = sd_id128_randomize(&id); | |
| 5481 | if (r < 0) | |
| 5482 | return log_unit_error_errno(u, r, "Failed to generate invocation ID for unit: %m"); | |
| 5483 | ||
| 5484 | r = unit_set_invocation_id(u, id); | |
| 5485 | if (r < 0) | |
| 5486 | return log_unit_error_errno(u, r, "Failed to set invocation ID for unit: %m"); | |
| 5487 | ||
| 5488 | unit_add_to_dbus_queue(u); | |
| 5489 | return 0; | |
| 5490 | } | |
| 5491 | ||
| 5492 | int unit_set_exec_params(Unit *u, ExecParameters *p) { | |
| 5493 | int r; | |
| 5494 | ||
| 5495 | assert(u); | |
| 5496 | assert(p); | |
| 5497 | ||
| 5498 | /* Copy parameters from manager */ | |
| 5499 | r = manager_get_effective_environment(u->manager, &p->environment); | |
| 5500 | if (r < 0) | |
| 5501 | return r; | |
| 5502 | ||
| 5503 | p->runtime_scope = u->manager->runtime_scope; | |
| 5504 | ||
| 5505 | r = strdup_to(&p->confirm_spawn, manager_get_confirm_spawn(u->manager)); | |
| 5506 | if (r < 0) | |
| 5507 | return r; | |
| 5508 | ||
| 5509 | p->prefix = u->manager->prefix; | |
| 5510 | SET_FLAG(p->flags, EXEC_PASS_LOG_UNIT|EXEC_CHOWN_DIRECTORIES, MANAGER_IS_SYSTEM(u->manager)); | |
| 5511 | ||
| 5512 | /* Copy parameters from unit */ | |
| 5513 | CGroupRuntime *crt = unit_get_cgroup_runtime(u); | |
| 5514 | p->cgroup_path = crt ? crt->cgroup_path : NULL; | |
| 5515 | SET_FLAG(p->flags, EXEC_CGROUP_DELEGATE, unit_cgroup_delegate(u)); | |
| 5516 | ||
| 5517 | p->received_credentials_directory = u->manager->received_credentials_directory; | |
| 5518 | p->received_encrypted_credentials_directory = u->manager->received_encrypted_credentials_directory; | |
| 5519 | ||
| 5520 | p->shall_confirm_spawn = u->manager->confirm_spawn; | |
| 5521 | ||
| 5522 | p->fallback_smack_process_label = u->manager->defaults.smack_process_label; | |
| 5523 | ||
| 5524 | if (u->manager->restrict_fs && p->bpf_restrict_fs_map_fd < 0) { | |
| 5525 | int fd = bpf_restrict_fs_map_fd(u); | |
| 5526 | if (fd < 0) | |
| 5527 | return fd; | |
| 5528 | ||
| 5529 | p->bpf_restrict_fs_map_fd = fd; | |
| 5530 | } | |
| 5531 | ||
| 5532 | p->user_lookup_fd = u->manager->user_lookup_fds[1]; | |
| 5533 | p->handoff_timestamp_fd = u->manager->handoff_timestamp_fds[1]; | |
| 5534 | if (UNIT_VTABLE(u)->notify_pidref) | |
| 5535 | p->pidref_transport_fd = u->manager->pidref_transport_fds[1]; | |
| 5536 | ||
| 5537 | p->cgroup_id = crt ? crt->cgroup_id : 0; | |
| 5538 | p->invocation_id = u->invocation_id; | |
| 5539 | sd_id128_to_string(p->invocation_id, p->invocation_id_string); | |
| 5540 | p->unit_id = strdup(u->id); | |
| 5541 | if (!p->unit_id) | |
| 5542 | return -ENOMEM; | |
| 5543 | ||
| 5544 | p->debug_invocation = u->debug_invocation; | |
| 5545 | ||
| 5546 | return 0; | |
| 5547 | } | |
| 5548 | ||
| 5549 | int unit_fork_helper_process(Unit *u, const char *name, bool into_cgroup, PidRef *ret) { | |
| 5550 | CGroupRuntime *crt = NULL; | |
| 5551 | pid_t pid; | |
| 5552 | int r; | |
| 5553 | ||
| 5554 | assert(u); | |
| 5555 | assert(ret); | |
| 5556 | ||
| 5557 | /* Forks off a helper process and makes sure it is a member of the unit's cgroup, if configured to | |
| 5558 | * do so. Returns == 0 in the child, and > 0 in the parent. In the parent, *ret is filled in with | |
| 5559 | * a reference to the child's PID. */ | |
| 5560 | ||
| 5561 | if (into_cgroup) { | |
| 5562 | r = unit_realize_cgroup(u); | |
| 5563 | if (r < 0) | |
| 5564 | return r; | |
| 5565 | ||
| 5566 | crt = unit_get_cgroup_runtime(u); | |
| 5567 | } | |
| 5568 | ||
| 5569 | r = safe_fork(name, FORK_REOPEN_LOG|FORK_DEATHSIG_SIGTERM, &pid); | |
| 5570 | if (r < 0) | |
| 5571 | return r; | |
| 5572 | if (r > 0) { | |
| 5573 | _cleanup_(pidref_done) PidRef pidref = PIDREF_NULL; | |
| 5574 | int q; | |
| 5575 | ||
| 5576 | /* Parent */ | |
| 5577 | ||
| 5578 | q = pidref_set_pid(&pidref, pid); | |
| 5579 | if (q < 0) | |
| 5580 | return q; | |
| 5581 | ||
| 5582 | *ret = TAKE_PIDREF(pidref); | |
| 5583 | return r; | |
| 5584 | } | |
| 5585 | ||
| 5586 | /* Child */ | |
| 5587 | ||
| 5588 | (void) default_signals(SIGNALS_CRASH_HANDLER, SIGNALS_IGNORE); | |
| 5589 | (void) ignore_signals(SIGPIPE); | |
| 5590 | ||
| 5591 | if (crt && crt->cgroup_path) { | |
| 5592 | r = cg_attach(crt->cgroup_path, 0); | |
| 5593 | if (r < 0) { | |
| 5594 | log_unit_error_errno(u, r, "Failed to join unit cgroup %s: %m", empty_to_root(crt->cgroup_path)); | |
| 5595 | _exit(EXIT_CGROUP); | |
| 5596 | } | |
| 5597 | } | |
| 5598 | ||
| 5599 | return 0; | |
| 5600 | } | |
| 5601 | ||
| 5602 | int unit_fork_and_watch_rm_rf(Unit *u, char **paths, PidRef *ret_pid) { | |
| 5603 | _cleanup_(pidref_done) PidRef pid = PIDREF_NULL; | |
| 5604 | int r; | |
| 5605 | ||
| 5606 | assert(u); | |
| 5607 | assert(ret_pid); | |
| 5608 | ||
| 5609 | r = unit_fork_helper_process(u, "(sd-rmrf)", /* into_cgroup= */ true, &pid); | |
| 5610 | if (r < 0) | |
| 5611 | return r; | |
| 5612 | if (r == 0) { | |
| 5613 | int ret = EXIT_SUCCESS; | |
| 5614 | ||
| 5615 | STRV_FOREACH(i, paths) { | |
| 5616 | r = rm_rf(*i, REMOVE_ROOT|REMOVE_PHYSICAL|REMOVE_MISSING_OK); | |
| 5617 | if (r < 0) { | |
| 5618 | log_error_errno(r, "Failed to remove '%s': %m", *i); | |
| 5619 | ret = EXIT_FAILURE; | |
| 5620 | } | |
| 5621 | } | |
| 5622 | ||
| 5623 | _exit(ret); | |
| 5624 | } | |
| 5625 | ||
| 5626 | r = unit_watch_pidref(u, &pid, /* exclusive= */ true); | |
| 5627 | if (r < 0) | |
| 5628 | return r; | |
| 5629 | ||
| 5630 | *ret_pid = TAKE_PIDREF(pid); | |
| 5631 | return 0; | |
| 5632 | } | |
| 5633 | ||
| 5634 | static void unit_update_dependency_mask(Hashmap *deps, Unit *other, UnitDependencyInfo di) { | |
| 5635 | assert(deps); | |
| 5636 | assert(other); | |
| 5637 | ||
| 5638 | if (di.origin_mask == 0 && di.destination_mask == 0) | |
| 5639 | /* No bit set anymore, let's drop the whole entry */ | |
| 5640 | assert_se(hashmap_remove(deps, other)); | |
| 5641 | else | |
| 5642 | /* Mask was reduced, let's update the entry */ | |
| 5643 | assert_se(hashmap_update(deps, other, di.data) == 0); | |
| 5644 | } | |
| 5645 | ||
| 5646 | void unit_remove_dependencies(Unit *u, UnitDependencyMask mask) { | |
| 5647 | Hashmap *deps; | |
| 5648 | assert(u); | |
| 5649 | ||
| 5650 | /* Removes all dependencies u has on other units marked for ownership by 'mask'. */ | |
| 5651 | ||
| 5652 | if (mask == 0) | |
| 5653 | return; | |
| 5654 | ||
| 5655 | HASHMAP_FOREACH(deps, u->dependencies) { | |
| 5656 | bool done; | |
| 5657 | ||
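| | /* Restart the iteration from scratch each time we modified an entry, since the modification may | |
| | * remove the entry from the hashmap, invalidating any ongoing iteration over it. */ | |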
| 5658 | do { | |
| 5659 | UnitDependencyInfo di; | |
| 5660 | Unit *other; | |
| 5661 | ||
| 5662 | done = true; | |
| 5663 | ||
| 5664 | HASHMAP_FOREACH_KEY(di.data, other, deps) { | |
| 5665 | Hashmap *other_deps; | |
| 5666 | ||
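| | /* FLAGS_SET(~mask, di.origin_mask) is true if none of the entry's origin bits fall within | |
| | * 'mask', i.e. there's nothing to drop for this entry. */ | |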
| 5667 | if (FLAGS_SET(~mask, di.origin_mask)) | |
| 5668 | continue; | |
| 5669 | ||
| 5670 | di.origin_mask &= ~mask; | |
| 5671 | unit_update_dependency_mask(deps, other, di); | |
| 5672 | ||
| 5673 | /* We updated the dependency from our unit to the other unit now. But most | |
| 5674 | * dependencies imply a reverse dependency. Hence, let's delete that one | |
| 5675 | * too. For that we go through all dependency types on the other unit and | |
| 5676 | * delete all those which point to us and have the right mask set. */ | |
| 5677 | ||
| 5678 | HASHMAP_FOREACH(other_deps, other->dependencies) { | |
| 5679 | UnitDependencyInfo dj; | |
| 5680 | ||
| 5681 | dj.data = hashmap_get(other_deps, u); | |
| 5682 | if (FLAGS_SET(~mask, dj.destination_mask)) | |
| 5683 | continue; | |
| 5684 | ||
| 5685 | dj.destination_mask &= ~mask; | |
| 5686 | unit_update_dependency_mask(other_deps, u, dj); | |
| 5687 | } | |
| 5688 | ||
| 5689 | unit_add_to_gc_queue(other); | |
| 5690 | ||
| 5691 | /* The unit 'other' may not be wanted by the unit 'u'. */ | |
| 5692 | unit_submit_to_stop_when_unneeded_queue(other); | |
| 5693 | ||
| 5694 | u->dependency_generation++; | |
| 5695 | other->dependency_generation++; | |
| 5696 | ||
| 5697 | done = false; | |
| 5698 | break; | |
| 5699 | } | |
| 5700 | ||
| 5701 | } while (!done); | |
| 5702 | } | |
| 5703 | } | |
| 5704 | ||
| 5705 | static int unit_get_invocation_path(Unit *u, char **ret) { | |
| 5706 | char *p; | |
| 5707 | int r; | |
| 5708 | ||
| 5709 | assert(u); | |
| 5710 | assert(ret); | |
| 5711 | ||
| 5712 | if (MANAGER_IS_SYSTEM(u->manager)) | |
| 5713 | p = strjoin("/run/systemd/units/invocation:", u->id); | |
| 5714 | else { | |
| 5715 | _cleanup_free_ char *user_path = NULL; | |
| 5716 | ||
| 5717 | r = xdg_user_runtime_dir("/systemd/units/invocation:", &user_path); | |
| 5718 | if (r < 0) | |
| 5719 | return r; | |
| 5720 | ||
| 5721 | p = strjoin(user_path, u->id); | |
| 5722 | } | |
| 5723 | if (!p) | |
| 5724 | return -ENOMEM; | |
| 5725 | ||
| 5726 | *ret = p; | |
| 5727 | return 0; | |
| 5728 | } | |
| 5729 | ||
| 5730 | static int unit_export_invocation_id(Unit *u) { | |
| 5731 | _cleanup_free_ char *p = NULL; | |
| 5732 | int r; | |
| 5733 | ||
| 5734 | assert(u); | |
| 5735 | ||
| 5736 | if (u->exported_invocation_id) | |
| 5737 | return 0; | |
| 5738 | ||
| 5739 | if (sd_id128_is_null(u->invocation_id)) | |
| 5740 | return 0; | |
| 5741 | ||
| 5742 | r = unit_get_invocation_path(u, &p); | |
| 5743 | if (r < 0) | |
| 5744 | return log_unit_debug_errno(u, r, "Failed to get invocation path: %m"); | |
| 5745 | ||
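| | /* The ID is stored as the symlink's target rather than as file content, so it can be written | |
| | * atomically and read back with a single readlink(). */ | |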
| 5746 | r = symlinkat_atomic_full(u->invocation_id_string, AT_FDCWD, p, SYMLINK_LABEL); | |
| 5747 | if (r < 0) | |
| 5748 | return log_unit_debug_errno(u, r, "Failed to create invocation ID symlink %s: %m", p); | |
| 5749 | ||
| 5750 | u->exported_invocation_id = true; | |
| 5751 | return 0; | |
| 5752 | } | |
| 5753 | ||
| 5754 | static int unit_export_log_level_max(Unit *u, int log_level_max, bool overwrite) { | |
| 5755 | const char *p; | |
| 5756 | char buf[2]; | |
| 5757 | int r; | |
| 5758 | ||
| 5759 | assert(u); | |
| 5760 | ||
| 5761 | /* When the debug_invocation logic runs, overwrite will be true as we always want to switch the max | |
| 5762 | * log level that the journal applies, and we want to always restore the previous level once done */ | |
| 5763 | ||
| 5764 | if (!overwrite && u->exported_log_level_max) | |
| 5765 | return 0; | |
| 5766 | ||
| 5767 | if (log_level_max < 0) | |
| 5768 | return 0; | |
| 5769 | ||
| 5770 | assert(log_level_max <= 7); | |
| 5771 | ||
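| | /* Encode the level as a single ASCII digit, e.g. "7" for LOG_DEBUG. */ | |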
| 5772 | buf[0] = '0' + log_level_max; | |
| 5773 | buf[1] = 0; | |
| 5774 | ||
| 5775 | p = strjoina("/run/systemd/units/log-level-max:", u->id); | |
| 5776 | r = symlink_atomic(buf, p); | |
| 5777 | if (r < 0) | |
| 5778 | return log_unit_debug_errno(u, r, "Failed to create maximum log level symlink %s: %m", p); | |
| 5779 | ||
| 5780 | u->exported_log_level_max = true; | |
| 5781 | return 0; | |
| 5782 | } | |
| 5783 | ||
| 5784 | static int unit_export_log_extra_fields(Unit *u, const ExecContext *c) { | |
| 5785 | _cleanup_close_ int fd = -EBADF; | |
| 5786 | struct iovec *iovec; | |
| 5787 | const char *p; | |
| 5788 | char *pattern; | |
| 5789 | le64_t *sizes; | |
| 5790 | ssize_t n; | |
| 5791 | int r; | |
| 5792 | ||
| 5793 | if (u->exported_log_extra_fields) | |
| 5794 | return 0; | |
| 5795 | ||
| 5796 | if (c->n_log_extra_fields <= 0) | |
| 5797 | return 0; | |
| 5798 | ||
| 5799 | sizes = newa(le64_t, c->n_log_extra_fields); | |
| 5800 | iovec = newa(struct iovec, c->n_log_extra_fields * 2); | |
| 5801 | ||
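| | /* Serialize each field as a little-endian 64-bit length immediately followed by the raw field | |
| | * data, pairing two iovecs per field. */ | |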
| 5802 | for (size_t i = 0; i < c->n_log_extra_fields; i++) { | |
| 5803 | sizes[i] = htole64(c->log_extra_fields[i].iov_len); | |
| 5804 | ||
| 5805 | iovec[i*2] = IOVEC_MAKE(sizes + i, sizeof(le64_t)); | |
| 5806 | iovec[i*2+1] = c->log_extra_fields[i]; | |
| 5807 | } | |
| 5808 | ||
| 5809 | p = strjoina("/run/systemd/units/log-extra-fields:", u->id); | |
| 5810 | pattern = strjoina(p, ".XXXXXX"); | |
| 5811 | ||
| 5812 | fd = mkostemp_safe(pattern); | |
| 5813 | if (fd < 0) | |
| 5814 | return log_unit_debug_errno(u, fd, "Failed to create extra fields file %s: %m", p); | |
| 5815 | ||
| 5816 | n = writev(fd, iovec, c->n_log_extra_fields*2); | |
| 5817 | if (n < 0) { | |
| 5818 | r = log_unit_debug_errno(u, errno, "Failed to write extra fields: %m"); | |
| 5819 | goto fail; | |
| 5820 | } | |
| 5821 | ||
| 5822 | (void) fchmod(fd, 0644); | |
| 5823 | ||
| 5824 | if (rename(pattern, p) < 0) { | |
| 5825 | r = log_unit_debug_errno(u, errno, "Failed to rename extra fields file: %m"); | |
| 5826 | goto fail; | |
| 5827 | } | |
| 5828 | ||
| 5829 | u->exported_log_extra_fields = true; | |
| 5830 | return 0; | |
| 5831 | ||
| 5832 | fail: | |
| 5833 | (void) unlink(pattern); | |
| 5834 | return r; | |
| 5835 | } | |
| 5836 | ||
| 5837 | static int unit_export_log_ratelimit_interval(Unit *u, const ExecContext *c) { | |
| 5838 | _cleanup_free_ char *buf = NULL; | |
| 5839 | const char *p; | |
| 5840 | int r; | |
| 5841 | ||
| 5842 | assert(u); | |
| 5843 | assert(c); | |
| 5844 | ||
| 5845 | if (u->exported_log_ratelimit_interval) | |
| 5846 | return 0; | |
| 5847 | ||
| 5848 | if (c->log_ratelimit.interval == 0) | |
| 5849 | return 0; | |
| 5850 | ||
| 5851 | p = strjoina("/run/systemd/units/log-rate-limit-interval:", u->id); | |
| 5852 | ||
| 5853 | if (asprintf(&buf, "%" PRIu64, c->log_ratelimit.interval) < 0) | |
| 5854 | return log_oom(); | |
| 5855 | ||
| 5856 | r = symlink_atomic(buf, p); | |
| 5857 | if (r < 0) | |
| 5858 | return log_unit_debug_errno(u, r, "Failed to create log rate limit interval symlink %s: %m", p); | |
| 5859 | ||
| 5860 | u->exported_log_ratelimit_interval = true; | |
| 5861 | return 0; | |
| 5862 | } | |
| 5863 | ||
| 5864 | static int unit_export_log_ratelimit_burst(Unit *u, const ExecContext *c) { | |
| 5865 | _cleanup_free_ char *buf = NULL; | |
| 5866 | const char *p; | |
| 5867 | int r; | |
| 5868 | ||
| 5869 | assert(u); | |
| 5870 | assert(c); | |
| 5871 | ||
| 5872 | if (u->exported_log_ratelimit_burst) | |
| 5873 | return 0; | |
| 5874 | ||
| 5875 | if (c->log_ratelimit.burst == 0) | |
| 5876 | return 0; | |
| 5877 | ||
| 5878 | p = strjoina("/run/systemd/units/log-rate-limit-burst:", u->id); | |
| 5879 | ||
| 5880 | if (asprintf(&buf, "%u", c->log_ratelimit.burst) < 0) | |
| 5881 | return log_oom(); | |
| 5882 | ||
| 5883 | r = symlink_atomic(buf, p); | |
| 5884 | if (r < 0) | |
| 5885 | return log_unit_debug_errno(u, r, "Failed to create log rate limit burst symlink %s: %m", p); | |
| 5886 | ||
| 5887 | u->exported_log_ratelimit_burst = true; | |
| 5888 | return 0; | |
| 5889 | } | |
| 5890 | ||
| 5891 | void unit_export_state_files(Unit *u) { | |
| 5892 | const ExecContext *c; | |
| 5893 | ||
| 5894 | assert(u); | |
| 5895 | ||
| 5896 | if (!u->id) | |
| 5897 | return; | |
| 5898 | ||
| 5899 | if (MANAGER_IS_TEST_RUN(u->manager)) | |
| 5900 | return; | |
| 5901 | ||
| 5902 | /* Exports a couple of unit properties to /run/systemd/units/, so that journald can quickly query this data | |
| 5903 | * from there. Ideally, journald would use IPC to query this, like everybody else, but that's hard, as long as | |
| 5904 | * the IPC system itself and PID 1 also log to the journal. | |
| 5905 | * | |
| 5906 | * Note that these files really shouldn't be considered API for anyone else, as using a runtime file system as | |
| 5907 | * an IPC replacement is not compatible with today's world of file system namespaces. However, this doesn't really | |
| 5908 | * apply to communication between the journal and systemd, as we assume that these two daemons live in the same | |
| 5909 | * namespace at least. | |
| 5910 | * | |
| 5911 | * Note that some of the "files" exported here are actually symlinks and not regular files. Symlinks work | |
| 5912 | * better for storing small bits of data, in particular as we can write them with two system calls, and read | |
| 5913 | * them with one. */ | |
| 5914 | ||
| 5915 | (void) unit_export_invocation_id(u); | |
| 5916 | ||
| 5917 | if (!MANAGER_IS_SYSTEM(u->manager)) | |
| 5918 | return; | |
| 5919 | ||
| 5920 | c = unit_get_exec_context(u); | |
| 5921 | if (c) { | |
| 5922 | (void) unit_export_log_level_max(u, c->log_level_max, /* overwrite= */ false); | |
| 5923 | (void) unit_export_log_extra_fields(u, c); | |
| 5924 | (void) unit_export_log_ratelimit_interval(u, c); | |
| 5925 | (void) unit_export_log_ratelimit_burst(u, c); | |
| 5926 | } | |
| 5927 | } | |
| 5928 | ||
| 5929 | void unit_unlink_state_files(Unit *u) { | |
| 5930 | const char *p; | |
| 5931 | ||
| 5932 | assert(u); | |
| 5933 | ||
| 5934 | if (!u->id) | |
| 5935 | return; | |
| 5936 | ||
| 5937 | /* Undoes the effect of unit_export_state_files() */ | |
| 5938 | ||
| 5939 | if (u->exported_invocation_id) { | |
| 5940 | _cleanup_free_ char *invocation_path = NULL; | |
| 5941 | int r = unit_get_invocation_path(u, &invocation_path); | |
| 5942 | if (r >= 0) { | |
| 5943 | (void) unlink(invocation_path); | |
| 5944 | u->exported_invocation_id = false; | |
| 5945 | } | |
| 5946 | } | |
| 5947 | ||
| 5948 | if (!MANAGER_IS_SYSTEM(u->manager)) | |
| 5949 | return; | |
| 5950 | ||
| 5951 | if (u->exported_log_level_max) { | |
| 5952 | p = strjoina("/run/systemd/units/log-level-max:", u->id); | |
| 5953 | (void) unlink(p); | |
| 5954 | ||
| 5955 | u->exported_log_level_max = false; | |
| 5956 | } | |
| 5957 | ||
| 5958 | if (u->exported_log_extra_fields) { | |
| 5959 | p = strjoina("/run/systemd/units/extra-fields:", u->id); | |
| 5960 | (void) unlink(p); | |
| 5961 | ||
| 5962 | u->exported_log_extra_fields = false; | |
| 5963 | } | |
| 5964 | ||
| 5965 | if (u->exported_log_ratelimit_interval) { | |
| 5966 | p = strjoina("/run/systemd/units/log-rate-limit-interval:", u->id); | |
| 5967 | (void) unlink(p); | |
| 5968 | ||
| 5969 | u->exported_log_ratelimit_interval = false; | |
| 5970 | } | |
| 5971 | ||
| 5972 | if (u->exported_log_ratelimit_burst) { | |
| 5973 | p = strjoina("/run/systemd/units/log-rate-limit-burst:", u->id); | |
| 5974 | (void) unlink(p); | |
| 5975 | ||
| 5976 | u->exported_log_ratelimit_burst = false; | |
| 5977 | } | |
| 5978 | } | |
| 5979 | ||
| 5980 | int unit_set_debug_invocation(Unit *u, bool enable) { | |
| 5981 | int r; | |
| 5982 | ||
| 5983 | assert(u); | |
| 5984 | ||
| 5985 | if (u->debug_invocation == enable) | |
| 5986 | return 0; /* Nothing to do */ | |
| 5987 | ||
| 5988 | u->debug_invocation = enable; | |
| 5989 | ||
| 5990 | /* Ensure that the new log level is exported for the journal, in place of the previous one */ | |
| 5991 | if (u->exported_log_level_max) { | |
| 5992 | const ExecContext *ec = unit_get_exec_context(u); | |
| 5993 | if (ec) { | |
| 5994 | r = unit_export_log_level_max(u, enable ? LOG_PRI(LOG_DEBUG) : ec->log_level_max, /* overwrite= */ true); | |
| 5995 | if (r < 0) | |
| 5996 | return r; | |
| 5997 | } | |
| 5998 | } | |
| 5999 | ||
| 6000 | return 1; | |
| 6001 | } | |
| 6002 | ||
| 6003 | int unit_prepare_exec(Unit *u) { | |
| 6004 | int r; | |
| 6005 | ||
| 6006 | assert(u); | |
| 6007 | ||
| 6008 | /* Prepares everything so that we can fork off a process for this unit */ | |
| 6009 | ||
| 6010 | r = unit_realize_cgroup(u); | |
| 6011 | if (r < 0) | |
| 6012 | return r; | |
| 6013 | ||
| 6014 | CGroupRuntime *crt = unit_get_cgroup_runtime(u); | |
| 6015 | if (crt && crt->reset_accounting) { | |
| 6016 | (void) unit_reset_accounting(u); | |
| 6017 | crt->reset_accounting = false; | |
| 6018 | } | |
| 6019 | ||
| 6020 | unit_export_state_files(u); | |
| 6021 | ||
| 6022 | r = unit_setup_exec_runtime(u); | |
| 6023 | if (r < 0) | |
| 6024 | return r; | |
| 6025 | ||
| 6026 | return 0; | |
| 6027 | } | |
| 6028 | ||
| 6029 | static int unit_log_leftover_process_start(const PidRef *pid, int sig, void *userdata) { | |
| 6030 | const Unit *u = ASSERT_PTR(userdata); | |
| 6031 | _cleanup_free_ char *comm = NULL; | |
| 6032 | ||
| 6033 | assert(pidref_is_set(pid)); | |
| 6034 | ||
| 6035 | (void) pidref_get_comm(pid, &comm); | |
| 6036 | ||
| 6037 | if (ignore_leftover_process(comm)) | |
| 6038 | return 0; | |
| 6039 | ||
| 6040 | /* During start we print a warning */ | |
| 6041 | ||
| 6042 | log_unit_warning(u, | |
| 6043 | "Found left-over process " PID_FMT " (%s) in control group while starting unit. Ignoring.\n" | |
| 6044 | "This usually indicates unclean termination of a previous run, or service implementation deficiencies.", | |
| 6045 | pid->pid, strna(comm)); | |
| 6046 | ||
| 6047 | return 1; | |
| 6048 | } | |
| 6049 | ||
| 6050 | static int unit_log_leftover_process_stop(const PidRef *pid, int sig, void *userdata) { | |
| 6051 | const Unit *u = ASSERT_PTR(userdata); | |
| 6052 | _cleanup_free_ char *comm = NULL; | |
| 6053 | ||
| 6054 | assert(pidref_is_set(pid)); | |
| 6055 | ||
| 6056 | (void) pidref_get_comm(pid, &comm); | |
| 6057 | ||
| 6058 | if (ignore_leftover_process(comm)) | |
| 6059 | return 0; | |
| 6060 | ||
| 6061 | /* During stop we only print an informational message */ | |
| 6062 | ||
| 6063 | log_unit_info(u, | |
| 6064 | "Unit process " PID_FMT " (%s) remains running after unit stopped.", | |
| 6065 | pid->pid, strna(comm)); | |
| 6066 | ||
| 6067 | return 1; | |
| 6068 | } | |
| 6069 | ||
| 6070 | int unit_warn_leftover_processes(Unit *u, bool start) { | |
| 6071 | _cleanup_free_ char *cgroup = NULL; | |
| 6072 | int r; | |
| 6073 | ||
| 6074 | assert(u); | |
| 6075 | ||
| 6076 | r = unit_get_cgroup_path_with_fallback(u, &cgroup); | |
| 6077 | if (r < 0) | |
| 6078 | return r; | |
| 6079 | ||
| 6080 | return cg_kill_recursive( | |
| 6081 | cgroup, | |
| 6082 | /* sig= */ 0, | |
| 6083 | /* flags= */ 0, | |
| 6084 | /* killed_pids= */ NULL, | |
| 6085 | start ? unit_log_leftover_process_start : unit_log_leftover_process_stop, | |
| 6086 | u); | |
| 6087 | } | |
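| | ||
| | /* Note: with sig=0 nothing is actually killed here; cg_kill_recursive() then effectively just enumerates the | |
| | * cgroup and invokes the logging callback per PID, so the return value is positive iff a left-over process was | |
| | * reported. A minimal sketch of a start-time call site: | |
| | * | |
| | *     r = unit_warn_leftover_processes(u, true);    // start=true → warning, not just an info message | |
| | *     if (r < 0) | |
| | *             log_unit_debug_errno(u, r, "Failed to check for left-over processes, ignoring: %m"); | |
| | */ | |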
| 6088 | ||
| 6089 | bool unit_needs_console(Unit *u) { | |
| 6090 | ExecContext *ec; | |
| 6091 | UnitActiveState state; | |
| 6092 | ||
| 6093 | assert(u); | |
| 6094 | ||
| 6095 | state = unit_active_state(u); | |
| 6096 | ||
| 6097 | if (UNIT_IS_INACTIVE_OR_FAILED(state)) | |
| 6098 | return false; | |
| 6099 | ||
| 6100 | if (UNIT_VTABLE(u)->needs_console) | |
| 6101 | return UNIT_VTABLE(u)->needs_console(u); | |
| 6102 | ||
| 6103 | /* If this unit type doesn't implement this call, let's use a generic fallback implementation: */ | |
| 6104 | ec = unit_get_exec_context(u); | |
| 6105 | if (!ec) | |
| 6106 | return false; | |
| 6107 | ||
| 6108 | return exec_context_may_touch_console(ec); | |
| 6109 | } | |
| 6110 | ||
| 6111 | int unit_pid_attachable(Unit *u, PidRef *pid, sd_bus_error *error) { | |
| 6112 | int r; | |
| 6113 | ||
| 6114 | assert(u); | |
| 6115 | ||
| 6116 | /* Checks whether the specified PID is generally good for attaching, i.e. a valid PID, not our manager itself, | |
| 6117 | * and not a kernel thread either */ | |
| 6118 | ||
| 6119 | /* First, a simple range check */ | |
| 6120 | if (!pidref_is_set(pid)) | |
| 6121 | return sd_bus_error_set(error, SD_BUS_ERROR_INVALID_ARGS, "Process identifier is not valid."); | |
| 6122 | ||
| 6123 | /* Some extra safety check */ | |
| 6124 | if (pid->pid == 1 || pidref_is_self(pid)) | |
| 6125 | return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process " PID_FMT " is a manager process, refusing.", pid->pid); | |
| 6126 | ||
| 6127 | /* Don't even begin to bother with kernel threads */ | |
| 6128 | r = pidref_is_kernel_thread(pid); | |
| 6129 | if (r == -ESRCH) | |
| 6130 | return sd_bus_error_setf(error, SD_BUS_ERROR_UNIX_PROCESS_ID_UNKNOWN, "Process with ID " PID_FMT " does not exist.", pid->pid); | |
| 6131 | if (r < 0) | |
| 6132 | return sd_bus_error_set_errnof(error, r, "Failed to determine whether process " PID_FMT " is a kernel thread: %m", pid->pid); | |
| 6133 | if (r > 0) | |
| 6134 | return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process " PID_FMT " is a kernel thread, refusing.", pid->pid); | |
| 6135 | ||
| 6136 | return 0; | |
| 6137 | } | |
| 6138 | ||
| 6139 | int unit_get_log_level_max(const Unit *u) { | |
| 6140 | if (u) { | |
| 6141 | if (u->debug_invocation) | |
| 6142 | return LOG_DEBUG; | |
| 6143 | ||
| 6144 | ExecContext *ec = unit_get_exec_context(u); | |
| 6145 | if (ec && ec->log_level_max >= 0) | |
| 6146 | return ec->log_level_max; | |
| 6147 | } | |
| 6148 | ||
| 6149 | return log_get_max_level(); | |
| 6150 | } | |
| 6151 | ||
| 6152 | bool unit_log_level_test(const Unit *u, int level) { | |
| 6153 | assert(u); | |
| 6154 | return LOG_PRI(level) <= unit_get_log_level_max(u); | |
| 6155 | } | |
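| | ||
| | /* Illustrative sketch: unit_log_level_test() is the cheap guard to use before assembling expensive log | |
| | * messages, so that per-unit LogLevelMax= settings and debug invocations are honored: | |
| | * | |
| | *     if (unit_log_level_test(u, LOG_DEBUG)) { | |
| | *             _cleanup_free_ char *dump = NULL;    // hypothetical, expensive to build | |
| | *             ... | |
| | *             log_unit_debug(u, "State dump: %s", strna(dump)); | |
| | *     } | |
| | */ | |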
| 6156 | ||
| 6157 | void unit_log_success(Unit *u) { | |
| 6158 | assert(u); | |
| 6159 | ||
| 6160 | /* Let's show the message "Deactivated successfully" at debug level (when the manager runs in user mode) rather | |
| 6161 | * than at info level. This message has low information value for regular users, and it might be a bit | |
| 6162 | * overwhelming on a system with a lot of devices. */ | |
| 6163 | log_unit_struct(u, | |
| 6164 | MANAGER_IS_USER(u->manager) ? LOG_DEBUG : LOG_INFO, | |
| 6165 | LOG_MESSAGE_ID(SD_MESSAGE_UNIT_SUCCESS_STR), | |
| 6166 | LOG_UNIT_INVOCATION_ID(u), | |
| 6167 | LOG_UNIT_MESSAGE(u, "Deactivated successfully.")); | |
| 6168 | } | |
| 6169 | ||
| 6170 | void unit_log_failure(Unit *u, const char *result) { | |
| 6171 | assert(u); | |
| 6172 | assert(result); | |
| 6173 | ||
| 6174 | log_unit_struct(u, LOG_WARNING, | |
| 6175 | LOG_MESSAGE_ID(SD_MESSAGE_UNIT_FAILURE_RESULT_STR), | |
| 6176 | LOG_UNIT_INVOCATION_ID(u), | |
| 6177 | LOG_UNIT_MESSAGE(u, "Failed with result '%s'.", result), | |
| 6178 | LOG_ITEM("UNIT_RESULT=%s", result)); | |
| 6179 | } | |
| 6180 | ||
| 6181 | void unit_log_skip(Unit *u, const char *result) { | |
| 6182 | assert(u); | |
| 6183 | assert(result); | |
| 6184 | ||
| 6185 | log_unit_struct(u, LOG_INFO, | |
| 6186 | LOG_MESSAGE_ID(SD_MESSAGE_UNIT_SKIPPED_STR), | |
| 6187 | LOG_UNIT_INVOCATION_ID(u), | |
| 6188 | LOG_UNIT_MESSAGE(u, "Skipped due to '%s'.", result), | |
| 6189 | LOG_ITEM("UNIT_RESULT=%s", result)); | |
| 6190 | } | |
| 6191 | ||
| 6192 | void unit_log_process_exit( | |
| 6193 | Unit *u, | |
| 6194 | const char *kind, | |
| 6195 | const char *command, | |
| 6196 | bool success, | |
| 6197 | int code, | |
| 6198 | int status) { | |
| 6199 | ||
| 6200 | int level; | |
| 6201 | ||
| 6202 | assert(u); | |
| 6203 | assert(kind); | |
| 6204 | ||
| 6205 | /* If this is a successful exit, let's log about the exit code on DEBUG level. If this is a failure | |
| 6206 | * and the process exited on its own via exit(), then let's make this a NOTICE, under the assumption | |
| 6207 | * that the service already logged the reason at a higher log level on its own. Otherwise, make it a | |
| 6208 | * WARNING. */ | |
| 6209 | if (success) | |
| 6210 | level = LOG_DEBUG; | |
| 6211 | else if (code == CLD_EXITED) | |
| 6212 | level = LOG_NOTICE; | |
| 6213 | else | |
| 6214 | level = LOG_WARNING; | |
| 6215 | ||
| 6216 | log_unit_struct(u, level, | |
| 6217 | LOG_MESSAGE_ID(SD_MESSAGE_UNIT_PROCESS_EXIT_STR), | |
| 6218 | LOG_UNIT_MESSAGE(u, "%s exited, code=%s, status=%i/%s%s", | |
| 6219 | kind, | |
| 6220 | sigchld_code_to_string(code), status, | |
| 6221 | strna(code == CLD_EXITED | |
| 6222 | ? exit_status_to_string(status, EXIT_STATUS_FULL) | |
| 6223 | : signal_to_string(status)), | |
| 6224 | success ? " (success)" : ""), | |
| 6225 | LOG_ITEM("EXIT_CODE=%s", sigchld_code_to_string(code)), | |
| 6226 | LOG_ITEM("EXIT_STATUS=%i", status), | |
| 6227 | LOG_ITEM("COMMAND=%s", strna(command)), | |
| 6228 | LOG_UNIT_INVOCATION_ID(u)); | |
| 6229 | } | |
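| | ||
| | /* Illustrative sketch: a typical call site in a unit type's SIGCHLD handling, assuming 'si' is the siginfo_t | |
| | * received for the unit's main process (the success test is simplified here; real unit types apply their | |
| | * SuccessExitStatus= logic first): | |
| | * | |
| | *     unit_log_process_exit(u, | |
| | *                           "Main process", | |
| | *                           "/usr/bin/example-daemon",    // hypothetical command | |
| | *                           si.si_code == CLD_EXITED && si.si_status == EXIT_SUCCESS, | |
| | *                           si.si_code, | |
| | *                           si.si_status); | |
| | */ | |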
| 6230 | ||
| 6231 | int unit_exit_status(Unit *u) { | |
| 6232 | assert(u); | |
| 6233 | ||
| 6234 | /* Returns the exit status to propagate for the most recent cycle of this unit: a value in the range 0…255 if | |
| 6235 | * there's something to propagate, -EOPNOTSUPP if the concept does not apply to this unit type, -ENODATA if no | |
| 6236 | * data is currently known (for example because the unit hasn't deactivated yet), and -EBADE if the main service | |
| 6237 | * process exited abnormally (i.e. by signal or with a coredump). */ | |
| 6238 | ||
| 6239 | if (!UNIT_VTABLE(u)->exit_status) | |
| 6240 | return -EOPNOTSUPP; | |
| 6241 | ||
| 6242 | return UNIT_VTABLE(u)->exit_status(u); | |
| 6243 | } | |
| 6244 | ||
| 6245 | int unit_failure_action_exit_status(Unit *u) { | |
| 6246 | int r; | |
| 6247 | ||
| 6248 | assert(u); | |
| 6249 | ||
| 6250 | /* Returns the exit status to propagate on failure, or an error if there's nothing to propagate */ | |
| 6251 | ||
| 6252 | if (u->failure_action_exit_status >= 0) | |
| 6253 | return u->failure_action_exit_status; | |
| 6254 | ||
| 6255 | r = unit_exit_status(u); | |
| 6256 | if (r == -EBADE) /* Exited, but not cleanly (i.e. by signal or such) */ | |
| 6257 | return 255; | |
| 6258 | ||
| 6259 | return r; | |
| 6260 | } | |
| 6261 | ||
| 6262 | int unit_success_action_exit_status(Unit *u) { | |
| 6263 | int r; | |
| 6264 | ||
| 6265 | assert(u); | |
| 6266 | ||
| 6267 | /* Returns the exit status to propagate on success, or an error if there's nothing to propagate */ | |
| 6268 | ||
| 6269 | if (u->success_action_exit_status >= 0) | |
| 6270 | return u->success_action_exit_status; | |
| 6271 | ||
| 6272 | r = unit_exit_status(u); | |
| 6273 | if (r == -EBADE) /* Exited, but not cleanly (i.e. by signal or such) */ | |
| 6274 | return 255; | |
| 6275 | ||
| 6276 | return r; | |
| 6277 | } | |
| 6278 | ||
| 6279 | int unit_test_trigger_loaded(Unit *u) { | |
| 6280 | Unit *trigger; | |
| 6281 | ||
| 6282 | /* Tests whether the unit to trigger is loaded */ | |
| 6283 | ||
| 6284 | trigger = UNIT_TRIGGER(u); | |
| 6285 | if (!trigger) | |
| 6286 | return log_unit_error_errno(u, SYNTHETIC_ERRNO(ENOENT), | |
| 6287 | "Refusing to start, no unit to trigger."); | |
| 6288 | if (trigger->load_state != UNIT_LOADED) | |
| 6289 | return log_unit_error_errno(u, SYNTHETIC_ERRNO(ENOENT), | |
| 6290 | "Refusing to start, unit %s to trigger not loaded.", trigger->id); | |
| 6291 | ||
| 6292 | return 0; | |
| 6293 | } | |
| 6294 | ||
| 6295 | void unit_destroy_runtime_data(Unit *u, const ExecContext *context, bool destroy_runtime_dir) { | |
| 6296 | assert(u); | |
| 6297 | assert(u->manager); | |
| 6298 | assert(context); | |
| 6299 | ||
| 6300 | /* EXEC_PRESERVE_RESTART is handled via unit_release_resources()! */ | |
| 6301 | if (destroy_runtime_dir && context->runtime_directory_preserve_mode == EXEC_PRESERVE_NO) | |
| 6302 | exec_context_destroy_runtime_directory(context, u->manager->prefix[EXEC_DIRECTORY_RUNTIME]); | |
| 6303 | ||
| 6304 | exec_context_destroy_credentials(context, u->manager->prefix[EXEC_DIRECTORY_RUNTIME], u->id); | |
| 6305 | exec_context_destroy_mount_ns_dir(u); | |
| 6306 | } | |
| 6307 | ||
| 6308 | int unit_clean(Unit *u, ExecCleanMask mask) { | |
| 6309 | UnitActiveState state; | |
| 6310 | ||
| 6311 | assert(u); | |
| 6312 | ||
| 6313 | /* Special return values: | |
| 6314 | * | |
| 6315 | * -EOPNOTSUPP → cleaning not supported for this unit type | |
| 6316 | * -EUNATCH → cleaning not defined for this resource type | |
| 6317 | * -EBUSY → unit currently can't be cleaned since it's running or not properly loaded, or has | |
| 6318 | * a job queued or similar | |
| 6319 | */ | |
| 6320 | ||
| 6321 | if (!UNIT_VTABLE(u)->clean) | |
| 6322 | return -EOPNOTSUPP; | |
| 6323 | ||
| 6324 | if (mask == 0) | |
| 6325 | return -EUNATCH; | |
| 6326 | ||
| 6327 | if (u->load_state != UNIT_LOADED) | |
| 6328 | return -EBUSY; | |
| 6329 | ||
| 6330 | if (u->job) | |
| 6331 | return -EBUSY; | |
| 6332 | ||
| 6333 | state = unit_active_state(u); | |
| 6334 | if (state != UNIT_INACTIVE) | |
| 6335 | return -EBUSY; | |
| 6336 | ||
| 6337 | return UNIT_VTABLE(u)->clean(u, mask); | |
| 6338 | } | |
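| | ||
| | /* Illustrative sketch: a caller asking for a unit's cache and log directories to be cleaned might do the | |
| | * following (assuming the EXEC_CLEAN_* flags from execute.h): | |
| | * | |
| | *     r = unit_clean(u, EXEC_CLEAN_CACHE|EXEC_CLEAN_LOGS); | |
| | *     if (r == -EBUSY) | |
| | *             ... unit is running, not fully loaded, or has a job queued; retry later ... | |
| | */ | |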
| 6339 | ||
| 6340 | int unit_can_clean(Unit *u, ExecCleanMask *ret) { | |
| 6341 | assert(u); | |
| 6342 | ||
| 6343 | if (!UNIT_VTABLE(u)->clean || | |
| 6344 | u->load_state != UNIT_LOADED) { | |
| 6345 | *ret = 0; | |
| 6346 | return 0; | |
| 6347 | } | |
| 6348 | ||
| 6349 | /* When the clean() method is set, can_clean() really should be set too */ | |
| 6350 | assert(UNIT_VTABLE(u)->can_clean); | |
| 6351 | ||
| 6352 | return UNIT_VTABLE(u)->can_clean(u, ret); | |
| 6353 | } | |
| 6354 | ||
| 6355 | bool unit_can_start_refuse_manual(Unit *u) { | |
| 6356 | return unit_can_start(u) && !u->refuse_manual_start; | |
| 6357 | } | |
| 6358 | ||
| 6359 | bool unit_can_stop_refuse_manual(Unit *u) { | |
| 6360 | return unit_can_stop(u) && !u->refuse_manual_stop; | |
| 6361 | } | |
| 6362 | ||
| 6363 | bool unit_can_isolate_refuse_manual(Unit *u) { | |
| 6364 | return unit_can_isolate(u) && !u->refuse_manual_start; | |
| 6365 | } | |
| 6366 | ||
| 6367 | void unit_next_freezer_state(Unit *u, FreezerAction action, FreezerState *ret_next, FreezerState *ret_objective) { | |
| 6368 | FreezerState current, parent, next, objective; | |
| 6369 | ||
| 6370 | assert(u); | |
| 6371 | assert(action >= 0); | |
| 6372 | assert(action < _FREEZER_ACTION_MAX); | |
| 6373 | assert(ret_next); | |
| 6374 | assert(ret_objective); | |
| 6375 | ||
| 6376 | /* This function determines the correct freezer state transitions for a unit | |
| 6377 | * given the action being requested. It returns the next state, and also the "objective", | |
| 6378 | * which is either FREEZER_FROZEN or FREEZER_RUNNING, depending on what actual state we | |
| 6379 | * ultimately want to achieve. */ | |
| 6380 | ||
| 6381 | current = u->freezer_state; | |
| 6382 | ||
| 6383 | Unit *slice = UNIT_GET_SLICE(u); | |
| 6384 | if (slice) | |
| 6385 | parent = slice->freezer_state; | |
| 6386 | else | |
| 6387 | parent = FREEZER_RUNNING; | |
| 6388 | ||
| 6389 | switch (action) { | |
| 6390 | ||
| 6391 | case FREEZER_FREEZE: | |
| 6392 | /* We always "promote" a freeze initiated by parent into a normal freeze */ | |
| 6393 | if (IN_SET(current, FREEZER_FROZEN, FREEZER_FROZEN_BY_PARENT)) | |
| 6394 | next = FREEZER_FROZEN; | |
| 6395 | else | |
| 6396 | next = FREEZER_FREEZING; | |
| 6397 | break; | |
| 6398 | ||
| 6399 | case FREEZER_THAW: | |
| 6400 | /* Thawing is the most complicated operation here, because we can't thaw a unit | |
| 6401 | * if its parent is frozen. So we instead "demote" a normal freeze into a freeze | |
| 6402 | * initiated by parent if the parent is frozen */ | |
| 6403 | if (IN_SET(current, FREEZER_RUNNING, FREEZER_THAWING, | |
| 6404 | FREEZER_FREEZING_BY_PARENT, FREEZER_FROZEN_BY_PARENT)) /* Should usually be refused by unit_freezer_action */ | |
| 6405 | next = current; | |
| 6406 | else if (current == FREEZER_FREEZING) { | |
| 6407 | if (IN_SET(parent, FREEZER_RUNNING, FREEZER_THAWING)) | |
| 6408 | next = FREEZER_THAWING; | |
| 6409 | else | |
| 6410 | next = FREEZER_FREEZING_BY_PARENT; | |
| 6411 | } else if (current == FREEZER_FROZEN) { | |
| 6412 | if (IN_SET(parent, FREEZER_RUNNING, FREEZER_THAWING)) | |
| 6413 | next = FREEZER_THAWING; | |
| 6414 | else | |
| 6415 | next = FREEZER_FROZEN_BY_PARENT; | |
| 6416 | } else | |
| 6417 | assert_not_reached(); | |
| 6418 | break; | |
| 6419 | ||
| 6420 | case FREEZER_PARENT_FREEZE: | |
| 6421 | /* We need to avoid accidentally demoting units frozen manually */ | |
| 6422 | if (IN_SET(current, FREEZER_FREEZING, FREEZER_FROZEN, FREEZER_FROZEN_BY_PARENT)) | |
| 6423 | next = current; | |
| 6424 | else | |
| 6425 | next = FREEZER_FREEZING_BY_PARENT; | |
| 6426 | break; | |
| 6427 | ||
| 6428 | case FREEZER_PARENT_THAW: | |
| 6429 | /* We don't want to thaw units from a parent if they were frozen | |
| 6430 | * manually, so for such units this action is a no-op */ | |
| 6431 | if (IN_SET(current, FREEZER_RUNNING, FREEZER_FREEZING, FREEZER_FROZEN)) | |
| 6432 | next = current; | |
| 6433 | else | |
| 6434 | next = FREEZER_THAWING; | |
| 6435 | break; | |
| 6436 | ||
| 6437 | default: | |
| 6438 | assert_not_reached(); | |
| 6439 | } | |
| 6440 | ||
| 6441 | objective = freezer_state_objective(next); | |
| 6442 | assert(IN_SET(objective, FREEZER_RUNNING, FREEZER_FROZEN)); | |
| 6443 | ||
| 6444 | *ret_next = next; | |
| 6445 | *ret_objective = objective; | |
| 6446 | } | |
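| | ||
| | /* Illustrative sketch of the "demotion" case above: a unit that was frozen explicitly, inside a slice that is | |
| | * itself frozen, cannot actually thaw yet when asked to: | |
| | * | |
| | *     FreezerState next, objective; | |
| | * | |
| | *     // u->freezer_state == FREEZER_FROZEN, UNIT_GET_SLICE(u)->freezer_state == FREEZER_FROZEN | |
| | *     unit_next_freezer_state(u, FREEZER_THAW, &next, &objective); | |
| | *     // → next == FREEZER_FROZEN_BY_PARENT, objective == FREEZER_FROZEN | |
| | */ | |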
| 6447 | ||
| 6448 | bool unit_can_freeze(const Unit *u) { | |
| 6449 | assert(u); | |
| 6450 | ||
| 6451 | if (unit_has_name(u, SPECIAL_ROOT_SLICE) || unit_has_name(u, SPECIAL_INIT_SCOPE)) | |
| 6452 | return false; | |
| 6453 | ||
| 6454 | if (UNIT_VTABLE(u)->can_freeze) | |
| 6455 | return UNIT_VTABLE(u)->can_freeze(u); | |
| 6456 | ||
| 6457 | return UNIT_VTABLE(u)->freezer_action; | |
| 6458 | } | |
| 6459 | ||
| 6460 | void unit_set_freezer_state(Unit *u, FreezerState state) { | |
| 6461 | assert(u); | |
| 6462 | assert(state >= 0); | |
| 6463 | assert(state < _FREEZER_STATE_MAX); | |
| 6464 | ||
| 6465 | if (u->freezer_state == state) | |
| 6466 | return; | |
| 6467 | ||
| 6468 | log_unit_debug(u, "Freezer state changed %s -> %s", | |
| 6469 | freezer_state_to_string(u->freezer_state), freezer_state_to_string(state)); | |
| 6470 | ||
| 6471 | u->freezer_state = state; | |
| 6472 | ||
| 6473 | unit_add_to_dbus_queue(u); | |
| 6474 | } | |
| 6475 | ||
| 6476 | void unit_freezer_complete(Unit *u, FreezerState kernel_state) { | |
| 6477 | bool expected; | |
| 6478 | ||
| 6479 | assert(u); | |
| 6480 | assert(IN_SET(kernel_state, FREEZER_RUNNING, FREEZER_FROZEN)); | |
| 6481 | ||
| 6482 | expected = IN_SET(u->freezer_state, FREEZER_RUNNING, FREEZER_THAWING) == (kernel_state == FREEZER_RUNNING); | |
| 6483 | ||
| 6484 | unit_set_freezer_state(u, expected ? freezer_state_finish(u->freezer_state) : kernel_state); | |
| 6485 | log_unit_info(u, "Unit now %s.", u->freezer_state == FREEZER_RUNNING ? "thawed" : | |
| 6486 | freezer_state_to_string(u->freezer_state)); | |
| 6487 | ||
| 6488 | /* If the cgroup's final state contradicts what we requested, report the operation as canceled. */ | |
| 6489 | bus_unit_send_pending_freezer_message(u, /* canceled = */ !expected); | |
| 6490 | } | |
| 6491 | ||
| 6492 | int unit_freezer_action(Unit *u, FreezerAction action) { | |
| 6493 | UnitActiveState s; | |
| 6494 | int r; | |
| 6495 | ||
| 6496 | assert(u); | |
| 6497 | assert(IN_SET(action, FREEZER_FREEZE, FREEZER_THAW)); | |
| 6498 | ||
| 6499 | if (!unit_can_freeze(u)) | |
| 6500 | return -EOPNOTSUPP; | |
| 6501 | ||
| 6502 | if (u->job) | |
| 6503 | return -EBUSY; | |
| 6504 | ||
| 6505 | if (u->load_state != UNIT_LOADED) | |
| 6506 | return -EHOSTDOWN; | |
| 6507 | ||
| 6508 | s = unit_active_state(u); | |
| 6509 | if (s != UNIT_ACTIVE) | |
| 6510 | return -EHOSTDOWN; | |
| 6511 | ||
| 6512 | if (action == FREEZER_FREEZE && IN_SET(u->freezer_state, FREEZER_FREEZING, FREEZER_FREEZING_BY_PARENT)) | |
| 6513 | return -EALREADY; | |
| 6514 | if (action == FREEZER_THAW && u->freezer_state == FREEZER_THAWING) | |
| 6515 | return -EALREADY; | |
| 6516 | if (action == FREEZER_THAW && IN_SET(u->freezer_state, FREEZER_FREEZING_BY_PARENT, FREEZER_FROZEN_BY_PARENT)) | |
| 6517 | return -EDEADLK; | |
| 6518 | ||
| 6519 | r = UNIT_VTABLE(u)->freezer_action(u, action); | |
| 6520 | if (r <= 0) | |
| 6521 | return r; | |
| 6522 | ||
| 6523 | assert(IN_SET(u->freezer_state, FREEZER_FREEZING, FREEZER_FREEZING_BY_PARENT, FREEZER_THAWING)); | |
| 6524 | return 1; | |
| 6525 | } | |
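| | ||
| | /* Illustrative sketch: a caller (e.g. a bus method handler) might map the return codes of unit_freezer_action() | |
| | * roughly like this (the error strings are made up for the example): | |
| | * | |
| | *     r = unit_freezer_action(u, FREEZER_FREEZE); | |
| | *     if (r == -EOPNOTSUPP) | |
| | *             return sd_bus_error_set(error, SD_BUS_ERROR_NOT_SUPPORTED, "Unit cannot be frozen."); | |
| | *     if (r == -EBUSY) | |
| | *             return sd_bus_error_set(error, BUS_ERROR_UNIT_BUSY, "Unit has a job in progress."); | |
| | *     if (r == -EHOSTDOWN) | |
| | *             return sd_bus_error_set(error, BUS_ERROR_UNIT_INACTIVE, "Unit is not active."); | |
| | *     if (r < 0)    // including -EALREADY and -EDEADLK from above | |
| | *             return r; | |
| | *     // r > 0: state transition initiated, completion is reported asynchronously | |
| | */ | |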
| 6526 | ||
| 6527 | Condition *unit_find_failed_condition(Unit *u) { | |
| 6528 | Condition *failed_trigger = NULL; | |
| 6529 | bool has_succeeded_trigger = false; | |
| 6530 | ||
| 6531 | if (u->condition_result) | |
| 6532 | return NULL; | |
| 6533 | ||
| 6534 | LIST_FOREACH(conditions, c, u->conditions) | |
| 6535 | if (c->trigger) { | |
| 6536 | if (c->result == CONDITION_SUCCEEDED) | |
| 6537 | has_succeeded_trigger = true; | |
| 6538 | else if (!failed_trigger) | |
| 6539 | failed_trigger = c; | |
| 6540 | } else if (c->result != CONDITION_SUCCEEDED) | |
| 6541 | return c; | |
| 6542 | ||
| 6543 | return failed_trigger && !has_succeeded_trigger ? failed_trigger : NULL; | |
| 6544 | } | |
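| | ||
| | /* Illustrative sketch of the trigger semantics: given a hypothetical unit carrying | |
| | * | |
| | *     ConditionPathExists=|/nonexistent-a | |
| | *     ConditionPathExists=|/nonexistent-b | |
| | * | |
| | * no trigger condition succeeded, so the first failed trigger condition is returned. If either path existed, | |
| | * the trigger group as a whole would have succeeded and NULL would be returned instead. */ | |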
| 6545 | ||
| 6546 | int unit_can_live_mount(Unit *u, sd_bus_error *error) { | |
| 6547 | assert(u); | |
| 6548 | ||
| 6549 | if (!UNIT_VTABLE(u)->live_mount) | |
| 6550 | return sd_bus_error_setf( | |
| 6551 | error, | |
| 6552 | SD_BUS_ERROR_NOT_SUPPORTED, | |
| 6553 | "Live mounting not supported by unit type '%s'", | |
| 6554 | unit_type_to_string(u->type)); | |
| 6555 | ||
| 6556 | if (u->load_state != UNIT_LOADED) | |
| 6557 | return sd_bus_error_setf( | |
| 6558 | error, | |
| 6559 | BUS_ERROR_NO_SUCH_UNIT, | |
| 6560 | "Unit '%s' not loaded, cannot live mount", | |
| 6561 | u->id); | |
| 6562 | ||
| 6563 | if (!UNIT_VTABLE(u)->can_live_mount) | |
| 6564 | return 0; | |
| 6565 | ||
| 6566 | return UNIT_VTABLE(u)->can_live_mount(u, error); | |
| 6567 | } | |
| 6568 | ||
| 6569 | int unit_live_mount( | |
| 6570 | Unit *u, | |
| 6571 | const char *src, | |
| 6572 | const char *dst, | |
| 6573 | sd_bus_message *message, | |
| 6574 | MountInNamespaceFlags flags, | |
| 6575 | const MountOptions *options, | |
| 6576 | sd_bus_error *error) { | |
| 6577 | ||
| 6578 | assert(u); | |
| 6579 | assert(UNIT_VTABLE(u)->live_mount); | |
| 6580 | ||
| 6581 | if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u))) { | |
| 6582 | log_unit_debug(u, "Unit not active, cannot perform live mount."); | |
| 6583 | return sd_bus_error_setf( | |
| 6584 | error, | |
| 6585 | BUS_ERROR_UNIT_INACTIVE, | |
| 6586 | "Live mounting '%s' on '%s' for unit '%s' cannot be scheduled: unit not active", | |
| 6587 | src, | |
| 6588 | dst, | |
| 6589 | u->id); | |
| 6590 | } | |
| 6591 | ||
| 6592 | if (unit_active_state(u) == UNIT_REFRESHING) { | |
| 6593 | log_unit_debug(u, "Unit already live mounting, refusing further requests."); | |
| 6594 | return sd_bus_error_setf( | |
| 6595 | error, | |
| 6596 | BUS_ERROR_UNIT_BUSY, | |
| 6597 | "Live mounting '%s' on '%s' for unit '%s' cannot be scheduled: another live mount in progress", | |
| 6598 | src, | |
| 6599 | dst, | |
| 6600 | u->id); | |
| 6601 | } | |
| 6602 | ||
| 6603 | if (u->job) { | |
| 6604 | log_unit_debug(u, "Unit already has a job in progress, cannot live mount."); | |
| 6605 | return sd_bus_error_setf( | |
| 6606 | error, | |
| 6607 | BUS_ERROR_UNIT_BUSY, | |
| 6608 | "Live mounting '%s' on '%s' for unit '%s' cannot be scheduled: another operation in progress", | |
| 6609 | src, | |
| 6610 | dst, | |
| 6611 | u->id); | |
| 6612 | } | |
| 6613 | ||
| 6614 | return UNIT_VTABLE(u)->live_mount(u, src, dst, message, flags, options, error); | |
| 6615 | } | |
| 6616 | ||
| 6617 | static const char* const collect_mode_table[_COLLECT_MODE_MAX] = { | |
| 6618 | [COLLECT_INACTIVE] = "inactive", | |
| 6619 | [COLLECT_INACTIVE_OR_FAILED] = "inactive-or-failed", | |
| 6620 | }; | |
| 6621 | ||
| 6622 | DEFINE_STRING_TABLE_LOOKUP(collect_mode, CollectMode); | |
| 6623 | ||
| 6624 | Unit* unit_has_dependency(const Unit *u, UnitDependencyAtom atom, Unit *other) { | |
| 6625 | Unit *i; | |
| 6626 | ||
| 6627 | assert(u); | |
| 6628 | ||
| 6629 | /* Checks if the unit has a dependency on 'other' with the specified dependency atom. If 'other' is | |
| 6630 | * NULL, checks if the unit has *any* dependency of that atom. Returns 'other' if found (or, if 'other' | |
| 6631 | * is NULL, the first entry found), or NULL if not found. */ | |
| 6632 | ||
| 6633 | UNIT_FOREACH_DEPENDENCY(i, u, atom) | |
| 6634 | if (!other || other == i) | |
| 6635 | return i; | |
| 6636 | ||
| 6637 | return NULL; | |
| 6638 | } | |
| 6639 | ||
| 6640 | int unit_get_dependency_array(const Unit *u, UnitDependencyAtom atom, Unit ***ret_array) { | |
| 6641 | _cleanup_free_ Unit **array = NULL; | |
| 6642 | size_t n = 0; | |
| 6643 | Unit *other; | |
| 6644 | ||
| 6645 | assert(u); | |
| 6646 | assert(ret_array); | |
| 6647 | ||
| 6648 | /* Gets the list of units matching a specific atom as an array. This is useful when iterating through | |
| 6649 | * dependencies while modifying them: the array is an "atomic snapshot" of sorts that can still be read | |
| 6650 | * while the dependency table is being updated. */ | |
| 6651 | ||
| 6652 | UNIT_FOREACH_DEPENDENCY(other, u, atom) { | |
| 6653 | if (!GREEDY_REALLOC(array, n + 1)) | |
| 6654 | return -ENOMEM; | |
| 6655 | ||
| 6656 | array[n++] = other; | |
| 6657 | } | |
| 6658 | ||
| 6659 | *ret_array = TAKE_PTR(array); | |
| 6660 | ||
| 6661 | assert(n <= INT_MAX); | |
| 6662 | return (int) n; | |
| 6663 | } | |
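| | ||
| | /* Illustrative sketch: the returned snapshot stays valid even if the loop body ends up adding or dropping | |
| | * dependencies of 'u': | |
| | * | |
| | *     _cleanup_free_ Unit **array = NULL; | |
| | *     int n; | |
| | * | |
| | *     n = unit_get_dependency_array(u, UNIT_ATOM_AFTER, &array); | |
| | *     if (n < 0) | |
| | *             return n; | |
| | * | |
| | *     FOREACH_ARRAY(other, array, n) | |
| | *             mutate_dependencies_somehow(*other);    // hypothetical helper | |
| | */ | |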
| 6664 | ||
| 6665 | int unit_get_transitive_dependency_set(Unit *u, UnitDependencyAtom atom, Set **ret) { | |
| 6666 | _cleanup_set_free_ Set *units = NULL, *queue = NULL; | |
| 6667 | Unit *other; | |
| 6668 | int r; | |
| 6669 | ||
| 6670 | assert(u); | |
| 6671 | assert(ret); | |
| 6672 | ||
| 6673 | /* Similar to unit_get_dependency_array(), but also follows the same dependency atom transitively through the units found. */ | |
| 6674 | ||
| 6675 | do { | |
| 6676 | UNIT_FOREACH_DEPENDENCY(other, u, atom) { | |
| 6677 | r = set_ensure_put(&units, NULL, other); | |
| 6678 | if (r < 0) | |
| 6679 | return r; | |
| 6680 | if (r == 0) | |
| 6681 | continue; | |
| 6682 | r = set_ensure_put(&queue, NULL, other); | |
| 6683 | if (r < 0) | |
| 6684 | return r; | |
| 6685 | } | |
| 6686 | } while ((u = set_steal_first(queue))); | |
| 6687 | ||
| 6688 | *ret = TAKE_PTR(units); | |
| 6689 | return 0; | |
| 6690 | } | |
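| | ||
| | /* Illustrative sketch: collecting everything 'u' pulls in directly or indirectly, then iterating over the | |
| | * result (UNIT_ATOM_PULL_IN_START is just one possible atom here): | |
| | * | |
| | *     _cleanup_set_free_ Set *deps = NULL; | |
| | *     Unit *other; | |
| | * | |
| | *     r = unit_get_transitive_dependency_set(u, UNIT_ATOM_PULL_IN_START, &deps); | |
| | *     if (r < 0) | |
| | *             return r; | |
| | * | |
| | *     SET_FOREACH(other, deps) | |
| | *             log_unit_debug(u, "Transitively pulls in %s.", other->id); | |
| | */ | |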
| 6691 | ||
| 6692 | int unit_arm_timer( | |
| 6693 | Unit *u, | |
| 6694 | sd_event_source **source, | |
| 6695 | bool relative, | |
| 6696 | usec_t usec, | |
| 6697 | sd_event_time_handler_t handler) { | |
| 6698 | ||
| 6699 | int r; | |
| 6700 | ||
| 6701 | assert(u); | |
| 6702 | assert(source); | |
| 6703 | assert(handler); | |
| 6704 | ||
| 6705 | if (*source) { | |
| 6706 | if (usec == USEC_INFINITY) | |
| 6707 | return sd_event_source_set_enabled(*source, SD_EVENT_OFF); | |
| 6708 | ||
| 6709 | r = (relative ? sd_event_source_set_time_relative : sd_event_source_set_time)(*source, usec); | |
| 6710 | if (r < 0) | |
| 6711 | return r; | |
| 6712 | ||
| 6713 | return sd_event_source_set_enabled(*source, SD_EVENT_ONESHOT); | |
| 6714 | } | |
| 6715 | ||
| 6716 | if (usec == USEC_INFINITY) | |
| 6717 | return 0; | |
| 6718 | ||
| 6719 | r = (relative ? sd_event_add_time_relative : sd_event_add_time)( | |
| 6720 | u->manager->event, | |
| 6721 | source, | |
| 6722 | CLOCK_MONOTONIC, | |
| 6723 | usec, 0, | |
| 6724 | handler, | |
| 6725 | u); | |
| 6726 | if (r < 0) | |
| 6727 | return r; | |
| 6728 | ||
| 6729 | const char *d = strjoina(unit_type_to_string(u->type), "-timer"); | |
| 6730 | (void) sd_event_source_set_description(*source, d); | |
| 6731 | ||
| 6732 | return 0; | |
| 6733 | } | |
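| | ||
| | /* Illustrative sketch: a unit type arming its per-unit timeout, assuming a hypothetical 'timer_event_source' | |
| | * field and a dispatch function of matching sd_event_time_handler_t type: | |
| | * | |
| | *     r = unit_arm_timer(UNIT(s), &s->timer_event_source, | |
| | *                        true,                        // relative to now | |
| | *                        s->timeout_usec, | |
| | *                        example_dispatch_timeout);   // hypothetical handler | |
| | *     if (r < 0) | |
| | *             return r; | |
| | * | |
| | * Passing USEC_INFINITY later disables an existing source (or is a no-op if none exists yet). */ | |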
| 6734 | ||
| 6735 | bool unit_passes_filter(Unit *u, char * const *states, char * const *patterns) { | |
| 6736 | assert(u); | |
| 6737 | ||
| 6738 | if (!strv_isempty(states)) { | |
| 6739 | char * const *unit_states = STRV_MAKE( | |
| 6740 | unit_load_state_to_string(u->load_state), | |
| 6741 | unit_active_state_to_string(unit_active_state(u)), | |
| 6742 | unit_sub_state_to_string(u)); | |
| 6743 | ||
| 6744 | if (!strv_overlap(states, unit_states)) | |
| 6745 | return false; | |
| 6746 | } | |
| 6747 | ||
| 6748 | return strv_fnmatch_or_empty(patterns, u->id, FNM_NOESCAPE); | |
| 6749 | } | |
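| | ||
| | /* Illustrative sketch: this is the primitive behind "systemctl list-units --state=... PATTERN..."-style | |
| | * filtering; empty state and pattern lists match everything: | |
| | * | |
| | *     char **states = STRV_MAKE("active", "failed"); | |
| | *     char **patterns = STRV_MAKE("ssh*", "getty@*"); | |
| | * | |
| | *     if (unit_passes_filter(u, states, patterns)) | |
| | *             ... include u in the output ... | |
| | */ | |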
| 6750 | ||
| 6751 | static int unit_get_nice(Unit *u) { | |
| 6752 | ExecContext *ec; | |
| 6753 | ||
| 6754 | ec = unit_get_exec_context(u); | |
| 6755 | return ec ? ec->nice : 0; | |
| 6756 | } | |
| 6757 | ||
| 6758 | static uint64_t unit_get_cpu_weight(Unit *u) { | |
| 6759 | CGroupContext *cc; | |
| 6760 | ||
| 6761 | cc = unit_get_cgroup_context(u); | |
| 6762 | return cc ? cgroup_context_cpu_weight(cc, manager_state(u->manager)) : CGROUP_WEIGHT_DEFAULT; | |
| 6763 | } | |
| 6764 | ||
| 6765 | int unit_get_exec_quota_stats(Unit *u, ExecContext *c, ExecDirectoryType dt, uint64_t *ret_usage, uint64_t *ret_limit) { | |
| 6766 | int r; | |
| 6767 | _cleanup_close_ int fd = -EBADF; | |
| 6768 | _cleanup_free_ char *p = NULL, *pp = NULL; | |
| 6769 | ||
| 6770 | assert(u); | |
| 6771 | assert(c); | |
| 6772 | ||
| 6773 | if (c->directories[dt].n_items == 0) { | |
| 6774 | *ret_usage = UINT64_MAX; | |
| 6775 | *ret_limit = UINT64_MAX; | |
| 6776 | return 0; | |
| 6777 | } | |
| 6778 | ||
| 6779 | ExecDirectoryItem *i = &c->directories[dt].items[0]; | |
| 6780 | p = path_join(u->manager->prefix[dt], i->path); | |
| 6781 | if (!p) | |
| 6782 | return log_oom_debug(); | |
| 6783 | ||
| 6784 | if (exec_directory_is_private(c, dt)) { | |
| 6785 | pp = path_join(u->manager->prefix[dt], "private", i->path); | |
| 6786 | if (!pp) | |
| 6787 | return log_oom_debug(); | |
| 6788 | } | |
| 6789 | ||
| 6790 | const char *target_dir = pp ?: p; | |
| 6791 | fd = open(target_dir, O_PATH | O_CLOEXEC | O_DIRECTORY); | |
| 6792 | if (fd < 0) | |
| 6793 | return log_unit_debug_errno(u, errno, "Failed to get exec quota stats: %m"); | |
| 6794 | ||
| 6795 | uint32_t proj_id; | |
| 6796 | r = read_fs_xattr_fd(fd, /* ret_xflags = */ NULL, &proj_id); | |
| 6797 | if (r < 0) | |
| 6798 | return log_unit_debug_errno(u, r, "Failed to get project ID for exec quota stats: %m"); | |
| 6799 | ||
| 6800 | struct dqblk req; | |
| 6801 | r = quota_query_proj_id(fd, proj_id, &req); | |
| 6802 | if (r <= 0) | |
| 6803 | return log_unit_debug_errno(u, r, "Failed to query project ID for exec quota stats: %m"); | |
| 6804 | ||
| 6805 | *ret_usage = req.dqb_curspace; | |
| 6806 | *ret_limit = req.dqb_bhardlimit * QIF_DQBLKSIZE; | |
| 6807 | ||
| 6808 | return r; | |
| 6809 | } | |
| 6810 | ||
| 6811 | int unit_compare_priority(Unit *a, Unit *b) { | |
| 6812 | int ret; | |
| 6813 | ||
| 6814 | ret = CMP(a->type, b->type); | |
| 6815 | if (ret != 0) | |
| 6816 | return -ret; | |
| 6817 | ||
| 6818 | ret = CMP(unit_get_cpu_weight(a), unit_get_cpu_weight(b)); | |
| 6819 | if (ret != 0) | |
| 6820 | return -ret; | |
| 6821 | ||
| 6822 | ret = CMP(unit_get_nice(a), unit_get_nice(b)); | |
| 6823 | if (ret != 0) | |
| 6824 | return ret; | |
| 6825 | ||
| 6826 | return strcmp(a->id, b->id); | |
| 6827 | } | |
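| | ||
| | /* Illustrative sketch: the negated comparisons above mean a higher unit type index and a higher CPU weight | |
| | * sort first, a lower nice value (i.e. higher scheduling priority) sorts first, and the unit ID breaks ties. | |
| | * Sorting an array of Unit* hence needs a small trampoline, since typesafe_qsort() compares element pointers: | |
| | * | |
| | *     static int unit_ptr_compare(Unit * const *a, Unit * const *b) {    // hypothetical | |
| | *             return unit_compare_priority(*a, *b); | |
| | *     } | |
| | * | |
| | *     typesafe_qsort(units, n_units, unit_ptr_compare); | |
| | */ | |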
| 6828 | ||
| 6829 | const char* unit_log_field(const Unit *u) { | |
| 6830 | return MANAGER_IS_SYSTEM(ASSERT_PTR(u)->manager) ? "UNIT=" : "USER_UNIT="; | |
| 6831 | } | |
| 6832 | ||
| 6833 | const char* unit_invocation_log_field(const Unit *u) { | |
| 6834 | return MANAGER_IS_SYSTEM(ASSERT_PTR(u)->manager) ? "INVOCATION_ID=" : "USER_INVOCATION_ID="; | |
| 6835 | } | |
| 6836 | ||
| 6837 | const ActivationDetailsVTable * const activation_details_vtable[_UNIT_TYPE_MAX] = { | |
| 6838 | [UNIT_PATH] = &activation_details_path_vtable, | |
| 6839 | [UNIT_TIMER] = &activation_details_timer_vtable, | |
| 6840 | }; | |
| 6841 | ||
| 6842 | ActivationDetails *activation_details_new(Unit *trigger_unit) { | |
| 6843 | _cleanup_free_ ActivationDetails *details = NULL; | |
| 6844 | ||
| 6845 | assert(trigger_unit); | |
| 6846 | assert(trigger_unit->type != _UNIT_TYPE_INVALID); | |
| 6847 | assert(trigger_unit->id); | |
| 6848 | ||
| 6849 | details = malloc0(activation_details_vtable[trigger_unit->type]->object_size); | |
| 6850 | if (!details) | |
| 6851 | return NULL; | |
| 6852 | ||
| 6853 | *details = (ActivationDetails) { | |
| 6854 | .n_ref = 1, | |
| 6855 | .trigger_unit_type = trigger_unit->type, | |
| 6856 | }; | |
| 6857 | ||
| 6858 | details->trigger_unit_name = strdup(trigger_unit->id); | |
| 6859 | if (!details->trigger_unit_name) | |
| 6860 | return NULL; | |
| 6861 | ||
| 6862 | if (ACTIVATION_DETAILS_VTABLE(details)->init) | |
| 6863 | ACTIVATION_DETAILS_VTABLE(details)->init(details, trigger_unit); | |
| 6864 | ||
| 6865 | return TAKE_PTR(details); | |
| 6866 | } | |
| 6867 | ||
| 6868 | static ActivationDetails *activation_details_free(ActivationDetails *details) { | |
| 6869 | if (!details) | |
| 6870 | return NULL; | |
| 6871 | ||
| 6872 | if (ACTIVATION_DETAILS_VTABLE(details)->done) | |
| 6873 | ACTIVATION_DETAILS_VTABLE(details)->done(details); | |
| 6874 | ||
| 6875 | free(details->trigger_unit_name); | |
| 6876 | ||
| 6877 | return mfree(details); | |
| 6878 | } | |
| 6879 | ||
| 6880 | void activation_details_serialize(const ActivationDetails *details, FILE *f) { | |
| 6881 | if (!details || details->trigger_unit_type == _UNIT_TYPE_INVALID) | |
| 6882 | return; | |
| 6883 | ||
| 6884 | (void) serialize_item(f, "activation-details-unit-type", unit_type_to_string(details->trigger_unit_type)); | |
| 6885 | if (details->trigger_unit_name) | |
| 6886 | (void) serialize_item(f, "activation-details-unit-name", details->trigger_unit_name); | |
| 6887 | if (ACTIVATION_DETAILS_VTABLE(details)->serialize) | |
| 6888 | ACTIVATION_DETAILS_VTABLE(details)->serialize(details, f); | |
| 6889 | } | |
| 6890 | ||
| 6891 | int activation_details_deserialize(const char *key, const char *value, ActivationDetails **details) { | |
| 6892 | int r; | |
| 6893 | ||
| 6894 | assert(key); | |
| 6895 | assert(value); | |
| 6896 | assert(details); | |
| 6897 | ||
| 6898 | if (!*details) { | |
| 6899 | UnitType t; | |
| 6900 | ||
| 6901 | if (!streq(key, "activation-details-unit-type")) | |
| 6902 | return -EINVAL; | |
| 6903 | ||
| 6904 | t = unit_type_from_string(value); | |
| 6905 | if (t < 0) | |
| 6906 | return t; | |
| 6907 | ||
| 6908 | /* The activation details vtable has defined ops only for path and timer units */ | |
| 6909 | if (!activation_details_vtable[t]) | |
| 6910 | return -EINVAL; | |
| 6911 | ||
| 6912 | *details = malloc0(activation_details_vtable[t]->object_size); | |
| 6913 | if (!*details) | |
| 6914 | return -ENOMEM; | |
| 6915 | ||
| 6916 | **details = (ActivationDetails) { | |
| 6917 | .n_ref = 1, | |
| 6918 | .trigger_unit_type = t, | |
| 6919 | }; | |
| 6920 | ||
| 6921 | return 0; | |
| 6922 | } | |
| 6923 | ||
| 6924 | if (streq(key, "activation-details-unit-name")) { | |
| 6925 | r = free_and_strdup(&(*details)->trigger_unit_name, value); | |
| 6926 | if (r < 0) | |
| 6927 | return r; | |
| 6928 | ||
| 6929 | return 0; | |
| 6930 | } | |
| 6931 | ||
| 6932 | if (ACTIVATION_DETAILS_VTABLE(*details)->deserialize) | |
| 6933 | return ACTIVATION_DETAILS_VTABLE(*details)->deserialize(key, value, details); | |
| 6934 | ||
| 6935 | return -EINVAL; | |
| 6936 | } | |
| 6937 | ||
| 6938 | int activation_details_append_env(const ActivationDetails *details, char ***strv) { | |
| 6939 | int r = 0; | |
| 6940 | ||
| 6941 | assert(strv); | |
| 6942 | ||
| 6943 | if (!details) | |
| 6944 | return 0; | |
| 6945 | ||
| 6946 | if (!isempty(details->trigger_unit_name)) { | |
| 6947 | char *s = strjoin("TRIGGER_UNIT=", details->trigger_unit_name); | |
| 6948 | if (!s) | |
| 6949 | return -ENOMEM; | |
| 6950 | ||
| 6951 | r = strv_consume(strv, TAKE_PTR(s)); | |
| 6952 | if (r < 0) | |
| 6953 | return r; | |
| 6954 | } | |
| 6955 | ||
| 6956 | if (ACTIVATION_DETAILS_VTABLE(details)->append_env) { | |
| 6957 | r = ACTIVATION_DETAILS_VTABLE(details)->append_env(details, strv); | |
| 6958 | if (r < 0) | |
| 6959 | return r; | |
| 6960 | } | |
| 6961 | ||
| 6962 | return r + !isempty(details->trigger_unit_name); /* Return the number of variables added to the env block */ | |
| 6963 | } | |
| 6964 | ||
| 6965 | int activation_details_append_pair(const ActivationDetails *details, char ***strv) { | |
| 6966 | int r = 0; | |
| 6967 | ||
| 6968 | assert(strv); | |
| 6969 | ||
| 6970 | if (!details) | |
| 6971 | return 0; | |
| 6972 | ||
| 6973 | if (!isempty(details->trigger_unit_name)) { | |
| 6974 | r = strv_extend_many(strv, "trigger_unit", details->trigger_unit_name); | |
| 6975 | if (r < 0) | |
| 6976 | return r; | |
| 6977 | } | |
| 6978 | ||
| 6979 | if (ACTIVATION_DETAILS_VTABLE(details)->append_pair) { | |
| 6980 | r = ACTIVATION_DETAILS_VTABLE(details)->append_pair(details, strv); | |
| 6981 | if (r < 0) | |
| 6982 | return r; | |
| 6983 | } | |
| 6984 | ||
| 6985 | return r + !isempty(details->trigger_unit_name); /* Return the number of pairs added to the strv */ | |
| 6986 | } | |
| 6987 | ||
| 6988 | DEFINE_TRIVIAL_REF_UNREF_FUNC(ActivationDetails, activation_details, activation_details_free); | |
| 6989 | ||
| 6990 | static const char* const unit_mount_dependency_type_table[_UNIT_MOUNT_DEPENDENCY_TYPE_MAX] = { | |
| 6991 | [UNIT_MOUNT_WANTS] = "WantsMountsFor", | |
| 6992 | [UNIT_MOUNT_REQUIRES] = "RequiresMountsFor", | |
| 6993 | }; | |
| 6994 | ||
| 6995 | DEFINE_STRING_TABLE_LOOKUP(unit_mount_dependency_type, UnitMountDependencyType); | |
| 6996 | ||
| 6997 | static const char* const oom_policy_table[_OOM_POLICY_MAX] = { | |
| 6998 | [OOM_CONTINUE] = "continue", | |
| 6999 | [OOM_STOP] = "stop", | |
| 7000 | [OOM_KILL] = "kill", | |
| 7001 | }; | |
| 7002 | ||
| 7003 | DEFINE_STRING_TABLE_LOOKUP(oom_policy, OOMPolicy); | |
| 7004 | ||
| 7005 | UnitDependency unit_mount_dependency_type_to_dependency_type(UnitMountDependencyType t) { | |
| 7006 | switch (t) { | |
| 7007 | ||
| 7008 | case UNIT_MOUNT_WANTS: | |
| 7009 | return UNIT_WANTS; | |
| 7010 | ||
| 7011 | case UNIT_MOUNT_REQUIRES: | |
| 7012 | return UNIT_REQUIRES; | |
| 7013 | ||
| 7014 | default: | |
| 7015 | assert_not_reached(); | |
| 7016 | } | |
| 7017 | } |