1 /*-*- Mode: C; c-basic-offset: 8 -*-*/
7 #include <sys/timerfd.h>
16 #include "load-fragment.h"
17 #include "load-dropin.h"
/* Per-type dispatch table: maps each UnitType enum value to the vtable
 * implementing that unit type's behavior (designated initializers keep
 * the mapping robust against enum reordering). Closing brace of the
 * initializer falls outside this view. */
const UnitVTable * const unit_vtable[_UNIT_TYPE_MAX] = {
        [UNIT_SERVICE] = &service_vtable,
        [UNIT_TIMER] = &timer_vtable,
        [UNIT_SOCKET] = &socket_vtable,
        [UNIT_TARGET] = &target_vtable,
        [UNIT_DEVICE] = &device_vtable,
        [UNIT_MOUNT] = &mount_vtable,
        [UNIT_AUTOMOUNT] = &automount_vtable,
        [UNIT_SNAPSHOT] = &snapshot_vtable
/* Determines a unit's type from its name by matching the name's suffix
 * (e.g. ".service") against each registered vtable's suffix.
 * Returns _UNIT_TYPE_INVALID when no suffix matches.
 * NOTE(review): declaration of 't' and the matching-case return are
 * elided in this view. */
UnitType unit_name_to_type(const char *n) {
        for (t = 0; t < _UNIT_TYPE_MAX; t++)
                if (endswith(n, unit_vtable[t]->suffix))
        /* Fall-through: no known suffix matched */
        return _UNIT_TYPE_INVALID;
/* Continuation of the VALID_CHARS macro: the character set permitted in
 * the name portion of a unit name (opening #define line is outside this
 * view). */
        "abcdefghijklmnopqrstuvwxyz" \
        "ABCDEFGHIJKLMNOPQRSTUVWXYZ" \

/* Validates a unit name: bounded length, a recognized type suffix, and
 * only VALID_CHARS characters before the final '.'.
 * NOTE(review): the early 'return false' statements after each check are
 * elided in this view. */
bool unit_name_is_valid(const char *n) {
        /* Reject overlong names */
        if (strlen(n) >= UNIT_NAME_MAX)
        /* The suffix must map to a known unit type */
        t = unit_name_to_type(n);
        if (t < 0 || t >= _UNIT_TYPE_MAX)
        /* There must be a '.' separating name from suffix */
        if (!(e = strrchr(n, '.')))
        /* Every character before the suffix must be allowed */
        for (i = n; i < e; i++)
                if (!strchr(VALID_CHARS, *i))
/* Returns a newly allocated copy of unit name 'n' with its type suffix
 * replaced by 'suffix'. Caller owns (and must free) the result.
 * NOTE(review): the length computations for a/b and the copy of the name
 * prefix are elided in this view. */
char *unit_name_change_suffix(const char *n, const char *suffix) {
        assert(unit_name_is_valid(n));
        /* A valid unit name always contains a '.' */
        assert_se(e = strrchr(n, '.'));
        if (!(r = new(char, a + b + 1)))
        /* Append the new suffix (b+1 copies the trailing NUL too) */
        memcpy(r+a, suffix, b+1);
/* Allocates and zero-initializes a new Unit owned by manager 'm'.
 * Returns NULL on allocation failure. The unit starts with no type
 * assigned; unit_add_name() establishes it later. */
Unit *unit_new(Manager *m) {
        if (!(u = new0(Unit, 1)))
        /* The name set maps every alias of this unit */
        if (!(u->meta.names = set_new(string_hash_func, string_compare_func))) {
        /* Type is unknown until the first name is added */
        u->meta.type = _UNIT_TYPE_INVALID;
/* Registers an additional name (alias) for unit 'u'. Validates the
 * name, checks that its suffix-derived type is consistent with the
 * unit's existing type, and records it both in the unit's name set and
 * in the manager's global name->unit hashmap. The first name added also
 * fixes the unit's type and links it into the per-type list.
 * NOTE(review): error-return statements and cleanup between the checks
 * are elided in this view. */
int unit_add_name(Unit *u, const char *text) {
        if (!unit_name_is_valid(text))
        if ((t = unit_name_to_type(text)) == _UNIT_TYPE_INVALID)
        /* All names of one unit must agree on the type */
        if (u->meta.type != _UNIT_TYPE_INVALID && t != u->meta.type)
        /* Own copy: the set and hashmap below keep this string */
        if (!(s = strdup(text)))
        if ((r = set_put(u->meta.names, s)) < 0) {
        if ((r = hashmap_put(u->meta.manager->units, s, u)) < 0) {
                /* Roll back the set insertion on hashmap failure */
                set_remove(u->meta.names, s);
        /* First name determines the type and per-type list membership */
        if (u->meta.type == _UNIT_TYPE_INVALID)
                LIST_PREPEND(Meta, units_per_type, u->meta.manager->units_per_type[t], &u->meta);
/* Makes 'name' the primary id of the unit. The name must already be one
 * of the unit's registered names; set_get() retrieves the canonical
 * stored string so the id aliases the set's copy. */
int unit_choose_id(Unit *u, const char *name) {

        /* Selects one of the names of this unit as the id */
        if (!(s = set_get(u->meta.names, (char*) name)))
/* Replaces the unit's human-readable description with a copy of
 * 'description'. Returns an error on allocation failure (old value is
 * kept in that case, since the strdup happens first). */
int unit_set_description(Unit *u, const char *description) {
        if (!(s = strdup(description)))
        /* Free the previous description only after the copy succeeded */
        free(u->meta.description);
        u->meta.description = s;
/* Queues the unit for configuration loading. No-ops when the unit is
 * past the stub state or already queued, so repeated calls are safe. */
void unit_add_to_load_queue(Unit *u) {
        if (u->meta.load_state != UNIT_STUB || u->meta.in_load_queue)
        LIST_PREPEND(Meta, load_queue, u->meta.manager->load_queue, &u->meta);
        /* Flag prevents double-queueing */
        u->meta.in_load_queue = true;
/* Frees a dependency set of 'u' while removing 'u' from the inverse
 * dependency sets of every peer it referenced, keeping the
 * bidirectional dependency graph consistent. */
static void bidi_set_free(Unit *u, Set *s) {

        /* Frees the set and makes sure we are dropped from the
         * inverse pointers */

        SET_FOREACH(other, s, i) {
                /* Drop 'u' from all of the peer's dependency sets; cheaper
                 * than tracking which direction referenced us */
                for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
                        set_remove(other->meta.dependencies[d], u);
/* Destroys a unit: unregisters it from all manager-level indexes
 * (name hashmap, per-type list, load queue), gives the type-specific
 * implementation a chance to clean up, then releases owned data
 * (job, dependency sets, strings, names). */
void unit_free(Unit *u) {

        /* Detach from next 'bigger' objects */

        /* Remove every alias from the manager's global name index */
        SET_FOREACH(t, u->meta.names, i)
                hashmap_remove_value(u->meta.manager->units, t, u);
        if (u->meta.type != _UNIT_TYPE_INVALID)
                LIST_REMOVE(Meta, units_per_type, u->meta.manager->units_per_type[u->meta.type], &u->meta);
        if (u->meta.in_load_queue)
                LIST_REMOVE(Meta, load_queue, u->meta.manager->load_queue, &u->meta);
        /* Only fully loaded units ran their type-specific init; only
         * those get the type-specific done() teardown */
        if (u->meta.load_state == UNIT_LOADED)
                if (UNIT_VTABLE(u)->done)
                        UNIT_VTABLE(u)->done(u);

        /* Free data and next 'smaller' objects */

        job_free(u->meta.job);
        /* Dependency sets are bidirectional: bidi_set_free also scrubs
         * us out of the peers' inverse sets */
        for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
                bidi_set_free(u, u->meta.dependencies[d]);
        free(u->meta.description);
        free(u->meta.load_path);
        /* The name set owns its strings: steal and free each one */
        while ((t = set_steal_first(u->meta.names)))
        set_free(u->meta.names);
/* Returns the unit's current active state. Units that never finished
 * loading are reported as inactive; otherwise the query is delegated to
 * the type-specific implementation. */
UnitActiveState unit_active_state(Unit *u) {
        if (u->meta.load_state != UNIT_LOADED)
                return UNIT_INACTIVE;

        return UNIT_VTABLE(u)->active_state(u);
/* Merges the entries of 'other' into *s. If *s already exists the sets
 * are merged in place; otherwise *s becomes a copy of 'other'.
 * NOTE(review): the guard conditions around these two paths are elided
 * in this view. */
static int ensure_merge(Set **s, Set *other) {
                return set_merge(*s, other);
        /* Target set does not exist yet: clone the source */
        if (!(*s = set_copy(other)))
280 /* FIXME: Does not rollback on failure! Needs to fix special unit
281 * pointers. Needs to merge names and dependencies properly.*/
282 int unit_merge(Unit
*u
, Unit
*other
) {
288 assert(u
->meta
.manager
== other
->meta
.manager
);
290 /* This merges 'other' into 'unit'. FIXME: This does not
291 * rollback on failure. */
293 if (u
->meta
.type
!= u
->meta
.type
)
296 if (u
->meta
.load_state
!= UNIT_STUB
)
300 if ((r
= ensure_merge(&u
->meta
.names
, other
->meta
.names
)) < 0)
303 /* Merge dependencies */
304 for (d
= 0; d
< _UNIT_DEPENDENCY_MAX
; d
++)
305 /* fixme, the inverse mapping is missing */
306 if ((r
= ensure_merge(&u
->meta
.dependencies
[d
], other
->meta
.dependencies
[d
])) < 0)
/* Returns the unit's primary id: the first entry of its name set
 * (the set's head is maintained as the chosen id). */
const char* unit_id(Unit *u) {
        return set_first(u->meta.names);
/* Returns a human-readable description for the unit: the explicitly set
 * description when present (fallback return is outside this view). */
const char *unit_description(Unit *u) {
        if (u->meta.description)
                return u->meta.description;
/* Writes a multi-line, human-readable debug dump of the unit to stream
 * 'f'. Every line is prefixed with 'prefix'; nested sections (vtable
 * dump, job dump) get an extra tab via prefix2. */
void unit_dump(Unit *u, FILE *f, const char *prefix) {

        /* Static lookup tables translate enum values to display names */
        static const char* const load_state_table[_UNIT_LOAD_STATE_MAX] = {
                [UNIT_STUB] = "stub",
                [UNIT_LOADED] = "loaded",
                [UNIT_FAILED] = "failed"

        static const char* const active_state_table[_UNIT_ACTIVE_STATE_MAX] = {
                [UNIT_ACTIVE] = "active",
                [UNIT_INACTIVE] = "inactive",
                [UNIT_ACTIVATING] = "activating",
                [UNIT_DEACTIVATING] = "deactivating"

        static const char* const dependency_table[_UNIT_DEPENDENCY_MAX] = {
                [UNIT_REQUIRES] = "Requires",
                [UNIT_SOFT_REQUIRES] = "SoftRequires",
                [UNIT_WANTS] = "Wants",
                [UNIT_REQUISITE] = "Requisite",
                [UNIT_SOFT_REQUISITE] = "SoftRequisite",
                [UNIT_REQUIRED_BY] = "RequiredBy",
                [UNIT_SOFT_REQUIRED_BY] = "SoftRequiredBy",
                [UNIT_WANTED_BY] = "WantedBy",
                [UNIT_CONFLICTS] = "Conflicts",
                [UNIT_BEFORE] = "Before",
                [UNIT_AFTER] = "After",

        /* Deeper indentation for sub-sections */
        prefix2 = strappend(prefix, "\t");

                "%s\tDescription: %s\n"
                "%s\tUnit Load State: %s\n"
                "%s\tUnit Active State: %s\n"
                "%s\tRecursive Deactivate: %s\n"
                "%s\tStop When Unneeded: %s\n",

                prefix, unit_description(u),
                prefix, load_state_table[u->meta.load_state],
                prefix, active_state_table[unit_active_state(u)],
                prefix, yes_no(u->meta.recursive_stop),
                prefix, yes_no(u->meta.stop_when_unneeded));

        if (u->meta.load_path)
                fprintf(f, "%s\tLoad Path: %s\n", prefix, u->meta.load_path);

        /* One line per registered name/alias */
        SET_FOREACH(t, u->meta.names, i)
                fprintf(f, "%s\tName: %s\n", prefix, t);

        /* One section per non-empty dependency type */
        for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {

                if (set_isempty(u->meta.dependencies[d]))

                SET_FOREACH(other, u->meta.dependencies[d], i)
                        fprintf(f, "%s\t%s: %s\n", prefix, dependency_table[d], unit_id(other));

        /* Type-specific extra information, if the vtable provides it */
        if (UNIT_VTABLE(u)->dump)
                UNIT_VTABLE(u)->dump(u, f, prefix2);

        job_dump(u->meta.job, f, prefix2);
/* Common implementation for multiple backends */
/* Loads a unit's configuration: first the main unit file fragment,
 * then any drop-in directory overrides. Returns the first negative
 * error encountered. */
int unit_load_fragment_and_dropin(Unit *u) {

        /* Load the main unit file fragment */
        if ((r = unit_load_fragment(u)) < 0)

        /* Load drop-in directory data */
        if ((r = unit_load_dropin(u)) < 0)
/* Loads the unit's configuration via its type-specific init() hook.
 * Removes the unit from the load queue first, so a load triggered
 * directly and one triggered by queue processing don't double-run.
 * On success the state becomes UNIT_LOADED; the UNIT_FAILED assignment
 * below belongs to the error path (intervening lines are outside this
 * view). */
int unit_load(Unit *u) {
        /* Dequeue ourselves regardless of how we were invoked */
        if (u->meta.in_load_queue) {
                LIST_REMOVE(Meta, load_queue, u->meta.manager->load_queue, &u->meta);
                u->meta.in_load_queue = false;
        /* Only stubs need loading; anything else is already resolved */
        if (u->meta.load_state != UNIT_STUB)
        if (UNIT_VTABLE(u)->init)
                if ((r = UNIT_VTABLE(u)->init(u)) < 0)
        u->meta.load_state = UNIT_LOADED;
        /* Error path: mark the unit as failed to load */
        u->meta.load_state = UNIT_FAILED;
/* Starts the unit via its type-specific start() hook.
 * Errors:
 *  -EBADR: This unit type does not support starting.
 *  -EALREADY: Unit is already started.
 *  -EAGAIN: An operation is already in progress. Retry later.
 */
int unit_start(Unit *u) {
        UnitActiveState state;

        /* Type does not implement starting at all */
        if (!UNIT_VTABLE(u)->start)

        state = unit_active_state(u);
        if (UNIT_IS_ACTIVE_OR_RELOADING(state))

        /* We don't suppress calls to ->start() here when we are
         * already starting, to allow this request to be used as a
         * "hurry up" call, for example when the unit is in some "auto
         * restart" state where it waits for a holdoff timer to elapse
         * before it will start again. */

        return UNIT_VTABLE(u)->start(u);
/* Returns true if this unit's type implements starting (i.e. the vtable
 * provides a start() hook). */
bool unit_can_start(Unit *u) {
        return !!UNIT_VTABLE(u)->start;
/* Stops the unit via its type-specific stop() hook.
 * Errors:
 *  -EBADR: This unit type does not support stopping.
 *  -EALREADY: Unit is already stopped.
 *  -EAGAIN: An operation is already in progress. Retry later.
 */
int unit_stop(Unit *u) {
        UnitActiveState state;

        /* Type does not implement stopping at all */
        if (!UNIT_VTABLE(u)->stop)

        state = unit_active_state(u);
        /* Already stopped */
        if (state == UNIT_INACTIVE)

        /* A stop is already in flight */
        if (state == UNIT_DEACTIVATING)

        return UNIT_VTABLE(u)->stop(u);
511 * -EBADR: This unit type does not support reloading.
512 * -ENOEXEC: Unit is not started.
513 * -EAGAIN: An operation is already in progress. Retry later.
515 int unit_reload(Unit
*u
) {
516 UnitActiveState state
;
520 if (!unit_can_reload(u
))
523 state
= unit_active_state(u
);
524 if (unit_active_state(u
) == UNIT_ACTIVE_RELOADING
)
527 if (unit_active_state(u
) != UNIT_ACTIVE
)
530 return UNIT_VTABLE(u
)->reload(u
);
/* Returns whether the unit currently supports reloading: the type must
 * provide reload(); if it also provides can_reload(), that hook decides
 * dynamically (otherwise the fall-through outside this view applies). */
bool unit_can_reload(Unit *u) {
        if (!UNIT_VTABLE(u)->reload)
        /* Without a can_reload() hook, having reload() is enough */
        if (!UNIT_VTABLE(u)->can_reload)
        return UNIT_VTABLE(u)->can_reload(u);
/* Garbage-collection check: if the unit is marked stop_when_unneeded
 * and nothing that (soft-)requires or wants it is still active, enqueue
 * a stop job for it.
 * NOTE(review): name is a long-standing typo for "unneeded"; kept
 * because sibling functions in this file call it by this name. */
static void unit_check_uneeded(Unit *u) {

        /* If this service shall be shut down when unneeded then do

        if (!u->meta.stop_when_unneeded)

        /* Only an active/activating unit can be "unneeded" */
        if (!UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)))

        /* If anything that depends on us is still (becoming) active,
         * we are still needed — the early returns are outside this view */
        SET_FOREACH(other, u->meta.dependencies[UNIT_REQUIRED_BY], i)
                if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))

        SET_FOREACH(other, u->meta.dependencies[UNIT_SOFT_REQUIRED_BY], i)
                if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))

        SET_FOREACH(other, u->meta.dependencies[UNIT_WANTED_BY], i)
                if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))

        log_debug("Service %s is not needed anymore. Stopping.", unit_id(u));

        /* Ok, nobody needs us anymore. Sniff. Then let's commit suicide */
        manager_add_job(u->meta.manager, JOB_STOP, u, JOB_FAIL, true, NULL);
/* Called when a unit became active without a job requesting it: pulls
 * in its positive dependencies (start jobs) and pushes out conflicting
 * units (stop jobs), mirroring what a requested start would have done. */
static void retroactively_start_dependencies(Unit *u) {
        assert(UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)));

        /* Hard requirements: start, replacing any existing job */
        SET_FOREACH(other, u->meta.dependencies[UNIT_REQUIRES], i)
                if (!UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
                        manager_add_job(u->meta.manager, JOB_START, other, JOB_REPLACE, true, NULL);

        /* Soft requirements: best-effort start, don't fail the caller */
        SET_FOREACH(other, u->meta.dependencies[UNIT_SOFT_REQUIRES], i)
                if (!UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
                        manager_add_job(u->meta.manager, JOB_START, other, JOB_FAIL, false, NULL);

        SET_FOREACH(other, u->meta.dependencies[UNIT_REQUISITE], i)
                if (!UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
                        manager_add_job(u->meta.manager, JOB_START, other, JOB_REPLACE, true, NULL);

        /* Wants: weakest positive dependency, best-effort */
        SET_FOREACH(other, u->meta.dependencies[UNIT_WANTS], i)
                if (!UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
                        manager_add_job(u->meta.manager, JOB_START, other, JOB_FAIL, false, NULL);

        /* NOTE(review): this guard looks inverted — it enqueues a STOP
         * for conflicting units that are NOT active; stopping should
         * target units that ARE active/activating. Verify intent. */
        SET_FOREACH(other, u->meta.dependencies[UNIT_CONFLICTS], i)
                if (!UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
                        manager_add_job(u->meta.manager, JOB_STOP, other, JOB_REPLACE, true, NULL);
/* Called when a unit became inactive without a job requesting it:
 * optionally pulls down dependents (recursive_stop) and re-evaluates
 * whether the units it depended on are still needed. */
static void retroactively_stop_dependencies(Unit *u) {
        assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)));

        if (u->meta.recursive_stop) {
                /* Pull down units which need us recursively, if enabled */
                SET_FOREACH(other, u->meta.dependencies[UNIT_REQUIRED_BY], i)
                        if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
                                manager_add_job(u->meta.manager, JOB_STOP, other, JOB_REPLACE, true, NULL);

        /* Garbage collect services that might not be needed anymore, if enabled */
        SET_FOREACH(other, u->meta.dependencies[UNIT_REQUIRES], i)
                if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
                        unit_check_uneeded(other);
        SET_FOREACH(other, u->meta.dependencies[UNIT_SOFT_REQUIRES], i)
                if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
                        unit_check_uneeded(other);
        SET_FOREACH(other, u->meta.dependencies[UNIT_WANTS], i)
                if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
                        unit_check_uneeded(other);
        SET_FOREACH(other, u->meta.dependencies[UNIT_REQUISITE], i)
                if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
                        unit_check_uneeded(other);
        SET_FOREACH(other, u->meta.dependencies[UNIT_SOFT_REQUISITE], i)
                if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
                        unit_check_uneeded(other);
/* Central state-change notification, called by type implementations
 * whenever the unit transitions from active state 'os' to 'ns'.
 * Updates enter/exit timestamps, drives any pending job (scheduling a
 * waiting job or finishing/invalidating a running one depending on the
 * job type), and — for unrequested transitions — retroactively starts
 * or stops dependencies and re-checks whether the unit is still needed. */
void unit_notify(Unit *u, UnitActiveState os, UnitActiveState ns) {
        assert(os < _UNIT_ACTIVE_STATE_MAX);
        assert(ns < _UNIT_ACTIVE_STATE_MAX);
        /* Direct active->activating / inactive->deactivating
         * transitions make no sense */
        assert(!(os == UNIT_ACTIVE && ns == UNIT_ACTIVATING));
        assert(!(os == UNIT_INACTIVE && ns == UNIT_DEACTIVATING));

        /* Track wall-clock timestamps of the active edge transitions */
        if (!UNIT_IS_ACTIVE_OR_RELOADING(os) && UNIT_IS_ACTIVE_OR_RELOADING(ns))
                u->meta.active_enter_timestamp = now(CLOCK_REALTIME);
        else if (UNIT_IS_ACTIVE_OR_RELOADING(os) && !UNIT_IS_ACTIVE_OR_RELOADING(ns))
                u->meta.active_exit_timestamp = now(CLOCK_REALTIME);

        if (u->meta.job->state == JOB_WAITING)

                /* So we reached a different state for this
                 * job. Let's see if we can run it now if it
                 * failed previously due to EAGAIN. */
                job_schedule_run(u->meta.job);

        assert(u->meta.job->state == JOB_RUNNING);

        /* Let's check whether this state change
         * constitutes a finished job, or maybe
         * contradicts a running job and hence needs to
         * invalidate jobs. */

        switch (u->meta.job->type) {

        case JOB_VERIFY_ACTIVE:

                if (UNIT_IS_ACTIVE_OR_RELOADING(ns)) {
                        job_finish_and_invalidate(u->meta.job, true);
                } else if (ns == UNIT_ACTIVATING)
                /* Any other transition contradicts the job */
                job_finish_and_invalidate(u->meta.job, false);

        case JOB_RELOAD_OR_START:

                if (ns == UNIT_ACTIVE) {
                        job_finish_and_invalidate(u->meta.job, true);
                } else if (ns == UNIT_ACTIVATING || ns == UNIT_ACTIVE_RELOADING)
                job_finish_and_invalidate(u->meta.job, false);

        case JOB_TRY_RESTART:

                if (ns == UNIT_INACTIVE) {
                        job_finish_and_invalidate(u->meta.job, true);
                } else if (ns == UNIT_DEACTIVATING)
                job_finish_and_invalidate(u->meta.job, false);

        assert_not_reached("Job type unknown");

        /* If this state change happened without being requested by a
         * job, then let's retroactively start or stop dependencies */

        if (UNIT_IS_INACTIVE_OR_DEACTIVATING(os) && UNIT_IS_ACTIVE_OR_ACTIVATING(ns))
                retroactively_start_dependencies(u);
        else if (UNIT_IS_ACTIVE_OR_ACTIVATING(os) && UNIT_IS_INACTIVE_OR_DEACTIVATING(ns))
                retroactively_stop_dependencies(u);

        /* Maybe we finished startup and are now ready for being
         * stopped because unneeded? */
        unit_check_uneeded(u);
/* Registers (or updates) an epoll watch on 'fd' for this unit using the
 * Watch slot 'w'. A fresh slot (WATCH_INVALID) is added with
 * EPOLL_CTL_ADD; an existing FD watch for the same fd/unit is modified
 * with EPOLL_CTL_MOD. */
int unit_watch_fd(Unit *u, int fd, uint32_t events, Watch *w) {
        struct epoll_event ev;

        /* A slot may only be reused for the same fd and the same unit */
        assert(w->type == WATCH_INVALID || (w->type == WATCH_FD && w->fd == fd && w->unit == u));

        if (epoll_ctl(u->meta.manager->epoll_fd,
                      w->type == WATCH_INVALID ? EPOLL_CTL_ADD : EPOLL_CTL_MOD,
/* Removes the epoll watch described by 'w' and marks the slot free.
 * Safe to call on an already-invalid slot (no-op). */
void unit_unwatch_fd(Unit *u, Watch *w) {
        if (w->type == WATCH_INVALID)

        /* The slot must actually be an FD watch belonging to this unit */
        assert(w->type == WATCH_FD && w->unit == u);
        assert_se(epoll_ctl(u->meta.manager->epoll_fd, EPOLL_CTL_DEL, w->fd, NULL) >= 0);

        /* Mark the slot reusable */
        w->type = WATCH_INVALID;
/* Registers interest in SIGCHLD-style events for 'pid' by mapping the
 * pid to this unit in the manager's watch_pids hashmap.
 * NOTE(review): the pid is squeezed through UINT32_TO_PTR — assumes
 * pid_t values fit in 32 bits; verify on platforms with wider pid_t. */
int unit_watch_pid(Unit *u, pid_t pid) {
        return hashmap_put(u->meta.manager->watch_pids, UINT32_TO_PTR(pid), u);
/* Drops the pid->unit mapping previously created by unit_watch_pid().
 * Must use the same UINT32_TO_PTR key encoding as the registration. */
void unit_unwatch_pid(Unit *u, pid_t pid) {
        hashmap_remove(u->meta.manager->watch_pids, UINT32_TO_PTR(pid));
/* Arms a one-shot monotonic timer that fires after 'delay' usec,
 * delivering the event through the manager's epoll loop via Watch slot
 * 'w'. Reuses an existing timerfd in the slot when possible; otherwise
 * creates a new non-blocking, close-on-exec timerfd and registers it
 * with epoll. On settime/epoll failure the fresh fd is closed again. */
int unit_watch_timer(Unit *u, usec_t delay, Watch *w) {
        struct itimerspec its;

        /* A slot may only be reused as a timer watch of the same unit */
        assert(w->type == WATCH_INVALID || (w->type == WATCH_TIMER && w->unit == u));

        /* This will try to reuse the old timer if there is one */

        if (w->type == WATCH_TIMER) {

        if ((fd = timerfd_create(CLOCK_MONOTONIC, TFD_NONBLOCK|TFD_CLOEXEC)) < 0)

        /* Set absolute time in the past, but not 0, since we
         * don't want to disarm the timer */
        its.it_value.tv_sec = 0;
        its.it_value.tv_nsec = 1;

        flags = TFD_TIMER_ABSTIME;

        /* Relative-delay path: encode 'delay' into it_value */
        timespec_store(&its.it_value, delay);

        /* This will also flush the elapse counter */
        if (timerfd_settime(fd, flags, &its, NULL) < 0)

        /* A brand-new timerfd still needs to join the epoll set */
        if (w->type == WATCH_INVALID) {
                struct epoll_event ev;

                if (epoll_ctl(u->meta.manager->epoll_fd, EPOLL_CTL_ADD, fd, &ev) < 0)

        w->type = WATCH_TIMER;

        /* Error path: release the fd we created above */
        assert_se(close_nointr(fd) == 0);
/* Disarms and releases a timer watch: removes the timerfd from epoll,
 * closes it, and marks the slot free. No-op on an invalid slot. */
void unit_unwatch_timer(Unit *u, Watch *w) {
        if (w->type == WATCH_INVALID)

        /* The slot must be a timer watch belonging to this unit */
        assert(w->type == WATCH_TIMER && w->unit == u);

        assert_se(epoll_ctl(u->meta.manager->epoll_fd, EPOLL_CTL_DEL, w->fd, NULL) >= 0);
        assert_se(close_nointr(w->fd) == 0);

        /* Mark the slot reusable */
        w->type = WATCH_INVALID;
/* Returns whether job type 'j' makes sense for this unit, based on the
 * capabilities its vtable exposes (start/stop/reload hooks).
 * NOTE(review): the switch header and several case labels/returns are
 * elided in this view. */
bool unit_job_is_applicable(Unit *u, JobType j) {
        assert(j >= 0 && j < _JOB_TYPE_MAX);

        case JOB_VERIFY_ACTIVE:

        case JOB_TRY_RESTART:
                return unit_can_start(u);

        return unit_can_reload(u);

        case JOB_RELOAD_OR_START:
                /* Needs both capabilities */
                return unit_can_reload(u) && unit_can_start(u);

        assert_not_reached("Invalid job type");
/* Adds a dependency of type 'd' from 'u' to 'other', and simultaneously
 * records the inverse dependency on 'other' (looked up via
 * inverse_table), keeping the graph bidirectional. Rolls back the
 * forward insertion when the inverse insertion fails. */
int unit_add_dependency(Unit *u, UnitDependency d, Unit *other) {

        /* For every dependency type, the type recorded on the peer.
         * *_BY entries are marked invalid: they may only be created as
         * the inverse of their forward counterparts, never directly. */
        static const UnitDependency inverse_table[_UNIT_DEPENDENCY_MAX] = {
                [UNIT_REQUIRES] = UNIT_REQUIRED_BY,
                [UNIT_SOFT_REQUIRES] = UNIT_SOFT_REQUIRED_BY,
                [UNIT_WANTS] = UNIT_WANTED_BY,
                [UNIT_REQUISITE] = UNIT_REQUIRED_BY,
                [UNIT_SOFT_REQUISITE] = UNIT_SOFT_REQUIRED_BY,
                [UNIT_REQUIRED_BY] = _UNIT_DEPENDENCY_INVALID,
                [UNIT_SOFT_REQUIRED_BY] = _UNIT_DEPENDENCY_INVALID,
                [UNIT_WANTED_BY] = _UNIT_DEPENDENCY_INVALID,
                [UNIT_CONFLICTS] = UNIT_CONFLICTS,
                [UNIT_BEFORE] = UNIT_AFTER,
                [UNIT_AFTER] = UNIT_BEFORE

        assert(d >= 0 && d < _UNIT_DEPENDENCY_MAX);
        assert(inverse_table[d] != _UNIT_DEPENDENCY_INVALID);

        /* We won't allow dependencies on ourselves. We will not
         * consider them an error however. */

        /* Lazily allocate both dependency sets before inserting */
        if ((r = set_ensure_allocated(&u->meta.dependencies[d], trivial_hash_func, trivial_compare_func)) < 0)

        if ((r = set_ensure_allocated(&other->meta.dependencies[inverse_table[d]], trivial_hash_func, trivial_compare_func)) < 0)

        if ((r = set_put(u->meta.dependencies[d], other)) < 0)

        if ((r = set_put(other->meta.dependencies[inverse_table[d]], u)) < 0) {
                /* Roll back the forward edge so the graph stays consistent */
                set_remove(u->meta.dependencies[d], other);
/* Convenience wrapper: resolves (loading if necessary) the unit called
 * 'name' through the manager, then adds a dependency of type 'd' from
 * 'u' to it. Propagates errors from loading or linking. */
int unit_add_dependency_by_name(Unit *u, UnitDependency d, const char *name) {
        if ((r = manager_load_unit(u->meta.manager, name, &other)) < 0)

        if ((r = unit_add_dependency(u, d, other)) < 0)
/* Returns the directory to load unit files from: the UNIT_PATH
 * environment variable when set to an absolute path (default fallback
 * outside this view). */
const char *unit_path(void) {
        if ((e = getenv("UNIT_PATH")))
                if (path_is_absolute(e))
/* Overrides the unit search path by exporting UNIT_PATH. A relative
 * 'p' is made absolute against the current working directory first.
 * setenv is called with overwrite=0, so a pre-existing UNIT_PATH wins. */
int set_unit_path(const char *p) {

        /* This is mostly for debug purposes */

        if (path_is_absolute(p)) {
                if (!(c = strdup(p)))
        /* Relative path: prefix it with the current directory */
        if (!(cwd = get_current_dir_name()))

        r = asprintf(&c, "%s/%s", cwd, p);

        /* overwrite=0: do not clobber an existing UNIT_PATH */
        if (setenv("UNIT_PATH", c, 0) < 0) {
/* Builds a unit name from a filesystem path: prefix + escaped path +
 * suffix, newly allocated (caller frees). Body continues past this view. */
char *unit_name_escape_path(const char *prefix, const char *path, const char *suffix) {

        /* Takes a path and a suffix and prefix and makes a nice
         * string suitable as unit name of it, escaping all weird

         * / becomes ., and all chars not alloweed in a unit name get
         * escaped as \xFF, including \ and ., of course. This
         * escaping is hence reversible.

        /* Worst case: every path byte expands to the 4-byte \xFF form */
        if (!(r = new(char, a+b*4+c+1)))

        memcpy(r, prefix, a);

        for (f = path, t = r+a; *f; f++) {

                else if (*f == '.' || *f == '\\' || !strchr(VALID_CHARS, *f)) {
                        /* NOTE(review): '*f > 4' is a comparison, which
                         * yields 0 or 1 — this almost certainly should be
                         * '*f >> 4' to emit the high nibble of the byte.
                         * As written the escape is not reversible. */
                        *(t++) = hexchar(*f > 4);
                        *(t++) = hexchar(*f);

        /* Append suffix including its trailing NUL (c+1 bytes) */
        memcpy(t, suffix, c+1);