1 /*-*- Mode: C; c-basic-offset: 8 -*-*/
7 #include <sys/timerfd.h>
16 #include "load-fragment.h"
17 #include "load-dropin.h"
/* Per-type dispatch table: maps each UnitType enum value to the vtable
 * implementing that unit type's behavior (service, timer, socket, ...).
 * Indexed directly by UnitType via designated initializers.
 * NOTE(review): fragment is truncated here — trailing entries and the
 * closing brace are not visible in this view. */
20 const UnitVTable
* const unit_vtable
[_UNIT_TYPE_MAX
] = {
21 [UNIT_SERVICE
] = &service_vtable
,
22 [UNIT_TIMER
] = &timer_vtable
,
23 [UNIT_SOCKET
] = &socket_vtable
,
24 [UNIT_TARGET
] = &target_vtable
,
25 [UNIT_DEVICE
] = &device_vtable
,
26 [UNIT_MOUNT
] = &mount_vtable
,
27 [UNIT_AUTOMOUNT
] = &automount_vtable
,
28 [UNIT_SNAPSHOT
] = &snapshot_vtable
/* Determines the unit type from a unit name by matching the name's
 * suffix (e.g. ".service", ".timer") against each vtable's suffix.
 * Returns _UNIT_TYPE_INVALID when no suffix matches; the successful
 * return path (presumably "return t;") is not visible in this fragment. */
31 UnitType
unit_name_to_type(const char *n
) {
/* Try each registered unit type's suffix in turn. */
36 for (t
= 0; t
< _UNIT_TYPE_MAX
; t
++)
37 if (endswith(n
, unit_vtable
[t
]->suffix
))
/* Fall-through: no known suffix matched. */
40 return _UNIT_TYPE_INVALID
;
45 "abcdefghijklmnopqrstuvwxyz" \
46 "ABCDEFGHIJKLMNOPQRSTUVWXYZ" \
/* Validates a unit name: it must be shorter than UNIT_NAME_MAX, carry a
 * recognized type suffix, contain a '.' separator, and use only characters
 * from VALID_CHARS before that separator. The early-return failure bodies
 * are not visible in this fragment. */
49 bool unit_name_is_valid(const char *n
) {
/* Reject over-long names outright. */
55 if (strlen(n
) >= UNIT_NAME_MAX
)
/* The suffix must map to a known unit type. */
58 t
= unit_name_to_type(n
);
59 if (t
< 0 || t
>= _UNIT_TYPE_MAX
)
/* Locate the last '.' — everything before it is the instance name. */
62 if (!(e
= strrchr(n
, '.')))
/* Every character before the suffix must be in the allowed set. */
65 for (i
= n
; i
< e
; i
++)
66 if (!strchr(VALID_CHARS
, *i
))
/* Returns a newly allocated copy of unit name 'n' with its type suffix
 * replaced by 'suffix'. Caller owns (and must free) the returned string.
 * Returns NULL on allocation failure (the visible new() check). */
72 char *unit_name_change_suffix(const char *n
, const char *suffix
) {
77 assert(unit_name_is_valid(n
));
/* A valid unit name always contains a '.', hence assert_se. */
80 assert_se(e
= strrchr(n
, '.'));
/* a = length of the stem, b = strlen(suffix) — presumably set in the
 * truncated lines above; allocate stem + suffix + NUL. */
84 if (!(r
= new(char, a
+ b
+ 1)))
/* Append the new suffix including its terminating NUL. */
88 memcpy(r
+a
, suffix
, b
+1);
/* Allocates and minimally initializes a new Unit owned by Manager 'm'.
 * Returns NULL on allocation failure. The names set is created empty;
 * the type stays invalid until the first name is added. */
93 Unit
*unit_new(Manager
*m
) {
98 if (!(u
= new0(Unit
, 1)))
/* The set of aliases/names this unit is known by (string-keyed). */
101 if (!(u
->meta
.names
= set_new(string_hash_func
, string_compare_func
))) {
/* No name added yet, so the type cannot be known. */
107 u
->meta
.type
= _UNIT_TYPE_INVALID
;
/* Registers an additional name (alias) for unit 'u'. Validates the name,
 * checks its suffix-derived type is consistent with the unit's existing
 * type, then records it both in the unit's name set and in the manager's
 * global unit hashmap. On hashmap failure the name-set entry is rolled
 * back (visible at original line 142). Error return values are in the
 * truncated lines. */
112 int unit_add_name(Unit
*u
, const char *text
) {
120 if (!unit_name_is_valid(text
))
123 if ((t
= unit_name_to_type(text
)) == _UNIT_TYPE_INVALID
)
/* A unit that already has a type must only gain names of that type. */
126 if (u
->meta
.type
!= _UNIT_TYPE_INVALID
&& t
!= u
->meta
.type
)
/* Own copy of the name; the set takes ownership on success. */
129 if (!(s
= strdup(text
)))
132 if ((r
= set_put(u
->meta
.names
, s
)) < 0) {
141 if ((r
= hashmap_put(u
->meta
.manager
->units
, s
, u
)) < 0) {
/* Roll back the set insertion so both containers stay consistent. */
142 set_remove(u
->meta
.names
, s
);
/* Queues a stub unit onto the manager's load queue, to have its
 * configuration loaded later. No-op if the unit is already loaded
 * (not a stub) or already queued. */
155 void unit_add_to_load_queue(Unit
*u
) {
158 if (u
->meta
.load_state
!= UNIT_STUB
|| u
->meta
.in_load_queue
)
161 LIST_PREPEND(Meta
, load_queue
, u
->meta
.manager
->load_queue
, &u
->meta
);
/* Mark so a second call does not enqueue twice. */
162 u
->meta
.in_load_queue
= true;
/* Frees a dependency set 's' of unit 'u', first removing 'u' from the
 * dependency sets of every unit contained in 's', so no dangling inverse
 * pointers remain. The final set_free() call is in the truncated lines. */
165 static void bidi_set_free(Unit
*u
, Set
*s
) {
171 /* Frees the set and makes sure we are dropped from the
172 * inverse pointers */
174 SET_FOREACH(other
, s
, i
) {
/* Scrub 'u' from every dependency category of the peer unit. */
177 for (d
= 0; d
< _UNIT_DEPENDENCY_MAX
; d
++)
178 set_remove(other
->meta
.dependencies
[d
], u
);
/* Destroys a unit: detaches it from the manager's registries and load
 * queue, runs the type-specific done() hook if the unit was loaded, frees
 * its job, dependency sets (with inverse-pointer cleanup), strings and
 * names. The final free(u) is presumably in the truncated tail. */
184 void unit_free(Unit
*u
) {
191 /* Detach from next 'bigger' objects */
/* Remove every name->unit mapping from the manager's global table. */
193 SET_FOREACH(t
, u
->meta
.names
, i
)
194 hashmap_remove_value(u
->meta
.manager
->units
, t
, u
);
196 if (u
->meta
.in_load_queue
)
197 LIST_REMOVE(Meta
, load_queue
, u
->meta
.manager
->load_queue
, &u
->meta
);
/* Give the unit type a chance to release its private state. */
199 if (u
->meta
.load_state
== UNIT_LOADED
)
200 if (UNIT_VTABLE(u
)->done
)
201 UNIT_VTABLE(u
)->done(u
);
203 /* Free data and next 'smaller' objects */
205 job_free(u
->meta
.job
);
/* bidi_set_free() also drops us from the peers' inverse sets. */
207 for (d
= 0; d
< _UNIT_DEPENDENCY_MAX
; d
++)
208 bidi_set_free(u
, u
->meta
.dependencies
[d
]);
210 free(u
->meta
.description
);
211 free(u
->meta
.load_path
);
/* The name set owns its strings; steal and (presumably) free each. */
213 while ((t
= set_steal_first(u
->meta
.names
)))
215 set_free(u
->meta
.names
);
/* Returns the unit's current active state. Units that are not fully
 * loaded are reported as inactive; otherwise the query is delegated to
 * the type-specific active_state() callback. */
220 UnitActiveState
unit_active_state(Unit
*u
) {
223 if (u
->meta
.load_state
!= UNIT_LOADED
)
224 return UNIT_INACTIVE
;
226 return UNIT_VTABLE(u
)->active_state(u
);
/* Merges set 'other' into '*s'. If '*s' already exists the contents are
 * merged in place; otherwise '*s' becomes a copy of 'other'. The guard
 * branches between these two paths are in the truncated lines. Returns
 * negative on failure. */
229 static int ensure_merge(Set
**s
, Set
*other
) {
235 return set_merge(*s
, other
);
/* No destination set yet — duplicate the source instead. */
237 if (!(*s
= set_copy(other
)))
243 /* FIXME: Does not rollback on failure! */
/* Merges unit 'other' into unit 'u': both must belong to the same
 * manager, have the same type, and 'u' must still be a stub. Names and
 * all dependency categories of 'other' are folded into 'u'. The error
 * returns are in the truncated lines. */
244 int unit_merge(Unit
*u
, Unit
*other
) {
250 assert(u
->meta
.manager
== other
->meta
.manager
);
252 /* This merges 'other' into 'unit'. FIXME: This does not
253 * rollback on failure. */
/* BUGFIX: the original compared u->meta.type with itself, which is
 * always false, so mismatched unit types were never rejected. Compare
 * against other's type instead. */
255 if (u
->meta
.type
!= other
->meta
.type
)
258 if (u
->meta
.load_state
!= UNIT_STUB
)
/* Fold other's names into ours. */
262 if ((r
= ensure_merge(&u
->meta
.names
, other
->meta
.names
)) < 0)
265 /* Merge dependencies */
266 for (d
= 0; d
< _UNIT_DEPENDENCY_MAX
; d
++)
267 /* fixme, the inverse mapping is missing */
268 if ((r
= ensure_merge(&u
->meta
.dependencies
[d
], other
->meta
.dependencies
[d
])) < 0)
/* Returns one canonical name for the unit: simply the first entry of its
 * name set. The returned string is owned by the unit — do not free. */
274 const char* unit_id(Unit
*u
) {
280 return set_first(u
->meta
.names
);
/* Returns a human-readable description of the unit: the configured
 * description if set, otherwise (presumably, in the truncated tail)
 * a fallback such as the unit's id. */
283 const char *unit_description(Unit
*u
) {
286 if (u
->meta
.description
)
287 return u
->meta
.description
;
/* Dumps a human-readable description of unit 'u' (state, names, load
 * path, dependencies, type-specific details, pending job) to stream 'f',
 * each line prefixed with 'prefix'. For debugging/introspection. */
292 void unit_dump(Unit
*u
, FILE *f
, const char *prefix
) {
/* Static enum-to-string tables, indexed by the respective enums. */
294 static const char* const load_state_table
[_UNIT_LOAD_STATE_MAX
] = {
295 [UNIT_STUB
] = "stub",
296 [UNIT_LOADED
] = "loaded",
297 [UNIT_FAILED
] = "failed"
300 static const char* const active_state_table
[_UNIT_ACTIVE_STATE_MAX
] = {
301 [UNIT_ACTIVE
] = "active",
302 [UNIT_INACTIVE
] = "inactive",
303 [UNIT_ACTIVATING
] = "activating",
304 [UNIT_DEACTIVATING
] = "deactivating"
307 static const char* const dependency_table
[_UNIT_DEPENDENCY_MAX
] = {
308 [UNIT_REQUIRES
] = "Requires",
309 [UNIT_SOFT_REQUIRES
] = "SoftRequires",
310 [UNIT_WANTS
] = "Wants",
311 [UNIT_REQUISITE
] = "Requisite",
312 [UNIT_SOFT_REQUISITE
] = "SoftRequisite",
313 [UNIT_REQUIRED_BY
] = "RequiredBy",
314 [UNIT_SOFT_REQUIRED_BY
] = "SoftRequiredBy",
315 [UNIT_WANTED_BY
] = "WantedBy",
316 [UNIT_CONFLICTS
] = "Conflicts",
317 [UNIT_BEFORE
] = "Before",
318 [UNIT_AFTER
] = "After",
/* Deeper prefix for nested (type-specific and job) output. */
330 prefix2
= strappend(prefix
, "\t");
336 "%s\tDescription: %s\n"
337 "%s\tUnit Load State: %s\n"
338 "%s\tUnit Active State: %s\n",
340 prefix
, unit_description(u
),
341 prefix
, load_state_table
[u
->meta
.load_state
],
342 prefix
, active_state_table
[unit_active_state(u
)]);
344 if (u
->meta
.load_path
)
345 fprintf(f
, "%s\tLoad Path: %s\n", prefix
, u
->meta
.load_path
);
/* One line per known name/alias. */
347 SET_FOREACH(t
, u
->meta
.names
, i
)
348 fprintf(f
, "%s\tName: %s\n", prefix
, t
);
/* One line per dependency edge, grouped by category. */
350 for (d
= 0; d
< _UNIT_DEPENDENCY_MAX
; d
++) {
353 if (set_isempty(u
->meta
.dependencies
[d
]))
356 SET_FOREACH(other
, u
->meta
.dependencies
[d
], i
)
357 fprintf(f
, "%s\t%s: %s\n", prefix
, dependency_table
[d
], unit_id(other
));
/* Let the unit type append its own details. */
360 if (UNIT_VTABLE(u
)->dump
)
361 UNIT_VTABLE(u
)->dump(u
, f
, prefix2
);
364 job_dump(u
->meta
.job
, f
, prefix2
);
369 /* Common implementation for multiple backends */
/* Loads a unit's configuration: first the main fragment file, then any
 * drop-in directory overrides. Returns the first negative error code
 * encountered; the error-return bodies are in the truncated lines. */
370 int unit_load_fragment_and_dropin(Unit
*u
) {
375 /* Load the unit's fragment (main configuration) file */
376 if ((r
= unit_load_fragment(u
)) < 0)
379 /* Load drop-in directory data */
380 if ((r
= unit_load_dropin(u
)) < 0)
/* Loads a stub unit's configuration via the type-specific init() hook.
 * Removes the unit from the load queue first, marks it UNIT_LOADED on
 * success and UNIT_FAILED on error (the jump between those paths is in
 * the truncated lines). */
386 int unit_load(Unit
*u
) {
/* Dequeue first, whatever the outcome of loading. */
391 if (u
->meta
.in_load_queue
) {
392 LIST_REMOVE(Meta
, load_queue
, u
->meta
.manager
->load_queue
, &u
->meta
);
393 u
->meta
.in_load_queue
= false;
/* Only stubs need loading; anything else is a no-op/early return. */
396 if (u
->meta
.load_state
!= UNIT_STUB
)
399 if (UNIT_VTABLE(u
)->init
)
400 if ((r
= UNIT_VTABLE(u
)->init(u
)) < 0)
403 u
->meta
.load_state
= UNIT_LOADED
;
/* Error path: remember that loading failed. */
407 u
->meta
.load_state
= UNIT_FAILED
;
/* Errors (visible from the surviving header comment): */
412 * -EBADR: This unit type does not support starting.
413 * -EALREADY: Unit is already started.
414 * -EAGAIN: An operation is already in progress. Retry later.
/* Starts the unit via its type-specific start() callback. Already-active
 * (or reloading) units short-circuit; units that are merely activating
 * deliberately do NOT, per the comment below. */
416 int unit_start(Unit
*u
) {
417 UnitActiveState state
;
/* No start() callback means this type cannot be started (-EBADR). */
421 if (!UNIT_VTABLE(u
)->start
)
424 state
= unit_active_state(u
);
425 if (UNIT_IS_ACTIVE_OR_RELOADING(state
))
428 /* We don't suppress calls to ->start() here when we are
429 * already starting, to allow this request to be used as a
430 * "hurry up" call, for example when the unit is in some "auto
431 * restart" state where it waits for a holdoff timer to elapse
432 * before it will start again. */
434 return UNIT_VTABLE(u
)->start(u
);
/* Whether this unit type supports being started at all, i.e. whether its
 * vtable provides a start() callback. */
437 bool unit_can_start(Unit
*u
) {
440 return !!UNIT_VTABLE(u
)->start
;
/* Errors (visible from the surviving header comment): */
444 * -EBADR: This unit type does not support stopping.
445 * -EALREADY: Unit is already stopped.
446 * -EAGAIN: An operation is already in progress. Retry later.
/* Stops the unit via its type-specific stop() callback, after checking
 * it is not already inactive or deactivating. */
448 int unit_stop(Unit
*u
) {
449 UnitActiveState state
;
/* No stop() callback means this type cannot be stopped (-EBADR). */
453 if (!UNIT_VTABLE(u
)->stop
)
456 state
= unit_active_state(u
);
457 if (state
== UNIT_INACTIVE
)
460 if (state
== UNIT_DEACTIVATING
)
463 return UNIT_VTABLE(u
)->stop(u
);
/* Errors (visible from the surviving header comment): */
467 * -EBADR: This unit type does not support reloading.
468 * -ENOEXEC: Unit is not started.
469 * -EAGAIN: An operation is already in progress. Retry later.
/* Reloads the unit's configuration via its type-specific reload()
 * callback; the unit must be active and not already mid-reload. */
471 int unit_reload(Unit
*u
) {
472 UnitActiveState state
;
476 if (!unit_can_reload(u
))
/* IMPROVED: cache the state once and reuse it below; the original
 * queried unit_active_state(u) three times in a row, calling into the
 * type's active_state() callback redundantly (and ignoring 'state'). */
479 state
= unit_active_state(u
);
480 if (state
== UNIT_ACTIVE_RELOADING
)
483 if (state
!= UNIT_ACTIVE
)
486 return UNIT_VTABLE(u
)->reload(u
);
/* Whether the unit can currently be reloaded: the type must provide a
 * reload() callback, and if it also provides can_reload() that gets the
 * final word (e.g. for state-dependent reloadability). */
489 bool unit_can_reload(Unit
*u
) {
492 if (!UNIT_VTABLE(u
)->reload
)
/* No can_reload() hook: having reload() is sufficient (truncated
 * "return true" presumably lives between these checks). */
495 if (!UNIT_VTABLE(u
)->can_reload
)
498 return UNIT_VTABLE(u
)->can_reload(u
);
/* When a unit became active without a job requesting it, bring its
 * dependencies in line: queue start jobs for Requires/SoftRequires/
 * Requisite/Wants dependencies that are not yet active, and stop jobs
 * for Conflicts peers. Hard deps use JOB_REPLACE+force, soft ones
 * JOB_FAIL without force. manager_add_job() errors are ignored —
 * best-effort by design here. */
501 static void retroactively_start_dependencies(Unit
*u
) {
506 assert(UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u
)));
508 SET_FOREACH(other
, u
->meta
.dependencies
[UNIT_REQUIRES
], i
)
509 if (!UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other
)))
510 manager_add_job(u
->meta
.manager
, JOB_START
, other
, JOB_REPLACE
, true, NULL
);
512 SET_FOREACH(other
, u
->meta
.dependencies
[UNIT_SOFT_REQUIRES
], i
)
513 if (!UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other
)))
514 manager_add_job(u
->meta
.manager
, JOB_START
, other
, JOB_FAIL
, false, NULL
);
516 SET_FOREACH(other
, u
->meta
.dependencies
[UNIT_REQUISITE
], i
)
517 if (!UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other
)))
518 manager_add_job(u
->meta
.manager
, JOB_START
, other
, JOB_REPLACE
, true, NULL
);
520 SET_FOREACH(other
, u
->meta
.dependencies
[UNIT_WANTS
], i
)
521 if (!UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other
)))
522 manager_add_job(u
->meta
.manager
, JOB_START
, other
, JOB_FAIL
, false, NULL
);
/* Conflicting units get stopped, not started. */
524 SET_FOREACH(other
, u
->meta
.dependencies
[UNIT_CONFLICTS
], i
)
525 if (!UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other
)))
526 manager_add_job(u
->meta
.manager
, JOB_STOP
, other
, JOB_REPLACE
, true, NULL
);
/* Counterpart of retroactively_start_dependencies(): when a unit became
 * inactive without a job requesting it, queue stop jobs for the units
 * that required it (RequiredBy) and are still running. */
529 static void retroactively_stop_dependencies(Unit
*u
) {
534 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u
)));
536 SET_FOREACH(other
, u
->meta
.dependencies
[UNIT_REQUIRED_BY
], i
)
537 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other
)))
538 manager_add_job(u
->meta
.manager
, JOB_STOP
, other
, JOB_REPLACE
, true, NULL
);
/* Called by unit-type backends when the unit's active state changed from
 * 'os' to 'ns'. Updates enter/exit timestamps, resolves or re-schedules
 * any pending job whose outcome depends on the new state, and — when the
 * change was not job-driven — retroactively starts/stops dependencies.
 * NOTE(review): the branch guarding on u->meta.job being non-NULL is in
 * the truncated lines; the job dereferences below presuppose it. */
541 void unit_notify(Unit
*u
, UnitActiveState os
, UnitActiveState ns
) {
543 assert(os
< _UNIT_ACTIVE_STATE_MAX
);
544 assert(ns
< _UNIT_ACTIVE_STATE_MAX
);
/* Backends must not report these nonsensical transitions. */
545 assert(!(os
== UNIT_ACTIVE
&& ns
== UNIT_ACTIVATING
));
546 assert(!(os
== UNIT_INACTIVE
&& ns
== UNIT_DEACTIVATING
));
/* Track when the unit last entered/left the active state. */
551 if (!UNIT_IS_ACTIVE_OR_RELOADING(os
) && UNIT_IS_ACTIVE_OR_RELOADING(ns
))
552 u
->meta
.active_enter_timestamp
= now(CLOCK_REALTIME
);
553 else if (UNIT_IS_ACTIVE_OR_RELOADING(os
) && !UNIT_IS_ACTIVE_OR_RELOADING(ns
))
554 u
->meta
.active_exit_timestamp
= now(CLOCK_REALTIME
);
558 if (u
->meta
.job
->state
== JOB_WAITING
)
560 /* So we reached a different state for this
561 * job. Let's see if we can run it now if it
562 * failed previously due to EAGAIN. */
563 job_schedule_run(u
->meta
.job
);
566 assert(u
->meta
.job
->state
== JOB_RUNNING
);
568 /* Let's check if this state change
569 * constitutes a finished job, or maybe
570 * contradicts a running job and hence needs to
571 * invalidate jobs. */
573 switch (u
->meta
.job
->type
) {
576 case JOB_VERIFY_ACTIVE
:
578 if (UNIT_IS_ACTIVE_OR_RELOADING(ns
)) {
579 job_finish_and_invalidate(u
->meta
.job
, true);
581 } else if (ns
== UNIT_ACTIVATING
)
/* Neither active nor activating: the job failed. */
584 job_finish_and_invalidate(u
->meta
.job
, false);
589 case JOB_RELOAD_OR_START
:
591 if (ns
== UNIT_ACTIVE
) {
592 job_finish_and_invalidate(u
->meta
.job
, true);
594 } else if (ns
== UNIT_ACTIVATING
|| ns
== UNIT_ACTIVE_RELOADING
)
597 job_finish_and_invalidate(u
->meta
.job
, false);
603 case JOB_TRY_RESTART
:
605 if (ns
== UNIT_INACTIVE
) {
606 job_finish_and_invalidate(u
->meta
.job
, true);
608 } else if (ns
== UNIT_DEACTIVATING
)
611 job_finish_and_invalidate(u
->meta
.job
, false);
616 assert_not_reached("Job type unknown");
621 /* If this state change happened without being requested by a
622 * job, then let's retroactively start or stop dependencies */
624 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(os
) && UNIT_IS_ACTIVE_OR_ACTIVATING(ns
))
625 retroactively_start_dependencies(u
);
626 else if (UNIT_IS_ACTIVE_OR_ACTIVATING(os
) && UNIT_IS_INACTIVE_OR_DEACTIVATING(ns
))
627 retroactively_stop_dependencies(u
);
/* Registers 'fd' with the manager's epoll instance on behalf of unit
 * 'u', tagging the event source as MANAGER_FD. Tries EPOLL_CTL_ADD
 * first; if the fd is already registered (presumably EEXIST, checked in
 * the truncated lines) falls back to EPOLL_CTL_MOD to update it. */
630 int unit_watch_fd(Unit
*u
, int fd
, uint32_t events
) {
631 struct epoll_event ev
;
/* Tag so the manager's event loop can dispatch this source type. */
639 ev
.data
.u32
= MANAGER_FD
;
642 if (epoll_ctl(u
->meta
.manager
->epoll_fd
, EPOLL_CTL_ADD
, fd
, &ev
) >= 0)
/* Already watched: modify the existing registration instead. */
646 if (epoll_ctl(u
->meta
.manager
->epoll_fd
, EPOLL_CTL_MOD
, fd
, &ev
) >= 0)
/* Removes 'fd' from the manager's epoll instance. A fd that was never
 * registered (ENOENT) is tolerated; any other failure trips assert_se. */
652 void unit_unwatch_fd(Unit
*u
, int fd
) {
656 assert_se(epoll_ctl(u
->meta
.manager
->epoll_fd
, EPOLL_CTL_DEL
, fd
, NULL
) >= 0 || errno
== ENOENT
);
/* Registers interest in SIGCHLD-style events for 'pid' by mapping it to
 * 'u' in the manager's watch_pids hashmap.
 * NOTE(review): UINT32_TO_PTR(pid) stores the pid as a 32-bit value;
 * on platforms where pid_t is wider this would truncate — confirm
 * against the platform's pid_t definition. */
659 int unit_watch_pid(Unit
*u
, pid_t pid
) {
663 return hashmap_put(u
->meta
.manager
->watch_pids
, UINT32_TO_PTR(pid
), u
);
/* Drops the pid->unit mapping installed by unit_watch_pid(). Removing a
 * pid that was never watched is a silent no-op (hashmap_remove). */
666 void unit_unwatch_pid(Unit
*u
, pid_t pid
) {
670 hashmap_remove(u
->meta
.manager
->watch_pids
, UINT32_TO_PTR(pid
));
/* Arms a one-shot timer for unit 'u' that fires after 'delay' usec,
 * delivered through the manager's epoll loop (tagged MANAGER_TIMER).
 * '*id' carries the timerfd across calls so an existing timer can be
 * reused; on registration failure the freshly created fd is closed. */
673 int unit_watch_timer(Unit
*u
, usec_t delay
, int *id
) {
674 struct epoll_event ev
;
676 struct itimerspec its
;
683 /* This will try to reuse the old timer if there is one */
692 if ((fd
= timerfd_create(CLOCK_MONOTONIC
, TFD_NONBLOCK
|TFD_CLOEXEC
)) < 0)
/* delay == 0 path (guard in truncated lines): fire immediately. */
699 /* Set absolute time in the past, but not 0, since we
700 * don't want to disarm the timer */
701 its
.it_value
.tv_sec
= 0;
702 its
.it_value
.tv_nsec
= 1;
704 flags
= TFD_TIMER_ABSTIME
;
/* Non-zero delay: store it as a relative timespec. */
706 timespec_store(&its
.it_value
, delay
);
710 /* This will also flush the elapse counter */
711 if (timerfd_settime(fd
, flags
, &its
, NULL
) < 0)
717 ev
.data
.u32
= MANAGER_TIMER
;
720 if (epoll_ctl(u
->meta
.manager
->epoll_fd
, EPOLL_CTL_ADD
, fd
, &ev
) < 0)
/* Error path: don't leak the timerfd we just created. */
728 assert_se(close_nointr(fd
) == 0);
/* Disarms and destroys the timer previously set up by unit_watch_timer():
 * removes the timerfd '*id' from the manager's epoll set and closes it.
 * Presumably resets *id in the truncated tail. */
733 void unit_unwatch_timer(Unit
*u
, int *id
) {
740 assert_se(epoll_ctl(u
->meta
.manager
->epoll_fd
, EPOLL_CTL_DEL
, *id
, NULL
) >= 0);
741 assert_se(close_nointr(*id
) == 0);
/* Returns whether job type 'j' makes sense for unit 'u', based on which
 * vtable capabilities (start/reload) the unit's type provides. The
 * switch statement header and some case labels are in the truncated
 * lines; the visible cases group start-like jobs under unit_can_start()
 * and reload-like jobs under unit_can_reload(). */
745 bool unit_job_is_applicable(Unit
*u
, JobType j
) {
747 assert(j
>= 0 && j
< _JOB_TYPE_MAX
);
751 case JOB_VERIFY_ACTIVE
:
757 case JOB_TRY_RESTART
:
758 return unit_can_start(u
);
761 return unit_can_reload(u
);
763 case JOB_RELOAD_OR_START
:
764 return unit_can_reload(u
) && unit_can_start(u
);
767 assert_not_reached("Invalid job type");
/* Adds a dependency of kind 'd' from 'u' to 'other', and the inverse
 * edge (from inverse_table) on 'other' back to 'u', keeping the graph
 * bidirectional. On failure of the second insertion the first is rolled
 * back. Self-dependencies are silently ignored (see comment below). */
771 int unit_add_dependency(Unit
*u
, UnitDependency d
, Unit
*other
) {
/* Maps each dependency kind to the kind stored on the peer; kinds
 * that are themselves inverses map to _UNIT_DEPENDENCY_INVALID and
 * must not be passed in directly (asserted below). */
773 static const UnitDependency inverse_table
[_UNIT_DEPENDENCY_MAX
] = {
774 [UNIT_REQUIRES
] = UNIT_REQUIRED_BY
,
775 [UNIT_SOFT_REQUIRES
] = UNIT_SOFT_REQUIRED_BY
,
776 [UNIT_WANTS
] = UNIT_WANTED_BY
,
777 [UNIT_REQUISITE
] = UNIT_REQUIRED_BY
,
778 [UNIT_SOFT_REQUISITE
] = UNIT_SOFT_REQUIRED_BY
,
779 [UNIT_REQUIRED_BY
] = _UNIT_DEPENDENCY_INVALID
,
780 [UNIT_SOFT_REQUIRED_BY
] = _UNIT_DEPENDENCY_INVALID
,
781 [UNIT_WANTED_BY
] = _UNIT_DEPENDENCY_INVALID
,
782 [UNIT_CONFLICTS
] = UNIT_CONFLICTS
,
783 [UNIT_BEFORE
] = UNIT_AFTER
,
784 [UNIT_AFTER
] = UNIT_BEFORE
789 assert(d
>= 0 && d
< _UNIT_DEPENDENCY_MAX
);
790 assert(inverse_table
[d
] != _UNIT_DEPENDENCY_INVALID
);
793 /* We won't allow dependencies on ourselves. We will not
794 * consider them an error however. */
/* Lazily allocate both direction's sets before inserting. */
798 if ((r
= set_ensure_allocated(&u
->meta
.dependencies
[d
], trivial_hash_func
, trivial_compare_func
)) < 0)
801 if ((r
= set_ensure_allocated(&other
->meta
.dependencies
[inverse_table
[d
]], trivial_hash_func
, trivial_compare_func
)) < 0)
804 if ((r
= set_put(u
->meta
.dependencies
[d
], other
)) < 0)
807 if ((r
= set_put(other
->meta
.dependencies
[inverse_table
[d
]], u
)) < 0) {
/* Keep the graph symmetric: undo the forward edge. */
808 set_remove(u
->meta
.dependencies
[d
], other
);
/* Returns the directory to load unit files from: the $UNIT_PATH
 * environment variable if set and absolute, otherwise (presumably, in
 * the truncated tail) a compiled-in default. */
815 const char *unit_path(void) {
818 if ((e
= getenv("UNIT_PATH")))
819 if (path_is_absolute(e
))
825 int set_unit_path(const char *p
) {
829 /* This is mostly for debug purposes */
831 if (path_is_absolute(p
)) {
832 if (!(c
= strdup(p
)))
835 if (!(cwd
= get_current_dir_name()))
838 r
= asprintf(&c
, "%s/%s", cwd
, p
);
845 if (setenv("UNIT_PATH", c
, 0) < 0) {