if (r < 0)
return r;
- r = unit_test_start_limit(u);
- if (r < 0) {
- automount_enter_dead(a, AUTOMOUNT_FAILURE_START_LIMIT_HIT);
- return r;
- }
-
r = unit_acquire_invocation_id(u);
if (r < 0)
return r;
return supported;
}
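+/* Called from unit_start() through the new .test_start_limit vtable hook, before any other start-up
+ * work: if the unit's start rate limit has been hit, the automount is moved into the failed state with
+ * AUTOMOUNT_FAILURE_START_LIMIT_HIT as its result and the start attempt is refused. */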
+static int automount_test_start_limit(Unit *u) {
+ Automount *a = AUTOMOUNT(u);
+ int r;
+
+ assert(a);
+
+ r = unit_test_start_limit(u);
+ if (r < 0) {
+ automount_enter_dead(a, AUTOMOUNT_FAILURE_START_LIMIT_HIT);
+ return r;
+ }
+
+ return 0;
+}
+
static const char* const automount_result_table[_AUTOMOUNT_RESULT_MAX] = {
[AUTOMOUNT_SUCCESS] = "success",
[AUTOMOUNT_FAILURE_RESOURCES] = "resources",
[JOB_FAILED] = "Failed to unset automount %s.",
},
},
+
+ .test_start_limit = automount_test_start_limit,
};
assert(IN_SET(m->state, MOUNT_DEAD, MOUNT_FAILED));
- r = unit_test_start_limit(u);
- if (r < 0) {
- mount_enter_dead(m, MOUNT_FAILURE_START_LIMIT_HIT);
- return r;
- }
-
r = unit_acquire_invocation_id(u);
if (r < 0)
return r;
return exec_context_get_clean_mask(&m->exec_context, ret);
}
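+/* .test_start_limit vtable hook: fail the mount unit early if its start rate limit has been hit. */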
+static int mount_test_start_limit(Unit *u) {
+ Mount *m = MOUNT(u);
+ int r;
+
+ assert(m);
+
+ r = unit_test_start_limit(u);
+ if (r < 0) {
+ mount_enter_dead(m, MOUNT_FAILURE_START_LIMIT_HIT);
+ return r;
+ }
+
+ return 0;
+}
+
static const char* const mount_exec_command_table[_MOUNT_EXEC_COMMAND_MAX] = {
[MOUNT_EXEC_MOUNT] = "ExecMount",
[MOUNT_EXEC_UNMOUNT] = "ExecUnmount",
[JOB_TIMEOUT] = "Timed out unmounting %s.",
},
},
+
+ .test_start_limit = mount_test_start_limit,
};
if (r < 0)
return r;
- r = unit_test_start_limit(u);
- if (r < 0) {
- path_enter_dead(p, PATH_FAILURE_START_LIMIT_HIT);
- return r;
- }
-
r = unit_acquire_invocation_id(u);
if (r < 0)
return r;
p->result = PATH_SUCCESS;
}
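+/* .test_start_limit vtable hook: fail the path unit early if its start rate limit has been hit. */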
+static int path_test_start_limit(Unit *u) {
+ Path *p = PATH(u);
+ int r;
+
+ assert(p);
+
+ r = unit_test_start_limit(u);
+ if (r < 0) {
+ path_enter_dead(p, PATH_FAILURE_START_LIMIT_HIT);
+ return r;
+ }
+
+ return 0;
+}
+
static const char* const path_type_table[_PATH_TYPE_MAX] = {
[PATH_EXISTS] = "PathExists",
[PATH_EXISTS_GLOB] = "PathExistsGlob",
.reset_failed = path_reset_failed,
.bus_set_property = bus_path_set_property,
+
+ .test_start_limit = path_test_start_limit,
};
assert(IN_SET(s->state, SERVICE_DEAD, SERVICE_FAILED));
- /* Make sure we don't enter a busy loop of some kind. */
- r = unit_test_start_limit(u);
- if (r < 0) {
- service_enter_dead(s, SERVICE_FAILURE_START_LIMIT_HIT, false);
- return r;
- }
-
r = unit_acquire_invocation_id(u);
if (r < 0)
return r;
return NULL;
}
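+/* .test_start_limit vtable hook: fail the service early if its start rate limit has been hit. */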
+static int service_test_start_limit(Unit *u) {
+ Service *s = SERVICE(u);
+ int r;
+
+ assert(s);
+
+ /* Make sure we don't enter a busy loop of some kind. */
+ r = unit_test_start_limit(u);
+ if (r < 0) {
+ service_enter_dead(s, SERVICE_FAILURE_START_LIMIT_HIT, false);
+ return r;
+ }
+
+ return 0;
+}
+
static const char* const service_restart_table[_SERVICE_RESTART_MAX] = {
[SERVICE_RESTART_NO] = "no",
[SERVICE_RESTART_ON_SUCCESS] = "on-success",
},
.finished_job = service_finished_job,
},
+
+ .test_start_limit = service_test_start_limit,
};
assert(IN_SET(s->state, SOCKET_DEAD, SOCKET_FAILED));
- r = unit_test_start_limit(u);
- if (r < 0) {
- socket_enter_dead(s, SOCKET_FAILURE_START_LIMIT_HIT);
- return r;
- }
-
r = unit_acquire_invocation_id(u);
if (r < 0)
return r;
return exec_context_get_clean_mask(&s->exec_context, ret);
}
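+/* .test_start_limit vtable hook: fail the socket early if its start rate limit has been hit. */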
+static int socket_test_start_limit(Unit *u) {
+ Socket *s = SOCKET(u);
+ int r;
+
+ assert(s);
+
+ r = unit_test_start_limit(u);
+ if (r < 0) {
+ socket_enter_dead(s, SOCKET_FAILURE_START_LIMIT_HIT);
+ return r;
+ }
+
+ return 0;
+}
+
static const char* const socket_exec_command_table[_SOCKET_EXEC_COMMAND_MAX] = {
[SOCKET_EXEC_START_PRE] = "ExecStartPre",
[SOCKET_EXEC_START_CHOWN] = "ExecStartChown",
[JOB_TIMEOUT] = "Timed out stopping %s.",
},
},
+
+ .test_start_limit = socket_test_start_limit,
};
if (UNIT(other)->job && UNIT(other)->job->state == JOB_RUNNING)
return -EAGAIN;
- r = unit_test_start_limit(u);
- if (r < 0) {
- swap_enter_dead(s, SWAP_FAILURE_START_LIMIT_HIT);
- return r;
- }
-
r = unit_acquire_invocation_id(u);
if (r < 0)
return r;
return exec_context_get_clean_mask(&s->exec_context, ret);
}
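+/* .test_start_limit vtable hook: fail the swap unit early if its start rate limit has been hit. */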
+static int swap_test_start_limit(Unit *u) {
+ Swap *s = SWAP(u);
+ int r;
+
+ assert(s);
+
+ r = unit_test_start_limit(u);
+ if (r < 0) {
+ swap_enter_dead(s, SWAP_FAILURE_START_LIMIT_HIT);
+ return r;
+ }
+
+ return 0;
+}
+
static const char* const swap_exec_command_table[_SWAP_EXEC_COMMAND_MAX] = {
[SWAP_EXEC_ACTIVATE] = "ExecActivate",
[SWAP_EXEC_DEACTIVATE] = "ExecDeactivate",
[JOB_TIMEOUT] = "Timed out deactivating swap %s.",
},
},
+
+ .test_start_limit = swap_test_start_limit,
};
if (r < 0)
return r;
- r = unit_test_start_limit(u);
- if (r < 0) {
- timer_enter_dead(t, TIMER_FAILURE_START_LIMIT_HIT);
- return r;
- }
-
r = unit_acquire_invocation_id(u);
if (r < 0)
return r;
return 0;
}
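+/* .test_start_limit vtable hook: fail the timer early if its start rate limit has been hit. */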
+static int timer_test_start_limit(Unit *u) {
+ Timer *t = TIMER(u);
+ int r;
+
+ assert(t);
+
+ r = unit_test_start_limit(u);
+ if (r < 0) {
+ timer_enter_dead(t, TIMER_FAILURE_START_LIMIT_HIT);
+ return r;
+ }
+
+ return 0;
+}
+
static const char* const timer_base_table[_TIMER_BASE_MAX] = {
[TIMER_ACTIVE] = "OnActiveSec",
[TIMER_BOOT] = "OnBootSec",
.timezone_change = timer_timezone_change,
.bus_set_property = bus_timer_set_property,
+
+ .test_start_limit = timer_test_start_limit,
};
assert(u);
+ /* Check start rate limiting early so that failure conditions don't cause us to enter a busy loop. */
+ if (UNIT_VTABLE(u)->test_start_limit) {
+ int r = UNIT_VTABLE(u)->test_start_limit(u);
+ if (r < 0)
+ return r;
+ }
+
/* If this is already started, then this will succeed. Note that this will even succeed if this unit
* is not startable by the user. This is relied on to detect when we need to wait for units and when
* waiting is finished. */
* of this type will immediately fail. */
bool (*supported)(void);
+ /* If this function is set, it's invoked first as part of starting a unit to allow start rate
+ * limiting checks to occur before we do anything else. */
+ int (*test_start_limit)(Unit *u);
+
/* The strings to print in status messages */
UnitStatusMessageFormats status_message_formats;
--- /dev/null
+../TEST-01-BASIC/Makefile
\ No newline at end of file
--- /dev/null
+#!/usr/bin/env bash
+set -e
+
+TEST_DESCRIPTION="https://github.com/systemd/systemd/issues/17433"
+
+# shellcheck source=test/test-functions
+. "${TEST_BASE_DIR:?}/test-functions"
+
+do_test "$@"
install_dir : testdata_dir)
install_subdir('testsuite-52.units',
install_dir : testdata_dir)
+ install_subdir('testsuite-63.units',
+ install_dir : testdata_dir)
testsuite08_dir = testdata_dir + '/testsuite-08.units'
install_data('testsuite-08.units/-.mount',
[Unit]
Requires=test10.socket
ConditionPathExistsGlob=/tmp/nonexistent
+# Make sure we hit the socket trigger limit in the test and not the service start limit.
+StartLimitIntervalSec=1000
+StartLimitBurst=1000
[Service]
ExecStart=true
--- /dev/null
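+# Triggers the unit of the same name (test63.service) whenever /tmp/test63 exists.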
+[Path]
+PathExists=/tmp/test63
--- /dev/null
+[Unit]
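+# This condition passes in the test, because testsuite-63.service removes /tmp/nonexistent before
+# starting test63.path.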
+ConditionPathExists=!/tmp/nonexistent
+
+[Service]
+ExecStart=true
--- /dev/null
+[Unit]
+Description=TEST-63-ISSUE-17433
+
+[Service]
+ExecStartPre=rm -f /failed /testok
+Type=oneshot
+ExecStart=rm -f /tmp/nonexistent
+ExecStart=systemctl start test63.path
+ExecStart=touch /tmp/test63
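+# test63.path keeps re-triggering test63.service for as long as /tmp/test63 exists, so the service is
+# expected to run into its start rate limit.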
+# Make sure systemd has sufficient time to hit the start limit for test63.service.
+ExecStart=sleep 2
+ExecStart=sh -x -c 'test "$(systemctl show test63.service -P ActiveState)" = failed'
+ExecStart=sh -x -c 'test "$(systemctl show test63.service -P Result)" = start-limit-hit'
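+# The path unit itself is expected to fail with the result used when its triggered unit runs into its
+# start rate limit.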
+ExecStart=sh -x -c 'test "$(systemctl show test63.path -P ActiveState)" = failed'
+ExecStart=sh -x -c 'test "$(systemctl show test63.path -P Result)" = unit-start-limit-hit'
+ExecStart=sh -x -c 'echo OK >/testok'