RemoveSubgroup(in s subcgroup,
in t flags);
properties:
+ @org.freedesktop.DBus.Property.EmitsChangedSignal("false")
+ readonly u ConcurrencyHardMax = ...;
+ @org.freedesktop.DBus.Property.EmitsChangedSignal("false")
+ readonly u ConcurrencySoftMax = ...;
+ @org.freedesktop.DBus.Property.EmitsChangedSignal("false")
+ readonly u NCurrentlyActive = ...;
@org.freedesktop.DBus.Property.EmitsChangedSignal("false")
readonly s Slice = '...';
@org.freedesktop.DBus.Property.EmitsChangedSignal("false")
<!--method RemoveSubgroup is not documented!-->
+ <!--property ConcurrencyHardMax is not documented!-->
+
+ <!--property ConcurrencySoftMax is not documented!-->
+
+ <!--property NCurrentlyActive is not documented!-->
+
<!--property Slice is not documented!-->
<!--property ControlGroupId is not documented!-->
<variablelist class="dbus-method" generated="True" extra-ref="RemoveSubgroup()"/>
+ <variablelist class="dbus-property" generated="True" extra-ref="ConcurrencyHardMax"/>
+
+ <variablelist class="dbus-property" generated="True" extra-ref="ConcurrencySoftMax"/>
+
+ <variablelist class="dbus-property" generated="True" extra-ref="NCurrentlyActive"/>
+
<variablelist class="dbus-property" generated="True" extra-ref="Slice"/>
<variablelist class="dbus-property" generated="True" extra-ref="ControlGroup"/>
<varname>EffectiveTasksMax</varname>, and
<varname>MemoryZSwapWriteback</varname> were added in version 256.</para>
<para><varname>ManagedOOMMemoryPressureDurationUSec</varname> was added in version 257.</para>
- <para><function>RemoveSubgroup()</function> was added in version 258.</para>
+ <para><varname>ConcurrencyHardMax</varname>,
+ <varname>ConcurrencySoftMax</varname>,
+ <varname>NCurrentlyActive</varname> and
+ <function>RemoveSubgroup()</function> were added in version 258.</para>
</refsect2>
<refsect2>
<title>Scope Unit Objects</title>
"http://www.oasis-open.org/docbook/xml/4.5/docbookx.dtd">
<!-- SPDX-License-Identifier: LGPL-2.1-or-later -->
-<refentry id="systemd.slice">
+<refentry id="systemd.slice" xmlns:xi="http://www.w3.org/2001/XInclude">
<refentryinfo>
<title>systemd.slice</title>
<productname>systemd</productname>
<para>Slice unit files may include [Unit] and [Install] sections, which are described in
<citerefentry><refentrytitle>systemd.unit</refentrytitle><manvolnum>5</manvolnum></citerefentry>.</para>
- <para>Slice files may include a [Slice] section. Options that may be used in this section are shared with other unit types.
- These options are documented in
+ <para>Slice files may include a [Slice] section. Many options that may be used in this section are shared
+ with other unit types. These options are documented in
<citerefentry><refentrytitle>systemd.resource-control</refentrytitle><manvolnum>5</manvolnum></citerefentry>.</para>
+
+ <para>The options specific to the [Slice] section of slice units are the following:</para>
+
+ <variablelist class='unit-directives'>
+ <varlistentry>
+ <term><varname>ConcurrencyHardMax=</varname></term>
+ <term><varname>ConcurrencySoftMax=</varname></term>
+
+ <listitem><para>Configures a hard and a soft limit on the maximum number of units assigned to this
+ slice (or any descendant slices) that may be active at the same time. If the hard limit is reached no
+ further units associated with the slice may be activated, and their activation will fail with an
+ error. If the soft limit is reached any further requested activation of units will be queued, but no
+ immediate error is generated. The queued activation job will remain queued until the number of
+ concurrent active units within the slice is below the limit again.</para>
+
+ <para>If the special value <literal>infinity</literal> is specified, no concurrency limit is
+ enforced. This is the default.</para>
+
+ <para>Note that if multiple start jobs are queued for units, and all their dependencies are fulfilled,
+ they'll be processed in an order that is dependent on the unit type, the CPU weight (for unit types
+ that know the concept, such as services), the nice level (similar), and finally in alphabetical order
+ by the unit name. This may be used to influence dispatching order when using
+ <varname>ConcurrencySoftMax=</varname> to pace concurrency within a slice unit.</para>
+
+ <para>Note that these options have a hierarchical effect: a limit set for a slice unit will apply
+ not only to the units immediately within the slice, but also to all units further down the slice
+ tree. Also note that each sub-slice unit counts as one unit too, and thus when choosing a limit for
+ a slice hierarchy the limit must provide room for both the payload units (i.e. services, mounts, …)
+ and structural units (i.e. slice units), if any are defined.</para>
+
+ <xi:include href="version-info.xml" xpointer="v258"/></listitem>
+ </varlistentry>
+ </variablelist>
+
</refsect1>
<refsect1>
/* SPDX-License-Identifier: LGPL-2.1-or-later */
+#include "bus-get-properties.h"
#include "dbus-cgroup.h"
#include "dbus-slice.h"
+#include "dbus-util.h"
#include "slice.h"
#include "unit.h"
+static int property_get_currently_active(
+ sd_bus *bus,
+ const char *path,
+ const char *interface,
+ const char *property,
+ sd_bus_message *reply,
+ void *userdata,
+ sd_bus_error *error) {
+
+ Slice *s = ASSERT_PTR(userdata);
+
+ assert(bus);
+ assert(reply);
+
+ return sd_bus_message_append(
+ reply,
+ "u",
+ (uint32_t) slice_get_currently_active(s, /* ignore= */ NULL, /* with_pending= */ false));
+}
+
/* D-Bus vtable of the Slice unit type: exposes the concurrency limit settings
 * and the live count of active units. */
const sd_bus_vtable bus_slice_vtable[] = {
        SD_BUS_VTABLE_START(0),
        /* The following are currently constant, but we should change that eventually (i.e. open them up via
         * systemctl set-property), hence they aren't marked as constant */
        SD_BUS_PROPERTY("ConcurrencyHardMax", "u", bus_property_get_unsigned, offsetof(Slice, concurrency_hard_max), 0),
        SD_BUS_PROPERTY("ConcurrencySoftMax", "u", bus_property_get_unsigned, offsetof(Slice, concurrency_soft_max), 0),
        SD_BUS_PROPERTY("NCurrentlyActive", "u", property_get_currently_active, 0, 0),
        SD_BUS_VTABLE_END
};
+static int bus_slice_set_transient_property(
+ Slice *s,
+ const char *name,
+ sd_bus_message *message,
+ UnitWriteFlags flags,
+ sd_bus_error *error) {
+
+ Unit *u = UNIT(s);
+
+ assert(s);
+ assert(name);
+ assert(message);
+
+ flags |= UNIT_PRIVATE;
+
+ if (streq(name, "ConcurrencyHardMax"))
+ return bus_set_transient_unsigned(u, name, &s->concurrency_hard_max, message, flags, error);
+
+ if (streq(name, "ConcurrencySoftMax"))
+ return bus_set_transient_unsigned(u, name, &s->concurrency_soft_max, message, flags, error);
+
+ return 0;
+}
+
int bus_slice_set_property(
Unit *u,
const char *name,
sd_bus_error *error) {
Slice *s = SLICE(u);
+ int r;
assert(name);
assert(u);
- return bus_cgroup_set_property(u, &s->cgroup_context, name, message, flags, error);
+ r = bus_cgroup_set_property(u, &s->cgroup_context, name, message, flags, error);
+ if (r != 0)
+ return r;
+
+ if (u->transient && u->load_state == UNIT_STUB) {
+ /* This is a transient unit, let's allow a little more */
+
+ r = bus_slice_set_transient_property(s, name, message, flags, error);
+ if (r != 0)
+ return r;
+ }
+
+ return 0;
}
int bus_slice_commit_properties(Unit *u) {
[JOB_COLLECTED] = "Unnecessary job was removed for %s.",
[JOB_ONCE] = "Unit %s has been started before and cannot be started again.",
[JOB_FROZEN] = "Cannot start frozen unit %s.",
+ [JOB_CONCURRENCY] = "Hard concurrency limit hit for slice of unit %s.",
};
static const char* const generic_finished_stop_job[_JOB_RESULT_MAX] = {
[JOB_DONE] = "Stopped %s.",
[JOB_COLLECTED] = { LOG_INFO, },
[JOB_ONCE] = { LOG_ERR, ANSI_HIGHLIGHT_RED, " ONCE " },
[JOB_FROZEN] = { LOG_ERR, ANSI_HIGHLIGHT_RED, "FROZEN" },
+ [JOB_CONCURRENCY] = { LOG_ERR, ANSI_HIGHLIGHT_RED, "CONCUR" },
};
static const char* job_done_mid(JobType type, JobResult result) {
r = job_finish_and_invalidate(j, JOB_ONCE, true, false);
else if (r == -EDEADLK)
r = job_finish_and_invalidate(j, JOB_FROZEN, true, false);
+ else if (r == -ETOOMANYREFS)
+ r = job_finish_and_invalidate(j, JOB_CONCURRENCY, /* recursive= */ true, /* already= */ false);
else if (r < 0)
r = job_finish_and_invalidate(j, JOB_FAILED, true, false);
}
goto finish;
}
- if (IN_SET(result, JOB_FAILED, JOB_INVALID, JOB_FROZEN))
+ if (IN_SET(result, JOB_FAILED, JOB_INVALID, JOB_FROZEN, JOB_CONCURRENCY))
j->manager->n_failed_jobs++;
job_uninstall(j);
[JOB_COLLECTED] = "collected",
[JOB_ONCE] = "once",
[JOB_FROZEN] = "frozen",
+ [JOB_CONCURRENCY] = "concurrency",
};
DEFINE_STRING_TABLE_LOOKUP(job_result, JobResult);
JOB_COLLECTED, /* Job was garbage collected, since nothing needed it anymore */
JOB_ONCE, /* Unit was started before, and hence can't be started again */
JOB_FROZEN, /* Unit is currently frozen, so we can't safely operate on it */
+ JOB_CONCURRENCY, /* Slice the unit is in has its hard concurrency limit reached */
_JOB_RESULT_MAX,
_JOB_RESULT_INVALID = -EINVAL,
};
Path.DirectoryMode, config_parse_mode, 0, offsetof(Path, directory_mode)
Path.TriggerLimitIntervalSec, config_parse_sec, 0, offsetof(Path, trigger_limit.interval)
Path.TriggerLimitBurst, config_parse_unsigned, 0, offsetof(Path, trigger_limit.burst)
+Slice.ConcurrencySoftMax, config_parse_concurrency_max, 0, offsetof(Slice, concurrency_soft_max)
+Slice.ConcurrencyHardMax, config_parse_concurrency_max, 0, offsetof(Slice, concurrency_hard_max)
{{ CGROUP_CONTEXT_CONFIG_ITEMS('Slice') }}
{{ CGROUP_CONTEXT_CONFIG_ITEMS('Scope') }}
{{ KILL_CONTEXT_CONFIG_ITEMS('Scope') }}
return config_parse_string(unit, filename, line, section, section_line, lvalue, ltype, path, data, userdata);
}
/* Parses ConcurrencyHardMax=/ConcurrencySoftMax=. An empty value or the special
 * string "infinity" disables the limit (stored as UINT_MAX); any other value is
 * parsed as a plain unsigned integer. */
int config_parse_concurrency_max(
                const char *unit,
                const char *filename,
                unsigned line,
                const char *section,
                unsigned section_line,
                const char *lvalue,
                int ltype,
                const char *rvalue,
                void *data,
                void *userdata) {

        unsigned *concurrency_max = ASSERT_PTR(data);

        if (!isempty(rvalue) && !streq(rvalue, "infinity"))
                return config_parse_unsigned(unit, filename, line, section, section_line, lvalue, ltype, rvalue, data, userdata);

        *concurrency_max = UINT_MAX;
        return 0;
}
+
static int merge_by_names(Unit *u, Set *names, const char *id) {
char *k;
int r;
CONFIG_PARSER_PROTOTYPE(config_parse_memory_pressure_watch);
CONFIG_PARSER_PROTOTYPE(config_parse_cgroup_nft_set);
CONFIG_PARSER_PROTOTYPE(config_parse_mount_node);
+CONFIG_PARSER_PROTOTYPE(config_parse_concurrency_max);
/* gperf prototypes */
const struct ConfigPerfItem* load_fragment_gperf_lookup(const char *key, GPERF_LEN_TYPE length);
};
/* Unit vtable hook: initializes a freshly allocated slice unit with its defaults. */
static void slice_init(Unit *u) {
        Slice *s = ASSERT_PTR(SLICE(u));

        assert(u);
        assert(u->load_state == UNIT_STUB);
        u->ignore_on_isolate = true;
        /* By default no concurrency limits are enforced; UINT_MAX encodes "infinity". */
        s->concurrency_hard_max = UINT_MAX;
        s->concurrency_soft_max = UINT_MAX;
}
static void slice_set_state(Slice *s, SliceState state) {
return unit_cgroup_freezer_action(s, action);
}
+unsigned slice_get_currently_active(Slice *slice, Unit *ignore, bool with_pending) {
+ Unit *u = ASSERT_PTR(UNIT(slice));
+
+ /* If 'ignore' is non-NULL and a unit contained in this slice (or any below) we'll ignore it when
+ * counting. */
+
+ unsigned n = 0;
+ Unit *member;
+ UNIT_FOREACH_DEPENDENCY(member, u, UNIT_ATOM_SLICE_OF) {
+ if (member == ignore)
+ continue;
+
+ if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(member)) ||
+ (with_pending && member->job && IN_SET(member->job->type, JOB_START, JOB_RESTART, JOB_RELOAD)))
+ n++;
+
+ if (member->type == UNIT_SLICE)
+ n += slice_get_currently_active(SLICE(member), ignore, with_pending);
+ }
+
+ return n;
+}
+
+bool slice_concurrency_soft_max_reached(Slice *slice, Unit *ignore) {
+ assert(slice);
+
+ if (slice->concurrency_soft_max != UINT_MAX &&
+ slice_get_currently_active(slice, ignore, /* with_pending= */ false) >= slice->concurrency_soft_max)
+ return true;
+
+ Unit *parent = UNIT_GET_SLICE(UNIT(slice));
+ if (parent)
+ return slice_concurrency_soft_max_reached(SLICE(parent), ignore);
+
+ return false;
+}
+
+bool slice_concurrency_hard_max_reached(Slice *slice, Unit *ignore) {
+ assert(slice);
+
+ if (slice->concurrency_hard_max != UINT_MAX &&
+ slice_get_currently_active(slice, ignore, /* with_pending= */ true) >= slice->concurrency_hard_max)
+ return true;
+
+ Unit *parent = UNIT_GET_SLICE(UNIT(slice));
+ if (parent)
+ return slice_concurrency_hard_max_reached(SLICE(parent), ignore);
+
+ return false;
+}
+
const UnitVTable slice_vtable = {
.object_size = sizeof(Slice),
.cgroup_context_offset = offsetof(Slice, cgroup_context),
SliceState state, deserialized_state;
+ unsigned concurrency_soft_max;
+ unsigned concurrency_hard_max;
+
CGroupContext cgroup_context;
CGroupRuntime *cgroup_runtime;
extern const UnitVTable slice_vtable;
DEFINE_CAST(SLICE, Slice);
+
+unsigned slice_get_currently_active(Slice *slice, Unit *ignore, bool with_pending);
+
+bool slice_concurrency_hard_max_reached(Slice *slice, Unit *ignore);
+bool slice_concurrency_soft_max_reached(Slice *slice, Unit *ignore);
#include "bus-common-errors.h"
#include "bus-error.h"
#include "dbus-unit.h"
+#include "slice.h"
#include "strv.h"
#include "terminal-util.h"
#include "transaction.h"
"Job type %s is not applicable for unit %s.",
job_type_to_string(type), unit->id);
+ if (type == JOB_START) {
+ /* The hard concurrency limit for slice units we already enforce when a job is enqueued */
+ Slice *slice = SLICE(UNIT_GET_SLICE(unit));
+ if (slice && slice_concurrency_hard_max_reached(slice, unit))
+ return sd_bus_error_setf(
+ e, BUS_ERROR_CONCURRENCY_LIMIT_REACHED,
+ "Concurrency limit of the slice unit '%s' (or any of its parents) the unit '%s' is contained in has been reached, refusing start job.",
+ UNIT(slice)->id, unit->id);
+ }
+
/* First add the job. */
ret = transaction_add_one_job(tr, type, unit, &is_new);
if (!ret)
}
/* Errors that aren't really errors:
- * -EALREADY: Unit is already started.
- * -ECOMM: Condition failed
- * -EAGAIN: An operation is already in progress. Retry later.
+ * -EALREADY: Unit is already started.
+ * -ECOMM: Condition failed
+ * -EAGAIN: An operation is already in progress. Retry later.
*
* Errors that are real errors:
- * -EBADR: This unit type does not support starting.
- * -ECANCELED: Start limit hit, too many requests for now
- * -EPROTO: Assert failed
- * -EINVAL: Unit not loaded
- * -EOPNOTSUPP: Unit type not supported
- * -ENOLINK: The necessary dependencies are not fulfilled.
- * -ESTALE: This unit has been started before and can't be started a second time
- * -ENOENT: This is a triggering unit and unit to trigger is not loaded
+ * -EBADR: This unit type does not support starting.
+ * -ECANCELED: Start limit hit, too many requests for now
+ * -EPROTO: Assert failed
+ * -EINVAL: Unit not loaded
+ * -EOPNOTSUPP: Unit type not supported
+ * -ENOLINK: The necessary dependencies are not fulfilled.
+ * -ESTALE: This unit has been started before and can't be started a second time
+ * -EDEADLK: This unit is frozen
+ * -ENOENT: This is a triggering unit and unit to trigger is not loaded
+ * -ETOOMANYREFS: The hard concurrency limit of at least one of the slices the unit is contained in has been reached
*/
int unit_start(Unit *u, ActivationDetails *details) {
UnitActiveState state;
if (!UNIT_VTABLE(u)->start)
return -EBADR;
+ if (UNIT_IS_INACTIVE_OR_FAILED(state)) {
+ Slice *slice = SLICE(UNIT_GET_SLICE(u));
+
+ if (slice) {
+ /* Check hard concurrency limit. Note this is partially redundant, we already checked
+ * this when enqueuing jobs. However, between the time when we enqueued this and the
+ * time we are dispatching the queue the configuration might have changed, hence
+ * check here again */
+ if (slice_concurrency_hard_max_reached(slice, u))
+ return -ETOOMANYREFS;
+
+ /* Also check soft concurrency limit, and return EAGAIN so that the job is kept in
+ the queue */
+ if (slice_concurrency_soft_max_reached(slice, u))
+ return -EAGAIN; /* Try again, keep in queue */
+ }
+ }
+
/* We don't suppress calls to ->start() here when we are already starting, to allow this request to
* be used as a "hurry up" call, for example when the unit is in some "auto restart" state where it
* waits for a holdoff timer to elapse before it will start again. */
return unexpected;
}
+static void unit_recursive_add_to_run_queue(Unit *u) {
+ assert(u);
+
+ if (u->job)
+ job_add_to_run_queue(u->job);
+
+ Unit *child;
+ UNIT_FOREACH_DEPENDENCY(child, u, UNIT_ATOM_SLICE_OF) {
+
+ if (!child->job)
+ continue;
+
+ unit_recursive_add_to_run_queue(child);
+ }
+}
+
+static void unit_check_concurrency_limit(Unit *u) {
+ assert(u);
+
+ Unit *slice = UNIT_GET_SLICE(u);
+ if (!slice)
+ return;
+
+ /* If a unit was stopped, maybe it has pending siblings (or children thereof) that can be started now */
+
+ if (SLICE(slice)->concurrency_soft_max != UINT_MAX) {
+ Unit *sibling;
+ UNIT_FOREACH_DEPENDENCY(sibling, slice, UNIT_ATOM_SLICE_OF) {
+ if (sibling == u)
+ continue;
+
+ unit_recursive_add_to_run_queue(sibling);
+ }
+ }
+
+ /* Also go up the tree. */
+ unit_check_concurrency_limit(slice);
+}
+
void unit_notify(Unit *u, UnitActiveState os, UnitActiveState ns, bool reload_success) {
assert(u);
assert(os < _UNIT_ACTIVE_STATE_MAX);
/* Maybe we can release some resources now? */
unit_submit_to_release_resources_queue(u);
+ /* Maybe the concurrency limits now allow dispatching of another start job in this slice? */
+ unit_check_concurrency_limit(u);
+
} else if (UNIT_IS_ACTIVE_OR_RELOADING(ns)) {
/* Start uphold units regardless if going up was expected or not */
check_uphold_dependencies(u);
SD_BUS_ERROR_MAP(BUS_ERROR_UNIT_GENERATED, EADDRNOTAVAIL),
SD_BUS_ERROR_MAP(BUS_ERROR_UNIT_LINKED, ELOOP),
SD_BUS_ERROR_MAP(BUS_ERROR_JOB_TYPE_NOT_APPLICABLE, EBADR),
+ SD_BUS_ERROR_MAP(BUS_ERROR_CONCURRENCY_LIMIT_REACHED, ETOOMANYREFS),
SD_BUS_ERROR_MAP(BUS_ERROR_NO_ISOLATION, EPERM),
SD_BUS_ERROR_MAP(BUS_ERROR_SHUTTING_DOWN, ECANCELED),
SD_BUS_ERROR_MAP(BUS_ERROR_SCOPE_NOT_RUNNING, EHOSTDOWN),
#define BUS_ERROR_UNIT_LINKED "org.freedesktop.systemd1.UnitLinked"
#define BUS_ERROR_UNIT_BAD_PATH "org.freedesktop.systemd1.UnitBadPath"
#define BUS_ERROR_JOB_TYPE_NOT_APPLICABLE "org.freedesktop.systemd1.JobTypeNotApplicable"
+#define BUS_ERROR_CONCURRENCY_LIMIT_REACHED "org.freedesktop.systemd1.ConcurrencyLimitReached"
#define BUS_ERROR_NO_ISOLATION "org.freedesktop.systemd1.NoIsolation"
#define BUS_ERROR_SHUTTING_DOWN "org.freedesktop.systemd1.ShuttingDown"
#define BUS_ERROR_SCOPE_NOT_RUNNING "org.freedesktop.systemd1.ScopeNotRunning"
log_error("Unit %s was started already once and can't be started again.", d->name);
else if (streq(d->result, "frozen"))
log_error("Cannot perform operation on frozen unit %s.", d->name);
+ else if (streq(d->result, "concurrency"))
+ log_error("Concurrency limit of a slice unit %s is contained in has been reached.", d->name);
else if (endswith(d->name, ".service")) {
/* Job result is unknown. For services, let's also try Result property. */
_cleanup_free_ char *result = NULL;
return -ESTALE;
else if (streq(d->result, "frozen"))
return -EDEADLK;
+ else if (streq(d->result, "concurrency"))
+ return -ETOOMANYREFS;
return log_debug_errno(SYNTHETIC_ERRNO(ENOMEDIUM),
"Unexpected job result '%s' for unit '%s', assuming server side newer than us.",
/* Swap */
const char *what;
+ /* Slice */
+ unsigned concurrency_hard_max;
+ unsigned concurrency_soft_max;
+ unsigned n_currently_active;
+
/* CGroup */
uint64_t memory_current;
uint64_t memory_peak;
putchar('\n');
}
+ if (endswith(i->id, ".slice")) {
+ printf(" Act. Units: %u", i->n_currently_active);
+
+ if (i->concurrency_soft_max != UINT_MAX || i->concurrency_hard_max != UINT_MAX) {
+ fputs(" (", stdout);
+
+ if (i->concurrency_soft_max != UINT_MAX && i->concurrency_soft_max < i->concurrency_hard_max) {
+ printf("soft limit: %u", i->concurrency_soft_max);
+ if (i->concurrency_hard_max != UINT_MAX)
+ fputs("; ", stdout);
+ }
+ if (i->concurrency_hard_max != UINT_MAX)
+ printf("hard limit: %u", i->concurrency_hard_max);
+
+ putchar(')');
+ }
+
+ putchar('\n');
+ }
+
if (i->ip_ingress_bytes != UINT64_MAX && i->ip_egress_bytes != UINT64_MAX)
printf(" IP: %s in, %s out\n",
FORMAT_BYTES(i->ip_ingress_bytes),
{ "SysFSPath", "s", NULL, offsetof(UnitStatusInfo, sysfs_path) },
{ "Where", "s", NULL, offsetof(UnitStatusInfo, where) },
{ "What", "s", NULL, offsetof(UnitStatusInfo, what) },
+ { "ConcurrencyHardMax", "u", NULL, offsetof(UnitStatusInfo, concurrency_hard_max) },
+ { "ConcurrencySoftMax", "u", NULL, offsetof(UnitStatusInfo, concurrency_soft_max) },
+ { "NCurrentlyActive", "u", NULL, offsetof(UnitStatusInfo, n_currently_active) },
{ "MemoryCurrent", "t", NULL, offsetof(UnitStatusInfo, memory_current) },
{ "MemoryPeak", "t", NULL, offsetof(UnitStatusInfo, memory_peak) },
{ "MemorySwapCurrent", "t", NULL, offsetof(UnitStatusInfo, memory_swap_current) },