fetch-depth: 0
- name: Differential ShellCheck
- uses: redhat-plumbers-in-action/differential-shellcheck@a14889568f6210b361eb29e16f3b07f512fca846
+ uses: redhat-plumbers-in-action/differential-shellcheck@1b1b75e42f0694c1012228513b21617a748c866e
with:
token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b
- name: Parse issue form
- uses: stefanbuck/github-issue-parser@fc06b2a0adc5ccb7702ab6b641fd8a742a5e9cc0
+ uses: stefanbuck/github-issue-parser@f80b14f78892a66d7a35bba44f0a7d63bee03602
id: issue-parser
with:
template-path: .github/ISSUE_TEMPLATE/${{ matrix.template }}
permissions:
pull-requests: write
steps:
- - uses: actions/labeler@9fd24f1f9d6ceb64ba34d181b329ee72f99978a0
+ - uses: actions/labeler@e54e5b338fbd6e6cdb5d60f51c22335fc57c401e
with:
repo-token: "${{ secrets.GITHUB_TOKEN }}"
configuration-path: .github/labeler.yml
-meson==0.63.1 \
- --hash=sha256:06fe13297213d6ff0121c5d5aab25a56ef938ffec57414ed6086fda272cb65e9 \
- --hash=sha256:b90d3ff3ba5ce4d192a7441f288966d904b7ddb9cb20fe0c8929e871aef6a638
-ninja==1.10.2.3 \
- --hash=sha256:0560eea57199e41e86ac2c1af0108b63ae77c3ca4d05a9425a750e908135935a \
- --hash=sha256:21a1d84d4c7df5881bfd86c25cce4cf7af44ba2b8b255c57bc1c434ec30a2dfc \
- --hash=sha256:279836285975e3519392c93c26e75755e8a8a7fafec9f4ecbb0293119ee0f9c6 \
- --hash=sha256:29570a18d697fc84d361e7e6330f0021f34603ae0fcb0ef67ae781e9814aae8d \
- --hash=sha256:5ea785bf6a15727040835256577239fa3cf5da0d60e618c307aa5efc31a1f0ce \
- --hash=sha256:688167841b088b6802e006f911d911ffa925e078c73e8ef2f88286107d3204f8 \
- --hash=sha256:6bd76a025f26b9ae507cf8b2b01bb25bb0031df54ed685d85fc559c411c86cf4 \
- --hash=sha256:740d61fefb4ca13573704ee8fe89b973d40b8dc2a51aaa4e9e68367233743bb6 \
- --hash=sha256:840a0b042d43a8552c4004966e18271ec726e5996578f28345d9ce78e225b67e \
- --hash=sha256:84be6f9ec49f635dc40d4b871319a49fa49b8d55f1d9eae7cd50d8e57ddf7a85 \
- --hash=sha256:9ca8dbece144366d5f575ffc657af03eb11c58251268405bc8519d11cf42f113 \
- --hash=sha256:cc8b31b5509a2129e4d12a35fc21238c157038022560aaf22e49ef0a77039086 \
- --hash=sha256:d5e0275d28997a750a4f445c00bdd357b35cc334c13cdff13edf30e544704fbd \
- --hash=sha256:e1b86ad50d4e681a7dbdff05fc23bb52cb773edb90bc428efba33fa027738408
+meson==0.63.2 \
+ --hash=sha256:16222f17ef76be0542c91c07994f9676ae879f46fc21c0c786a21ef2cb518bbf \
+ --hash=sha256:64a83ef257b2962b52c8b07ad9ec536c2de1b72fd9f14bcd9c21fe45730edd46
+ninja==1.10.2.4 \
+ --hash=sha256:24e3bc4713667a9a1d15484ad2bb77bbaedb1e6d45254cb03f7964b8b497231a \
+ --hash=sha256:251fb21cd6691accd0d95e28721ad8a50a6ec0ace97f9a8de3976f39301686f6 \
+ --hash=sha256:327c319176c5a4af21908b727b776e9f5caf275680403da632821ba071fd6296 \
+ --hash=sha256:3300f3f37d62dcc7bdd19284dff9eaed7d629f4ed2725019a6ce3291c655fb83 \
+ --hash=sha256:34c8e44f6e2e35ff9444994bfc7bf451c8d4bf15e31ad1e3ef7b06f78647b35b \
+ --hash=sha256:3fa6e69838613815c80abcaca34681c5b7cf15bf921543e518f5c918d7098bb7 \
+ --hash=sha256:5b973b1ce7075e9091db290adbbf93ba9066a94f97c369d0ff631251c633e81b \
+ --hash=sha256:685daebd1bc21480256351000a01dfb520636832fa65efc9f121474ff640e3df \
+ --hash=sha256:b0350784b37c5080223ec1bedc507153cc714b502c17dd5a64552e930b0dca25 \
+ --hash=sha256:b12cfed6382e510a597b3d08d7eec96664f7c8b8ee436eef645736c453d1c135 \
+ --hash=sha256:b264085e409533aecb57040c5e90fbfb64db91a61575c7e637411780446412fa \
+ --hash=sha256:b86a4e4ba2ed999d8b10f2b3f2ed56d7457ff647268f4098dd0b63dd145ede32 \
+ --hash=sha256:da7a6d9b2ed2018165fbf90068e2c64da08f2568c700fdb8abea07a245dc4664 \
+ --hash=sha256:ea245943a9849e5b1ebd74c1a4c1edd2c9801b62c0386165c7ac47623e353627
Features:
+* tree-wide: convert as much as possible over to use sd_event_set_signal_exit(), instead
+ of manually hooking into SIGINT/SIGTERM
+
+* tree-wide: convert as much as possible over to SD_EVENT_SIGNAL_PROCMASK
+ instead of manual blocking.
+
* sd-boot: for each installed OS, grey out older entries (i.e. all but the
newest), to indicate they are obsolete
* coredump:
- save coredump in Windows/Mozilla minidump format
- when truncating coredumps, also log the full size that the process had, and make a metadata field so we can report truncated coredumps
- - when using package notes, compact output to one-line-per-module:
- libfoo.so, build-id DEADF12340, foo-libs-33.4-1.fc12
+ - add examples for other distros in ELF_PACKAGE_METADATA
* support crash reporting operation modes (https://live.gnome.org/GnomeOS/Design/Whiteboards/ProblemReporting)
## Filing Issues
-* We use [GitHub Issues](https://github.com/systemd/systemd/issues) **exclusively** for tracking **bugs** and **feature** **requests** (RFEs) of systemd. If you are looking for help, please contact [systemd-devel mailing list](https://lists.freedesktop.org/mailman/listinfo/systemd-devel) instead.
+* We use [GitHub Issues](https://github.com/systemd/systemd/issues) **exclusively** for tracking **bugs** and **feature** **requests** (RFEs) of systemd. If you are looking for help, please try the forums of your distribution first, or [systemd-devel mailing list](https://lists.freedesktop.org/mailman/listinfo/systemd-devel) for general questions about systemd.
* We only track bugs in the **two** **most** **recently** **released** (non-rc) **versions** of systemd in the GitHub Issue tracker. If you are using an older version of systemd, please contact your distribution's bug tracker instead (see below). See [GitHub Release Page](https://github.com/systemd/systemd/releases) for the list of most recent releases.
* When filing a feature request issue (RFE), please always check first if the newest upstream version of systemd already implements the feature, and whether there's already an issue filed for your feature by someone else.
* When filing an issue, specify the **systemd** **version** you are experiencing the issue with. Also, indicate which **distribution** you are using.
* Make sure to post PRs only relative to a recent tip of the `main` branch.
* Follow our [Coding Style](CODING_STYLE.md) when contributing code. This is a requirement for all code we merge.
* Please make sure to test your change before submitting the PR. See the [Hacking guide](HACKING.md) for details on how to do this.
-* Make sure to run the test suite locally, before posting your PR. We use a CI system, meaning we don't even look at your PR, if the build and tests don't pass.
+* Make sure to run the test suite locally, before posting your PR. We use a CI system, meaning we don't even look at your PR if the build and tests don't pass.
* If you need to update the code in an existing PR, force-push into the same branch, overriding old commits with new versions.
-* After you have pushed a new version, add a comment. If you are a member of the systemd project on GitHub, remove the `reviewed/needs-rework` label.
-* If you are copying existing code from another source (eg: a compat header), please make sure the license is compatible with LGPL-2.1-or-later. If the license is not LGPL-2.1-or-later, please add a note to LICENSES/README.md.
+* After you have pushed a new version, add a comment explaining the latest changes. If you are a member of the systemd project on GitHub, remove the `reviewed/needs-rework`/`ci-fails/needs-rework`/`needs-rebase` labels.
+* If you are copying existing code from another source (eg: a compat header), please make sure the license is compatible with `LGPL-2.1-or-later`. If the license is not `LGPL-2.1-or-later`, please add a note to [`LICENSES/README.md`](https://github.com/systemd/systemd/blob/main/LICENSES/README.md).
* If the pull request stalls without review, post a ping in a comment after some time has passed. We are always short on reviewer time, and pull requests which haven't seen any recent activity can be easily forgotten.
+## Reviewing Pull Requests
+
+* See [filtered list of pull requests](https://github.com/systemd/systemd/pulls?q=is%3Aopen+is%3Apr+-label%3A%22reviewed%2Fneeds-rework+%F0%9F%94%A8%22+-label%3Aneeds-rebase+-label%3Agood-to-merge%2Fwith-minor-suggestions+-label%3A%22good-to-merge%2Fwaiting-for-ci+%F0%9F%91%8D%22+-label%3Apostponed+-label%3A%22needs-reporter-feedback+%E2%9D%93%22+-label%3A%22dont-merge+%F0%9F%92%A3%22+-label%3A%22ci-fails%2Fneeds-rework+%F0%9F%94%A5%22+sort%3Aupdated-desc) for requests that are ready for review.
+* After performing a review, set
+
+ * `reviewed/needs-rework` if the pull request needs significant changes
+ * `ci-fails/needs-rework` if the automatic tests fail and the failure is relevant to the pull request
+ * `ci-failure-appears-unrelated` if the test failures seem irrelevant
+ * `needs-rebase` if the pull request needs a rebase because of conflicts
+ * `good-to-merge/waiting-for-ci` if the pull request should be merged without further review
+ * `good-to-merge/with-minor-suggestions` if the pull request should be merged after an update without going through another round of reviews
+
+Unfortunately only members of the `systemd` organization on github can change labels.
+If your pull request is mislabeled, make a comment in the pull request and somebody will fix it.
+Reviews from non-members are still welcome.
+
## Final Words
We'd like to apologize in advance if we are not able to process and reply to your issue or PR right-away. We have a lot of work to do, but we are trying our best!
# Backward Compatibility And External Dependencies
We strive to keep backward compatibility where possible and reasonable. The following are general guidelines, not hard
-rules, and case-by-case exceptions might be applied at the discretion of the maintainers. The current set of build time
-and runtime dependencies are documented in the [README](../README).
+rules, and case-by-case exceptions might be applied at the discretion of the maintainers. The current set of build-time
+and runtime dependencies are documented in the [README](https://github.com/systemd/systemd/blob/main/README).
## New features
It is fine for new features/functionality/tools/daemons to require bleeding edge external dependencies, provided there
-are runtime and build time graceful fallbacks (e.g.: daemon will not be built, runtime functionality will be skipped with
-clear log message).
+are runtime and build-time graceful fallbacks (e.g.: a daemon will not be built, runtime functionality will be skipped with a clear log message).
In case a new feature is added to both `systemd` and one of its dependencies, we expect the corresponding feature code to
be merged upstream in the dependency before accepting our side of the implementation.
Making use of new kernel syscalls can be achieved through compat wrappers in our tree (see: `src/basic/missing_syscall_def.h`),
## External Build/Runtime Dependencies
-It is often tempting to bump external dependencies minimum versions to cut cruft, and in general it's an essential part
-of the maintenance process. But as a generic rule, existing dependencies should not be bumped without very strong
+It is often tempting to bump external dependencies' minimum versions to cut cruft, and in general it's an essential part
+of the maintenance process. But as a general rule, existing dependencies should not be bumped without strong
reasons. When possible, we try to keep compatibility with the most recent LTS releases of each mainstream distribution
for optional components, and with all currently maintained (i.e.: not EOL) LTS releases for core components. When in
doubt, ask before committing time to work on contributions if it's not clear that cutting support would be obviously
functionality, especially for core components. It is not uncommon, for example, for embedded systems to be stuck on older
kernel versions due to hardware requirements, so do not assume everybody is running with latest and greatest at all times.
In general, [currently maintained LTS branches](https://www.kernel.org/category/releases.html) should keep being supported
-for existing functionality, especially for core components.
+for existing functionality.
## `libsystemd.so`
-`libsystemd.so` is a shared public library, so breaking ABI/API compatibility creates a lot of work for its users, and should
-always be avoided apart from the most extreme circumstances. For example, always add a new interface instead of modifying
+`libsystemd.so` is a shared public library, so breaking ABI/API compatibility would create a lot of work for everyone, and is not allowed. Instead, always add a new interface rather than modifying
the signature of an existing function. It is fine to mark an interface as deprecated to gently nudge users toward a newer one,
-but in general support for the old one should be maintained whenever possible.
+but support for the old one must be maintained.
Symbol versioning and the compiler's deprecated attribute should be used when managing the lifetime of a public interface.
## `libudev.so`
evdev:atkbd:dmi:bvn*:bvr*:bd*:svnHP:pnVictus*:pvr*
KEYBOARD_KEY_a1=!calc
+# HP Elite Dragonfly G2
+evdev:atkbd:dmi:bvn*:bvr*:bd*:svnHP*:pnHPEliteDragonflyG2*:pvr*
+ KEYBOARD_KEY_f8=unknown # rfkill is also reported by HP Wireless hotkeys
+ KEYBOARD_KEY_68=prog1 # Fn+F12 HP Programmable Key
+
+# HP Elite Dragonfly G2
+evdev:name:Intel HID events:dmi:bvn*:bvr*:bd*:svnHP*:pnHPEliteDragonflyG2*:pvr*
+ KEYBOARD_KEY_08=unknown # rfkill is also reported by HP Wireless hotkeys
+
##########################################################
# Huawei
##########################################################
KEYBOARD_KEY_17=prog1
KEYBOARD_KEY_1a=f20 # Microphone mute button; should be micmute
KEYBOARD_KEY_45=bookmarks
- KEYBOARD_KEY_46=prog2
+ KEYBOARD_KEY_46=prog2 # Fn + PrtSc, on Windows: Snipping tool
+ KEYBOARD_KEY_4a=prog3 # Fn + Right shift, on Windows: No idea
+ KEYBOARD_KEY_4b=chat # Fn + F9, on Windows: Notifications panel key
+ KEYBOARD_KEY_4c=connect # Fn + F10, on Windows: Answer (Teams) call
+ KEYBOARD_KEY_4d=cancel # Fn + F11, on Windows: Hangup/decline (Teams) call
# ThinkPad Keyboard with TrackPoint
evdev:input:b0003v17EFp6009*
<para>The <function>AttachImageWithExtensions()</function>,
<function>DetachImageWithExtensions()</function> and
<function>ReattachImageWithExtensions()</function> methods take in options as flags instead of
- booleans to allow for extendability, defined as follows:</para>
+ booleans to allow for extendability. <varname>SD_SYSTEMD_PORTABLE_FORCE</varname> will cause
+ safety checks that ensure the units are not running while the new image is attached or detached
+ to be skipped. They are defined as follows:</para>
<programlisting>
#define SD_SYSTEMD_PORTABLE_RUNTIME (UINT64_C(1) << 0)
+#define SD_SYSTEMD_PORTABLE_FORCE (UINT64_C(1) << 1)
</programlisting>
</refsect2>
readonly s DNSStubListener = '...';
@org.freedesktop.DBus.Property.EmitsChangedSignal("false")
readonly s ResolvConfMode = '...';
- readonly b Monitor = ...;
};
interface org.freedesktop.DBus.Peer { ... };
interface org.freedesktop.DBus.Introspectable { ... };
<variablelist class="dbus-property" generated="True" extra-ref="ResolvConfMode"/>
- <variablelist class="dbus-property" generated="True" extra-ref="Monitor"/>
-
<!--End of Autogenerated section-->
<refsect2>
enabled. Possible values are <literal>yes</literal> (enabled), <literal>no</literal> (disabled),
<literal>udp</literal> (only the UDP listener is enabled), and <literal>tcp</literal> (only the TCP
listener is enabled).</para>
-
- <para>The <varname>Monitor</varname> boolean property reports whether DNS monitoring is enabled.</para>
</refsect2>
</refsect1>
<!--method ListUnitsByNames is not documented!-->
- <!--method Dump is not documented!-->
-
- <!--method DumpByFileDescriptor is not documented!-->
-
<!--method ListUnitFilesByPatterns is not documented!-->
<!--method PresetUnitFilesWithMode is not documented!-->
all clients which previously asked for <function>Subscribe()</function> either closed their connection
to the bus or invoked <function>Unsubscribe()</function>.</para>
+ <para><function>Dump()</function> returns a text dump of the internal service manager state. This is a
+ privileged, low-level debugging interface only. The returned string is supposed to be readable
+ exclusively by developers, and not programmatically. There's no interface stability on the returned
+ string guaranteed, and new fields may be added any time, and old fields removed. The general structure
+ may be rearranged drastically between releases. This is exposed by
+ <citerefentry><refentrytitle>systemd-analyze</refentrytitle><manvolnum>1</manvolnum></citerefentry>'s
+ <command>dump</command> command. The <function>DumpByFileDescriptor()</function> method is identical to
+ <function>Dump()</function> but returns the data serialized into a file descriptor (the client should
+ read the text data from it until hitting EOF). Given the size limits on D-Bus messages and the possibly
+ large size of the returned string, <function>DumpByFileDescriptor()</function> is usually the
+ preferable interface, since it ensures the data can be passed reliably from the service manager to the
+ client. (Note though that <function>DumpByFileDescriptor()</function> cannot work when communicating
+ with the service manager remotely, as file descriptors are strictly local to a system.)</para>
+
<para><function>Reload()</function> may be invoked to reload all unit files.</para>
<para><function>Reexecute()</function> may be invoked to reexecute the main manager process. It will
and detaching.</para></listitem>
</varlistentry>
+ <varlistentry>
+ <term><option>--force</option></term>
+
+ <listitem><para>Skip safety checks and attach or detach images (with extensions) without first ensuring
+ that the units are not running.</para></listitem>
+ </varlistentry>
+
<xi:include href="user-system-options.xml" xpointer="host" />
<xi:include href="user-system-options.xml" xpointer="machine" />
automatically, an explicit reverting is not necessary in that case.</para></listitem>
</varlistentry>
+ <varlistentry>
+ <term><command>monitor</command></term>
+
+      <listitem><para>Show a continuous stream of local client resolution queries and their
+ responses. Whenever a local query is completed the query's DNS resource lookup key and resource
+ records are shown. Note that this displays queries issued locally only, and does not immediately
+ relate to DNS requests submitted to configured DNS servers or the LLMNR or MulticastDNS zones, as
+ lookups may be answered from the local cache, or might result in multiple DNS transactions (for
+ example to validate DNSSEC information). If CNAME/CNAME redirection chains are followed, a separate
+ query will be displayed for each element of the chain. Use <option>--json=</option> to enable JSON
+ output.</para></listitem>
+ </varlistentry>
+
<xi:include href="systemctl.xml" xpointer="log-level" />
</variablelist>
</refsect1>
query response are shown. Otherwise, this output is suppressed.</para></listitem>
</varlistentry>
+ <xi:include href="standard-options.xml" xpointer="json" />
+
+ <varlistentry>
+ <term><option>-j</option></term>
+
+ <listitem><para>Short for <option>--json=auto</option></para></listitem>
+ </varlistentry>
+
+ <xi:include href="standard-options.xml" xpointer="no-pager" />
<xi:include href="standard-options.xml" xpointer="help" />
<xi:include href="standard-options.xml" xpointer="version" />
- <xi:include href="standard-options.xml" xpointer="no-pager" />
</variablelist>
</refsect1>
''],
['sd_event_add_signal',
'3',
- ['sd_event_signal_handler_t', 'sd_event_source_get_signal'],
+ ['SD_EVENT_SIGNAL_PROCMASK',
+ 'sd_event_signal_handler_t',
+ 'sd_event_source_get_signal'],
''],
['sd_event_add_time',
'3',
''],
['sd_event_now', '3', [], ''],
['sd_event_run', '3', ['sd_event_loop'], ''],
+ ['sd_event_set_signal_exit', '3', [], ''],
['sd_event_set_watchdog', '3', ['sd_event_get_watchdog'], ''],
['sd_event_source_get_event', '3', [], ''],
['sd_event_source_get_pending', '3', [], ''],
<refname>sd_event_add_signal</refname>
<refname>sd_event_source_get_signal</refname>
<refname>sd_event_signal_handler_t</refname>
+ <refname>SD_EVENT_SIGNAL_PROCMASK</refname>
<refpurpose>Add a UNIX process signal event source to an event
loop</refpurpose>
<funcsynopsisinfo><token>typedef</token> struct sd_event_source sd_event_source;</funcsynopsisinfo>
+ <funcsynopsisinfo><constant>SD_EVENT_SIGNAL_PROCMASK</constant></funcsynopsisinfo>
+
<funcprototype>
<funcdef>typedef int (*<function>sd_event_signal_handler_t</function>)</funcdef>
<paramdef>sd_event_source *<parameter>s</parameter></paramdef>
<funcdef>int <function>sd_event_source_get_signal</function></funcdef>
<paramdef>sd_event_source *<parameter>source</parameter></paramdef>
</funcprototype>
-
</funcsynopsis>
</refsynopsisdiv>
<refsect1>
<title>Description</title>
- <para><function>sd_event_add_signal()</function> adds a new UNIX
- process signal event source to an event loop. The event loop
- object is specified in the <parameter>event</parameter> parameter,
- and the event source object is returned in the
- <parameter>source</parameter> parameter. The
- <parameter>signal</parameter> parameter specifies the numeric
- signal to be handled (see <citerefentry
+ <para><function>sd_event_add_signal()</function> adds a new UNIX process signal event source to an event
+ loop. The event loop object is specified in the <parameter>event</parameter> parameter, and the event
+ source object is returned in the <parameter>source</parameter> parameter. The
+ <parameter>signal</parameter> parameter specifies the numeric signal to be handled (see <citerefentry
project='man-pages'><refentrytitle>signal</refentrytitle><manvolnum>7</manvolnum></citerefentry>).</para>
<para>The <parameter>handler</parameter> parameter is a function to call when the signal is received or
<constant>NULL</constant>. The handler function will be passed the <parameter>userdata</parameter>
pointer, which may be chosen freely by the caller. The handler also receives a pointer to a
<structname>signalfd_siginfo</structname> structure containing information about the received signal. See
- <citerefentry project='man-pages'><refentrytitle>signalfd</refentrytitle><manvolnum>2</manvolnum></citerefentry>
- for further information. The handler may return negative to signal an error (see below), other return
- values are ignored. If <parameter>handler</parameter> is <constant>NULL</constant>, a default handler
- that calls
+ <citerefentry
+ project='man-pages'><refentrytitle>signalfd</refentrytitle><manvolnum>2</manvolnum></citerefentry> for
+ further information. The handler may return negative to signal an error (see below), other return values
+ are ignored. If <parameter>handler</parameter> is <constant>NULL</constant>, a default handler that calls
<citerefentry><refentrytitle>sd_event_exit</refentrytitle><manvolnum>3</manvolnum></citerefentry> will be
used.</para>
threads before this function is called (using <citerefentry
project='man-pages'><refentrytitle>sigprocmask</refentrytitle><manvolnum>2</manvolnum></citerefentry> or
<citerefentry
- project='man-pages'><refentrytitle>pthread_sigmask</refentrytitle><manvolnum>3</manvolnum></citerefentry>).</para>
-
- <para>By default, the event source is enabled permanently
- (<constant>SD_EVENT_ON</constant>), but this may be changed with
+ project='man-pages'><refentrytitle>pthread_sigmask</refentrytitle><manvolnum>3</manvolnum></citerefentry>). For
+ convenience, if the special flag <constant>SD_EVENT_SIGNAL_PROCMASK</constant> is ORed into the specified
+ signal, the signal will be automatically masked as necessary for the calling thread. Note that this only
+ works reliably if the signal is already masked in all other threads of the process, or if there are no
+ other threads at the moment of invocation.</para>
+
+ <para>By default, the event source is enabled permanently (<constant>SD_EVENT_ON</constant>), but this
+ may be changed with
<citerefentry><refentrytitle>sd_event_source_set_enabled</refentrytitle><manvolnum>3</manvolnum></citerefentry>.
- If the handler function returns a negative error code, it will either be disabled after the
- invocation, even if the <constant>SD_EVENT_ON</constant> mode was requested before, or it will cause the
- loop to terminate, see
+ If the handler function returns a negative error code, it will either be disabled after the invocation,
+ even if the <constant>SD_EVENT_ON</constant> mode was requested before, or it will cause the loop to
+ terminate, see
<citerefentry><refentrytitle>sd_event_source_set_exit_on_failure</refentrytitle><manvolnum>3</manvolnum></citerefentry>.
</para>
--- /dev/null
+<?xml version='1.0'?>
+<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN"
+ "http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd">
+<!-- SPDX-License-Identifier: LGPL-2.1-or-later -->
+
+<refentry id="sd_event_set_signal_exit" xmlns:xi="http://www.w3.org/2001/XInclude">
+
+ <refentryinfo>
+ <title>sd_event_set_signal_exit</title>
+ <productname>systemd</productname>
+ </refentryinfo>
+
+ <refmeta>
+ <refentrytitle>sd_event_set_signal_exit</refentrytitle>
+ <manvolnum>3</manvolnum>
+ </refmeta>
+
+ <refnamediv>
+ <refname>sd_event_set_signal_exit</refname>
+
+ <refpurpose>Automatically leave event loop on <constant>SIGINT</constant> and <constant>SIGTERM</constant></refpurpose>
+ </refnamediv>
+
+ <refsynopsisdiv>
+ <funcsynopsis>
+ <funcsynopsisinfo>#include <systemd/sd-event.h></funcsynopsisinfo>
+
+ <funcprototype>
+ <funcdef>int <function>sd_event_set_signal_exit</function></funcdef>
+ <paramdef>sd_event *<parameter>event</parameter></paramdef>
+ <paramdef>int b</paramdef>
+ </funcprototype>
+
+ </funcsynopsis>
+ </refsynopsisdiv>
+
+ <refsect1>
+ <title>Description</title>
+
+ <para><function>sd_event_set_signal_exit()</function> may be used to ensure the event loop terminates
+ once a <constant>SIGINT</constant> or <constant>SIGTERM</constant> signal is received. It is a
+ convenience wrapper around invocations of
+ <citerefentry><refentrytitle>sd_event_add_signal</refentrytitle><manvolnum>3</manvolnum></citerefentry>
+ for both signals. The two signals are automatically added to the calling thread's signal mask (if a
+ program is multi-threaded care should be taken to either invoke this function before the first thread is
+ started or to manually block the two signals process-wide first).</para>
+
+ <para>If the parameter <parameter>b</parameter> is specified as true, the event loop will terminate on
+ <constant>SIGINT</constant> and <constant>SIGTERM</constant>. If specified as false, it will no
+ longer. When this functionality is turned off the calling thread's signal mask is restored to match the
+ state before it was turned on, for the two signals. By default the two signals are not handled by the
+ event loop, and Linux's default signal handling for them is in effect.</para>
+
+ <para>It's customary for UNIX programs to exit on either of these two signals, hence it's typically a
+ good idea to enable this functionality for the main event loop of a program.</para>
+ </refsect1>
+
+ <refsect1>
+ <title>Return Value</title>
+
+ <para><function>sd_event_set_signal_exit()</function> returns a positive non-zero value when the setting
+ was successfully changed. It returns zero when the specified setting was already in effect. On failure,
+ it returns a negative errno-style error code.</para>
+
+ <refsect2>
+ <title>Errors</title>
+
+ <para>Returned errors may indicate the following problems:</para>
+
+ <variablelist>
+
+ <varlistentry>
+ <term><constant>-ECHILD</constant></term>
+
+ <listitem><para>The event loop has been created in a different process.</para></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><constant>-EINVAL</constant></term>
+
+ <listitem><para>The passed event loop object was invalid.</para></listitem>
+ </varlistentry>
+
+ </variablelist>
+ </refsect2>
+ </refsect1>
+
+ <xi:include href="libsystemd-pkgconfig.xml" />
+
+ <refsect1>
+ <title>See Also</title>
+
+ <para>
+ <citerefentry><refentrytitle>systemd</refentrytitle><manvolnum>1</manvolnum></citerefentry>,
+ <citerefentry><refentrytitle>sd-event</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
+ <citerefentry><refentrytitle>sd_event_new</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
+ <citerefentry><refentrytitle>sd_event_add_signal</refentrytitle><manvolnum>3</manvolnum></citerefentry>
+ </para>
+ </refsect1>
+
+</refentry>
<para>Processes will first be terminated via <constant>SIGTERM</constant> (unless the signal to send
is changed via <varname>KillSignal=</varname> or <varname>RestartKillSignal=</varname>). Optionally,
this is immediately followed by a <constant>SIGHUP</constant> (if enabled with
- <varname>SendSIGHUP=</varname>). If processes still remain after the main process of a unit has
- exited or the delay configured via the <varname>TimeoutStopSec=</varname> has passed, the termination
- request is repeated with the <constant>SIGKILL</constant> signal or the signal specified via
+ <varname>SendSIGHUP=</varname>). If processes still remain after:
+ <itemizedlist>
+ <listitem><para>the main process of a unit has exited (applies to <varname>KillMode=</varname>:
+ <option>mixed</option>)</para></listitem>
+ <listitem><para>the delay configured via the <varname>TimeoutStopSec=</varname> has passed
+ (applies to <varname>KillMode=</varname>: <option>control-group</option>, <option>mixed</option>,
+ <option>process</option>)</para></listitem>
+ </itemizedlist>
+
+ the termination request is repeated with the <constant>SIGKILL</constant> signal or the signal specified via
<varname>FinalKillSignal=</varname> (unless this is disabled via the <varname>SendSIGKILL=</varname>
option). See <citerefentry><refentrytitle>kill</refentrytitle><manvolnum>2</manvolnum></citerefentry>
for more information.</para>
endif
want_bpf_framework = get_option('bpf-framework')
+bpf_compiler = get_option('bpf-compiler')
bpf_framework_required = want_bpf_framework == 'true'
-libbpf = dependency('libbpf', required : bpf_framework_required, version : '>= 0.2')
+libbpf_version_requirement = '>= 0.2.0'
+if bpf_compiler == 'gcc'
+ libbpf_version_requirement = '>= 1.0.0'
+endif
+libbpf = dependency('libbpf', required : bpf_framework_required, version : libbpf_version_requirement)
conf.set10('HAVE_LIBBPF', libbpf.found())
+bpftool_strip_version_requirement = '>= 5.13.0'
+if bpf_compiler == 'gcc'
+ bpftool_strip_version_requirement = '>= 7.0.0'
+endif
+
if want_bpf_framework == 'false' or not libbpf.found() or skip_deps
conf.set10('BPF_FRAMEWORK', false)
else
- bpf_compiler = get_option('bpf-compiler')
clang_found = false
clang_supports_bpf = false
bpf_gcc_found = false
+ bpftool_strip = false
deps_found = false
if bpf_compiler == 'clang'
# Debian installs this in /usr/sbin/ which is not in $PATH.
# We check for 'bpftool' first, honouring $PATH, and in /usr/sbin/ for Debian.
# We use 'bpftool gen object' subcommand for bpftool strip, it was added by d80b2fcbe0a023619e0fc73112f2a02c2662f6ab (v5.13).
+ bpftool_strip_required = bpf_framework_required and bpf_compiler == 'gcc'
bpftool = find_program('bpftool',
'/usr/sbin/bpftool',
- required : false,
- version : '>= 5.13.0')
+ required : bpftool_strip_required,
+ version : bpftool_strip_version_requirement)
if bpftool.found()
bpftool_strip = true
deps_found = true
- else
- bpftool_strip = false
+ elif bpf_compiler == 'clang'
# We require the 'bpftool gen skeleton' subcommand, it was added by 985ead416df39d6fe8e89580cc1db6aa273e0175 (v5.6).
bpftool = find_program('bpftool',
'/usr/sbin/bpftool',
[Content]
Packages=
cryptsetup-bin
+ fdisk
iproute2
isc-dhcp-server
libbpf0
[Content]
Packages=
cryptsetup-bin
+ fdisk
iproute2
isc-dhcp-server
libbpf0
[SPECIAL_GLYPH_LIGHT_SHADE] = "-",
[SPECIAL_GLYPH_DARK_SHADE] = "X",
[SPECIAL_GLYPH_SIGMA] = "S",
+ [SPECIAL_GLYPH_ARROW_LEFT] = "<-",
[SPECIAL_GLYPH_ARROW_RIGHT] = "->",
[SPECIAL_GLYPH_ARROW_UP] = "^",
[SPECIAL_GLYPH_ARROW_DOWN] = "v",
[SPECIAL_GLYPH_ARROW_DOWN] = u8"↓", /* actually called: DOWNWARDS ARROW */
/* Single glyph in Unicode, two in ASCII */
+ [SPECIAL_GLYPH_ARROW_LEFT] = u8"←", /* actually called: LEFTWARDS ARROW */
[SPECIAL_GLYPH_ARROW_RIGHT] = u8"→", /* actually called: RIGHTWARDS ARROW */
/* Single glyph in Unicode, three in ASCII */
SPECIAL_GLYPH_MU,
SPECIAL_GLYPH_CHECK_MARK,
SPECIAL_GLYPH_CROSS_MARK,
+ SPECIAL_GLYPH_ARROW_LEFT,
SPECIAL_GLYPH_ARROW_RIGHT,
SPECIAL_GLYPH_ARROW_UP,
SPECIAL_GLYPH_ARROW_DOWN,
"/dev/null");
}
-int fsck_exists(const char *fstype) {
+int fsck_exists(void) {
+ return executable_is_good("fsck");
+}
+
+int fsck_exists_for_fstype(const char *fstype) {
const char *checker;
+ int r;
assert(fstype);
if (streq(fstype, "auto"))
return -EINVAL;
+ r = fsck_exists();
+ if (r <= 0)
+ return r;
+
checker = strjoina("fsck.", fstype);
return executable_is_good(checker);
}
bool paths_check_timestamp(const char* const* paths, usec_t *paths_ts_usec, bool update);
-int fsck_exists(const char *fstype);
+int fsck_exists(void);
+int fsck_exists_for_fstype(const char *fstype);
/* Iterates through the path prefixes of the specified path, going up
* the tree, to root. Also returns "" (and not "/"!) for the root
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
-const volatile __u8 is_allow_list SEC(".rodata") = 0;
+const volatile __u8 is_allow_list = 0;
/* Map containing the network interfaces indexes.
* The interpretation of the map depends on the value of is_allow_list.
assert(m);
assert(f);
+ /* NB: this is a debug interface for developers. It's not supposed to be machine readable or be
+ * stable between versions. We take the liberty to restructure it entirely between versions and
+ * add/remove fields at will. */
+
fprintf(f, "%sManager: systemd " STRINGIFY(PROJECT_VERSION) " (" GIT_VERSION ")\n", strempty(prefix));
fprintf(f, "%sFeatures: %s\n", strempty(prefix), systemd_features);
return log_error_errno(r, "Failed to get devname of block device " DEVNUM_FORMAT_STR ": %m",
DEVNUM_FORMAT_VAL(devno));
- r = loop_device_open(devname, 0, LOCK_EX, &d);
+ r = loop_device_open_from_path(devname, 0, LOCK_EX, &d);
if (r < 0)
return log_error_errno(r, "Failed to open loop device '%s': %m", devname);
}
if (sd_device_get_property_value(dev, "ID_FS_TYPE", &type) >= 0) {
- r = fsck_exists(type);
+ r = fsck_exists_for_fstype(type);
if (r < 0)
log_device_warning_errno(dev, r, "Couldn't detect if fsck.%s may be used, proceeding: %m", type);
else if (r == 0) {
log_device_info(dev, "fsck.%s doesn't exist, not checking file system.", type);
return 0;
}
+ } else {
+ r = fsck_exists();
+ if (r < 0)
+ log_device_warning_errno(dev, r, "Couldn't detect if the fsck command may be used, proceeding: %m");
+ else if (r == 0) {
+ log_device_info(dev, "The fsck command does not exist, not checking file system.");
+ return 0;
+ }
}
console = fopen("/dev/console", "we");
/* Let's take a LOCK_SH lock on the block device, in case udevd is already running. If we don't take
* the lock, udevd might end up issuing BLKRRPART in the middle, and we don't want that, since that
* might remove all partitions while we are operating on them. */
- r = loop_device_open(devname, O_RDONLY, LOCK_SH, &loop);
+ r = loop_device_open_from_path(devname, O_RDONLY, LOCK_SH, &loop);
if (r < 0)
return log_debug_errno(r, "Failed to open %s: %m", devname);
assert(node);
assert(fstype);
- r = fsck_exists(fstype);
+ r = fsck_exists_for_fstype(fstype);
if (r < 0)
return log_error_errno(r, "Failed to check if fsck for file system %s exists: %m", fstype);
if (r == 0) {
return log_error_errno(SYNTHETIC_ERRNO(EINVAL), "Failed to determine backing device for DM %s.", setup->dm_name);
if (!setup->loop) {
- r = loop_device_open(n, O_RDWR, LOCK_UN, &setup->loop);
+ r = loop_device_open_from_path(n, O_RDWR, LOCK_UN, &setup->loop);
if (r < 0)
return log_error_errno(r, "Failed to open loopback device %s: %m", n);
}
sd_device_monitor_set_description;
sd_device_monitor_get_description;
+ sd_event_set_signal_exit;
+
sd_id128_string_equal;
sd_hwdb_new_from_path;
sd_event_signal_handler_t callback;
struct signalfd_siginfo siginfo;
int sig;
+ bool unblock;
} signal;
struct {
sd_event_child_handler_t callback;
LIST_HEAD(sd_event_source, sources);
+ sd_event_source *sigint_event_source, *sigterm_event_source;
+
usec_t last_run_usec, last_log_usec;
unsigned delays[sizeof(usec_t) * 8];
};
assert(e);
+ e->sigterm_event_source = sd_event_source_unref(e->sigterm_event_source);
+ e->sigint_event_source = sd_event_source_unref(e->sigint_event_source);
+
while ((s = e->sources)) {
assert(s->floating);
source_disconnect(s);
static void source_disconnect(sd_event_source *s) {
sd_event *event;
+ int r;
assert(s);
s->event->signal_sources[s->signal.sig] = NULL;
event_gc_signal_data(s->event, &s->priority, s->signal.sig);
+
+ if (s->signal.unblock) {
+ sigset_t new_ss;
+
+ if (sigemptyset(&new_ss) < 0)
+ log_debug_errno(errno, "Failed to reset signal set, ignoring: %m");
+ else if (sigaddset(&new_ss, s->signal.sig) < 0)
+ log_debug_errno(errno, "Failed to add signal %i to signal mask, ignoring: %m", s->signal.sig);
+ else {
+ r = pthread_sigmask(SIG_UNBLOCK, &new_ss, NULL);
+ if (r != 0)
+ log_debug_errno(r, "Failed to unblock signal %i, ignoring: %m", s->signal.sig);
+ }
+ }
}
break;
_cleanup_(source_freep) sd_event_source *s = NULL;
struct signal_data *d;
+ sigset_t new_ss;
+ bool block_it;
int r;
assert_return(e, -EINVAL);
assert_return(e = event_resolve(e), -ENOPKG);
- assert_return(SIGNAL_VALID(sig), -EINVAL);
assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
assert_return(!event_pid_changed(e), -ECHILD);
+ /* Let's make sure our special flag stays outside of the valid signal range */
+ assert_cc(_NSIG < SD_EVENT_SIGNAL_PROCMASK);
+
+ if (sig & SD_EVENT_SIGNAL_PROCMASK) {
+ sig &= ~SD_EVENT_SIGNAL_PROCMASK;
+ assert_return(SIGNAL_VALID(sig), -EINVAL);
+
+ block_it = true;
+ } else {
+ assert_return(SIGNAL_VALID(sig), -EINVAL);
+
+ r = signal_is_blocked(sig);
+ if (r < 0)
+ return r;
+ if (r == 0)
+ return -EBUSY;
+
+ block_it = false;
+ }
+
if (!callback)
callback = signal_exit_callback;
- r = signal_is_blocked(sig);
- if (r < 0)
- return r;
- if (r == 0)
- return -EBUSY;
-
if (!e->signal_sources) {
e->signal_sources = new0(sd_event_source*, _NSIG);
if (!e->signal_sources)
e->signal_sources[sig] = s;
+ if (block_it) {
+ sigset_t old_ss;
+
+ if (sigemptyset(&new_ss) < 0)
+ return -errno;
+
+ if (sigaddset(&new_ss, sig) < 0)
+ return -errno;
+
+ r = pthread_sigmask(SIG_BLOCK, &new_ss, &old_ss);
+ if (r != 0)
+ return -r;
+
+ r = sigismember(&old_ss, sig);
+ if (r < 0)
+ return -errno;
+
+ s->signal.unblock = !r;
+ } else
+ s->signal.unblock = false;
+
r = event_make_signal_data(e, sig, &d);
- if (r < 0)
+ if (r < 0) {
+ if (s->signal.unblock)
+ (void) pthread_sigmask(SIG_UNBLOCK, &new_ss, NULL);
+
return r;
+ }
/* Use the signal name as description for the event source by default */
(void) sd_event_source_set_description(s, signal_to_string(sig));
return s->ratelimited;
}
+
+_public_ int sd_event_set_signal_exit(sd_event *e, int b) {
+ bool change = false;
+ int r;
+
+ assert_return(e, -EINVAL);
+
+ if (b) {
+ /* We want to maintain pointers to these event sources, so that we can destroy them when told
+ * so. But we also don't want them to pin the event loop itself. Hence we mark them as
+ * floating after creation (and undo this before deleting them again). */
+
+ if (!e->sigint_event_source) {
+ r = sd_event_add_signal(e, &e->sigint_event_source, SIGINT | SD_EVENT_SIGNAL_PROCMASK, NULL, NULL);
+ if (r < 0)
+ return r;
+
+ assert(sd_event_source_set_floating(e->sigint_event_source, true) >= 0);
+ change = true;
+ }
+
+ if (!e->sigterm_event_source) {
+ r = sd_event_add_signal(e, &e->sigterm_event_source, SIGTERM | SD_EVENT_SIGNAL_PROCMASK, NULL, NULL);
+ if (r < 0) {
+ if (change) {
+ assert(sd_event_source_set_floating(e->sigint_event_source, false) >= 0);
+ e->sigint_event_source = sd_event_source_unref(e->sigint_event_source);
+ }
+
+ return r;
+ }
+
+ assert(sd_event_source_set_floating(e->sigterm_event_source, true) >= 0);
+ change = true;
+ }
+
+ } else {
+ if (e->sigint_event_source) {
+ assert(sd_event_source_set_floating(e->sigint_event_source, false) >= 0);
+ e->sigint_event_source = sd_event_source_unref(e->sigint_event_source);
+ change = true;
+ }
+
+ if (e->sigterm_event_source) {
+ assert(sd_event_source_set_floating(e->sigterm_event_source, false) >= 0);
+ e->sigterm_event_source = sd_event_source_unref(e->sigterm_event_source);
+ change = true;
+ }
+ }
+
+ return change;
+}
bg_pho-utf8 bg,us pc105 ,phonetic terminate:ctrl_alt_bksp,grp:shifts_toggle,grp_led:scroll
it-ibm it pc105 - terminate:ctrl_alt_bksp
cz-us-qwertz cz,us pc105 - terminate:ctrl_alt_bksp,grp:shifts_toggle,grp_led:scroll
-cz-qwerty cz,us pc105 qwerty terminate:ctrl_alt_bksp,grp:shifts_toggle,grp_led:scroll
+cz-qwerty cz,us pc105 qwerty, terminate:ctrl_alt_bksp,grp:shifts_toggle,grp_led:scroll
br-abnt2 br abnt2 - terminate:ctrl_alt_bksp
ro ro pc105 - terminate:ctrl_alt_bksp
us-acentos us pc105 intl terminate:ctrl_alt_bksp
arg_fsck = false;
if (arg_fsck && arg_mount_type && arg_transport == BUS_TRANSPORT_LOCAL) {
- r = fsck_exists(arg_mount_type);
+ r = fsck_exists_for_fstype(arg_mount_type);
if (r < 0)
log_warning_errno(r, "Couldn't determine whether fsck for %s exists, proceeding anyway.", arg_mount_type);
else if (r == 0) {
if (r < 0)
return r;
- HASHMAP_FOREACH(item, unit_files) {
- r = unit_file_exists(LOOKUP_SCOPE_SYSTEM, &paths, item->name);
- if (r < 0)
- return sd_bus_error_set_errnof(error, r, "Failed to determine whether unit '%s' exists on the host: %m", item->name);
- if (!FLAGS_SET(flags, PORTABLE_REATTACH) && r > 0)
- return sd_bus_error_setf(error, BUS_ERROR_UNIT_EXISTS, "Unit file '%s' exists on the host already, refusing.", item->name);
+ if (!FLAGS_SET(flags, PORTABLE_REATTACH) && !FLAGS_SET(flags, PORTABLE_FORCE))
+ HASHMAP_FOREACH(item, unit_files) {
+ r = unit_file_exists(LOOKUP_SCOPE_SYSTEM, &paths, item->name);
+ if (r < 0)
+ return sd_bus_error_set_errnof(error, r, "Failed to determine whether unit '%s' exists on the host: %m", item->name);
+ if (r > 0)
+ return sd_bus_error_setf(error, BUS_ERROR_UNIT_EXISTS, "Unit file '%s' exists on the host already, refusing.", item->name);
- r = unit_file_is_active(bus, item->name, error);
- if (r < 0)
- return r;
- if (!FLAGS_SET(flags, PORTABLE_REATTACH) && r > 0)
- return sd_bus_error_setf(error, BUS_ERROR_UNIT_EXISTS, "Unit file '%s' is active already, refusing.", item->name);
- }
+ r = unit_file_is_active(bus, item->name, error);
+ if (r < 0)
+ return r;
+ if (r > 0)
+ return sd_bus_error_setf(error, BUS_ERROR_UNIT_EXISTS, "Unit file '%s' is active already, refusing.", item->name);
+ }
HASHMAP_FOREACH(item, unit_files) {
r = attach_unit_file(&paths, image->path, image->type, extension_images,
if (r == 0)
continue;
- r = unit_file_is_active(bus, unit_name, error);
- if (r < 0)
- return r;
- if (!FLAGS_SET(flags, PORTABLE_REATTACH) && r > 0)
- return sd_bus_error_setf(error, BUS_ERROR_UNIT_EXISTS, "Unit file '%s' is active, can't detach.", unit_name);
+ if (!FLAGS_SET(flags, PORTABLE_REATTACH) && !FLAGS_SET(flags, PORTABLE_FORCE)) {
+ r = unit_file_is_active(bus, unit_name, error);
+ if (r < 0)
+ return r;
+ if (r > 0)
+ return sd_bus_error_setf(error, BUS_ERROR_UNIT_EXISTS, "Unit file '%s' is active, can't detach.", unit_name);
+ }
r = set_ensure_consume(&unit_files, &string_hash_ops_free, TAKE_PTR(unit_name));
if (r < 0)
typedef enum PortableFlags {
PORTABLE_RUNTIME = 1 << 0, /* Public API via DBUS, do not change */
- PORTABLE_PREFER_COPY = 1 << 1,
- PORTABLE_PREFER_SYMLINK = 1 << 2,
- PORTABLE_REATTACH = 1 << 3,
- _PORTABLE_MASK_PUBLIC = PORTABLE_RUNTIME,
+ PORTABLE_FORCE = 1 << 1, /* Public API via DBUS, do not change */
+ PORTABLE_PREFER_COPY = 1 << 2,
+ PORTABLE_PREFER_SYMLINK = 1 << 3,
+ PORTABLE_REATTACH = 1 << 4,
+ _PORTABLE_MASK_PUBLIC = PORTABLE_RUNTIME | PORTABLE_FORCE,
_PORTABLE_TYPE_MAX,
_PORTABLE_TYPE_INVALID = -EINVAL,
} PortableFlags;
static bool arg_now = false;
static bool arg_no_block = false;
static char **arg_extension_images = NULL;
+static bool arg_force = false;
STATIC_DESTRUCTOR_REGISTER(arg_extension_images, strv_freep);
return bus_log_create_error(r);
if (STR_IN_SET(method, "AttachImageWithExtensions", "ReattachImageWithExtensions")) {
- uint64_t flags = arg_runtime ? PORTABLE_RUNTIME : 0;
+ uint64_t flags = (arg_runtime ? PORTABLE_RUNTIME : 0) | (arg_force ? PORTABLE_FORCE : 0);
r = sd_bus_message_append(m, "st", arg_copy_mode, flags);
} else
if (strv_isempty(arg_extension_images))
r = sd_bus_message_append(m, "b", arg_runtime);
else {
- uint64_t flags = arg_runtime ? PORTABLE_RUNTIME : 0;
+ uint64_t flags = (arg_runtime ? PORTABLE_RUNTIME : 0) | (arg_force ? PORTABLE_FORCE : 0);
r = sd_bus_message_append(m, "t", flags);
}
" attach/before detach\n"
" --no-block Don't block waiting for attach --now to complete\n"
" --extension=PATH Extend the image with an overlay\n"
+ " --force Skip 'already active' check when attaching or\n"
+ " detaching an image (with extensions)\n"
"\nSee the %s for details.\n",
program_invocation_short_name,
ansi_highlight(),
ARG_NOW,
ARG_NO_BLOCK,
ARG_EXTENSION,
+ ARG_FORCE,
};
static const struct option options[] = {
{ "now", no_argument, NULL, ARG_NOW },
{ "no-block", no_argument, NULL, ARG_NO_BLOCK },
{ "extension", required_argument, NULL, ARG_EXTENSION },
+ { "force", no_argument, NULL, ARG_FORCE },
{}
};
return log_oom();
break;
+ case ARG_FORCE:
+ arg_force = true;
+ break;
+
case '?':
return -EINVAL;
#include "bus-map-properties.h"
#include "bus-message-util.h"
#include "dns-domain.h"
+#include "errno-list.h"
#include "escape.h"
#include "format-table.h"
#include "format-util.h"
#include "gcrypt-util.h"
#include "hostname-util.h"
+#include "json.h"
#include "main-func.h"
#include "missing_network.h"
#include "netlink-util.h"
#include "strv.h"
#include "terminal-util.h"
#include "utf8.h"
+#include "varlink.h"
#include "verb-log-control.h"
#include "verbs.h"
static uint16_t arg_class = 0;
static bool arg_legend = true;
static uint64_t arg_flags = 0;
+static JsonFormatFlags arg_json_format_flags = JSON_FORMAT_OFF;
static PagerFlags arg_pager_flags = 0;
bool arg_ifindex_permissive = false; /* If true, don't generate an error if the specified interface index doesn't exist */
static const char *arg_service_family = NULL;
static int output_rr_packet(const void *d, size_t l, int ifindex) {
_cleanup_(dns_resource_record_unrefp) DnsResourceRecord *rr = NULL;
- _cleanup_(dns_packet_unrefp) DnsPacket *p = NULL;
int r;
- r = dns_packet_new(&p, DNS_PROTOCOL_DNS, 0, DNS_PACKET_SIZE_MAX);
- if (r < 0)
- return log_oom();
-
- p->refuse_compression = true;
-
- r = dns_packet_append_blob(p, d, l, NULL);
- if (r < 0)
- return log_oom();
-
- r = dns_packet_read_rr(p, &rr, NULL, NULL);
+ r = dns_resource_record_new_from_raw(&rr, d, l);
if (r < 0)
return log_error_errno(r, "Failed to parse RR: %m");
return verb_log_control_common(bus, "org.freedesktop.resolve1", argv[0], argc == 2 ? argv[1] : NULL);
}
+static int monitor_rkey_from_json(JsonVariant *v, DnsResourceKey **ret_key) {
+ _cleanup_(dns_resource_key_unrefp) DnsResourceKey *key = NULL;
+ uint16_t type = 0, class = 0;
+ const char *name = NULL;
+ int r;
+
+ JsonDispatch dispatch_table[] = {
+ { "class", JSON_VARIANT_INTEGER, json_dispatch_uint16, PTR_TO_SIZE(&class), JSON_MANDATORY },
+ { "type", JSON_VARIANT_INTEGER, json_dispatch_uint16, PTR_TO_SIZE(&type), JSON_MANDATORY },
+ { "name", JSON_VARIANT_STRING, json_dispatch_const_string, PTR_TO_SIZE(&name), JSON_MANDATORY },
+ {}
+ };
+
+ assert(v);
+ assert(ret_key);
+
+ r = json_dispatch(v, dispatch_table, NULL, 0, NULL);
+ if (r < 0)
+ return r;
+
+ key = dns_resource_key_new(class, type, name);
+ if (!key)
+ return -ENOMEM;
+
+ *ret_key = TAKE_PTR(key);
+ return 0;
+}
+
+static int print_question(char prefix, const char *color, JsonVariant *question) {
+ JsonVariant *q = NULL;
+ int r;
+
+ assert(color);
+
+ JSON_VARIANT_ARRAY_FOREACH(q, question) {
+ _cleanup_(dns_resource_key_unrefp) DnsResourceKey *key = NULL;
+ char buf[DNS_RESOURCE_KEY_STRING_MAX];
+
+ r = monitor_rkey_from_json(q, &key);
+ if (r < 0) {
+ log_warning_errno(r, "Received monitor message with invalid question key, ignoring: %m");
+ continue;
+ }
+
+ printf("%s%s %c%s: %s\n",
+ color,
+ special_glyph(SPECIAL_GLYPH_ARROW_RIGHT),
+ prefix,
+ ansi_normal(),
+ dns_resource_key_to_string(key, buf, sizeof(buf)));
+ }
+
+ return 0;
+}
+
+static int print_answer(JsonVariant *answer) {
+ JsonVariant *a;
+ int r;
+
+ JSON_VARIANT_ARRAY_FOREACH(a, answer) {
+ _cleanup_(dns_resource_record_unrefp) DnsResourceRecord *rr = NULL;
+ _cleanup_free_ void *d = NULL;
+ JsonVariant *jraw;
+ const char *s;
+ size_t l;
+
+ jraw = json_variant_by_key(a, "raw");
+ if (!jraw) {
+ log_warning("Received monitor answer lacking valid raw data, ignoring.");
+ continue;
+ }
+
+ r = json_variant_unbase64(jraw, &d, &l);
+ if (r < 0) {
+ log_warning_errno(r, "Failed to undo base64 encoding of monitor answer raw data, ignoring.");
+ continue;
+ }
+
+ r = dns_resource_record_new_from_raw(&rr, d, l);
+ if (r < 0) {
+ log_warning_errno(r, "Failed to parse monitor answer RR, ingoring: %m");
+ continue;
+ }
+
+ s = dns_resource_record_to_string(rr);
+ if (!s)
+ return log_oom();
+
+ printf("%s%s A%s: %s\n",
+ ansi_highlight_yellow(),
+ special_glyph(SPECIAL_GLYPH_ARROW_LEFT),
+ ansi_normal(),
+ s);
+ }
+
+ return 0;
+}
+
+static void monitor_query_dump(JsonVariant *v) {
+ _cleanup_(json_variant_unrefp) JsonVariant *question = NULL, *answer = NULL, *collected_questions = NULL;
+ int rcode = -1, error = 0, r;
+ const char *state = NULL;
+
+ assert(v);
+
+ JsonDispatch dispatch_table[] = {
+ { "question", JSON_VARIANT_ARRAY, json_dispatch_variant, PTR_TO_SIZE(&question), JSON_MANDATORY },
+ { "answer", JSON_VARIANT_ARRAY, json_dispatch_variant, PTR_TO_SIZE(&answer), 0 },
+ { "collectedQuestions", JSON_VARIANT_ARRAY, json_dispatch_variant, PTR_TO_SIZE(&collected_questions), 0 },
+ { "state", JSON_VARIANT_STRING, json_dispatch_const_string, PTR_TO_SIZE(&state), JSON_MANDATORY },
+ { "rcode", JSON_VARIANT_INTEGER, json_dispatch_int, PTR_TO_SIZE(&rcode), 0 },
+ { "errno", JSON_VARIANT_INTEGER, json_dispatch_int, PTR_TO_SIZE(&error), 0 },
+ {}
+ };
+
+ r = json_dispatch(v, dispatch_table, NULL, 0, NULL);
+ if (r < 0)
+ return (void) log_warning("Received malformed monitor message, ignoring.");
+
+ /* First show the current question */
+ print_question('Q', ansi_highlight_cyan(), question);
+
+ /* And then show the questions that led to this one in case this was a CNAME chain */
+ print_question('C', ansi_highlight_grey(), collected_questions);
+
+ printf("%s%s S%s: %s\n",
+ streq_ptr(state, "success") ? ansi_highlight_green() : ansi_highlight_red(),
+ special_glyph(SPECIAL_GLYPH_ARROW_LEFT),
+ ansi_normal(),
+ strna(streq_ptr(state, "errno") ? errno_to_name(error) :
+ streq_ptr(state, "rcode-failure") ? dns_rcode_to_string(rcode) :
+ state));
+
+ print_answer(answer);
+}
+
+static int monitor_reply(
+ Varlink *link,
+ JsonVariant *parameters,
+ const char *error_id,
+ VarlinkReplyFlags flags,
+ void *userdata) {
+
+ assert(link);
+
+ if (error_id) {
+ bool disconnect;
+
+ disconnect = streq(error_id, VARLINK_ERROR_DISCONNECTED);
+ if (disconnect)
+ log_info("Disconnected.");
+ else
+ log_error("Varlink error: %s", error_id);
+
+ (void) sd_event_exit(ASSERT_PTR(varlink_get_event(link)), disconnect ? EXIT_SUCCESS : EXIT_FAILURE);
+ return 0;
+ }
+
+ if (json_variant_by_key(parameters, "ready")) {
+ /* The first message coming in will just indicate that we are now subscribed. We let our
+ * caller know if they asked for it. Once the caller sees this they should know that we are
+ * not going to miss any queries anymore. */
+ (void) sd_notify(/* unset_environment=false */ false, "READY=1");
+ return 0;
+ }
+
+ if (arg_json_format_flags & JSON_FORMAT_OFF) {
+ monitor_query_dump(parameters);
+ printf("\n");
+ } else
+ json_variant_dump(parameters, arg_json_format_flags, NULL, NULL);
+
+ fflush(stdout);
+
+ return 0;
+}
+
+static int verb_monitor(int argc, char *argv[], void *userdata) {
+ _cleanup_(sd_event_unrefp) sd_event *event = NULL;
+ _cleanup_(varlink_unrefp) Varlink *vl = NULL;
+ int r, c;
+
+ r = sd_event_default(&event);
+ if (r < 0)
+ return log_error_errno(r, "Failed to get event loop: %m");
+
+ r = sd_event_set_signal_exit(event, true);
+ if (r < 0)
+ return log_error_errno(r, "Failed to enable exit on SIGINT/SIGTERM: %m");
+
+ r = varlink_connect_address(&vl, "/run/systemd/resolve/io.systemd.Resolve.Monitor");
+ if (r < 0)
+ return log_error_errno(r, "Failed to connect to query monitoring service /run/systemd/resolve/io.systemd.Resolve.Monitor: %m");
+
+ r = varlink_set_relative_timeout(vl, USEC_INFINITY); /* We want the monitor to run basically forever */
+ if (r < 0)
+ return log_error_errno(r, "Failed to set varlink time-out: %m");
+
+ r = varlink_attach_event(vl, event, SD_EVENT_PRIORITY_NORMAL);
+ if (r < 0)
+ return log_error_errno(r, "Failed to attach varlink connection to event loop: %m");
+
+ r = varlink_bind_reply(vl, monitor_reply);
+ if (r < 0)
+ return log_error_errno(r, "Failed to bind reply callback to varlink connection: %m");
+
+ r = varlink_observe(vl, "io.systemd.Resolve.Monitor.SubscribeQueryResults", NULL);
+ if (r < 0)
+ return log_error_errno(r, "Failed to issue SubscribeQueryResults() varlink call: %m");
+
+ r = sd_event_loop(event);
+ if (r < 0)
+ return log_error_errno(r, "Failed to run event loop: %m");
+
+ r = sd_event_get_exit_code(event, &c);
+ if (r < 0)
+ return log_error_errno(r, "Failed to get exit code: %m");
+
+ return c;
+}
+
static void help_protocol_types(void) {
if (arg_legend)
puts("Known protocol types:");
" reset-statistics Reset resolver statistics\n"
" flush-caches Flush all local DNS caches\n"
" reset-server-features Forget learnt DNS server feature levels\n"
+ " monitor Monitor DNS queries\n"
" dns [LINK [SERVER...]] Get/set per-interface DNS server address\n"
" domain [LINK [DOMAIN...]] Get/set per-interface search domain\n"
" default-route [LINK [BOOL]] Get/set per-interface default route flag\n"
" --cache=BOOL Allow response from cache (default: yes)\n"
" --zone=BOOL Allow response from locally registered mDNS/LLMNR\n"
" records (default: yes)\n"
- " --trust-anchor=BOOL Allow response from local trust anchor (default: yes)\n"
+ " --trust-anchor=BOOL Allow response from local trust anchor (default:\n"
+ " yes)\n"
" --network=BOOL Allow response from network (default: yes)\n"
- " --search=BOOL Use search domains for single-label names (default: yes)\n"
+ " --search=BOOL Use search domains for single-label names (default:\n"
+ " yes)\n"
" --raw[=payload|packet] Dump the answer as binary data\n"
" --legend=BOOL Print headers and additional info (default: yes)\n"
+ " --json=MODE Output as JSON\n"
+ " -j Same as --json=pretty on tty, --json=short\n"
+ " otherwise\n"
"\nSee the %s for details.\n",
program_invocation_short_name,
ansi_highlight(),
ARG_RAW,
ARG_SEARCH,
ARG_NO_PAGER,
+ ARG_JSON,
};
static const struct option options[] = {
{ "raw", optional_argument, NULL, ARG_RAW },
{ "search", required_argument, NULL, ARG_SEARCH },
{ "no-pager", no_argument, NULL, ARG_NO_PAGER },
+ { "json", required_argument, NULL, ARG_JSON },
{}
};
assert(argc >= 0);
assert(argv);
- while ((c = getopt_long(argc, argv, "h46i:t:c:p:", options, NULL)) >= 0)
+ while ((c = getopt_long(argc, argv, "h46i:t:c:p:j", options, NULL)) >= 0)
switch (c) {
case 'h':
arg_pager_flags |= PAGER_DISABLE;
break;
+ case ARG_JSON:
+ r = parse_json_argument(optarg, &arg_json_format_flags);
+ if (r <= 0)
+ return r;
+
+ break;
+
+ case 'j':
+ arg_json_format_flags = JSON_FORMAT_PRETTY_AUTO|JSON_FORMAT_COLOR_AUTO;
+ break;
+
case '?':
return -EINVAL;
{ "nta", VERB_ANY, VERB_ANY, 0, verb_nta },
{ "revert", VERB_ANY, 2, 0, verb_revert_link },
{ "log-level", VERB_ANY, 2, 0, verb_log_level },
+ { "monitor", VERB_ANY, 1, 0, verb_monitor },
{}
};
SD_BUS_PROPERTY("DNSSECNegativeTrustAnchors", "as", bus_property_get_ntas, 0, 0),
SD_BUS_PROPERTY("DNSStubListener", "s", bus_property_get_dns_stub_listener_mode, offsetof(Manager, dns_stub_listener_mode), 0),
SD_BUS_PROPERTY("ResolvConfMode", "s", bus_property_get_resolv_conf_mode, 0, 0),
- SD_BUS_PROPERTY("Monitor", "b", bus_property_get_bool, offsetof(Manager, enable_varlink_notifications), SD_BUS_VTABLE_PROPERTY_EMITS_CHANGE),
SD_BUS_METHOD_WITH_ARGS("ResolveHostname",
SD_BUS_ARGS("i", ifindex, "s", name, "i", family, "t", flags),
static int dns_cache_put_positive(
DnsCache *c,
+ DnsProtocol protocol,
DnsResourceRecord *rr,
DnsAnswer *answer,
DnsPacket *full_packet,
return 0;
}
+ /* Do not cache mDNS goodbye packet. */
+ if (protocol == DNS_PROTOCOL_MDNS && rr->ttl <= 1)
+ return 0;
+
/* Otherwise, add the new RR */
r = dns_cache_init(c);
if (r < 0)
int dns_cache_put(
DnsCache *c,
DnsCacheMode cache_mode,
+ DnsProtocol protocol,
DnsResourceKey *key,
int rcode,
DnsAnswer *answer,
r = dns_cache_put_positive(
c,
+ protocol,
item->rr,
primary ? answer : NULL,
primary ? full_packet : NULL,
int dns_cache_put(
DnsCache *c,
DnsCacheMode cache_mode,
+ DnsProtocol protocol,
DnsResourceKey *key,
int rcode,
DnsAnswer *answer,
dns_question_unref(q->question_idna);
dns_question_unref(q->question_utf8);
dns_packet_unref(q->question_bypass);
+ dns_question_unref(q->collected_questions);
dns_query_reset_answer(q);
q->state = state;
- if (q->question_utf8 && state == DNS_TRANSACTION_SUCCESS && set_size(q->manager->varlink_subscription) > 0)
- (void) send_dns_notification(q->manager, q->answer, dns_question_first_name(q->question_utf8));
+ (void) manager_monitor_send(q->manager, q->state, q->answer_rcode, q->answer_errno, q->question_idna, q->question_utf8, q->collected_questions, q->answer);
dns_query_stop(q);
if (q->complete)
dns_query_accept(q, bad);
}
+static int dns_query_collect_question(DnsQuery *q, DnsQuestion *question) {
+ _cleanup_(dns_question_unrefp) DnsQuestion *merged = NULL;
+ int r;
+
+ assert(q);
+
+ if (dns_question_size(question) == 0)
+ return 0;
+
+ /* When redirecting, save the first element in the chain, for informational purposes when monitoring */
+ r = dns_question_merge(q->collected_questions, question, &merged);
+ if (r < 0)
+ return r;
+
+ dns_question_unref(q->collected_questions);
+ q->collected_questions = TAKE_PTR(merged);
+
+ return 0;
+}
+
static int dns_query_cname_redirect(DnsQuery *q, const DnsResourceRecord *cname) {
_cleanup_(dns_question_unrefp) DnsQuestion *nq_idna = NULL, *nq_utf8 = NULL;
int r, k;
/* Turn off searching for the new name */
q->flags |= SD_RESOLVED_NO_SEARCH;
+ r = dns_query_collect_question(q, q->question_idna);
+ if (r < 0)
+ return r;
+ r = dns_query_collect_question(q, q->question_utf8);
+ if (r < 0)
+ return r;
+
+ /* Install the redirected question */
dns_question_unref(q->question_idna);
q->question_idna = TAKE_PTR(nq_idna);
* here, and use that instead. */
DnsPacket *question_bypass;
+ /* When we follow a CNAME redirect, we save the original question here, for informational/monitoring
+ * purposes. We'll keep adding to this whenever we go one step in the redirect, so that in the end
+ * this will contain the complete set of CNAME questions. */
+ DnsQuestion *collected_questions;
+
uint64_t flags;
int ifindex;
return 0;
}
+static int dns_question_add_raw_all(DnsQuestion *a, DnsQuestion *b) {
+ DnsQuestionItem *item;
+ int r;
+
+ DNS_QUESTION_FOREACH_ITEM(item, b) {
+ r = dns_question_add_raw(a, item->key, item->flags);
+ if (r < 0)
+ return r;
+ }
+
+ return 0;
+}
+
int dns_question_add(DnsQuestion *q, DnsResourceKey *key, DnsQuestionFlags flags) {
DnsQuestionItem *item;
int r;
return dns_question_add_raw(q, key, flags);
}
+static int dns_question_add_all(DnsQuestion *a, DnsQuestion *b) {
+ DnsQuestionItem *item;
+ int r;
+
+ DNS_QUESTION_FOREACH_ITEM(item, b) {
+ r = dns_question_add(a, item->key, item->flags);
+ if (r < 0)
+ return r;
+ }
+
+ return 0;
+}
+
int dns_question_matches_rr(DnsQuestion *q, DnsResourceRecord *rr, const char *search_domain) {
DnsResourceKey *key;
int r;
fputc('\n', f);
}
}
+
+int dns_question_merge(DnsQuestion *a, DnsQuestion *b, DnsQuestion **ret) {
+ _cleanup_(dns_question_unrefp) DnsQuestion *k = NULL;
+ int r;
+
+ assert(ret);
+
+ if (a == b || dns_question_size(b) <= 0) {
+ *ret = dns_question_ref(a);
+ return 0;
+ }
+
+ if (dns_question_size(a) <= 0) {
+ *ret = dns_question_ref(b);
+ return 0;
+ }
+
+ k = dns_question_new(dns_question_size(a) + dns_question_size(b));
+ if (!k)
+ return -ENOMEM;
+
+ r = dns_question_add_raw_all(k, a);
+ if (r < 0)
+ return r;
+
+ r = dns_question_add_all(k, b);
+ if (r < 0)
+ return r;
+
+ *ret = TAKE_PTR(k);
+ return 0;
+}
return dns_question_size(q) <= 0;
}
+int dns_question_merge(DnsQuestion *a, DnsQuestion *b, DnsQuestion **ret);
+
DEFINE_TRIVIAL_CLEANUP_FUNC(DnsQuestion*, dns_question_unref);
#define _DNS_QUESTION_FOREACH(u, k, q) \
return 0;
}
+int dns_resource_record_new_from_raw(DnsResourceRecord **ret, const void *data, size_t size) {
+ _cleanup_(dns_packet_unrefp) DnsPacket *p = NULL;
+ int r;
+
+ r = dns_packet_new(&p, DNS_PROTOCOL_DNS, 0, DNS_PACKET_SIZE_MAX);
+ if (r < 0)
+ return r;
+
+ p->refuse_compression = true;
+
+ r = dns_packet_append_blob(p, data, size, NULL);
+ if (r < 0)
+ return r;
+
+ return dns_packet_read_rr(p, ret, NULL, NULL);
+}
+
+int dns_resource_key_to_json(DnsResourceKey *key, JsonVariant **ret) {
+ assert(key);
+ assert(ret);
+
+ return json_build(ret,
+ JSON_BUILD_OBJECT(
+ JSON_BUILD_PAIR("class", JSON_BUILD_INTEGER(key->class)),
+ JSON_BUILD_PAIR("type", JSON_BUILD_INTEGER(key->type)),
+ JSON_BUILD_PAIR("name", JSON_BUILD_STRING(dns_resource_key_name(key)))));
+}
+
+static int type_bitmap_to_json(Bitmap *b, JsonVariant **ret) {
+ _cleanup_(json_variant_unrefp) JsonVariant *l = NULL;
+ unsigned t;
+ int r;
+
+ assert(b);
+ assert(ret);
+
+ BITMAP_FOREACH(t, b) {
+ _cleanup_(json_variant_unrefp) JsonVariant *v = NULL;
+
+ r = json_variant_new_unsigned(&v, t);
+ if (r < 0)
+ return r;
+
+ r = json_variant_append_array(&l, v);
+ if (r < 0)
+ return r;
+ }
+
+ if (!l)
+ return json_variant_new_array(ret, NULL, 0);
+
+ *ret = TAKE_PTR(l);
+ return 0;
+}
+
+int dns_resource_record_to_json(DnsResourceRecord *rr, JsonVariant **ret) {
+ _cleanup_(json_variant_unrefp) JsonVariant *k = NULL;
+ int r;
+
+ assert(rr);
+ assert(ret);
+
+ r = dns_resource_key_to_json(rr->key, &k);
+ if (r < 0)
+ return r;
+
+ switch (rr->unparsable ? _DNS_TYPE_INVALID : rr->key->type) {
+
+ case DNS_TYPE_SRV:
+ return json_build(ret,
+ JSON_BUILD_OBJECT(
+ JSON_BUILD_PAIR("key", JSON_BUILD_VARIANT(k)),
+ JSON_BUILD_PAIR("priority", JSON_BUILD_UNSIGNED(rr->srv.priority)),
+ JSON_BUILD_PAIR("weight", JSON_BUILD_UNSIGNED(rr->srv.weight)),
+ JSON_BUILD_PAIR("port", JSON_BUILD_UNSIGNED(rr->srv.port)),
+ JSON_BUILD_PAIR("name", JSON_BUILD_STRING(rr->srv.name))));
+
+ case DNS_TYPE_PTR:
+ case DNS_TYPE_NS:
+ case DNS_TYPE_CNAME:
+ case DNS_TYPE_DNAME:
+ return json_build(ret,
+ JSON_BUILD_OBJECT(
+ JSON_BUILD_PAIR("key", JSON_BUILD_VARIANT(k)),
+ JSON_BUILD_PAIR("name", JSON_BUILD_STRING(rr->ptr.name))));
+
+ case DNS_TYPE_HINFO:
+ return json_build(ret,
+ JSON_BUILD_OBJECT(
+ JSON_BUILD_PAIR("key", JSON_BUILD_VARIANT(k)),
+ JSON_BUILD_PAIR("cpu", JSON_BUILD_STRING(rr->hinfo.cpu)),
+ JSON_BUILD_PAIR("os", JSON_BUILD_STRING(rr->hinfo.os))));
+
+ case DNS_TYPE_SPF:
+ case DNS_TYPE_TXT: {
+ _cleanup_(json_variant_unrefp) JsonVariant *l = NULL;
+
+ LIST_FOREACH(items, i, rr->txt.items) {
+ _cleanup_(json_variant_unrefp) JsonVariant *b = NULL;
+
+ r = json_variant_new_octescape(&b, i->data, i->length);
+ if (r < 0)
+ return r;
+
+ r = json_variant_append_array(&l, b);
+ if (r < 0)
+ return r;
+ }
+
+ if (!l) {
+ r = json_variant_new_array(&l, NULL, 0);
+ if (r < 0)
+ return r;
+ }
+
+ return json_build(ret,
+ JSON_BUILD_OBJECT(
+ JSON_BUILD_PAIR("key", JSON_BUILD_VARIANT(k)),
+ JSON_BUILD_PAIR("items", JSON_BUILD_VARIANT(l))));
+ }
+
+ case DNS_TYPE_A:
+ return json_build(ret,
+ JSON_BUILD_OBJECT(
+ JSON_BUILD_PAIR("key", JSON_BUILD_VARIANT(k)),
+ JSON_BUILD_PAIR("address", JSON_BUILD_IN4_ADDR(&rr->a.in_addr))));
+
+ case DNS_TYPE_AAAA:
+ return json_build(ret,
+ JSON_BUILD_OBJECT(
+ JSON_BUILD_PAIR("key", JSON_BUILD_VARIANT(k)),
+ JSON_BUILD_PAIR("address", JSON_BUILD_IN6_ADDR(&rr->aaaa.in6_addr))));
+
+ case DNS_TYPE_SOA:
+ return json_build(ret,
+ JSON_BUILD_OBJECT(
+ JSON_BUILD_PAIR("key", JSON_BUILD_VARIANT(k)),
+ JSON_BUILD_PAIR("mname", JSON_BUILD_STRING(rr->soa.mname)),
+ JSON_BUILD_PAIR("rname", JSON_BUILD_STRING(rr->soa.rname)),
+ JSON_BUILD_PAIR("serial", JSON_BUILD_UNSIGNED(rr->soa.serial)),
+ JSON_BUILD_PAIR("refresh", JSON_BUILD_UNSIGNED(rr->soa.refresh)),
+                                                   JSON_BUILD_PAIR("retry", JSON_BUILD_UNSIGNED(rr->soa.retry)),
+                                                   JSON_BUILD_PAIR("expire", JSON_BUILD_UNSIGNED(rr->soa.expire)),
+ JSON_BUILD_PAIR("minimum", JSON_BUILD_UNSIGNED(rr->soa.minimum))));
+
+ case DNS_TYPE_MX:
+ return json_build(ret,
+ JSON_BUILD_OBJECT(
+ JSON_BUILD_PAIR("key", JSON_BUILD_VARIANT(k)),
+ JSON_BUILD_PAIR("priority", JSON_BUILD_UNSIGNED(rr->mx.priority)),
+ JSON_BUILD_PAIR("exchange", JSON_BUILD_STRING(rr->mx.exchange))));
+ case DNS_TYPE_LOC:
+ return json_build(ret,
+ JSON_BUILD_OBJECT(
+ JSON_BUILD_PAIR("key", JSON_BUILD_VARIANT(k)),
+ JSON_BUILD_PAIR("version", JSON_BUILD_UNSIGNED(rr->loc.version)),
+ JSON_BUILD_PAIR("size", JSON_BUILD_UNSIGNED(rr->loc.size)),
+ JSON_BUILD_PAIR("horiz_pre", JSON_BUILD_UNSIGNED(rr->loc.horiz_pre)),
+ JSON_BUILD_PAIR("vert_pre", JSON_BUILD_UNSIGNED(rr->loc.vert_pre)),
+ JSON_BUILD_PAIR("latitude", JSON_BUILD_UNSIGNED(rr->loc.latitude)),
+ JSON_BUILD_PAIR("longitude", JSON_BUILD_UNSIGNED(rr->loc.longitude)),
+ JSON_BUILD_PAIR("altitude", JSON_BUILD_UNSIGNED(rr->loc.altitude))));
+
+ case DNS_TYPE_DS:
+ return json_build(ret,
+ JSON_BUILD_OBJECT(
+ JSON_BUILD_PAIR("key", JSON_BUILD_VARIANT(k)),
+ JSON_BUILD_PAIR("keyTag", JSON_BUILD_UNSIGNED(rr->ds.key_tag)),
+ JSON_BUILD_PAIR("algorithm", JSON_BUILD_UNSIGNED(rr->ds.algorithm)),
+ JSON_BUILD_PAIR("digestType", JSON_BUILD_UNSIGNED(rr->ds.digest_type)),
+ JSON_BUILD_PAIR("digest", JSON_BUILD_HEX(rr->ds.digest, rr->ds.digest_size))));
+
+ case DNS_TYPE_SSHFP:
+ return json_build(ret,
+ JSON_BUILD_OBJECT(
+ JSON_BUILD_PAIR("key", JSON_BUILD_VARIANT(k)),
+ JSON_BUILD_PAIR("algorithm", JSON_BUILD_UNSIGNED(rr->sshfp.algorithm)),
+ JSON_BUILD_PAIR("fptype", JSON_BUILD_UNSIGNED(rr->sshfp.fptype)),
+ JSON_BUILD_PAIR("fingerprint", JSON_BUILD_HEX(rr->sshfp.fingerprint, rr->sshfp.fingerprint_size))));
+
+ case DNS_TYPE_DNSKEY:
+ return json_build(ret,
+ JSON_BUILD_OBJECT(
+ JSON_BUILD_PAIR("key", JSON_BUILD_VARIANT(k)),
+ JSON_BUILD_PAIR("flags", JSON_BUILD_UNSIGNED(rr->dnskey.flags)),
+ JSON_BUILD_PAIR("protocol", JSON_BUILD_UNSIGNED(rr->dnskey.protocol)),
+ JSON_BUILD_PAIR("algorithm", JSON_BUILD_UNSIGNED(rr->dnskey.algorithm)),
+ JSON_BUILD_PAIR("dnskey", JSON_BUILD_BASE64(rr->dnskey.key, rr->dnskey.key_size))));
+
+
+ case DNS_TYPE_RRSIG:
+ return json_build(ret,
+ JSON_BUILD_OBJECT(
+ JSON_BUILD_PAIR("key", JSON_BUILD_VARIANT(k)),
+ JSON_BUILD_PAIR("signer", JSON_BUILD_STRING(rr->rrsig.signer)),
+ JSON_BUILD_PAIR("typeCovered", JSON_BUILD_UNSIGNED(rr->rrsig.type_covered)),
+ JSON_BUILD_PAIR("algorithm", JSON_BUILD_UNSIGNED(rr->rrsig.algorithm)),
+ JSON_BUILD_PAIR("labels", JSON_BUILD_UNSIGNED(rr->rrsig.labels)),
+ JSON_BUILD_PAIR("originalTtl", JSON_BUILD_UNSIGNED(rr->rrsig.original_ttl)),
+ JSON_BUILD_PAIR("expiration", JSON_BUILD_UNSIGNED(rr->rrsig.expiration)),
+ JSON_BUILD_PAIR("inception", JSON_BUILD_UNSIGNED(rr->rrsig.inception)),
+ JSON_BUILD_PAIR("keyTag", JSON_BUILD_UNSIGNED(rr->rrsig.key_tag)),
+ JSON_BUILD_PAIR("signature", JSON_BUILD_BASE64(rr->rrsig.signature, rr->rrsig.signature_size))));
+
+ case DNS_TYPE_NSEC: {
+ _cleanup_(json_variant_unrefp) JsonVariant *bm = NULL;
+
+ r = type_bitmap_to_json(rr->nsec.types, &bm);
+ if (r < 0)
+ return r;
+
+ return json_build(ret,
+ JSON_BUILD_OBJECT(
+ JSON_BUILD_PAIR("key", JSON_BUILD_VARIANT(k)),
+ JSON_BUILD_PAIR("nextDomain", JSON_BUILD_STRING(rr->nsec.next_domain_name)),
+ JSON_BUILD_PAIR("types", JSON_BUILD_VARIANT(bm))));
+ }
+
+ case DNS_TYPE_NSEC3: {
+ _cleanup_(json_variant_unrefp) JsonVariant *bm = NULL;
+
+ r = type_bitmap_to_json(rr->nsec3.types, &bm);
+ if (r < 0)
+ return r;
+
+ return json_build(ret,
+ JSON_BUILD_OBJECT(
+ JSON_BUILD_PAIR("key", JSON_BUILD_VARIANT(k)),
+ JSON_BUILD_PAIR("algorithm", JSON_BUILD_UNSIGNED(rr->nsec3.algorithm)),
+ JSON_BUILD_PAIR("flags", JSON_BUILD_UNSIGNED(rr->nsec3.flags)),
+ JSON_BUILD_PAIR("iterations", JSON_BUILD_UNSIGNED(rr->nsec3.iterations)),
+ JSON_BUILD_PAIR("salt", JSON_BUILD_HEX(rr->nsec3.salt, rr->nsec3.salt_size)),
+ JSON_BUILD_PAIR("hash", JSON_BUILD_BASE32HEX(rr->nsec3.next_hashed_name, rr->nsec3.next_hashed_name_size)),
+ JSON_BUILD_PAIR("types", JSON_BUILD_VARIANT(bm))));
+ }
+
+ case DNS_TYPE_TLSA:
+ return json_build(ret,
+ JSON_BUILD_OBJECT(
+ JSON_BUILD_PAIR("key", JSON_BUILD_VARIANT(k)),
+ JSON_BUILD_PAIR("certUsage", JSON_BUILD_UNSIGNED(rr->tlsa.cert_usage)),
+ JSON_BUILD_PAIR("selector", JSON_BUILD_UNSIGNED(rr->tlsa.selector)),
+ JSON_BUILD_PAIR("matchingType", JSON_BUILD_UNSIGNED(rr->tlsa.matching_type)),
+ JSON_BUILD_PAIR("data", JSON_BUILD_HEX(rr->tlsa.data, rr->tlsa.data_size))));
+
+ case DNS_TYPE_CAA:
+ return json_build(ret,
+ JSON_BUILD_OBJECT(
+ JSON_BUILD_PAIR("key", JSON_BUILD_VARIANT(k)),
+ JSON_BUILD_PAIR("flags", JSON_BUILD_UNSIGNED(rr->caa.flags)),
+ JSON_BUILD_PAIR("tag", JSON_BUILD_STRING(rr->caa.tag)),
+ JSON_BUILD_PAIR("value", JSON_BUILD_OCTESCAPE(rr->caa.value, rr->caa.value_size))));
+
+ default:
+ /* Can't provide broken-down format */
+ *ret = NULL;
+ return 0;
+ }
+}
+
static const char* const dnssec_algorithm_table[_DNSSEC_ALGORITHM_MAX_DEFINED] = {
/* Mnemonics as listed on https://www.iana.org/assignments/dns-sec-alg-numbers/dns-sec-alg-numbers.xhtml */
[DNSSEC_ALGORITHM_RSAMD5] = "RSAMD5",
#include "dns-type.h"
#include "hashmap.h"
#include "in-addr-util.h"
+#include "json.h"
#include "list.h"
#include "string-util.h"
#include "time-util.h"
DnsTxtItem *dns_txt_item_copy(DnsTxtItem *i);
int dns_txt_item_new_empty(DnsTxtItem **ret);
+int dns_resource_record_new_from_raw(DnsResourceRecord **ret, const void *data, size_t size);
+
+int dns_resource_key_to_json(DnsResourceKey *key, JsonVariant **ret);
+int dns_resource_record_to_json(DnsResourceRecord *rr, JsonVariant **ret);
+
void dns_resource_record_hash_func(const DnsResourceRecord *i, struct siphash *state);
int dns_resource_record_compare_func(const DnsResourceRecord *x, const DnsResourceRecord *y);
dns_cache_put(&t->scope->cache,
t->scope->manager->enable_cache,
+ t->scope->protocol,
dns_transaction_key(t),
t->answer_rcode,
t->answer,
}
}
+ /* https://datatracker.ietf.org/doc/html/rfc6840#section-5.2 */
+ if (result == DNSSEC_UNSUPPORTED_ALGORITHM) {
+ r = dns_answer_move_by_key(validated, &t->answer, rr->key, 0, NULL);
+ if (r < 0)
+ return r;
+
+ manager_dnssec_verdict(t->scope->manager, DNSSEC_INSECURE, rr->key);
+ return 1;
+ }
+
if (IN_SET(result,
DNSSEC_MISSING_KEY,
- DNSSEC_SIGNATURE_EXPIRED,
- DNSSEC_UNSUPPORTED_ALGORITHM)) {
+ DNSSEC_SIGNATURE_EXPIRED)) {
r = dns_transaction_dnskey_authenticated(t, rr);
if (r < 0 && r != -ENXIO)
Resolve.ResolveUnicastSingleLabel, config_parse_bool, 0, offsetof(Manager, resolve_unicast_single_label)
Resolve.DNSStubListenerExtra, config_parse_dns_stub_listener_extra, 0, offsetof(Manager, dns_extra_stub_listeners)
Resolve.CacheFromLocalhost, config_parse_bool, 0, offsetof(Manager, cache_from_localhost)
-Resolve.Monitor, config_parse_bool, 0, offsetof(Manager, enable_varlink_notifications)
return sendmsg_loop(fd, &mh, 0);
}
-int send_dns_notification(Manager *m, DnsAnswer *answer, const char *query_name) {
- _cleanup_free_ char *normalized = NULL;
- DnsResourceRecord *rr;
- int ifindex, r;
- _cleanup_(json_variant_unrefp) JsonVariant *array = NULL;
+static int dns_question_to_json(DnsQuestion *q, JsonVariant **ret) {
+ _cleanup_(json_variant_unrefp) JsonVariant *l = NULL;
+ DnsResourceKey *key;
+ int r;
+
+ assert(ret);
+
+ DNS_QUESTION_FOREACH(key, q) {
+ _cleanup_(json_variant_unrefp) JsonVariant *v = NULL;
+
+ r = dns_resource_key_to_json(key, &v);
+ if (r < 0)
+ return r;
+
+ r = json_variant_append_array(&l, v);
+ if (r < 0)
+ return r;
+ }
+
+ *ret = TAKE_PTR(l);
+ return 0;
+}
+
+int manager_monitor_send(
+ Manager *m,
+ int state,
+ int rcode,
+ int error,
+ DnsQuestion *question_idna,
+ DnsQuestion *question_utf8,
+ DnsQuestion *collected_questions,
+ DnsAnswer *answer) {
+
+ _cleanup_(json_variant_unrefp) JsonVariant *jquestion = NULL, *jcollected_questions = NULL, *janswer = NULL;
+ _cleanup_(dns_question_unrefp) DnsQuestion *merged = NULL;
Varlink *connection;
+ DnsAnswerItem *rri;
+ int r;
assert(m);
if (set_isempty(m->varlink_subscription))
return 0;
- DNS_ANSWER_FOREACH_IFINDEX(rr, ifindex, answer) {
- _cleanup_(json_variant_unrefp) JsonVariant *entry = NULL;
-
- if (rr->key->type == DNS_TYPE_A) {
- struct in_addr *addr = &rr->a.in_addr;
- r = json_build(&entry,
- JSON_BUILD_OBJECT(JSON_BUILD_PAIR_CONDITION(ifindex > 0, "ifindex", JSON_BUILD_INTEGER(ifindex)),
- JSON_BUILD_PAIR_INTEGER("family", AF_INET),
- JSON_BUILD_PAIR_IN4_ADDR("address", addr),
- JSON_BUILD_PAIR_STRING("type", "A")));
- } else if (rr->key->type == DNS_TYPE_AAAA) {
- struct in6_addr *addr6 = &rr->aaaa.in6_addr;
- r = json_build(&entry,
- JSON_BUILD_OBJECT(JSON_BUILD_PAIR_CONDITION(ifindex > 0, "ifindex", JSON_BUILD_INTEGER(ifindex)),
- JSON_BUILD_PAIR_INTEGER("family", AF_INET6),
- JSON_BUILD_PAIR_IN6_ADDR("address", addr6),
- JSON_BUILD_PAIR_STRING("type", "AAAA")));
- } else
- continue;
- if (r < 0) {
- log_debug_errno(r, "Failed to build json object: %m");
- continue;
- }
+        /* Merge both question formats into one */
+ r = dns_question_merge(question_idna, question_utf8, &merged);
+ if (r < 0)
+ return log_error_errno(r, "Failed to merge UTF8/IDNA questions: %m");
+
+ /* Convert the current primary question to JSON */
+ r = dns_question_to_json(merged, &jquestion);
+ if (r < 0)
+ return log_error_errno(r, "Failed to convert question to JSON: %m");
- r = json_variant_append_array(&array, entry);
+        /* Generate a JSON array of the questions preceding the current one in the CNAME chain */
+ r = dns_question_to_json(collected_questions, &jcollected_questions);
+ if (r < 0)
+ return log_error_errno(r, "Failed to convert question to JSON: %m");
+
+ DNS_ANSWER_FOREACH_ITEM(rri, answer) {
+ _cleanup_(json_variant_unrefp) JsonVariant *v = NULL, *w = NULL;
+
+ r = dns_resource_record_to_json(rri->rr, &v);
if (r < 0)
- return log_debug_errno(r, "Failed to append notification entry to array: %m");
- }
+ return log_error_errno(r, "Failed to convert answer resource record to JSON: %m");
- if (json_variant_is_blank_object(array))
- return 0;
+ r = dns_resource_record_to_wire_format(rri->rr, /* canonical= */ false); /* don't use DNSSEC canonical format, since it removes casing, but we want that for DNS_SD compat */
+ if (r < 0)
+ return log_error_errno(r, "Failed to generate RR wire format: %m");
- r = dns_name_normalize(query_name, 0, &normalized);
- if (r < 0)
- return log_debug_errno(r, "Failed to normalize query name: %m");
+ r = json_build(&w, JSON_BUILD_OBJECT(
+ JSON_BUILD_PAIR_CONDITION(v, "rr", JSON_BUILD_VARIANT(v)),
+ JSON_BUILD_PAIR("raw", JSON_BUILD_BASE64(rri->rr->wire_format, rri->rr->wire_format_size)),
+ JSON_BUILD_PAIR_CONDITION(rri->ifindex > 0, "ifindex", JSON_BUILD_INTEGER(rri->ifindex))));
+ if (r < 0)
+ return log_error_errno(r, "Failed to make answer RR object: %m");
+
+ r = json_variant_append_array(&janswer, w);
+ if (r < 0)
+ return log_debug_errno(r, "Failed to append notification entry to array: %m");
+ }
SET_FOREACH(connection, m->varlink_subscription) {
r = varlink_notifyb(connection,
- JSON_BUILD_OBJECT(JSON_BUILD_PAIR("addresses",
- JSON_BUILD_VARIANT(array)),
- JSON_BUILD_PAIR("name", JSON_BUILD_STRING(normalized))));
+ JSON_BUILD_OBJECT(JSON_BUILD_PAIR("state", JSON_BUILD_STRING(dns_transaction_state_to_string(state))),
+ JSON_BUILD_PAIR_CONDITION(state == DNS_TRANSACTION_RCODE_FAILURE, "rcode", JSON_BUILD_INTEGER(rcode)),
+ JSON_BUILD_PAIR_CONDITION(state == DNS_TRANSACTION_ERRNO, "errno", JSON_BUILD_INTEGER(error)),
+ JSON_BUILD_PAIR("question", JSON_BUILD_VARIANT(jquestion)),
+ JSON_BUILD_PAIR_CONDITION(jcollected_questions, "collectedQuestions", JSON_BUILD_VARIANT(jcollected_questions)),
+ JSON_BUILD_PAIR_CONDITION(janswer, "answer", JSON_BUILD_VARIANT(janswer))));
if (r < 0)
- log_debug_errno(r, "Failed to send notification, ignoring: %m");
+ log_debug_errno(r, "Failed to send monitor event, ignoring: %m");
}
+
return 0;
}
DnsOverTlsMode dns_over_tls_mode;
DnsCacheMode enable_cache;
bool cache_from_localhost;
- bool enable_varlink_notifications;
DnsStubListenerMode dns_stub_listener_mode;
#if ENABLE_DNS_OVER_TLS
Hashmap *polkit_registry;
VarlinkServer *varlink_server;
- VarlinkServer *varlink_notification_server;
+ VarlinkServer *varlink_monitor_server;
Set *varlink_subscription;
uint32_t manager_find_mtu(Manager *m);
-int send_dns_notification(Manager *m, DnsAnswer *answer, const char *query_name);
+int manager_monitor_send(Manager *m, int state, int rcode, int error, DnsQuestion *question_idna, DnsQuestion *question_utf8, DnsQuestion *collected_questions, DnsAnswer *answer);
int manager_write(Manager *m, int fd, DnsPacket *p);
int manager_send(Manager *m, int fd, int ifindex, int family, const union in_addr_union *destination, uint16_t port, const union in_addr_union *source, DnsPacket *p);
}
}
- dns_cache_put(&scope->cache, scope->manager->enable_cache, NULL, DNS_PACKET_RCODE(p), p->answer, NULL, false, _DNSSEC_RESULT_INVALID, UINT32_MAX, p->family, &p->sender);
+ dns_cache_put(
+ &scope->cache,
+ scope->manager->enable_cache,
+ DNS_PROTOCOL_MDNS,
+ NULL,
+ DNS_PACKET_RCODE(p),
+ p->answer,
+ NULL,
+ false,
+ _DNSSEC_RESULT_INVALID,
+ UINT32_MAX,
+ p->family,
+ &p->sender);
} else if (dns_packet_validate_query(p) > 0) {
log_debug("Got mDNS query packet for id %u", DNS_PACKET_ID(p));
if (json_variant_elements(parameters) > 0)
return varlink_error_invalid_parameter(link, parameters);
+        /* Send a ready message to the connecting client, to indicate that we are now listening, and all
+ * queries issued after the point the client sees this will also be reported to the client. */
+ r = varlink_notifyb(link,
+ JSON_BUILD_OBJECT(JSON_BUILD_PAIR("ready", JSON_BUILD_BOOLEAN(true))));
+ if (r < 0)
+ return log_error_errno(r, "Failed to report monitor to be established: %m");
+
r = set_ensure_put(&m->varlink_subscription, NULL, link);
if (r < 0)
return log_error_errno(r, "Failed to add subscription to set: %m");
return 1;
}
-static int varlink_notification_server_init(Manager *m) {
+static int varlink_monitor_server_init(Manager *m) {
_cleanup_(varlink_server_unrefp) VarlinkServer *server = NULL;
int r;
assert(m);
- if (!m->enable_varlink_notifications || m->varlink_notification_server)
+ if (m->varlink_monitor_server)
return 0;
r = varlink_server_new(&server, VARLINK_SERVER_ROOT_ONLY);
if (r < 0)
return log_error_errno(r, "Failed to attach varlink connection to event loop: %m");
- m->varlink_notification_server = TAKE_PTR(server);
+ m->varlink_monitor_server = TAKE_PTR(server);
return 0;
}
-int manager_varlink_init(Manager *m) {
+static int varlink_main_server_init(Manager *m) {
_cleanup_(varlink_server_unrefp) VarlinkServer *s = NULL;
int r;
return log_error_errno(r, "Failed to attach varlink connection to event loop: %m");
m->varlink_server = TAKE_PTR(s);
+ return 0;
+}
+
+int manager_varlink_init(Manager *m) {
+ int r;
+
+ r = varlink_main_server_init(m);
+ if (r < 0)
+ return r;
- r = varlink_notification_server_init(m);
+ r = varlink_monitor_server_init(m);
if (r < 0)
return r;
assert(m);
m->varlink_server = varlink_server_unref(m->varlink_server);
- m->varlink_notification_server = varlink_server_unref(m->varlink_notification_server);
+ m->varlink_monitor_server = varlink_server_unref(m->varlink_monitor_server);
}
#include "missing_magic.h"
#include "parse-util.h"
+static int fd_get_devnum(int fd, BlockDeviceLookupFlag flags, dev_t *ret) {
+ struct stat st;
+ dev_t devnum;
+ int r;
+
+ assert(fd >= 0);
+ assert(ret);
+
+ if (fstat(fd, &st) < 0)
+ return -errno;
+
+ if (S_ISBLK(st.st_mode))
+ devnum = st.st_rdev;
+ else if (!FLAGS_SET(flags, BLOCK_DEVICE_LOOKUP_BACKING))
+ return -ENOTBLK;
+ else if (!S_ISREG(st.st_mode) && !S_ISDIR(st.st_mode))
+ return -ENOTBLK;
+ else if (major(st.st_dev) != 0)
+ devnum = st.st_dev;
+ else {
+ /* If major(st.st_dev) is zero, this might mean we are backed by btrfs, which needs special
+                 * handling, to get the backing device node. */
+
+ r = fcntl(fd, F_GETFL);
+ if (r < 0)
+ return -errno;
+
+ if (FLAGS_SET(r, O_PATH)) {
+ _cleanup_close_ int regfd = -1;
+
+ /* The fstat() above we can execute on an O_PATH fd. But the btrfs ioctl we cannot.
+ * Hence acquire a "real" fd first, without the O_PATH flag. */
+
+ regfd = fd_reopen(fd, O_RDONLY|O_CLOEXEC|O_NONBLOCK|O_NOCTTY);
+ if (regfd < 0)
+ return regfd;
+
+ r = btrfs_get_block_device_fd(regfd, &devnum);
+ } else
+ r = btrfs_get_block_device_fd(fd, &devnum);
+ if (r == -ENOTTY) /* not btrfs */
+ return -ENOTBLK;
+ if (r < 0)
+ return r;
+ }
+
+ *ret = devnum;
+ return 0;
+}
+
int block_device_is_whole_disk(sd_device *dev) {
const char *s;
int r;
return 0;
}
+static int block_device_get_originating(sd_device *dev, sd_device **ret) {
+ _cleanup_(sd_device_unrefp) sd_device *first_found = NULL;
+ const char *suffix;
+ sd_device *child;
+ dev_t devnum = 0; /* avoid false maybe-uninitialized warning */
+
+ /* For the specified block device tries to chase it through the layers, in case LUKS-style DM
+ * stacking is used, trying to find the next underlying layer. */
+
+ assert(dev);
+ assert(ret);
+
+ FOREACH_DEVICE_CHILD_WITH_SUFFIX(dev, child, suffix) {
+ sd_device *child_whole_disk;
+ dev_t n;
+
+ if (!path_startswith(suffix, "slaves"))
+ continue;
+
+ if (block_device_get_whole_disk(child, &child_whole_disk) < 0)
+ continue;
+
+ if (sd_device_get_devnum(child_whole_disk, &n) < 0)
+ continue;
+
+ if (!first_found) {
+ first_found = sd_device_ref(child);
+ devnum = n;
+ continue;
+ }
+
+ /* We found a device backed by multiple other devices. We don't really support automatic
+ * discovery on such setups, with the exception of dm-verity partitions. In this case there
+ * are two backing devices: the data partition and the hash partition. We are fine with such
+ * setups, however, only if both partitions are on the same physical device. Hence, let's
+ * verify this by iterating over every node in the 'slaves/' directory and comparing them with
+ * the first that gets returned by readdir(), to ensure they all point to the same device. */
+ if (n != devnum)
+ return -ENOTUNIQ;
+ }
+
+ if (!first_found)
+ return -ENOENT;
+
+ *ret = TAKE_PTR(first_found);
+ return 1; /* found */
+}
+
+int block_device_new_from_fd(int fd, BlockDeviceLookupFlag flags, sd_device **ret) {
+ _cleanup_(sd_device_unrefp) sd_device *dev = NULL;
+ dev_t devnum;
+ int r;
+
+ assert(fd >= 0);
+ assert(ret);
+
+ r = fd_get_devnum(fd, flags, &devnum);
+ if (r < 0)
+ return r;
+
+ r = sd_device_new_from_devnum(&dev, 'b', devnum);
+ if (r < 0)
+ return r;
+
+ if (FLAGS_SET(flags, BLOCK_DEVICE_LOOKUP_ORIGINATING)) {
+ _cleanup_(sd_device_unrefp) sd_device *dev_origin = NULL;
+ sd_device *dev_whole_disk;
+
+ r = block_device_get_whole_disk(dev, &dev_whole_disk);
+ if (r < 0)
+ return r;
+
+ r = block_device_get_originating(dev_whole_disk, &dev_origin);
+ if (r < 0 && r != -ENOENT)
+ return r;
+ if (r > 0)
+ device_unref_and_replace(dev, dev_origin);
+ }
+
+ if (FLAGS_SET(flags, BLOCK_DEVICE_LOOKUP_WHOLE_DISK)) {
+ sd_device *dev_whole_disk;
+
+ r = block_device_get_whole_disk(dev, &dev_whole_disk);
+ if (r < 0)
+ return r;
+
+ *ret = sd_device_ref(dev_whole_disk);
+ return 0;
+ }
+
+ *ret = sd_device_ref(dev);
+ return 0;
+}
+
+int block_device_new_from_path(const char *path, BlockDeviceLookupFlag flags, sd_device **ret) {
+ _cleanup_close_ int fd = -1;
+
+ assert(path);
+ assert(ret);
+
+ fd = open(path, O_CLOEXEC|O_PATH);
+ if (fd < 0)
+ return -errno;
+
+ return block_device_new_from_fd(fd, flags, ret);
+}
+
int block_get_whole_disk(dev_t d, dev_t *ret) {
char p[SYS_BLOCK_PATH_MAX("/partition")];
_cleanup_free_ char *s = NULL;
}
int block_get_originating(dev_t dt, dev_t *ret) {
- _cleanup_closedir_ DIR *d = NULL;
- _cleanup_free_ char *t = NULL;
- char p[SYS_BLOCK_PATH_MAX("/slaves")];
- _cleanup_free_ char *first_found = NULL;
- const char *q;
- dev_t devt;
+ _cleanup_(sd_device_unrefp) sd_device *dev = NULL, *origin = NULL;
int r;
- /* For the specified block device tries to chase it through the layers, in case LUKS-style DM stacking is used,
- * trying to find the next underlying layer. */
-
- xsprintf_sys_block_path(p, "/slaves", dt);
- d = opendir(p);
- if (!d)
- return -errno;
-
- FOREACH_DIRENT_ALL(de, d, return -errno) {
-
- if (dot_or_dot_dot(de->d_name))
- continue;
-
- if (!IN_SET(de->d_type, DT_LNK, DT_UNKNOWN))
- continue;
-
- if (first_found) {
- _cleanup_free_ char *u = NULL, *v = NULL, *a = NULL, *b = NULL;
-
- /* We found a device backed by multiple other devices. We don't really support
- * automatic discovery on such setups, with the exception of dm-verity partitions. In
- * this case there are two backing devices: the data partition and the hash
- * partition. We are fine with such setups, however, only if both partitions are on
- * the same physical device. Hence, let's verify this by iterating over every node
- * in the 'slaves/' directory and comparing them with the first that gets returned by
- * readdir(), to ensure they all point to the same device. */
-
- u = path_join(p, de->d_name, "../dev");
- if (!u)
- return -ENOMEM;
-
- v = path_join(p, first_found, "../dev");
- if (!v)
- return -ENOMEM;
-
- r = read_one_line_file(u, &a);
- if (r < 0)
- return log_debug_errno(r, "Failed to read %s: %m", u);
-
- r = read_one_line_file(v, &b);
- if (r < 0)
- return log_debug_errno(r, "Failed to read %s: %m", v);
-
- /* Check if the parent device is the same. If not, then the two backing devices are on
- * different physical devices, and we don't support that. */
- if (!streq(a, b))
- return -ENOTUNIQ;
- } else {
- first_found = strdup(de->d_name);
- if (!first_found)
- return -ENOMEM;
- }
- }
-
- if (!first_found)
- return -ENOENT;
-
- q = strjoina(p, "/", first_found, "/dev");
+ assert(ret);
- r = read_one_line_file(q, &t);
+ r = sd_device_new_from_devnum(&dev, 'b', dt);
if (r < 0)
return r;
- r = parse_devnum(t, &devt);
+ r = block_device_get_originating(dev, &origin);
if (r < 0)
- return -EINVAL;
-
- if (major(devt) == 0)
- return -ENOENT;
+ return r;
- *ret = devt;
- return 1;
+ return sd_device_get_devnum(origin, ret);
}
int get_block_device_harder_fd(int fd, dev_t *ret) {
int fd_get_whole_disk(int fd, bool backing, dev_t *ret) {
dev_t devt;
- struct stat st;
int r;
+ assert(fd >= 0);
assert(ret);
- if (fstat(fd, &st) < 0)
- return -errno;
-
- if (S_ISBLK(st.st_mode))
- devt = st.st_rdev;
- else if (!backing)
- return -ENOTBLK;
- else if (!S_ISREG(st.st_mode) && !S_ISDIR(st.st_mode))
- return -ENOTBLK;
- else if (major(st.st_dev) != 0)
- devt = st.st_dev;
- else {
- _cleanup_close_ int regfd = -1;
-
- /* If major(st.st_dev) is zero, this might mean we are backed by btrfs, which needs special
- * handing, to get the backing device node. */
-
- regfd = fd_reopen(fd, O_RDONLY|O_CLOEXEC|O_NONBLOCK);
- if (regfd < 0)
- return regfd;
-
- r = btrfs_get_block_device_fd(regfd, &devt);
- if (r == -ENOTTY)
- return -ENOTBLK;
- if (r < 0)
- return r;
- }
+ r = fd_get_devnum(fd, backing ? BLOCK_DEVICE_LOOKUP_BACKING : 0, &devt);
+ if (r < 0)
+ return r;
return block_get_whole_disk(devt, ret);
}
assert(dev || fd >= 0);
if (!dev) {
- struct stat st;
-
- if (fstat(fd, &st) < 0)
- return -errno;
-
- r = sd_device_new_from_stat_rdev(&dev_unref, &st);
+ r = block_device_new_from_fd(fd, 0, &dev_unref);
if (r < 0)
return r;
#define xsprintf_sys_block_path(buf, suffix, devno) \
xsprintf(buf, "/sys/dev/block/%u:%u%s", major(devno), minor(devno), strempty(suffix))
+typedef enum BlockDeviceLookupFlag {
+ BLOCK_DEVICE_LOOKUP_WHOLE_DISK = 1 << 0, /* whole block device, e.g. sda, nvme0n1, or loop0. */
+ BLOCK_DEVICE_LOOKUP_BACKING = 1 << 1, /* fd may be regular file or directory on file system, in
+ * which case backing block device is determined. */
+ BLOCK_DEVICE_LOOKUP_ORIGINATING = 1 << 2, /* Try to find the underlying layer device for stacked
+ * block device, e.g. LUKS-style DM. */
+} BlockDeviceLookupFlag;
+
+int block_device_new_from_fd(int fd, BlockDeviceLookupFlag flag, sd_device **ret);
+int block_device_new_from_path(const char *path, BlockDeviceLookupFlag flag, sd_device **ret);
+
int block_device_is_whole_disk(sd_device *dev);
int block_device_get_whole_disk(sd_device *dev, sd_device **ret);
}
#if HAVE_BLKID
+static int dissected_image_probe_filesystem(DissectedImage *m) {
+ int r;
+
+ assert(m);
+
+ /* Fill in file system types if we don't know them yet. */
+
+ for (PartitionDesignator i = 0; i < _PARTITION_DESIGNATOR_MAX; i++) {
+ DissectedPartition *p = m->partitions + i;
+
+ if (!p->found)
+ continue;
+
+ if (!p->fstype && p->node) {
+ r = probe_filesystem(p->node, &p->fstype);
+ if (r < 0 && r != -EUCLEAN)
+ return r;
+ }
+
+ if (streq_ptr(p->fstype, "crypto_LUKS"))
+ m->encrypted = true;
+
+ if (p->fstype && fstype_is_ro(p->fstype))
+ p->rw = false;
+
+ if (!p->rw)
+ p->growfs = false;
+ }
+
+ return 0;
+}
+
static void check_partition_flags(
const char *node,
unsigned long long pflags,
return asprintf(ret, "%s%s%i", whole_devname, need_p ? "p" : "", nr);
}
-#endif
-int dissect_image(
+static int dissect_image(
+ DissectedImage *m,
int fd,
const char *devname,
- const char *image_path,
const VeritySettings *verity,
const MountOptions *mount_options,
- DissectImageFlags flags,
- DissectedImage **ret) {
+ DissectImageFlags flags) {
-#if HAVE_BLKID
sd_id128_t root_uuid = SD_ID128_NULL, root_verity_uuid = SD_ID128_NULL;
sd_id128_t usr_uuid = SD_ID128_NULL, usr_verity_uuid = SD_ID128_NULL;
bool is_gpt, is_mbr, multiple_generic = false,
generic_rw = false, /* initialize to appease gcc */
generic_growfs = false;
- _cleanup_(dissected_image_unrefp) DissectedImage *m = NULL;
_cleanup_(blkid_free_probep) blkid_probe b = NULL;
_cleanup_free_ char *generic_node = NULL;
sd_id128_t generic_uuid = SD_ID128_NULL;
blkid_partlist pl;
int r, generic_nr = -1, n_partitions;
+ assert(m);
assert(fd >= 0);
assert(devname);
- assert(ret);
assert(!verity || verity->designator < 0 || IN_SET(verity->designator, PARTITION_ROOT, PARTITION_USR));
assert(!verity || verity->root_hash || verity->root_hash_size == 0);
assert(!verity || verity->root_hash_sig || verity->root_hash_sig_size == 0);
if (r != 0)
return errno_or_else(EIO);
- r = dissected_image_new(image_path, &m);
- if (r < 0)
- return r;
-
if ((!(flags & DISSECT_IMAGE_GPT_ONLY) &&
(flags & DISSECT_IMAGE_GENERIC_ROOT)) ||
(flags & DISSECT_IMAGE_NO_PARTITION_TABLE)) {
.size = UINT64_MAX,
};
- *ret = TAKE_PTR(m);
return 0;
}
}
if (verity && verity->data_path)
return -EBADR;
- /* Safety check: refuse block devices that carry a partition table but for which the kernel doesn't
- * do partition scanning. */
- r = blockdev_partscan_enabled(fd);
- if (r < 0)
- return r;
- if (r == 0)
- return -EPROTONOSUPPORT;
+ if (FLAGS_SET(flags, DISSECT_IMAGE_MANAGE_PARTITION_DEVICES)) {
+ /* Safety check: refuse block devices that carry a partition table but for which the kernel doesn't
+ * do partition scanning. */
+ r = blockdev_partscan_enabled(fd);
+ if (r < 0)
+ return r;
+ if (r == 0)
+ return -EPROTONOSUPPORT;
+ }
errno = 0;
pl = blkid_probe_get_partitions(b);
* Kernel returns EBUSY if there's already a partition by that number or an overlapping
* partition already existent. */
- r = block_device_add_partition(fd, node, nr, (uint64_t) start * 512, (uint64_t) size * 512);
- if (r < 0) {
- if (r != -EBUSY)
- return log_debug_errno(r, "BLKPG_ADD_PARTITION failed: %m");
+ if (FLAGS_SET(flags, DISSECT_IMAGE_MANAGE_PARTITION_DEVICES)) {
+ r = block_device_add_partition(fd, node, nr, (uint64_t) start * 512, (uint64_t) size * 512);
+ if (r < 0) {
+ if (r != -EBUSY)
+ return log_debug_errno(r, "BLKPG_ADD_PARTITION failed: %m");
- log_debug_errno(r, "Kernel was quicker than us in adding partition %i.", nr);
- } else
- log_debug("We were quicker than kernel in adding partition %i.", nr);
+ log_debug_errno(r, "Kernel was quicker than us in adding partition %i.", nr);
+ } else
+ log_debug("We were quicker than kernel in adding partition %i.", nr);
+ }
if (is_gpt) {
PartitionDesignator designator = _PARTITION_DESIGNATOR_INVALID;
}
}
- blkid_free_probe(b);
- b = NULL;
+ return 0;
+}
+#endif
- /* Fill in file system types if we don't know them yet. */
- for (PartitionDesignator i = 0; i < _PARTITION_DESIGNATOR_MAX; i++) {
- DissectedPartition *p = m->partitions + i;
+int dissect_image_file(
+ const char *path,
+ const VeritySettings *verity,
+ const MountOptions *mount_options,
+ DissectImageFlags flags,
+ DissectedImage **ret) {
- if (!p->found)
- continue;
+#if HAVE_BLKID
+ _cleanup_(dissected_image_unrefp) DissectedImage *m = NULL;
+ _cleanup_close_ int fd = -1;
+ int r;
- if (!p->fstype && p->node) {
- r = probe_filesystem(p->node, &p->fstype);
- if (r < 0 && r != -EUCLEAN)
- return r;
- }
+ assert(path);
+ assert((flags & DISSECT_IMAGE_BLOCK_DEVICE) == 0);
+ assert(ret);
- if (streq_ptr(p->fstype, "crypto_LUKS"))
- m->encrypted = true;
+ fd = open(path, O_RDONLY|O_CLOEXEC|O_NONBLOCK|O_NOCTTY);
+ if (fd < 0)
+ return -errno;
- if (p->fstype && fstype_is_ro(p->fstype))
- p->rw = false;
+ r = fd_verify_regular(fd);
+ if (r < 0)
+ return r;
- if (!p->rw)
- p->growfs = false;
- }
+ r = dissected_image_new(path, &m);
+ if (r < 0)
+ return r;
+
+ r = dissect_image(m, fd, path, verity, mount_options, flags);
+ if (r < 0)
+ return r;
*ret = TAKE_PTR(m);
return 0;
assert(node);
assert(fstype);
- r = fsck_exists(fstype);
+ r = fsck_exists_for_fstype(fstype);
if (r < 0) {
log_debug_errno(r, "Couldn't determine whether fsck for %s exists, proceeding anyway.", fstype);
return 0;
DissectImageFlags flags,
DissectedImage **ret) {
+#if HAVE_BLKID
_cleanup_(dissected_image_unrefp) DissectedImage *m = NULL;
int r;
assert(loop);
assert(ret);
- r = dissect_image(loop->fd, loop->node, loop->backing_file ?: loop->node, verity, mount_options, flags, &m);
+ r = dissected_image_new(loop->backing_file ?: loop->node, &m);
if (r < 0)
return r;
m->loop = loop_device_ref(loop);
+ r = dissect_image(m, loop->fd, loop->node, verity, mount_options, flags | DISSECT_IMAGE_BLOCK_DEVICE);
+ if (r < 0)
+ return r;
+
+ r = dissected_image_probe_filesystem(m);
+ if (r < 0)
+ return r;
+
*ret = TAKE_PTR(m);
return 0;
+#else
+ return -EOPNOTSUPP;
+#endif
}
int dissect_loop_device_and_warn(
}
typedef enum DissectImageFlags {
- DISSECT_IMAGE_DEVICE_READ_ONLY = 1 << 0, /* Make device read-only */
- DISSECT_IMAGE_DISCARD_ON_LOOP = 1 << 1, /* Turn on "discard" if on a loop device and file system supports it */
- DISSECT_IMAGE_DISCARD = 1 << 2, /* Turn on "discard" if file system supports it, on all block devices */
- DISSECT_IMAGE_DISCARD_ON_CRYPTO = 1 << 3, /* Turn on "discard" also on crypto devices */
- DISSECT_IMAGE_DISCARD_ANY = DISSECT_IMAGE_DISCARD_ON_LOOP |
- DISSECT_IMAGE_DISCARD |
- DISSECT_IMAGE_DISCARD_ON_CRYPTO,
- DISSECT_IMAGE_GPT_ONLY = 1 << 4, /* Only recognize images with GPT partition tables */
- DISSECT_IMAGE_GENERIC_ROOT = 1 << 5, /* If no partition table or only single generic partition, assume it's the root fs */
- DISSECT_IMAGE_MOUNT_ROOT_ONLY = 1 << 6, /* Mount only the root and /usr partitions */
- DISSECT_IMAGE_MOUNT_NON_ROOT_ONLY = 1 << 7, /* Mount only the non-root and non-/usr partitions */
- DISSECT_IMAGE_VALIDATE_OS = 1 << 8, /* Refuse mounting images that aren't identifiable as OS images */
- DISSECT_IMAGE_VALIDATE_OS_EXT = 1 << 9, /* Refuse mounting images that aren't identifiable as OS extension images */
- DISSECT_IMAGE_RELAX_VAR_CHECK = 1 << 10, /* Don't insist that the UUID of /var is hashed from /etc/machine-id */
- DISSECT_IMAGE_FSCK = 1 << 11, /* File system check the partition before mounting (no effect when combined with DISSECT_IMAGE_READ_ONLY) */
- DISSECT_IMAGE_NO_PARTITION_TABLE = 1 << 12, /* Only recognize single file system images */
- DISSECT_IMAGE_VERITY_SHARE = 1 << 13, /* When activating a verity device, reuse existing one if already open */
- DISSECT_IMAGE_MKDIR = 1 << 14, /* Make top-level directory to mount right before mounting, if missing */
- DISSECT_IMAGE_USR_NO_ROOT = 1 << 15, /* If no root fs is in the image, but /usr is, then allow this (so that we can mount the rootfs as tmpfs or so */
- DISSECT_IMAGE_REQUIRE_ROOT = 1 << 16, /* Don't accept disks without root partition (or at least /usr partition if DISSECT_IMAGE_USR_NO_ROOT is set) */
- DISSECT_IMAGE_MOUNT_READ_ONLY = 1 << 17, /* Make mounts read-only */
- DISSECT_IMAGE_READ_ONLY = DISSECT_IMAGE_DEVICE_READ_ONLY |
- DISSECT_IMAGE_MOUNT_READ_ONLY,
- DISSECT_IMAGE_GROWFS = 1 << 18, /* Grow file systems in partitions marked for that to the size of the partitions after mount */
- DISSECT_IMAGE_MOUNT_IDMAPPED = 1 << 19, /* Mount mounts with kernel 5.12-style userns ID mapping, if file system type doesn't support uid=/gid= */
+ DISSECT_IMAGE_DEVICE_READ_ONLY = 1 << 0, /* Make device read-only */
+ DISSECT_IMAGE_DISCARD_ON_LOOP = 1 << 1, /* Turn on "discard" if on a loop device and file system supports it */
+ DISSECT_IMAGE_DISCARD = 1 << 2, /* Turn on "discard" if file system supports it, on all block devices */
+ DISSECT_IMAGE_DISCARD_ON_CRYPTO = 1 << 3, /* Turn on "discard" also on crypto devices */
+ DISSECT_IMAGE_DISCARD_ANY = DISSECT_IMAGE_DISCARD_ON_LOOP |
+ DISSECT_IMAGE_DISCARD |
+ DISSECT_IMAGE_DISCARD_ON_CRYPTO,
+ DISSECT_IMAGE_GPT_ONLY = 1 << 4, /* Only recognize images with GPT partition tables */
+ DISSECT_IMAGE_GENERIC_ROOT = 1 << 5, /* If no partition table or only single generic partition, assume it's the root fs */
+ DISSECT_IMAGE_MOUNT_ROOT_ONLY = 1 << 6, /* Mount only the root and /usr partitions */
+ DISSECT_IMAGE_MOUNT_NON_ROOT_ONLY = 1 << 7, /* Mount only the non-root and non-/usr partitions */
+ DISSECT_IMAGE_VALIDATE_OS = 1 << 8, /* Refuse mounting images that aren't identifiable as OS images */
+ DISSECT_IMAGE_VALIDATE_OS_EXT = 1 << 9, /* Refuse mounting images that aren't identifiable as OS extension images */
+ DISSECT_IMAGE_RELAX_VAR_CHECK = 1 << 10, /* Don't insist that the UUID of /var is hashed from /etc/machine-id */
+ DISSECT_IMAGE_FSCK = 1 << 11, /* File system check the partition before mounting (no effect when combined with DISSECT_IMAGE_READ_ONLY) */
+ DISSECT_IMAGE_NO_PARTITION_TABLE = 1 << 12, /* Only recognize single file system images */
+ DISSECT_IMAGE_VERITY_SHARE = 1 << 13, /* When activating a verity device, reuse existing one if already open */
+ DISSECT_IMAGE_MKDIR = 1 << 14, /* Make top-level directory to mount right before mounting, if missing */
+ DISSECT_IMAGE_USR_NO_ROOT = 1 << 15, /* If no root fs is in the image, but /usr is, then allow this (so that we can mount the rootfs as tmpfs or so) */
+ DISSECT_IMAGE_REQUIRE_ROOT = 1 << 16, /* Don't accept disks without root partition (or at least /usr partition if DISSECT_IMAGE_USR_NO_ROOT is set) */
+ DISSECT_IMAGE_MOUNT_READ_ONLY = 1 << 17, /* Make mounts read-only */
+ DISSECT_IMAGE_READ_ONLY = DISSECT_IMAGE_DEVICE_READ_ONLY |
+ DISSECT_IMAGE_MOUNT_READ_ONLY,
+ DISSECT_IMAGE_GROWFS = 1 << 18, /* Grow file systems in partitions marked for that to the size of the partitions after mount */
+ DISSECT_IMAGE_MOUNT_IDMAPPED = 1 << 19, /* Mount mounts with kernel 5.12-style userns ID mapping, if file system type doesn't support uid=/gid= */
+ DISSECT_IMAGE_MANAGE_PARTITION_DEVICES = 1 << 20, /* Manage partition devices, e.g. probe each partition in more detail */
+ DISSECT_IMAGE_BLOCK_DEVICE = DISSECT_IMAGE_MANAGE_PARTITION_DEVICES,
} DissectImageFlags;
struct DissectedImage {
const char* mount_options_from_designator(const MountOptions *options, PartitionDesignator designator);
int probe_filesystem(const char *node, char **ret_fstype);
-int dissect_image(
- int fd,
- const char *devname,
- const char *image_path,
+int dissect_image_file(
+ const char *path,
const VeritySettings *verity,
const MountOptions *mount_options,
DissectImageFlags flags,
return DWARF_CB_OK;
}
+static char* build_package_reference(
+ const char *type,
+ const char *name,
+ const char *version,
+ const char *arch) {
+
+ /* Construct an identifier for a specific version of the package. The syntax is most suitable for
+ * rpm: the resulting string can be used directly in queries and rpm/dnf/yum commands. For dpkg and
+ * other systems, it might not be usable directly, but users should still be able to figure out the
+ * meaning.
+ */
+
+ return strjoin(type ?: "package",
+ " ",
+ name,
+
+ version ? "-" : "",
+ strempty(version),
+
+ /* arch is meaningful even without version, so always print it */
+ arch ? "." : "",
+ strempty(arch));
+}
+
+static void report_module_metadata(StackContext *c, const char *name, JsonVariant *metadata) {
+ assert(c);
+ assert(name);
+ assert(metadata);
+
+ if (!c->f)
+ return;
+
+ const char
+ *build_id = json_variant_string(json_variant_by_key(metadata, "buildId")),
+ *type = json_variant_string(json_variant_by_key(metadata, "type")),
+ *package = json_variant_string(json_variant_by_key(metadata, "name")),
+ *version = json_variant_string(json_variant_by_key(metadata, "version")),
+ *arch = json_variant_string(json_variant_by_key(metadata, "architecture"));
+
+ fprintf(c->f, "Module %s", name);
+
+ if (package) {
+ /* Version/architecture is only meaningful with a package name.
+ * Skip the detailed fields if package is unknown. */
+ _cleanup_free_ char *id = build_package_reference(type, package, version, arch);
+ fprintf(c->f, " from %s", strnull(id));
+ }
+
+ if (build_id && !(package && version))
+ fprintf(c->f, ", build-id=%s", build_id);
+
+ fputs("\n", c->f);
+}
+
static int parse_package_metadata(const char *name, JsonVariant *id_json, Elf *elf, bool *ret_interpreter_found, StackContext *c) {
bool interpreter_found = false;
size_t n_program_headers;
(note_offset = sym_gelf_getnote(data, note_offset, ¬e_header, &name_offset, &desc_offset)) > 0;) {
_cleanup_(json_variant_unrefp) JsonVariant *v = NULL, *w = NULL;
- const char *note_name = (const char *)data->d_buf + name_offset;
const char *payload = (const char *)data->d_buf + desc_offset;
if (note_header.n_namesz == 0 || note_header.n_descsz == 0)
if (note_header.n_type != ELF_PACKAGE_METADATA_ID)
continue;
+ _cleanup_free_ char *payload_0suffixed = NULL;
+ assert(note_offset > desc_offset);
+ size_t payload_len = note_offset - desc_offset;
+
+ /* If we are lucky and the payload is NUL-padded, we don't need to copy the string.
+ * But if it happens to go all the way until the end of the buffer, make a copy. */
+ if (payload[payload_len-1] != '\0') {
+ payload_0suffixed = memdup_suffix0(payload, payload_len);
+ if (!payload_0suffixed)
+ return log_oom();
+ payload = payload_0suffixed;
+ }
+
r = json_parse(payload, 0, &v, NULL, NULL);
if (r < 0)
return log_error_errno(r, "json_parse on %s failed: %m", payload);
- /* First pretty-print to the buffer, so that the metadata goes as
- * plaintext in the journal. */
- if (c->f) {
- fprintf(c->f, "Metadata for module %s owned by %s found: ",
- name, note_name);
- json_variant_dump(v, JSON_FORMAT_NEWLINE|JSON_FORMAT_PRETTY, c->f, NULL);
- fputc('\n', c->f);
- }
-
- /* Secondly, if we have a build-id, merge it in the same JSON object
- * so that it appears all nicely together in the logs/metadata. */
+ /* If we have a build-id, merge it in the same JSON object so that it appears all
+ * nicely together in the logs/metadata. */
if (id_json) {
r = json_variant_merge(&v, id_json);
if (r < 0)
- return log_error_errno(r, "json_variant_merge of package meta with buildid failed: %m");
+ return log_error_errno(r, "json_variant_merge of package meta with buildId failed: %m");
}
+ /* Pretty-print to the buffer, so that the metadata goes as plaintext in the
+ * journal. */
+ report_module_metadata(c, name, v);
+
/* Then we build a new object using the module name as the key, and merge it
* with the previous parses, so that in the end it all fits together in a single
* JSON blob. */
r = json_variant_merge(c->package_metadata, w);
if (r < 0)
- return log_error_errno(r, "json_variant_merge of package meta with buildid failed: %m");
+ return log_error_errno(r, "json_variant_merge of package meta with buildId failed: %m");
/* Finally stash the name, so we avoid double visits. */
r = set_put_strdup(c->modules, name);
* will then be added as metadata to the journal message with the stack trace. */
r = json_build(&id_json, JSON_BUILD_OBJECT(JSON_BUILD_PAIR("buildId", JSON_BUILD_HEX(id, id_len))));
if (r < 0)
- return log_error_errno(r, "json_build on build-id failed: %m");
-
- if (c->f) {
- JsonVariant *build_id = json_variant_by_key(id_json, "buildId");
- assert(build_id);
- fprintf(c->f, "Module %s with build-id %s\n", name, json_variant_string(build_id));
- }
+ return log_error_errno(r, "json_build on buildId failed: %m");
}
if (ret_id_json)
if (r < 0) {
log_warning("Could not parse number of program headers from core file: %s",
sym_elf_errmsg(-1)); /* -1 retrieves the most recent error */
+ report_module_metadata(c, name, id_json);
+
return DWARF_CB_OK;
}
}
if (!isempty(fstype) && !streq(fstype, "auto")) {
- r = fsck_exists(fstype);
+ r = fsck_exists_for_fstype(fstype);
if (r < 0)
log_warning_errno(r, "Checking was requested for %s, but couldn't detect if fsck.%s may be used, proceeding: %m", what, fstype);
else if (r == 0) {
log_debug("Checking was requested for %s, but fsck.%s does not exist.", what, fstype);
return 0;
}
+ } else {
+ r = fsck_exists();
+ if (r < 0)
+ log_warning_errno(r, "Checking was requested for %s, but couldn't detect if the fsck command may be used, proceeding: %m", what);
+ else if (r == 0) {
+ /* treat missing fsck as essentially OK */
+ log_debug("Checking was requested for %s, but the fsck command does not exist.", what);
+ return 0;
+ }
}
if (path_equal(where, "/")) {
#include "alloc-util.h"
#include "errno-util.h"
+#include "escape.h"
#include "fd-util.h"
#include "fileio.h"
#include "float.h"
return json_variant_new_stringn(ret, s, k);
}
+int json_variant_new_base32hex(JsonVariant **ret, const void *p, size_t n) {
+ _cleanup_free_ char *s = NULL;
+
+ assert_return(ret, -EINVAL);
+ assert_return(n == 0 || p, -EINVAL);
+
+ s = base32hexmem(p, n, false);
+ if (!s)
+ return -ENOMEM;
+
+ return json_variant_new_string(ret, s);
+}
+
int json_variant_new_hex(JsonVariant **ret, const void *p, size_t n) {
_cleanup_free_ char *s = NULL;
return json_variant_new_stringn(ret, s, n*2);
}
+int json_variant_new_octescape(JsonVariant **ret, const void *p, size_t n) {
+ _cleanup_free_ char *s = NULL;
+
+ assert_return(ret, -EINVAL);
+ assert_return(n == 0 || p, -EINVAL);
+
+ s = octescape(p, n);
+ if (!s)
+ return -ENOMEM;
+
+ return json_variant_new_string(ret, s);
+}
+
int json_variant_new_id128(JsonVariant **ret, sd_id128_t id) {
return json_variant_new_string(ret, SD_ID128_TO_STRING(id));
}
break;
}
- case _JSON_BUILD_BASE64: {
+ case _JSON_BUILD_BASE64:
+ case _JSON_BUILD_BASE32HEX:
+ case _JSON_BUILD_HEX:
+ case _JSON_BUILD_OCTESCAPE: {
const void *p;
size_t n;
n = va_arg(ap, size_t);
if (current->n_suppress == 0) {
- r = json_variant_new_base64(&add, p, n);
- if (r < 0)
- goto finish;
- }
-
- n_subtract = 1;
-
- if (current->expect == EXPECT_TOPLEVEL)
- current->expect = EXPECT_END;
- else if (current->expect == EXPECT_OBJECT_VALUE)
- current->expect = EXPECT_OBJECT_KEY;
- else
- assert(current->expect == EXPECT_ARRAY_ELEMENT);
-
- break;
- }
-
- case _JSON_BUILD_HEX: {
- const void *p;
- size_t n;
-
- if (!IN_SET(current->expect, EXPECT_TOPLEVEL, EXPECT_OBJECT_VALUE, EXPECT_ARRAY_ELEMENT)) {
- r = -EINVAL;
- goto finish;
- }
-
- p = va_arg(ap, const void *);
- n = va_arg(ap, size_t);
-
- if (current->n_suppress == 0) {
- r = json_variant_new_hex(&add, p, n);
+ r = command == _JSON_BUILD_BASE64 ? json_variant_new_base64(&add, p, n) :
+ command == _JSON_BUILD_BASE32HEX ? json_variant_new_base32hex(&add, p, n) :
+ command == _JSON_BUILD_HEX ? json_variant_new_hex(&add, p, n) :
+ json_variant_new_octescape(&add, p, n);
if (r < 0)
goto finish;
}
NULL);
}
+static void *dispatch_userdata(const JsonDispatch *p, void *userdata) {
+
+ /* When the userdata pointer is passed in as NULL, then we'll just use the offset as a literal
+ * address, and convert it to a pointer. Note that we might as well just add the offset to the NULL
+ * pointer, but UndefinedBehaviourSanitizer doesn't like pointer arithmetic based on NULL pointers,
+ * hence we code this explicitly here. */
+
+ if (userdata)
+ return (uint8_t*) userdata + p->offset;
+
+ return SIZE_TO_PTR(p->offset);
+}
+
int json_dispatch(JsonVariant *v, const JsonDispatch table[], JsonDispatchCallback bad, JsonDispatchFlags flags, void *userdata) {
size_t m;
int r, done = 0;
found[p-table] = true;
if (p->callback) {
- r = p->callback(json_variant_string(key), value, merged_flags, (uint8_t*) userdata + p->offset);
+ r = p->callback(json_variant_string(key), value, merged_flags, dispatch_userdata(p, userdata));
if (r < 0) {
if (merged_flags & JSON_PERMISSIVE)
continue;
return 0;
}
+int json_dispatch_int16(const char *name, JsonVariant *variant, JsonDispatchFlags flags, void *userdata) {
+ int16_t *i = ASSERT_PTR(userdata);
+
+ assert(variant);
+
+ if (!json_variant_is_integer(variant))
+ return json_log(variant, flags, SYNTHETIC_ERRNO(EINVAL), "JSON field '%s' is not an integer.", strna(name));
+
+ if (json_variant_integer(variant) < INT16_MIN || json_variant_integer(variant) > INT16_MAX)
+ return json_log(variant, flags, SYNTHETIC_ERRNO(ERANGE), "JSON field '%s' out of bounds.", strna(name));
+
+ *i = (int16_t) json_variant_integer(variant);
+ return 0;
+}
+
+int json_dispatch_uint16(const char *name, JsonVariant *variant, JsonDispatchFlags flags, void *userdata) {
+ uint16_t *i = ASSERT_PTR(userdata);
+
+ assert(variant);
+
+ if (!json_variant_is_unsigned(variant))
+ return json_log(variant, flags, SYNTHETIC_ERRNO(EINVAL), "JSON field '%s' is not an unsigned integer.", strna(name));
+
+ if (json_variant_unsigned(variant) > UINT16_MAX)
+ return json_log(variant, flags, SYNTHETIC_ERRNO(ERANGE), "JSON field '%s' out of bounds.", strna(name));
+
+ *i = (uint16_t) json_variant_unsigned(variant);
+ return 0;
+}
+
int json_dispatch_string(const char *name, JsonVariant *variant, JsonDispatchFlags flags, void *userdata) {
char **s = ASSERT_PTR(userdata);
int r;
int json_variant_new_stringn(JsonVariant **ret, const char *s, size_t n);
int json_variant_new_base64(JsonVariant **ret, const void *p, size_t n);
+int json_variant_new_base32hex(JsonVariant **ret, const void *p, size_t n);
int json_variant_new_hex(JsonVariant **ret, const void *p, size_t n);
+int json_variant_new_octescape(JsonVariant **ret, const void *p, size_t n);
int json_variant_new_integer(JsonVariant **ret, int64_t i);
int json_variant_new_unsigned(JsonVariant **ret, uint64_t u);
int json_variant_new_real(JsonVariant **ret, double d);
_JSON_BUILD_LITERAL,
_JSON_BUILD_STRV,
_JSON_BUILD_BASE64,
+ _JSON_BUILD_BASE32HEX,
_JSON_BUILD_HEX,
+ _JSON_BUILD_OCTESCAPE,
_JSON_BUILD_ID128,
_JSON_BUILD_BYTE_ARRAY,
_JSON_BUILD_HW_ADDR,
#define JSON_BUILD_LITERAL(l) _JSON_BUILD_LITERAL, (const char*) { l }
#define JSON_BUILD_STRV(l) _JSON_BUILD_STRV, (char**) { l }
#define JSON_BUILD_BASE64(p, n) _JSON_BUILD_BASE64, (const void*) { p }, (size_t) { n }
+#define JSON_BUILD_BASE32HEX(p, n) _JSON_BUILD_BASE32HEX, (const void*) { p }, (size_t) { n }
#define JSON_BUILD_HEX(p, n) _JSON_BUILD_HEX, (const void*) { p }, (size_t) { n }
+#define JSON_BUILD_OCTESCAPE(p, n) _JSON_BUILD_OCTESCAPE, (const void*) { p }, (size_t) { n }
#define JSON_BUILD_ID128(id) _JSON_BUILD_ID128, (const sd_id128_t*) { &(id) }
#define JSON_BUILD_BYTE_ARRAY(v, n) _JSON_BUILD_BYTE_ARRAY, (const void*) { v }, (size_t) { n }
#define JSON_BUILD_CONST_STRING(s) _JSON_BUILD_VARIANT, JSON_VARIANT_STRING_CONST(s)
int json_dispatch_uint64(const char *name, JsonVariant *variant, JsonDispatchFlags flags, void *userdata);
int json_dispatch_uint32(const char *name, JsonVariant *variant, JsonDispatchFlags flags, void *userdata);
int json_dispatch_int32(const char *name, JsonVariant *variant, JsonDispatchFlags flags, void *userdata);
+int json_dispatch_uint16(const char *name, JsonVariant *variant, JsonDispatchFlags flags, void *userdata);
+int json_dispatch_int16(const char *name, JsonVariant *variant, JsonDispatchFlags flags, void *userdata);
int json_dispatch_uid_gid(const char *name, JsonVariant *variant, JsonDispatchFlags flags, void *userdata);
int json_dispatch_user_group_name(const char *name, JsonVariant *variant, JsonDispatchFlags flags, void *userdata);
int json_dispatch_id128(const char *name, JsonVariant *variant, JsonDispatchFlags flags, void *userdata);
/* If this is already a block device and we are supposed to cover the whole of it
* then store an fd to the original open device node — and do not actually create an
* unnecessary loopback device for it. */
- return loop_device_open_full(NULL, fd, open_flags, lock_op, ret);
+ return loop_device_open_from_fd(fd, open_flags, lock_op, ret);
} else {
r = stat_verify_regular(&st);
if (r < 0)
d->relinquished = false;
}
-int loop_device_open_full(
- const char *loop_path,
- int loop_fd,
+int loop_device_open(
+ sd_device *dev,
int open_flags,
int lock_op,
LoopDevice **ret) {
- _cleanup_(sd_device_unrefp) sd_device *dev = NULL;
_cleanup_close_ int fd = -1, lock_fd = -1;
- _cleanup_free_ char *p = NULL, *backing_file = NULL;
+ _cleanup_free_ char *node = NULL, *backing_file = NULL;
struct loop_info64 info;
uint64_t diskseq = 0;
- struct stat st;
LoopDevice *d;
+ const char *s;
+ dev_t devnum;
int r, nr = -1;
- assert(loop_path || loop_fd >= 0);
+ assert(dev);
assert(IN_SET(open_flags, O_RDWR, O_RDONLY));
assert(ret);
- if (loop_fd < 0) {
- fd = open(loop_path, O_CLOEXEC|O_NONBLOCK|O_NOCTTY|open_flags);
- if (fd < 0)
- return -errno;
- loop_fd = fd;
- }
-
- if (fstat(loop_fd, &st) < 0)
- return -errno;
- if (!S_ISBLK(st.st_mode))
- return -ENOTBLK;
-
- r = sd_device_new_from_stat_rdev(&dev, &st);
- if (r < 0)
- return r;
+ /* Even if fd is provided through the argument in loop_device_open_from_fd(), we reopen the inode
+ * here, instead of keeping just a dup() clone of it around, since we want to ensure that the
+ * O_DIRECT flag of the handle we keep is off, we have our own file index, and have the right
+ * read/write mode in effect. */
+ fd = sd_device_open(dev, O_CLOEXEC|O_NONBLOCK|O_NOCTTY|open_flags);
+ if (fd < 0)
+ return fd;
- if (fd < 0) {
- /* If loop_fd is provided through the argument, then we reopen the inode here, instead of
- * keeping just a dup() clone of it around, since we want to ensure that the O_DIRECT
- * flag of the handle we keep is off, we have our own file index, and have the right
- * read/write mode in effect.*/
- fd = fd_reopen(loop_fd, O_CLOEXEC|O_NONBLOCK|O_NOCTTY|open_flags);
- if (fd < 0)
- return fd;
- loop_fd = fd;
+ if ((lock_op & ~LOCK_NB) != LOCK_UN) {
+ lock_fd = open_lock_fd(fd, lock_op);
+ if (lock_fd < 0)
+ return lock_fd;
}
- if (ioctl(loop_fd, LOOP_GET_STATUS64, &info) >= 0) {
- const char *s;
-
+ if (ioctl(fd, LOOP_GET_STATUS64, &info) >= 0) {
#if HAVE_VALGRIND_MEMCHECK_H
/* Valgrind currently doesn't know LOOP_GET_STATUS64. Remove this once it does */
VALGRIND_MAKE_MEM_DEFINED(&info, sizeof(info));
}
}
- r = fd_get_diskseq(loop_fd, &diskseq);
+ r = fd_get_diskseq(fd, &diskseq);
if (r < 0 && r != -EOPNOTSUPP)
return r;
- if ((lock_op & ~LOCK_NB) != LOCK_UN) {
- lock_fd = open_lock_fd(loop_fd, lock_op);
- if (lock_fd < 0)
- return lock_fd;
- }
+ r = sd_device_get_devnum(dev, &devnum);
+ if (r < 0)
+ return r;
- r = sd_device_get_devname(dev, &loop_path);
+ r = sd_device_get_devname(dev, &s);
if (r < 0)
return r;
- p = strdup(loop_path);
- if (!p)
+ node = strdup(s);
+ if (!node)
return -ENOMEM;
d = new(LoopDevice, 1);
.fd = TAKE_FD(fd),
.lock_fd = TAKE_FD(lock_fd),
.nr = nr,
- .node = TAKE_PTR(p),
- .dev = TAKE_PTR(dev),
+ .node = TAKE_PTR(node),
+ .dev = sd_device_ref(dev),
.backing_file = TAKE_PTR(backing_file),
.relinquished = true, /* It's not ours, don't try to destroy it when this object is freed */
- .devno = st.st_rdev,
+ .devno = devnum,
.diskseq = diskseq,
.uevent_seqnum_not_before = UINT64_MAX,
.timestamp_not_before = USEC_INFINITY,
};
*ret = d;
- return d->fd;
+ return 0;
+}
+
+int loop_device_open_from_fd(
+ int fd,
+ int open_flags,
+ int lock_op,
+ LoopDevice **ret) {
+
+ _cleanup_(sd_device_unrefp) sd_device *dev = NULL;
+ int r;
+
+ assert(fd >= 0);
+
+ r = block_device_new_from_fd(fd, 0, &dev);
+ if (r < 0)
+ return r;
+
+ return loop_device_open(dev, open_flags, lock_op, ret);
+}
+
+int loop_device_open_from_path(
+ const char *path,
+ int open_flags,
+ int lock_op,
+ LoopDevice **ret) {
+
+ _cleanup_(sd_device_unrefp) sd_device *dev = NULL;
+ int r;
+
+ assert(path);
+
+ r = block_device_new_from_path(path, 0, &dev);
+ if (r < 0)
+ return r;
+
+ return loop_device_open(dev, open_flags, lock_op, ret);
}
static int resize_partition(int partition_fd, uint64_t offset, uint64_t size) {
int loop_device_make(int fd, int open_flags, uint64_t offset, uint64_t size, uint32_t loop_flags, int lock_op, LoopDevice **ret);
int loop_device_make_by_path(const char *path, int open_flags, uint32_t loop_flags, int lock_op, LoopDevice **ret);
-int loop_device_open_full(const char *loop_path, int loop_fd, int open_flags, int lock_op, LoopDevice **ret);
-static inline int loop_device_open(const char *loop_path, int open_flags, int lock_op, LoopDevice **ret) {
- return loop_device_open_full(loop_path, -1, open_flags, lock_op, ret);
-}
+int loop_device_open(sd_device *dev, int open_flags, int lock_op, LoopDevice **ret);
+int loop_device_open_from_fd(int fd, int open_flags, int lock_op, LoopDevice **ret);
+int loop_device_open_from_path(const char *path, int open_flags, int lock_op, LoopDevice **ret);
LoopDevice* loop_device_ref(LoopDevice *d);
LoopDevice* loop_device_unref(LoopDevice *d);
good = is_clean_exit(p->code, p->status, EXIT_CLEAN_DAEMON, NULL);
if (!good) {
- on = ansi_highlight_red();
+ on = p->ignore ? ansi_highlight_yellow() : ansi_highlight_red();
off = ansi_normal();
} else
on = off = "";
SD_EVENT_PRIORITY_IDLE = 100
};
+#define SD_EVENT_SIGNAL_PROCMASK (1 << 30)
+
typedef int (*sd_event_handler_t)(sd_event_source *s, void *userdata);
typedef int (*sd_event_io_handler_t)(sd_event_source *s, int fd, uint32_t revents, void *userdata);
typedef int (*sd_event_time_handler_t)(sd_event_source *s, uint64_t usec, void *userdata);
int sd_event_set_watchdog(sd_event *e, int b);
int sd_event_get_watchdog(sd_event *e);
int sd_event_get_iteration(sd_event *e, uint64_t *ret);
+int sd_event_set_signal_exit(sd_event *e, int b);
sd_event_source* sd_event_source_ref(sd_event_source *s);
sd_event_source* sd_event_source_unref(sd_event_source *s);
dump_glyph(SPECIAL_GLYPH_MULTIPLICATION_SIGN);
dump_glyph(SPECIAL_GLYPH_CIRCLE_ARROW);
dump_glyph(SPECIAL_GLYPH_BULLET);
+ dump_glyph(SPECIAL_GLYPH_ARROW_LEFT);
dump_glyph(SPECIAL_GLYPH_ARROW_RIGHT);
dump_glyph(SPECIAL_GLYPH_ARROW_UP);
dump_glyph(SPECIAL_GLYPH_ARROW_DOWN);
#include <sys/file.h>
#include "alloc-util.h"
+#include "capability-util.h"
#include "dissect-image.h"
#include "fd-util.h"
#include "fileio.h"
#include "mount-util.h"
#include "namespace-util.h"
#include "parse-util.h"
+#include "path-util.h"
#include "string-util.h"
#include "strv.h"
#include "tests.h"
#if HAVE_BLKID
static usec_t end = 0;
+static void verify_dissected_image(DissectedImage *dissected) {
+ assert_se(dissected->partitions[PARTITION_ESP].found);
+ assert_se(dissected->partitions[PARTITION_ESP].node);
+ assert_se(dissected->partitions[PARTITION_XBOOTLDR].found);
+ assert_se(dissected->partitions[PARTITION_XBOOTLDR].node);
+ assert_se(dissected->partitions[PARTITION_ROOT].found);
+ assert_se(dissected->partitions[PARTITION_ROOT].node);
+ assert_se(dissected->partitions[PARTITION_HOME].found);
+ assert_se(dissected->partitions[PARTITION_HOME].node);
+}
+
static void* thread_func(void *ptr) {
int fd = PTR_TO_FD(ptr);
int r;
partition_designator_to_string(d));
}
- assert_se(dissected->partitions[PARTITION_ESP].found);
- assert_se(dissected->partitions[PARTITION_ESP].node);
- assert_se(dissected->partitions[PARTITION_XBOOTLDR].found);
- assert_se(dissected->partitions[PARTITION_XBOOTLDR].node);
- assert_se(dissected->partitions[PARTITION_ROOT].found);
- assert_se(dissected->partitions[PARTITION_ROOT].node);
- assert_se(dissected->partitions[PARTITION_HOME].found);
- assert_se(dissected->partitions[PARTITION_HOME].node);
+ verify_dissected_image(dissected);
r = dissected_image_mount(dissected, mounted, UID_INVALID, UID_INVALID, DISSECT_IMAGE_READ_ONLY);
log_notice_errno(r, "Mounted %s → %s: %m", loop->node, mounted);
}
static int run(int argc, char *argv[]) {
+#if HAVE_BLKID
+ _cleanup_(dissected_image_unrefp) DissectedImage *dissected = NULL;
+ _cleanup_(umount_and_rmdir_and_freep) char *mounted = NULL;
+ pthread_t threads[arg_n_threads];
+ sd_id128_t id;
+#endif
_cleanup_free_ char *p = NULL, *cmd = NULL;
_cleanup_(pclosep) FILE *sfdisk = NULL;
_cleanup_(loop_device_unrefp) LoopDevice *loop = NULL;
if (argc >= 5)
return log_error_errno(SYNTHETIC_ERRNO(EINVAL), "Too many arguments (expected 3 at max).");
- if (!have_root_gpt_type()) {
- log_tests_skipped("No root partition GPT defined for this architecture, exiting.");
- return EXIT_TEST_SKIP;
- }
-
- if (detect_container() > 0) {
- log_tests_skipped("Test not supported in a container, requires udev/uevent notifications.");
- return EXIT_TEST_SKIP;
- }
-
- /* This is a test for the loopback block device setup code and it's use by the image dissection
- * logic: since the kernel APIs are hard use and prone to races, let's test this in a heavy duty
- * test: we open a bunch of threads and repeatedly allocate and deallocate loopback block devices in
- * them in parallel, with an image file with a number of partitions. */
-
- r = detach_mount_namespace();
- if (ERRNO_IS_PRIVILEGE(r)) {
- log_tests_skipped("Lacking privileges");
- return EXIT_TEST_SKIP;
- }
+ if (!have_root_gpt_type())
+ return log_tests_skipped("No root partition GPT defined for this architecture");
- FOREACH_STRING(fs, "vfat", "ext4") {
- r = mkfs_exists(fs);
- assert_se(r >= 0);
- if (!r) {
- log_tests_skipped("mkfs.{vfat|ext4} not installed");
- return EXIT_TEST_SKIP;
- }
- }
-
- assert_se(r >= 0);
+ r = find_executable("sfdisk", NULL);
+ if (r < 0)
+ return log_tests_skipped_errno(r, "Could not find sfdisk command");
assert_se(tempfn_random_child("/var/tmp", "sfdisk", &p) >= 0);
fd = open(p, O_CREAT|O_EXCL|O_RDWR|O_CLOEXEC|O_NOFOLLOW, 0666);
assert_se(pclose(sfdisk) == 0);
sfdisk = NULL;
+#if HAVE_BLKID
+ assert_se(dissect_image_file(p, NULL, NULL, 0, &dissected) >= 0);
+ verify_dissected_image(dissected);
+ dissected = dissected_image_unref(dissected);
+#endif
+
+ if (geteuid() != 0 || have_effective_cap(CAP_SYS_ADMIN) <= 0) {
+ log_tests_skipped("not running privileged");
+ return 0;
+ }
+
+ if (detect_container() > 0) {
+ log_tests_skipped("Test not supported in a container, requires udev/uevent notifications");
+ return 0;
+ }
+
assert_se(loop_device_make(fd, O_RDWR, 0, UINT64_MAX, LO_FLAGS_PARTSCAN, LOCK_EX, &loop) >= 0);
#if HAVE_BLKID
- _cleanup_(dissected_image_unrefp) DissectedImage *dissected = NULL;
- _cleanup_(umount_and_rmdir_and_freep) char *mounted = NULL;
- pthread_t threads[arg_n_threads];
- sd_id128_t id;
-
assert_se(dissect_loop_device(loop, NULL, NULL, 0, &dissected) >= 0);
+ verify_dissected_image(dissected);
- assert_se(dissected->partitions[PARTITION_ESP].found);
- assert_se(dissected->partitions[PARTITION_ESP].node);
- assert_se(dissected->partitions[PARTITION_XBOOTLDR].found);
- assert_se(dissected->partitions[PARTITION_XBOOTLDR].node);
- assert_se(dissected->partitions[PARTITION_ROOT].found);
- assert_se(dissected->partitions[PARTITION_ROOT].node);
- assert_se(dissected->partitions[PARTITION_HOME].found);
- assert_se(dissected->partitions[PARTITION_HOME].node);
+ FOREACH_STRING(fs, "vfat", "ext4") {
+ r = mkfs_exists(fs);
+ assert_se(r >= 0);
+ if (!r) {
+ log_tests_skipped("mkfs.{vfat|ext4} not installed");
+ return 0;
+ }
+ }
+ assert_se(r >= 0);
assert_se(sd_id128_randomize(&id) >= 0);
assert_se(make_filesystem(dissected->partitions[PARTITION_ESP].node, "vfat", "EFI", NULL, id, true) >= 0);
dissected = dissected_image_unref(dissected);
assert_se(dissect_loop_device(loop, NULL, NULL, 0, &dissected) >= 0);
+ verify_dissected_image(dissected);
assert_se(mkdtemp_malloc(NULL, &mounted) >= 0);
* it. */
assert_se(loop_device_flock(loop, LOCK_SH) >= 0);
+ /* This is a test for the loopback block device setup code and its use by the image dissection
+ * logic: since the kernel APIs are hard to use and prone to races, let's test this in a heavy duty
+ * test: we open a bunch of threads and repeatedly allocate and deallocate loopback block devices in
+ * them in parallel, with an image file with a number of partitions. */
+ assert_se(detach_mount_namespace() >= 0);
+
/* This first (writable) mount will initialize the mount point dirs, so that the subsequent read-only ones can work */
assert_se(dissected_image_mount(dissected, mounted, UID_INVALID, UID_INVALID, 0) >= 0);
void *k;
assert_se(pthread_join(threads[i], &k) == 0);
- assert_se(k == NULL);
+ assert_se(!k);
log_notice("Joined thread #%u.", i);
}
#else
log_notice("Cutting test short, since we do not have libblkid.");
#endif
-
return 0;
}
HASHMAP_FOREACH_KEY(p, k, h) {
int mnt_id = PTR_TO_INT(k), mnt_id2;
+ const char *q;
r = path_get_mnt_id(p, &mnt_id2);
if (r < 0) {
- log_debug_errno(r, "Failed to get the mnt id of %s: %m\n", p);
+ log_debug_errno(r, "Failed to get the mnt id of %s: %m", p);
continue;
}
if (mnt_id == mnt_id2) {
- log_debug("mnt ids of %s is %i\n", p, mnt_id);
+ log_debug("mnt ids of %s is %i.", p, mnt_id);
continue;
} else
- log_debug("mnt ids of %s are %i, %i\n", p, mnt_id, mnt_id2);
-
- /* The ids don't match? If so, then there are two mounts on the same path, let's check if
- * that's really the case */
- char *t = hashmap_get(h, INT_TO_PTR(mnt_id2));
- log_debug("the other path for mnt id %i is %s\n", mnt_id2, t);
- assert_se(path_equal(p, t));
+ log_debug("mnt ids of %s are %i (from /proc/self/mountinfo), %i (from path_get_mnt_id()).", p, mnt_id, mnt_id2);
+
+ /* The ids don't match? This can easily happen e.g. running with "unshare --mount-proc".
+ * See #11505. */
+ assert_se(q = hashmap_get(h, INT_TO_PTR(mnt_id2)));
+
+ assert_se((r = path_is_mount_point(p, NULL, 0)) >= 0);
+ if (r == 0) {
+ /* If the path is not a mount point anymore, then it must be a subdirectory of
+ * the path that corresponds to mnt_id2. */
+ log_debug("The path %s for mnt id %i is not a mount point.", p, mnt_id2);
+ assert_se(!isempty(path_startswith(p, q)));
+ } else {
+ /* If the path is still a mount point, then it must be equivalent to the path
+ * that corresponds to mnt_id2. */
+ log_debug("There are multiple mounts on the same path %s.", p);
+ assert_se(path_equal(p, q));
+ }
}
}
assert_se(rl1t == 0);
} else
- printf("Skipping bind mount file test: %m\n");
+ log_info("Skipping bind mount file test");
assert_se(rm_rf(tmp_dir, REMOVE_ROOT|REMOVE_PHYSICAL) == 0);
}
assert_se(unsetenv("PATH") == 0);
/* fsck.minix is provided by util-linux and will probably exist. */
- assert_se(fsck_exists("minix") == 1);
+ assert_se(fsck_exists_for_fstype("minix") == 1);
- assert_se(fsck_exists("AbCdE") == 0);
- assert_se(fsck_exists("/../bin/") == 0);
+ assert_se(fsck_exists_for_fstype("AbCdE") == 0);
+ assert_se(fsck_exists_for_fstype("/../bin/") == 0);
}
static void test_path_make_relative_one(const char *from, const char *to, const char *expected) {
#include "terminal-util.h"
#include "umask-util.h"
#include "user-util.h"
+#include "virt.h"
/* This reads all files listed in /etc/tmpfiles.d/?*.conf and creates
* them in the file system. This is intended to be used to create
DEFINE_PRIVATE_STRING_TABLE_LOOKUP_TO_STRING(creation_mode_verb, CreationMode);
-static int specifier_machine_id_safe(char specifier, const void *data, const char *root, const void *userdata, char **ret) {
- int r;
-
- /* If /etc/machine_id is missing or empty (e.g. in a chroot environment) return a recognizable error
- * so that the caller can skip the rule gracefully. */
-
- r = specifier_machine_id(specifier, data, root, userdata, ret);
- if (IN_SET(r, -ENOENT, -ENOMEDIUM))
- return -ENXIO;
-
- return r;
+/* Different kinds of errors that mean that information is not available in the environment. */
+static inline bool ERRNO_IS_NOINFO(int r) {
+ return IN_SET(abs(r),
+ EUNATCH, /* os-release or machine-id missing */
+ ENOMEDIUM, /* machine-id or another file empty */
+ ENXIO); /* env var is unset */
}
static int specifier_directory(char specifier, const void *data, const char *root, const void *userdata, char **ret) {
static int log_unresolvable_specifier(const char *filename, unsigned line) {
static bool notified = false;
- /* In system mode, this is called when /etc is not fully initialized (e.g.
- * in a chroot environment) where some specifiers are unresolvable. In user
- * mode, this is called when some variables are not defined. These cases are
- * not considered as an error so log at LOG_NOTICE only for the first time
- * and then downgrade this to LOG_DEBUG for the rest. */
+ /* In system mode, this is called when /etc is not fully initialized and some specifiers are
+ * unresolvable. In user mode, this is called when some variables are not defined. These cases are
+ * not considered a fatal error, so log at LOG_NOTICE only for the first time and then downgrade this
+ * to LOG_DEBUG for the rest.
+ *
+ * If we're running in a chroot (--root was used or sd_booted() reports that systemd is not running),
+ * always use LOG_DEBUG. We may be called to initialize a chroot before booting and there is no
+ * expectation that machine-id and other files will be populated.
+ */
+
+ int log_level = notified || arg_root || running_in_chroot() > 0 ?
+ LOG_DEBUG : LOG_NOTICE;
log_syntax(NULL,
- notified ? LOG_DEBUG : LOG_NOTICE,
+ log_level,
filename, line, 0,
- "Failed to resolve specifier: %s, skipping",
+ "Failed to resolve specifier: %s, skipping.",
arg_user ? "Required $XDG_... variable not defined" : "uninitialized /etc/ detected");
if (!notified)
- log_notice("All rules containing unresolvable specifiers will be skipped.");
+ log_full(log_level,
+ "All rules containing unresolvable specifiers will be skipped.");
notified = true;
return 0;
return r;
r = xdg_user_config_dir(&persistent_config, "/user-tmpfiles.d");
- if (r < 0 && r != -ENXIO)
+ if (r < 0 && !ERRNO_IS_NOINFO(r))
return r;
r = xdg_user_runtime_dir(&runtime_config, "/user-tmpfiles.d");
- if (r < 0 && r != -ENXIO)
+ if (r < 0 && !ERRNO_IS_NOINFO(r))
return r;
r = xdg_user_data_dir(&data_home, "/user-tmpfiles.d");
- if (r < 0 && r != -ENXIO)
+ if (r < 0 && !ERRNO_IS_NOINFO(r))
return r;
r = strv_extend_strv_concat(&res, config_dirs, "/user-tmpfiles.d");
{ 'B', specifier_os_build_id, NULL },
{ 'H', specifier_hostname, NULL },
{ 'l', specifier_short_hostname, NULL },
- { 'm', specifier_machine_id_safe, NULL },
+ { 'm', specifier_machine_id, NULL },
{ 'o', specifier_os_id, NULL },
{ 'v', specifier_kernel_release, NULL },
{ 'w', specifier_os_version_id, NULL },
i.try_replace = try_replace;
r = specifier_printf(path, PATH_MAX-1, specifier_table, arg_root, NULL, &i.path);
- if (r == -ENXIO)
+ if (ERRNO_IS_NOINFO(r))
return log_unresolvable_specifier(fname, line);
if (r < 0) {
if (IN_SET(r, -EINVAL, -EBADSLT))
if (!unbase64) {
/* Do specifier expansion except if base64 mode is enabled */
r = specifier_expansion_from_arg(specifier_table, &i);
- if (r == -ENXIO)
+ if (ERRNO_IS_NOINFO(r))
return log_unresolvable_specifier(fname, line);
if (r < 0) {
if (IN_SET(r, -EINVAL, -EBADSLT))
r = read_credential(i.argument, &i.binary_argument, &i.binary_argument_size);
if (IN_SET(r, -ENXIO, -ENOENT)) {
/* Silently skip over lines that have no credentials passed */
- log_syntax(NULL, LOG_INFO, fname, line, 0, "Credential '%s' not specified, skipping line.", i.argument);
+ log_syntax(NULL, LOG_INFO, fname, line, 0,
+ "Credential '%s' not specified, skipping line.", i.argument);
return 0;
}
if (r < 0)
return 0;
}
+ /* If a path is provided, then it cannot be a unit name. Let's return early. */
+ if (is_path(id))
+ return -ENODEV;
+
/* Check if the argument looks like a device unit name. */
return find_device_from_unit(id, ret);
}
def check_output(self, expected_output):
for _ in range(15):
+ # Wait until the unit finishes so we don't check an incomplete log
+ if subprocess.call(['systemctl', '-q', 'is-active', self.unit]) == 0:
+ continue
+
try:
with open(self.output_file, 'r', encoding='utf-8') as log:
output = log.read()
)
if ! get_bool "$INTERACTIVE_DEBUG"; then
- kernel_params+=("systemd.wants=end.service")
+ kernel_params+=(
+ "oops=panic"
+ "panic=1"
+ "softlockup_panic=1"
+ "systemd.wants=end.service"
+ )
fi
[ -e "$IMAGE_PRIVATE" ] && image="$IMAGE_PRIVATE" || image="$IMAGE_PUBLIC"
udevadm control --log-level debug
ARGS=()
-state_directory=/var/lib/private/
+STATE_DIRECTORY=/var/lib/private/
if [[ -v ASAN_OPTIONS || -v UBSAN_OPTIONS ]]; then
# If we're running under sanitizers, we need to use a less restrictive
# profile, otherwise LSan syscall would get blocked by seccomp
ARGS+=(--profile=trusted)
# With the trusted profile DynamicUser is disabled, so the storage is not in private/
- state_directory=/var/lib/
+ STATE_DIRECTORY=/var/lib/
fi
+# Bump the timeout if we're running with plain QEMU
+[[ "$(systemd-detect-virt -v)" == "qemu" ]] && TIMEOUT=60 || TIMEOUT=30
systemd-dissect --no-pager /usr/share/minimal_0.raw | grep -q '✓ portable service'
systemd-dissect --no-pager /usr/share/minimal_1.raw | grep -q '✓ portable service'
# Running with sanitizers may freeze the invoked service. See issue #24147.
# Let's set timeout to improve performance.
-timeout 30 portablectl "${ARGS[@]}" reattach --now --runtime /usr/share/minimal_1.raw minimal-app0
+timeout "$TIMEOUT" portablectl "${ARGS[@]}" reattach --now --runtime /usr/share/minimal_1.raw minimal-app0
systemctl is-active minimal-app0.service
systemctl is-active minimal-app0-bar.service
systemctl is-active minimal-app0-foo.service
systemctl is-active minimal-app0-bar.service && exit 1
-timeout 30 portablectl "${ARGS[@]}" reattach --now --enable --runtime /tmp/minimal_1 minimal-app0
+timeout "$TIMEOUT" portablectl "${ARGS[@]}" reattach --now --enable --runtime /tmp/minimal_1 minimal-app0
systemctl is-active minimal-app0.service
systemctl is-active minimal-app0-bar.service
status="$(portablectl is-attached --extension app0 minimal_0)"
[[ "${status}" == "running-runtime" ]]
-timeout 30 portablectl "${ARGS[@]}" reattach --now --runtime --extension /usr/share/app0.raw /usr/share/minimal_1.raw app0
+timeout "$TIMEOUT" portablectl "${ARGS[@]}" reattach --now --runtime --extension /usr/share/app0.raw /usr/share/minimal_1.raw app0
systemctl is-active app0.service
status="$(portablectl is-attached --extension app0 minimal_1)"
# Ensure that adding or removing a version to the image doesn't break reattaching
cp /usr/share/app1.raw /tmp/app1_2.raw
-timeout 30 portablectl "${ARGS[@]}" reattach --now --runtime --extension /tmp/app1_2.raw /usr/share/minimal_1.raw app1
+timeout "$TIMEOUT" portablectl "${ARGS[@]}" reattach --now --runtime --extension /tmp/app1_2.raw /usr/share/minimal_1.raw app1
systemctl is-active app1.service
status="$(portablectl is-attached --extension app1_2 minimal_1)"
[[ "${status}" == "running-runtime" ]]
-timeout 30 portablectl "${ARGS[@]}" reattach --now --runtime --extension /usr/share/app1.raw /usr/share/minimal_1.raw app1
+timeout "$TIMEOUT" portablectl "${ARGS[@]}" reattach --now --runtime --extension /usr/share/app1.raw /usr/share/minimal_1.raw app1
systemctl is-active app1.service
status="$(portablectl is-attached --extension app1 minimal_1)"
[[ "${status}" == "running-runtime" ]]
-portablectl detach --now --runtime --extension /usr/share/app1.raw /usr/share/minimal_1.raw app1
+portablectl detach --force --no-reload --runtime --extension /usr/share/app1.raw /usr/share/minimal_1.raw app1
+portablectl "${ARGS[@]}" attach --force --no-reload --runtime --extension /usr/share/app1.raw /usr/share/minimal_0.raw app1
+systemctl daemon-reload
+systemctl restart app1.service
+
+systemctl is-active app1.service
+status="$(portablectl is-attached --extension app1 minimal_0)"
+[[ "${status}" == "running-runtime" ]]
+
+portablectl detach --now --runtime --extension /usr/share/app1.raw /usr/share/minimal_0.raw app1
# Ensure that the combination of read-only images, state directory and dynamic user works, and that
# state is retained. Check after detaching, as on slow systems (eg: sanitizers) it might take a while
# after the service is attached before the file appears.
-grep -q -F bar "${state_directory}/app0/foo"
-grep -q -F baz "${state_directory}/app1/foo"
+grep -q -F bar "${STATE_DIRECTORY}/app0/foo"
+grep -q -F baz "${STATE_DIRECTORY}/app1/foo"
# portablectl also works with directory paths rather than images
}
testcase_simultaneous_events() {
- local disk expected i iterations link num_part part partscript rule target timeout
+ local disk expected i iterations key link num_part part partscript rule target timeout
local -a devices symlinks
+ local -A running
if [[ -n "${ASAN_OPTIONS:-}" ]] || [[ "$(systemd-detect-virt -v)" == "qemu" ]]; then
num_part=2
else
udevadm lock --device="${devices[$disk]}" sfdisk -q -X gpt "${devices[$disk]}" <"$partscript" &
fi
+ running[$disk]=$!
done
- # Wait for the above sfdisk commands to be finished.
- for disk in {0..9}; do
- udevadm lock --device="${devices[$disk]}" true
+ for key in "${!running[@]}"; do
+ wait "${running[$key]}"
+ unset "running[$key]"
done
if ((i % 10 <= 1)); then
: >/failed
RUN_OUT="$(mktemp)"
-NOTIFICATION_SUBSCRIPTION_SCRIPT="/tmp/subscribe.sh"
-NOTIFICATION_LOGS="/tmp/notifications.txt"
-
-at_exit() {
- set +e
- cat "$NOTIFICATION_LOGS"
-}
-
-trap at_exit EXIT
run() {
"$@" |& tee "$RUN_OUT"
}
-run_retry() {
- local ntries="${1:?}"
- local i
-
- shift
-
- for ((i = 0; i < ntries; i++)); do
- "$@" && return 0
- sleep .5
- done
-
- return 1
-}
+monitor_check_rr() {
+ local match="${1:?}"
-notification_check_host() {
- local host="${1:?}"
- local address="${2:?}"
-
- # Attempt to parse the notification JSON returned over varlink and check
- # if it contains the requested record. As this is an async operation, let's
- # retry it a couple of times in case it fails.
- #
- # Example JSON:
- # {
- # "parameters": {
- # "addresses": [
- # {
- # "ifindex": 2,
- # "family": 2,
- # "address": [
- # 10,
- # 0,
- # 0,
- # 121
- # ],
- # "type": "A"
- # }
- # ],
- # "name": "untrusted.test"
- # },
- # "continues": true
- # }
- #
- # Note: we need to do some post-processing of the $NOTIFICATION_LOGS file,
- # since the JSON objects are concatenated with \0 instead of a newline
- # shellcheck disable=SC2016
- run_retry 10 jq --slurp \
- --exit-status \
- --arg host "$host" \
- --arg address "$address" \
- '.[] | select(.parameters.name == $host) | .parameters.addresses[] | select(.address | join(".") == $address) | true' \
- <(tr '\0' '\n' <"$NOTIFICATION_LOGS")
+ # Wait until the first mention of the specified log message is
+ # displayed. We turn off pipefail for this, since we don't care about the
+ # lhs of this pipe expression; we only care that the rhs's result is
+ # clean.
+ set +o pipefail
+ journalctl -u resmontest.service -f --full | grep -m1 "$match"
+ set -o pipefail
}
### SETUP ###
DNS=10.0.0.1
EOF
-# Script to dump DNS notifications to a txt file
-cat >$NOTIFICATION_SUBSCRIPTION_SCRIPT <<EOF
-#!/bin/sh
-printf '
-{
- "method": "io.systemd.Resolve.Monitor.SubscribeQueryResults",
- "more": true
-}\0' | nc -U /run/systemd/resolve/io.systemd.Resolve.Monitor > $NOTIFICATION_LOGS
-EOF
-chmod a+x $NOTIFICATION_SUBSCRIPTION_SCRIPT
-
{
echo "FallbackDNS="
echo "DNSSEC=allow-downgrade"
echo "DNSOverTLS=opportunistic"
- echo "Monitor=yes"
} >>/etc/systemd/resolved.conf
ln -svf /run/systemd/resolve/stub-resolv.conf /etc/resolv.conf
# Override the default NTA list, which turns off DNSSEC validation for (among
resolvectl status
resolvectl log-level debug
-# Verify that DNS notifications are enabled (Monitor=yes)
-run busctl get-property org.freedesktop.resolve1 /org/freedesktop/resolve1 org.freedesktop.resolve1.Manager Monitor
-grep -qF 'b true' "$RUN_OUT"
-
-# Start monitoring DNS notifications
-systemd-run $NOTIFICATION_SUBSCRIPTION_SCRIPT
+# Start monitoring queries
+systemd-run -u resmontest.service -p Type=notify resolvectl monitor
# We need to manually propagate the DS records of onlinesign.test. to the parent
# zone, since they're generated online
# Sanity check
run getent -s resolve hosts ns1.unsigned.test
grep -qE "^10\.0\.0\.1\s+ns1\.unsigned\.test" "$RUN_OUT"
-notification_check_host "ns1.unsigned.test" "10.0.0.1"
+monitor_check_rr "ns1.unsigned.test IN A 10.0.0.1"
# Issue: https://github.com/systemd/systemd/issues/18812
# PR: https://github.com/systemd/systemd/pull/18896
run resolvectl query -t A cname-chain.signed.test
grep -qF "follow14.final.signed.test IN A 10.0.0.14" "$RUN_OUT"
grep -qF "authenticated: yes" "$RUN_OUT"
-notification_check_host "follow10.so.close.signed.test" "10.0.0.14"
+
+monitor_check_rr "follow10.so.close.signed.test IN CNAME follow11.yet.so.far.signed.test"
+monitor_check_rr "follow11.yet.so.far.signed.test IN CNAME follow12.getting.hot.signed.test"
+monitor_check_rr "follow12.getting.hot.signed.test IN CNAME follow13.almost.final.signed.test"
+monitor_check_rr "follow13.almost.final.signed.test IN CNAME follow14.final.signed.test"
+monitor_check_rr "follow14.final.signed.test IN A 10.0.0.14"
+
# Non-existing RR + CNAME chain
run dig +dnssec AAAA cname-chain.signed.test
grep -qF "status: NOERROR" "$RUN_OUT"
# Resolve via dbus method
run busctl call org.freedesktop.resolve1 /org/freedesktop/resolve1 org.freedesktop.resolve1.Manager ResolveHostname 'isit' 0 secondsub.onlinesign.test 0 0
grep -qF '10 0 0 134 "secondsub.onlinesign.test"' "$RUN_OUT"
-notification_check_host "secondsub.onlinesign.test" "10.0.0.134"
+monitor_check_rr "secondsub.onlinesign.test IN A 10.0.0.134"
: "--- ZONE: untrusted.test (DNSSEC without propagated DS records) ---"
run dig +short untrusted.test
#run dig +dnssec this.does.not.exist.untrusted.test
#grep -qF "status: NXDOMAIN" "$RUN_OUT"
+systemctl stop resmontest.service
+
touch /testok
rm /failed