In all our daemons the primary entrypoint object is called "Manager".
But so far there has been one exception: in journald it was called "Server".
Let's normalize that, and stick to the same nomenclature everywhere, to
make journald less special.
No real code change, just some search&replace.
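
For readers skimming the diff below, the rename is purely mechanical: every "Server"/server_* identifier becomes "Manager"/manager_*, local variables named "s" become "m", and pre-existing locals named "m" are renamed to "mm" so they don't clash with the new convention. Here is a minimal, self-contained sketch of that pattern; the struct members and helpers are placeholders for illustration, not the real journald definitions:

```c
/* Illustrative sketch of the naming convention only; not journald's actual types. */
#include <assert.h>
#include <errno.h>
#include <stdlib.h>

#define _cleanup_(f) __attribute__((cleanup(f)))     /* same idea as systemd's macro */

typedef struct Manager {                 /* was: typedef struct Server Server; */
        char *buffer;                    /* placeholder member */
} Manager;

static int manager_new(Manager **ret) {  /* was: server_new(Server **ret) */
        Manager *m = calloc(1, sizeof(Manager));
        if (!m)
                return -ENOMEM;
        *ret = m;
        return 0;
}

static void manager_freep(Manager **m) { /* was: server_freep(Server **s) */
        if (*m) {
                free((*m)->buffer);
                free(*m);
        }
}

int main(void) {
        _cleanup_(manager_freep) Manager *m = NULL;  /* was: _cleanup_(server_freep) Server *s = NULL; */

        int r = manager_new(&m);                     /* was: server_new(&s) */
        assert(r >= 0);
        return 0;
}
```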
#include "journald-audit.h"
int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
- _cleanup_(server_freep) Server *s = NULL;
+ _cleanup_(manager_freep) Manager *m = NULL;
fuzz_setup_logging();
- assert_se(server_new(&s) >= 0);
- dummy_server_init(s, data, size);
- process_audit_string(s, 0, s->buffer, size);
+ assert_se(manager_new(&m) >= 0);
+ dummy_manager_init(m, data, size);
+ process_audit_string(m, 0, m->buffer, size);
return 0;
}
#include "journald-kmsg.h"
int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
- _cleanup_(server_freep) Server *s = NULL;
+ _cleanup_(manager_freep) Manager *m = NULL;
if (size == 0)
return 0;
fuzz_setup_logging();
- assert_se(server_new(&s) >= 0);
- dummy_server_init(s, data, size);
- dev_kmsg_record(s, s->buffer, size);
+ assert_se(manager_new(&m) >= 0);
+ dummy_manager_init(m, data, size);
+ dev_kmsg_record(m, m->buffer, size);
return 0;
}
#include "tmpfile-util.h"
int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
- _cleanup_(server_freep) Server *s = NULL;
+ _cleanup_(manager_freep) Manager *m = NULL;
_cleanup_close_ int sealed_fd = -EBADF, unsealed_fd = -EBADF;
_cleanup_(unlink_tempfilep) char name[] = "/tmp/fuzz-journald-native-fd.XXXXXX";
char *label = NULL;
fuzz_setup_logging();
- assert_se(server_new(&s) >= 0);
- dummy_server_init(s, NULL, 0);
+ assert_se(manager_new(&m) >= 0);
+ dummy_manager_init(m, NULL, 0);
sealed_fd = memfd_new_and_seal(NULL, data, size);
assert_se(sealed_fd >= 0);
.uid = geteuid(),
.gid = getegid(),
};
- (void) server_process_native_file(s, sealed_fd, &ucred, tv, label, label_len);
+ (void) manager_process_native_file(m, sealed_fd, &ucred, tv, label, label_len);
unsealed_fd = mkostemp_safe(name);
assert_se(unsealed_fd >= 0);
assert_se(write(unsealed_fd, data, size) == (ssize_t) size);
assert_se(lseek(unsealed_fd, 0, SEEK_SET) == 0);
- (void) server_process_native_file(s, unsealed_fd, &ucred, tv, label, label_len);
+ (void) manager_process_native_file(m, unsealed_fd, &ucred, tv, label, label_len);
return 0;
}
int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
fuzz_setup_logging();
- fuzz_journald_processing_function(data, size, server_process_native_message);
+ fuzz_journald_processing_function(data, size, manager_process_native_message);
return 0;
}
int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
_cleanup_close_pair_ int stream_fds[2] = EBADF_PAIR;
- _cleanup_(server_freep) Server *s = NULL;
+ _cleanup_(manager_freep) Manager *m = NULL;
StdoutStream *stream;
int v, fd0;
fuzz_setup_logging();
assert_se(socketpair(AF_UNIX, SOCK_STREAM|SOCK_CLOEXEC|SOCK_NONBLOCK, 0, stream_fds) >= 0);
- assert_se(server_new(&s) >= 0);
- dummy_server_init(s, NULL, 0);
+ assert_se(manager_new(&m) >= 0);
+ dummy_manager_init(m, NULL, 0);
- assert_se(stdout_stream_install(s, stream_fds[0], &stream) >= 0);
+ assert_se(stdout_stream_install(m, stream_fds[0], &stream) >= 0);
fd0 = TAKE_FD(stream_fds[0]); /* avoid double close */
assert_se(write(stream_fds[1], data, size) == (ssize_t) size);
while (ioctl(fd0, SIOCINQ, &v) == 0 && v)
- sd_event_run(s->event, UINT64_MAX);
+ sd_event_run(m->event, UINT64_MAX);
- if (s->n_stdout_streams > 0)
+ if (m->n_stdout_streams > 0)
stdout_stream_terminate(stream);
return 0;
int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
fuzz_setup_logging();
- fuzz_journald_processing_function(data, size, server_process_syslog_message);
+ fuzz_journald_processing_function(data, size, manager_process_syslog_message);
return 0;
}
#include "alloc-util.h"
#include "fuzz-journald.h"
-#include "journald-server.h"
+#include "journald-manager.h"
-void dummy_server_init(Server *s, const uint8_t *buffer, size_t size) {
- assert(s);
+void dummy_manager_init(Manager *m, const uint8_t *buffer, size_t size) {
+ assert(m);
- s->storage = STORAGE_NONE;
- assert_se(sd_event_default(&s->event) >= 0);
+ m->storage = STORAGE_NONE;
+ assert_se(sd_event_default(&m->event) >= 0);
if (buffer) {
- s->buffer = memdup_suffix0(buffer, size);
- assert_se(s->buffer);
+ m->buffer = memdup_suffix0(buffer, size);
+ assert_se(m->buffer);
}
}
void fuzz_journald_processing_function(
const uint8_t *data,
size_t size,
- void (*f)(Server *s, const char *buf, size_t raw_len, const struct ucred *ucred, const struct timeval *tv, const char *label, size_t label_len)
+ void (*f)(Manager *m, const char *buf, size_t raw_len, const struct ucred *ucred, const struct timeval *tv, const char *label, size_t label_len)
) {
- _cleanup_(server_freep) Server *s = NULL;
+ _cleanup_(manager_freep) Manager *m = NULL;
char *label = NULL;
size_t label_len = 0;
struct ucred *ucred = NULL;
if (size == 0)
return;
- assert_se(server_new(&s) >= 0);
- dummy_server_init(s, data, size);
- (*f)(s, s->buffer, size, ucred, tv, label, label_len);
+ assert_se(manager_new(&m) >= 0);
+ dummy_manager_init(m, data, size);
+ (*f)(m, m->buffer, size, ucred, tv, label, label_len);
}
/* SPDX-License-Identifier: LGPL-2.1-or-later */
#pragma once
-#include "journald-server.h"
+#include "journald-manager.h"
-void dummy_server_init(Server *s, const uint8_t *buffer, size_t size);
+void dummy_manager_init(Manager *m, const uint8_t *buffer, size_t size);
void fuzz_journald_processing_function(
const uint8_t *data,
size_t size,
- void (*f)(Server *s, const char *buf, size_t raw_len, const struct ucred *ucred, const struct timeval *tv, const char *label, size_t label_len)
+ void (*f)(Manager *m, const char *buf, size_t raw_len, const struct ucred *ucred, const struct timeval *tv, const char *label, size_t label_len)
);
#include "iovec-util.h"
#include "journal-internal.h"
#include "journald-audit.h"
-#include "journald-server.h"
+#include "journald-manager.h"
#include "missing_audit.h"
#include "string-util.h"
}
}
-void process_audit_string(Server *s, int type, const char *data, size_t size) {
+void process_audit_string(Manager *m, int type, const char *data, size_t size) {
size_t n = 0, z;
uint64_t seconds, msec, id;
const char *p, *type_name;
char id_field[STRLEN("_AUDIT_ID=") + DECIMAL_STR_MAX(uint64_t)],
type_field[STRLEN("_AUDIT_TYPE=") + DECIMAL_STR_MAX(int)];
struct iovec iovec[N_IOVEC_META_FIELDS + 7 + N_IOVEC_AUDIT_FIELDS];
- char *m, *type_field_name;
+ char *mm, *type_field_name;
int k;
- assert(s);
+ assert(m);
if (size <= 0)
return;
type_field_name = strjoina("_AUDIT_TYPE_NAME=", type_name);
iovec[n++] = IOVEC_MAKE_STRING(type_field_name);
- m = strjoina("MESSAGE=", type_name, " ", p);
- iovec[n++] = IOVEC_MAKE_STRING(m);
+ mm = strjoina("MESSAGE=", type_name, " ", p);
+ iovec[n++] = IOVEC_MAKE_STRING(mm);
z = n;
map_all_fields(p, map_fields_kernel, "_AUDIT_FIELD_", true, iovec, &n, n + N_IOVEC_AUDIT_FIELDS);
- server_dispatch_message(s, iovec, n, ELEMENTSOF(iovec), NULL,
- TIMEVAL_STORE((usec_t) seconds * USEC_PER_SEC + (usec_t) msec * USEC_PER_MSEC),
- LOG_NOTICE, 0);
+ manager_dispatch_message(m, iovec, n, ELEMENTSOF(iovec), NULL,
+ TIMEVAL_STORE((usec_t) seconds * USEC_PER_SEC + (usec_t) msec * USEC_PER_MSEC),
+ LOG_NOTICE, 0);
/* free() all entries that map_all_fields() added. All others
* are allocated on the stack or are constant. */
free(iovec[z].iov_base);
}
-void server_process_audit_message(
- Server *s,
+void manager_process_audit_message(
+ Manager *m,
const void *buffer,
size_t buffer_size,
const struct ucred *ucred,
const struct nlmsghdr *nl = buffer;
- assert(s);
+ assert(m);
if (buffer_size < ALIGN(sizeof(struct nlmsghdr)))
return;
if (nl->nlmsg_type < AUDIT_FIRST_USER_MSG && nl->nlmsg_type != AUDIT_USER)
return;
- process_audit_string(s, nl->nlmsg_type, NLMSG_DATA(nl), nl->nlmsg_len - ALIGN(sizeof(struct nlmsghdr)));
+ process_audit_string(m, nl->nlmsg_type, NLMSG_DATA(nl), nl->nlmsg_len - ALIGN(sizeof(struct nlmsghdr)));
}
static int enable_audit(int fd, bool b) {
return 0;
}
-int server_open_audit(Server *s) {
+int manager_open_audit(Manager *m) {
int r;
- if (s->audit_fd < 0) {
+ if (m->audit_fd < 0) {
static const union sockaddr_union sa = {
.nl.nl_family = AF_NETLINK,
.nl.nl_pid = 0,
.nl.nl_groups = AUDIT_NLGRP_READLOG,
};
- s->audit_fd = socket(AF_NETLINK, SOCK_RAW|SOCK_CLOEXEC|SOCK_NONBLOCK, NETLINK_AUDIT);
- if (s->audit_fd < 0) {
+ m->audit_fd = socket(AF_NETLINK, SOCK_RAW|SOCK_CLOEXEC|SOCK_NONBLOCK, NETLINK_AUDIT);
+ if (m->audit_fd < 0) {
if (ERRNO_IS_NOT_SUPPORTED(errno))
log_debug("Audit not supported in the kernel.");
else
return 0;
}
- if (bind(s->audit_fd, &sa.sa, sizeof(sa.nl)) < 0) {
+ if (bind(m->audit_fd, &sa.sa, sizeof(sa.nl)) < 0) {
log_warning_errno(errno,
"Failed to join audit multicast group. "
"The kernel is probably too old or multicast reading is not supported. "
"Ignoring: %m");
- s->audit_fd = safe_close(s->audit_fd);
+ m->audit_fd = safe_close(m->audit_fd);
return 0;
}
} else
- (void) fd_nonblock(s->audit_fd, true);
+ (void) fd_nonblock(m->audit_fd, true);
- r = setsockopt_int(s->audit_fd, SOL_SOCKET, SO_PASSCRED, true);
+ r = setsockopt_int(m->audit_fd, SOL_SOCKET, SO_PASSCRED, true);
if (r < 0)
return log_error_errno(r, "Failed to set SO_PASSCRED on audit socket: %m");
- r = sd_event_add_io(s->event, &s->audit_event_source, s->audit_fd, EPOLLIN, server_process_datagram, s);
+ r = sd_event_add_io(m->event, &m->audit_event_source, m->audit_fd, EPOLLIN, manager_process_datagram, m);
if (r < 0)
return log_error_errno(r, "Failed to add audit fd to event loop: %m");
- if (s->set_audit >= 0) {
+ if (m->set_audit >= 0) {
/* We are listening now, try to enable audit if configured so */
- r = enable_audit(s->audit_fd, s->set_audit);
+ r = enable_audit(m->audit_fd, m->set_audit);
if (r < 0)
log_warning_errno(r, "Failed to issue audit enable call: %m");
- else if (s->set_audit > 0)
+ else if (m->set_audit > 0)
log_debug("Auditing in kernel turned on.");
else
log_debug("Auditing in kernel turned off.");
#include "socket-util.h"
-typedef struct Server Server;
+typedef struct Manager Manager;
-void server_process_audit_message(Server *s, const void *buffer, size_t buffer_size, const struct ucred *ucred, const union sockaddr_union *sa, socklen_t salen);
+void manager_process_audit_message(Manager *m, const void *buffer, size_t buffer_size, const struct ucred *ucred, const union sockaddr_union *sa, socklen_t salen);
-void process_audit_string(Server *s, int type, const char *data, size_t size);
+void process_audit_string(Manager *m, int type, const char *data, size_t size);
-int server_open_audit(Server *s);
+int manager_open_audit(Manager *m);
#include "format-util.h"
#include "iovec-util.h"
#include "journald-console.h"
-#include "journald-server.h"
+#include "journald-manager.h"
#include "parse-util.h"
#include "process-util.h"
#include "stdio-util.h"
return cached_printk_time;
}
-void server_forward_console(
- Server *s,
+void manager_forward_console(
+ Manager *m,
int priority,
const char *identifier,
const char *message,
const char *tty, *color_on = "", *color_off = "";
int n = 0;
- assert(s);
+ assert(m);
assert(message);
- if (LOG_PRI(priority) > s->max_level_console)
+ if (LOG_PRI(priority) > m->max_level_console)
return;
/* First: timestamp */
iovec[n++] = IOVEC_MAKE_STRING(color_off);
iovec[n++] = IOVEC_MAKE_STRING("\n");
- tty = s->tty_path ?: "/dev/console";
+ tty = m->tty_path ?: "/dev/console";
/* Before you ask: yes, on purpose we open/close the console for each log line we write individually. This is a
* good strategy to avoid journald getting killed by the kernel's SAK concept (it doesn't fix this entirely,
#include <sys/socket.h>
-typedef struct Server Server;
+typedef struct Manager Manager;
-void server_forward_console(Server *s, int priority, const char *identifier, const char *message, const struct ucred *ucred);
+void manager_forward_console(Manager *m, int priority, const char *identifier, const char *message, const struct ucred *ucred);
#include "journal-util.h"
#include "journald-client.h"
#include "journald-context.h"
-#include "journald-server.h"
+#include "journald-manager.h"
#include "parse-util.h"
#include "path-util.h"
#include "process-util.h"
return CMP(x->pid, y->pid);
}
-static int client_context_new(Server *s, pid_t pid, ClientContext **ret) {
+static int client_context_new(Manager *m, pid_t pid, ClientContext **ret) {
_cleanup_free_ ClientContext *c = NULL;
int r;
- assert(s);
+ assert(m);
assert(pid_is_valid(pid));
assert(ret);
- r = prioq_ensure_allocated(&s->client_contexts_lru, client_context_compare);
+ r = prioq_ensure_allocated(&m->client_contexts_lru, client_context_compare);
if (r < 0)
return r;
.timestamp = USEC_INFINITY,
.extra_fields_mtime = NSEC_INFINITY,
.log_level_max = -1,
- .log_ratelimit_interval = s->ratelimit_interval,
- .log_ratelimit_burst = s->ratelimit_burst,
+ .log_ratelimit_interval = m->ratelimit_interval,
+ .log_ratelimit_burst = m->ratelimit_burst,
.capability_quintet = CAPABILITY_QUINTET_NULL,
};
- r = hashmap_ensure_put(&s->client_contexts, NULL, PID_TO_PTR(pid), c);
+ r = hashmap_ensure_put(&m->client_contexts, NULL, PID_TO_PTR(pid), c);
if (r < 0)
return r;
return 0;
}
-static void client_context_reset(Server *s, ClientContext *c) {
- assert(s);
+static void client_context_reset(Manager *m, ClientContext *c) {
+ assert(m);
assert(c);
c->timestamp = USEC_INFINITY;
c->log_level_max = -1;
- c->log_ratelimit_interval = s->ratelimit_interval;
- c->log_ratelimit_burst = s->ratelimit_burst;
+ c->log_ratelimit_interval = m->ratelimit_interval;
+ c->log_ratelimit_burst = m->ratelimit_burst;
c->log_filter_allowed_patterns = set_free(c->log_filter_allowed_patterns);
c->log_filter_denied_patterns = set_free(c->log_filter_denied_patterns);
c->capability_quintet = CAPABILITY_QUINTET_NULL;
}
-static ClientContext* client_context_free(Server *s, ClientContext *c) {
- assert(s);
+static ClientContext* client_context_free(Manager *m, ClientContext *c) {
+ assert(m);
if (!c)
return NULL;
- assert_se(hashmap_remove(s->client_contexts, PID_TO_PTR(c->pid)) == c);
+ assert_se(hashmap_remove(m->client_contexts, PID_TO_PTR(c->pid)) == c);
if (c->in_lru)
- assert_se(prioq_remove(s->client_contexts_lru, c, &c->lru_index) >= 0);
+ assert_se(prioq_remove(m->client_contexts_lru, c, &c->lru_index) >= 0);
- client_context_reset(s, c);
+ client_context_reset(m, c);
return mfree(c);
}
return 0;
}
-static int client_context_read_cgroup(Server *s, ClientContext *c, const char *unit_id) {
+static int client_context_read_cgroup(Manager *m, ClientContext *c, const char *unit_id) {
_cleanup_free_ char *t = NULL;
int r;
assert(c);
/* Try to acquire the current cgroup path */
- r = cg_pid_get_path_shifted(c->pid, s->cgroup_root, &t);
+ r = cg_pid_get_path_shifted(c->pid, m->cgroup_root, &t);
if (r < 0 || empty_or_root(t)) {
/* We use the unit ID passed in as fallback if we have nothing cached yet and cg_pid_get_path_shifted()
* failed or the process is running in a root cgroup. Zombie processes are automatically migrated to root cgroup
}
static int client_context_read_invocation_id(
- Server *s,
+ Manager *m,
ClientContext *c) {
_cleanup_free_ char *p = NULL, *value = NULL;
int r;
- assert(s);
+ assert(m);
assert(c);
/* Read the invocation ID of a unit off a unit.
}
static int client_context_read_log_level_max(
- Server *s,
+ Manager *m,
ClientContext *c) {
_cleanup_free_ char *value = NULL;
}
static int client_context_read_extra_fields(
- Server *s,
+ Manager *m,
ClientContext *c) {
_cleanup_free_ struct iovec *iovec = NULL;
}
static void client_context_really_refresh(
- Server *s,
+ Manager *m,
ClientContext *c,
const struct ucred *ucred,
const char *label, size_t label_size,
const char *unit_id,
usec_t timestamp) {
- assert(s);
+ assert(m);
assert(c);
assert(pid_is_valid(c->pid));
(void) audit_session_from_pid(&PIDREF_MAKE_FROM_PID(c->pid), &c->auditid);
(void) audit_loginuid_from_pid(&PIDREF_MAKE_FROM_PID(c->pid), &c->loginuid);
- (void) client_context_read_cgroup(s, c, unit_id);
- (void) client_context_read_invocation_id(s, c);
- (void) client_context_read_log_level_max(s, c);
- (void) client_context_read_extra_fields(s, c);
+ (void) client_context_read_cgroup(m, c, unit_id);
+ (void) client_context_read_invocation_id(m, c);
+ (void) client_context_read_log_level_max(m, c);
+ (void) client_context_read_extra_fields(m, c);
(void) client_context_read_log_ratelimit_interval(c);
(void) client_context_read_log_ratelimit_burst(c);
if (c->in_lru) {
assert(c->n_ref == 0);
- prioq_reshuffle(s->client_contexts_lru, c, &c->lru_index);
+ prioq_reshuffle(m->client_contexts_lru, c, &c->lru_index);
}
}
void client_context_maybe_refresh(
- Server *s,
+ Manager *m,
ClientContext *c,
const struct ucred *ucred,
const char *label, size_t label_size,
const char *unit_id,
usec_t timestamp) {
- assert(s);
+ assert(m);
assert(c);
if (timestamp == USEC_INFINITY)
/* If the data isn't pinned and if the cached data is older than the upper limit, we flush it out
* entirely. This follows the logic that as long as an entry is pinned the PID reuse is unlikely. */
if (c->n_ref == 0 && c->timestamp + MAX_USEC < timestamp) {
- client_context_reset(s, c);
+ client_context_reset(m, c);
goto refresh;
}
return;
refresh:
- client_context_really_refresh(s, c, ucred, label, label_size, unit_id, timestamp);
+ client_context_really_refresh(m, c, ucred, label, label_size, unit_id, timestamp);
}
-static void client_context_try_shrink_to(Server *s, size_t limit) {
+static void client_context_try_shrink_to(Manager *m, size_t limit) {
ClientContext *c;
usec_t t;
- assert(s);
+ assert(m);
/* Flush any cache entries for PIDs that have already moved on. Don't do this
* too often, since it's a slow process. */
t = now(CLOCK_MONOTONIC);
- if (s->last_cache_pid_flush + MAX_USEC < t) {
- unsigned n = prioq_size(s->client_contexts_lru), idx = 0;
+ if (m->last_cache_pid_flush + MAX_USEC < t) {
+ unsigned n = prioq_size(m->client_contexts_lru), idx = 0;
/* We do a number of iterations based on the initial size of the prioq. When we remove an
* item, a new item is moved into its place, and items to the right might be reshuffled.
*/
for (unsigned i = 0; i < n; i++) {
- c = prioq_peek_by_index(s->client_contexts_lru, idx);
+ c = prioq_peek_by_index(m->client_contexts_lru, idx);
assert(c->n_ref == 0);
if (pid_is_unwaited(c->pid) == 0)
- client_context_free(s, c);
+ client_context_free(m, c);
else
idx++;
}
- s->last_cache_pid_flush = t;
+ m->last_cache_pid_flush = t;
}
/* Bring the number of cache entries below the indicated limit, so that we can create a new entry without
* breaching the limit. Note that we only flush out entries that aren't pinned here. This means the number of
* cache entries may very well grow beyond the limit, if all entries stored remain pinned. */
- while (hashmap_size(s->client_contexts) > limit) {
- c = prioq_pop(s->client_contexts_lru);
+ while (hashmap_size(m->client_contexts) > limit) {
+ c = prioq_pop(m->client_contexts_lru);
if (!c)
break; /* All remaining entries are pinned, give up */
c->in_lru = false;
- client_context_free(s, c);
+ client_context_free(m, c);
}
}
-void client_context_flush_regular(Server *s) {
- client_context_try_shrink_to(s, 0);
+void client_context_flush_regular(Manager *m) {
+ client_context_try_shrink_to(m, 0);
}
-void client_context_flush_all(Server *s) {
- assert(s);
+void client_context_flush_all(Manager *m) {
+ assert(m);
/* Flush out all remaining entries. This assumes all references are already dropped. */
- s->my_context = client_context_release(s, s->my_context);
- s->pid1_context = client_context_release(s, s->pid1_context);
+ m->my_context = client_context_release(m, m->my_context);
+ m->pid1_context = client_context_release(m, m->pid1_context);
- client_context_flush_regular(s);
+ client_context_flush_regular(m);
- assert(prioq_isempty(s->client_contexts_lru));
- assert(hashmap_isempty(s->client_contexts));
+ assert(prioq_isempty(m->client_contexts_lru));
+ assert(hashmap_isempty(m->client_contexts));
- s->client_contexts_lru = prioq_free(s->client_contexts_lru);
- s->client_contexts = hashmap_free(s->client_contexts);
+ m->client_contexts_lru = prioq_free(m->client_contexts_lru);
+ m->client_contexts = hashmap_free(m->client_contexts);
}
static int client_context_get_internal(
- Server *s,
+ Manager *m,
pid_t pid,
const struct ucred *ucred,
const char *label, size_t label_len,
ClientContext *c;
int r;
- assert(s);
+ assert(m);
assert(ret);
if (!pid_is_valid(pid))
return -EINVAL;
- c = hashmap_get(s->client_contexts, PID_TO_PTR(pid));
+ c = hashmap_get(m->client_contexts, PID_TO_PTR(pid));
if (c) {
if (add_ref) {
if (c->in_lru) {
/* The entry wasn't pinned so far, let's remove it from the LRU list then */
assert(c->n_ref == 0);
- assert_se(prioq_remove(s->client_contexts_lru, c, &c->lru_index) >= 0);
+ assert_se(prioq_remove(m->client_contexts_lru, c, &c->lru_index) >= 0);
c->in_lru = false;
}
c->n_ref++;
}
- client_context_maybe_refresh(s, c, ucred, label, label_len, unit_id, USEC_INFINITY);
+ client_context_maybe_refresh(m, c, ucred, label, label_len, unit_id, USEC_INFINITY);
*ret = c;
return 0;
}
- client_context_try_shrink_to(s, cache_max()-1);
+ client_context_try_shrink_to(m, cache_max()-1);
- r = client_context_new(s, pid, &c);
+ r = client_context_new(m, pid, &c);
if (r < 0)
return r;
if (add_ref)
c->n_ref++;
else {
- r = prioq_put(s->client_contexts_lru, c, &c->lru_index);
+ r = prioq_put(m->client_contexts_lru, c, &c->lru_index);
if (r < 0) {
- client_context_free(s, c);
+ client_context_free(m, c);
return r;
}
c->in_lru = true;
}
- client_context_really_refresh(s, c, ucred, label, label_len, unit_id, USEC_INFINITY);
+ client_context_really_refresh(m, c, ucred, label, label_len, unit_id, USEC_INFINITY);
*ret = c;
return 0;
}
int client_context_get(
- Server *s,
+ Manager *m,
pid_t pid,
const struct ucred *ucred,
const char *label, size_t label_len,
const char *unit_id,
ClientContext **ret) {
- return client_context_get_internal(s, pid, ucred, label, label_len, unit_id, false, ret);
+ return client_context_get_internal(m, pid, ucred, label, label_len, unit_id, false, ret);
}
int client_context_acquire(
- Server *s,
+ Manager *m,
pid_t pid,
const struct ucred *ucred,
const char *label, size_t label_len,
const char *unit_id,
ClientContext **ret) {
- return client_context_get_internal(s, pid, ucred, label, label_len, unit_id, true, ret);
+ return client_context_get_internal(m, pid, ucred, label, label_len, unit_id, true, ret);
};
-ClientContext *client_context_release(Server *s, ClientContext *c) {
- assert(s);
+ClientContext *client_context_release(Manager *m, ClientContext *c) {
+ assert(m);
if (!c)
return NULL;
/* The entry is not pinned anymore, let's add it to the LRU prioq if we can. If we can't we'll drop it
* right-away */
- if (prioq_put(s->client_contexts_lru, c, &c->lru_index) < 0)
- client_context_free(s, c);
+ if (prioq_put(m->client_contexts_lru, c, &c->lru_index) < 0)
+ client_context_free(m, c);
else
c->in_lru = true;
return NULL;
}
-void client_context_acquire_default(Server *s) {
+void client_context_acquire_default(Manager *m) {
int r;
- assert(s);
+ assert(m);
/* Ensure that our own and PID1's contexts are always pinned. Our own context is particularly useful to
* generate driver messages. */
- if (!s->my_context) {
+ if (!m->my_context) {
struct ucred ucred = {
.pid = getpid_cached(),
.uid = getuid(),
.gid = getgid(),
};
- r = client_context_acquire(s, ucred.pid, &ucred, NULL, 0, NULL, &s->my_context);
+ r = client_context_acquire(m, ucred.pid, &ucred, NULL, 0, NULL, &m->my_context);
if (r < 0)
log_ratelimit_warning_errno(r, JOURNAL_LOG_RATELIMIT,
"Failed to acquire our own context, ignoring: %m");
}
- if (!s->namespace && !s->pid1_context) {
+ if (!m->namespace && !m->pid1_context) {
/* Acquire PID1's context, but only if we are in non-namespaced mode, since PID 1 is only
* going to log to the non-namespaced journal instance. */
- r = client_context_acquire(s, 1, NULL, NULL, 0, NULL, &s->pid1_context);
+ r = client_context_acquire(m, 1, NULL, NULL, 0, NULL, &m->pid1_context);
if (r < 0)
log_ratelimit_warning_errno(r, JOURNAL_LOG_RATELIMIT,
"Failed to acquire PID1's context, ignoring: %m");
#include "set.h"
#include "time-util.h"
-typedef struct Server Server;
+typedef struct Manager Manager;
typedef struct ClientContext {
unsigned n_ref;
} ClientContext;
int client_context_get(
- Server *s,
+ Manager *m,
pid_t pid,
const struct ucred *ucred,
const char *label, size_t label_len,
ClientContext **ret);
int client_context_acquire(
- Server *s,
+ Manager *m,
pid_t pid,
const struct ucred *ucred,
const char *label, size_t label_len,
const char *unit_id,
ClientContext **ret);
-ClientContext* client_context_release(Server *s, ClientContext *c);
+ClientContext* client_context_release(Manager *m, ClientContext *c);
void client_context_maybe_refresh(
- Server *s,
+ Manager *m,
ClientContext *c,
const struct ucred *ucred,
const char *label, size_t label_size,
const char *unit_id,
usec_t tstamp);
-void client_context_acquire_default(Server *s);
-void client_context_flush_all(Server *s);
-void client_context_flush_regular(Server *s);
+void client_context_acquire_default(Manager *m);
+void client_context_flush_all(Manager *m);
+void client_context_flush_regular(Manager *m);
static inline size_t client_context_extra_fields_n_iovec(const ClientContext *c) {
return c ? c->extra_fields_n_iovec : 0;
#include <stddef.h>
#include <sys/socket.h>
#include "conf-parser.h"
-#include "journald-server.h"
+#include "journald-manager.h"
%}
struct ConfigPerfItem;
%null_strings
%struct-type
%includes
%%
-Journal.Storage, config_parse_storage, 0, offsetof(Server, storage)
-Journal.Compress, config_parse_compress, 0, offsetof(Server, compress)
-Journal.Seal, config_parse_bool, 0, offsetof(Server, seal)
-Journal.ReadKMsg, config_parse_bool, 0, offsetof(Server, read_kmsg)
-Journal.Audit, config_parse_tristate, 0, offsetof(Server, set_audit)
-Journal.SyncIntervalSec, config_parse_sec, 0, offsetof(Server, sync_interval_usec)
+Journal.Storage, config_parse_storage, 0, offsetof(Manager, storage)
+Journal.Compress, config_parse_compress, 0, offsetof(Manager, compress)
+Journal.Seal, config_parse_bool, 0, offsetof(Manager, seal)
+Journal.ReadKMsg, config_parse_bool, 0, offsetof(Manager, read_kmsg)
+Journal.Audit, config_parse_tristate, 0, offsetof(Manager, set_audit)
+Journal.SyncIntervalSec, config_parse_sec, 0, offsetof(Manager, sync_interval_usec)
# The following is a legacy name for compatibility
-Journal.RateLimitInterval, config_parse_sec, 0, offsetof(Server, ratelimit_interval)
-Journal.RateLimitIntervalSec,config_parse_sec, 0, offsetof(Server, ratelimit_interval)
-Journal.RateLimitBurst, config_parse_unsigned, 0, offsetof(Server, ratelimit_burst)
-Journal.SystemMaxUse, config_parse_iec_uint64, 0, offsetof(Server, system_storage.metrics.max_use)
-Journal.SystemMaxFileSize, config_parse_iec_uint64, 0, offsetof(Server, system_storage.metrics.max_size)
-Journal.SystemKeepFree, config_parse_iec_uint64, 0, offsetof(Server, system_storage.metrics.keep_free)
-Journal.SystemMaxFiles, config_parse_uint64, 0, offsetof(Server, system_storage.metrics.n_max_files)
-Journal.RuntimeMaxUse, config_parse_iec_uint64, 0, offsetof(Server, runtime_storage.metrics.max_use)
-Journal.RuntimeMaxFileSize, config_parse_iec_uint64, 0, offsetof(Server, runtime_storage.metrics.max_size)
-Journal.RuntimeKeepFree, config_parse_iec_uint64, 0, offsetof(Server, runtime_storage.metrics.keep_free)
-Journal.RuntimeMaxFiles, config_parse_uint64, 0, offsetof(Server, runtime_storage.metrics.n_max_files)
-Journal.MaxRetentionSec, config_parse_sec, 0, offsetof(Server, max_retention_usec)
-Journal.MaxFileSec, config_parse_sec, 0, offsetof(Server, max_file_usec)
-Journal.ForwardToSyslog, config_parse_bool, 0, offsetof(Server, forward_to_syslog)
-Journal.ForwardToKMsg, config_parse_bool, 0, offsetof(Server, forward_to_kmsg)
-Journal.ForwardToConsole, config_parse_bool, 0, offsetof(Server, forward_to_console)
-Journal.ForwardToWall, config_parse_bool, 0, offsetof(Server, forward_to_wall)
-Journal.ForwardToSocket, config_parse_forward_to_socket, 0, offsetof(Server, forward_to_socket)
-Journal.TTYPath, config_parse_path, 0, offsetof(Server, tty_path)
-Journal.MaxLevelStore, config_parse_log_level, 0, offsetof(Server, max_level_store)
-Journal.MaxLevelSyslog, config_parse_log_level, 0, offsetof(Server, max_level_syslog)
-Journal.MaxLevelKMsg, config_parse_log_level, 0, offsetof(Server, max_level_kmsg)
-Journal.MaxLevelConsole, config_parse_log_level, 0, offsetof(Server, max_level_console)
-Journal.MaxLevelWall, config_parse_log_level, 0, offsetof(Server, max_level_wall)
-Journal.MaxLevelSocket, config_parse_log_level, 0, offsetof(Server, max_level_socket)
-Journal.SplitMode, config_parse_split_mode, 0, offsetof(Server, split_mode)
-Journal.LineMax, config_parse_line_max, 0, offsetof(Server, line_max)
+Journal.RateLimitInterval, config_parse_sec, 0, offsetof(Manager, ratelimit_interval)
+Journal.RateLimitIntervalSec,config_parse_sec, 0, offsetof(Manager, ratelimit_interval)
+Journal.RateLimitBurst, config_parse_unsigned, 0, offsetof(Manager, ratelimit_burst)
+Journal.SystemMaxUse, config_parse_iec_uint64, 0, offsetof(Manager, system_storage.metrics.max_use)
+Journal.SystemMaxFileSize, config_parse_iec_uint64, 0, offsetof(Manager, system_storage.metrics.max_size)
+Journal.SystemKeepFree, config_parse_iec_uint64, 0, offsetof(Manager, system_storage.metrics.keep_free)
+Journal.SystemMaxFiles, config_parse_uint64, 0, offsetof(Manager, system_storage.metrics.n_max_files)
+Journal.RuntimeMaxUse, config_parse_iec_uint64, 0, offsetof(Manager, runtime_storage.metrics.max_use)
+Journal.RuntimeMaxFileSize, config_parse_iec_uint64, 0, offsetof(Manager, runtime_storage.metrics.max_size)
+Journal.RuntimeKeepFree, config_parse_iec_uint64, 0, offsetof(Manager, runtime_storage.metrics.keep_free)
+Journal.RuntimeMaxFiles, config_parse_uint64, 0, offsetof(Manager, runtime_storage.metrics.n_max_files)
+Journal.MaxRetentionSec, config_parse_sec, 0, offsetof(Manager, max_retention_usec)
+Journal.MaxFileSec, config_parse_sec, 0, offsetof(Manager, max_file_usec)
+Journal.ForwardToSyslog, config_parse_bool, 0, offsetof(Manager, forward_to_syslog)
+Journal.ForwardToKMsg, config_parse_bool, 0, offsetof(Manager, forward_to_kmsg)
+Journal.ForwardToConsole, config_parse_bool, 0, offsetof(Manager, forward_to_console)
+Journal.ForwardToWall, config_parse_bool, 0, offsetof(Manager, forward_to_wall)
+Journal.ForwardToSocket, config_parse_forward_to_socket, 0, offsetof(Manager, forward_to_socket)
+Journal.TTYPath, config_parse_path, 0, offsetof(Manager, tty_path)
+Journal.MaxLevelStore, config_parse_log_level, 0, offsetof(Manager, max_level_store)
+Journal.MaxLevelSyslog, config_parse_log_level, 0, offsetof(Manager, max_level_syslog)
+Journal.MaxLevelKMsg, config_parse_log_level, 0, offsetof(Manager, max_level_kmsg)
+Journal.MaxLevelConsole, config_parse_log_level, 0, offsetof(Manager, max_level_console)
+Journal.MaxLevelWall, config_parse_log_level, 0, offsetof(Manager, max_level_wall)
+Journal.MaxLevelSocket, config_parse_log_level, 0, offsetof(Manager, max_level_socket)
+Journal.SplitMode, config_parse_split_mode, 0, offsetof(Manager, split_mode)
+Journal.LineMax, config_parse_line_max, 0, offsetof(Manager, line_max)
#include "iovec-util.h"
#include "journal-internal.h"
#include "journald-kmsg.h"
-#include "journald-server.h"
+#include "journald-manager.h"
#include "journald-syslog.h"
#include "log.h"
#include "parse-util.h"
#include "stdio-util.h"
#include "string-util.h"
-void server_forward_kmsg(
- Server *s,
+void manager_forward_kmsg(
+ Manager *m,
int priority,
const char *identifier,
const char *message,
header_pid[STRLEN("[]: ") + DECIMAL_STR_MAX(pid_t) + 1];
size_t n = 0;
- assert(s);
+ assert(m);
assert(priority >= 0);
assert(priority <= 999);
assert(message);
- if (_unlikely_(LOG_PRI(priority) > s->max_level_kmsg))
+ if (_unlikely_(LOG_PRI(priority) > m->max_level_kmsg))
return;
- if (_unlikely_(s->dev_kmsg_fd < 0))
+ if (_unlikely_(m->dev_kmsg_fd < 0))
return;
/* Never allow messages with kernel facility to be written to
iovec[n++] = IOVEC_MAKE_STRING(message);
iovec[n++] = IOVEC_MAKE_STRING("\n");
- if (writev(s->dev_kmsg_fd, iovec, n) < 0)
+ if (writev(m->dev_kmsg_fd, iovec, n) < 0)
log_debug_errno(errno, "Failed to write to /dev/kmsg for logging, ignoring: %m");
}
streq(identifier, program_invocation_short_name);
}
-void dev_kmsg_record(Server *s, char *p, size_t l) {
+void dev_kmsg_record(Manager *m, char *p, size_t l) {
_cleanup_free_ char *message = NULL, *syslog_pid = NULL, *syslog_identifier = NULL, *identifier = NULL, *pid = NULL;
struct iovec iovec[N_IOVEC_META_FIELDS + 7 + N_IOVEC_KERNEL_FIELDS + 2 + N_IOVEC_UDEV_FIELDS];
int saved_log_max_level = INT_MAX;
ClientContext *c = NULL;
- assert(s);
+ assert(m);
assert(p);
if (l <= 0)
if (r < 0 || priority < 0 || priority > 999)
return;
- if (s->forward_to_kmsg && LOG_FAC(priority) != LOG_KERN)
+ if (m->forward_to_kmsg && LOG_FAC(priority) != LOG_KERN)
return;
/* seqnum */
if (r < 0)
return;
- if (s->kernel_seqnum) {
+ if (m->kernel_seqnum) {
/* We already read this one? */
- if (serial < *s->kernel_seqnum)
+ if (serial < *m->kernel_seqnum)
return;
/* Did we lose any? */
- if (serial > *s->kernel_seqnum)
- server_driver_message(s, 0,
- LOG_MESSAGE_ID(SD_MESSAGE_JOURNAL_MISSED_STR),
- LOG_MESSAGE("Missed %"PRIu64" kernel messages",
- serial - *s->kernel_seqnum));
+ if (serial > *m->kernel_seqnum)
+ manager_driver_message(m, 0,
+ LOG_MESSAGE_ID(SD_MESSAGE_JOURNAL_MISSED_STR),
+ LOG_MESSAGE("Missed %"PRIu64" kernel messages",
+ serial - *m->kernel_seqnum));
/* Make sure we never read this one again. Note that
* we always store the next message serial we expect
* here, simply because this makes handling the first
* message with serial 0 easy. */
- *s->kernel_seqnum = serial + 1;
+ *m->kernel_seqnum = serial + 1;
}
/* CLOCK_BOOTTIME timestamp */
k = e + 1;
for (j = 0; l > 0 && j < N_IOVEC_KERNEL_FIELDS; j++) {
- char *m;
+ char *mm;
/* Metadata fields attached */
if (*k != ' ')
*e = 0;
- if (cunescape_length_with_prefix(k, e - k, "_KERNEL_", UNESCAPE_RELAX, &m) < 0)
+ if (cunescape_length_with_prefix(k, e - k, "_KERNEL_", UNESCAPE_RELAX, &mm) < 0)
break;
- if (startswith(m, "_KERNEL_DEVICE="))
- kernel_device = m + 15;
+ if (startswith(mm, "_KERNEL_DEVICE="))
+ kernel_device = mm + 15;
- iovec[n++] = IOVEC_MAKE_STRING(m);
+ iovec[n++] = IOVEC_MAKE_STRING(mm);
z++;
l -= (e - k) + 1;
/* Avoid logging any new messages when we're processing messages generated by ourselves via
* log_info() and friends to avoid infinite loops. */
if (is_us(identifier, pid)) {
- if (!ratelimit_below(&s->kmsg_own_ratelimit))
+ if (!ratelimit_below(&m->kmsg_own_ratelimit))
return;
saved_log_max_level = log_get_max_level();
- c = s->my_context;
+ c = m->my_context;
log_set_max_level(LOG_NULL);
}
if (cunescape_length_with_prefix(p, pl, "MESSAGE=", UNESCAPE_RELAX, &message) >= 0)
iovec[n++] = IOVEC_MAKE_STRING(message);
- server_dispatch_message(s, iovec, n, ELEMENTSOF(iovec), c, NULL, priority, 0);
+ manager_dispatch_message(m, iovec, n, ELEMENTSOF(iovec), c, NULL, priority, 0);
if (saved_log_max_level != INT_MAX)
log_set_max_level(saved_log_max_level);
- s->dev_kmsg_timestamp = usec;
- sync_req_revalidate_by_timestamp(s);
+ m->dev_kmsg_timestamp = usec;
+ sync_req_revalidate_by_timestamp(m);
finish:
for (j = 0; j < z; j++)
free(iovec[j].iov_base);
}
-static int server_read_dev_kmsg(Server *s) {
+static int manager_read_dev_kmsg(Manager *m) {
char buffer[8192+1]; /* the kernel-side limit per record is 8K currently */
ssize_t l;
- assert(s);
- assert(s->dev_kmsg_fd >= 0);
- assert(s->read_kmsg);
+ assert(m);
+ assert(m->dev_kmsg_fd >= 0);
+ assert(m->read_kmsg);
- l = read(s->dev_kmsg_fd, buffer, sizeof(buffer) - 1);
+ l = read(m->dev_kmsg_fd, buffer, sizeof(buffer) - 1);
if (l == 0)
return 0;
if (l < 0) {
return log_ratelimit_error_errno(errno, JOURNAL_LOG_RATELIMIT, "Failed to read from /dev/kmsg: %m");
}
- dev_kmsg_record(s, buffer, l);
+ dev_kmsg_record(m, buffer, l);
return 1;
}
-int server_flush_dev_kmsg(Server *s) {
+int manager_flush_dev_kmsg(Manager *m) {
int r;
- assert(s);
+ assert(m);
- if (s->dev_kmsg_fd < 0)
+ if (m->dev_kmsg_fd < 0)
return 0;
- if (!s->read_kmsg)
+ if (!m->read_kmsg)
return 0;
log_debug("Flushing /dev/kmsg...");
for (;;) {
- r = server_read_dev_kmsg(s);
+ r = manager_read_dev_kmsg(m);
if (r < 0)
return r;
}
static int dispatch_dev_kmsg(sd_event_source *es, int fd, uint32_t revents, void *userdata) {
- Server *s = ASSERT_PTR(userdata);
+ Manager *m = ASSERT_PTR(userdata);
assert(es);
- assert(fd == s->dev_kmsg_fd);
+ assert(fd == m->dev_kmsg_fd);
if (revents & EPOLLERR)
log_ratelimit_warning(JOURNAL_LOG_RATELIMIT,
if (!(revents & EPOLLIN))
log_error("Got invalid event from epoll for /dev/kmsg: %"PRIx32, revents);
- return server_read_dev_kmsg(s);
+ return manager_read_dev_kmsg(m);
}
-int server_open_dev_kmsg(Server *s) {
+int manager_open_dev_kmsg(Manager *m) {
int r;
- assert(s);
- assert(s->dev_kmsg_fd < 0);
- assert(!s->dev_kmsg_event_source);
+ assert(m);
+ assert(m->dev_kmsg_fd < 0);
+ assert(!m->dev_kmsg_event_source);
- mode_t mode = O_CLOEXEC|O_NONBLOCK|O_NOCTTY|(s->read_kmsg ? O_RDWR : O_WRONLY);
+ mode_t mode = O_CLOEXEC|O_NONBLOCK|O_NOCTTY|(m->read_kmsg ? O_RDWR : O_WRONLY);
_cleanup_close_ int fd = open("/dev/kmsg", mode);
if (fd < 0) {
return 0;
}
- if (!s->read_kmsg) {
- s->dev_kmsg_fd = TAKE_FD(fd);
+ if (!m->read_kmsg) {
+ m->dev_kmsg_fd = TAKE_FD(fd);
return 0;
}
_cleanup_(sd_event_source_unrefp) sd_event_source *es = NULL;
- r = sd_event_add_io(s->event, &es, fd, EPOLLIN, dispatch_dev_kmsg, s);
+ r = sd_event_add_io(m->event, &es, fd, EPOLLIN, dispatch_dev_kmsg, m);
if (r < 0)
return log_error_errno(r, "Failed to add /dev/kmsg fd to event loop: %m");
if (r < 0)
return log_error_errno(r, "Failed to adjust priority of kmsg event source: %m");
- s->dev_kmsg_fd = TAKE_FD(fd);
- s->dev_kmsg_event_source = TAKE_PTR(es);
+ m->dev_kmsg_fd = TAKE_FD(fd);
+ m->dev_kmsg_event_source = TAKE_PTR(es);
return 0;
}
-int server_open_kernel_seqnum(Server *s) {
+int manager_open_kernel_seqnum(Manager *m) {
int r;
- assert(s);
+ assert(m);
/* We store the seqnum we last read in an mmapped file. That way we can just use it like a variable,
* but it is persistent and automatically flushed at reboot. */
- if (!s->read_kmsg)
+ if (!m->read_kmsg)
return 0;
- r = server_map_seqnum_file(s, "kernel-seqnum", sizeof(uint64_t), (void**) &s->kernel_seqnum);
+ r = manager_map_seqnum_file(m, "kernel-seqnum", sizeof(uint64_t), (void**) &m->kernel_seqnum);
if (r < 0)
return log_error_errno(r, "Failed to map kernel seqnum file: %m");
#include <stddef.h>
#include <sys/socket.h>
-typedef struct Server Server;
+typedef struct Manager Manager;
-int server_open_dev_kmsg(Server *s);
-int server_flush_dev_kmsg(Server *s);
+int manager_open_dev_kmsg(Manager *m);
+int manager_flush_dev_kmsg(Manager *m);
-void server_forward_kmsg(Server *s, int priority, const char *identifier, const char *message, const struct ucred *ucred);
+void manager_forward_kmsg(Manager *m, int priority, const char *identifier, const char *message, const struct ucred *ucred);
-int server_open_kernel_seqnum(Server *s);
+int manager_open_kernel_seqnum(Manager *m);
-void dev_kmsg_record(Server *s, char *p, size_t l);
+void dev_kmsg_record(Manager *m, char *p, size_t l);
#include "journald-audit.h"
#include "journald-context.h"
#include "journald-kmsg.h"
+#include "journald-manager.h"
#include "journald-native.h"
#include "journald-rate-limit.h"
-#include "journald-server.h"
#include "journald-socket.h"
#include "journald-stream.h"
#include "journald-syslog.h"
#define FAILED_TO_WRITE_ENTRY_RATELIMIT ((const RateLimit) { .interval = 1 * USEC_PER_SEC, .burst = 1 })
-static int server_schedule_sync(Server *s, int priority);
-static int server_refresh_idle_timer(Server *s);
+static int manager_schedule_sync(Manager *m, int priority);
+static int manager_refresh_idle_timer(Manager *m);
-static int server_determine_path_usage(
- Server *s,
+static int manager_determine_path_usage(
+ Manager *m,
const char *path,
uint64_t *ret_used,
uint64_t *ret_free) {
_cleanup_closedir_ DIR *d = NULL;
struct statvfs ss;
- assert(s);
+ assert(m);
assert(path);
assert(ret_used);
assert(ret_free);
zero(*space);
}
-static int cache_space_refresh(Server *s, JournalStorage *storage) {
+static int cache_space_refresh(Manager *m, JournalStorage *storage) {
JournalStorageSpace *space;
JournalMetrics *metrics;
uint64_t vfs_used, vfs_avail, avail;
usec_t ts;
int r;
- assert(s);
+ assert(m);
metrics = &storage->metrics;
space = &storage->space;
if (space->timestamp != 0 && usec_add(space->timestamp, RECHECK_SPACE_USEC) > ts)
return 0;
- r = server_determine_path_usage(s, storage->path, &vfs_used, &vfs_avail);
+ r = manager_determine_path_usage(m, storage->path, &vfs_used, &vfs_avail);
if (r < 0)
return r;
storage->metrics.min_use = MAX(storage->metrics.min_use, storage->space.vfs_used);
}
-static JournalStorage* server_current_storage(Server *s) {
- assert(s);
+static JournalStorage* manager_current_storage(Manager *m) {
+ assert(m);
- return s->system_journal ? &s->system_storage : &s->runtime_storage;
+ return m->system_journal ? &m->system_storage : &m->runtime_storage;
}
-static int server_determine_space(Server *s, uint64_t *available, uint64_t *limit) {
+static int manager_determine_space(Manager *m, uint64_t *available, uint64_t *limit) {
JournalStorage *js;
int r;
- assert(s);
+ assert(m);
- js = server_current_storage(s);
+ js = manager_current_storage(m);
- r = cache_space_refresh(s, js);
+ r = cache_space_refresh(m, js);
if (r >= 0) {
if (available)
*available = js->space.available;
return r;
}
-void server_space_usage_message(Server *s, JournalStorage *storage) {
- assert(s);
+void manager_space_usage_message(Manager *m, JournalStorage *storage) {
+ assert(m);
if (!storage)
- storage = server_current_storage(s);
+ storage = manager_current_storage(m);
- if (cache_space_refresh(s, storage) < 0)
+ if (cache_space_refresh(m, storage) < 0)
return;
const JournalMetrics *metrics = &storage->metrics;
- server_driver_message(s, 0,
- LOG_MESSAGE_ID(SD_MESSAGE_JOURNAL_USAGE_STR),
- LOG_MESSAGE("%s (%s) is %s, max %s, %s free.",
- storage->name, storage->path,
- FORMAT_BYTES(storage->space.vfs_used),
- FORMAT_BYTES(storage->space.limit),
- FORMAT_BYTES(storage->space.available)),
- LOG_ITEM("JOURNAL_NAME=%s", storage->name),
- LOG_ITEM("JOURNAL_PATH=%s", storage->path),
- LOG_ITEM("CURRENT_USE=%"PRIu64, storage->space.vfs_used),
- LOG_ITEM("CURRENT_USE_PRETTY=%s", FORMAT_BYTES(storage->space.vfs_used)),
- LOG_ITEM("MAX_USE=%"PRIu64, metrics->max_use),
- LOG_ITEM("MAX_USE_PRETTY=%s", FORMAT_BYTES(metrics->max_use)),
- LOG_ITEM("DISK_KEEP_FREE=%"PRIu64, metrics->keep_free),
- LOG_ITEM("DISK_KEEP_FREE_PRETTY=%s", FORMAT_BYTES(metrics->keep_free)),
- LOG_ITEM("DISK_AVAILABLE=%"PRIu64, storage->space.vfs_available),
- LOG_ITEM("DISK_AVAILABLE_PRETTY=%s", FORMAT_BYTES(storage->space.vfs_available)),
- LOG_ITEM("LIMIT=%"PRIu64, storage->space.limit),
- LOG_ITEM("LIMIT_PRETTY=%s", FORMAT_BYTES(storage->space.limit)),
- LOG_ITEM("AVAILABLE=%"PRIu64, storage->space.available),
- LOG_ITEM("AVAILABLE_PRETTY=%s", FORMAT_BYTES(storage->space.available)));
-}
-
-static void server_add_acls(JournalFile *f, uid_t uid) {
+ manager_driver_message(m, 0,
+ LOG_MESSAGE_ID(SD_MESSAGE_JOURNAL_USAGE_STR),
+ LOG_MESSAGE("%s (%s) is %s, max %s, %s free.",
+ storage->name, storage->path,
+ FORMAT_BYTES(storage->space.vfs_used),
+ FORMAT_BYTES(storage->space.limit),
+ FORMAT_BYTES(storage->space.available)),
+ LOG_ITEM("JOURNAL_NAME=%s", storage->name),
+ LOG_ITEM("JOURNAL_PATH=%s", storage->path),
+ LOG_ITEM("CURRENT_USE=%"PRIu64, storage->space.vfs_used),
+ LOG_ITEM("CURRENT_USE_PRETTY=%s", FORMAT_BYTES(storage->space.vfs_used)),
+ LOG_ITEM("MAX_USE=%"PRIu64, metrics->max_use),
+ LOG_ITEM("MAX_USE_PRETTY=%s", FORMAT_BYTES(metrics->max_use)),
+ LOG_ITEM("DISK_KEEP_FREE=%"PRIu64, metrics->keep_free),
+ LOG_ITEM("DISK_KEEP_FREE_PRETTY=%s", FORMAT_BYTES(metrics->keep_free)),
+ LOG_ITEM("DISK_AVAILABLE=%"PRIu64, storage->space.vfs_available),
+ LOG_ITEM("DISK_AVAILABLE_PRETTY=%s", FORMAT_BYTES(storage->space.vfs_available)),
+ LOG_ITEM("LIMIT=%"PRIu64, storage->space.limit),
+ LOG_ITEM("LIMIT_PRETTY=%s", FORMAT_BYTES(storage->space.limit)),
+ LOG_ITEM("AVAILABLE=%"PRIu64, storage->space.available),
+ LOG_ITEM("AVAILABLE_PRETTY=%s", FORMAT_BYTES(storage->space.available)));
+}
+
+static void manager_add_acls(JournalFile *f, uid_t uid) {
assert(f);
#if HAVE_ACL
#endif
}
-static int server_open_journal(
- Server *s,
+static int manager_open_journal(
+ Manager *m,
bool reliably,
const char *fname,
int open_flags,
JournalFileFlags file_flags;
int r;
- assert(s);
+ assert(m);
assert(fname);
assert(ret);
file_flags =
- (s->compress.enabled ? JOURNAL_COMPRESS : 0) |
+ (m->compress.enabled ? JOURNAL_COMPRESS : 0) |
(seal ? JOURNAL_SEAL : 0) |
JOURNAL_STRICT_ORDER;
- set_clear(s->deferred_closes);
+ set_clear(m->deferred_closes);
if (reliably)
r = journal_file_open_reliably(
open_flags,
file_flags,
0640,
- s->compress.threshold_bytes,
+ m->compress.threshold_bytes,
metrics,
- s->mmap,
+ m->mmap,
&f);
else
r = journal_file_open(
open_flags,
file_flags,
0640,
- s->compress.threshold_bytes,
+ m->compress.threshold_bytes,
metrics,
- s->mmap,
+ m->mmap,
/* template= */ NULL,
&f);
if (r < 0)
return r;
- r = journal_file_enable_post_change_timer(f, s->event, POST_CHANGE_TIMER_INTERVAL_USEC);
+ r = journal_file_enable_post_change_timer(f, m->event, POST_CHANGE_TIMER_INTERVAL_USEC);
if (r < 0)
return r;
return r;
}
-static bool server_flushed_flag_is_set(Server *s) {
+static bool manager_flushed_flag_is_set(Manager *m) {
const char *fn;
- assert(s);
+ assert(m);
/* We don't support the "flushing" concept for namespace instances, we assume them to always have
* access to /var */
- if (s->namespace)
+ if (m->namespace)
return true;
- fn = strjoina(s->runtime_directory, "/flushed");
+ fn = strjoina(m->runtime_directory, "/flushed");
return access(fn, F_OK) >= 0;
}
-static void server_drop_flushed_flag(Server *s) {
+static void manager_drop_flushed_flag(Manager *m) {
const char *fn;
- assert(s);
+ assert(m);
- if (s->namespace)
+ if (m->namespace)
return;
- fn = strjoina(s->runtime_directory, "/flushed");
+ fn = strjoina(m->runtime_directory, "/flushed");
if (unlink(fn) < 0 && errno != ENOENT)
log_ratelimit_warning_errno(errno, JOURNAL_LOG_RATELIMIT,
"Failed to unlink %s, ignoring: %m", fn);
}
-static int server_system_journal_open(
- Server *s,
+static int manager_system_journal_open(
+ Manager *m,
bool flush_requested,
bool relinquish_requested) {
const char *fn;
int r = 0;
- if (!s->system_journal &&
- IN_SET(s->storage, STORAGE_PERSISTENT, STORAGE_AUTO) &&
- (flush_requested || server_flushed_flag_is_set(s)) &&
+ if (!m->system_journal &&
+ IN_SET(m->storage, STORAGE_PERSISTENT, STORAGE_AUTO) &&
+ (flush_requested || manager_flushed_flag_is_set(m)) &&
!relinquish_requested) {
/* If in auto mode: first try to create the machine path, but not the prefix.
*
* If in persistent mode: create /var/log/journal and the machine path */
- if (s->storage == STORAGE_PERSISTENT)
- (void) mkdir_parents(s->system_storage.path, 0755);
+ if (m->storage == STORAGE_PERSISTENT)
+ (void) mkdir_parents(m->system_storage.path, 0755);
- (void) mkdir(s->system_storage.path, 0755);
+ (void) mkdir(m->system_storage.path, 0755);
- fn = strjoina(s->system_storage.path, "/system.journal");
- r = server_open_journal(
- s,
+ fn = strjoina(m->system_storage.path, "/system.journal");
+ r = manager_open_journal(
+ m,
/* reliably= */ true,
fn,
O_RDWR|O_CREAT,
- s->seal,
- &s->system_storage.metrics,
- &s->system_journal);
+ m->seal,
+ &m->system_storage.metrics,
+ &m->system_journal);
if (r >= 0) {
- server_add_acls(s->system_journal, 0);
- (void) cache_space_refresh(s, &s->system_storage);
- patch_min_use(&s->system_storage);
+ manager_add_acls(m->system_journal, 0);
+ (void) cache_space_refresh(m, &m->system_storage);
+ patch_min_use(&m->system_storage);
} else {
if (!IN_SET(r, -ENOENT, -EROFS))
log_ratelimit_warning_errno(r, JOURNAL_LOG_RATELIMIT,
* journal is back.
*/
if (!flush_requested)
- (void) server_flush_to_var(s, true);
+ (void) manager_flush_to_var(m, true);
}
- if (!s->runtime_journal &&
- (s->storage != STORAGE_NONE)) {
+ if (!m->runtime_journal &&
+ (m->storage != STORAGE_NONE)) {
- fn = strjoina(s->runtime_storage.path, "/system.journal");
+ fn = strjoina(m->runtime_storage.path, "/system.journal");
- if (!s->system_journal || relinquish_requested) {
+ if (!m->system_journal || relinquish_requested) {
/* OK, we really need the runtime journal, so create it if necessary. */
- (void) mkdir_parents(s->runtime_storage.path, 0755);
- (void) mkdir(s->runtime_storage.path, 0750);
+ (void) mkdir_parents(m->runtime_storage.path, 0755);
+ (void) mkdir(m->runtime_storage.path, 0750);
- r = server_open_journal(
- s,
+ r = manager_open_journal(
+ m,
/* reliably= */ true,
fn,
O_RDWR|O_CREAT,
/* seal= */ false,
- &s->runtime_storage.metrics,
- &s->runtime_journal);
+ &m->runtime_storage.metrics,
+ &m->runtime_journal);
if (r < 0)
return log_ratelimit_warning_errno(r, JOURNAL_LOG_RATELIMIT,
"Failed to open runtime journal: %m");
- } else if (!server_flushed_flag_is_set(s)) {
+ } else if (!manager_flushed_flag_is_set(m)) {
/* Try to open the runtime journal, but only if it already exists, so that we can
* flush it into the system journal */
- r = server_open_journal(
- s,
+ r = manager_open_journal(
+ m,
/* reliably= */ false,
fn,
O_RDWR,
/* seal= */ false,
- &s->runtime_storage.metrics,
- &s->runtime_journal);
+ &m->runtime_storage.metrics,
+ &m->runtime_journal);
if (r < 0) {
if (r != -ENOENT)
log_ratelimit_warning_errno(r, JOURNAL_LOG_RATELIMIT,
}
}
- if (s->runtime_journal) {
- server_add_acls(s->runtime_journal, 0);
- (void) cache_space_refresh(s, &s->runtime_storage);
- patch_min_use(&s->runtime_storage);
- server_drop_flushed_flag(s);
+ if (m->runtime_journal) {
+ manager_add_acls(m->runtime_journal, 0);
+ (void) cache_space_refresh(m, &m->runtime_storage);
+ patch_min_use(&m->runtime_storage);
+ manager_drop_flushed_flag(m);
}
}
return r;
}
-static int server_find_user_journal(Server *s, uid_t uid, JournalFile **ret) {
+static int manager_find_user_journal(Manager *m, uid_t uid, JournalFile **ret) {
_cleanup_(journal_file_offline_closep) JournalFile *f = NULL;
_cleanup_free_ char *p = NULL;
int r;
assert(!uid_for_system_journal(uid));
- f = ordered_hashmap_get(s->user_journals, UID_TO_PTR(uid));
+ f = ordered_hashmap_get(m->user_journals, UID_TO_PTR(uid));
if (f)
goto found;
- if (asprintf(&p, "%s/user-" UID_FMT ".journal", s->system_storage.path, uid) < 0)
+ if (asprintf(&p, "%s/user-" UID_FMT ".journal", m->system_storage.path, uid) < 0)
return log_oom();
/* Too many open? Then let's close one (or more) */
- while (ordered_hashmap_size(s->user_journals) >= USER_JOURNALS_MAX) {
+ while (ordered_hashmap_size(m->user_journals) >= USER_JOURNALS_MAX) {
JournalFile *first;
- assert_se(first = ordered_hashmap_steal_first(s->user_journals));
+ assert_se(first = ordered_hashmap_steal_first(m->user_journals));
(void) journal_file_offline_close(first);
}
- r = server_open_journal(
- s,
+ r = manager_open_journal(
+ m,
/* reliably= */ true,
p,
O_RDWR|O_CREAT,
- s->seal,
- &s->system_storage.metrics,
+ m->seal,
+ &m->system_storage.metrics,
&f);
if (r < 0)
return r;
- r = ordered_hashmap_put(s->user_journals, UID_TO_PTR(uid), f);
+ r = ordered_hashmap_put(m->user_journals, UID_TO_PTR(uid), f);
if (r < 0)
return r;
- server_add_acls(f, uid);
+ manager_add_acls(f, uid);
found:
*ret = TAKE_PTR(f);
return 0;
}
-static JournalFile* server_find_journal(Server *s, uid_t uid) {
+static JournalFile* manager_find_journal(Manager *m, uid_t uid) {
int r;
- assert(s);
+ assert(m);
/* A rotate that fails to create the new journal (ENOSPC) leaves the rotated journal as NULL. Unless
* we revisit opening, even after space is made available we'll continue to return NULL indefinitely.
* recover from failed rotates (or anything else that's left the journals as NULL).
*
* Fixes https://github.com/systemd/systemd/issues/3968 */
- (void) server_system_journal_open(s, /* flush_requested= */ false, /* relinquish_requested= */ false);
+ (void) manager_system_journal_open(m, /* flush_requested= */ false, /* relinquish_requested= */ false);
/* We split up user logs only on /var, not on /run. If the runtime file is open, we write to it
* exclusively, in order to guarantee proper order as soon as we flush /run to /var and close the
* runtime file. */
- if (s->runtime_journal)
- return s->runtime_journal;
+ if (m->runtime_journal)
+ return m->runtime_journal;
/* If we are not in persistent mode, then we need to return NULL immediately rather than opening a
* persistent journal of any sort.
*
* Fixes https://github.com/systemd/systemd/issues/20390 */
- if (!IN_SET(s->storage, STORAGE_AUTO, STORAGE_PERSISTENT))
+ if (!IN_SET(m->storage, STORAGE_AUTO, STORAGE_PERSISTENT))
return NULL;
if (!uid_for_system_journal(uid)) {
JournalFile *f = NULL;
- r = server_find_user_journal(s, uid, &f);
+ r = manager_find_user_journal(m, uid, &f);
if (r >= 0)
return ASSERT_PTR(f);
log_warning_errno(r, "Failed to open user journal file, falling back to system journal: %m");
}
- return s->system_journal;
+ return m->system_journal;
}
-static int server_do_rotate(
- Server *s,
+static int manager_do_rotate(
+ Manager *m,
JournalFile **f,
const char* name,
bool seal,
JournalFileFlags file_flags;
int r;
- assert(s);
+ assert(m);
if (!*f)
return -EINVAL;
file_flags =
- (s->compress.enabled ? JOURNAL_COMPRESS : 0)|
+ (m->compress.enabled ? JOURNAL_COMPRESS : 0)|
(seal ? JOURNAL_SEAL : 0) |
JOURNAL_STRICT_ORDER;
- r = journal_file_rotate(f, s->mmap, file_flags, s->compress.threshold_bytes, s->deferred_closes);
+ r = journal_file_rotate(f, m->mmap, file_flags, m->compress.threshold_bytes, m->deferred_closes);
if (r < 0) {
if (*f)
return log_ratelimit_error_errno(r, JOURNAL_LOG_RATELIMIT,
"Failed to create new %s journal: %m", name);
}
- server_add_acls(*f, uid);
+ manager_add_acls(*f, uid);
return r;
}
-static void server_process_deferred_closes(Server *s) {
+static void manager_process_deferred_closes(Manager *m) {
JournalFile *f;
/* Perform any deferred closes which aren't still offlining. */
- SET_FOREACH(f, s->deferred_closes) {
+ SET_FOREACH(f, m->deferred_closes) {
if (journal_file_is_offlining(f))
continue;
- (void) set_remove(s->deferred_closes, f);
+ (void) set_remove(m->deferred_closes, f);
(void) journal_file_offline_close(f);
}
}
-static void server_vacuum_deferred_closes(Server *s) {
- assert(s);
+static void manager_vacuum_deferred_closes(Manager *m) {
+ assert(m);
/* Make some room in the deferred closes list, so that it doesn't grow without bounds */
- if (set_size(s->deferred_closes) < DEFERRED_CLOSES_MAX)
+ if (set_size(m->deferred_closes) < DEFERRED_CLOSES_MAX)
return;
/* Let's first remove all journal files that might already have completed closing */
- server_process_deferred_closes(s);
+ manager_process_deferred_closes(m);
/* And now, let's close some more until we reach the limit again. */
- while (set_size(s->deferred_closes) >= DEFERRED_CLOSES_MAX) {
+ while (set_size(m->deferred_closes) >= DEFERRED_CLOSES_MAX) {
JournalFile *f;
- assert_se(f = set_steal_first(s->deferred_closes));
+ assert_se(f = set_steal_first(m->deferred_closes));
journal_file_offline_close(f);
}
}
-static int server_archive_offline_user_journals(Server *s) {
+static int manager_archive_offline_user_journals(Manager *m) {
_cleanup_closedir_ DIR *d = NULL;
int r;
- assert(s);
+ assert(m);
- d = opendir(s->system_storage.path);
+ d = opendir(m->system_storage.path);
if (!d) {
if (errno == ENOENT)
return 0;
return log_ratelimit_error_errno(errno, JOURNAL_LOG_RATELIMIT,
- "Failed to open %s: %m", s->system_storage.path);
+ "Failed to open %s: %m", m->system_storage.path);
}
for (;;) {
if (errno != 0)
log_ratelimit_warning_errno(errno, JOURNAL_LOG_RATELIMIT,
"Failed to enumerate %s, ignoring: %m",
- s->system_storage.path);
+ m->system_storage.path);
break;
}
}
/* Already rotated in the above loop? i.e. is it an open user journal? */
- if (ordered_hashmap_contains(s->user_journals, UID_TO_PTR(uid)))
+ if (ordered_hashmap_contains(m->user_journals, UID_TO_PTR(uid)))
continue;
- full = path_join(s->system_storage.path, de->d_name);
+ full = path_join(m->system_storage.path, de->d_name);
if (!full)
return log_oom();
}
/* Make some room in the set of deferred close()s */
- server_vacuum_deferred_closes(s);
+ manager_vacuum_deferred_closes(m);
/* Open the file briefly, so that we can archive it */
r = journal_file_open(
fd,
full,
O_RDWR,
- (s->compress.enabled ? JOURNAL_COMPRESS : 0) |
- (s->seal ? JOURNAL_SEAL : 0), /* strict order does not matter here */
+ (m->compress.enabled ? JOURNAL_COMPRESS : 0) |
+ (m->seal ? JOURNAL_SEAL : 0), /* strict order does not matter here */
0640,
- s->compress.threshold_bytes,
- &s->system_storage.metrics,
- s->mmap,
+ m->compress.threshold_bytes,
+ &m->system_storage.metrics,
+ m->mmap,
/* template= */ NULL,
&f);
if (r < 0) {
if (r < 0)
log_debug_errno(r, "Failed to archive journal file '%s', ignoring: %m", full);
- journal_file_initiate_close(TAKE_PTR(f), s->deferred_closes);
+ journal_file_initiate_close(TAKE_PTR(f), m->deferred_closes);
}
return 0;
}
-void server_rotate(Server *s) {
+void manager_rotate(Manager *m) {
JournalFile *f;
void *k;
int r;
log_debug("Rotating...");
/* First, rotate the system journal (either in its runtime flavour or in its system flavour) */
- (void) server_do_rotate(s, &s->runtime_journal, "runtime", /* seal= */ false, /* uid= */ 0);
- (void) server_do_rotate(s, &s->system_journal, "system", s->seal, /* uid= */ 0);
+ (void) manager_do_rotate(m, &m->runtime_journal, "runtime", /* seal= */ false, /* uid= */ 0);
+ (void) manager_do_rotate(m, &m->system_journal, "system", m->seal, /* uid= */ 0);
/* Then, rotate all user journals we have open (keeping them open) */
- ORDERED_HASHMAP_FOREACH_KEY(f, k, s->user_journals) {
- r = server_do_rotate(s, &f, "user", s->seal, PTR_TO_UID(k));
+ ORDERED_HASHMAP_FOREACH_KEY(f, k, m->user_journals) {
+ r = manager_do_rotate(m, &f, "user", m->seal, PTR_TO_UID(k));
if (r >= 0)
- ordered_hashmap_replace(s->user_journals, k, f);
+ ordered_hashmap_replace(m->user_journals, k, f);
else if (!f)
/* Old file has been closed and deallocated */
- ordered_hashmap_remove(s->user_journals, k);
+ ordered_hashmap_remove(m->user_journals, k);
}
/* Finally, also rotate all user journals we currently do not have open. (But do so only if we
* actually have access to /var, i.e. are not in the log-to-runtime-journal mode). */
- if (!s->runtime_journal)
- (void) server_archive_offline_user_journals(s);
+ if (!m->runtime_journal)
+ (void) manager_archive_offline_user_journals(m);
- server_process_deferred_closes(s);
+ manager_process_deferred_closes(m);
}
-static void server_rotate_journal(Server *s, JournalFile *f, uid_t uid) {
+static void manager_rotate_journal(Manager *m, JournalFile *f, uid_t uid) {
int r;
- assert(s);
+ assert(m);
assert(f);
- /* This is similar to server_rotate(), but rotates only specified journal file.
+ /* This is similar to manager_rotate(), but rotates only the specified journal file.
*
* 💣💣💣 This invalidates 'f', and the caller cannot reuse the passed JournalFile object. 💣💣💣 */
- if (f == s->system_journal)
- (void) server_do_rotate(s, &s->system_journal, "system", s->seal, /* uid= */ 0);
- else if (f == s->runtime_journal)
- (void) server_do_rotate(s, &s->runtime_journal, "runtime", /* seal= */ false, /* uid= */ 0);
+ if (f == m->system_journal)
+ (void) manager_do_rotate(m, &m->system_journal, "system", m->seal, /* uid= */ 0);
+ else if (f == m->runtime_journal)
+ (void) manager_do_rotate(m, &m->runtime_journal, "runtime", /* seal= */ false, /* uid= */ 0);
else {
- assert(ordered_hashmap_get(s->user_journals, UID_TO_PTR(uid)) == f);
- r = server_do_rotate(s, &f, "user", s->seal, uid);
+ assert(ordered_hashmap_get(m->user_journals, UID_TO_PTR(uid)) == f);
+ r = manager_do_rotate(m, &f, "user", m->seal, uid);
if (r >= 0)
- ordered_hashmap_replace(s->user_journals, UID_TO_PTR(uid), f);
+ ordered_hashmap_replace(m->user_journals, UID_TO_PTR(uid), f);
else if (!f)
/* Old file has been closed and deallocated */
- ordered_hashmap_remove(s->user_journals, UID_TO_PTR(uid));
+ ordered_hashmap_remove(m->user_journals, UID_TO_PTR(uid));
}
- server_process_deferred_closes(s);
+ manager_process_deferred_closes(m);
}
-static void server_sync(Server *s, bool wait) {
+static void manager_sync(Manager *m, bool wait) {
JournalFile *f;
int r;
- if (s->system_journal) {
- r = journal_file_set_offline(s->system_journal, wait);
+ if (m->system_journal) {
+ r = journal_file_set_offline(m->system_journal, wait);
if (r < 0)
log_ratelimit_warning_errno(r, JOURNAL_LOG_RATELIMIT,
"Failed to sync system journal, ignoring: %m");
}
- ORDERED_HASHMAP_FOREACH(f, s->user_journals) {
+ ORDERED_HASHMAP_FOREACH(f, m->user_journals) {
r = journal_file_set_offline(f, wait);
if (r < 0)
log_ratelimit_warning_errno(r, JOURNAL_LOG_RATELIMIT,
"Failed to sync user journal, ignoring: %m");
}
- r = sd_event_source_set_enabled(s->sync_event_source, SD_EVENT_OFF);
+ r = sd_event_source_set_enabled(m->sync_event_source, SD_EVENT_OFF);
if (r < 0)
log_ratelimit_warning_errno(r, JOURNAL_LOG_RATELIMIT,
"Failed to disable sync timer source, ignoring: %m");
- s->sync_scheduled = false;
+ m->sync_scheduled = false;
}
-static void server_do_vacuum(Server *s, JournalStorage *storage, bool verbose) {
+static void manager_do_vacuum(Manager *m, JournalStorage *storage, bool verbose) {
int r;
- assert(s);
+ assert(m);
assert(storage);
- (void) cache_space_refresh(s, storage);
+ (void) cache_space_refresh(m, storage);
if (verbose)
- server_space_usage_message(s, storage);
+ manager_space_usage_message(m, storage);
r = journal_directory_vacuum(storage->path, storage->space.limit,
- storage->metrics.n_max_files, s->max_retention_usec,
- &s->oldest_file_usec, verbose);
+ storage->metrics.n_max_files, m->max_retention_usec,
+ &m->oldest_file_usec, verbose);
if (r < 0 && r != -ENOENT)
log_ratelimit_warning_errno(r, JOURNAL_LOG_RATELIMIT,
"Failed to vacuum %s, ignoring: %m", storage->path);
cache_space_invalidate(&storage->space);
}
-void server_vacuum(Server *s, bool verbose) {
- assert(s);
+void manager_vacuum(Manager *m, bool verbose) {
+ assert(m);
log_debug("Vacuuming...");
- s->oldest_file_usec = 0;
+ m->oldest_file_usec = 0;
- if (s->system_journal)
- server_do_vacuum(s, &s->system_storage, verbose);
- if (s->runtime_journal)
- server_do_vacuum(s, &s->runtime_storage, verbose);
+ if (m->system_journal)
+ manager_do_vacuum(m, &m->system_storage, verbose);
+ if (m->runtime_journal)
+ manager_do_vacuum(m, &m->runtime_storage, verbose);
}
-static void server_cache_machine_id(Server *s) {
+static void manager_cache_machine_id(Manager *m) {
sd_id128_t id;
int r;
- assert(s);
+ assert(m);
r = sd_id128_get_machine(&id);
if (r < 0)
return;
- sd_id128_to_string(id, stpcpy(s->machine_id_field, "_MACHINE_ID="));
+ sd_id128_to_string(id, stpcpy(m->machine_id_field, "_MACHINE_ID="));
}
-static void server_cache_boot_id(Server *s) {
+static void manager_cache_boot_id(Manager *m) {
sd_id128_t id;
int r;
- assert(s);
+ assert(m);
r = sd_id128_get_boot(&id);
if (r < 0)
return;
- sd_id128_to_string(id, stpcpy(s->boot_id_field, "_BOOT_ID="));
+ sd_id128_to_string(id, stpcpy(m->boot_id_field, "_BOOT_ID="));
}
-static void server_cache_hostname(Server *s) {
+static void manager_cache_hostname(Manager *m) {
_cleanup_free_ char *t = NULL;
char *x;
- assert(s);
+ assert(m);
t = gethostname_malloc();
if (!t)
if (!x)
return;
- free_and_replace(s->hostname_field, x);
+ free_and_replace(m->hostname_field, x);
}
static bool shall_try_append_again(JournalFile *f, int r) {
case -EROFS: /* Read-only file system */
/* When appending an entry fails if shall_try_append_again returns true, the journal is
- * rotated. If the FS is read-only, rotation will fail and s->system_journal will be set to
- * NULL. After that, when find_journal will try to open the journal since s->system_journal
+ * rotated. If the FS is read-only, rotation will fail and m->system_journal will be set to
+ * NULL. After that, when find_journal will try to open the journal since m->system_journal
* will be NULL, it will open the runtime journal. */
log_ratelimit_warning_errno(r, JOURNAL_LOG_RATELIMIT, "%s: Read-only file system, rotating.", f->path);
return true;
}
}
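shall_try_append_again() feeds the append/rotate/retry pattern used by the write path: if an append fails with an error that a rotation can plausibly fix, rotate once and retry, otherwise give up. A minimal sketch of that control flow, with stub functions in place of the real journal API and only a token subset of the error codes:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool shall_retry(int error) {
        /* Only a subset of errors is rotation-fixable; the real list in
         * journald is longer (ENOSPC, EDQUOT, EIO, ...). */
        switch (error) {
        case -E2BIG:
        case -EFBIG:
        case -EROFS:
                return true;
        default:
                return false;
        }
}

static int append(int *journal, const char *msg) {
        if (*journal < 0)
                return -EROFS;           /* pretend the current file is unusable */
        printf("appended: %s\n", msg);
        return 0;
}

static void rotate(int *journal) {
        printf("rotating journal\n");
        *journal = 0;                    /* pretend rotation produced a fresh file */
}

int main(void) {
        int journal = -1;                /* start with a broken file */

        int r = append(&journal, "hello");
        if (r < 0 && shall_retry(r)) {
                rotate(&journal);
                r = append(&journal, "hello");
        }
        return r < 0;
}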
-static void server_write_to_journal(
- Server *s,
+static void manager_write_to_journal(
+ Manager *m,
uid_t uid,
const struct iovec *iovec,
size_t n,
JournalFile *f;
int r;
- assert(s);
+ assert(m);
assert(iovec);
assert(n > 0);
assert(ts);
- if (ts->realtime < s->last_realtime_clock) {
+ if (ts->realtime < m->last_realtime_clock) {
/* When the time jumps backwards, let's immediately rotate. Of course, this should not happen during
* regular operation. However, when it does happen, then we should make sure that we start fresh files
* to ensure that the entries in the journal files are strictly ordered by time, in order to ensure
* bisection works correctly. */
log_ratelimit_info(JOURNAL_LOG_RATELIMIT, "Time jumped backwards, rotating.");
- server_rotate(s);
- server_vacuum(s, /* verbose = */ false);
+ manager_rotate(m);
+ manager_vacuum(m, /* verbose = */ false);
vacuumed = true;
}
- f = server_find_journal(s, uid);
+ f = manager_find_journal(m, uid);
if (!f)
return;
- if (journal_file_rotate_suggested(f, s->max_file_usec, LOG_DEBUG)) {
+ if (journal_file_rotate_suggested(f, m->max_file_usec, LOG_DEBUG)) {
if (vacuumed) {
log_ratelimit_warning(JOURNAL_LOG_RATELIMIT,
"Suppressing rotation, as we already rotated immediately before write attempt. Giving up.");
log_debug("%s: Journal header limits reached or header out-of-date, rotating.", f->path);
- server_rotate_journal(s, TAKE_PTR(f), uid);
- server_vacuum(s, /* verbose = */ false);
+ manager_rotate_journal(m, TAKE_PTR(f), uid);
+ manager_vacuum(m, /* verbose = */ false);
vacuumed = true;
- f = server_find_journal(s, uid);
+ f = manager_find_journal(m, uid);
if (!f)
return;
}
- s->last_realtime_clock = ts->realtime;
+ m->last_realtime_clock = ts->realtime;
r = journal_file_append_entry(
f,
ts,
/* boot_id= */ NULL,
iovec, n,
- &s->seqnum->seqnum,
- &s->seqnum->id,
+ &m->seqnum->seqnum,
+ &m->seqnum->id,
/* ret_object= */ NULL,
/* ret_offset= */ NULL);
if (r >= 0) {
- server_schedule_sync(s, priority);
+ manager_schedule_sync(m, priority);
return;
}
return;
}
- server_rotate_journal(s, TAKE_PTR(f), uid);
- server_vacuum(s, /* verbose = */ false);
+ manager_rotate_journal(m, TAKE_PTR(f), uid);
+ manager_vacuum(m, /* verbose = */ false);
- f = server_find_journal(s, uid);
+ f = manager_find_journal(m, uid);
if (!f)
return;
ts,
/* boot_id= */ NULL,
iovec, n,
- &s->seqnum->seqnum,
- &s->seqnum->id,
+ &m->seqnum->seqnum,
+ &m->seqnum->id,
/* ret_object= */ NULL,
/* ret_offset= */ NULL);
if (r < 0)
"Failed to write entry to %s (%zu items, %zu bytes) despite vacuuming, ignoring: %m",
f->path, n, iovec_total_size(iovec, n));
else
- server_schedule_sync(s, priority);
+ manager_schedule_sync(m, priority);
}
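manager_write_to_journal() rotates immediately when the realtime clock jumps backwards so that every journal file stays strictly ordered by time and bisection keeps working. A self-contained sketch of that guard, with hypothetical types in place of the journald ones:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t usec_t;

struct writer {
        usec_t last_realtime;    /* timestamp of the previously appended entry */
};

static void rotate(struct writer *w) {
        (void) w;
        printf("rotating: realtime clock went backwards\n");
}

static void write_entry(struct writer *w, usec_t realtime) {
        /* An entry older than the previous one would break the strict
         * per-file ordering that bisection relies on, so start a fresh file. */
        if (realtime < w->last_realtime)
                rotate(w);

        w->last_realtime = realtime;
        printf("appended entry at %llu\n", (unsigned long long) realtime);
}

int main(void) {
        struct writer w = { .last_realtime = 0 };

        write_entry(&w, 100);
        write_entry(&w, 50);     /* clock jumped backwards, triggers a rotation */
        return 0;
}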
#define IOVEC_ADD_NUMERIC_FIELD(iovec, n, value, type, isset, format, field) \
iovec[n++] = IOVEC_MAKE_STRING(k); \
}
-static void server_dispatch_message_real(
- Server *s,
- struct iovec *iovec, size_t n, size_t m,
+static void manager_dispatch_message_real(
+ Manager *m,
+ struct iovec *iovec, size_t n, size_t mm,
const ClientContext *c,
const struct timeval *tv,
int priority,
uid_t journal_uid;
ClientContext *o;
- assert(s);
+ assert(m);
assert(iovec);
assert(n > 0);
assert(n +
N_IOVEC_META_FIELDS +
(pid_is_valid(object_pid) ? N_IOVEC_OBJECT_FIELDS : 0) +
- client_context_extra_fields_n_iovec(c) <= m);
+ client_context_extra_fields_n_iovec(c) <= mm);
if (c) {
IOVEC_ADD_NUMERIC_FIELD(iovec, n, c->pid, pid_t, pid_is_valid, PID_FMT, "_PID");
}
}
- assert(n <= m);
+ assert(n <= mm);
- if (pid_is_valid(object_pid) && client_context_get(s, object_pid, NULL, NULL, 0, NULL, &o) >= 0) {
+ if (pid_is_valid(object_pid) && client_context_get(m, object_pid, NULL, NULL, 0, NULL, &o) >= 0) {
IOVEC_ADD_NUMERIC_FIELD(iovec, n, o->pid, pid_t, pid_is_valid, PID_FMT, "OBJECT_PID");
IOVEC_ADD_NUMERIC_FIELD(iovec, n, o->uid, uid_t, uid_is_valid, UID_FMT, "OBJECT_UID");
IOVEC_ADD_ID128_FIELD(iovec, n, o->invocation_id, "OBJECT_SYSTEMD_INVOCATION_ID");
}
- assert(n <= m);
+ assert(n <= mm);
if (tv) {
xsprintf(source_time, "_SOURCE_REALTIME_TIMESTAMP=" USEC_FMT, timeval_load(tv));
/* Note that strictly speaking storing the boot id here is
* redundant since the entry includes this in-line
* anyway. However, we need this indexed, too. */
- if (!isempty(s->boot_id_field))
- iovec[n++] = IOVEC_MAKE_STRING(s->boot_id_field);
+ if (!isempty(m->boot_id_field))
+ iovec[n++] = IOVEC_MAKE_STRING(m->boot_id_field);
- if (!isempty(s->machine_id_field))
- iovec[n++] = IOVEC_MAKE_STRING(s->machine_id_field);
+ if (!isempty(m->machine_id_field))
+ iovec[n++] = IOVEC_MAKE_STRING(m->machine_id_field);
- if (!isempty(s->hostname_field))
- iovec[n++] = IOVEC_MAKE_STRING(s->hostname_field);
+ if (!isempty(m->hostname_field))
+ iovec[n++] = IOVEC_MAKE_STRING(m->hostname_field);
- if (!isempty(s->namespace_field))
- iovec[n++] = IOVEC_MAKE_STRING(s->namespace_field);
+ if (!isempty(m->namespace_field))
+ iovec[n++] = IOVEC_MAKE_STRING(m->namespace_field);
iovec[n++] = in_initrd() ? IOVEC_MAKE_STRING("_RUNTIME_SCOPE=initrd") : IOVEC_MAKE_STRING("_RUNTIME_SCOPE=system");
- assert(n <= m);
+ assert(n <= mm);
- if (s->split_mode == SPLIT_UID && c && uid_is_valid(c->uid))
+ if (m->split_mode == SPLIT_UID && c && uid_is_valid(c->uid))
/* Split up strictly by (non-root) UID */
journal_uid = c->uid;
- else if (s->split_mode == SPLIT_LOGIN && c && c->uid > 0 && uid_is_valid(c->owner_uid))
+ else if (m->split_mode == SPLIT_LOGIN && c && c->uid > 0 && uid_is_valid(c->owner_uid))
/* Split up by login UIDs. We do this only if the
* realuid is not root, in order not to accidentally
* leak privileged information to the user that is
* not use the source time, and not even the time the event was originally seen, but instead simply
* the time we started processing it, as we want strictly linear ordering in what we write out.) */
struct dual_timestamp ts;
- event_dual_timestamp_now(s->event, &ts);
+ event_dual_timestamp_now(m->event, &ts);
- (void) server_forward_socket(s, iovec, n, &ts, priority);
+ (void) manager_forward_socket(m, iovec, n, &ts, priority);
- server_write_to_journal(s, journal_uid, iovec, n, &ts, priority);
+ manager_write_to_journal(m, journal_uid, iovec, n, &ts, priority);
}
-void server_driver_message_internal(Server *s, pid_t object_pid, const char *format, ...) {
+void manager_driver_message_internal(Manager *m, pid_t object_pid, const char *format, ...) {
struct iovec *iovec;
- size_t n = 0, k, m;
+ size_t n = 0, k, mm;
va_list ap;
int r;
- assert(s);
+ assert(m);
assert(format);
- m = N_IOVEC_META_FIELDS + 5 + N_IOVEC_PAYLOAD_FIELDS + client_context_extra_fields_n_iovec(s->my_context) + N_IOVEC_OBJECT_FIELDS;
- iovec = newa(struct iovec, m);
+ mm = N_IOVEC_META_FIELDS + 5 + N_IOVEC_PAYLOAD_FIELDS + client_context_extra_fields_n_iovec(m->my_context) + N_IOVEC_OBJECT_FIELDS;
+ iovec = newa(struct iovec, mm);
assert_cc(3 == LOG_FAC(LOG_DAEMON));
iovec[n++] = IOVEC_MAKE_STRING("SYSLOG_FACILITY=3");
va_start(ap, format);
DISABLE_WARNING_FORMAT_NONLITERAL;
- r = log_format_iovec(iovec, m, &n, false, 0, format, ap);
+ r = log_format_iovec(iovec, mm, &n, false, 0, format, ap);
REENABLE_WARNING;
/* Error handling below */
va_end(ap);
if (r >= 0)
- server_dispatch_message_real(s, iovec, n, m, s->my_context, /* tv= */ NULL, LOG_INFO, object_pid);
+ manager_dispatch_message_real(m, iovec, n, mm, m->my_context, /* tv= */ NULL, LOG_INFO, object_pid);
while (k < n)
free(iovec[k++].iov_base);
n = 3;
iovec[n++] = IOVEC_MAKE_STRING("PRIORITY=4");
iovec[n++] = IOVEC_MAKE_STRING(buf);
- server_dispatch_message_real(s, iovec, n, m, s->my_context, /* tv= */ NULL, LOG_INFO, object_pid);
+ manager_dispatch_message_real(m, iovec, n, mm, m->my_context, /* tv= */ NULL, LOG_INFO, object_pid);
}
}
-void server_dispatch_message(
- Server *s,
- struct iovec *iovec, size_t n, size_t m,
+void manager_dispatch_message(
+ Manager *m,
+ struct iovec *iovec, size_t n, size_t mm,
ClientContext *c,
const struct timeval *tv,
int priority,
uint64_t available = 0;
int rl;
- assert(s);
+ assert(m);
assert(iovec || n == 0);
if (n == 0)
return;
- if (LOG_PRI(priority) > s->max_level_store)
+ if (LOG_PRI(priority) > m->max_level_store)
return;
/* Stop early in case the information will not be stored
* in a journal. */
- if (s->storage == STORAGE_NONE)
+ if (m->storage == STORAGE_NONE)
return;
if (c && c->unit) {
- (void) server_determine_space(s, &available, /* limit= */ NULL);
+ (void) manager_determine_space(m, &available, /* limit= */ NULL);
rl = journal_ratelimit_test(
- &s->ratelimit_groups_by_id,
+ &m->ratelimit_groups_by_id,
c->unit,
c->log_ratelimit_interval,
c->log_ratelimit_burst,
/* Write a suppression message if we suppressed something */
if (rl > 1)
- server_driver_message(s, c->pid,
+ manager_driver_message(m, c->pid,
LOG_MESSAGE_ID(SD_MESSAGE_JOURNAL_DROPPED_STR),
LOG_MESSAGE("Suppressed %i messages from %s", rl - 1, c->unit),
LOG_ITEM("N_DROPPED=%i", rl - 1));
}
- server_dispatch_message_real(s, iovec, n, m, c, tv, priority, object_pid);
+ manager_dispatch_message_real(m, iovec, n, mm, c, tv, priority, object_pid);
}
-int server_flush_to_var(Server *s, bool require_flag_file) {
+int manager_flush_to_var(Manager *m, bool require_flag_file) {
sd_journal *j = NULL;
const char *fn;
unsigned n = 0;
usec_t start;
int r, k;
- assert(s);
+ assert(m);
- if (!IN_SET(s->storage, STORAGE_AUTO, STORAGE_PERSISTENT))
+ if (!IN_SET(m->storage, STORAGE_AUTO, STORAGE_PERSISTENT))
return 0;
- if (s->namespace) /* Flushing concept does not exist for namespace instances */
+ if (m->namespace) /* Flushing concept does not exist for namespace instances */
return 0;
- if (!s->runtime_journal) /* Nothing to flush? */
+ if (!m->runtime_journal) /* Nothing to flush? */
return 0;
- if (require_flag_file && !server_flushed_flag_is_set(s))
+ if (require_flag_file && !manager_flushed_flag_is_set(m))
return 0;
- (void) server_system_journal_open(s, /* flush_requested=*/ true, /* relinquish_requested= */ false);
+ (void) manager_system_journal_open(m, /* flush_requested=*/ true, /* relinquish_requested= */ false);
- if (!s->system_journal)
+ if (!m->system_journal)
return 0;
/* Offline and close the 'main' runtime journal file to allow the runtime journal to be opened with
* the SD_JOURNAL_ASSUME_IMMUTABLE flag below. */
- s->runtime_journal = journal_file_offline_close(s->runtime_journal);
+ m->runtime_journal = journal_file_offline_close(m->runtime_journal);
/* Reset current seqnum data to avoid unnecessary rotation when switching to system journal.
* See issue #30092. */
- zero(*s->seqnum);
+ zero(*m->seqnum);
- log_debug("Flushing to %s...", s->system_storage.path);
+ log_debug("Flushing to %s...", m->system_storage.path);
start = now(CLOCK_MONOTONIC);
r = journal_file_copy_entry(
f,
- s->system_journal,
+ m->system_journal,
o,
f->current_offset,
- &s->seqnum->seqnum,
- &s->seqnum->id);
+ &m->seqnum->seqnum,
+ &m->seqnum->id);
if (r >= 0)
continue;
- if (!shall_try_append_again(s->system_journal, r)) {
+ if (!shall_try_append_again(m->system_journal, r)) {
log_ratelimit_error_errno(r, JOURNAL_LOG_RATELIMIT, "Can't write entry: %m");
goto finish;
}
log_ratelimit_info(JOURNAL_LOG_RATELIMIT, "Rotating system journal.");
- server_rotate_journal(s, s->system_journal, /* uid = */ 0);
- server_vacuum(s, /* verbose = */ false);
+ manager_rotate_journal(m, m->system_journal, /* uid = */ 0);
+ manager_vacuum(m, /* verbose = */ false);
- if (!s->system_journal) {
+ if (!m->system_journal) {
log_ratelimit_notice(JOURNAL_LOG_RATELIMIT,
"Didn't flush runtime journal since rotation of system journal wasn't successful.");
r = -EIO;
log_debug("Retrying write.");
r = journal_file_copy_entry(
f,
- s->system_journal,
+ m->system_journal,
o,
f->current_offset,
- &s->seqnum->seqnum,
- &s->seqnum->id);
+ &m->seqnum->seqnum,
+ &m->seqnum->id);
if (r < 0) {
log_ratelimit_error_errno(r, JOURNAL_LOG_RATELIMIT, "Can't write entry: %m");
goto finish;
r = 0;
finish:
- if (s->system_journal)
- journal_file_post_change(s->system_journal);
+ if (m->system_journal)
+ journal_file_post_change(m->system_journal);
/* Save parent directories of runtime journals before closing runtime journals. */
_cleanup_strv_free_ char **dirs = NULL;
/* Remove the runtime directory if the all entries are successfully flushed to /var/. */
if (r >= 0) {
- r = rm_rf(s->runtime_storage.path, REMOVE_ROOT);
+ r = rm_rf(m->runtime_storage.path, REMOVE_ROOT);
if (r < 0)
- log_debug_errno(r, "Failed to remove runtime journal directory %s, ignoring: %m", s->runtime_storage.path);
+ log_debug_errno(r, "Failed to remove runtime journal directory %s, ignoring: %m", m->runtime_storage.path);
else
- log_debug("Removed runtime journal directory %s.", s->runtime_storage.path);
+ log_debug("Removed runtime journal directory %s.", m->runtime_storage.path);
/* The initrd may have a different machine ID from the host's one. Typically, that happens
* when our tests are running on qemu, as the host's initrd is picked as is without updating
}
}
- server_driver_message(s, 0,
+ manager_driver_message(m, 0,
LOG_MESSAGE("Time spent on flushing to %s is %s for %u entries.",
- s->system_storage.path,
+ m->system_storage.path,
FORMAT_TIMESPAN(usec_sub_unsigned(now(CLOCK_MONOTONIC), start), 0),
n));
- fn = strjoina(s->runtime_directory, "/flushed");
+ fn = strjoina(m->runtime_directory, "/flushed");
k = touch(fn);
if (k < 0)
log_ratelimit_warning_errno(k, JOURNAL_LOG_RATELIMIT,
"Failed to touch %s, ignoring: %m", fn);
- server_refresh_idle_timer(s);
+ manager_refresh_idle_timer(m);
return r;
}
-int server_relinquish_var(Server *s) {
- assert(s);
+int manager_relinquish_var(Manager *m) {
+ assert(m);
- if (s->storage == STORAGE_NONE)
+ if (m->storage == STORAGE_NONE)
return 0;
- if (s->namespace) /* Concept does not exist for namespaced instances */
+ if (m->namespace) /* Concept does not exist for namespaced instances */
return -EOPNOTSUPP;
- if (s->runtime_journal && !s->system_journal)
+ if (m->runtime_journal && !m->system_journal)
return 0;
- log_debug("Relinquishing %s...", s->system_storage.path);
+ log_debug("Relinquishing %s...", m->system_storage.path);
- (void) server_system_journal_open(s, /* flush_requested */ false, /* relinquish_requested=*/ true);
+ (void) manager_system_journal_open(m, /* flush_requested */ false, /* relinquish_requested=*/ true);
- s->system_journal = journal_file_offline_close(s->system_journal);
- ordered_hashmap_clear(s->user_journals);
- set_clear(s->deferred_closes);
+ m->system_journal = journal_file_offline_close(m->system_journal);
+ ordered_hashmap_clear(m->user_journals);
+ set_clear(m->deferred_closes);
- server_refresh_idle_timer(s);
+ manager_refresh_idle_timer(m);
return 0;
}
-int server_process_datagram(
+int manager_process_datagram(
sd_event_source *es,
int fd,
uint32_t revents,
void *userdata) {
- size_t label_len = 0, m;
- Server *s = ASSERT_PTR(userdata);
+ size_t label_len = 0, mm;
+ Manager *m = ASSERT_PTR(userdata);
struct ucred *ucred = NULL;
struct timeval tv_buf, *tv = NULL;
struct cmsghdr *cmsg;
.msg_namelen = sizeof(sa),
};
- assert(fd == s->native_fd || fd == s->syslog_fd || fd == s->audit_fd);
+ assert(fd == m->native_fd || fd == m->syslog_fd || fd == m->audit_fd);
if (revents != EPOLLIN)
return log_error_errno(SYNTHETIC_ERRNO(EIO),
(void) ioctl(fd, SIOCINQ, &v);
/* Fix it up, if it is too small. We use the same fixed value as auditd here. Awful! */
- m = PAGE_ALIGN(MAX3((size_t) v + 1,
+ mm = PAGE_ALIGN(MAX3((size_t) v + 1,
(size_t) LINE_MAX,
ALIGN(sizeof(struct nlmsghdr)) + ALIGN((size_t) MAX_AUDIT_MESSAGE_LENGTH)) + 1);
- if (!GREEDY_REALLOC(s->buffer, m))
+ if (!GREEDY_REALLOC(m->buffer, mm))
return log_oom();
- iovec = IOVEC_MAKE(s->buffer, MALLOC_ELEMENTSOF(s->buffer) - 1); /* Leave room for trailing NUL we add later */
+ iovec = IOVEC_MAKE(m->buffer, MALLOC_ELEMENTSOF(m->buffer) - 1); /* Leave room for trailing NUL we add later */
n = recvmsg_safe(fd, &msghdr, MSG_DONTWAIT|MSG_CMSG_CLOEXEC);
if (ERRNO_IS_NEG_TRANSIENT(n))
}
/* And a trailing NUL, just in case */
- s->buffer[n] = 0;
+ m->buffer[n] = 0;
- if (fd == s->syslog_fd) {
+ if (fd == m->syslog_fd) {
if (n > 0 && n_fds == 0)
- server_process_syslog_message(s, s->buffer, n, ucred, tv, label, label_len);
+ manager_process_syslog_message(m, m->buffer, n, ucred, tv, label, label_len);
else if (n_fds > 0)
log_ratelimit_warning(JOURNAL_LOG_RATELIMIT,
"Got file descriptors via syslog socket. Ignoring.");
if (tv)
- s->syslog_timestamp = timeval_load(tv);
+ m->syslog_timestamp = timeval_load(tv);
- } else if (fd == s->native_fd) {
+ } else if (fd == m->native_fd) {
if (n > 0 && n_fds == 0)
- server_process_native_message(s, s->buffer, n, ucred, tv, label, label_len);
+ manager_process_native_message(m, m->buffer, n, ucred, tv, label, label_len);
else if (n == 0 && n_fds == 1)
- (void) server_process_native_file(s, fds[0], ucred, tv, label, label_len);
+ (void) manager_process_native_file(m, fds[0], ucred, tv, label, label_len);
else if (n_fds > 0)
log_ratelimit_warning(JOURNAL_LOG_RATELIMIT,
"Got too many file descriptors via native socket. Ignoring.");
if (tv)
- s->native_timestamp = timeval_load(tv);
+ m->native_timestamp = timeval_load(tv);
} else {
- assert(fd == s->audit_fd);
+ assert(fd == m->audit_fd);
if (n > 0 && n_fds == 0)
- server_process_audit_message(s, s->buffer, n, ucred, &sa, msghdr.msg_namelen);
+ manager_process_audit_message(m, m->buffer, n, ucred, &sa, msghdr.msg_namelen);
else if (n_fds > 0)
log_ratelimit_warning(JOURNAL_LOG_RATELIMIT,
"Got file descriptors via audit socket. Ignoring.");
close_many(fds, n_fds);
if (tv)
- sync_req_revalidate_by_timestamp(s);
+ sync_req_revalidate_by_timestamp(m);
- server_refresh_idle_timer(s);
+ manager_refresh_idle_timer(m);
return 0;
}
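The buffer sizing above queries the length of the pending datagram with SIOCINQ and then rounds up to a page-aligned minimum that can also hold a full audit record, plus a trailing NUL. A rough standalone sketch of that calculation; the page size and audit limit are assumed constants here, not the real definitions:

#include <limits.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/sockios.h>
#include <unistd.h>

#define PAGE_SZ 4096UL          /* assumed page size, journald queries the real one */
#define AUDIT_MSG_MAX 8960UL    /* stand-in for MAX_AUDIT_MESSAGE_LENGTH */

static size_t page_align(size_t x) {
        return (x + PAGE_SZ - 1) & ~(PAGE_SZ - 1);
}

static size_t datagram_buffer_size(int fd) {
        int v = 0;

        /* Ask the kernel how many bytes are queued on the socket. */
        (void) ioctl(fd, SIOCINQ, &v);

        size_t want = (size_t) v + 1;
        if (want < LINE_MAX)
                want = LINE_MAX;
        if (want < AUDIT_MSG_MAX)
                want = AUDIT_MSG_MAX;

        /* Add room for the trailing NUL appended after recvmsg(). */
        return page_align(want + 1);
}

int main(void) {
        int sv[2];
        if (socketpair(AF_UNIX, SOCK_DGRAM, 0, sv) < 0)
                return 1;

        (void) write(sv[1], "hello", 5);
        printf("receive buffer: %zu bytes\n", datagram_buffer_size(sv[0]));
        return 0;
}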
-void server_full_flush(Server *s) {
- assert(s);
+void manager_full_flush(Manager *m) {
+ assert(m);
- (void) server_flush_to_var(s, false);
- server_sync(s, /* wait = */ false);
- server_vacuum(s, false);
+ (void) manager_flush_to_var(m, false);
+ manager_sync(m, /* wait = */ false);
+ manager_vacuum(m, false);
- server_space_usage_message(s, NULL);
+ manager_space_usage_message(m, NULL);
- server_refresh_idle_timer(s);
+ manager_refresh_idle_timer(m);
}
static int dispatch_sigusr1(sd_event_source *es, const struct signalfd_siginfo *si, void *userdata) {
- Server *s = ASSERT_PTR(userdata);
+ Manager *m = ASSERT_PTR(userdata);
- if (s->namespace) {
+ if (m->namespace) {
log_error("Received SIGUSR1 signal from PID %u, but flushing runtime journals not supported for namespaced instances.", si->ssi_pid);
return 0;
}
log_info("Received SIGUSR1 signal from PID %u, as request to flush runtime journal.", si->ssi_pid);
- server_full_flush(s);
+ manager_full_flush(m);
return 0;
}
-void server_full_rotate(Server *s) {
+void manager_full_rotate(Manager *m) {
const char *fn;
int r;
- assert(s);
+ assert(m);
- server_rotate(s);
- server_vacuum(s, true);
+ manager_rotate(m);
+ manager_vacuum(m, true);
- if (s->system_journal)
- patch_min_use(&s->system_storage);
- if (s->runtime_journal)
- patch_min_use(&s->runtime_storage);
+ if (m->system_journal)
+ patch_min_use(&m->system_storage);
+ if (m->runtime_journal)
+ patch_min_use(&m->runtime_storage);
/* Let clients know when the most recent rotation happened. */
- fn = strjoina(s->runtime_directory, "/rotated");
+ fn = strjoina(m->runtime_directory, "/rotated");
r = write_timestamp_file_atomic(fn, now(CLOCK_MONOTONIC));
if (r < 0)
log_ratelimit_warning_errno(r, JOURNAL_LOG_RATELIMIT,
}
static int dispatch_sigusr2(sd_event_source *es, const struct signalfd_siginfo *si, void *userdata) {
- Server *s = ASSERT_PTR(userdata);
+ Manager *m = ASSERT_PTR(userdata);
log_info("Received SIGUSR2 signal from PID %u, as request to rotate journal, rotating.", si->ssi_pid);
- server_full_rotate(s);
+ manager_full_rotate(m);
return 0;
}
static int dispatch_sigterm(sd_event_source *es, const struct signalfd_siginfo *si, void *userdata) {
_cleanup_(sd_event_source_disable_unrefp) sd_event_source *news = NULL;
- Server *s = ASSERT_PTR(userdata);
+ Manager *m = ASSERT_PTR(userdata);
int r;
log_received_signal(LOG_INFO, si);
* start up next – unless we are going down for the final system shutdown, in which case everything
* is lost. */
- r = sd_event_add_defer(s->event, &news, NULL, NULL); /* NULL handler means → exit when triggered */
+ r = sd_event_add_defer(m->event, &news, NULL, NULL); /* NULL handler means → exit when triggered */
if (r < 0) {
log_error_errno(r, "Failed to allocate exit idle event handler: %m");
goto fail;
news = sd_event_source_unref(news);
- r = sd_event_add_time_relative(s->event, &news, CLOCK_MONOTONIC, 10 * USEC_PER_SEC, 0, NULL, NULL);
+ r = sd_event_add_time_relative(m->event, &news, CLOCK_MONOTONIC, 10 * USEC_PER_SEC, 0, NULL, NULL);
if (r < 0) {
log_error_errno(r, "Failed to allocate exit timeout event handler: %m");
goto fail;
return 0;
fail:
- sd_event_exit(s->event, 0);
+ sd_event_exit(m->event, 0);
return 0;
}
-void server_full_sync(Server *s, bool wait) {
+void manager_full_sync(Manager *m, bool wait) {
const char *fn;
int r;
- assert(s);
+ assert(m);
- server_sync(s, wait);
+ manager_sync(m, wait);
/* Let clients know when the most recent sync happened. */
- fn = strjoina(s->runtime_directory, "/synced");
+ fn = strjoina(m->runtime_directory, "/synced");
r = write_timestamp_file_atomic(fn, now(CLOCK_MONOTONIC));
if (r < 0)
log_ratelimit_warning_errno(r, JOURNAL_LOG_RATELIMIT,
}
static int dispatch_sigrtmin1(sd_event_source *es, const struct signalfd_siginfo *si, void *userdata) {
- Server *s = ASSERT_PTR(userdata);
+ Manager *m = ASSERT_PTR(userdata);
log_debug("Received SIGRTMIN1 signal from PID %u, as request to sync.", si->ssi_pid);
- server_full_sync(s, /* wait = */ false);
+ manager_full_sync(m, /* wait = */ false);
return 0;
}
-static int server_setup_signals(Server *s) {
+static int manager_setup_signals(Manager *m) {
int r;
- assert(s);
+ assert(m);
- r = sd_event_add_signal(s->event, &s->sigusr1_event_source, SIGUSR1|SD_EVENT_SIGNAL_PROCMASK, dispatch_sigusr1, s);
+ r = sd_event_add_signal(m->event, &m->sigusr1_event_source, SIGUSR1|SD_EVENT_SIGNAL_PROCMASK, dispatch_sigusr1, m);
if (r < 0)
return r;
- r = sd_event_add_signal(s->event, &s->sigusr2_event_source, SIGUSR2|SD_EVENT_SIGNAL_PROCMASK, dispatch_sigusr2, s);
+ r = sd_event_add_signal(m->event, &m->sigusr2_event_source, SIGUSR2|SD_EVENT_SIGNAL_PROCMASK, dispatch_sigusr2, m);
if (r < 0)
return r;
- r = sd_event_add_signal(s->event, &s->sigterm_event_source, SIGTERM|SD_EVENT_SIGNAL_PROCMASK, dispatch_sigterm, s);
+ r = sd_event_add_signal(m->event, &m->sigterm_event_source, SIGTERM|SD_EVENT_SIGNAL_PROCMASK, dispatch_sigterm, m);
if (r < 0)
return r;
/* Let's process SIGTERM early, so that we definitely react to it */
- r = sd_event_source_set_priority(s->sigterm_event_source, SD_EVENT_PRIORITY_IMPORTANT-10);
+ r = sd_event_source_set_priority(m->sigterm_event_source, SD_EVENT_PRIORITY_IMPORTANT-10);
if (r < 0)
return r;
/* When journald is invoked on the terminal (when debugging), it's useful if C-c is handled
* equivalently to SIGTERM. */
- r = sd_event_add_signal(s->event, &s->sigint_event_source, SIGINT|SD_EVENT_SIGNAL_PROCMASK, dispatch_sigterm, s);
+ r = sd_event_add_signal(m->event, &m->sigint_event_source, SIGINT|SD_EVENT_SIGNAL_PROCMASK, dispatch_sigterm, m);
if (r < 0)
return r;
- r = sd_event_source_set_priority(s->sigint_event_source, SD_EVENT_PRIORITY_IMPORTANT-10);
+ r = sd_event_source_set_priority(m->sigint_event_source, SD_EVENT_PRIORITY_IMPORTANT-10);
if (r < 0)
return r;
/* SIGRTMIN+1 causes an immediate sync. We process this very late, so that everything else queued at
* this point is really written to disk. Clients can watch /run/systemd/journal/synced with inotify
* until its mtime changes to see when a sync happened. */
- r = sd_event_add_signal(s->event, &s->sigrtmin1_event_source, (SIGRTMIN+1)|SD_EVENT_SIGNAL_PROCMASK, dispatch_sigrtmin1, s);
+ r = sd_event_add_signal(m->event, &m->sigrtmin1_event_source, (SIGRTMIN+1)|SD_EVENT_SIGNAL_PROCMASK, dispatch_sigrtmin1, m);
if (r < 0)
return r;
- r = sd_event_source_set_priority(s->sigrtmin1_event_source, SD_EVENT_PRIORITY_NORMAL+15);
+ r = sd_event_source_set_priority(m->sigrtmin1_event_source, SD_EVENT_PRIORITY_NORMAL+15);
if (r < 0)
return r;
- r = sd_event_add_signal(s->event, /* ret_event_source= */ NULL, (SIGRTMIN+18)|SD_EVENT_SIGNAL_PROCMASK, sigrtmin18_handler, &s->sigrtmin18_info);
+ r = sd_event_add_signal(m->event, /* ret_event_source= */ NULL, (SIGRTMIN+18)|SD_EVENT_SIGNAL_PROCMASK, sigrtmin18_handler, &m->sigrtmin18_info);
if (r < 0)
return r;
}
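The SIGRTMIN+1 comment describes a small client-visible protocol: request a sync, then watch the synced timestamp file under the runtime directory with inotify. A minimal client-side sketch of the waiting half, under the assumption that watching the directory for events on the "synced" entry is sufficient; sending the signal itself is not shown.

#include <stdio.h>
#include <string.h>
#include <sys/inotify.h>
#include <unistd.h>

int main(void) {
        int fd = inotify_init1(IN_CLOEXEC);
        if (fd < 0)
                return 1;

        /* Watch the directory rather than the file, so that an atomic
         * rename of a fresh "synced" file is also caught. */
        if (inotify_add_watch(fd, "/run/systemd/journal",
                              IN_MOVED_TO | IN_CLOSE_WRITE | IN_ATTRIB) < 0)
                return 1;

        char buf[4096]
                __attribute__((aligned(__alignof__(struct inotify_event))));

        for (;;) {
                ssize_t n = read(fd, buf, sizeof(buf));   /* blocks for events */
                if (n <= 0)
                        break;

                for (char *p = buf; p < buf + n; ) {
                        struct inotify_event *e = (struct inotify_event *) p;

                        if (e->len > 0 && strcmp(e->name, "synced") == 0) {
                                printf("journal sync observed\n");
                                close(fd);
                                return 0;
                        }
                        p += sizeof(struct inotify_event) + e->len;
                }
        }

        close(fd);
        return 1;
}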
static int parse_proc_cmdline_item(const char *key, const char *value, void *data) {
- Server *s = ASSERT_PTR(data);
+ Manager *m = ASSERT_PTR(data);
int r;
if (proc_cmdline_key_streq(key, "systemd.journald.forward_to_syslog")) {
if (r < 0)
log_warning("Failed to parse forward to syslog switch \"%s\". Ignoring.", value);
else
- s->forward_to_syslog = r;
+ m->forward_to_syslog = r;
} else if (proc_cmdline_key_streq(key, "systemd.journald.forward_to_kmsg")) {
if (r < 0)
log_warning("Failed to parse forward to kmsg switch \"%s\". Ignoring.", value);
else
- s->forward_to_kmsg = r;
+ m->forward_to_kmsg = r;
} else if (proc_cmdline_key_streq(key, "systemd.journald.forward_to_console")) {
if (r < 0)
log_warning("Failed to parse forward to console switch \"%s\". Ignoring.", value);
else
- s->forward_to_console = r;
+ m->forward_to_console = r;
} else if (proc_cmdline_key_streq(key, "systemd.journald.forward_to_wall")) {
if (r < 0)
log_warning("Failed to parse forward to wall switch \"%s\". Ignoring.", value);
else
- s->forward_to_wall = r;
+ m->forward_to_wall = r;
} else if (proc_cmdline_key_streq(key, "systemd.journald.max_level_console")) {
if (r < 0)
log_warning("Failed to parse max level console value \"%s\". Ignoring.", value);
else
- s->max_level_console = r;
+ m->max_level_console = r;
} else if (proc_cmdline_key_streq(key, "systemd.journald.max_level_store")) {
if (r < 0)
log_warning("Failed to parse max level store value \"%s\". Ignoring.", value);
else
- s->max_level_store = r;
+ m->max_level_store = r;
} else if (proc_cmdline_key_streq(key, "systemd.journald.max_level_syslog")) {
if (r < 0)
log_warning("Failed to parse max level syslog value \"%s\". Ignoring.", value);
else
- s->max_level_syslog = r;
+ m->max_level_syslog = r;
} else if (proc_cmdline_key_streq(key, "systemd.journald.max_level_kmsg")) {
if (r < 0)
log_warning("Failed to parse max level kmsg value \"%s\". Ignoring.", value);
else
- s->max_level_kmsg = r;
+ m->max_level_kmsg = r;
} else if (proc_cmdline_key_streq(key, "systemd.journald.max_level_wall")) {
if (r < 0)
log_warning("Failed to parse max level wall value \"%s\". Ignoring.", value);
else
- s->max_level_wall = r;
+ m->max_level_wall = r;
} else if (proc_cmdline_key_streq(key, "systemd.journald.max_level_socket")) {
if (r < 0)
log_warning("Failed to parse max level socket value \"%s\". Ignoring.", value);
else
- s->max_level_socket = r;
+ m->max_level_socket = r;
} else if (startswith(key, "systemd.journald"))
log_warning("Unknown journald kernel command line option \"%s\". Ignoring.", key);
return 0;
}
-static int server_parse_config_file(Server *s) {
+static int manager_parse_config_file(Manager *m) {
const char *conf_file;
- assert(s);
+ assert(m);
- if (s->namespace)
- conf_file = strjoina("systemd/journald@", s->namespace, ".conf");
+ if (m->namespace)
+ conf_file = strjoina("systemd/journald@", m->namespace, ".conf");
else
conf_file = "systemd/journald.conf";
"Journal\0",
config_item_perf_lookup, journald_gperf_lookup,
CONFIG_PARSE_WARN,
- /* userdata= */ s);
+ /* userdata= */ m);
}
-static int server_dispatch_sync(sd_event_source *es, usec_t t, void *userdata) {
- Server *s = ASSERT_PTR(userdata);
+static int manager_dispatch_sync(sd_event_source *es, usec_t t, void *userdata) {
+ Manager *m = ASSERT_PTR(userdata);
- server_sync(s, /* wait = */ false);
+ manager_sync(m, /* wait = */ false);
return 0;
}
-static int server_schedule_sync(Server *s, int priority) {
+static int manager_schedule_sync(Manager *m, int priority) {
int r;
- assert(s);
+ assert(m);
if (priority <= LOG_CRIT) {
/* Immediately sync to disk when this is of priority CRIT, ALERT, EMERG */
- server_sync(s, /* wait = */ false);
+ manager_sync(m, /* wait = */ false);
return 0;
}
- if (!s->event || sd_event_get_state(s->event) == SD_EVENT_FINISHED) {
+ if (!m->event || sd_event_get_state(m->event) == SD_EVENT_FINISHED) {
/* Shutting down the server? Let's sync immediately. */
- server_sync(s, /* wait = */ false);
+ manager_sync(m, /* wait = */ false);
return 0;
}
- if (s->sync_scheduled)
+ if (m->sync_scheduled)
return 0;
- if (s->sync_interval_usec > 0) {
+ if (m->sync_interval_usec > 0) {
- if (!s->sync_event_source) {
+ if (!m->sync_event_source) {
r = sd_event_add_time_relative(
- s->event,
- &s->sync_event_source,
+ m->event,
+ &m->sync_event_source,
CLOCK_MONOTONIC,
- s->sync_interval_usec, 0,
- server_dispatch_sync, s);
+ m->sync_interval_usec, 0,
+ manager_dispatch_sync, m);
if (r < 0)
return r;
- r = sd_event_source_set_priority(s->sync_event_source, SD_EVENT_PRIORITY_IMPORTANT);
+ r = sd_event_source_set_priority(m->sync_event_source, SD_EVENT_PRIORITY_IMPORTANT);
} else {
- r = sd_event_source_set_time_relative(s->sync_event_source, s->sync_interval_usec);
+ r = sd_event_source_set_time_relative(m->sync_event_source, m->sync_interval_usec);
if (r < 0)
return r;
- r = sd_event_source_set_enabled(s->sync_event_source, SD_EVENT_ONESHOT);
+ r = sd_event_source_set_enabled(m->sync_event_source, SD_EVENT_ONESHOT);
}
if (r < 0)
return r;
- s->sync_scheduled = true;
+ m->sync_scheduled = true;
}
return 0;
}
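manager_schedule_sync() implements a simple policy: messages at CRIT or higher are synced to disk immediately, everything else merely arms one timer per sync interval. A compact sketch of that policy with illustrative stand-ins instead of sd-event:

#include <stdbool.h>
#include <stdio.h>
#include <syslog.h>

struct syncer {
        bool scheduled;            /* is a sync timer already pending? */
        unsigned interval_sec;     /* 0 disables deferred syncing */
};

static void sync_now(struct syncer *s) {
        printf("syncing journal files now\n");
        s->scheduled = false;
}

static void arm_timer(struct syncer *s) {
        printf("sync timer armed for %us\n", s->interval_sec);
        s->scheduled = true;
}

static void schedule_sync(struct syncer *s, int priority) {
        if (LOG_PRI(priority) <= LOG_CRIT) {   /* EMERG, ALERT, CRIT */
                sync_now(s);                   /* urgent: sync immediately */
                return;
        }

        if (s->scheduled || s->interval_sec == 0)
                return;                        /* timer already pending or disabled */

        arm_timer(s);
}

int main(void) {
        struct syncer s = { .scheduled = false, .interval_sec = 300 };

        schedule_sync(&s, LOG_INFO);   /* arms the timer */
        schedule_sync(&s, LOG_INFO);   /* no-op, timer already pending */
        schedule_sync(&s, LOG_CRIT);   /* bypasses the timer */
        return 0;
}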
static int dispatch_hostname_change(sd_event_source *es, int fd, uint32_t revents, void *userdata) {
- Server *s = ASSERT_PTR(userdata);
+ Manager *m = ASSERT_PTR(userdata);
- server_cache_hostname(s);
+ manager_cache_hostname(m);
return 0;
}
-static int server_open_hostname(Server *s) {
+static int manager_open_hostname(Manager *m) {
int r;
- assert(s);
+ assert(m);
- s->hostname_fd = open("/proc/sys/kernel/hostname",
+ m->hostname_fd = open("/proc/sys/kernel/hostname",
O_RDONLY|O_CLOEXEC|O_NONBLOCK|O_NOCTTY);
- if (s->hostname_fd < 0)
+ if (m->hostname_fd < 0)
return log_error_errno(errno, "Failed to open /proc/sys/kernel/hostname: %m");
- r = sd_event_add_io(s->event, &s->hostname_event_source, s->hostname_fd, 0, dispatch_hostname_change, s);
+ r = sd_event_add_io(m->event, &m->hostname_event_source, m->hostname_fd, 0, dispatch_hostname_change, m);
if (r < 0)
return log_error_errno(r, "Failed to register hostname fd in event loop: %m");
- r = sd_event_source_set_priority(s->hostname_event_source, SD_EVENT_PRIORITY_IMPORTANT-10);
+ r = sd_event_source_set_priority(m->hostname_event_source, SD_EVENT_PRIORITY_IMPORTANT-10);
if (r < 0)
return log_error_errno(r, "Failed to adjust priority of hostname event source: %m");
}
static int dispatch_notify_event(sd_event_source *es, int fd, uint32_t revents, void *userdata) {
- Server *s = ASSERT_PTR(userdata);
+ Manager *m = ASSERT_PTR(userdata);
int r;
- assert(s->notify_event_source == es);
- assert(s->notify_fd == fd);
+ assert(m->notify_event_source == es);
+ assert(m->notify_fd == fd);
/* The $NOTIFY_SOCKET is writable again, now send exactly one
* message on it. Either it's the watchdog event, the initial
* to write anymore, turn our event source off. The next time
* there's something to send it will be turned on again. */
- if (!s->sent_notify_ready) {
- if (send(s->notify_fd, NOTIFY_READY_MESSAGE, strlen(NOTIFY_READY_MESSAGE), MSG_DONTWAIT) < 0) {
+ if (!m->sent_notify_ready) {
+ if (send(m->notify_fd, NOTIFY_READY_MESSAGE, strlen(NOTIFY_READY_MESSAGE), MSG_DONTWAIT) < 0) {
if (errno == EAGAIN)
return 0;
return log_error_errno(errno, "Failed to send READY=1 notification message: %m");
}
- s->sent_notify_ready = true;
+ m->sent_notify_ready = true;
log_debug("Sent READY=1 notification.");
- } else if (s->send_watchdog) {
+ } else if (m->send_watchdog) {
static const char p[] = "WATCHDOG=1";
- if (send(s->notify_fd, p, strlen(p), MSG_DONTWAIT) < 0) {
+ if (send(m->notify_fd, p, strlen(p), MSG_DONTWAIT) < 0) {
if (errno == EAGAIN)
return 0;
return log_error_errno(errno, "Failed to send WATCHDOG=1 notification message: %m");
}
- s->send_watchdog = false;
+ m->send_watchdog = false;
log_debug("Sent WATCHDOG=1 notification.");
- } else if (s->stdout_streams_notify_queue)
+ } else if (m->stdout_streams_notify_queue)
/* Dispatch one stream notification event */
- stdout_stream_send_notify(s->stdout_streams_notify_queue);
+ stdout_stream_send_notify(m->stdout_streams_notify_queue);
/* Leave us enabled if there's still more to do. */
- if (s->send_watchdog || s->stdout_streams_notify_queue)
+ if (m->send_watchdog || m->stdout_streams_notify_queue)
return 0;
/* There was nothing to do anymore, let's turn ourselves off. */
}
static int dispatch_watchdog(sd_event_source *es, uint64_t usec, void *userdata) {
- Server *s = ASSERT_PTR(userdata);
+ Manager *m = ASSERT_PTR(userdata);
int r;
- s->send_watchdog = true;
+ m->send_watchdog = true;
- r = sd_event_source_set_enabled(s->notify_event_source, SD_EVENT_ON);
+ r = sd_event_source_set_enabled(m->notify_event_source, SD_EVENT_ON);
if (r < 0)
log_warning_errno(r, "Failed to turn on notify event source: %m");
- r = sd_event_source_set_time(s->watchdog_event_source, usec + s->watchdog_usec / 2);
+ r = sd_event_source_set_time(m->watchdog_event_source, usec + m->watchdog_usec / 2);
if (r < 0)
return log_error_errno(r, "Failed to restart watchdog event source: %m");
- r = sd_event_source_set_enabled(s->watchdog_event_source, SD_EVENT_ON);
+ r = sd_event_source_set_enabled(m->watchdog_event_source, SD_EVENT_ON);
if (r < 0)
return log_error_errno(r, "Failed to enable watchdog event source: %m");
return 0;
}
-static int server_connect_notify(Server *s) {
+static int manager_connect_notify(Manager *m) {
union sockaddr_union sa;
socklen_t sa_len;
const char *e;
int r;
- assert(s);
- assert(s->notify_fd < 0);
- assert(!s->notify_event_source);
+ assert(m);
+ assert(m->notify_fd < 0);
+ assert(!m->notify_event_source);
/*
* So here's the problem: we'd like to send notification messages to PID 1, but we cannot do that via
return log_error_errno(r, "NOTIFY_SOCKET set to invalid value '%s': %m", e);
sa_len = r;
- s->notify_fd = socket(AF_UNIX, SOCK_DGRAM|SOCK_CLOEXEC|SOCK_NONBLOCK, 0);
- if (s->notify_fd < 0)
+ m->notify_fd = socket(AF_UNIX, SOCK_DGRAM|SOCK_CLOEXEC|SOCK_NONBLOCK, 0);
+ if (m->notify_fd < 0)
return log_error_errno(errno, "Failed to create notify socket: %m");
- (void) fd_inc_sndbuf(s->notify_fd, NOTIFY_SNDBUF_SIZE);
+ (void) fd_inc_sndbuf(m->notify_fd, NOTIFY_SNDBUF_SIZE);
- r = connect(s->notify_fd, &sa.sa, sa_len);
+ r = connect(m->notify_fd, &sa.sa, sa_len);
if (r < 0)
return log_error_errno(errno, "Failed to connect to notify socket: %m");
- r = sd_event_add_io(s->event, &s->notify_event_source, s->notify_fd, EPOLLOUT, dispatch_notify_event, s);
+ r = sd_event_add_io(m->event, &m->notify_event_source, m->notify_fd, EPOLLOUT, dispatch_notify_event, m);
if (r < 0)
return log_error_errno(r, "Failed to watch notification socket: %m");
- if (sd_watchdog_enabled(false, &s->watchdog_usec) > 0) {
- s->send_watchdog = true;
+ if (sd_watchdog_enabled(false, &m->watchdog_usec) > 0) {
+ m->send_watchdog = true;
- r = sd_event_add_time_relative(s->event, &s->watchdog_event_source, CLOCK_MONOTONIC, s->watchdog_usec/2, s->watchdog_usec/4, dispatch_watchdog, s);
+ r = sd_event_add_time_relative(m->event, &m->watchdog_event_source, CLOCK_MONOTONIC, m->watchdog_usec/2, m->watchdog_usec/4, dispatch_watchdog, m);
if (r < 0)
return log_error_errno(r, "Failed to add watchdog time event: %m");
}
return 0;
}
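manager_connect_notify() talks to PID 1 by hand instead of using sd_notify(), so that readiness and watchdog messages can be sent one at a time whenever the socket is writable. A reduced sketch of that notification path over $NOTIFY_SOCKET; abstract sockets, error details and the event-driven flow control are deliberately left out.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>

static int notify(const char *state) {
        const char *path = getenv("NOTIFY_SOCKET");
        if (!path || path[0] != '/')
                return -1;                       /* unset or abstract socket, skip */

        int fd = socket(AF_UNIX, SOCK_DGRAM | SOCK_CLOEXEC | SOCK_NONBLOCK, 0);
        if (fd < 0)
                return -1;

        struct sockaddr_un sa = { .sun_family = AF_UNIX };
        strncpy(sa.sun_path, path, sizeof(sa.sun_path) - 1);

        ssize_t n = -1;
        if (connect(fd, (struct sockaddr*) &sa, sizeof(sa)) >= 0)
                n = send(fd, state, strlen(state), MSG_DONTWAIT);

        close(fd);
        return n < 0 ? -1 : 0;
}

int main(void) {
        if (notify("READY=1") < 0)
                fprintf(stderr, "no notification socket, skipping\n");
        return 0;
}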
-int server_map_seqnum_file(
- Server *s,
+int manager_map_seqnum_file(
+ Manager *m,
const char *fname,
size_t size,
void **ret) {
uint64_t *p;
int r;
- assert(s);
+ assert(m);
assert(fname);
assert(size > 0);
assert(ret);
- fn = path_join(s->runtime_directory, fname);
+ fn = path_join(m->runtime_directory, fname);
if (!fn)
return -ENOMEM;
return 0;
}
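manager_map_seqnum_file() persists sequence-number state across restarts by mapping a small file from the runtime directory. The idea in isolation, with an illustrative path and a single counter instead of the SeqnumData layout:

#include <fcntl.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

static uint64_t *map_counter(const char *path) {
        int fd = open(path, O_RDWR | O_CREAT | O_CLOEXEC, 0644);
        if (fd < 0)
                return NULL;

        /* Make sure the file is large enough to back one uint64_t. */
        if (ftruncate(fd, sizeof(uint64_t)) < 0) {
                close(fd);
                return NULL;
        }

        void *p = mmap(NULL, sizeof(uint64_t), PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        close(fd);                       /* the mapping stays valid after close */
        return p == MAP_FAILED ? NULL : p;
}

int main(void) {
        uint64_t *seqnum = map_counter("/tmp/example-seqnum");
        if (!seqnum)
                return 1;

        printf("previous value: %" PRIu64 "\n", *seqnum);
        (*seqnum)++;                     /* survives a restart via the mapping */

        munmap(seqnum, sizeof(uint64_t));
        return 0;
}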
-static void server_unmap_seqnum_file(void *p, size_t size) {
+static void manager_unmap_seqnum_file(void *p, size_t size) {
assert(size > 0);
if (!p)
assert_se(munmap(p, size) >= 0);
}
-static bool server_is_idle(Server *s) {
- assert(s);
+static bool manager_is_idle(Manager *m) {
+ assert(m);
/* The server for the main namespace is never idle */
- if (!s->namespace)
+ if (!m->namespace)
return false;
/* If a retention maximum is set that is larger than the idle time, we need to keep running to enforce
* it, hence turn off the idle logic. */
- if (s->max_retention_usec > IDLE_TIMEOUT_USEC)
+ if (m->max_retention_usec > IDLE_TIMEOUT_USEC)
return false;
/* We aren't idle if we have a varlink client */
- if (sd_varlink_server_current_connections(s->varlink_server) > 0)
+ if (sd_varlink_server_current_connections(m->varlink_server) > 0)
return false;
/* If we have stdout streams we aren't idle */
- if (s->n_stdout_streams > 0)
+ if (m->n_stdout_streams > 0)
return false;
return true;
}
-static int server_idle_handler(sd_event_source *source, uint64_t usec, void *userdata) {
- Server *s = ASSERT_PTR(userdata);
+static int manager_idle_handler(sd_event_source *source, uint64_t usec, void *userdata) {
+ Manager *m = ASSERT_PTR(userdata);
assert(source);
- log_debug("Server is idle, exiting.");
- sd_event_exit(s->event, 0);
+ log_debug("Manager is idle, exiting.");
+ sd_event_exit(m->event, 0);
return 0;
}
-int server_start_or_stop_idle_timer(Server *s) {
+int manager_start_or_stop_idle_timer(Manager *m) {
_cleanup_(sd_event_source_unrefp) sd_event_source *source = NULL;
int r;
- assert(s);
+ assert(m);
- if (sd_event_get_state(s->event) == SD_EVENT_FINISHED ||
- !server_is_idle(s)) {
- s->idle_event_source = sd_event_source_disable_unref(s->idle_event_source);
+ if (sd_event_get_state(m->event) == SD_EVENT_FINISHED ||
+ !manager_is_idle(m)) {
+ m->idle_event_source = sd_event_source_disable_unref(m->idle_event_source);
return 0;
}
- if (s->idle_event_source)
+ if (m->idle_event_source)
return 1;
- r = sd_event_add_time_relative(s->event, &source, CLOCK_MONOTONIC, IDLE_TIMEOUT_USEC, 0, server_idle_handler, s);
+ r = sd_event_add_time_relative(m->event, &source, CLOCK_MONOTONIC, IDLE_TIMEOUT_USEC, 0, manager_idle_handler, m);
if (r < 0)
return log_error_errno(r, "Failed to allocate idle timer: %m");
(void) sd_event_source_set_description(source, "idle-timer");
- s->idle_event_source = TAKE_PTR(source);
+ m->idle_event_source = TAKE_PTR(source);
return 1;
}
-static int server_refresh_idle_timer(Server *s) {
+static int manager_refresh_idle_timer(Manager *m) {
int r;
- assert(s);
+ assert(m);
- if (!s->idle_event_source)
+ if (!m->idle_event_source)
return 0;
- r = sd_event_source_set_time_relative(s->idle_event_source, IDLE_TIMEOUT_USEC);
+ r = sd_event_source_set_time_relative(m->idle_event_source, IDLE_TIMEOUT_USEC);
if (r < 0)
return log_error_errno(r, "Failed to refresh idle timer: %m");
return 1;
}
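The idle handling above exits a namespaced instance after a period without activity, with the timer refreshed on every event. A bare-bones sketch of that refresh-on-activity pattern using wall-clock timestamps instead of an sd-event timer:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define IDLE_TIMEOUT_SEC 30   /* illustrative, not the journald constant */

struct idler {
        time_t last_activity;
        bool may_exit;        /* only namespaced instances exit when idle */
};

static void note_activity(struct idler *i) {
        i->last_activity = time(NULL);
}

static bool should_exit(const struct idler *i) {
        if (!i->may_exit)
                return false;
        return time(NULL) - i->last_activity >= IDLE_TIMEOUT_SEC;
}

int main(void) {
        struct idler i = { .may_exit = true };

        note_activity(&i);    /* would be called from every event handler */
        printf(should_exit(&i) ? "idle, exiting\n" : "still busy\n");
        return 0;
}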
-static int server_set_namespace(Server *s, const char *namespace) {
- assert(s);
+static int manager_set_namespace(Manager *m, const char *namespace) {
+ assert(m);
if (!namespace)
return 0;
if (!log_namespace_name_valid(namespace))
return log_error_errno(SYNTHETIC_ERRNO(EINVAL), "Specified namespace name not valid, refusing: %s", namespace);
- s->namespace = strdup(namespace);
- if (!s->namespace)
+ m->namespace = strdup(namespace);
+ if (!m->namespace)
return log_oom();
- s->namespace_field = strjoin("_NAMESPACE=", namespace);
- if (!s->namespace_field)
+ m->namespace_field = strjoin("_NAMESPACE=", namespace);
+ if (!m->namespace_field)
return log_oom();
return 1;
}
-static int server_memory_pressure(sd_event_source *es, void *userdata) {
- Server *s = ASSERT_PTR(userdata);
+static int manager_memory_pressure(sd_event_source *es, void *userdata) {
+ Manager *m = ASSERT_PTR(userdata);
log_info("Under memory pressure, flushing caches.");
/* Flush the cached info we might have about client processes */
- client_context_flush_regular(s);
+ client_context_flush_regular(m);
/* Let's also close all user files (but keep the system/runtime one open) */
for (;;) {
- JournalFile *first = ordered_hashmap_steal_first(s->user_journals);
+ JournalFile *first = ordered_hashmap_steal_first(m->user_journals);
if (!first)
break;
return 0;
}
-static int server_setup_memory_pressure(Server *s) {
+static int manager_setup_memory_pressure(Manager *m) {
int r;
- assert(s);
+ assert(m);
- r = sd_event_add_memory_pressure(s->event, NULL, server_memory_pressure, s);
+ r = sd_event_add_memory_pressure(m->event, NULL, manager_memory_pressure, m);
if (r < 0)
log_full_errno(ERRNO_IS_NOT_SUPPORTED(r) || ERRNO_IS_PRIVILEGE(r) || (r == -EHOSTDOWN) ? LOG_DEBUG : LOG_NOTICE, r,
"Failed to install memory pressure event source, ignoring: %m");
return 0;
}
-static void server_load_credentials(Server *s) {
+static void manager_load_credentials(Manager *m) {
_cleanup_free_ void *data = NULL;
int r;
- assert(s);
+ assert(m);
r = read_credential("journal.forward_to_socket", &data, NULL);
if (r < 0)
log_debug_errno(r, "Failed to read credential journal.forward_to_socket, ignoring: %m");
else {
- r = socket_address_parse(&s->forward_to_socket, data);
+ r = socket_address_parse(&m->forward_to_socket, data);
if (r < 0)
log_debug_errno(r, "Failed to parse socket address '%s' from credential journal.forward_to_socket, ignoring: %m", (char *) data);
}
if (r < 0)
log_debug_errno(r, "Failed to parse storage '%s' from credential journal.storage, ignoring: %m", (char *) data);
else
- s->storage = r;
+ m->storage = r;
}
}
-int server_new(Server **ret) {
- _cleanup_(server_freep) Server *s = NULL;
+int manager_new(Manager **ret) {
+ _cleanup_(manager_freep) Manager *m = NULL;
assert(ret);
- s = new(Server, 1);
- if (!s)
+ m = new(Manager, 1);
+ if (!m)
return -ENOMEM;
- *s = (Server) {
+ *m = (Manager) {
.syslog_fd = -EBADF,
.native_fd = -EBADF,
.stdout_fd = -EBADF,
.burst = DEFAULT_KMSG_OWN_BURST,
},
- .sigrtmin18_info.memory_pressure_handler = server_memory_pressure,
- .sigrtmin18_info.memory_pressure_userdata = s,
+ .sigrtmin18_info.memory_pressure_handler = manager_memory_pressure,
+ .sigrtmin18_info.memory_pressure_userdata = m,
};
- *ret = TAKE_PTR(s);
+ *ret = TAKE_PTR(m);
return 0;
}
-int server_init(Server *s, const char *namespace) {
+int manager_init(Manager *m, const char *namespace) {
const char *native_socket, *syslog_socket, *stdout_socket, *varlink_socket, *e;
_cleanup_fdset_free_ FDSet *fds = NULL;
int n, r, varlink_fd = -EBADF;
bool no_sockets;
- assert(s);
+ assert(m);
- r = server_set_namespace(s, namespace);
+ r = manager_set_namespace(m, namespace);
if (r < 0)
return r;
/* By default, only read from /dev/kmsg if we are the main namespace */
- s->read_kmsg = !s->namespace;
- s->storage = s->namespace ? STORAGE_PERSISTENT : STORAGE_AUTO;
+ m->read_kmsg = !m->namespace;
+ m->storage = m->namespace ? STORAGE_PERSISTENT : STORAGE_AUTO;
- journal_reset_metrics(&s->system_storage.metrics);
- journal_reset_metrics(&s->runtime_storage.metrics);
+ journal_reset_metrics(&m->system_storage.metrics);
+ journal_reset_metrics(&m->runtime_storage.metrics);
- server_load_credentials(s);
- server_parse_config_file(s);
+ manager_load_credentials(m);
+ manager_parse_config_file(m);
- if (!s->namespace) {
+ if (!m->namespace) {
/* Parse kernel command line, but only if we are not a namespace instance */
- r = proc_cmdline_parse(parse_proc_cmdline_item, s, PROC_CMDLINE_STRIP_RD_PREFIX);
+ r = proc_cmdline_parse(parse_proc_cmdline_item, m, PROC_CMDLINE_STRIP_RD_PREFIX);
if (r < 0)
log_warning_errno(r, "Failed to parse kernel command line, ignoring: %m");
}
- if (!!s->ratelimit_interval != !!s->ratelimit_burst) { /* One set to 0 and the other not? */
+ if (!!m->ratelimit_interval != !!m->ratelimit_burst) { /* One set to 0 and the other not? */
log_debug("Setting both rate limit interval and burst from "USEC_FMT",%u to 0,0",
- s->ratelimit_interval, s->ratelimit_burst);
- s->ratelimit_interval = s->ratelimit_burst = 0;
+ m->ratelimit_interval, m->ratelimit_burst);
+ m->ratelimit_interval = m->ratelimit_burst = 0;
}
e = getenv("RUNTIME_DIRECTORY");
if (e)
- s->runtime_directory = strdup(e);
- else if (s->namespace)
- s->runtime_directory = strjoin("/run/systemd/journal.", s->namespace);
+ m->runtime_directory = strdup(e);
+ else if (m->namespace)
+ m->runtime_directory = strjoin("/run/systemd/journal.", m->namespace);
else
- s->runtime_directory = strdup("/run/systemd/journal");
- if (!s->runtime_directory)
+ m->runtime_directory = strdup("/run/systemd/journal");
+ if (!m->runtime_directory)
return log_oom();
- (void) mkdir_p(s->runtime_directory, 0755);
+ (void) mkdir_p(m->runtime_directory, 0755);
- s->user_journals = ordered_hashmap_new(&journal_file_hash_ops_offline_close);
- if (!s->user_journals)
+ m->user_journals = ordered_hashmap_new(&journal_file_hash_ops_offline_close);
+ if (!m->user_journals)
return log_oom();
- s->mmap = mmap_cache_new();
- if (!s->mmap)
+ m->mmap = mmap_cache_new();
+ if (!m->mmap)
return log_oom();
- s->deferred_closes = set_new(&journal_file_hash_ops_offline_close);
- if (!s->deferred_closes)
+ m->deferred_closes = set_new(&journal_file_hash_ops_offline_close);
+ if (!m->deferred_closes)
return log_oom();
- r = sd_event_default(&s->event);
+ r = sd_event_default(&m->event);
if (r < 0)
return log_error_errno(r, "Failed to create event loop: %m");
if (n < 0)
return log_error_errno(n, "Failed to read listening file descriptors from environment: %m");
- native_socket = strjoina(s->runtime_directory, "/socket");
- stdout_socket = strjoina(s->runtime_directory, "/stdout");
- syslog_socket = strjoina(s->runtime_directory, "/dev-log");
- varlink_socket = strjoina(s->runtime_directory, "/io.systemd.journal");
+ native_socket = strjoina(m->runtime_directory, "/socket");
+ stdout_socket = strjoina(m->runtime_directory, "/stdout");
+ syslog_socket = strjoina(m->runtime_directory, "/dev-log");
+ varlink_socket = strjoina(m->runtime_directory, "/io.systemd.journal");
for (int fd = SD_LISTEN_FDS_START; fd < SD_LISTEN_FDS_START + n; fd++)
if (sd_is_socket_unix(fd, SOCK_DGRAM, -1, native_socket, 0) > 0) {
- if (s->native_fd >= 0)
+ if (m->native_fd >= 0)
return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
"Too many native sockets passed.");
- s->native_fd = fd;
+ m->native_fd = fd;
} else if (sd_is_socket_unix(fd, SOCK_STREAM, 1, stdout_socket, 0) > 0) {
- if (s->stdout_fd >= 0)
+ if (m->stdout_fd >= 0)
return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
"Too many stdout sockets passed.");
- s->stdout_fd = fd;
+ m->stdout_fd = fd;
} else if (sd_is_socket_unix(fd, SOCK_DGRAM, -1, syslog_socket, 0) > 0) {
- if (s->syslog_fd >= 0)
+ if (m->syslog_fd >= 0)
return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
"Too many /dev/log sockets passed.");
- s->syslog_fd = fd;
+ m->syslog_fd = fd;
} else if (sd_is_socket_unix(fd, SOCK_STREAM, 1, varlink_socket, 0) > 0) {
varlink_fd = fd;
} else if (sd_is_socket(fd, AF_NETLINK, SOCK_RAW, -1) > 0) {
- if (s->audit_fd >= 0)
+ if (m->audit_fd >= 0)
return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
"Too many audit sockets passed.");
- s->audit_fd = fd;
+ m->audit_fd = fd;
} else {
}
/* Try to restore streams, but don't bother if this fails */
- (void) server_restore_streams(s, fds);
+ (void) manager_restore_streams(m, fds);
if (!fdset_isempty(fds)) {
log_warning("%u unknown file descriptors passed, closing.", fdset_size(fds));
fds = fdset_free(fds);
}
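The loop above classifies the socket-activation file descriptors by matching each one against the expected socket paths. A small sketch of the same classification using the public sd-daemon helpers, with made-up paths (build against libsystemd):

#include <stdio.h>
#include <sys/socket.h>
#include <systemd/sd-daemon.h>

int main(void) {
        int n = sd_listen_fds(/* unset_environment= */ 0);
        if (n < 0) {
                fprintf(stderr, "failed to read passed fds\n");
                return 1;
        }

        for (int fd = SD_LISTEN_FDS_START; fd < SD_LISTEN_FDS_START + n; fd++) {
                /* Match on socket family, type, listening state and bound path. */
                if (sd_is_socket_unix(fd, SOCK_DGRAM, -1, "/run/example/socket", 0) > 0)
                        printf("fd %i: native datagram socket\n", fd);
                else if (sd_is_socket_unix(fd, SOCK_STREAM, 1, "/run/example/stdout", 0) > 0)
                        printf("fd %i: stdout stream socket\n", fd);
                else
                        printf("fd %i: unexpected, ignoring\n", fd);
        }

        return 0;
}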
- no_sockets = s->native_fd < 0 && s->stdout_fd < 0 && s->syslog_fd < 0 && s->audit_fd < 0 && varlink_fd < 0;
+ no_sockets = m->native_fd < 0 && m->stdout_fd < 0 && m->syslog_fd < 0 && m->audit_fd < 0 && varlink_fd < 0;
/* always open stdout, syslog, native, and kmsg sockets */
/* systemd-journald.socket: /run/systemd/journal/stdout */
- r = server_open_stdout_socket(s, stdout_socket);
+ r = manager_open_stdout_socket(m, stdout_socket);
if (r < 0)
return r;
/* systemd-journald-dev-log.socket: /run/systemd/journal/dev-log */
- r = server_open_syslog_socket(s, syslog_socket);
+ r = manager_open_syslog_socket(m, syslog_socket);
if (r < 0)
return r;
/* systemd-journald.socket: /run/systemd/journal/socket */
- r = server_open_native_socket(s, native_socket);
+ r = manager_open_native_socket(m, native_socket);
if (r < 0)
return r;
/* /dev/kmsg */
- r = server_open_dev_kmsg(s);
+ r = manager_open_dev_kmsg(m);
if (r < 0)
return r;
/* Unless we got *some* sockets and not audit, open audit socket */
- if (s->audit_fd >= 0 || no_sockets) {
+ if (m->audit_fd >= 0 || no_sockets) {
log_info("Collecting audit messages is enabled.");
- r = server_open_audit(s);
+ r = manager_open_audit(m);
if (r < 0)
return r;
} else
log_info("Collecting audit messages is disabled.");
- r = server_open_varlink(s, varlink_socket, varlink_fd);
+ r = manager_open_varlink(m, varlink_socket, varlink_fd);
if (r < 0)
return r;
- r = server_map_seqnum_file(s, "seqnum", sizeof(SeqnumData), (void**) &s->seqnum);
+ r = manager_map_seqnum_file(m, "seqnum", sizeof(SeqnumData), (void**) &m->seqnum);
if (r < 0)
return log_error_errno(r, "Failed to map main seqnum file: %m");
- r = server_open_kernel_seqnum(s);
+ r = manager_open_kernel_seqnum(m);
if (r < 0)
return r;
- r = server_open_hostname(s);
+ r = manager_open_hostname(m);
if (r < 0)
return r;
- r = server_setup_signals(s);
+ r = manager_setup_signals(m);
if (r < 0)
return r;
- r = server_setup_memory_pressure(s);
+ r = manager_setup_memory_pressure(m);
if (r < 0)
return r;
- r = cg_get_root_path(&s->cgroup_root);
+ r = cg_get_root_path(&m->cgroup_root);
if (r < 0)
return log_error_errno(r, "Failed to acquire cgroup root path: %m");
- server_cache_hostname(s);
- server_cache_boot_id(s);
- server_cache_machine_id(s);
+ manager_cache_hostname(m);
+ manager_cache_boot_id(m);
+ manager_cache_machine_id(m);
- if (s->namespace)
- s->runtime_storage.path = strjoin("/run/log/journal/", SERVER_MACHINE_ID(s), ".", s->namespace);
+ if (m->namespace)
+ m->runtime_storage.path = strjoin("/run/log/journal/", MANAGER_MACHINE_ID(m), ".", m->namespace);
else
- s->runtime_storage.path = strjoin("/run/log/journal/", SERVER_MACHINE_ID(s));
- if (!s->runtime_storage.path)
+ m->runtime_storage.path = strjoin("/run/log/journal/", MANAGER_MACHINE_ID(m));
+ if (!m->runtime_storage.path)
return log_oom();
e = getenv("LOGS_DIRECTORY");
if (e)
- s->system_storage.path = strdup(e);
- else if (s->namespace)
- s->system_storage.path = strjoin("/var/log/journal/", SERVER_MACHINE_ID(s), ".", s->namespace);
+ m->system_storage.path = strdup(e);
+ else if (m->namespace)
+ m->system_storage.path = strjoin("/var/log/journal/", MANAGER_MACHINE_ID(m), ".", m->namespace);
else
- s->system_storage.path = strjoin("/var/log/journal/", SERVER_MACHINE_ID(s));
- if (!s->system_storage.path)
+ m->system_storage.path = strjoin("/var/log/journal/", MANAGER_MACHINE_ID(m));
+ if (!m->system_storage.path)
return log_oom();
- (void) server_connect_notify(s);
+ (void) manager_connect_notify(m);
- (void) client_context_acquire_default(s);
+ (void) client_context_acquire_default(m);
- r = server_system_journal_open(s, /* flush_requested= */ false, /* relinquish_requested= */ false);
+ r = manager_system_journal_open(m, /* flush_requested= */ false, /* relinquish_requested= */ false);
if (r < 0)
return r;
- server_start_or_stop_idle_timer(s);
+ manager_start_or_stop_idle_timer(m);
return 0;
}
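/*
 * Illustrative sketch, not taken from journald: the storage paths assembled
 * above are keyed by machine ID, with an optional ".<namespace>" suffix, and
 * LOGS_DIRECTORY can override the system path. A minimal, self-contained
 * version of the resulting names using plain snprintf() instead of the
 * strjoin() helpers (the ID and namespace below are made-up example values):
 */
#include <stdio.h>

int main(void) {
        const char *machine_id = "8e21ca0f4a3d4e16a45baa8078f1acfb"; /* made-up example ID */
        const char *ns = "foo";                                      /* hypothetical namespace */
        char system_path[256], namespaced_path[256];

        /* system instance: /var/log/journal/<machine-id> */
        snprintf(system_path, sizeof system_path, "/var/log/journal/%s", machine_id);

        /* namespaced instance: /var/log/journal/<machine-id>.<namespace> */
        snprintf(namespaced_path, sizeof namespaced_path, "/var/log/journal/%s.%s", machine_id, ns);

        printf("%s\n%s\n", system_path, namespaced_path);
        return 0;
}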
-void server_maybe_append_tags(Server *s) {
+void manager_maybe_append_tags(Manager *m) {
#if HAVE_GCRYPT
JournalFile *f;
usec_t n;
n = now(CLOCK_REALTIME);
- if (s->system_journal)
- journal_file_maybe_append_tag(s->system_journal, n);
+ if (m->system_journal)
+ journal_file_maybe_append_tag(m->system_journal, n);
- ORDERED_HASHMAP_FOREACH(f, s->user_journals)
+ ORDERED_HASHMAP_FOREACH(f, m->user_journals)
journal_file_maybe_append_tag(f, n);
#endif
}
-Server* server_free(Server *s) {
- if (!s)
+Manager* manager_free(Manager *m) {
+ if (!m)
return NULL;
- free(s->namespace);
- free(s->namespace_field);
+ free(m->namespace);
+ free(m->namespace_field);
- set_free(s->deferred_closes);
+ set_free(m->deferred_closes);
- while (s->stdout_streams)
- stdout_stream_free(s->stdout_streams);
+ while (m->stdout_streams)
+ stdout_stream_free(m->stdout_streams);
- client_context_flush_all(s);
+ client_context_flush_all(m);
- (void) journal_file_offline_close(s->system_journal);
- (void) journal_file_offline_close(s->runtime_journal);
+ (void) journal_file_offline_close(m->system_journal);
+ (void) journal_file_offline_close(m->runtime_journal);
- ordered_hashmap_free(s->user_journals);
+ ordered_hashmap_free(m->user_journals);
- sd_varlink_server_unref(s->varlink_server);
+ sd_varlink_server_unref(m->varlink_server);
- sd_event_source_unref(s->syslog_event_source);
- sd_event_source_unref(s->native_event_source);
- sd_event_source_unref(s->stdout_event_source);
- sd_event_source_unref(s->dev_kmsg_event_source);
- sd_event_source_unref(s->audit_event_source);
- sd_event_source_unref(s->sync_event_source);
- sd_event_source_unref(s->sigusr1_event_source);
- sd_event_source_unref(s->sigusr2_event_source);
- sd_event_source_unref(s->sigterm_event_source);
- sd_event_source_unref(s->sigint_event_source);
- sd_event_source_unref(s->sigrtmin1_event_source);
- sd_event_source_unref(s->hostname_event_source);
- sd_event_source_unref(s->notify_event_source);
- sd_event_source_unref(s->watchdog_event_source);
- sd_event_source_unref(s->idle_event_source);
- sd_event_unref(s->event);
+ sd_event_source_unref(m->syslog_event_source);
+ sd_event_source_unref(m->native_event_source);
+ sd_event_source_unref(m->stdout_event_source);
+ sd_event_source_unref(m->dev_kmsg_event_source);
+ sd_event_source_unref(m->audit_event_source);
+ sd_event_source_unref(m->sync_event_source);
+ sd_event_source_unref(m->sigusr1_event_source);
+ sd_event_source_unref(m->sigusr2_event_source);
+ sd_event_source_unref(m->sigterm_event_source);
+ sd_event_source_unref(m->sigint_event_source);
+ sd_event_source_unref(m->sigrtmin1_event_source);
+ sd_event_source_unref(m->hostname_event_source);
+ sd_event_source_unref(m->notify_event_source);
+ sd_event_source_unref(m->watchdog_event_source);
+ sd_event_source_unref(m->idle_event_source);
+ sd_event_unref(m->event);
- safe_close(s->syslog_fd);
- safe_close(s->native_fd);
- safe_close(s->stdout_fd);
- safe_close(s->dev_kmsg_fd);
- safe_close(s->audit_fd);
- safe_close(s->hostname_fd);
- safe_close(s->notify_fd);
- safe_close(s->forward_socket_fd);
+ safe_close(m->syslog_fd);
+ safe_close(m->native_fd);
+ safe_close(m->stdout_fd);
+ safe_close(m->dev_kmsg_fd);
+ safe_close(m->audit_fd);
+ safe_close(m->hostname_fd);
+ safe_close(m->notify_fd);
+ safe_close(m->forward_socket_fd);
- ordered_hashmap_free(s->ratelimit_groups_by_id);
+ ordered_hashmap_free(m->ratelimit_groups_by_id);
- server_unmap_seqnum_file(s->seqnum, sizeof(*s->seqnum));
- server_unmap_seqnum_file(s->kernel_seqnum, sizeof(*s->kernel_seqnum));
+ manager_unmap_seqnum_file(m->seqnum, sizeof(*m->seqnum));
+ manager_unmap_seqnum_file(m->kernel_seqnum, sizeof(*m->kernel_seqnum));
- free(s->buffer);
- free(s->tty_path);
- free(s->cgroup_root);
- free(s->hostname_field);
- free(s->runtime_storage.path);
- free(s->system_storage.path);
- free(s->runtime_directory);
+ free(m->buffer);
+ free(m->tty_path);
+ free(m->cgroup_root);
+ free(m->hostname_field);
+ free(m->runtime_storage.path);
+ free(m->system_storage.path);
+ free(m->runtime_directory);
- mmap_cache_unref(s->mmap);
+ mmap_cache_unref(m->mmap);
SyncReq *req;
- while ((req = prioq_peek(s->sync_req_realtime_prioq)))
+ while ((req = prioq_peek(m->sync_req_realtime_prioq)))
sync_req_free(req);
- prioq_free(s->sync_req_realtime_prioq);
+ prioq_free(m->sync_req_realtime_prioq);
- while ((req = prioq_peek(s->sync_req_boottime_prioq)))
+ while ((req = prioq_peek(m->sync_req_boottime_prioq)))
sync_req_free(req);
- prioq_free(s->sync_req_boottime_prioq);
+ prioq_free(m->sync_req_boottime_prioq);
- return mfree(s);
+ return mfree(m);
}
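/*
 * Illustrative sketch: manager_free() is hooked up to callers through
 * DEFINE_TRIVIAL_CLEANUP_FUNC() and the _cleanup_(manager_freep) declarations
 * seen elsewhere in the patch. Underneath, that is just the GCC/Clang cleanup
 * attribute; a tiny stand-alone version with a hypothetical Foo type:
 */
#include <stdlib.h>

typedef struct Foo { int x; } Foo;

static Foo* foo_free(Foo *f) {
        free(f);
        return NULL;
}

/* hand-rolled equivalent of DEFINE_TRIVIAL_CLEANUP_FUNC(Foo*, foo_free) */
static void foo_freep(Foo **p) {
        if (*p)
                foo_free(*p);
}

int main(void) {
        __attribute__((cleanup(foo_freep))) Foo *f = calloc(1, sizeof(Foo));
        /* f is freed automatically when it goes out of scope, on every return path */
        return f ? 0 : 1;
}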
static const char* const storage_table[_STORAGE_MAX] = {
uint64_t seqnum;
} SeqnumData;
-typedef struct Server {
+typedef struct Manager {
char *namespace;
int syslog_fd;
/* Pending synchronization requests with non-zero rqlen counter */
LIST_HEAD(SyncReq, sync_req_pending_rqlen);
-} Server;
+} Manager;
-#define SERVER_MACHINE_ID(s) ((s)->machine_id_field + STRLEN("_MACHINE_ID="))
+#define MANAGER_MACHINE_ID(m) ((m)->machine_id_field + STRLEN("_MACHINE_ID="))
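/*
 * Illustrative sketch: MANAGER_MACHINE_ID() only skips the "_MACHINE_ID=" key,
 * so the cached journal field doubles as the bare hex ID used when building
 * storage paths. Stand-alone version with a local STRLEN stand-in and a
 * made-up ID:
 */
#include <stdio.h>

#define LITERAL_STRLEN(s) (sizeof(s) - 1)   /* compile-time strlen() for string literals */

int main(void) {
        const char *machine_id_field = "_MACHINE_ID=8e21ca0f4a3d4e16a45baa8078f1acfb";
        const char *id = machine_id_field + LITERAL_STRLEN("_MACHINE_ID=");
        printf("%s\n", id);   /* prints just the hex ID, without the field name */
        return 0;
}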
/* Extra fields for any log messages */
#define N_IOVEC_META_FIELDS 24
/* audit: Maximum number of extra fields we'll import from audit messages */
#define N_IOVEC_AUDIT_FIELDS 64
-void server_dispatch_message(Server *s, struct iovec *iovec, size_t n, size_t m, ClientContext *c, const struct timeval *tv, int priority, pid_t object_pid);
-void server_driver_message_internal(Server *s, pid_t object_pid, const char *format, ...) _sentinel_;
-#define server_driver_message(...) server_driver_message_internal(__VA_ARGS__, NULL)
+void manager_dispatch_message(Manager *m, struct iovec *iovec, size_t n, size_t k, ClientContext *c, const struct timeval *tv, int priority, pid_t object_pid);
+void manager_driver_message_internal(Manager *m, pid_t object_pid, const char *format, ...) _sentinel_;
+#define manager_driver_message(...) manager_driver_message_internal(__VA_ARGS__, NULL)
/* gperf lookup function */
const struct ConfigPerfItem* journald_gperf_lookup(const char *key, GPERF_LEN_TYPE length);
const char* split_mode_to_string(SplitMode s) _const_;
SplitMode split_mode_from_string(const char *s) _pure_;
-int server_new(Server **ret);
-int server_init(Server *s, const char *namespace);
-Server* server_free(Server *s);
-DEFINE_TRIVIAL_CLEANUP_FUNC(Server*, server_free);
-void server_full_sync(Server *s, bool wait);
-void server_vacuum(Server *s, bool verbose);
-void server_rotate(Server *s);
-void server_full_rotate(Server *s);
-int server_flush_to_var(Server *s, bool require_flag_file);
-void server_full_flush(Server *s);
-int server_relinquish_var(Server *s);
-void server_maybe_append_tags(Server *s);
-int server_process_datagram(sd_event_source *es, int fd, uint32_t revents, void *userdata);
-void server_space_usage_message(Server *s, JournalStorage *storage);
-
-int server_start_or_stop_idle_timer(Server *s);
-
-int server_map_seqnum_file(Server *s, const char *fname, size_t size, void **ret);
+int manager_new(Manager **ret);
+int manager_init(Manager *m, const char *namespace);
+Manager* manager_free(Manager *m);
+DEFINE_TRIVIAL_CLEANUP_FUNC(Manager*, manager_free);
+void manager_full_sync(Manager *m, bool wait);
+void manager_vacuum(Manager *m, bool verbose);
+void manager_rotate(Manager *m);
+void manager_full_rotate(Manager *m);
+int manager_flush_to_var(Manager *m, bool require_flag_file);
+void manager_full_flush(Manager *m);
+int manager_relinquish_var(Manager *m);
+void manager_maybe_append_tags(Manager *m);
+int manager_process_datagram(sd_event_source *es, int fd, uint32_t revents, void *userdata);
+void manager_space_usage_message(Manager *m, JournalStorage *storage);
+
+int manager_start_or_stop_idle_timer(Manager *m);
+
+int manager_map_seqnum_file(Manager *m, const char *fname, size_t size, void **ret);
#include "journald-client.h"
#include "journald-console.h"
#include "journald-kmsg.h"
+#include "journald-manager.h"
#include "journald-native.h"
-#include "journald-server.h"
#include "journald-syslog.h"
#include "journald-wall.h"
#include "memfd-util.h"
return ucred && ucred->uid == 0;
}
-static void server_process_entry_meta(
+static void manager_process_entry_meta(
const char *p, size_t l,
const struct ucred *ucred,
int *priority,
}
}
-static int server_process_entry(
- Server *s,
+static int manager_process_entry(
+ Manager *m,
const void *buffer, size_t *remaining,
ClientContext *context,
const struct ucred *ucred,
iovec[n++] = IOVEC_MAKE((char*) p, l);
entry_size += l;
- server_process_entry_meta(p, l, ucred,
+ manager_process_entry_meta(p, l, ucred,
&priority,
&identifier,
&message,
entry_size += iovec[n].iov_len;
n++;
- server_process_entry_meta(k, (e - p) + 1 + l, ucred,
+ manager_process_entry_meta(k, (e - p) + 1 + l, ucred,
&priority,
&identifier,
&message,
if (message) {
/* Ensure message is not NULL, otherwise strlen(message) would crash. This check needs to
- * be here until server_process_entry() is able to process messages containing \0 characters,
+ * be here until manager_process_entry() is able to process messages containing \0 characters,
* as we would have access to the actual size of message. */
r = client_context_check_keep_log(context, message, strlen(message));
if (r <= 0)
goto finish;
- if (s->forward_to_syslog)
- server_forward_syslog(s, syslog_fixup_facility(priority), identifier, message, ucred, tv);
+ if (m->forward_to_syslog)
+ manager_forward_syslog(m, syslog_fixup_facility(priority), identifier, message, ucred, tv);
- if (s->forward_to_kmsg)
- server_forward_kmsg(s, priority, identifier, message, ucred);
+ if (m->forward_to_kmsg)
+ manager_forward_kmsg(m, priority, identifier, message, ucred);
- if (s->forward_to_console)
- server_forward_console(s, priority, identifier, message, ucred);
+ if (m->forward_to_console)
+ manager_forward_console(m, priority, identifier, message, ucred);
- if (s->forward_to_wall)
- server_forward_wall(s, priority, identifier, message, ucred);
+ if (m->forward_to_wall)
+ manager_forward_wall(m, priority, identifier, message, ucred);
}
- server_dispatch_message(s, iovec, n, MALLOC_ELEMENTSOF(iovec), context, tv, priority, object_pid);
+ manager_dispatch_message(m, iovec, n, MALLOC_ELEMENTSOF(iovec), context, tv, priority, object_pid);
finish:
for (j = 0; j < n; j++) {
return r;
}
-void server_process_native_message(
- Server *s,
+void manager_process_native_message(
+ Manager *m,
const char *buffer, size_t buffer_size,
const struct ucred *ucred,
const struct timeval *tv,
ClientContext *context = NULL;
int r;
- assert(s);
+ assert(m);
assert(buffer || buffer_size == 0);
if (ucred && pid_is_valid(ucred->pid)) {
- r = client_context_get(s, ucred->pid, ucred, label, label_len, NULL, &context);
+ r = client_context_get(m, ucred->pid, ucred, label, label_len, NULL, &context);
if (r < 0)
log_ratelimit_warning_errno(r, JOURNAL_LOG_RATELIMIT,
"Failed to retrieve credentials for PID " PID_FMT ", ignoring: %m",
}
do {
- r = server_process_entry(s,
+ r = manager_process_entry(m,
(const uint8_t*) buffer + (buffer_size - remaining), &remaining,
context, ucred, tv, label, label_len);
} while (r == 0);
}
-int server_process_native_file(
- Server *s,
+int manager_process_native_file(
+ Manager *m,
int fd,
const struct ucred *ucred,
const struct timeval *tv,
/* Data is in the passed fd, probably it didn't fit in a datagram. */
- assert(s);
+ assert(m);
assert(fd >= 0);
if (fstat(fd, &st) < 0)
return log_ratelimit_error_errno(errno, JOURNAL_LOG_RATELIMIT,
"Failed to map memfd: %m");
- server_process_native_message(s, p, st.st_size, ucred, tv, label, label_len);
+ manager_process_native_message(m, p, st.st_size, ucred, tv, label, label_len);
assert_se(munmap(p, ps) >= 0);
return 0;
return log_ratelimit_error_errno(errno, JOURNAL_LOG_RATELIMIT,
"Failed to read file: %m");
if (n > 0)
- server_process_native_message(s, p, n, ucred, tv, label, label_len);
+ manager_process_native_message(m, p, n, ucred, tv, label, label_len);
return 0;
}
-int server_open_native_socket(Server *s, const char *native_socket) {
+int manager_open_native_socket(Manager *m, const char *native_socket) {
int r;
- assert(s);
+ assert(m);
assert(native_socket);
- if (s->native_fd < 0) {
+ if (m->native_fd < 0) {
union sockaddr_union sa;
size_t sa_len;
return log_error_errno(r, "Unable to use namespace path %s for AF_UNIX socket: %m", native_socket);
sa_len = r;
- s->native_fd = socket(AF_UNIX, SOCK_DGRAM|SOCK_CLOEXEC|SOCK_NONBLOCK, 0);
- if (s->native_fd < 0)
+ m->native_fd = socket(AF_UNIX, SOCK_DGRAM|SOCK_CLOEXEC|SOCK_NONBLOCK, 0);
+ if (m->native_fd < 0)
return log_error_errno(errno, "socket() failed: %m");
(void) sockaddr_un_unlink(&sa.un);
- r = bind(s->native_fd, &sa.sa, sa_len);
+ r = bind(m->native_fd, &sa.sa, sa_len);
if (r < 0)
return log_error_errno(errno, "bind(%s) failed: %m", sa.un.sun_path);
(void) chmod(sa.un.sun_path, 0666);
} else
- (void) fd_nonblock(s->native_fd, true);
+ (void) fd_nonblock(m->native_fd, true);
- r = setsockopt_int(s->native_fd, SOL_SOCKET, SO_PASSCRED, true);
+ r = setsockopt_int(m->native_fd, SOL_SOCKET, SO_PASSCRED, true);
if (r < 0)
return log_error_errno(r, "SO_PASSCRED failed: %m");
if (mac_selinux_use()) {
- r = setsockopt_int(s->native_fd, SOL_SOCKET, SO_PASSSEC, true);
+ r = setsockopt_int(m->native_fd, SOL_SOCKET, SO_PASSSEC, true);
if (r < 0)
log_warning_errno(r, "SO_PASSSEC failed: %m");
}
- r = setsockopt_int(s->native_fd, SOL_SOCKET, SO_TIMESTAMP, true);
+ r = setsockopt_int(m->native_fd, SOL_SOCKET, SO_TIMESTAMP, true);
if (r < 0)
return log_error_errno(r, "SO_TIMESTAMP failed: %m");
- r = sd_event_add_io(s->event, &s->native_event_source, s->native_fd, EPOLLIN, server_process_datagram, s);
+ r = sd_event_add_io(m->event, &m->native_event_source, m->native_fd, EPOLLIN, manager_process_datagram, m);
if (r < 0)
- return log_error_errno(r, "Failed to add native server fd to event loop: %m");
+ return log_error_errno(r, "Failed to add native manager fd to event loop: %m");
- r = sd_event_source_set_priority(s->native_event_source, SD_EVENT_PRIORITY_NORMAL+5);
+ r = sd_event_source_set_priority(m->native_event_source, SD_EVENT_PRIORITY_NORMAL+5);
if (r < 0)
return log_error_errno(r, "Failed to adjust native event source priority: %m");
#include <stddef.h>
#include <sys/socket.h>
-typedef struct Server Server;
+typedef struct Manager Manager;
-void server_process_native_message(
- Server *s,
+void manager_process_native_message(
+ Manager *m,
const char *buffer,
size_t buffer_size,
const struct ucred *ucred,
const char *label,
size_t label_len);
-int server_process_native_file(
- Server *s,
+int manager_process_native_file(
+ Manager *m,
int fd,
const struct ucred *ucred,
const struct timeval *tv,
const char *label,
size_t label_len);
-int server_open_native_socket(Server *s, const char *native_socket);
+int manager_open_native_socket(Manager *m, const char *native_socket);
#include "fd-util.h"
#include "iovec-util.h"
-#include "journald-server.h"
+#include "journald-manager.h"
#include "journald-socket.h"
#include "log.h"
#include "macro.h"
#include "socket-util.h"
#include "sparse-endian.h"
-static int server_open_forward_socket(Server *s) {
+static int manager_open_forward_socket(Manager *m) {
_cleanup_close_ int socket_fd = -EBADF;
const SocketAddress *addr;
int family;
- assert(s);
+ assert(m);
/* Noop if there is nothing to do. */
- if (s->forward_to_socket.sockaddr.sa.sa_family == AF_UNSPEC || s->namespace)
+ if (m->forward_to_socket.sockaddr.sa.sa_family == AF_UNSPEC || m->namespace)
return 0;
/* All ready, nothing to do. */
- if (s->forward_socket_fd >= 0)
+ if (m->forward_socket_fd >= 0)
return 1;
- addr = &s->forward_to_socket;
+ addr = &m->forward_to_socket;
family = socket_address_family(addr);
if (connect(socket_fd, &addr->sockaddr.sa, addr->size) < 0)
return log_debug_errno(errno, "Failed to connect to remote address for forwarding, ignoring: %m");
- s->forward_socket_fd = TAKE_FD(socket_fd);
+ m->forward_socket_fd = TAKE_FD(socket_fd);
log_debug("Successfully connected to remote address for forwarding.");
return 1;
}
return false;
}
-int server_forward_socket(
- Server *s,
+int manager_forward_socket(
+ Manager *m,
const struct iovec *iovec,
size_t n_iovec,
const dual_timestamp *ts,
le64_t *len;
int r;
- assert(s);
+ assert(m);
assert(iovec);
assert(n_iovec > 0);
assert(ts);
- if (LOG_PRI(priority) > s->max_level_socket)
+ if (LOG_PRI(priority) > m->max_level_socket)
return 0;
- r = server_open_forward_socket(s);
+ r = manager_open_forward_socket(m);
if (r <= 0)
return r;
xsprintf(monotonic_buf, "__MONOTONIC_TIMESTAMP="USEC_FMT"\n\n", ts->monotonic);
iov[iov_idx++] = IOVEC_MAKE_STRING(monotonic_buf);
- if (writev(s->forward_socket_fd, iov, iov_idx) < 0) {
+ if (writev(m->forward_socket_fd, iov, iov_idx) < 0) {
log_debug_errno(errno, "Failed to forward log message over socket: %m");
/* If we failed to send once we will probably fail again so wait for a new connection to
* establish before attempting to forward again. */
- s->forward_socket_fd = safe_close(s->forward_socket_fd);
+ m->forward_socket_fd = safe_close(m->forward_socket_fd);
}
return 0;
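/*
 * Illustrative sketch: manager_forward_socket() closes its connection fd as
 * soon as writev() fails, so the next message triggers a fresh connect. A
 * stripped-down version of that "close on failure, reconnect lazily" pattern;
 * connect_forward_socket() is a hypothetical stand-in for
 * manager_open_forward_socket():
 */
#include <sys/uio.h>
#include <unistd.h>

static int forward_fd = -1;

static int connect_forward_socket(void) {
        /* hypothetical: would connect() to the configured forwarding address */
        return -1;
}

static void forward_iovec(const struct iovec *iov, int n_iov) {
        if (forward_fd < 0) {
                forward_fd = connect_forward_socket();
                if (forward_fd < 0)
                        return;   /* cannot connect right now, silently drop */
        }

        if (writev(forward_fd, iov, n_iov) < 0) {
                /* if one send failed the next will likely fail too; close and
                 * wait for a fresh connection before trying again */
                close(forward_fd);
                forward_fd = -1;
        }
}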
#include "time-util.h"
-typedef struct Server Server;
+typedef struct Manager Manager;
-int server_forward_socket(Server *s, const struct iovec *iovec, size_t n, const dual_timestamp *ts, int priority);
+int manager_forward_socket(Manager *m, const struct iovec *iovec, size_t n, const dual_timestamp *ts, int priority);
#include "journald-console.h"
#include "journald-context.h"
#include "journald-kmsg.h"
-#include "journald-server.h"
+#include "journald-manager.h"
#include "journald-stream.h"
#include "journald-syslog.h"
#include "journald-wall.h"
while (s->stream_sync_reqs)
stream_sync_req_free(s->stream_sync_reqs);
- if (s->server) {
+ if (s->manager) {
if (s->context)
- client_context_release(s->server, s->context);
+ client_context_release(s->manager, s->context);
- assert(s->server->n_stdout_streams > 0);
- s->server->n_stdout_streams--;
- LIST_REMOVE(stdout_stream, s->server->stdout_streams, s);
+ assert(s->manager->n_stdout_streams > 0);
+ s->manager->n_stdout_streams--;
+ LIST_REMOVE(stdout_stream, s->manager->stdout_streams, s);
if (s->in_notify_queue)
- LIST_REMOVE(stdout_stream_notify_queue, s->server->stdout_streams_notify_queue, s);
+ LIST_REMOVE(stdout_stream_notify_queue, s->manager->stdout_streams_notify_queue, s);
}
sd_event_source_disable_unref(s->event_source);
sync_req_revalidate(TAKE_PTR(req));
}
- Server *server = s->server;
+ Manager *manager = s->manager;
stdout_stream_free(TAKE_PTR(s));
- (void) server_start_or_stop_idle_timer(server); /* Maybe we are idle now? */
+ (void) manager_start_or_stop_idle_timer(manager); /* Maybe we are idle now? */
}
static int stdout_stream_save(StdoutStream *s) {
"Failed to stat connected stream: %m");
/* We use device and inode numbers as identifier for the stream */
- r = asprintf(&s->state_file, "%s/streams/%lu:%lu", s->server->runtime_directory, (unsigned long) st.st_dev, (unsigned long) st.st_ino);
+ r = asprintf(&s->state_file, "%s/streams/%lu:%lu", s->manager->runtime_directory, (unsigned long) st.st_dev, (unsigned long) st.st_ino);
if (r < 0)
return log_oom();
}
if (r < 0)
goto fail;
- if (rename(temp_path, s->state_file) < 0) {
- r = -errno;
+ r = RET_NERRNO(rename(temp_path, s->state_file));
+ if (r < 0)
goto fail;
- }
temp_path = mfree(temp_path);
if (!s->fdstore && !s->in_notify_queue) {
- LIST_PREPEND(stdout_stream_notify_queue, s->server->stdout_streams_notify_queue, s);
+ LIST_PREPEND(stdout_stream_notify_queue, s->manager->stdout_streams_notify_queue, s);
s->in_notify_queue = true;
- if (s->server->notify_event_source) {
- r = sd_event_source_set_enabled(s->server->notify_event_source, SD_EVENT_ON);
+ if (s->manager->notify_event_source) {
+ r = sd_event_source_set_enabled(s->manager->notify_event_source, SD_EVENT_ON);
if (r < 0)
log_ratelimit_warning_errno(r, JOURNAL_LOG_RATELIMIT, "Failed to enable notify event source: %m");
}
assert(line_break < _LINE_BREAK_MAX);
if (s->context)
- (void) client_context_maybe_refresh(s->server, s->context, NULL, NULL, 0, NULL, USEC_INFINITY);
+ (void) client_context_maybe_refresh(s->manager, s->context, NULL, NULL, 0, NULL, USEC_INFINITY);
else if (pid_is_valid(s->ucred.pid)) {
- r = client_context_acquire(s->server, s->ucred.pid, &s->ucred, s->label, strlen_ptr(s->label), s->unit_id, &s->context);
+ r = client_context_acquire(s->manager, s->ucred.pid, &s->ucred, s->label, strlen_ptr(s->label), s->unit_id, &s->context);
if (r < 0)
log_ratelimit_warning_errno(r, JOURNAL_LOG_RATELIMIT,
"Failed to acquire client context, ignoring: %m");
if (r <= 0)
return r;
- if (s->forward_to_syslog || s->server->forward_to_syslog)
- server_forward_syslog(s->server, syslog_fixup_facility(priority), s->identifier, p, &s->ucred, NULL);
+ if (s->forward_to_syslog || s->manager->forward_to_syslog)
+ manager_forward_syslog(s->manager, syslog_fixup_facility(priority), s->identifier, p, &s->ucred, NULL);
- if (s->forward_to_kmsg || s->server->forward_to_kmsg)
- server_forward_kmsg(s->server, priority, s->identifier, p, &s->ucred);
+ if (s->forward_to_kmsg || s->manager->forward_to_kmsg)
+ manager_forward_kmsg(s->manager, priority, s->identifier, p, &s->ucred);
- if (s->forward_to_console || s->server->forward_to_console)
- server_forward_console(s->server, priority, s->identifier, p, &s->ucred);
+ if (s->forward_to_console || s->manager->forward_to_console)
+ manager_forward_console(s->manager, priority, s->identifier, p, &s->ucred);
- if (s->server->forward_to_wall)
- server_forward_wall(s->server, priority, s->identifier, p, &s->ucred);
+ if (s->manager->forward_to_wall)
+ manager_forward_wall(s->manager, priority, s->identifier, p, &s->ucred);
m = N_IOVEC_META_FIELDS + 7 + client_context_extra_fields_n_iovec(s->context);
iovec = newa(struct iovec, m);
if (message)
iovec[n++] = IOVEC_MAKE_STRING(message);
- server_dispatch_message(s->server, iovec, n, m, s->context, NULL, priority, 0);
+ manager_dispatch_message(s->manager, iovec, n, m, s->context, NULL, priority, 0);
return 0;
}
return STDOUT_STREAM_SETUP_PROTOCOL_LINE_MAX;
/* After the protocol's "setup" phase is complete, let's use whatever the user configured */
- return s->server->line_max;
+ return s->manager->line_max;
}
static int stdout_stream_scan(
/* Try to make use of the allocated buffer in full, but never read more than the configured line size. Also,
* always leave room for a terminating NUL we might need to add. */
- limit = MIN(allocated - 1, MAX(s->server->line_max, STDOUT_STREAM_SETUP_PROTOCOL_LINE_MAX));
+ limit = MIN(allocated - 1, MAX(s->manager->line_max, STDOUT_STREAM_SETUP_PROTOCOL_LINE_MAX));
assert(s->length <= limit);
iovec = IOVEC_MAKE(s->buffer + s->length, limit - s->length);
if (r < 0)
goto terminate;
- s->context = client_context_release(s->server, s->context);
+ s->context = client_context_release(s->manager, s->context);
p = s->buffer + s->length;
} else {
return 0;
}
-int stdout_stream_install(Server *s, int fd, StdoutStream **ret) {
+int stdout_stream_install(Manager *m, int fd, StdoutStream **ret) {
_cleanup_(stdout_stream_freep) StdoutStream *stream = NULL;
sd_id128_t id;
int r;
- assert(s);
+ assert(m);
assert(fd >= 0);
r = sd_id128_randomize(&id);
(void) shutdown(fd, SHUT_WR);
- r = sd_event_add_io(s->event, &stream->event_source, fd, EPOLLIN, stdout_stream_process, stream);
+ r = sd_event_add_io(m->event, &stream->event_source, fd, EPOLLIN, stdout_stream_process, stream);
if (r < 0)
return log_ratelimit_error_errno(r, JOURNAL_LOG_RATELIMIT, "Failed to add stream to event loop: %m");
stream->fd = fd;
- stream->server = s;
- LIST_PREPEND(stdout_stream, s->stdout_streams, stream);
- s->n_stdout_streams++;
+ stream->manager = m;
+ LIST_PREPEND(stdout_stream, m->stdout_streams, stream);
+ m->n_stdout_streams++;
- (void) server_start_or_stop_idle_timer(s); /* Maybe no longer idle? */
+ (void) manager_start_or_stop_idle_timer(m); /* Maybe no longer idle? */
if (ret)
*ret = stream;
static int stdout_stream_new(sd_event_source *es, int listen_fd, uint32_t revents, void *userdata) {
_cleanup_close_ int fd = -EBADF;
- Server *s = ASSERT_PTR(userdata);
+ Manager *m = ASSERT_PTR(userdata);
int r;
if (revents != EPOLLIN)
"Got invalid event from epoll for stdout server fd: %" PRIx32,
revents);
- fd = accept4(s->stdout_fd, NULL, NULL, SOCK_NONBLOCK|SOCK_CLOEXEC);
+ fd = accept4(m->stdout_fd, NULL, NULL, SOCK_NONBLOCK|SOCK_CLOEXEC);
if (fd < 0) {
if (ERRNO_IS_ACCEPT_AGAIN(errno))
return 0;
return log_ratelimit_error_errno(errno, JOURNAL_LOG_RATELIMIT, "Failed to accept stdout connection: %m");
}
- if (s->n_stdout_streams >= STDOUT_STREAMS_MAX) {
+ if (m->n_stdout_streams >= STDOUT_STREAMS_MAX) {
struct ucred u = UCRED_INVALID;
(void) getpeercred(fd, &u);
*/
fd = safe_close(fd);
- server_driver_message(s, u.pid, LOG_MESSAGE("Too many stdout streams, refusing connection."));
+ manager_driver_message(m, u.pid, LOG_MESSAGE("Too many stdout streams, refusing connection."));
- server_notify_stream(s, /* stream= */ NULL);
+ manager_notify_stream(m, /* stream= */ NULL);
return 0;
}
StdoutStream *stream;
- r = stdout_stream_install(s, fd, &stream);
+ r = stdout_stream_install(m, fd, &stream);
if (r < 0) {
- server_notify_stream(s, /* stream= */ NULL);
+ manager_notify_stream(m, /* stream= */ NULL);
return r;
}
TAKE_FD(fd);
/* Tell the synchronization logic that we dropped one item from the incoming connection queue */
- server_notify_stream(s, stream);
+ manager_notify_stream(m, stream);
return 0;
}
assert(fname);
if (!stream->state_file) {
- stream->state_file = path_join(stream->server->runtime_directory, "streams", fname);
+ stream->state_file = path_join(stream->manager->runtime_directory, "streams", fname);
if (!stream->state_file)
return log_oom();
}
return 0;
}
-static int stdout_stream_restore(Server *s, const char *fname, int fd) {
+static int stdout_stream_restore(Manager *m, const char *fname, int fd) {
StdoutStream *stream;
int r;
- assert(s);
+ assert(m);
assert(fname);
assert(fd >= 0);
- if (s->n_stdout_streams >= STDOUT_STREAMS_MAX)
+ if (m->n_stdout_streams >= STDOUT_STREAMS_MAX)
return log_warning_errno(SYNTHETIC_ERRNO(ENOBUFS),
"Too many stdout streams, refusing restoring of stream.");
- r = stdout_stream_install(s, fd, &stream);
+ r = stdout_stream_install(m, fd, &stream);
if (r < 0)
return r;
return 0;
}
-int server_restore_streams(Server *s, FDSet *fds) {
+int manager_restore_streams(Manager *m, FDSet *fds) {
_cleanup_closedir_ DIR *d = NULL;
const char *path;
int r;
- path = strjoina(s->runtime_directory, "/streams");
+ path = strjoina(m->runtime_directory, "/streams");
d = opendir(path);
if (!d) {
if (errno == ENOENT)
fdset_remove(fds, fd);
- r = stdout_stream_restore(s, de->d_name, fd);
+ r = stdout_stream_restore(m, de->d_name, fd);
if (r < 0)
safe_close(fd);
}
return log_error_errno(errno, "Failed to read streams directory: %m");
}
-int server_open_stdout_socket(Server *s, const char *stdout_socket) {
+int manager_open_stdout_socket(Manager *m, const char *stdout_socket) {
int r;
- assert(s);
+ assert(m);
assert(stdout_socket);
- if (s->stdout_fd < 0) {
+ if (m->stdout_fd < 0) {
union sockaddr_union sa;
socklen_t sa_len;
return log_error_errno(r, "Unable to use namespace path %s for AF_UNIX socket: %m", stdout_socket);
sa_len = r;
- s->stdout_fd = socket(AF_UNIX, SOCK_STREAM|SOCK_CLOEXEC|SOCK_NONBLOCK, 0);
- if (s->stdout_fd < 0)
+ m->stdout_fd = socket(AF_UNIX, SOCK_STREAM|SOCK_CLOEXEC|SOCK_NONBLOCK, 0);
+ if (m->stdout_fd < 0)
return log_error_errno(errno, "socket() failed: %m");
(void) sockaddr_un_unlink(&sa.un);
- r = bind(s->stdout_fd, &sa.sa, sa_len);
+ r = bind(m->stdout_fd, &sa.sa, sa_len);
if (r < 0)
return log_error_errno(errno, "bind(%s) failed: %m", sa.un.sun_path);
(void) chmod(sa.un.sun_path, 0666);
- if (listen(s->stdout_fd, SOMAXCONN_DELUXE) < 0)
+ if (listen(m->stdout_fd, SOMAXCONN_DELUXE) < 0)
return log_error_errno(errno, "listen(%s) failed: %m", sa.un.sun_path);
} else
- (void) fd_nonblock(s->stdout_fd, true);
+ (void) fd_nonblock(m->stdout_fd, true);
- r = sd_event_add_io(s->event, &s->stdout_event_source, s->stdout_fd, EPOLLIN, stdout_stream_new, s);
+ r = sd_event_add_io(m->event, &m->stdout_event_source, m->stdout_fd, EPOLLIN, stdout_stream_new, m);
if (r < 0)
return log_error_errno(r, "Failed to add stdout server fd to event source: %m");
- r = sd_event_source_set_priority(s->stdout_event_source, SD_EVENT_PRIORITY_NORMAL+5);
+ r = sd_event_source_set_priority(m->stdout_event_source, SD_EVENT_PRIORITY_NORMAL+5);
if (r < 0)
return log_error_errno(r, "Failed to adjust priority of stdout server event source: %m");
assert(s);
assert(!s->fdstore);
assert(s->in_notify_queue);
- assert(s->server);
- assert(s->server->notify_fd >= 0);
+ assert(s->manager);
+ assert(s->manager->notify_fd >= 0);
/* Store the connection fd in PID 1, so that we get it passed
* in again on next start */
memcpy(CMSG_DATA(cmsg), &s->fd, sizeof(int));
- l = sendmsg(s->server->notify_fd, &msghdr, MSG_DONTWAIT|MSG_NOSIGNAL);
+ l = sendmsg(s->manager->notify_fd, &msghdr, MSG_DONTWAIT|MSG_NOSIGNAL);
if (l < 0) {
if (errno == EAGAIN)
return;
s->fdstore = 1;
}
- LIST_REMOVE(stdout_stream_notify_queue, s->server->stdout_streams_notify_queue, s);
+ LIST_REMOVE(stdout_stream_notify_queue, s->manager->stdout_streams_notify_queue, s);
s->in_notify_queue = false;
}
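/*
 * Illustrative sketch: stdout_stream_send_notify() above pushes the per-stream
 * connection fd into the service manager's fd store so it survives a journald
 * restart. Outside of journald the same effect is usually achieved with the
 * sd-daemon notification helpers; the fd and the name here are placeholders:
 */
#include <systemd/sd-daemon.h>

int store_stream_fd(int fd) {
        /* FDSTORE=1 asks the service manager to keep the fd; FDNAME= labels it
         * so it can be matched up again via sd_listen_fds_with_names() on the
         * next invocation */
        return sd_pid_notify_with_fds(0, /* unset_environment= */ 0,
                                      "FDSTORE=1\nFDNAME=stream", &fd, 1);
}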
/* SPDX-License-Identifier: LGPL-2.1-or-later */
#pragma once
-typedef struct Server Server;
+typedef struct Manager Manager;
typedef struct StdoutStream StdoutStream;
typedef struct StreamSyncReq StreamSyncReq;
} StdoutStreamState;
struct StdoutStream {
- Server *server;
+ Manager *manager;
StdoutStreamState state;
int fd;
LIST_HEAD(StreamSyncReq, stream_sync_reqs);
};
-int server_open_stdout_socket(Server *s, const char *stdout_socket);
-int server_restore_streams(Server *s, FDSet *fds);
+int manager_open_stdout_socket(Manager *m, const char *stdout_socket);
+int manager_restore_streams(Manager *m, FDSet *fds);
StdoutStream* stdout_stream_free(StdoutStream *s);
-int stdout_stream_install(Server *s, int fd, StdoutStream **ret);
+int stdout_stream_install(Manager *m, int fd, StdoutStream **ret);
void stdout_stream_terminate(StdoutStream *s);
void stdout_stream_send_notify(StdoutStream *s);
#include "sd-varlink.h"
#include "io-util.h"
-#include "journald-server.h"
+#include "journald-manager.h"
#include "journald-stream.h"
#include "journald-sync.h"
#include "journald-varlink.h"
int r;
assert(req);
- assert(req->server);
+ assert(req->manager);
/* In case the clock jumped backwards, let's adjust the timestamp, to guarantee reasonably quick
* termination */
/* If this sync request is still in the priority queue it means we still need to check if
* incoming message timestamps are now newer than then sync request timestamp. */
- if (req->server->native_fd >= 0 &&
- req->server->native_timestamp < req->realtime) {
- r = fd_wait_for_event(req->server->native_fd, POLLIN, /* timeout= */ 0);
+ if (req->manager->native_fd >= 0 &&
+ req->manager->native_timestamp < req->realtime) {
+ r = fd_wait_for_event(req->manager->native_fd, POLLIN, /* timeout= */ 0);
if (r < 0)
log_debug_errno(r, "Failed to determine pending IO events of native socket, ignoring: %m");
else if (r != 0) /* if there's more queued we need to wait for the timestamp to pass. If it's idle though we are done here. */
return false;
}
- if (req->server->syslog_fd >= 0&&
- req->server->syslog_timestamp < req->realtime) {
- r = fd_wait_for_event(req->server->syslog_fd, POLLIN, /* timeout= */ 0);
+ if (req->manager->syslog_fd >= 0 &&
+ req->manager->syslog_timestamp < req->realtime) {
+ r = fd_wait_for_event(req->manager->syslog_fd, POLLIN, /* timeout= */ 0);
if (r < 0)
log_debug_errno(r, "Failed to determine pending IO events of syslog socket, ignoring: %m");
else if (r != 0)
/* This sync request is fulfilled for the native + syslog datagram streams? Then, let's
* remove this sync request from the priority queue, so that we don't need to consider it
* anymore. */
- assert(prioq_remove(req->server->sync_req_realtime_prioq, req, &req->realtime_prioq_idx) > 0);
+ assert(prioq_remove(req->manager->sync_req_realtime_prioq, req, &req->realtime_prioq_idx) > 0);
}
if (req->boottime_prioq_idx != PRIOQ_IDX_NULL) {
/* Very similar to the above, but for /dev/kmsg we operate on the CLOCK_BOOTTIME clock */
- if (req->server->dev_kmsg_fd >= 0 &&
- req->server->dev_kmsg_timestamp < req->boottime) {
- r = fd_wait_for_event(req->server->dev_kmsg_fd, POLLIN, /* timeout= */ 0);
+ if (req->manager->dev_kmsg_fd >= 0 &&
+ req->manager->dev_kmsg_timestamp < req->boottime) {
+ r = fd_wait_for_event(req->manager->dev_kmsg_fd, POLLIN, /* timeout= */ 0);
if (r < 0)
log_debug_errno(r, "Failed to determine pending IO events of /dev/kmsg file descriptor, ignoring: %m");
else if (r != 0)
return false;
}
- assert(prioq_remove(req->server->sync_req_boottime_prioq, req, &req->boottime_prioq_idx) > 0);
+ assert(prioq_remove(req->manager->sync_req_boottime_prioq, req, &req->boottime_prioq_idx) > 0);
}
/* If there are still streams with pending counters, we still need to look into things */
if (!req)
return NULL;
- if (req->server) {
+ if (req->manager) {
if (req->realtime_prioq_idx != PRIOQ_IDX_NULL)
- assert_se(prioq_remove(req->server->sync_req_realtime_prioq, req, &req->realtime_prioq_idx) > 0);
+ assert_se(prioq_remove(req->manager->sync_req_realtime_prioq, req, &req->realtime_prioq_idx) > 0);
if (req->boottime_prioq_idx != PRIOQ_IDX_NULL)
- assert_se(prioq_remove(req->server->sync_req_boottime_prioq, req, &req->boottime_prioq_idx) > 0);
+ assert_se(prioq_remove(req->manager->sync_req_boottime_prioq, req, &req->boottime_prioq_idx) > 0);
if (req->pending_rqlen > 0)
- LIST_REMOVE(pending_rqlen, req->server->sync_req_pending_rqlen, req);
+ LIST_REMOVE(pending_rqlen, req->manager->sync_req_pending_rqlen, req);
}
req->idle_event_source = sd_event_source_disable_unref(req->idle_event_source);
return 1;
}
-int sync_req_new(Server *s, sd_varlink *link, SyncReq **ret) {
+int sync_req_new(Manager *m, sd_varlink *link, SyncReq **ret) {
int r;
- assert(s);
+ assert(m);
assert(link);
assert(ret);
return -ENOMEM;
*req = (SyncReq) {
- .server = s,
+ .manager = m,
.link = sd_varlink_ref(link),
.realtime_prioq_idx = PRIOQ_IDX_NULL,
.boottime_prioq_idx = PRIOQ_IDX_NULL,
req->realtime = now(CLOCK_REALTIME);
req->boottime = now(CLOCK_BOOTTIME);
- if (s->native_event_source || s->syslog_event_source) {
- r = prioq_ensure_put(&s->sync_req_realtime_prioq, sync_req_realtime_compare, req, &req->realtime_prioq_idx);
+ if (m->native_event_source || m->syslog_event_source) {
+ r = prioq_ensure_put(&m->sync_req_realtime_prioq, sync_req_realtime_compare, req, &req->realtime_prioq_idx);
if (r < 0)
return r;
}
- if (s->dev_kmsg_event_source) {
- r = prioq_ensure_put(&s->sync_req_boottime_prioq, sync_req_boottime_compare, req, &req->boottime_prioq_idx);
+ if (m->dev_kmsg_event_source) {
+ r = prioq_ensure_put(&m->sync_req_boottime_prioq, sync_req_boottime_compare, req, &req->boottime_prioq_idx);
if (r < 0)
return r;
}
- r = sd_event_add_defer(s->event, &req->idle_event_source, on_idle, req);
+ r = sd_event_add_defer(m->event, &req->idle_event_source, on_idle, req);
if (r < 0)
return r;
/* Now determine the pending byte counter for each stdout stream. If non-zero allocate a
* StreamSyncReq for the stream to keep track of it */
- LIST_FOREACH(stdout_stream, ss, s->stdout_streams) {
+ LIST_FOREACH(stdout_stream, ss, m->stdout_streams) {
r = sync_req_add_stream(req, ss);
if (r < 0)
return r;
/* Also track how many pending, incoming stream sockets there are currently, so that we process them
* too */
- r = af_unix_get_qlen(s->stdout_fd, &req->pending_rqlen);
+ r = af_unix_get_qlen(m->stdout_fd, &req->pending_rqlen);
if (r < 0)
log_warning_errno(r, "Failed to determine current incoming queue length, ignoring: %m");
if (req->pending_rqlen > 0)
- LIST_PREPEND(pending_rqlen, s->sync_req_pending_rqlen, req);
+ LIST_PREPEND(pending_rqlen, m->sync_req_pending_rqlen, req);
*ret = TAKE_PTR(req);
return 0;
/* If there are no more connections to wait for, remove us from the list of synchronization
* requests with non-zero pending connection counters */
if (n == 0)
- LIST_REMOVE(pending_rqlen, req->server->sync_req_pending_rqlen, req);
+ LIST_REMOVE(pending_rqlen, req->manager->sync_req_pending_rqlen, req);
}
req->pending_rqlen = n;
sync_req_revalidate(req);
}
-void server_notify_stream(Server *s, StdoutStream *ss) {
+void manager_notify_stream(Manager *m, StdoutStream *ss) {
int r;
- assert(s);
+ assert(m);
/* Invoked whenever a new connection was accept()ed, i.e. dropped off the queue of pending incoming
* connections. */
- if (!s->sync_req_pending_rqlen)
+ if (!m->sync_req_pending_rqlen)
return;
uint32_t current_qlen;
- r = af_unix_get_qlen(s->stdout_fd, &current_qlen);
+ r = af_unix_get_qlen(m->stdout_fd, &current_qlen);
if (r < 0) {
log_warning_errno(r, "Failed to determine current AF_UNIX stream socket pending connections, ignoring: %m");
current_qlen = UINT32_MAX;
}
- LIST_FOREACH(pending_rqlen, sr, s->sync_req_pending_rqlen)
+ LIST_FOREACH(pending_rqlen, sr, m->sync_req_pending_rqlen)
/* NB: this might invalidate the SyncReq object! */
sync_req_advance_rqlen_revalidate(sr, current_qlen, ss);
}
return true;
}
-void sync_req_revalidate_by_timestamp(Server *s) {
- assert(s);
+void sync_req_revalidate_by_timestamp(Manager *m) {
+ assert(m);
/* Go through the pending sync requests by timestamp, and complete those for which a sync is now
* complete. */
SyncReq *req;
- while ((req = prioq_peek(s->sync_req_realtime_prioq)))
+ while ((req = prioq_peek(m->sync_req_realtime_prioq)))
if (!sync_req_revalidate(req))
break;
- while ((req = prioq_peek(s->sync_req_boottime_prioq)))
+ while ((req = prioq_peek(m->sync_req_boottime_prioq)))
if (!sync_req_revalidate(req))
break;
}
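/*
 * Illustrative sketch: sync_req_revalidate() treats a socket as drained by
 * polling it with a zero timeout - if POLLIN is not pending, everything queued
 * before the request's timestamp has already been read. The same check with
 * plain poll(), as a rough equivalent of fd_wait_for_event(fd, POLLIN, 0):
 */
#include <poll.h>

/* returns 1 if fd has readable data queued right now, 0 if it is idle, -1 on error */
int has_pending_input(int fd) {
        struct pollfd p = { .fd = fd, .events = POLLIN };
        int r = poll(&p, 1, 0);   /* timeout of 0: never blocks */
        if (r < 0)
                return -1;
        return r > 0 && (p.revents & POLLIN);
}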
/* SPDX-License-Identifier: LGPL-2.1-or-later */
#pragma once
-typedef struct Server Server;
+typedef struct Manager Manager;
typedef struct StreamSyncReq StreamSyncReq;
typedef struct SyncReq SyncReq;
/* Encapsulates a synchronization request */
struct SyncReq {
- Server *server;
+ Manager *manager;
sd_varlink *link;
bool offline; /* if true, we'll offline the journal files after sync is complete */
DEFINE_TRIVIAL_CLEANUP_FUNC(StreamSyncReq*, stream_sync_req_free);
void stream_sync_req_advance_revalidate(StreamSyncReq *ssr, size_t p);
-int sync_req_new(Server *s, sd_varlink *link, SyncReq **ret);
+int sync_req_new(Manager *m, sd_varlink *link, SyncReq **ret);
SyncReq* sync_req_free(SyncReq *req);
DEFINE_TRIVIAL_CLEANUP_FUNC(SyncReq*, sync_req_free);
bool sync_req_revalidate(SyncReq *req);
-void sync_req_revalidate_by_timestamp(Server *s);
+void sync_req_revalidate_by_timestamp(Manager *m);
-void server_notify_stream(Server *s, StdoutStream *ss);
+void manager_notify_stream(Manager *m, StdoutStream *ss);
#include "journald-client.h"
#include "journald-console.h"
#include "journald-kmsg.h"
-#include "journald-server.h"
+#include "journald-manager.h"
#include "journald-syslog.h"
#include "journald-wall.h"
#include "process-util.h"
#define WARN_FORWARD_SYSLOG_MISSED_USEC (30 * USEC_PER_SEC)
static void forward_syslog_iovec(
- Server *s,
+ Manager *m,
const struct iovec *iovec,
unsigned n_iovec,
const struct ucred *ucred,
const char *j;
int r;
- assert(s);
+ assert(m);
assert(iovec);
assert(n_iovec > 0);
- j = strjoina(s->runtime_directory, "/syslog");
+ j = strjoina(m->runtime_directory, "/syslog");
r = sockaddr_un_set_path(&sa.un, j);
if (r < 0) {
log_debug_errno(r, "Forwarding socket path %s too long for AF_UNIX, not forwarding: %m", j);
/* Forward the syslog message we received via /dev/log to /run/systemd/syslog. Unfortunately we
* currently can't set the SO_TIMESTAMP auxiliary data, and hence we don't. */
- if (sendmsg(s->syslog_fd, &msghdr, MSG_NOSIGNAL) >= 0)
+ if (sendmsg(m->syslog_fd, &msghdr, MSG_NOSIGNAL) >= 0)
return;
/* The socket is full? I guess the syslog implementation is
* too slow, and we shouldn't wait for that... */
if (errno == EAGAIN) {
- s->n_forward_syslog_missed++;
+ m->n_forward_syslog_missed++;
return;
}
u.pid = getpid_cached();
memcpy(CMSG_DATA(cmsg), &u, sizeof(struct ucred));
- if (sendmsg(s->syslog_fd, &msghdr, MSG_NOSIGNAL) >= 0)
+ if (sendmsg(m->syslog_fd, &msghdr, MSG_NOSIGNAL) >= 0)
return;
if (errno == EAGAIN) {
- s->n_forward_syslog_missed++;
+ m->n_forward_syslog_missed++;
return;
}
}
log_debug_errno(errno, "Failed to forward syslog message: %m");
}
-static void forward_syslog_raw(Server *s, int priority, const char *buffer, size_t buffer_len, const struct ucred *ucred, const struct timeval *tv) {
+static void forward_syslog_raw(
+ Manager *m,
+ int priority,
+ const char *buffer,
+ size_t buffer_len,
+ const struct ucred *ucred,
+ const struct timeval *tv) {
+
struct iovec iovec;
- assert(s);
+ assert(m);
assert(buffer);
- if (LOG_PRI(priority) > s->max_level_syslog)
+ if (LOG_PRI(priority) > m->max_level_syslog)
return;
iovec = IOVEC_MAKE((char *) buffer, buffer_len);
- forward_syslog_iovec(s, &iovec, 1, ucred, tv);
+ forward_syslog_iovec(m, &iovec, 1, ucred, tv);
}
-void server_forward_syslog(Server *s, int priority, const char *identifier, const char *message, const struct ucred *ucred, const struct timeval *tv) {
+void manager_forward_syslog(
+ Manager *m,
+ int priority,
+ const char *identifier,
+ const char *message,
+ const struct ucred *ucred,
+ const struct timeval *tv) {
+
struct iovec iovec[5];
char header_priority[DECIMAL_STR_MAX(priority) + 3], header_time[64],
header_pid[STRLEN("[]: ") + DECIMAL_STR_MAX(pid_t) + 1];
struct tm tm;
_cleanup_free_ char *ident_buf = NULL;
- assert(s);
+ assert(m);
assert(priority >= 0);
assert(priority <= 999);
assert(message);
- if (LOG_PRI(priority) > s->max_level_syslog)
+ if (LOG_PRI(priority) > m->max_level_syslog)
return;
/* First: priority field */
/* Fourth: message */
iovec[n++] = IOVEC_MAKE_STRING(message);
- forward_syslog_iovec(s, iovec, n, ucred, tv);
+ forward_syslog_iovec(m, iovec, n, ucred, tv);
}
int syslog_fixup_facility(int priority) {
return p - t;
}
-void server_process_syslog_message(
- Server *s,
+void manager_process_syslog_message(
+ Manager *m,
const char *buf,
size_t raw_len,
const struct ucred *ucred,
int priority = LOG_USER | LOG_INFO, r;
ClientContext *context = NULL;
struct iovec *iovec;
- size_t n = 0, m, i, leading_ws, syslog_ts_len;
+ size_t n = 0, mm, i, leading_ws, syslog_ts_len;
bool store_raw;
- assert(s);
+ assert(m);
assert(buf);
/* The message cannot be empty. */
assert(raw_len > 0);
assert(buf[raw_len] == '\0');
if (ucred && pid_is_valid(ucred->pid)) {
- r = client_context_get(s, ucred->pid, ucred, label, label_len, NULL, &context);
+ r = client_context_get(m, ucred->pid, ucred, label, label_len, NULL, &context);
if (r < 0)
log_ratelimit_warning_errno(r, JOURNAL_LOG_RATELIMIT,
"Failed to retrieve credentials for PID " PID_FMT ", ignoring: %m",
syslog_parse_identifier(&msg, &identifier, &pid);
- if (s->forward_to_syslog)
- forward_syslog_raw(s, priority, buf, raw_len, ucred, tv);
+ if (m->forward_to_syslog)
+ forward_syslog_raw(m, priority, buf, raw_len, ucred, tv);
- if (s->forward_to_kmsg)
- server_forward_kmsg(s, priority, identifier, msg, ucred);
+ if (m->forward_to_kmsg)
+ manager_forward_kmsg(m, priority, identifier, msg, ucred);
- if (s->forward_to_console)
- server_forward_console(s, priority, identifier, msg, ucred);
+ if (m->forward_to_console)
+ manager_forward_console(m, priority, identifier, msg, ucred);
- if (s->forward_to_wall)
- server_forward_wall(s, priority, identifier, msg, ucred);
+ if (m->forward_to_wall)
+ manager_forward_wall(m, priority, identifier, msg, ucred);
- m = N_IOVEC_META_FIELDS + 8 + client_context_extra_fields_n_iovec(context);
- iovec = newa(struct iovec, m);
+ mm = N_IOVEC_META_FIELDS + 8 + client_context_extra_fields_n_iovec(context);
+ iovec = newa(struct iovec, mm);
iovec[n++] = IOVEC_MAKE_STRING("_TRANSPORT=syslog");
iovec[n++] = IOVEC_MAKE(msg_raw, hlen + raw_len);
}
- server_dispatch_message(s, iovec, n, m, context, tv, priority, 0);
+ manager_dispatch_message(m, iovec, n, mm, context, tv, priority, 0);
}
-int server_open_syslog_socket(Server *s, const char *syslog_socket) {
+int manager_open_syslog_socket(Manager *m, const char *syslog_socket) {
int r;
- assert(s);
+ assert(m);
assert(syslog_socket);
- if (s->syslog_fd < 0) {
+ if (m->syslog_fd < 0) {
union sockaddr_union sa;
socklen_t sa_len;
return log_error_errno(r, "Unable to use namespace path %s for AF_UNIX socket: %m", syslog_socket);
sa_len = r;
- s->syslog_fd = socket(AF_UNIX, SOCK_DGRAM|SOCK_CLOEXEC|SOCK_NONBLOCK, 0);
- if (s->syslog_fd < 0)
+ m->syslog_fd = socket(AF_UNIX, SOCK_DGRAM|SOCK_CLOEXEC|SOCK_NONBLOCK, 0);
+ if (m->syslog_fd < 0)
return log_error_errno(errno, "socket() failed: %m");
(void) sockaddr_un_unlink(&sa.un);
- r = bind(s->syslog_fd, &sa.sa, sa_len);
+ r = bind(m->syslog_fd, &sa.sa, sa_len);
if (r < 0)
return log_error_errno(errno, "bind(%s) failed: %m", sa.un.sun_path);
(void) chmod(sa.un.sun_path, 0666);
} else
- (void) fd_nonblock(s->syslog_fd, true);
+ (void) fd_nonblock(m->syslog_fd, true);
- r = setsockopt_int(s->syslog_fd, SOL_SOCKET, SO_PASSCRED, true);
+ r = setsockopt_int(m->syslog_fd, SOL_SOCKET, SO_PASSCRED, true);
if (r < 0)
return log_error_errno(r, "SO_PASSCRED failed: %m");
if (mac_selinux_use()) {
- r = setsockopt_int(s->syslog_fd, SOL_SOCKET, SO_PASSSEC, true);
+ r = setsockopt_int(m->syslog_fd, SOL_SOCKET, SO_PASSSEC, true);
if (r < 0)
log_warning_errno(r, "SO_PASSSEC failed: %m");
}
- r = setsockopt_int(s->syslog_fd, SOL_SOCKET, SO_TIMESTAMP, true);
+ r = setsockopt_int(m->syslog_fd, SOL_SOCKET, SO_TIMESTAMP, true);
if (r < 0)
return log_error_errno(r, "SO_TIMESTAMP failed: %m");
- r = sd_event_add_io(s->event, &s->syslog_event_source, s->syslog_fd, EPOLLIN, server_process_datagram, s);
+ r = sd_event_add_io(m->event, &m->syslog_event_source, m->syslog_fd, EPOLLIN, manager_process_datagram, m);
if (r < 0)
- return log_error_errno(r, "Failed to add syslog server fd to event loop: %m");
+ return log_error_errno(r, "Failed to add syslog sevrer fd to event loop: %m");
- r = sd_event_source_set_priority(s->syslog_event_source, SD_EVENT_PRIORITY_NORMAL+5);
+ r = sd_event_source_set_priority(m->syslog_event_source, SD_EVENT_PRIORITY_NORMAL+5);
if (r < 0)
return log_error_errno(r, "Failed to adjust syslog event source priority: %m");
return 0;
}
-void server_maybe_warn_forward_syslog_missed(Server *s) {
+void manager_maybe_warn_forward_syslog_missed(Manager *m) {
usec_t n;
- assert(s);
+ assert(m);
- if (s->n_forward_syslog_missed <= 0)
+ if (m->n_forward_syslog_missed <= 0)
return;
n = now(CLOCK_MONOTONIC);
- if (s->last_warn_forward_syslog_missed + WARN_FORWARD_SYSLOG_MISSED_USEC > n)
+ if (m->last_warn_forward_syslog_missed + WARN_FORWARD_SYSLOG_MISSED_USEC > n)
return;
- server_driver_message(s, 0,
- LOG_MESSAGE_ID(SD_MESSAGE_FORWARD_SYSLOG_MISSED_STR),
- LOG_MESSAGE("Forwarding to syslog missed %u messages.",
- s->n_forward_syslog_missed));
+ manager_driver_message(m, 0,
+ LOG_MESSAGE_ID(SD_MESSAGE_FORWARD_SYSLOG_MISSED_STR),
+ LOG_MESSAGE("Forwarding to syslog missed %u messages.",
+ m->n_forward_syslog_missed));
- s->n_forward_syslog_missed = 0;
- s->last_warn_forward_syslog_missed = n;
+ m->n_forward_syslog_missed = 0;
+ m->last_warn_forward_syslog_missed = n;
}
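/*
 * Illustrative sketch: failed forwards only bump n_forward_syslog_missed, and
 * manager_maybe_warn_forward_syslog_missed() reports the counter at most once
 * per WARN_FORWARD_SYSLOG_MISSED_USEC. The same rate-limited warning pattern
 * with plain CLOCK_MONOTONIC (interval chosen to match the 30s above):
 */
#include <stdio.h>
#include <time.h>

#define WARN_INTERVAL_SEC 30

static unsigned n_missed;
static time_t last_warn;

void note_missed_forward(void) {
        n_missed++;   /* called from the send path on EAGAIN */
}

void maybe_warn_missed(void) {
        struct timespec ts;

        if (n_missed == 0)
                return;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        if (last_warn != 0 && ts.tv_sec - last_warn < WARN_INTERVAL_SEC)
                return;   /* warned recently, stay quiet for now */

        fprintf(stderr, "Forwarding to syslog missed %u messages.\n", n_missed);
        n_missed = 0;
        last_warn = ts.tv_sec;
}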
#include "macro.h"
-typedef struct Server Server;
+typedef struct Manager Manager;
int syslog_fixup_facility(int priority) _const_;
size_t syslog_parse_identifier(const char **buf, char **identifier, char **pid);
-void server_forward_syslog(Server *s, int priority, const char *identifier, const char *message, const struct ucred *ucred, const struct timeval *tv);
+void manager_forward_syslog(Manager *m, int priority, const char *identifier, const char *message, const struct ucred *ucred, const struct timeval *tv);
-void server_process_syslog_message(Server *s, const char *buf, size_t buf_len, const struct ucred *ucred, const struct timeval *tv, const char *label, size_t label_len);
-int server_open_syslog_socket(Server *s, const char *syslog_socket);
+void manager_process_syslog_message(Manager *m, const char *buf, size_t buf_len, const struct ucred *ucred, const struct timeval *tv, const char *label, size_t label_len);
+int manager_open_syslog_socket(Manager *m, const char *syslog_socket);
-void server_maybe_warn_forward_syslog_missed(Server *s);
+void manager_maybe_warn_forward_syslog_missed(Manager *m);
/* SPDX-License-Identifier: LGPL-2.1-or-later */
-#include "journald-server.h"
+#include "journald-manager.h"
#include "journald-sync.h"
#include "journald-varlink.h"
#include "varlink-io.systemd.Journal.h"
* anymore. */
if (req->offline)
- server_full_sync(req->server, /* wait = */ true);
+ manager_full_sync(req->manager, /* wait = */ true);
/* Disconnect the SyncReq from the Varlink connection object, and free it */
_cleanup_(sd_varlink_unrefp) sd_varlink *vl = TAKE_PTR(req->link);
- sd_varlink_set_userdata(vl, req->server); /* reinstall server object */
+ sd_varlink_set_userdata(vl, req->manager); /* reinstall manager object */
req = sync_req_free(req);
r = sd_varlink_reply(vl, NULL);
{}
};
- Server *s = ASSERT_PTR(userdata);
+ Manager *m = ASSERT_PTR(userdata);
int r;
assert(link);
_cleanup_(sync_req_freep) SyncReq *sr = NULL;
- r = sync_req_new(s, link, &sr);
+ r = sync_req_new(m, link, &sr);
if (r < 0)
return r;
}
static int vl_method_rotate(sd_varlink *link, sd_json_variant *parameters, sd_varlink_method_flags_t flags, void *userdata) {
- Server *s = ASSERT_PTR(userdata);
+ Manager *m = ASSERT_PTR(userdata);
int r;
assert(link);
return r;
log_info("Received client request to rotate journal, rotating.");
- server_full_rotate(s);
+ manager_full_rotate(m);
return sd_varlink_reply(link, NULL);
}
static int vl_method_flush_to_var(sd_varlink *link, sd_json_variant *parameters, sd_varlink_method_flags_t flags, void *userdata) {
- Server *s = ASSERT_PTR(userdata);
+ Manager *m = ASSERT_PTR(userdata);
int r;
assert(link);
if (r < 0)
return r;
- if (s->namespace)
+ if (m->namespace)
return sd_varlink_error(link, "io.systemd.Journal.NotSupportedByNamespaces", NULL);
log_info("Received client request to flush runtime journal.");
- server_full_flush(s);
+ manager_full_flush(m);
return sd_varlink_reply(link, NULL);
}
static int vl_method_relinquish_var(sd_varlink *link, sd_json_variant *parameters, sd_varlink_method_flags_t flags, void *userdata) {
- Server *s = ASSERT_PTR(userdata);
+ Manager *m = ASSERT_PTR(userdata);
int r;
assert(link);
if (r < 0)
return r;
- if (s->namespace)
+ if (m->namespace)
return sd_varlink_error(link, "io.systemd.Journal.NotSupportedByNamespaces", NULL);
- log_info("Received client request to relinquish %s access.", s->system_storage.path);
- server_relinquish_var(s);
+ log_info("Received client request to relinquish %s access.", m->system_storage.path);
+ manager_relinquish_var(m);
return sd_varlink_reply(link, NULL);
}
static int vl_connect(sd_varlink_server *server, sd_varlink *link, void *userdata) {
- Server *s = ASSERT_PTR(userdata);
+ Manager *m = ASSERT_PTR(userdata);
assert(server);
assert(link);
- (void) server_start_or_stop_idle_timer(s); /* maybe we are no longer idle */
+ (void) manager_start_or_stop_idle_timer(m); /* maybe we are no longer idle */
return 0;
}
static void vl_disconnect(sd_varlink_server *server, sd_varlink *link, void *userdata) {
- Server *s = ASSERT_PTR(userdata);
+ Manager *m = ASSERT_PTR(userdata);
assert(server);
assert(link);
void *u = sd_varlink_get_userdata(link);
- if (u != s) {
+ if (u != m) {
- /* If this is a Varlink connection that does not have the Server object as userdata, then it has a SyncReq object instead. Let's finish it. */
+ /* If this is a Varlink connection that does not have the Manager object as userdata, then it has a SyncReq object instead. Let's finish it. */
SyncReq *req = u;
- sd_varlink_set_userdata(link, s); /* reinstall server object */
+ sd_varlink_set_userdata(link, m); /* reinstall manager object */
sync_req_free(req);
}
- (void) server_start_or_stop_idle_timer(s); /* maybe we are idle now */
+ (void) manager_start_or_stop_idle_timer(m); /* maybe we are idle now */
}
-int server_open_varlink(Server *s, const char *socket, int fd) {
+int manager_open_varlink(Manager *m, const char *socket, int fd) {
int r;
- assert(s);
+ assert(m);
r = varlink_server_new(
- &s->varlink_server,
+ &m->varlink_server,
SD_VARLINK_SERVER_ACCOUNT_UID|SD_VARLINK_SERVER_INHERIT_USERDATA,
- s);
+ m);
if (r < 0)
return log_error_errno(r, "Failed to allocate varlink server object: %m");
r = sd_varlink_server_add_interface_many(
- s->varlink_server,
+ m->varlink_server,
&vl_interface_io_systemd_Journal,
&vl_interface_io_systemd_service);
if (r < 0)
return log_error_errno(r, "Failed to add Journal interface to varlink server: %m");
r = sd_varlink_server_bind_method_many(
- s->varlink_server,
+ m->varlink_server,
"io.systemd.Journal.Synchronize", vl_method_synchronize,
"io.systemd.Journal.Rotate", vl_method_rotate,
"io.systemd.Journal.FlushToVar", vl_method_flush_to_var,
if (r < 0)
return r;
- r = sd_varlink_server_bind_connect(s->varlink_server, vl_connect);
+ r = sd_varlink_server_bind_connect(m->varlink_server, vl_connect);
if (r < 0)
return r;
- r = sd_varlink_server_bind_disconnect(s->varlink_server, vl_disconnect);
+ r = sd_varlink_server_bind_disconnect(m->varlink_server, vl_disconnect);
if (r < 0)
return r;
if (fd < 0)
- r = sd_varlink_server_listen_address(s->varlink_server, socket, 0666);
+ r = sd_varlink_server_listen_address(m->varlink_server, socket, 0666);
else
- r = sd_varlink_server_listen_fd(s->varlink_server, fd);
+ r = sd_varlink_server_listen_fd(m->varlink_server, fd);
if (r < 0)
return r;
- r = sd_varlink_server_attach_event(s->varlink_server, s->event, SD_EVENT_PRIORITY_NORMAL);
+ r = sd_varlink_server_attach_event(m->varlink_server, m->event, SD_EVENT_PRIORITY_NORMAL);
if (r < 0)
return r;
/* SPDX-License-Identifier: LGPL-2.1-or-later */
#pragma once
-#include "journald-server.h"
+#include "journald-manager.h"
#include "journald-sync.h"
-int server_open_varlink(Server *s, const char *socket, int fd);
+int manager_open_varlink(Manager *m, const char *socket, int fd);
void sync_req_varlink_reply(SyncReq *req);
#include "alloc-util.h"
#include "format-util.h"
-#include "journald-server.h"
+#include "journald-manager.h"
#include "journald-wall.h"
#include "process-util.h"
#include "string-util.h"
#include "wall.h"
-void server_forward_wall(
- Server *s,
+void manager_forward_wall(
+ Manager *m,
int priority,
const char *identifier,
const char *message,
const char *l;
int r;
- assert(s);
+ assert(m);
assert(message);
- if (LOG_PRI(priority) > s->max_level_wall)
+ if (LOG_PRI(priority) > m->max_level_wall)
return;
if (ucred) {
#include <sys/socket.h>
-typedef struct Server Server;
+typedef struct Manager Manager;
-void server_forward_wall(Server *s, int priority, const char *identifier, const char *message, const struct ucred *ucred);
+void manager_forward_wall(Manager *m, int priority, const char *identifier, const char *message, const struct ucred *ucred);
#include "format-util.h"
#include "journal-authenticate.h"
#include "journald-kmsg.h"
-#include "journald-server.h"
+#include "journald-manager.h"
#include "journald-syslog.h"
#include "main-func.h"
#include "process-util.h"
#include "terminal-util.h"
static int run(int argc, char *argv[]) {
- _cleanup_(server_freep) Server *s = NULL;
+ _cleanup_(manager_freep) Manager *m = NULL;
const char *namespace;
LogTarget log_target;
int r;
sigbus_install();
- r = server_new(&s);
+ r = manager_new(&m);
if (r < 0)
return log_oom();
- r = server_init(s, namespace);
+ r = manager_init(m, namespace);
if (r < 0)
return r;
- server_vacuum(s, /* verbose = */ false);
- server_flush_to_var(s, /* require_flag_file = */ true);
- server_flush_dev_kmsg(s);
+ manager_vacuum(m, /* verbose = */ false);
+ manager_flush_to_var(m, /* require_flag_file = */ true);
+ manager_flush_dev_kmsg(m);
- if (s->namespace)
- log_debug("systemd-journald running as PID "PID_FMT" for namespace '%s'.", getpid_cached(), s->namespace);
+ if (m->namespace)
+ log_debug("systemd-journald running as PID "PID_FMT" for namespace '%s'.", getpid_cached(), m->namespace);
else
log_debug("systemd-journald running as PID "PID_FMT" for the system.", getpid_cached());
- server_driver_message(s, 0,
- LOG_MESSAGE_ID(SD_MESSAGE_JOURNAL_START_STR),
- LOG_MESSAGE("Journal started"));
+ manager_driver_message(m, 0,
+ LOG_MESSAGE_ID(SD_MESSAGE_JOURNAL_START_STR),
+ LOG_MESSAGE("Journal started"));
/* Make sure to send the usage message *after* flushing the
* journal so entries from the runtime journals are ordered
* before this message. See #4190 for some details. */
- server_space_usage_message(s, NULL);
+ manager_space_usage_message(m, NULL);
for (;;) {
usec_t t, n;
- r = sd_event_get_state(s->event);
+ r = sd_event_get_state(m->event);
if (r < 0)
return log_error_errno(r, "Failed to get event loop state: %m");
if (r == SD_EVENT_FINISHED)
break;
- r = sd_event_now(s->event, CLOCK_REALTIME, &n);
+ r = sd_event_now(m->event, CLOCK_REALTIME, &n);
if (r < 0)
return log_error_errno(r, "Failed to get the current time: %m");
- if (s->max_retention_usec > 0 && s->oldest_file_usec > 0) {
+ if (m->max_retention_usec > 0 && m->oldest_file_usec > 0) {
/* Calculate when to rotate the next time */
- t = usec_sub_unsigned(usec_add(s->oldest_file_usec, s->max_retention_usec), n);
+ t = usec_sub_unsigned(usec_add(m->oldest_file_usec, m->max_retention_usec), n);
/* The retention time is reached, so let's vacuum! */
if (t <= 0) {
log_info("Retention time reached, vacuuming.");
- server_vacuum(s, /* verbose = */ false);
+ manager_vacuum(m, /* verbose = */ false);
continue;
}
} else
t = USEC_INFINITY;
#if HAVE_GCRYPT
- if (s->system_journal) {
+ if (m->system_journal) {
usec_t u;
- if (journal_file_next_evolve_usec(s->system_journal, &u))
+ if (journal_file_next_evolve_usec(m->system_journal, &u))
t = MIN(t, usec_sub_unsigned(u, n));
}
#endif
- r = sd_event_run(s->event, t);
+ r = sd_event_run(m->event, t);
if (r < 0)
return log_error_errno(r, "Failed to run event loop: %m");
- server_maybe_append_tags(s);
- server_maybe_warn_forward_syslog_missed(s);
+ manager_maybe_append_tags(m);
+ manager_maybe_warn_forward_syslog_missed(m);
}
- if (s->namespace)
- log_debug("systemd-journald stopped as PID "PID_FMT" for namespace '%s'.", getpid_cached(), s->namespace);
+ if (m->namespace)
+ log_debug("systemd-journald stopped as PID "PID_FMT" for namespace '%s'.", getpid_cached(), m->namespace);
else
log_debug("systemd-journald stopped as PID "PID_FMT" for the system.", getpid_cached());
- server_driver_message(s, 0,
- LOG_MESSAGE_ID(SD_MESSAGE_JOURNAL_STOP_STR),
- LOG_MESSAGE("Journal stopped"));
+ manager_driver_message(m, 0,
+ LOG_MESSAGE_ID(SD_MESSAGE_JOURNAL_STOP_STR),
+ LOG_MESSAGE("Journal stopped"));
return 0;
}
'journald-console.c',
'journald-context.c',
'journald-kmsg.c',
+ 'journald-manager.c',
'journald-native.c',
'journald-rate-limit.c',
- 'journald-server.c',
'journald-socket.c',
'journald-stream.c',
'journald-sync.c',
#include <string.h>
#include <sys/un.h>
-#include "journald-server.h"
+#include "journald-manager.h"
#include "log.h"
#include "path-util.h"
#include "socket-util.h"
/* SPDX-License-Identifier: LGPL-2.1-or-later */
-#include "journald-server.h"
+#include "journald-manager.h"
#include "test-tables.h"
#include "tests.h"