1 /* SPDX-License-Identifier: LGPL-2.1-or-later */
6 #include <linux/magic.h>
9 #include <sys/inotify.h>
13 #include "sd-journal.h"
15 #include "alloc-util.h"
18 #include "dirent-util.h"
23 #include "format-util.h"
26 #include "hostname-util.h"
27 #include "id128-util.h"
28 #include "inotify-util.h"
30 #include "journal-def.h"
31 #include "journal-file.h"
32 #include "journal-internal.h"
35 #include "nulstr-util.h"
36 #include "path-util.h"
37 #include "process-util.h"
38 #include "replace-var.h"
39 #include "stat-util.h"
40 #include "stdio-util.h"
41 #include "string-util.h"
43 #include "syslog-util.h"
45 #define JOURNAL_FILES_MAX 7168
47 #define JOURNAL_FILES_RECHECK_USEC (2 * USEC_PER_SEC)
49 /* The maximum size of variable values we'll expand in catalog entries. We bind this to PATH_MAX for now, as
50 * we want to be able to show all officially valid paths at least */
51 #define REPLACE_VAR_MAX PATH_MAX
53 #define DEFAULT_DATA_THRESHOLD (64*1024)
55 static void remove_file_real(sd_journal
*j
, JournalFile
*f
);
57 static bool journal_pid_changed(sd_journal
*j
) {
60 /* We don't support people creating a journal object and
61 * keeping it around over a fork(). Let's complain. */
63 return j
->original_pid
!= getpid_cached();
66 static int journal_put_error(sd_journal
*j
, int r
, const char *path
) {
67 _cleanup_free_
char *copy
= NULL
;
70 /* Memorize an error we encountered, and store which
71 * file/directory it was generated from. Note that we store
72 * only *one* path per error code, as the error code is the
73 * key into the hashmap, and the path is the value. This means
74 * we keep track only of all error kinds, but not of all error
75 * locations. This has the benefit that the hashmap cannot
78 * We return an error here only if we didn't manage to
79 * memorize the real error. */
90 k
= hashmap_ensure_put(&j
->errors
, NULL
, INT_TO_PTR(r
), copy
);
102 static void detach_location(sd_journal
*j
) {
107 j
->current_file
= NULL
;
108 j
->current_field
= 0;
110 ORDERED_HASHMAP_FOREACH(f
, j
->files
)
111 journal_file_reset_location(f
);
/* Fill in a Location from an entry object: every coordinate (seqnum,
 * realtime, monotonic, xor hash) is recorded so later comparisons can
 * use whichever is meaningful. */
static void init_location(Location *l, LocationType type, JournalFile *f, Object *o) {
        assert(l);
        assert(IN_SET(type, LOCATION_DISCRETE, LOCATION_SEEK));
        assert(f);
        assert(o);

        *l = (Location) {
                .type = type,
                .seqnum = le64toh(o->entry.seqnum),
                .seqnum_id = f->header->seqnum_id,
                .realtime = le64toh(o->entry.realtime),
                .monotonic = le64toh(o->entry.monotonic),
                .boot_id = o->entry.boot_id,
                .xor_hash = le64toh(o->entry.xor_hash),
                .seqnum_set = true,
                .realtime_set = true,
                .monotonic_set = true,
                .xor_hash_set = true,
        };
}
134 static void set_location(sd_journal
*j
, JournalFile
*f
, Object
*o
) {
139 init_location(&j
->current_location
, LOCATION_DISCRETE
, f
, o
);
142 j
->current_field
= 0;
144 /* Let f know its candidate entry was picked. */
145 assert(f
->location_type
== LOCATION_SEEK
);
146 f
->location_type
= LOCATION_DISCRETE
;
/* A valid match has the form "FIELD=value": at least one character of
 * field name before the '=', with the field name drawn from [A-Z0-9_]
 * and not starting with "__" (double underscore is reserved). */
static int match_is_valid(const void *data, size_t size) {
        const char *b, *p;

        assert(data);

        if (size < 2)
                return false;

        if (((char*) data)[0] == '_' && ((char*) data)[1] == '_')
                return false;

        b = data;
        for (p = b; p < b + size; p++) {

                if (*p == '=')
                        return p > b; /* valid only if the field name is non-empty */

                if (*p == '_')
                        continue;

                if (*p >= 'A' && *p <= 'Z')
                        continue;

                if (*p >= '0' && *p <= '9')
                        continue;

                return false;
        }

        /* No '=' found at all → not a match expression */
        return false;
}
/* Returns true if the two matches carry the same field name (the part
 * up to and including '='). Both inputs are assumed to be valid match
 * expressions, i.e. to contain a '=', hence running off the end of the
 * loop is unreachable. */
static bool same_field(const void *_a, size_t s, const void *_b, size_t t) {
        const uint8_t *a = _a, *b = _b;
        size_t j;

        for (j = 0; j < s && j < t; j++) {

                if (a[j] != b[j])
                        return false;

                if (a[j] == '=')
                        return true;
        }

        assert_not_reached();
}
197 static Match
*match_new(Match
*p
, MatchType t
) {
210 LIST_PREPEND(matches
, p
->matches
, m
);
215 static Match
*match_free(Match
*m
) {
219 match_free(m
->matches
);
222 LIST_REMOVE(matches
, m
->parent
->matches
, m
);
228 static Match
*match_free_if_empty(Match
*m
) {
229 if (!m
|| m
->matches
)
232 return match_free(m
);
235 _public_
int sd_journal_add_match(sd_journal
*j
, const void *data
, size_t size
) {
236 Match
*l3
, *l4
, *add_here
= NULL
, *m
= NULL
;
239 assert_return(j
, -EINVAL
);
240 assert_return(!journal_pid_changed(j
), -ECHILD
);
241 assert_return(data
, -EINVAL
);
246 assert_return(match_is_valid(data
, size
), -EINVAL
);
252 * level 4: concrete matches */
255 j
->level0
= match_new(NULL
, MATCH_AND_TERM
);
261 j
->level1
= match_new(j
->level0
, MATCH_OR_TERM
);
267 j
->level2
= match_new(j
->level1
, MATCH_AND_TERM
);
272 assert(j
->level0
->type
== MATCH_AND_TERM
);
273 assert(j
->level1
->type
== MATCH_OR_TERM
);
274 assert(j
->level2
->type
== MATCH_AND_TERM
);
276 /* Old-style Jenkins (unkeyed) hashing only here. We do not cover new-style siphash (keyed) hashing
277 * here, since it's different for each file, and thus can't be pre-calculated in the Match object. */
278 hash
= jenkins_hash64(data
, size
);
280 LIST_FOREACH(matches
, l3
, j
->level2
->matches
) {
281 assert(l3
->type
== MATCH_OR_TERM
);
283 LIST_FOREACH(matches
, l4
, l3
->matches
) {
284 assert(l4
->type
== MATCH_DISCRETE
);
286 /* Exactly the same match already? Then ignore
288 if (l4
->hash
== hash
&&
290 memcmp(l4
->data
, data
, size
) == 0)
293 /* Same field? Then let's add this to this OR term */
294 if (same_field(data
, size
, l4
->data
, l4
->size
)) {
305 add_here
= match_new(j
->level2
, MATCH_OR_TERM
);
310 m
= match_new(add_here
, MATCH_DISCRETE
);
316 m
->data
= memdup(data
, size
);
326 match_free_if_empty(add_here
);
327 j
->level2
= match_free_if_empty(j
->level2
);
328 j
->level1
= match_free_if_empty(j
->level1
);
329 j
->level0
= match_free_if_empty(j
->level0
);
334 _public_
int sd_journal_add_conjunction(sd_journal
*j
) {
335 assert_return(j
, -EINVAL
);
336 assert_return(!journal_pid_changed(j
), -ECHILD
);
344 if (!j
->level1
->matches
)
353 _public_
int sd_journal_add_disjunction(sd_journal
*j
) {
354 assert_return(j
, -EINVAL
);
355 assert_return(!journal_pid_changed(j
), -ECHILD
);
366 if (!j
->level2
->matches
)
373 static char *match_make_string(Match
*m
) {
376 bool enclose
= false;
379 return strdup("none");
381 if (m
->type
== MATCH_DISCRETE
)
382 return cescape_length(m
->data
, m
->size
);
384 LIST_FOREACH(matches
, i
, m
->matches
) {
387 t
= match_make_string(i
);
392 k
= strjoin(p
, m
->type
== MATCH_OR_TERM
? " OR " : " AND ", t
);
407 r
= strjoin("(", p
, ")");
415 char *journal_make_match_string(sd_journal
*j
) {
418 return match_make_string(j
->level0
);
421 _public_
void sd_journal_flush_matches(sd_journal
*j
) {
426 match_free(j
->level0
);
428 j
->level0
= j
->level1
= j
->level2
= NULL
;
433 _pure_
static int compare_with_location(const JournalFile
*f
, const Location
*l
, const JournalFile
*current_file
) {
438 assert(f
->location_type
== LOCATION_SEEK
);
439 assert(IN_SET(l
->type
, LOCATION_DISCRETE
, LOCATION_SEEK
));
441 if (l
->monotonic_set
&&
442 sd_id128_equal(f
->current_boot_id
, l
->boot_id
) &&
444 f
->current_realtime
== l
->realtime
&&
446 f
->current_xor_hash
== l
->xor_hash
&&
448 sd_id128_equal(f
->header
->seqnum_id
, l
->seqnum_id
) &&
449 f
->current_seqnum
== l
->seqnum
&&
454 sd_id128_equal(f
->header
->seqnum_id
, l
->seqnum_id
)) {
456 r
= CMP(f
->current_seqnum
, l
->seqnum
);
461 if (l
->monotonic_set
&&
462 sd_id128_equal(f
->current_boot_id
, l
->boot_id
)) {
464 r
= CMP(f
->current_monotonic
, l
->monotonic
);
469 if (l
->realtime_set
) {
471 r
= CMP(f
->current_realtime
, l
->realtime
);
476 if (l
->xor_hash_set
) {
478 r
= CMP(f
->current_xor_hash
, l
->xor_hash
);
486 static int next_for_match(
490 uint64_t after_offset
,
491 direction_t direction
,
503 if (m
->type
== MATCH_DISCRETE
) {
507 /* If the keyed hash logic is used, we need to calculate the hash fresh per file. Otherwise
508 * we can use what we pre-calculated. */
509 if (JOURNAL_HEADER_KEYED_HASH(f
->header
))
510 hash
= journal_file_hash_data(f
, m
->data
, m
->size
);
514 r
= journal_file_find_data_object_with_hash(f
, m
->data
, m
->size
, hash
, &d
, NULL
);
518 return journal_file_move_to_entry_by_offset_for_data(f
, d
, after_offset
, direction
, ret
, offset
);
520 } else if (m
->type
== MATCH_OR_TERM
) {
523 /* Find the earliest match beyond after_offset */
525 LIST_FOREACH(matches
, i
, m
->matches
) {
528 r
= next_for_match(j
, i
, f
, after_offset
, direction
, NULL
, &cp
);
532 if (np
== 0 || (direction
== DIRECTION_DOWN
? cp
< np
: cp
> np
))
540 } else if (m
->type
== MATCH_AND_TERM
) {
541 Match
*i
, *last_moved
;
543 /* Always jump to the next matching entry and repeat
544 * this until we find an offset that matches for all
550 r
= next_for_match(j
, m
->matches
, f
, after_offset
, direction
, NULL
, &np
);
554 assert(direction
== DIRECTION_DOWN
? np
>= after_offset
: np
<= after_offset
);
555 last_moved
= m
->matches
;
557 LIST_LOOP_BUT_ONE(matches
, i
, m
->matches
, last_moved
) {
560 r
= next_for_match(j
, i
, f
, np
, direction
, NULL
, &cp
);
564 assert(direction
== DIRECTION_DOWN
? cp
>= np
: cp
<= np
);
565 if (direction
== DIRECTION_DOWN
? cp
> np
: cp
< np
) {
574 r
= journal_file_move_to_object(f
, OBJECT_ENTRY
, np
, &n
);
586 static int find_location_for_match(
590 direction_t direction
,
600 if (m
->type
== MATCH_DISCRETE
) {
604 if (JOURNAL_HEADER_KEYED_HASH(f
->header
))
605 hash
= journal_file_hash_data(f
, m
->data
, m
->size
);
609 r
= journal_file_find_data_object_with_hash(f
, m
->data
, m
->size
, hash
, &d
, &dp
);
613 /* FIXME: missing: find by monotonic */
615 if (j
->current_location
.type
== LOCATION_HEAD
)
616 return journal_file_next_entry_for_data(f
, d
, DIRECTION_DOWN
, ret
, offset
);
617 if (j
->current_location
.type
== LOCATION_TAIL
)
618 return journal_file_next_entry_for_data(f
, d
, DIRECTION_UP
, ret
, offset
);
619 if (j
->current_location
.seqnum_set
&& sd_id128_equal(j
->current_location
.seqnum_id
, f
->header
->seqnum_id
))
620 return journal_file_move_to_entry_by_seqnum_for_data(f
, d
, j
->current_location
.seqnum
, direction
, ret
, offset
);
621 if (j
->current_location
.monotonic_set
) {
622 r
= journal_file_move_to_entry_by_monotonic_for_data(f
, d
, j
->current_location
.boot_id
, j
->current_location
.monotonic
, direction
, ret
, offset
);
626 /* The data object might have been invalidated. */
627 r
= journal_file_move_to_object(f
, OBJECT_DATA
, dp
, &d
);
631 if (j
->current_location
.realtime_set
)
632 return journal_file_move_to_entry_by_realtime_for_data(f
, d
, j
->current_location
.realtime
, direction
, ret
, offset
);
634 return journal_file_next_entry_for_data(f
, d
, direction
, ret
, offset
);
636 } else if (m
->type
== MATCH_OR_TERM
) {
641 /* Find the earliest match */
643 LIST_FOREACH(matches
, i
, m
->matches
) {
646 r
= find_location_for_match(j
, i
, f
, direction
, NULL
, &cp
);
650 if (np
== 0 || (direction
== DIRECTION_DOWN
? np
> cp
: np
< cp
))
658 r
= journal_file_move_to_object(f
, OBJECT_ENTRY
, np
, &n
);
673 assert(m
->type
== MATCH_AND_TERM
);
675 /* First jump to the last match, and then find the
676 * next one where all matches match */
681 LIST_FOREACH(matches
, i
, m
->matches
) {
684 r
= find_location_for_match(j
, i
, f
, direction
, NULL
, &cp
);
688 if (np
== 0 || (direction
== DIRECTION_DOWN
? cp
> np
: cp
< np
))
692 return next_for_match(j
, m
, f
, np
, direction
, ret
, offset
);
696 static int find_location_with_matches(
699 direction_t direction
,
711 /* No matches is simple */
713 if (j
->current_location
.type
== LOCATION_HEAD
)
714 return journal_file_next_entry(f
, 0, DIRECTION_DOWN
, ret
, offset
);
715 if (j
->current_location
.type
== LOCATION_TAIL
)
716 return journal_file_next_entry(f
, 0, DIRECTION_UP
, ret
, offset
);
717 if (j
->current_location
.seqnum_set
&& sd_id128_equal(j
->current_location
.seqnum_id
, f
->header
->seqnum_id
))
718 return journal_file_move_to_entry_by_seqnum(f
, j
->current_location
.seqnum
, direction
, ret
, offset
);
719 if (j
->current_location
.monotonic_set
) {
720 r
= journal_file_move_to_entry_by_monotonic(f
, j
->current_location
.boot_id
, j
->current_location
.monotonic
, direction
, ret
, offset
);
724 if (j
->current_location
.realtime_set
)
725 return journal_file_move_to_entry_by_realtime(f
, j
->current_location
.realtime
, direction
, ret
, offset
);
727 return journal_file_next_entry(f
, 0, direction
, ret
, offset
);
729 return find_location_for_match(j
, j
->level0
, f
, direction
, ret
, offset
);
732 static int next_with_matches(
735 direction_t direction
,
744 /* No matches is easy. We simple advance the file
747 return journal_file_next_entry(f
, f
->current_offset
, direction
, ret
, offset
);
749 /* If we have a match then we look for the next matching entry
750 * with an offset at least one step larger */
751 return next_for_match(j
, j
->level0
, f
,
752 direction
== DIRECTION_DOWN
? f
->current_offset
+ 1
753 : f
->current_offset
- 1,
754 direction
, ret
, offset
);
757 static int next_beyond_location(sd_journal
*j
, JournalFile
*f
, direction_t direction
) {
759 uint64_t cp
, n_entries
;
765 n_entries
= le64toh(f
->header
->n_entries
);
767 /* If we hit EOF before, we don't need to look into this file again
768 * unless direction changed or new entries appeared. */
769 if (f
->last_direction
== direction
&& f
->location_type
== LOCATION_TAIL
&&
770 n_entries
== f
->last_n_entries
)
773 f
->last_n_entries
= n_entries
;
775 if (f
->last_direction
== direction
&& f
->current_offset
> 0) {
776 /* LOCATION_SEEK here means we did the work in a previous
777 * iteration and the current location already points to a
778 * candidate entry. */
779 if (f
->location_type
!= LOCATION_SEEK
) {
780 r
= next_with_matches(j
, f
, direction
, &c
, &cp
);
784 journal_file_save_location(f
, c
, cp
);
787 f
->last_direction
= direction
;
789 r
= find_location_with_matches(j
, f
, direction
, &c
, &cp
);
793 journal_file_save_location(f
, c
, cp
);
796 /* OK, we found the spot, now let's advance until an entry
797 * that is actually different from what we were previously
798 * looking at. This is necessary to handle entries which exist
799 * in two (or more) journal files, and which shall all be
800 * suppressed but one. */
805 if (j
->current_location
.type
== LOCATION_DISCRETE
) {
808 k
= compare_with_location(f
, &j
->current_location
, j
->current_file
);
810 found
= direction
== DIRECTION_DOWN
? k
> 0 : k
< 0;
817 r
= next_with_matches(j
, f
, direction
, &c
, &cp
);
821 journal_file_save_location(f
, c
, cp
);
825 static int real_journal_next(sd_journal
*j
, direction_t direction
) {
826 JournalFile
*new_file
= NULL
;
832 assert_return(j
, -EINVAL
);
833 assert_return(!journal_pid_changed(j
), -ECHILD
);
835 r
= iterated_cache_get(j
->files_cache
, NULL
, &files
, &n_files
);
839 for (i
= 0; i
< n_files
; i
++) {
840 JournalFile
*f
= (JournalFile
*)files
[i
];
843 r
= next_beyond_location(j
, f
, direction
);
845 log_debug_errno(r
, "Can't iterate through %s, ignoring: %m", f
->path
);
846 remove_file_real(j
, f
);
849 f
->location_type
= LOCATION_TAIL
;
858 k
= journal_file_compare_locations(f
, new_file
);
860 found
= direction
== DIRECTION_DOWN
? k
< 0 : k
> 0;
870 r
= journal_file_move_to_object(new_file
, OBJECT_ENTRY
, new_file
->current_offset
, &o
);
874 set_location(j
, new_file
, o
);
879 _public_
int sd_journal_next(sd_journal
*j
) {
880 return real_journal_next(j
, DIRECTION_DOWN
);
883 _public_
int sd_journal_previous(sd_journal
*j
) {
884 return real_journal_next(j
, DIRECTION_UP
);
887 static int real_journal_next_skip(sd_journal
*j
, direction_t direction
, uint64_t skip
) {
890 assert_return(j
, -EINVAL
);
891 assert_return(!journal_pid_changed(j
), -ECHILD
);
892 assert_return(skip
<= INT_MAX
, -ERANGE
);
895 /* If this is not a discrete skip, then at least
896 * resolve the current location */
897 if (j
->current_location
.type
!= LOCATION_DISCRETE
) {
898 r
= real_journal_next(j
, direction
);
907 r
= real_journal_next(j
, direction
);
921 _public_
int sd_journal_next_skip(sd_journal
*j
, uint64_t skip
) {
922 return real_journal_next_skip(j
, DIRECTION_DOWN
, skip
);
925 _public_
int sd_journal_previous_skip(sd_journal
*j
, uint64_t skip
) {
926 return real_journal_next_skip(j
, DIRECTION_UP
, skip
);
929 _public_
int sd_journal_get_cursor(sd_journal
*j
, char **cursor
) {
933 assert_return(j
, -EINVAL
);
934 assert_return(!journal_pid_changed(j
), -ECHILD
);
935 assert_return(cursor
, -EINVAL
);
937 if (!j
->current_file
|| j
->current_file
->current_offset
<= 0)
938 return -EADDRNOTAVAIL
;
940 r
= journal_file_move_to_object(j
->current_file
, OBJECT_ENTRY
, j
->current_file
->current_offset
, &o
);
945 "s=%s;i=%"PRIx64
";b=%s;m=%"PRIx64
";t=%"PRIx64
";x=%"PRIx64
,
946 SD_ID128_TO_STRING(j
->current_file
->header
->seqnum_id
), le64toh(o
->entry
.seqnum
),
947 SD_ID128_TO_STRING(o
->entry
.boot_id
), le64toh(o
->entry
.monotonic
),
948 le64toh(o
->entry
.realtime
),
949 le64toh(o
->entry
.xor_hash
)) < 0)
955 _public_
int sd_journal_seek_cursor(sd_journal
*j
, const char *cursor
) {
956 unsigned long long seqnum
, monotonic
, realtime
, xor_hash
;
957 bool seqnum_id_set
= false,
960 monotonic_set
= false,
961 realtime_set
= false,
962 xor_hash_set
= false;
963 sd_id128_t seqnum_id
, boot_id
;
966 assert_return(j
, -EINVAL
);
967 assert_return(!journal_pid_changed(j
), -ECHILD
);
968 assert_return(!isempty(cursor
), -EINVAL
);
970 for (const char *p
= cursor
;;) {
971 _cleanup_free_
char *word
= NULL
;
973 r
= extract_first_word(&p
, &word
, ";", EXTRACT_DONT_COALESCE_SEPARATORS
);
979 if (word
[0] == '\0' || word
[1] != '=')
984 seqnum_id_set
= true;
985 r
= sd_id128_from_string(word
+ 2, &seqnum_id
);
992 if (sscanf(word
+ 2, "%llx", &seqnum
) != 1)
998 r
= sd_id128_from_string(word
+ 2, &boot_id
);
1002 monotonic_set
= true;
1003 if (sscanf(word
+ 2, "%llx", &monotonic
) != 1)
1008 realtime_set
= true;
1009 if (sscanf(word
+ 2, "%llx", &realtime
) != 1)
1014 xor_hash_set
= true;
1015 if (sscanf(word
+ 2, "%llx", &xor_hash
) != 1)
1021 if ((!seqnum_set
|| !seqnum_id_set
) &&
1022 (!monotonic_set
|| !boot_id_set
) &&
1027 j
->current_location
= (Location
) {
1028 .type
= LOCATION_SEEK
,
1032 j
->current_location
.realtime
= (uint64_t) realtime
;
1033 j
->current_location
.realtime_set
= true;
1036 if (seqnum_set
&& seqnum_id_set
) {
1037 j
->current_location
.seqnum
= (uint64_t) seqnum
;
1038 j
->current_location
.seqnum_id
= seqnum_id
;
1039 j
->current_location
.seqnum_set
= true;
1042 if (monotonic_set
&& boot_id_set
) {
1043 j
->current_location
.monotonic
= (uint64_t) monotonic
;
1044 j
->current_location
.boot_id
= boot_id
;
1045 j
->current_location
.monotonic_set
= true;
1049 j
->current_location
.xor_hash
= (uint64_t) xor_hash
;
1050 j
->current_location
.xor_hash_set
= true;
1056 _public_
int sd_journal_test_cursor(sd_journal
*j
, const char *cursor
) {
1060 assert_return(j
, -EINVAL
);
1061 assert_return(!journal_pid_changed(j
), -ECHILD
);
1062 assert_return(!isempty(cursor
), -EINVAL
);
1064 if (!j
->current_file
|| j
->current_file
->current_offset
<= 0)
1065 return -EADDRNOTAVAIL
;
1067 r
= journal_file_move_to_object(j
->current_file
, OBJECT_ENTRY
, j
->current_file
->current_offset
, &o
);
1072 _cleanup_free_
char *item
= NULL
;
1073 unsigned long long ll
;
1077 r
= extract_first_word(&cursor
, &item
, ";", EXTRACT_DONT_COALESCE_SEPARATORS
);
1084 if (strlen(item
) < 2 || item
[1] != '=')
1090 k
= sd_id128_from_string(item
+2, &id
);
1093 if (!sd_id128_equal(id
, j
->current_file
->header
->seqnum_id
))
1098 if (sscanf(item
+2, "%llx", &ll
) != 1)
1100 if (ll
!= le64toh(o
->entry
.seqnum
))
1105 k
= sd_id128_from_string(item
+2, &id
);
1108 if (!sd_id128_equal(id
, o
->entry
.boot_id
))
1113 if (sscanf(item
+2, "%llx", &ll
) != 1)
1115 if (ll
!= le64toh(o
->entry
.monotonic
))
1120 if (sscanf(item
+2, "%llx", &ll
) != 1)
1122 if (ll
!= le64toh(o
->entry
.realtime
))
1127 if (sscanf(item
+2, "%llx", &ll
) != 1)
1129 if (ll
!= le64toh(o
->entry
.xor_hash
))
1138 _public_
int sd_journal_seek_monotonic_usec(sd_journal
*j
, sd_id128_t boot_id
, uint64_t usec
) {
1139 assert_return(j
, -EINVAL
);
1140 assert_return(!journal_pid_changed(j
), -ECHILD
);
1144 j
->current_location
= (Location
) {
1145 .type
= LOCATION_SEEK
,
1148 .monotonic_set
= true,
1154 _public_
int sd_journal_seek_realtime_usec(sd_journal
*j
, uint64_t usec
) {
1155 assert_return(j
, -EINVAL
);
1156 assert_return(!journal_pid_changed(j
), -ECHILD
);
1160 j
->current_location
= (Location
) {
1161 .type
= LOCATION_SEEK
,
1163 .realtime_set
= true,
1169 _public_
int sd_journal_seek_head(sd_journal
*j
) {
1170 assert_return(j
, -EINVAL
);
1171 assert_return(!journal_pid_changed(j
), -ECHILD
);
1175 j
->current_location
= (Location
) {
1176 .type
= LOCATION_HEAD
,
1182 _public_
int sd_journal_seek_tail(sd_journal
*j
) {
1183 assert_return(j
, -EINVAL
);
1184 assert_return(!journal_pid_changed(j
), -ECHILD
);
1188 j
->current_location
= (Location
) {
1189 .type
= LOCATION_TAIL
,
1195 static void check_network(sd_journal
*j
, int fd
) {
1201 j
->on_network
= fd_is_network_fs(fd
);
/* Checks whether 'filename' is "<prefix>.journal", "<prefix>.journal~"
 * (corrupted/rotated), or starts with "<prefix>@" (archived). */
static bool file_has_type_prefix(const char *prefix, const char *filename) {
        const char *full, *tilded, *atted;

        full = strjoina(prefix, ".journal");
        tilded = strjoina(full, "~");
        atted = strjoina(prefix, "@");

        return STR_IN_SET(filename, full, tilded) ||
               startswith(filename, atted);
}
1215 static bool file_type_wanted(int flags
, const char *filename
) {
1218 if (!endswith(filename
, ".journal") && !endswith(filename
, ".journal~"))
1221 /* no flags set → every type is OK */
1222 if (!(flags
& (SD_JOURNAL_SYSTEM
| SD_JOURNAL_CURRENT_USER
)))
1225 if (flags
& SD_JOURNAL_SYSTEM
&& file_has_type_prefix("system", filename
))
1228 if (flags
& SD_JOURNAL_CURRENT_USER
) {
1229 char prefix
[5 + DECIMAL_STR_MAX(uid_t
) + 1];
1231 xsprintf(prefix
, "user-"UID_FMT
, getuid());
1233 if (file_has_type_prefix(prefix
, filename
))
1240 static bool path_has_prefix(sd_journal
*j
, const char *path
, const char *prefix
) {
1245 if (j
->toplevel_fd
>= 0)
1248 return path_startswith(path
, prefix
);
1251 static void track_file_disposition(sd_journal
*j
, JournalFile
*f
) {
1255 if (!j
->has_runtime_files
&& path_has_prefix(j
, f
->path
, "/run"))
1256 j
->has_runtime_files
= true;
1257 else if (!j
->has_persistent_files
&& path_has_prefix(j
, f
->path
, "/var"))
1258 j
->has_persistent_files
= true;
/* Skips over leading slashes, turning an absolute path into one
 * usable relative to a directory fd (openat() and friends). */
static const char *skip_slash(const char *p) {
        assert(p);

        while (*p == '/')
                p++;

        return p;
}
1272 static int add_any_file(
1277 bool close_fd
= false;
1283 assert(fd
>= 0 || path
);
1286 if (j
->toplevel_fd
>= 0)
1287 /* If there's a top-level fd defined make the path relative, explicitly, since otherwise
1288 * openat() ignores the first argument. */
1290 fd
= openat(j
->toplevel_fd
, skip_slash(path
), O_RDONLY
|O_CLOEXEC
|O_NONBLOCK
);
1292 fd
= open(path
, O_RDONLY
|O_CLOEXEC
|O_NONBLOCK
);
1294 r
= log_debug_errno(errno
, "Failed to open journal file %s: %m", path
);
1300 r
= fd_nonblock(fd
, false);
1302 r
= log_debug_errno(errno
, "Failed to turn off O_NONBLOCK for %s: %m", path
);
1307 if (fstat(fd
, &st
) < 0) {
1308 r
= log_debug_errno(errno
, "Failed to fstat file '%s': %m", path
);
1312 r
= stat_verify_regular(&st
);
1314 log_debug_errno(r
, "Refusing to open '%s', as it is not a regular file.", path
);
1318 f
= ordered_hashmap_get(j
->files
, path
);
1320 if (f
->last_stat
.st_dev
== st
.st_dev
&&
1321 f
->last_stat
.st_ino
== st
.st_ino
) {
1323 /* We already track this file, under the same path and with the same device/inode numbers, it's
1324 * hence really the same. Mark this file as seen in this generation. This is used to GC old
1325 * files in process_q_overflow() to detect journal files that are still there and discern them
1326 * from those which are gone. */
1328 f
->last_seen_generation
= j
->generation
;
1333 /* So we tracked a file under this name, but it has a different inode/device. In that case, it got
1334 * replaced (probably due to rotation?), let's drop it hence from our list. */
1335 remove_file_real(j
, f
);
1339 if (ordered_hashmap_size(j
->files
) >= JOURNAL_FILES_MAX
) {
1340 log_debug("Too many open journal files, not adding %s.", path
);
1345 r
= journal_file_open(fd
, path
, O_RDONLY
, 0, false, 0, false, NULL
, j
->mmap
, NULL
, &f
);
1347 log_debug_errno(r
, "Failed to open journal file %s: %m", path
);
1351 /* journal_file_dump(f); */
1353 r
= ordered_hashmap_put(j
->files
, f
->path
, f
);
1355 f
->close_fd
= false; /* make sure journal_file_close() doesn't close the caller's fd (or our own). We'll let the caller do that, or ourselves */
1356 (void) journal_file_close(f
);
1360 close_fd
= false; /* the fd is now owned by the JournalFile object */
1362 f
->last_seen_generation
= j
->generation
;
1364 track_file_disposition(j
, f
);
1365 check_network(j
, f
->fd
);
1367 j
->current_invalidate_counter
++;
1369 log_debug("File %s added.", f
->path
);
1378 k
= journal_put_error(j
, r
, path
);
1386 static int add_file_by_name(
1389 const char *filename
) {
1397 if (j
->no_new_files
)
1400 if (!file_type_wanted(j
->flags
, filename
))
1403 path
= prefix_roota(prefix
, filename
);
1404 return add_any_file(j
, -1, path
);
1407 static void remove_file_by_name(
1410 const char *filename
) {
1419 path
= prefix_roota(prefix
, filename
);
1420 f
= ordered_hashmap_get(j
->files
, path
);
1424 remove_file_real(j
, f
);
1427 static void remove_file_real(sd_journal
*j
, JournalFile
*f
) {
1431 (void) ordered_hashmap_remove(j
->files
, f
->path
);
1433 log_debug("File %s removed.", f
->path
);
1435 if (j
->current_file
== f
) {
1436 j
->current_file
= NULL
;
1437 j
->current_field
= 0;
1440 if (j
->unique_file
== f
) {
1441 /* Jump to the next unique_file or NULL if that one was last */
1442 j
->unique_file
= ordered_hashmap_next(j
->files
, j
->unique_file
->path
);
1443 j
->unique_offset
= 0;
1444 if (!j
->unique_file
)
1445 j
->unique_file_lost
= true;
1448 if (j
->fields_file
== f
) {
1449 j
->fields_file
= ordered_hashmap_next(j
->files
, j
->fields_file
->path
);
1450 j
->fields_offset
= 0;
1451 if (!j
->fields_file
)
1452 j
->fields_file_lost
= true;
1455 (void) journal_file_close(f
);
1457 j
->current_invalidate_counter
++;
1460 static int dirname_is_machine_id(const char *fn
) {
1461 sd_id128_t id
, machine
;
1465 /* Returns true if the specified directory name matches the local machine ID */
1467 r
= sd_id128_get_machine(&machine
);
1471 e
= strchr(fn
, '.');
1475 /* Looks like it has a namespace suffix. Verify that. */
1476 if (!log_namespace_name_valid(e
+ 1))
1479 k
= strndupa_safe(fn
, e
- fn
);
1480 r
= sd_id128_from_string(k
, &id
);
1482 r
= sd_id128_from_string(fn
, &id
);
1486 return sd_id128_equal(id
, machine
);
1489 static int dirname_has_namespace(const char *fn
, const char *namespace) {
1492 /* Returns true if the specified directory name matches the specified namespace */
1494 e
= strchr(fn
, '.');
1501 if (!streq(e
+ 1, namespace))
1504 k
= strndupa_safe(fn
, e
- fn
);
1505 return id128_is_valid(k
);
1511 return id128_is_valid(fn
);
1514 static bool dirent_is_journal_file(const struct dirent
*de
) {
1517 /* Returns true if the specified directory entry looks like a journal file we might be interested in */
1519 if (!IN_SET(de
->d_type
, DT_REG
, DT_LNK
, DT_UNKNOWN
))
1522 return endswith(de
->d_name
, ".journal") ||
1523 endswith(de
->d_name
, ".journal~");
1526 static bool dirent_is_journal_subdir(const struct dirent
*de
) {
1530 /* returns true if the specified directory entry looks like a directory that might contain journal
1531 * files we might be interested in, i.e. is either a 128bit ID or a 128bit ID suffixed by a
1534 if (!IN_SET(de
->d_type
, DT_DIR
, DT_LNK
, DT_UNKNOWN
))
1537 e
= strchr(de
->d_name
, '.');
1539 return id128_is_valid(de
->d_name
); /* No namespace */
1541 n
= strndupa_safe(de
->d_name
, e
- de
->d_name
);
1542 if (!id128_is_valid(n
))
1545 return log_namespace_name_valid(e
+ 1);
1548 static int directory_open(sd_journal
*j
, const char *path
, DIR **ret
) {
1555 if (j
->toplevel_fd
< 0)
1558 /* Open the specified directory relative to the toplevel fd. Enforce that the path specified is
1559 * relative, by dropping the initial slash */
1560 d
= xopendirat(j
->toplevel_fd
, skip_slash(path
), 0);
1568 static int add_directory(sd_journal
*j
, const char *prefix
, const char *dirname
);
1570 static void directory_enumerate(sd_journal
*j
, Directory
*m
, DIR *d
) {
1575 FOREACH_DIRENT_ALL(de
, d
, goto fail
) {
1576 if (dirent_is_journal_file(de
))
1577 (void) add_file_by_name(j
, m
->path
, de
->d_name
);
1579 if (m
->is_root
&& dirent_is_journal_subdir(de
))
1580 (void) add_directory(j
, m
->path
, de
->d_name
);
1585 log_debug_errno(errno
, "Failed to enumerate directory %s, ignoring: %m", m
->path
);
1588 static void directory_watch(sd_journal
*j
, Directory
*m
, int fd
, uint32_t mask
) {
1595 /* Watch this directory if that's enabled and if it not being watched yet. */
1597 if (m
->wd
> 0) /* Already have a watch? */
1599 if (j
->inotify_fd
< 0) /* Not watching at all? */
1602 m
->wd
= inotify_add_watch_fd(j
->inotify_fd
, fd
, mask
);
1604 log_debug_errno(errno
, "Failed to watch journal directory '%s', ignoring: %m", m
->path
);
1608 r
= hashmap_put(j
->directories_by_wd
, INT_TO_PTR(m
->wd
), m
);
1610 log_debug_errno(r
, "Directory '%s' already being watched under a different path, ignoring: %m", m
->path
);
1612 log_debug_errno(r
, "Failed to add watch for journal directory '%s' to hashmap, ignoring: %m", m
->path
);
1613 (void) inotify_rm_watch(j
->inotify_fd
, m
->wd
);
1618 static int add_directory(
1621 const char *dirname
) {
1623 _cleanup_free_
char *path
= NULL
;
1624 _cleanup_closedir_
DIR *d
= NULL
;
1631 /* Adds a journal file directory to watch. If the directory is already tracked this updates the inotify watch
1632 * and reenumerates directory contents */
1634 path
= path_join(prefix
, dirname
);
1640 log_debug("Considering directory '%s'.", path
);
1642 /* We consider everything local that is in a directory for the local machine ID, or that is stored in /run */
1643 if ((j
->flags
& SD_JOURNAL_LOCAL_ONLY
) &&
1644 !((dirname
&& dirname_is_machine_id(dirname
) > 0) || path_has_prefix(j
, path
, "/run")))
1648 (!(FLAGS_SET(j
->flags
, SD_JOURNAL_ALL_NAMESPACES
) ||
1649 dirname_has_namespace(dirname
, j
->namespace) > 0 ||
1650 (FLAGS_SET(j
->flags
, SD_JOURNAL_INCLUDE_DEFAULT_NAMESPACE
) && dirname_has_namespace(dirname
, NULL
) > 0))))
1653 r
= directory_open(j
, path
, &d
);
1655 log_debug_errno(r
, "Failed to open directory '%s': %m", path
);
1659 m
= hashmap_get(j
->directories_by_path
, path
);
1661 m
= new(Directory
, 1);
1672 if (hashmap_put(j
->directories_by_path
, m
->path
, m
) < 0) {
1678 path
= NULL
; /* avoid freeing in cleanup */
1679 j
->current_invalidate_counter
++;
1681 log_debug("Directory %s added.", m
->path
);
1683 } else if (m
->is_root
)
1684 return 0; /* Don't 'downgrade' from root directory */
1686 m
->last_seen_generation
= j
->generation
;
1688 directory_watch(j
, m
, dirfd(d
),
1689 IN_CREATE
|IN_MOVED_TO
|IN_MODIFY
|IN_ATTRIB
|IN_DELETE
|
1690 IN_DELETE_SELF
|IN_MOVE_SELF
|IN_UNMOUNT
|IN_MOVED_FROM
|
1693 if (!j
->no_new_files
)
1694 directory_enumerate(j
, m
, d
);
1696 check_network(j
, dirfd(d
));
1701 k
= journal_put_error(j
, r
, path
?: prefix
);
1708 static int add_root_directory(sd_journal
*j
, const char *p
, bool missing_ok
) {
1710 _cleanup_closedir_
DIR *d
= NULL
;
1716 /* Adds a root directory to our set of directories to use. If the root directory is already in the set, we
1717 * update the inotify logic, and renumerate the directory entries. This call may hence be called to initially
1718 * populate the set, as well as to update it later. */
1721 /* If there's a path specified, use it. */
1723 log_debug("Considering root directory '%s'.", p
);
1725 if ((j
->flags
& SD_JOURNAL_RUNTIME_ONLY
) &&
1726 !path_has_prefix(j
, p
, "/run"))
1730 p
= strjoina(j
->prefix
, p
);
1732 r
= directory_open(j
, p
, &d
);
1733 if (r
== -ENOENT
&& missing_ok
)
1736 log_debug_errno(r
, "Failed to open root directory %s: %m", p
);
1740 _cleanup_close_
int dfd
= -1;
1742 /* If there's no path specified, then we use the top-level fd itself. We duplicate the fd here, since
1743 * opendir() will take possession of the fd, and close it, which we don't want. */
1745 p
= "."; /* store this as "." in the directories hashmap */
1747 dfd
= fcntl(j
->toplevel_fd
, F_DUPFD_CLOEXEC
, 3);
1753 d
= take_fdopendir(&dfd
);
1762 m
= hashmap_get(j
->directories_by_path
, p
);
1764 m
= new0(Directory
, 1);
1772 m
->path
= strdup(p
);
1779 if (hashmap_put(j
->directories_by_path
, m
->path
, m
) < 0) {
1786 j
->current_invalidate_counter
++;
1788 log_debug("Root directory %s added.", m
->path
);
1790 } else if (!m
->is_root
)
1793 directory_watch(j
, m
, dirfd(d
),
1794 IN_CREATE
|IN_MOVED_TO
|IN_MODIFY
|IN_ATTRIB
|IN_DELETE
|
1797 if (!j
->no_new_files
)
1798 directory_enumerate(j
, m
, d
);
1800 check_network(j
, dirfd(d
));
1805 k
= journal_put_error(j
, r
, p
);
/* Drops a watched directory (root or per-machine subdirectory) from the journal context:
 * removes it from both lookup hashmaps, detaches its inotify watch, and frees it.
 *
 * NOTE(review): the extraction this was recovered from dropped some lines (asserts, the
 * wd guard, the final frees); they were restored per the visible conventions — verify
 * against the original file. */
static void remove_directory(sd_journal *j, Directory *d) {
        assert(j);
        assert(d);

        if (d->wd > 0) {
                hashmap_remove(j->directories_by_wd, INT_TO_PTR(d->wd));

                /* Only detach the kernel-side watch if we actually have an inotify fd */
                if (j->inotify_fd >= 0)
                        (void) inotify_rm_watch(j->inotify_fd, d->wd);
        }

        hashmap_remove(j->directories_by_path, d->path);

        if (d->is_root)
                log_debug("Root directory %s removed.", d->path);
        else
                log_debug("Directory %s removed.", d->path);

        free(d->path);
        free(d);
}
/* Registers the standard journal locations (/run for volatile, /var for persistent) as
 * root directories, plus the remote journal directory unless SD_JOURNAL_LOCAL_ONLY is set.
 * Errors from individual directories are deliberately ignored: we open what is accessible
 * and skip the rest. */
static int add_search_paths(sd_journal *j) {

        static const char search_paths[] =
                "/run/log/journal\0"
                "/var/log/journal\0";
        const char *p;

        assert(j);

        /* We ignore most errors here, since the idea is to only open
         * what's actually accessible, and ignore the rest. */

        NULSTR_FOREACH(p, search_paths)
                (void) add_root_directory(j, p, true);

        if (!(j->flags & SD_JOURNAL_LOCAL_ONLY))
                (void) add_root_directory(j, "/var/log/journal/remote", true);

        return 0;
}
/* Re-registers the parent directory of every journal file we already have open. Used in
 * no_new_files mode (e.g. sd_journal_open_files()), where the set of files is fixed and we
 * only need directory watches for the files we already track.
 *
 * NOTE(review): error-handling lines after dirname_malloc()/add_directory() were lost in
 * extraction and restored from the stated contract ("we don't expect errors here, so we
 * treat them as fatal") — verify against the original file. */
static int add_current_paths(sd_journal *j) {
        JournalFile *f;

        assert(j);
        assert(j->no_new_files);

        /* Simply adds all directories for files we have open as directories. We don't expect errors here, so we
         * treat them as fatal. */

        ORDERED_HASHMAP_FOREACH(f, j->files) {
                _cleanup_free_ char *dir = NULL;
                int r;

                dir = dirname_malloc(f->path);
                if (!dir)
                        return -ENOMEM;

                r = add_directory(j, dir, NULL);
                if (r < 0)
                        return r;
        }

        return 0;
}
/* Lazily creates the non-blocking, close-on-exec inotify fd used for change notification,
 * and makes sure the wd -> Directory hashmap exists. Idempotent: a second call with an
 * already-open fd only ensures the hashmap. */
static int allocate_inotify(sd_journal *j) {
        assert(j);

        if (j->inotify_fd < 0) {
                j->inotify_fd = inotify_init1(IN_NONBLOCK|IN_CLOEXEC);
                if (j->inotify_fd < 0)
                        return -errno;
        }

        return hashmap_ensure_allocated(&j->directories_by_wd, NULL);
}
/* Allocates and initializes a fresh sd_journal context. 'path' is stored either as the
 * OS root prefix (with SD_JOURNAL_OS_ROOT) or as an explicit journal directory path;
 * 'namespace' is duplicated if given. Returns NULL on allocation failure; partial state
 * is torn down by the sd_journal_closep cleanup handler.
 *
 * NOTE(review): several allocation-failure checks were lost in extraction and restored
 * per the visible pattern (return NULL, cleanup via _cleanup_) — verify against the
 * original file. */
static sd_journal *journal_new(int flags, const char *path, const char *namespace) {
        _cleanup_(sd_journal_closep) sd_journal *j = NULL;

        j = new0(sd_journal, 1);
        if (!j)
                return NULL;

        j->original_pid = getpid_cached(); /* for journal_pid_changed() fork detection */
        j->toplevel_fd = -1;
        j->inotify_fd = -1;
        j->flags = flags;
        j->data_threshold = DEFAULT_DATA_THRESHOLD;

        if (path) {
                char *t;

                t = strdup(path);
                if (!t)
                        return NULL;

                /* Under SD_JOURNAL_OS_ROOT the path is a prefix prepended to the search
                 * paths; otherwise it is the journal directory itself. */
                if (flags & SD_JOURNAL_OS_ROOT)
                        j->prefix = t;
                else
                        j->path = t;
        }

        if (namespace) {
                j->namespace = strdup(namespace);
                if (!j->namespace)
                        return NULL;
        }

        j->files = ordered_hashmap_new(&path_hash_ops);
        if (!j->files)
                return NULL;

        j->files_cache = ordered_hashmap_iterated_cache_new(j->files);
        j->directories_by_path = hashmap_new(&path_hash_ops);
        j->mmap = mmap_cache_new();
        if (!j->files_cache || !j->directories_by_path || !j->mmap)
                return NULL;

        return TAKE_PTR(j);
}
1936 #define OPEN_ALLOWED_FLAGS \
1937 (SD_JOURNAL_LOCAL_ONLY | \
1938 SD_JOURNAL_RUNTIME_ONLY | \
1939 SD_JOURNAL_SYSTEM | \
1940 SD_JOURNAL_CURRENT_USER | \
1941 SD_JOURNAL_ALL_NAMESPACES | \
1942 SD_JOURNAL_INCLUDE_DEFAULT_NAMESPACE)
/* Opens the journal for the given namespace (NULL means the default namespace), using the
 * standard search paths. On success stores the new context in *ret and returns 0; on
 * failure returns a negative errno and the partially-built context is freed by the
 * cleanup handler. */
_public_ int sd_journal_open_namespace(sd_journal **ret, const char *namespace, int flags) {
        _cleanup_(sd_journal_closep) sd_journal *j = NULL;
        int r;

        assert_return(ret, -EINVAL);
        assert_return((flags & ~OPEN_ALLOWED_FLAGS) == 0, -EINVAL);

        j = journal_new(flags, NULL, namespace);
        if (!j)
                return -ENOMEM;

        r = add_search_paths(j);
        if (r < 0)
                return r;

        *ret = TAKE_PTR(j);
        return 0;
}
/* Convenience wrapper: opens the journal in the default namespace. */
_public_ int sd_journal_open(sd_journal **ret, int flags) {
        return sd_journal_open_namespace(ret, NULL, flags);
}
1967 #define OPEN_CONTAINER_ALLOWED_FLAGS \
1968 (SD_JOURNAL_LOCAL_ONLY | SD_JOURNAL_SYSTEM)
1970 _public_
int sd_journal_open_container(sd_journal
**ret
, const char *machine
, int flags
) {
1971 _cleanup_free_
char *root
= NULL
, *class = NULL
;
1972 _cleanup_(sd_journal_closep
) sd_journal
*j
= NULL
;
1976 /* This is deprecated, people should use machined's OpenMachineRootDirectory() call instead in
1977 * combination with sd_journal_open_directory_fd(). */
1979 assert_return(machine
, -EINVAL
);
1980 assert_return(ret
, -EINVAL
);
1981 assert_return((flags
& ~OPEN_CONTAINER_ALLOWED_FLAGS
) == 0, -EINVAL
);
1982 assert_return(hostname_is_valid(machine
, 0), -EINVAL
);
1984 p
= strjoina("/run/systemd/machines/", machine
);
1985 r
= parse_env_file(NULL
, p
,
1995 if (!streq_ptr(class, "container"))
1998 j
= journal_new(flags
, root
, NULL
);
2002 r
= add_search_paths(j
);
2010 #define OPEN_DIRECTORY_ALLOWED_FLAGS \
2011 (SD_JOURNAL_OS_ROOT | \
2012 SD_JOURNAL_SYSTEM | SD_JOURNAL_CURRENT_USER )
2014 _public_
int sd_journal_open_directory(sd_journal
**ret
, const char *path
, int flags
) {
2015 _cleanup_(sd_journal_closep
) sd_journal
*j
= NULL
;
2018 assert_return(ret
, -EINVAL
);
2019 assert_return(path
, -EINVAL
);
2020 assert_return((flags
& ~OPEN_DIRECTORY_ALLOWED_FLAGS
) == 0, -EINVAL
);
2022 j
= journal_new(flags
, path
, NULL
);
2026 if (flags
& SD_JOURNAL_OS_ROOT
)
2027 r
= add_search_paths(j
);
2029 r
= add_root_directory(j
, path
, false);
2037 _public_
int sd_journal_open_files(sd_journal
**ret
, const char **paths
, int flags
) {
2038 _cleanup_(sd_journal_closep
) sd_journal
*j
= NULL
;
2042 assert_return(ret
, -EINVAL
);
2043 assert_return(flags
== 0, -EINVAL
);
2045 j
= journal_new(flags
, NULL
, NULL
);
2049 STRV_FOREACH(path
, paths
) {
2050 r
= add_any_file(j
, -1, *path
);
2055 j
->no_new_files
= true;
2061 #define OPEN_DIRECTORY_FD_ALLOWED_FLAGS \
2062 (SD_JOURNAL_OS_ROOT | \
2063 SD_JOURNAL_SYSTEM | SD_JOURNAL_CURRENT_USER )
2065 _public_
int sd_journal_open_directory_fd(sd_journal
**ret
, int fd
, int flags
) {
2066 _cleanup_(sd_journal_closep
) sd_journal
*j
= NULL
;
2070 assert_return(ret
, -EINVAL
);
2071 assert_return(fd
>= 0, -EBADF
);
2072 assert_return((flags
& ~OPEN_DIRECTORY_FD_ALLOWED_FLAGS
) == 0, -EINVAL
);
2074 if (fstat(fd
, &st
) < 0)
2077 if (!S_ISDIR(st
.st_mode
))
2080 j
= journal_new(flags
, NULL
, NULL
);
2084 j
->toplevel_fd
= fd
;
2086 if (flags
& SD_JOURNAL_OS_ROOT
)
2087 r
= add_search_paths(j
);
2089 r
= add_root_directory(j
, NULL
, false);
2097 _public_
int sd_journal_open_files_fd(sd_journal
**ret
, int fds
[], unsigned n_fds
, int flags
) {
2099 _cleanup_(sd_journal_closep
) sd_journal
*j
= NULL
;
2103 assert_return(ret
, -EINVAL
);
2104 assert_return(n_fds
> 0, -EBADF
);
2105 assert_return(flags
== 0, -EINVAL
);
2107 j
= journal_new(flags
, NULL
, NULL
);
2111 for (i
= 0; i
< n_fds
; i
++) {
2119 if (fstat(fds
[i
], &st
) < 0) {
2124 r
= stat_verify_regular(&st
);
2128 r
= add_any_file(j
, fds
[i
], NULL
);
2133 j
->no_new_files
= true;
2134 j
->no_inotify
= true;
2140 /* If we fail, make sure we don't take possession of the files we managed to make use of successfully, and they
2142 ORDERED_HASHMAP_FOREACH(f
, j
->files
)
2143 f
->close_fd
= false;
2148 _public_
void sd_journal_close(sd_journal
*j
) {
2154 sd_journal_flush_matches(j
);
2156 ordered_hashmap_free_with_destructor(j
->files
, journal_file_close
);
2157 iterated_cache_free(j
->files_cache
);
2159 while ((d
= hashmap_first(j
->directories_by_path
)))
2160 remove_directory(j
, d
);
2162 while ((d
= hashmap_first(j
->directories_by_wd
)))
2163 remove_directory(j
, d
);
2165 hashmap_free(j
->directories_by_path
);
2166 hashmap_free(j
->directories_by_wd
);
2168 safe_close(j
->inotify_fd
);
2171 mmap_cache_stats_log_debug(j
->mmap
);
2172 mmap_cache_unref(j
->mmap
);
2175 hashmap_free_free(j
->errors
);
2180 free(j
->unique_field
);
2181 free(j
->fields_buffer
);
2185 _public_
int sd_journal_get_realtime_usec(sd_journal
*j
, uint64_t *ret
) {
2190 assert_return(j
, -EINVAL
);
2191 assert_return(!journal_pid_changed(j
), -ECHILD
);
2192 assert_return(ret
, -EINVAL
);
2194 f
= j
->current_file
;
2196 return -EADDRNOTAVAIL
;
2198 if (f
->current_offset
<= 0)
2199 return -EADDRNOTAVAIL
;
2201 r
= journal_file_move_to_object(f
, OBJECT_ENTRY
, f
->current_offset
, &o
);
2205 *ret
= le64toh(o
->entry
.realtime
);
2209 _public_
int sd_journal_get_monotonic_usec(sd_journal
*j
, uint64_t *ret
, sd_id128_t
*ret_boot_id
) {
2214 assert_return(j
, -EINVAL
);
2215 assert_return(!journal_pid_changed(j
), -ECHILD
);
2217 f
= j
->current_file
;
2219 return -EADDRNOTAVAIL
;
2221 if (f
->current_offset
<= 0)
2222 return -EADDRNOTAVAIL
;
2224 r
= journal_file_move_to_object(f
, OBJECT_ENTRY
, f
->current_offset
, &o
);
2229 *ret_boot_id
= o
->entry
.boot_id
;
2233 r
= sd_id128_get_boot(&id
);
2237 if (!sd_id128_equal(id
, o
->entry
.boot_id
))
2242 *ret
= le64toh(o
->entry
.monotonic
);
/* Validates a journal field name: non-empty, not starting with "__" (reserved), and
 * consisting only of uppercase ASCII letters, digits and underscores.
 *
 * NOTE(review): the empty/underscore checks and the final returns were lost in extraction
 * and restored from the visible character-class checks — verify against the original file. */
static bool field_is_valid(const char *field) {
        const char *p;

        assert(field);

        if (isempty(field))
                return false;

        /* Fields starting with "__" are reserved for synthetic/internal use */
        if (startswith(field, "__"))
                return false;

        for (p = field; *p; p++) {

                if (*p == '_')
                        continue;

                if (*p >= 'A' && *p <= 'Z')
                        continue;

                if (*p >= '0' && *p <= '9')
                        continue;

                return false;
        }

        return true;
}
2275 _public_
int sd_journal_get_data(sd_journal
*j
, const char *field
, const void **data
, size_t *size
) {
2278 size_t field_length
;
2282 assert_return(j
, -EINVAL
);
2283 assert_return(!journal_pid_changed(j
), -ECHILD
);
2284 assert_return(field
, -EINVAL
);
2285 assert_return(data
, -EINVAL
);
2286 assert_return(size
, -EINVAL
);
2287 assert_return(field_is_valid(field
), -EINVAL
);
2289 f
= j
->current_file
;
2291 return -EADDRNOTAVAIL
;
2293 if (f
->current_offset
<= 0)
2294 return -EADDRNOTAVAIL
;
2296 r
= journal_file_move_to_object(f
, OBJECT_ENTRY
, f
->current_offset
, &o
);
2300 field_length
= strlen(field
);
2302 n
= journal_file_entry_n_items(o
);
2303 for (i
= 0; i
< n
; i
++) {
2309 p
= le64toh(o
->entry
.items
[i
].object_offset
);
2310 r
= journal_file_move_to_object(f
, OBJECT_DATA
, p
, &d
);
2311 if (IN_SET(r
, -EADDRNOTAVAIL
, -EBADMSG
)) {
2312 log_debug_errno(r
, "Entry item %"PRIu64
" data object is bad, skipping over it: %m", i
);
2318 l
= le64toh(d
->object
.size
) - offsetof(Object
, data
.payload
);
2320 compression
= d
->object
.flags
& OBJECT_COMPRESSION_MASK
;
2322 #if HAVE_COMPRESSION
2323 r
= decompress_startswith(compression
,
2325 &f
->compress_buffer
,
2326 field
, field_length
, '=');
2328 log_debug_errno(r
, "Cannot decompress %s object of length %"PRIu64
" at offset "OFSfmt
": %m",
2329 object_compressed_to_string(compression
), l
, p
);
2334 r
= decompress_blob(compression
,
2336 &f
->compress_buffer
, &rsize
,
2341 *data
= f
->compress_buffer
;
2342 *size
= (size_t) rsize
;
2347 return -EPROTONOSUPPORT
;
2349 } else if (l
>= field_length
+1 &&
2350 memcmp(d
->data
.payload
, field
, field_length
) == 0 &&
2351 d
->data
.payload
[field_length
] == '=') {
2355 if ((uint64_t) t
!= l
)
2358 *data
= d
->data
.payload
;
2368 static int return_data(
2372 const void **ret_data
,
2382 l
= le64toh(READ_NOW(o
->object
.size
));
2383 if (l
< offsetof(Object
, data
.payload
))
2385 l
-= offsetof(Object
, data
.payload
);
2387 /* We can't read objects larger than 4G on a 32bit machine */
2389 if ((uint64_t) t
!= l
)
2392 compression
= o
->object
.flags
& OBJECT_COMPRESSION_MASK
;
2394 #if HAVE_COMPRESSION
2398 r
= decompress_blob(
2401 &f
->compress_buffer
, &rsize
,
2407 *ret_data
= f
->compress_buffer
;
2409 *ret_size
= (size_t) rsize
;
2411 return -EPROTONOSUPPORT
;
2415 *ret_data
= o
->data
.payload
;
2423 _public_
int sd_journal_enumerate_data(sd_journal
*j
, const void **data
, size_t *size
) {
2428 assert_return(j
, -EINVAL
);
2429 assert_return(!journal_pid_changed(j
), -ECHILD
);
2430 assert_return(data
, -EINVAL
);
2431 assert_return(size
, -EINVAL
);
2433 f
= j
->current_file
;
2435 return -EADDRNOTAVAIL
;
2437 if (f
->current_offset
<= 0)
2438 return -EADDRNOTAVAIL
;
2440 r
= journal_file_move_to_object(f
, OBJECT_ENTRY
, f
->current_offset
, &o
);
2444 for (uint64_t n
= journal_file_entry_n_items(o
); j
->current_field
< n
; j
->current_field
++) {
2447 p
= le64toh(o
->entry
.items
[j
->current_field
].object_offset
);
2448 r
= journal_file_move_to_object(f
, OBJECT_DATA
, p
, &o
);
2449 if (IN_SET(r
, -EADDRNOTAVAIL
, -EBADMSG
)) {
2450 log_debug_errno(r
, "Entry item %"PRIu64
" data object is bad, skipping over it: %m", j
->current_field
);
2456 r
= return_data(j
, f
, o
, data
, size
);
2457 if (r
== -EBADMSG
) {
2458 log_debug("Entry item %"PRIu64
" data payload is bad, skipping over it.", j
->current_field
);
2472 _public_
int sd_journal_enumerate_available_data(sd_journal
*j
, const void **data
, size_t *size
) {
2476 r
= sd_journal_enumerate_data(j
, data
, size
);
2479 if (!JOURNAL_ERRNO_IS_UNAVAILABLE_FIELD(r
))
2481 j
->current_field
++; /* Try with the next field */
/* Resets the per-entry field iteration state so sd_journal_enumerate_data() starts again
 * from the first field of the current entry. NULL context is tolerated as a no-op. */
_public_ void sd_journal_restart_data(sd_journal *j) {
        if (!j)
                return;

        j->current_field = 0;
}
/* Re-adds every journal location this context was configured with, picking the strategy
 * matching how the context was opened: fixed file set, OS-root search paths, a toplevel
 * fd, an explicit path, or the default search paths. Used to (re)establish inotify
 * watches and to resynchronize after an inotify queue overflow. */
static int reiterate_all_paths(sd_journal *j) {
        assert(j);

        if (j->no_new_files)
                return add_current_paths(j);

        if (j->flags & SD_JOURNAL_OS_ROOT)
                return add_search_paths(j);

        if (j->toplevel_fd >= 0)
                return add_root_directory(j, NULL, false);

        if (j->path)
                return add_root_directory(j, j->path, true);

        return add_search_paths(j);
}
/* Returns the inotify fd that signals journal changes, creating it on first call. The fd
 * is unavailable (-EMEDIUMTYPE) for contexts opened from raw fds (no_inotify). After
 * allocating the fd, all paths are reiterated so the watches actually get established. */
_public_ int sd_journal_get_fd(sd_journal *j) {
        int r;

        assert_return(j, -EINVAL);
        assert_return(!journal_pid_changed(j), -ECHILD);

        if (j->no_inotify)
                return -EMEDIUMTYPE;

        /* Already allocated on a previous call? Return it as-is. */
        if (j->inotify_fd >= 0)
                return j->inotify_fd;

        r = allocate_inotify(j);
        if (r < 0)
                return r;

        log_debug("Reiterating files to get inotify watches established.");

        /* Iterate through all dirs again, to add them to the inotify */
        r = reiterate_all_paths(j);
        if (r < 0)
                return r;

        return j->inotify_fd;
}
/* Returns the poll() event mask to wait for on the fd from sd_journal_get_fd().
 * An inotify fd only ever becomes readable, hence POLLIN. */
_public_ int sd_journal_get_events(sd_journal *j) {
        int fd;

        assert_return(j, -EINVAL);
        assert_return(!journal_pid_changed(j), -ECHILD);

        fd = sd_journal_get_fd(j);
        if (fd < 0)
                return fd;

        return POLLIN;
}
/* Computes the wakeup deadline (CLOCK_MONOTONIC, in usec) for waiting on the journal.
 * Local journals are fully event-driven, so the timeout is infinite (UINT64_MAX, return 0).
 * Journals on network filesystems don't deliver reliable inotify events, so callers must
 * recheck periodically (return 1). */
_public_ int sd_journal_get_timeout(sd_journal *j, uint64_t *timeout_usec) {
        int fd;

        assert_return(j, -EINVAL);
        assert_return(!journal_pid_changed(j), -ECHILD);
        assert_return(timeout_usec, -EINVAL);

        fd = sd_journal_get_fd(j);
        if (fd < 0)
                return fd;

        if (!j->on_network) {
                *timeout_usec = UINT64_MAX;
                return 0;
        }

        /* If we are on the network we need to regularly check for
         * changes manually */

        *timeout_usec = j->last_process_usec + JOURNAL_FILES_RECHECK_USEC;
        return 1;
}
2572 static void process_q_overflow(sd_journal
*j
) {
2578 /* When the inotify queue overruns we need to enumerate and re-validate all journal files to bring our list
2579 * back in sync with what's on disk. For this we pick a new generation counter value. It'll be assigned to all
2580 * journal files we encounter. All journal files and all directories that don't carry it after reenumeration
2581 * are subject for unloading. */
2583 log_debug("Inotify queue overrun, reiterating everything.");
2586 (void) reiterate_all_paths(j
);
2588 ORDERED_HASHMAP_FOREACH(f
, j
->files
) {
2590 if (f
->last_seen_generation
== j
->generation
)
2593 log_debug("File '%s' hasn't been seen in this enumeration, removing.", f
->path
);
2594 remove_file_real(j
, f
);
2597 HASHMAP_FOREACH(m
, j
->directories_by_path
) {
2599 if (m
->last_seen_generation
== j
->generation
)
2602 if (m
->is_root
) /* Never GC root directories */
2605 log_debug("Directory '%s' hasn't been seen in this enumeration, removing.", f
->path
);
2606 remove_directory(j
, m
);
2609 log_debug("Reiteration complete.");
2612 static void process_inotify_event(sd_journal
*j
, const struct inotify_event
*e
) {
2618 if (e
->mask
& IN_Q_OVERFLOW
) {
2619 process_q_overflow(j
);
2623 /* Is this a subdirectory we watch? */
2624 d
= hashmap_get(j
->directories_by_wd
, INT_TO_PTR(e
->wd
));
2626 if (!(e
->mask
& IN_ISDIR
) && e
->len
> 0 &&
2627 (endswith(e
->name
, ".journal") ||
2628 endswith(e
->name
, ".journal~"))) {
2630 /* Event for a journal file */
2632 if (e
->mask
& (IN_CREATE
|IN_MOVED_TO
|IN_MODIFY
|IN_ATTRIB
))
2633 (void) add_file_by_name(j
, d
->path
, e
->name
);
2634 else if (e
->mask
& (IN_DELETE
|IN_MOVED_FROM
|IN_UNMOUNT
))
2635 remove_file_by_name(j
, d
->path
, e
->name
);
2637 } else if (!d
->is_root
&& e
->len
== 0) {
2639 /* Event for a subdirectory */
2641 if (e
->mask
& (IN_DELETE_SELF
|IN_MOVE_SELF
|IN_UNMOUNT
))
2642 remove_directory(j
, d
);
2644 } else if (d
->is_root
&& (e
->mask
& IN_ISDIR
) && e
->len
> 0 && id128_is_valid(e
->name
)) {
2646 /* Event for root directory */
2648 if (e
->mask
& (IN_CREATE
|IN_MOVED_TO
|IN_MODIFY
|IN_ATTRIB
))
2649 (void) add_directory(j
, d
->path
, e
->name
);
2655 if (e
->mask
& IN_IGNORED
)
2658 log_debug("Unexpected inotify event.");
2661 static int determine_change(sd_journal
*j
) {
2666 b
= j
->current_invalidate_counter
!= j
->last_invalidate_counter
;
2667 j
->last_invalidate_counter
= j
->current_invalidate_counter
;
2669 return b
? SD_JOURNAL_INVALIDATE
: SD_JOURNAL_APPEND
;
2672 _public_
int sd_journal_process(sd_journal
*j
) {
2673 bool got_something
= false;
2675 assert_return(j
, -EINVAL
);
2676 assert_return(!journal_pid_changed(j
), -ECHILD
);
2678 if (j
->inotify_fd
< 0) /* We have no inotify fd yet? Then there's noting to process. */
2681 j
->last_process_usec
= now(CLOCK_MONOTONIC
);
2682 j
->last_invalidate_counter
= j
->current_invalidate_counter
;
2685 union inotify_event_buffer buffer
;
2686 struct inotify_event
*e
;
2689 l
= read(j
->inotify_fd
, &buffer
, sizeof(buffer
));
2691 if (ERRNO_IS_TRANSIENT(errno
))
2692 return got_something
? determine_change(j
) : SD_JOURNAL_NOP
;
2697 got_something
= true;
2699 FOREACH_INOTIFY_EVENT(e
, buffer
, l
)
2700 process_inotify_event(j
, e
);
2704 _public_
int sd_journal_wait(sd_journal
*j
, uint64_t timeout_usec
) {
2708 assert_return(j
, -EINVAL
);
2709 assert_return(!journal_pid_changed(j
), -ECHILD
);
2711 if (j
->inotify_fd
< 0) {
2714 /* This is the first invocation, hence create the
2716 r
= sd_journal_get_fd(j
);
2720 /* Server might have done some vacuuming while we weren't watching.
2721 Get rid of the deleted files now so they don't stay around indefinitely. */
2722 ORDERED_HASHMAP_FOREACH(f
, j
->files
) {
2723 r
= journal_file_fstat(f
);
2725 remove_file_real(j
, f
);
2727 log_debug_errno(r
,"Failed to fstat() journal file '%s' : %m", f
->path
);
2732 /* The journal might have changed since the context
2733 * object was created and we weren't watching before,
2734 * hence don't wait for anything, and return
2736 return determine_change(j
);
2739 r
= sd_journal_get_timeout(j
, &t
);
2743 if (t
!= UINT64_MAX
) {
2744 t
= usec_sub_unsigned(t
, now(CLOCK_MONOTONIC
));
2746 if (timeout_usec
== UINT64_MAX
|| timeout_usec
> t
)
2751 r
= fd_wait_for_event(j
->inotify_fd
, POLLIN
, timeout_usec
);
2752 } while (r
== -EINTR
);
2757 return sd_journal_process(j
);
2760 _public_
int sd_journal_get_cutoff_realtime_usec(sd_journal
*j
, uint64_t *from
, uint64_t *to
) {
2763 uint64_t fmin
= 0, tmax
= 0;
2766 assert_return(j
, -EINVAL
);
2767 assert_return(!journal_pid_changed(j
), -ECHILD
);
2768 assert_return(from
|| to
, -EINVAL
);
2769 assert_return(from
!= to
, -EINVAL
);
2771 ORDERED_HASHMAP_FOREACH(f
, j
->files
) {
2774 r
= journal_file_get_cutoff_realtime_usec(f
, &fr
, &t
);
2787 fmin
= MIN(fr
, fmin
);
2788 tmax
= MAX(t
, tmax
);
2797 return first
? 0 : 1;
2800 _public_
int sd_journal_get_cutoff_monotonic_usec(
2806 uint64_t from
= UINT64_MAX
, to
= UINT64_MAX
;
2811 assert_return(j
, -EINVAL
);
2812 assert_return(!journal_pid_changed(j
), -ECHILD
);
2813 assert_return(ret_from
!= ret_to
, -EINVAL
);
2815 ORDERED_HASHMAP_FOREACH(f
, j
->files
) {
2818 r
= journal_file_get_cutoff_monotonic_usec(f
, boot_id
, &ff
, &tt
);
2827 from
= MIN(ff
, from
);
2844 void journal_print_header(sd_journal
*j
) {
2846 bool newline
= false;
2850 ORDERED_HASHMAP_FOREACH(f
, j
->files
) {
2856 journal_file_print_header(f
);
2860 _public_
int sd_journal_get_usage(sd_journal
*j
, uint64_t *ret
) {
2864 assert_return(j
, -EINVAL
);
2865 assert_return(!journal_pid_changed(j
), -ECHILD
);
2866 assert_return(ret
, -EINVAL
);
2868 ORDERED_HASHMAP_FOREACH(f
, j
->files
) {
2872 if (fstat(f
->fd
, &st
) < 0)
2875 b
= (uint64_t) st
.st_blocks
;
2876 if (b
> UINT64_MAX
/ 512)
2880 if (sum
> UINT64_MAX
- b
)
/* Starts a unique-value query for the given (validated) field name: remembers the field
 * and resets the unique-iteration cursor so sd_journal_enumerate_unique() begins from the
 * first file. */
_public_ int sd_journal_query_unique(sd_journal *j, const char *field) {
        int r;

        assert_return(j, -EINVAL);
        assert_return(!journal_pid_changed(j), -ECHILD);
        assert_return(!isempty(field), -EINVAL);
        assert_return(field_is_valid(field), -EINVAL);

        r = free_and_strdup(&j->unique_field, field);
        if (r < 0)
                return r;

        /* Reset the iteration cursor to the very beginning */
        j->unique_file = NULL;
        j->unique_offset = 0;
        j->unique_file_lost = false;

        return 0;
}
2908 _public_
int sd_journal_enumerate_unique(
2910 const void **ret_data
,
2915 assert_return(j
, -EINVAL
);
2916 assert_return(!journal_pid_changed(j
), -ECHILD
);
2917 assert_return(j
->unique_field
, -EINVAL
);
2919 k
= strlen(j
->unique_field
);
2921 if (!j
->unique_file
) {
2922 if (j
->unique_file_lost
)
2925 j
->unique_file
= ordered_hashmap_first(j
->files
);
2926 if (!j
->unique_file
)
2929 j
->unique_offset
= 0;
2940 /* Proceed to next data object in the field's linked list */
2941 if (j
->unique_offset
== 0) {
2942 r
= journal_file_find_field_object(j
->unique_file
, j
->unique_field
, k
, &o
, NULL
);
2946 j
->unique_offset
= r
> 0 ? le64toh(o
->field
.head_data_offset
) : 0;
2948 r
= journal_file_move_to_object(j
->unique_file
, OBJECT_DATA
, j
->unique_offset
, &o
);
2952 j
->unique_offset
= le64toh(o
->data
.next_field_offset
);
2955 /* We reached the end of the list? Then start again, with the next file */
2956 if (j
->unique_offset
== 0) {
2957 j
->unique_file
= ordered_hashmap_next(j
->files
, j
->unique_file
->path
);
2958 if (!j
->unique_file
)
2964 /* We do not use OBJECT_DATA context here, but OBJECT_UNUSED
2965 * instead, so that we can look at this data object at the same
2966 * time as one on another file */
2967 r
= journal_file_move_to_object(j
->unique_file
, OBJECT_UNUSED
, j
->unique_offset
, &o
);
2971 /* Let's do the type check by hand, since we used 0 context above. */
2972 if (o
->object
.type
!= OBJECT_DATA
)
2973 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
2974 "%s:offset " OFSfmt
": object has type %d, expected %d",
2975 j
->unique_file
->path
,
2977 o
->object
.type
, OBJECT_DATA
);
2979 r
= return_data(j
, j
->unique_file
, o
, &odata
, &ol
);
2983 /* Check if we have at least the field name and "=". */
2985 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
2986 "%s:offset " OFSfmt
": object has size %zu, expected at least %zu",
2987 j
->unique_file
->path
,
2988 j
->unique_offset
, ol
, k
+ 1);
2990 if (memcmp(odata
, j
->unique_field
, k
) != 0 || ((const char*) odata
)[k
] != '=')
2991 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
2992 "%s:offset " OFSfmt
": object does not start with \"%s=\"",
2993 j
->unique_file
->path
,
2997 /* OK, now let's see if we already returned this data object by checking if it exists in the
2998 * earlier traversed files. */
3000 ORDERED_HASHMAP_FOREACH(of
, j
->files
) {
3001 if (of
== j
->unique_file
)
3004 /* Skip this file it didn't have any fields indexed */
3005 if (JOURNAL_HEADER_CONTAINS(of
->header
, n_fields
) && le64toh(of
->header
->n_fields
) <= 0)
3008 /* We can reuse the hash from our current file only on old-style journal files
3009 * without keyed hashes. On new-style files we have to calculate the hash anew, to
3010 * take the per-file hash seed into consideration. */
3011 if (!JOURNAL_HEADER_KEYED_HASH(j
->unique_file
->header
) && !JOURNAL_HEADER_KEYED_HASH(of
->header
))
3012 r
= journal_file_find_data_object_with_hash(of
, odata
, ol
, le64toh(o
->data
.hash
), NULL
, NULL
);
3014 r
= journal_file_find_data_object(of
, odata
, ol
, NULL
, NULL
);
3026 r
= return_data(j
, j
->unique_file
, o
, ret_data
, ret_size
);
3034 _public_
int sd_journal_enumerate_available_unique(sd_journal
*j
, const void **data
, size_t *size
) {
3038 r
= sd_journal_enumerate_unique(j
, data
, size
);
3041 if (!JOURNAL_ERRNO_IS_UNAVAILABLE_FIELD(r
))
3043 /* Try with the next field. sd_journal_enumerate_unique() modifies state, so on the next try
3044 * we will access the next field. */
/* Rewinds the unique-value iteration started by sd_journal_query_unique() back to the
 * beginning. NULL context is tolerated as a no-op. */
_public_ void sd_journal_restart_unique(sd_journal *j) {
        if (!j)
                return;

        j->unique_file = NULL;
        j->unique_offset = 0;
        j->unique_file_lost = false;
}
3057 _public_
int sd_journal_enumerate_fields(sd_journal
*j
, const char **field
) {
3060 assert_return(j
, -EINVAL
);
3061 assert_return(!journal_pid_changed(j
), -ECHILD
);
3062 assert_return(field
, -EINVAL
);
3064 if (!j
->fields_file
) {
3065 if (j
->fields_file_lost
)
3068 j
->fields_file
= ordered_hashmap_first(j
->files
);
3069 if (!j
->fields_file
)
3072 j
->fields_hash_table_index
= 0;
3073 j
->fields_offset
= 0;
3077 JournalFile
*f
, *of
;
3085 if (j
->fields_offset
== 0) {
3088 /* We are not yet positioned at any field. Let's pick the first one */
3089 r
= journal_file_map_field_hash_table(f
);
3093 m
= le64toh(f
->header
->field_hash_table_size
) / sizeof(HashItem
);
3095 if (j
->fields_hash_table_index
>= m
) {
3096 /* Reached the end of the hash table, go to the next file. */
3101 j
->fields_offset
= le64toh(f
->field_hash_table
[j
->fields_hash_table_index
].head_hash_offset
);
3103 if (j
->fields_offset
!= 0)
3106 /* Empty hash table bucket, go to next one */
3107 j
->fields_hash_table_index
++;
3111 /* Proceed with next file */
3112 j
->fields_file
= ordered_hashmap_next(j
->files
, f
->path
);
3113 if (!j
->fields_file
) {
3118 j
->fields_offset
= 0;
3119 j
->fields_hash_table_index
= 0;
3124 /* We are already positioned at a field. If so, let's figure out the next field from it */
3126 r
= journal_file_move_to_object(f
, OBJECT_FIELD
, j
->fields_offset
, &o
);
3130 j
->fields_offset
= le64toh(o
->field
.next_hash_offset
);
3131 if (j
->fields_offset
== 0) {
3132 /* Reached the end of the hash table chain */
3133 j
->fields_hash_table_index
++;
3138 /* We use OBJECT_UNUSED here, so that the iterator below doesn't remove our mmap window */
3139 r
= journal_file_move_to_object(f
, OBJECT_UNUSED
, j
->fields_offset
, &o
);
3143 /* Because we used OBJECT_UNUSED above, we need to do our type check manually */
3144 if (o
->object
.type
!= OBJECT_FIELD
)
3145 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
3146 "%s:offset " OFSfmt
": object has type %i, expected %i",
3147 f
->path
, j
->fields_offset
,
3148 o
->object
.type
, OBJECT_FIELD
);
3150 sz
= le64toh(o
->object
.size
) - offsetof(Object
, field
.payload
);
3152 /* Let's see if we already returned this field name before. */
3154 ORDERED_HASHMAP_FOREACH(of
, j
->files
) {
3158 /* Skip this file it didn't have any fields indexed */
3159 if (JOURNAL_HEADER_CONTAINS(of
->header
, n_fields
) && le64toh(of
->header
->n_fields
) <= 0)
3162 if (!JOURNAL_HEADER_KEYED_HASH(f
->header
) && !JOURNAL_HEADER_KEYED_HASH(of
->header
))
3163 r
= journal_file_find_field_object_with_hash(of
, o
->field
.payload
, sz
,
3164 le64toh(o
->field
.hash
), NULL
, NULL
);
3166 r
= journal_file_find_field_object(of
, o
->field
.payload
, sz
, NULL
, NULL
);
3178 /* Check if this is really a valid string containing no NUL byte */
3179 if (memchr(o
->field
.payload
, 0, sz
))
3182 if (j
->data_threshold
> 0 && sz
> j
->data_threshold
)
3183 sz
= j
->data_threshold
;
3185 if (!GREEDY_REALLOC(j
->fields_buffer
, sz
+ 1))
3188 memcpy(j
->fields_buffer
, o
->field
.payload
, sz
);
3189 j
->fields_buffer
[sz
] = 0;
3191 if (!field_is_valid(j
->fields_buffer
))
3194 *field
= j
->fields_buffer
;
/* Rewinds the field-name iteration (sd_journal_enumerate_fields()) back to the beginning.
 * NULL context is tolerated as a no-op. */
_public_ void sd_journal_restart_fields(sd_journal *j) {
        if (!j)
                return;

        j->fields_file = NULL;
        j->fields_hash_table_index = 0;
        j->fields_offset = 0;
        j->fields_file_lost = false;
}
/* Returns whether the fd from sd_journal_get_fd() delivers reliable wakeups: true for
 * local storage, false when any journal file lives on a network filesystem (where inotify
 * cannot be trusted and polling is required, see sd_journal_get_timeout()). */
_public_ int sd_journal_reliable_fd(sd_journal *j) {
        assert_return(j, -EINVAL);
        assert_return(!journal_pid_changed(j), -ECHILD);

        return !j->on_network;
}
/* replace_var() callback for catalog rendering: resolves "@FIELD@" references to the
 * field's value in the current journal entry. Returns a newly allocated string the caller
 * frees; falls back to the literal field name if the field is absent or its value exceeds
 * REPLACE_VAR_MAX. */
static char *lookup_field(const char *field, void *userdata) {
        sd_journal *j = userdata;
        const void *data;
        size_t size, d;
        int r;

        assert(field);
        assert(j);

        r = sd_journal_get_data(j, field, &data, &size);
        if (r < 0 ||
            size > REPLACE_VAR_MAX)
                return strdup(field);

        /* data is "FIELD=value"; skip past the field name and '=' */
        d = strlen(field) + 1;

        return strndup((const char*) data + d, size - d);
}
3235 _public_
int sd_journal_get_catalog(sd_journal
*j
, char **ret
) {
3239 _cleanup_free_
char *text
= NULL
, *cid
= NULL
;
3243 assert_return(j
, -EINVAL
);
3244 assert_return(!journal_pid_changed(j
), -ECHILD
);
3245 assert_return(ret
, -EINVAL
);
3247 r
= sd_journal_get_data(j
, "MESSAGE_ID", &data
, &size
);
3251 cid
= strndup((const char*) data
+ 11, size
- 11);
3255 r
= sd_id128_from_string(cid
, &id
);
3259 r
= catalog_get(CATALOG_DATABASE
, id
, &text
);
3263 t
= replace_var(text
, lookup_field
, j
);
/* Looks up the catalog text for an explicit message ID, without any variable substitution
 * (no journal context involved). *ret is newly allocated, caller frees. */
_public_ int sd_journal_get_catalog_for_message_id(sd_id128_t id, char **ret) {
        assert_return(ret, -EINVAL);

        return catalog_get(CATALOG_DATABASE, id, ret);
}
/* Sets the maximum size of data fields returned by sd_journal_get_data() and friends;
 * 0 disables truncation. Default is DEFAULT_DATA_THRESHOLD (64K). */
_public_ int sd_journal_set_data_threshold(sd_journal *j, size_t sz) {
        assert_return(j, -EINVAL);
        assert_return(!journal_pid_changed(j), -ECHILD);

        j->data_threshold = sz;
        return 0;
}
/* Retrieves the current data-size threshold set via sd_journal_set_data_threshold(). */
_public_ int sd_journal_get_data_threshold(sd_journal *j, size_t *sz) {
        assert_return(j, -EINVAL);
        assert_return(!journal_pid_changed(j), -ECHILD);
        assert_return(sz, -EINVAL);

        *sz = j->data_threshold;
        return 0;
}
/* Returns whether any of the opened journal files live in runtime (volatile /run)
 * storage, as recorded while files were added. */
_public_ int sd_journal_has_runtime_files(sd_journal *j) {
        assert_return(j, -EINVAL);

        return j->has_runtime_files;
}
3300 _public_
int sd_journal_has_persistent_files(sd_journal
*j
) {
3301 assert_return(j
, -EINVAL
);
3303 return j
->has_persistent_files
;