From: Stefan Schantl
Date: Wed, 25 Nov 2015 08:07:55 +0000 (+0100)
Subject: Rework handling of monitored files.
X-Git-Tag: 2.0~68
X-Git-Url: http://git.ipfire.org/?p=people%2Fstevee%2Fguardian.git;a=commitdiff_plain;h=3111df627870b496ce35c30192098db316a64da4

Rework handling of monitored files.

From now on, a hash is used to store which files should be monitored and
to keep track of their current cursor positions.

The entire hash is shared between the main process and the worker threads.
A benefit of this is that the current cursor position of each monitored
file is kept across thread restarts. The hash is also designed to be
re-generated in case of a service reload without losing any relevant data.

Signed-off-by: Stefan Schantl
---

diff --git a/guardian b/guardian
index fd16b3a..cf759b0 100644
--- a/guardian
+++ b/guardian
@@ -27,6 +27,7 @@ use Thread::Queue;
 use Linux::Inotify2;
 use Time::HiRes qw[ time sleep ];
 
+require Guardian::Base;
 require Guardian::Config;
 require Guardian::Parser;
 require Guardian::Socket;
@@ -36,11 +37,6 @@ use warnings;
 # Define version.
 my $version ="2.0";
 
-# Array to store the monitored logfiles.
-my @monitored_files = (
-	"/var/log/snort/alert",
-);
-
 # Get and store the given command line arguments in a hash.
 my %cmdargs = ();
 
@@ -69,6 +65,10 @@ if (defined($cmdargs{"help"})) {
 # Push the may be given config file argument.
 my %mainsettings = &Guardian::Config::UseConfig($cmdargs{"config"});
 
+# Shared hash between the main process and all threads. It will store all
+# monitored files and their current file position.
+my %monitored_files :shared = ();
+
 # Create the main queue. It is used to store and process all events which are
 # reported and enqueued by the worker threads.
 my $queue :shared = new Thread::Queue or die "Could not create new, empty queue. $!\n";;
@@ -114,6 +114,9 @@ sub Init () {
 	# Setup IPC mechanism via Socket in an own thread.
 	threads->create(\&Socket);
 
+	# Generate hash of monitored files.
+	%monitored_files = &Guardian::Base::GenerateMonitoredFiles(\%mainsettings, \%monitored_files);
+
 	# Start worker threads.
 	&StartWorkers();
 }
@@ -144,9 +147,6 @@ sub Worker ($) {
 	# Signal handler to kill worker.
 	$SIG{'KILL'} = sub { threads->exit(); };
 
-	# Get the fileposition.
-	my $fileposition = &Init_fileposition("$file");
-
 	# Create inotify watcher.
 	my $watcher = new Linux::Inotify2 or die "Could not use inotify. $!\n";
 
@@ -163,6 +163,9 @@ sub Worker ($) {
 		if ($watcher->read) {
 			my @message = ();
 
+			# Obtain fileposition from hash.
+			my $fileposition = $monitored_files{$file};
+
 			# Open the file.
 			open (FILE, $file) or die "Could not open $file. $!\n";
 
@@ -178,8 +181,13 @@ sub Worker ($) {
 				push (@message, $line);
 			}
 
-			# Update fileposition.
-			$fileposition = tell(FILE);
+			{
+				# Lock shared hash.
+				lock(%monitored_files);
+
+				# Update fileposition.
+				$monitored_files{$file} = tell(FILE);
+			}
 
 			# Close file.
 			close(FILE);
@@ -262,19 +270,16 @@ sub SignalHandler {
 #
 ## Function to start the workers (threads) for all monitored files.
 #
-## This function will loop through the array of monitored files and will
+## This function will loop through the hash of monitored files and will
 ## spawn an own thread based worker for each file. Every created worker will
 ## be added to the array of running workers.
 #
 sub StartWorkers () {
-	# Loop through the array of which files should be monitored and
-	# create a worker thread for each single one.
-	foreach my $monitored_file (@monitored_files) {
-		# Check if the file exists and is readable.
-		if (-r "$monitored_file") {
-			# Create worker thread for the file.
-			push @running_workers, threads->create(\&Worker,$monitored_file);
-		}
-	}
+	# Loop through the hash which contains the monitored files and start
+	# a worker thread for each single one.
+	foreach my $file (keys %monitored_files) {
+		# Create worker thread for the file.
+		push @running_workers, threads->create(\&Worker,$file);
+	}
 }
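
Note: the diff calls Guardian::Base::GenerateMonitoredFiles(), but the module
itself is not part of this patch. Below is a minimal, hypothetical sketch of
how such a helper could be written; the "LogFile" settings key and the
fallback path are assumptions for illustration only. It merely demonstrates
the behaviour described in the commit message (keep known cursor positions on
a reload, start new files at their current end) and is not the actual
Guardian::Base code.

	# Hypothetical sketch only -- not the code shipped in Guardian::Base.
	package Guardian::Base;

	use strict;
	use warnings;

	sub GenerateMonitoredFiles {
		my ($settings, $monitored_files) = @_;

		# Assumed settings key; the real configuration layout may differ.
		my @files = ( defined($settings->{"LogFile"})
			? $settings->{"LogFile"}
			: "/var/log/snort/alert" );

		my %new_hash = ();

		foreach my $file (@files) {
			# Skip files which cannot be read.
			next unless (-r $file);

			if (exists($monitored_files->{$file})) {
				# Keep the known cursor position on a service reload.
				$new_hash{$file} = $monitored_files->{$file};
			} else {
				# New file: start reading at its current end.
				$new_hash{$file} = (-s $file) || 0;
			}
		}

		# Return a plain hash; the caller assigns it to the shared
		# %monitored_files, which makes the values shared again.
		return %new_hash;
	}

	1;

Because the previously shared hash is consulted before the new one is
returned, a reload can regenerate the list of monitored files without losing
the cursor positions of files that are still being watched.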