From a7fb5630d1a1eca802d2bf714dde6a50b325a2cf Mon Sep 17 00:00:00 2001
From: ms
Date: Wed, 23 Aug 2006 19:19:22 +0000
Subject: [PATCH] Update:
 * QoS is functional (hopefully).
 * Removed "Aktualisieren" from the log pages.
 * Cleaned up header.pl.

git-svn-id: http://svn.ipfire.org/svn/ipfire/trunk@255 ea5c0bd1-69bd-2848-81d8-4f18e57aeed8
---
 config/cfgroot/header.pl      |  11 +-
 config/qos/RRD-func.pl        | 197 ++++++++++++++
 config/qos/event-func.pl      | 137 ++++++++++
 config/qos/parse-func.pl      | 488 ++++++++++++++++++++++++++++++++++
 doc/ChangeLog                 |   7 +
 html/cgi-bin/logs.cgi/log.dat |   6 +-
 html/cgi-bin/qos.cgi          |  11 +-
 make.sh                       |   2 +-
 src/ROOTFILES.i386            |   1 +
 src/misc-progs/qosctrl.c      |  41 ++-
 src/rc.d/rc.updatered         |   2 +-
 src/scripts/qosd              | 116 ++++++++
 12 files changed, 982 insertions(+), 37 deletions(-)
 create mode 100644 config/qos/RRD-func.pl
 create mode 100644 config/qos/event-func.pl
 create mode 100644 config/qos/parse-func.pl
 create mode 100644 src/scripts/qosd

diff --git a/config/cfgroot/header.pl b/config/cfgroot/header.pl
index 336ac194f1..a8a8e4ad07 100644
--- a/config/cfgroot/header.pl
+++ b/config/cfgroot/header.pl
@@ -88,11 +88,12 @@ if ( -d "/var/ipfire/langs/${language}/" ) {
 };
 
 ### Read IPFire Buildversion
-$FIREBUILD = "Datei firebuild nicht gefunden\n";
+$FIREBUILD = "File not found: firebuild\n";
 if (open(MYFile, "<${swroot}/firebuild")) {
 	$FIREBUILD = <MYFile>;
+	chomp($FIREBUILD);
 	$FIREBUILD = "(Build: $FIREBUILD)";
-	close(_File);
+	close(MYFile);
 };
 
 require "${swroot}/langs/en.pl";
@@ -819,12 +820,6 @@ END
 ;
 	&showsubsubsection($menu);
-
-	eval {
-		require 'ipfire-network.pl';
-		$supported = check_support();
-		warn_unsupported($supported);
-	};
 }
 
 sub openpagewithoutmenu {
diff --git a/config/qos/RRD-func.pl b/config/qos/RRD-func.pl
new file mode 100644
index 0000000000..4642f1333f
--- /dev/null
+++ b/config/qos/RRD-func.pl
@@ -0,0 +1,197 @@
+
+##########################################
+##
+## DESCRIPTION
+##
+##  RRD function for tc-graph.
+##  Which is part of the ADSL-optimizer.
+##
+## REQUIRES
+##
+##
+## AUTHOR
+##  Jesper Dangaard Brouer , d.15/4-2004
+##
+## CHANGELOG
+##  2004-04-15: Initial version.
+##
+## $Id: RRD-func.pl,v 1.10 2004/05/27 17:02:12 hawk Exp $
+##########################################
+
+use RRDs;
+
+if (not defined $rrd_datadir) {
+    our $rrd_datadir = "/var/spool/rrdqueues/";
+}
+
+if (not defined $STEP) {
+    our $STEP = 10;
+}
+
+my $heartbeat=$STEP*2;
+
+# Update script samples every 10 seconds.
+# 24*60*60 = 86400 seconds (== one day)
+# 8640 *10 = 86400 seconds (== one day)
+# 8640 samples * 5 days = 43200 samples of 10 sec each
+#
+my @rrd_data_sources =
+    ("-s", $STEP,
+     "DS:bytes:COUNTER:$heartbeat:0:U",
+     "DS:bits:COUNTER:$heartbeat:0:U",
+     "DS:pkts:COUNTER:$heartbeat:0:U",
+     "DS:dropped:COUNTER:$heartbeat:0:U",
+     "DS:overlimits:COUNTER:$heartbeat:0:U",
+     "DS:lended:COUNTER:$heartbeat:0:U",
+     "DS:borrowed:COUNTER:$heartbeat:0:U",
+     "DS:giants:COUNTER:$heartbeat:0:U",
+     "DS:backlog:GAUGE:$heartbeat:0:U",
+     "RRA:AVERAGE:0.5:1:43200",
+     "RRA:AVERAGE:0.5:7:8640",
+     "RRA:AVERAGE:0.5:31:8640",
+     "RRA:AVERAGE:0.5:372:8640",
+     "RRA:MAX:0.5:7:8640",
+     "RRA:MAX:0.5:31:8640",
+     "RRA:MAX:0.5:372:8640"
+    );
+
+
+sub get_filename_rrd($) {
+    my $class_device = "$_[0]";
+    my $filename = "${rrd_datadir}class_${class_device}.rrd";
+    return $filename;
+}
+
+sub create_rrdfile($) {
+    my $class_device = "$_[0]";
+    my $filename = get_filename_rrd($class_device);
+    RRDs::create $filename, @rrd_data_sources;
+    my $ERROR = RRDs::error;
+    if ($ERROR) {
+        my $timestamp = time;
+        die "$timestamp: ERROR - Unable to create RRDfile \"$filename\": $ERROR\n";
+    }
+}
+
+sub format_class_data($) {
+    my $class = $_[0];
+    my ($rrd_template, $rrd_data);
+    my (@array_template, @array_data);
+    #print "Ref:". ref($class) ."\n";
+
+    # Select and correct undef values and keys
+    while ( (my $key, my $value) = each %{$class}) {
+        # Skip timestamps
+        if ( ($key eq "last_update") ||
+             ($key eq "file_update") ||
+             ($key =~ /hfsc_/ )) {next}
+
+        push @array_template, $key;
+
+        if ( (not defined $value) ||
+             ("$value" eq "") ) {
+            $value = "U";
+        }
+        push @array_data, $value;
+    }
+
+    # Make an RRD-suitable input format
+    $rrd_template = join(":",@array_template);
+    $rrd_data = join(":",@array_data);
+
+    return ($rrd_template, $rrd_data);
+}
+
+sub update_rrds {
+
+    my $res=0;
+
+    my @test = keys %classes_data;
+    if ( $#test <= 0) {
+        print time, " [update_rrds] WARNING: classes_data empty!\n";
+        return "classes_data empty";
+    }
+
+    # Find the class_device (keys) in %classes_data
+    for my $class_device ( keys %classes_data ) {
+
+        if ("last_update" eq "$class_device") {next}
+
+        # Verify that the file exists (else create it)
+        my $filename = get_filename_rrd($class_device);
+        if ( ! -f $filename ) {
+            print "Creating RRDfile: $filename\n";
+            create_rrdfile($class_device);
+        }
+        #print "$class_device\n";
+
+        # Make an RRD-suitable input format
+        my ($rrd_template, $rrd_data) = format_class_data($classes_data{$class_device});
+        #print "rrd_template: $rrd_template\n";
+        #print "rrd_data: $rrd_data\n";
+
+
+        # WHAT ABOUT:
+        #  $classes_data{$device}{last_update} ????
+        my ($tmp, $device) = split /_/, $class_device;
+        #print "device: $device $classes_data{last_update}{$device} \n";
+        if ( (exists $classes_data{last_update}{$device}) ) {
+            if ((($classes_data{$class_device}{last_update} + $heartbeat) <
+                 $classes_data{last_update}{$device})) {
+                print "WARNING: the class $class_device was";
+                print " not updated in lastrun + heartbeat...\n";
+                print "Assuming $class_device is removed,";
+                print " thus deleting it from the hash table.";
+#               # ??? MAYBE DELETE THE OLD HASH ???
+                $res="Deleting class $class_device";
+                for my $key ( keys %{ $classes_data{$class_device} } ) {
+                    delete( $classes_data{$class_device}{$key});
+                    print " Deleting key: $key from: $class_device \n";
+                }
+                delete $classes_data{$class_device};
+                next;
+            }
+        }
+
+        # Verify that this is new data,
+        # and not old data which has already been updated
+        # FIXME
+#       print "$0 FIXME update_rrds \n";
+        if ( exists $classes_data{$class_device}{file_update} ) {
+            if (($classes_data{$class_device}{file_update} >=
+                 $classes_data{$class_device}{last_update})) {
+                print "Warning ($class_device):";
+                print " data already updated... old data or deleted class?\n";
+                $res="Old data or deleted class";
+                # ??? MAYBE DELETE THE OLD HASH ???
+                next;
+            }
+        }
+
+
+        # Update the RRD file
+        my $update_time = $classes_data{$class_device}{last_update};
+#       print "Updates: $filename time:$update_time\n";
+#       print " --template=$rrd_template\n";
+#       print " $update_time:$rrd_data\n";
+
+#       `rrdtool update $filename --template=$rrd_template $update_time:$rrd_data`;
+        RRDs::update ($filename, "--template=$rrd_template",
+                      "N:$rrd_data");
+
+        my $ERROR = RRDs::error;
+        if ($ERROR) {
+            my $timestamp = time;
+            print "$timestamp: WARNING - ";
+            print "Unable to update RRDfile \"$filename\": $ERROR\n";
+            $res="Unable to update RRDfile \"$filename\"";
+        } else {
+            $classes_data{$class_device}{file_update} = time;
+        }
+    }
+    return $res;
+}
+
+
+return 1;
+
diff --git a/config/qos/event-func.pl b/config/qos/event-func.pl
new file mode 100644
index 0000000000..d6ffcd64ae
--- /dev/null
+++ b/config/qos/event-func.pl
@@ -0,0 +1,137 @@
+#!/usr/bin/perl
+
+##########################################
+##
+## NAME
+##
+## DESCRIPTION
+##
+##  Which is part of the ADSL-optimizer.
+##
+## USAGE / FUNCTIONS
+##
+##
+##
+##
+##
+## REQUIRES
+##
+##
+## AUTHOR
+##  Jesper Dangaard Brouer , d.21/4-2004
+##
+## CHANGELOG
+##  2004-04-21: Initial version.
+##
+## $Id: event-func.pl,v 1.10 2004/08/10 16:05:46 hawk Exp $
+##########################################
+
+our $event_file_all = "${event_datadir}changes.evt";
+sub get_filename_event($) {
+    my $class_device = "$_[0]";
+    my $filename = "${event_datadir}class_${class_device}.evt";
+    return $filename;
+}
+
+sub get_filename_bandwidth_info($) {
+    my $class_device = "$_[0]";
+    my $filename = "${event_datadir}class_${class_device}_bandwidth.evt";
+    return $filename;
+}
+
+sub update_event_file($$$) {
+    my $filename = $_[0];
+    my $information = $_[1];
+    my $timestamp = $_[2];
+
+    if ("$information" ne "") {
+        # Append to file
+        open( OUTPUT, ">>$filename")
+            or print "ERROR: Opening/updating event file $filename\n";
+        print OUTPUT "$timestamp $information\n";
+        close(OUTPUT);
+    }
+}
+
+sub update_info_file($$$) {
+    my $filename = $_[0];
+    my $information = $_[1];
+    my $timestamp = $_[2];
+    # Truncate file
+    open( OUTPUT, ">$filename")
+        or print "ERROR: Opening/updating info event file $filename\n";
+    print OUTPUT "$timestamp $information\n";
+    close(OUTPUT);
+
+}
+
+sub process_events {
+
+    my @test = keys %classes_info;
+    if ( $#test < 0) {
+        print time, " [process_events] WARNING: classes_info empty!\n";
+        return "classes_info empty";
+    }
+
+    my @bandwidth_items = ( "type", "prio", "rate", "ceil" );
+
+    my $event_reduced = "";
+    my $last_update;
+
+    # Find the class_device (keys) in %classes_info
+    for my $class_device ( sort keys %classes_info ) {
+
+        if ("$class_device" eq "last_update") {next}
+
+        my $event_class = "";
+        my $bandwidth_info = "";
+
+        # Tests if something has changed
+        if ((not exists $classes_info{$class_device}{file_update}) ||
+            ($classes_info{$class_device}{last_update} >
+             $classes_info{$class_device}{file_update})) {
+
+            $last_update = $classes_info{$class_device}{last_update};
+
+            $event_class .= "($class_device)";
+            if ( "$event_reduced" eq "" ) {$event_reduced="Class changed:"}
+            $event_reduced .= " ($class_device)";
+            # The list of changed keys
+            while( $changed_key =
+                   shift @{ $classes_info{$class_device}{changed} })
+            {
+                my $value = $classes_info{$class_device}{$changed_key};
+                $event_class .= " $changed_key=$value";
+            }
+
+            # When something changed always update all the bandwidth info
+            foreach my $item (@bandwidth_items) {
+                if (exists $classes_info{$class_device}{$item}) {
+                    my $value = $classes_info{$class_device}{$item};
+                    if (defined $value) {
+                        $bandwidth_info .= " $item:$value";
+                    }
+                }
+            }
+
+            print time . "($class_device) changes... ($last_update) \"$bandwidth_info\" \n";
+
+            $classes_info{$class_device}{file_update}=$last_update;
+
+            my $event_file = get_filename_event($class_device);
+            update_event_file($event_file , $event_class, $last_update);
+
+            my $info_file = get_filename_bandwidth_info($class_device);
+            update_info_file($info_file, $bandwidth_info, $last_update);
+        }
+
+    }
+    # Only one line per process_events call
+    # (notice $last_update is the latest timestamp assignment)
+    if (defined $last_update) {
+        update_event_file($event_file_all, $event_reduced, $last_update);
+    }
+}
+
+
+1;
diff --git a/config/qos/parse-func.pl b/config/qos/parse-func.pl
new file mode 100644
index 0000000000..0a25a35978
--- /dev/null
+++ b/config/qos/parse-func.pl
@@ -0,0 +1,488 @@
+#!/usr/bin/perl
+
+##########################################
+##
+## NAME
+##
+## DESCRIPTION
+##
+##  Which is part of the ADSL-optimizer.
+##
+## USAGE / FUNCTIONS
+##
+##
+##
+##
+## REQUIRES
+##
+##
+## AUTHOR
+##  Jesper Dangaard Brouer , d.15/4-2004
+##
+## CHANGELOG
+##  2004-04-15: Initial version.
+##  2005-04-18: Remove some warnings.
+##
+## $Id: parse-func.pl,v 1.15 2005/04/18 12:37:17 hawk Exp $
+##########################################
+
+#use Data::Dumper;
+
+#our %classes_data;
+#our %classes_info;
+#our $tc_command="/sbin/tc";
+
+my @input_htb = (<<"END_OF_HERE_HTB" =~ m/^\s*(.+)/gm);
+class tbf 4220:1 parent 4220:
+class htb 1:1 root rate 400Kbit ceil 400Kbit burst 2111b cburst 2111b
+ Sent 12369084336 bytes 80967118 pkts (dropped 0, overlimits 0)
+ rate 45020bps 258pps
+ lended: 23353805 borrowed: 0 giants: 0
+ tokens: 30210 ctokens: 30210
+
+class htb 1:10 parent 1:1 prio 0 rate 80Kbit ceil 320Kbit burst 1701b cburst 2008b
+ Sent 80640087 bytes 247988 pkts (dropped 0, overlimits 0)
+ backlog 42p
+ lended: 230876 borrowed: 17112 giants: 0
+ tokens: 127200 ctokens: 37940
+
+class htb 1:20 parent 1:1 leaf 4220: prio 1 rate 100Kbit ceil 200Kbit burst 1727b cburst 1855b
+ Sent 2495181573 bytes 44034303 pkts (dropped 5837, overlimits 0)
+ lended: 43825585 borrowed: 208718 giants: 0
+ tokens: 103424 ctokens: 55808
+
+class htb 1:30 parent 1:1 leaf 4230: prio 3 rate 80Kbit ceil 400Kbit burst 1701b cburst 2111b
+ Sent 2060213567 bytes 5465574 pkts (dropped 121, overlimits 0)
+ rate 16851bps 35pps
+ lended: 4556992 borrowed: 908582 giants: 0
+ tokens: -25364 ctokens: 32897
+
+class htb 1:50 parent 1:1 leaf 4250: prio 5 rate 40Kbit ceil 120Kbit burst 1650b cburst 1752b
+ Sent 6071486687 bytes 24448436 pkts (dropped 8086739, overlimits 0)
+ rate 15801bps 85pps backlog 126p
+ lended: 8324530 borrowed: 16123780 giants: 0
+ tokens: -202717 ctokens: -172499
+
+class htb 1:666 parent 1:1 leaf 666: prio 7 rate 4Kbit ceil 40Kbit burst 1604b cburst 1650b
+ Sent 2148626078 bytes 6771069 pkts (dropped 2078536, overlimits 0)
+ rate 5221bps 17pps backlog 125p
+ lended: 675330 borrowed: 6095613 giants: 0
+ tokens: -1149121 ctokens: -293386
+
+END_OF_HERE_HTB
+
+
+my @input_hfsc = (<<"END_OF_HERE_HFSC" =~ m/^\s*(.+)/gm);
+class hfsc 1: root
+ Sent 0 bytes 0 pkts (dropped 0, overlimits 0)
+ period 0 level 2
+
+class hfsc 1:1 parent 1: ls m1 0bps d 0us m2 250Kbit ul m1 0bps d 0us m2 250Kbit
+ Sent 0 bytes 0 pkts (dropped 0, overlimits 0)
+ period 6 work 131770097 bytes level 1
+
+class hfsc 1:10 parent 1:1 rt m1 250Kbit d 30.0ms m2 50Kbit ls m1 250Kbit d 50.0ms m2 50Kbit
+ Sent 1300885 bytes 7052 pkts (dropped 0, overlimits 0)
+ period 6502 work 1300885 bytes rtwork 1245495 bytes level 0
+
+class hfsc 1:20 parent 1: rt m1 0bps d 64.0ms m2 75Kbit ls m1 0bps d 0us m2 250Kbit
+ Sent 19144279 bytes 325503 pkts (dropped 46, overlimits 0)
+ backlog 3p
+ period 20242 work 19143778 bytes level 0
+
+class hfsc 1:30 parent 1:1 leaf 4230: ls m1 0bps d 150.0ms m2 50Kbit
+ Sent 45139930 bytes 74200 pkts (dropped 1664, overlimits 0)
+ backlog 24p
+ period 140 work 44885232 bytes level 0
+
+class hfsc 1:50 parent 1:1 leaf 4250: ls m1 0bps d 235.7ms m2 72Kbit
+ Sent 73910198 bytes 301294 pkts (dropped 104807, overlimits 0)
+ backlog 62p
+ period 115 work 64625490 bytes level 0
+
+class hfsc 1:666 parent 1:1 leaf 666: ls m1 0bps d 1.0s m2 2Kbit
+ Sent 2217104 bytes 17018 pkts (dropped 74526, overlimits 0)
+ backlog 22p
+ period 1 work 1814712 bytes level 0
+
+END_OF_HERE_HFSC
+
+sub parse_class($) {
+    my $device = "$_[0]";
+    my $return_val = 1;
+
+    my $timestamp = time;
+    my @tc_output = `$tc_command -statistics class show dev $device`;
+#   my @tc_output = @input_hfsc;
+#   my @tc_output = @input_htb;
+    my $result = $?;
+    if ( $result != 0 ) {
+        print "Error executing $tc_command\n";
+        return $result;
+    }
+
+    $classes_data{last_update}{$device} = $timestamp;
+    $classes_info{last_update}{$device} = $timestamp;
+
+    #for my $line (@tc_output) {
+    for my $i (0 .. $#tc_output) {
+
+        my $line=$tc_output[$i];
+        # Parsing HTB:
+        # ------------
+        if ( $line =~ m/class htb (\d+):(\d+)( root| parent )?(\d+:\d?)?( leaf )?(\d+)?:?( prio )?(\d+)? rate (.*) ceil (.*) burst (.*) cburst (.*)/ ) {
+            my $type = "htb";
+            my $major = $1;
+            my $minor = $2;
+            my $class = "${major}-${minor}";
+            #my $hash = "${class}_${device}";
+            my $parent= $4;
+            my $leaf = $6;
+            my $prio = $8;
+            my $rate = $9;
+            my $ceil = $10;
+            my $burst = $11;
+            my $cburst= $12;
+
+#           print "class: $class\n"."parent: $parent\n"."leaf: $leaf\n"."prio: $prio\n";
+#           print "rate: $rate\n"."ceil: $ceil\n"."burst: $burst\n"."cburst: $cburst\n";
+
+            my ($bytes, $pkts, $dropped, $overlimits);
+            if ($tc_output[$i + 1] =~ m/Sent (\d+) bytes (\d+) pkts \(dropped (\d+), overlimits (\d+)\)/ ) {
+                $bytes = $1;
+                $pkts = $2;
+                $dropped = $3;
+                $overlimits = $4;
+#               print "bytes: $bytes\n"."pkts: $pkts\n";
+#               print "dropped: $dropped\n"."overlimits: $overlimits\n";
+            } else {
+                print "$timestamp: ERROR(+1) - Unable to parse (class ${class}_$device): ";
+                print "\"$tc_output[$i + 1]\"\n";
+                $return_val="";
+                next;
+            }
+
+            # Problem:
+            # Sometimes the "rate" line is not shown (when a rate cannot be calculated)
+            # And sometimes only "backlog"...
+            # Use $next_index to specify the next line to parse
+            #
+            my $next_index = 3;
+            my ($backlog);
+            if ($tc_output[$i + 2] =~ m/((rate (\d+\w+) )|backlog )(\d+)?(pps )?(backlog )?(\d+)?p?/ ) {
+                $backlog = $7;
+                #print "backlog: $backlog\n";
+            } else {
+#               Too verbose:
+#               print "$timestamp: WARNING \"rate\" line missing";
+#               print " very inactive class ${class}_$device).\n";
+                $next_index = 2;
+            }
+
+            my ($lended, $borrowed, $giants);
+            if ($tc_output[$i + $next_index] =~ m/lended: (\d+) borrowed: (\d+) giants: (\d+)/ ) {
+                $lended = $1;
+                $borrowed = $2;
+                $giants = $3;
+                #print "lended: $lended\n"."borrowed: $borrowed\n"."giants: $giants\n";
+            } else {
+                print "$timestamp: ERROR(+$next_index) - Unable to parse (class ${class}_$device): ";
+                print "\"$tc_output[$i + $next_index]\"\n";
+                $return_val="";
+                next;
+            }
+
+            # Update the hash tables
+            my $hash="${class}_$device";
+
+            # Test if previous data have been updated to file
+            if ( (exists $classes_data{$hash}{last_update}) &&
+                 (exists $classes_data{$hash}{file_update})) {
+                if ( $classes_data{$hash}{last_update} >
+                     $classes_data{$hash}{file_update} ){
+                    print "Warning: old data from $hash has not been updated to file!\n";
+                }
+            }
+
+            # Update the statistics data
+            # (need a function call for error checking)
+            $classes_data{$hash}{last_update} = $timestamp;
+            update_counter( $hash, $timestamp, "bytes" , $bytes);
+            #(yes I know it's bad/redundant, but it makes it easier elsewhere)
+            update_counter( $hash, $timestamp, "bits" , $bytes*8);
+            update_counter( $hash, $timestamp, "pkts" , $pkts);
+            update_counter( $hash, $timestamp, "dropped" , $dropped);
+            update_counter( $hash, $timestamp, "overlimits", $overlimits);
+            update_counter( $hash, $timestamp, "lended" , $lended);
+            update_counter( $hash, $timestamp, "borrowed" , $borrowed);
+            update_counter( $hash, $timestamp, "giants" , $giants);
+            # Not a counter value...
+            $classes_data{$hash}{backlog} = $backlog;
+
+            # Update the info data
+            # (remember to update the "type" first)
+            update_info( $hash, $timestamp, "type" , $type);
+            update_info( $hash, $timestamp, "parent", $parent);
+            update_info( $hash, $timestamp, "leaf" , $leaf);
+            update_info( $hash, $timestamp, "prio" , $prio);
+            update_info( $hash, $timestamp, "rate" , $rate);
+            update_info( $hash, $timestamp, "ceil" , $ceil);
+            update_info( $hash, $timestamp, "burst" , $burst);
+            update_info( $hash, $timestamp, "cburst", $cburst);
+
+            #print "\n";
+        }
+
+        # Parsing HFSC:
+        # -------------
+        if ( $line =~ m/class hfsc (\d+):(\d+)( root| parent )?(\d+:\d?)?( leaf )?(\d+)?:?( rt m1 (\d+\w+?) d (\d+.?\d?\w+) m2 (\d+\w+?))?( ls m1 (\d+\w+?) d (\d+.?\d?\w+) m2 (\d+\w+?))?( ul m1 (\d+\w+?) d (\d+.?\d?\w+) m2 (\d+\w+?))? / ){
+
+            my $type = "hfsc";
+            my $major = $1;
+            my $minor = $2;
+            my $class = "${major}-${minor}";
+            #my $hash = "${class}_${device}";
+            my $parent= $4;
+            my $leaf = $6;
+
+            my $realtime_m1; if (defined $8 && $8 ne '0bps') {$realtime_m1 = $8;}
+            my $realtime_d; if (defined $9 && $9 ne '0us' ) {$realtime_d = $9;}
+            my $realtime_m2 = $10;
+
+            my $linkshare_m1; if (defined $12 && $12 ne '0bps') { $linkshare_m1 = $12;}
+            my $linkshare_d ; if (defined $13 && $13 ne '0us' ) { $linkshare_d = $13;}
+            my $linkshare_m2 = $14;
+
+            my $upperlimit_m1; if (defined $16 && $16 ne '0bps') { $upperlimit_m1 = $16;}
+            my $upperlimit_d ; if (defined $17 && $17 ne '0us' ) { $upperlimit_d = $17;}
+            my $upperlimit_m2 = $18;
+
+            #print "\nType: $type\n";
+            my ($bytes, $pkts, $dropped, $overlimits);
+            if ($tc_output[$i + 1] =~ m/Sent (\d+) bytes (\d+) pkts \(dropped (\d+), overlimits (\d+)\)/ ) {
+                $bytes = $1;
+                $pkts = $2;
+                $dropped = $3;
+                $overlimits = $4;
+                #print "bytes: $bytes\n"."pkts: $pkts\n";
+                #print "dropped: $dropped\n"."overlimits: $overlimits\n";
+            } else {
+                print "$timestamp: ERROR(+1) - Unable to parse (class ${class}_$device): ";
+                print "\"$tc_output[$i + 1]\"\n";
+                $return_val="";
+                next;
+            }
+
+            # Sometimes the "backlog" line is not shown (when there is no backlog...)
+            # Use $next_index to specify the next line to parse
+            #
+            my $next_index = 3;
+            my ($backlog);
+            if ($tc_output[$i + 2] =~ m/backlog (\d+)?p?/ ) {
+                $backlog = $1;
+                #print "backlog: $backlog\n";
+            } else {
+                $next_index = 2;
+            }
+
+            my ($period, $work, $rtwork, $level);
+            if ($tc_output[$i + $next_index] =~ m/period (\d+) (work (\d+) bytes )?(rtwork (\d+) bytes )?level (\d+)/ ) {
+                $period = $1;
+                $work = $3;
+                $rtwork = $5;
+                $level = $6;
+            } else {
+                print "$timestamp: ERROR(+$next_index) - Unable to parse (class ${class}_$device): ";
+                print "\"$tc_output[$i + $next_index]\"\n";
+                $return_val="";
+                next;
+            }
+
+
+            # Update the hash tables
+            my $hash="${class}_$device";
+
+            # Test if previous data have been updated to file
+            if ( (exists $classes_data{$hash}{last_update}) &&
+                 (exists $classes_data{$hash}{file_update})) {
+                if ( $classes_data{$hash}{last_update} >
+                     $classes_data{$hash}{file_update} ){
+                    print "Warning: old data from $hash has not been updated to file!\n";
+                }
+            }
+
+            # HFSC - Update the statistics data
+            # (need a function call for error checking)
+            $classes_data{$hash}{last_update} = $timestamp;
+            update_counter( $hash, $timestamp, "bytes" , $bytes);
+            #(yes I know it's bad/redundant, but it makes it easier elsewhere)
+            update_counter( $hash, $timestamp, "bits" , $bytes*8);
+            update_counter( $hash, $timestamp, "pkts" , $pkts);
+            update_counter( $hash, $timestamp, "dropped" , $dropped);
+            update_counter( $hash, $timestamp, "overlimits", $overlimits);
+            # Not a counter value...
+            $classes_data{$hash}{backlog} = $backlog;
+            #
+            # Extra HFSC counters
+            $classes_data{$hash}{hfsc_period} = $period;
+            update_counter( $hash, $timestamp, "hfsc_work" , $work);
+            update_counter( $hash, $timestamp, "hfsc_rtwork" , $rtwork);
+
+
+            # HFSC - Update the info data
+            # (remember to update the "type" first)
+            update_info( $hash, $timestamp, "type" , $type);
+            update_info( $hash, $timestamp, "parent", $parent);
+            update_info( $hash, $timestamp, "leaf" , $leaf);
+            #
+            # Extra HFSC information
+            update_info( $hash, $timestamp, "level" , $level);
+            update_info( $hash, $timestamp, "realtime_m1", $realtime_m1);
+            update_info( $hash, $timestamp, "realtime_d" , $realtime_d);
+            update_info( $hash, $timestamp, "realtime_m2", $realtime_m2);
+
+            update_info( $hash, $timestamp, "linkshare_m1", $linkshare_m1);
+            update_info( $hash, $timestamp, "linkshare_d" , $linkshare_d);
+            update_info( $hash, $timestamp, "linkshare_m2", $linkshare_m2);
+
+            update_info( $hash, $timestamp, "upperlimit_m1", $upperlimit_m1);
+            update_info( $hash, $timestamp, "upperlimit_d" , $upperlimit_d);
+            update_info( $hash, $timestamp, "upperlimit_m2", $upperlimit_m2);
+
+
+        }
+
+        # Parsing XXX:
+        # ------------
+        if ( $line =~ m/class XXX/ ) {
+            print "Matching class XXX\n";
+        }
+
+    }
+    return $return_val;
+}
+
+# The main purpose of this function is to detect counter resets
+# and avoid passing them on to RRDtool, which interprets them
+# as counter overflows, thus updating with a very large number.
+sub update_counter ($$$$) {
+    my $class_hash = "$_[0]";
+    my $timestamp = "$_[1]";
+    my $data_key = "$_[2]";
+    my $new_value;
+    if ( defined $_[3]) {
+        $new_value = "$_[3]";
+    }
+    #
+    my $max_allowed_wrap_increase = 100000000;
+    my $old_value;
+    if (exists $classes_data{$class_hash}{$data_key}) {
+        $old_value = $classes_data{$class_hash}{$data_key};
+        #print "old_value: $old_value\n";
+    }
+
+#   # If the new and old value is not defined, nothing is done
+#   if ((not defined $new_value) && (not defined $old_value)) {
+#       return "";
+#   }
+
+    # Argh... the tc program outputs in unsigned long long (64 bit),
+    # but Perl's integers should be 32 bit; somehow Perl still
+    # manages to store numbers larger than 32 bits.
+    my $MAX_VALUE=0xFFFFFFFF;
+
+    if ((defined $new_value) && (defined $old_value)) {
+        my $delta = $new_value - $old_value;
+        if ( $delta < 0 ) {
+            # Counter wrap around...
+            my $real_delta = $delta + $MAX_VALUE + 1;
+            if ($real_delta < 0) {
+                print "($class_hash:$data_key): Perl-Magic using numbers bigger than 32bit ";
+                print "new:$new_value - old:$old_value = delta:$delta, real_delta:$real_delta.\n";
+            }
+            print time . " ($class_hash:$data_key) Info: Counter wrap around (real delta:$real_delta)\n";
+            if ( ($real_delta > $max_allowed_wrap_increase) ||
+                 ($real_delta < 0)) {
+                # Probably a counter reset and not a wrap around.
+                # A counter reset is normally the result of a reload of the classes.
+                $classes_data{$class_hash}{$data_key} = undef;
+                $classes_info{$class_hash}{counter_reset} = $timestamp;
+                $classes_info{$class_hash}{last_update} = $timestamp;
+                print time . " Warning: Real_delta too big, assuming counter reset ";
+                print "($class_hash:$data_key)\n";
+                return "Counter reset";
+            }
+        }
+    }
+
+    $classes_data{$class_hash}{$data_key} = $new_value;
+    return 1;
+}
+
+sub update_info ($$$$) {
+    my $class_hash = "$_[0]";
+    my $timestamp = "$_[1]";
+    my $info_key = "$_[2]";
+    my $new_value;
+    if ( defined $_[3]) {
+        $new_value = "$_[3]";
+    }
+    my $old_value;
+    if (exists $classes_info{$class_hash}{$info_key}) {
+        $old_value = $classes_info{$class_hash}{$info_key};
+        #print "old_value: $old_value\n";
+    }
+
+    # If neither the new nor the old value is defined, nothing is done
+    if ((not defined $new_value) && (not defined $old_value)) {
+        return "";
+    }
+
+    # An update is needed
+    #  - if the old_value is not defined and new_value is defined
+    #  - if the new_value is not defined and old_value is defined
+    #  - if the old_value differs from the new
+    #
+    if ( ((not defined $old_value) and (defined $new_value)) ||
+         ((not defined $new_value) and (defined $old_value)) ||
+         ("$old_value" ne "$new_value")) {
+
+        # Special case: If the "type" changes the hash should be cleared
+        if ( "$info_key" eq "type") {
+            #print "Type has changed, clearing hash \n";
+            for my $key ( keys %{ $classes_info{$class_hash} } ) {
+                delete( $classes_info{$class_hash}{$key});
+                print " Deleting key: $key from: $class_hash \n";
+            }
+        }
+
+        if (defined $new_value) {
+            $classes_info{$class_hash}{$info_key} = $new_value;
+        } else {
+            #print "New value undef -> Deleting key: $info_key from: $class_hash\n";
+            delete($classes_info{$class_hash}{$info_key});
+        }
+
+        # Mark the class for an info-file update
+        $classes_info{$class_hash}{last_update} = $timestamp;
+
+        # Update list/array of "changed" keys
+        push @{ $classes_info{$class_hash}{changed} }, $info_key;
+
+        # Print debug info
+        #print "Update class:$class_hash $info_key=";
+        #if (defined $new_value) {print "$new_value"};
+        #print "\n";
+        return 1;
+    }
+    return "";
+}
+
+# test
+#parse_class(eth1);
+
+#print Dumper(%classes_data);
+#print Dumper(%classes_info);
+
+return 1;
diff --git a/doc/ChangeLog b/doc/ChangeLog
index 3d5b9aad31..35f094bcb4 100644
--- a/doc/ChangeLog
+++ b/doc/ChangeLog
@@ -1,4 +1,11 @@
 ------------------------------------------------------------------------
+r254 | ms | 2006-08-21 21:15:32 +0200 (Mon, 21 Aug 2006) | 4 lines
+
+Programmupdate:
+  * Samba 3.0.23a --> 3.0.23b
+Geändert:
+  * ConnectionScheduler kann jetzt VPNs starten/beenden.
+------------------------------------------------------------------------
 r253 | ms | 2006-08-20 22:12:57 +0200 (Sun, 20 Aug 2006) | 5 lines
 
 Fixes:
diff --git a/html/cgi-bin/logs.cgi/log.dat b/html/cgi-bin/logs.cgi/log.dat
index e2eaa84991..d6139580a4 100644
--- a/html/cgi-bin/logs.cgi/log.dat
+++ b/html/cgi-bin/logs.cgi/log.dat
@@ -59,8 +59,7 @@ my %sections = (
 	'kernel' => '(kernel)',
 	'ipsec' => '(ipsec_[\w_]+|pluto\[.*\])',
 	'snort' => '(snort)',
-	'openvpn' => '(openvpnserver)\[.*\]',
-	'installpackage' => '(installpackage\[.*\])'
+	'openvpn' => '(openvpnserver)\[.*\]'
 	);
 
 # Translations for the %sections array.
@@ -76,8 +75,7 @@ my %trsections = (
 	'kernel' => "$Lang::tr{'kernel'}",
 	'ipsec' => 'IPSec',
 	'openvpn' => 'OpenVPN',
-	'snort' => 'Snort',
-	'installpackage' => "$Lang::tr{'update transcript'}"
+	'snort' => 'Snort'
 	);
 
diff --git a/html/cgi-bin/qos.cgi b/html/cgi-bin/qos.cgi
index fd8fee4516..1ecc84fe0d 100644
--- a/html/cgi-bin/qos.cgi
+++ b/html/cgi-bin/qos.cgi
@@ -324,22 +324,29 @@ END
 
 if ($qossettings{'ACTION'} eq 'Start')
 {
-	system("sleep 2 && /usr/bin/perl /var/ipfire/qos/bin/makeqosscripts.pl > /var/ipfire/qos/bin/qos.sh &");
+	system("sleep 1 && /usr/bin/perl /var/ipfire/qos/bin/makeqosscripts.pl > /var/ipfire/qos/bin/qos.sh &");
 	system("/bin/touch /var/ipfire/qos/enable");
+	system("sleep 2 && /usr/local/bin/qosctrl start >/dev/null 2>&1");
+	system("logger -t ipfire 'QoS started'");
 	$qossettings{'ENABLED'} = 'on';
 	&General::writehash("${General::swroot}/qos/settings", \%qossettings);
 }
 elsif ($qossettings{'ACTION'} eq 'Stop')
 {
+	system("/usr/local/bin/qosctrl stop >/dev/null 2>&1");
 	unlink "/var/ipfire/qos/bin/qos.sh";
 	unlink "/var/ipfire/qos/enable";
+	system("logger -t ipfire 'QoS stopped'");
 	$qossettings{'ENABLED'} = 'off';
 	&General::writehash("${General::swroot}/qos/settings", \%qossettings);
 }
 elsif ($qossettings{'ACTION'} eq 'Neustart')
 {
 	if ($qossettings{'ENABLED'} eq 'on'){
-		system("sleep 2 && /usr/bin/perl /var/ipfire/qos/bin/makeqosscripts.pl > /var/ipfire/qos/bin/qos.sh &");
+		system("/usr/local/bin/qosctrl stop >/dev/null 2>&1");
+		system("sleep 1 && /usr/bin/perl /var/ipfire/qos/bin/makeqosscripts.pl > /var/ipfire/qos/bin/qos.sh &");
+		system("sleep 5 && /usr/local/bin/qosctrl start >/dev/null 2>&1");
+		system("logger -t ipfire 'QoS restarted'");
 	}
 }
 elsif ($qossettings{'ACTION'} eq $Lang::tr{'save'})
diff --git a/make.sh b/make.sh
index 666f4adb38..8a138dc73a 100644
--- a/make.sh
+++ b/make.sh
@@ -1194,7 +1194,7 @@ svn)
 	$0 svn up
 	;;
 dist)
-	$0 svn up
+	#$0 svn up
 	echo -ne "Download source package from svn..."
 	svn export http://svn.ipfire.eu/svn/ipfire ipfire-source/ --force > /dev/null
 	if [ "$?" -eq "0" ]; then
diff --git a/src/ROOTFILES.i386 b/src/ROOTFILES.i386
index f4e413638c..3b8be9e714 100644
--- a/src/ROOTFILES.i386
+++ b/src/ROOTFILES.i386
@@ -1359,6 +1359,7 @@ usr/local/bin/httpscert
 usr/local/bin/hddshutdown
 usr/local/bin/hddshutdown-state
 usr/local/bin/makegraphs
+usr/local/bin/qosd
 usr/local/bin/readhash
 usr/local/bin/setddns.pl
 usr/local/bin/setreservedports
diff --git a/src/misc-progs/qosctrl.c b/src/misc-progs/qosctrl.c
index ac9fd37504..f3b288bcda 100644
--- a/src/misc-progs/qosctrl.c
+++ b/src/misc-progs/qosctrl.c
@@ -26,28 +26,27 @@ int main(int argc, char *argv[]) {
 		exit(1);
 	}
-
-	if (strcmp(argv[1], "start") == 0) {
-		if ((fd = open("/var/ipfire/qos/enable", O_RDONLY)) != -1)
-		{
-			close(fd);
-			enable = 1;
-		}
-
-		if (enable)
-		{
-			safe_system("/var/ipfire/qos/bin/qos.sh start");
-		}
-	} else if (strcmp(argv[1], "stop") == 0) {
-		safe_system("/var/ipfire/qos/bin/qos.sh clear");
-	} else if (strcmp(argv[1], "status") == 0) {
-		safe_system("/var/ipfire/qos/bin/qos.sh status");
-	} else if (strcmp(argv[1], "restart") == 0) {
-		safe_system("/var/ipfire/qos/bin/qos.sh restart");
-	} else {
-		fprintf(stderr, "\nBad argument given.\n\nqosctrl (start|stop|restart|status)\n\n");
-		exit(1);
+	safe_system("chmod 755 /var/ipfire/qos/bin/qos.sh");
+	if (strcmp(argv[1], "start") == 0) {
+		if ((fd = open("/var/ipfire/qos/bin/qos.sh", O_RDONLY)) != -1)
+		{
+			close(fd);
+			enable = 1;
 		}
+		if (enable)
+		{
+			safe_system("/var/ipfire/qos/bin/qos.sh start");
+		}
+	} else if (strcmp(argv[1], "stop") == 0) {
+		safe_system("/var/ipfire/qos/bin/qos.sh clear");
+	} else if (strcmp(argv[1], "status") == 0) {
+		safe_system("/var/ipfire/qos/bin/qos.sh status");
+	} else if (strcmp(argv[1], "restart") == 0) {
+		safe_system("/var/ipfire/qos/bin/qos.sh restart");
+	} else {
+		fprintf(stderr, "\nBad argument given.\n\nqosctrl (start|stop|restart|status)\n\n");
+		exit(1);
+	}
 
 	return 0;
 }
diff --git a/src/rc.d/rc.updatered b/src/rc.d/rc.updatered
index e20a2b1c09..c4745edc28 100644
--- a/src/rc.d/rc.updatered
+++ b/src/rc.d/rc.updatered
@@ -134,7 +134,7 @@ if [ -e "CONFIG_ROOT/red/active" ]; then
 	/etc/rc.d/rc.firewall reload
 	/usr/local/bin/setfilters
 	/usr/local/bin/restartsnort red
-	# Add QoS-Call here!
+	/usr/local/bin/qosctrl start
 	/usr/local/bin/setportfw
 	/usr/local/bin/setxtaccess
 	/usr/local/bin/setddns.pl -f
diff --git a/src/scripts/qosd b/src/scripts/qosd
new file mode 100644
index 0000000000..2bae22ab46
--- /dev/null
+++ b/src/scripts/qosd
@@ -0,0 +1,116 @@
+#!/usr/bin/perl -w
+use strict;
+
+##########################################
+##
+## DESCRIPTION
+##
+##  The tc-graph daemon script: "tc-collector"
+##  Which is part of the ADSL-optimizer.
+##
+##  The script will become a daemon and periodically collect data
+##  from the Linux traffic control system. The collected data is
+##  stored in RRD data files, which are created automatically by
+##  the script if they don't exist.
+##
+## GRAPHs
+##
+##  How the RRD data is displayed as graphs is not part of the
+##  tc-collector tool, but we recommend using the RRD frontend 'drraw'.
+##  We have included some 'drraw' examples (which are hardcoded to use
+##  files from '/var/spool/rrdqueues').
+##
+##  drraw: http://web.taranis.org/drraw/
+##
+##
+## REQUIRES
+##
+##  RRDtool's Perl interface (RRDs).
+##  The "tc" command.
+##
+##
+## AUTHOR
+##  Jesper Dangaard Brouer , d.16/4-2004
+##
+## CHANGELOG
+##  2004-04-16: Initial version.
+##  2004-05-27: Daemon version.
+##
+## $Id: tc-collector.pl,v 1.12 2005/03/19 19:31:08 hawk Exp $
+##########################################
+
+# TODO:
+#  * Calc time used to parse, use it to make time steps more precise
+#  * Device list support
+#  * Detecting the correct devices
+
+# Configuration options:
+#
+my $device = "imq0";
+our $rrd_datadir = "/var/log/rrd/";
+our $event_datadir = $rrd_datadir;
+our $STEP = 10;
+our $tc_command = "/sbin/tc";
+
+# A trick is to set the environment PERL5LIB to include $GRAPHDIR
+# This is done by the init-script
+# ($GRAPHDIR is obtained from /usr/local/etc/ADSL-optimizer.conf)
+my $include_dir = '/var/ipfire/qos/bin';
+
+
+# Create the $rrd_datadir if it doesn't exist
+if ( ! -d $rrd_datadir ) {
+    print "RRD-datadir not found, creating it: $rrd_datadir \n";
+    my $status = system("mkdir $rrd_datadir");
+    die "\nERROR cannot create \"$rrd_datadir\"\n" unless $status == 0;
+}
+
+# use POSIX;
+#
+#POSIX::setsid()
+#    or die "Can't become a daemon: $!";
+
+# The init scripts will do the right "daemon" thing...
+# Become a daemon
+print "Becoming a daemon...\n";
+my $pid = fork;
+exit if $pid;
+die "Couldn't fork: $!" unless defined($pid);
+
+my $time_to_die = 0;
+sub signal_handler {
+    $time_to_die = 1;
+}
+# Trap signals
+$SIG{INT} = $SIG{TERM} = $SIG{HUP} = \&signal_handler;
+$SIG{PIPE} = 'IGNORE';
+
+our %classes_data;
+our %classes_info;
+require "$include_dir/parse-func.pl";
+require "$include_dir/event-func.pl";
+require "$include_dir/RRD-func.pl";
+
+until ($time_to_die) {
+
+    #print "Parsing tc statistics on $device\n";
+    my $res = parse_class($device);
+    if ( ! $res ) {
+        print " Error when parsing classes on $device\n";
+    }
+
+    #print "Updating RRD data-files\n";
+    $res = update_rrds();
+    #if ( $res ) {
+    #    print " Error updating RRDs: \"$res\"\n";
+    #}
+
+    process_events();
+
+#   my $timestamp = time;
+#   print "$timestamp\n";
+
+    sleep($STEP);
+}
+
+print "tc-collector daemon exiting ... bye bye!\n";
-- 
2.39.2