[prev in list] [next in list] [prev in thread] [next in thread] 

List:       helix-server-cvs
Subject:    [Server-cvs] common/analysis/extra realtimegraph, 1.3,
From:       dcollins () helixcommunity ! org
Date:       2008-10-20 22:36:34
Message-ID: 200810202243.m9KMh803012996 () mailer ! progressive-comp ! com
[Download RAW message or body]

Update of /cvsroot/server/common/analysis/extra
In directory cvs01.internal.helixcommunity.org:/tmp/cvs-serv18540/extra

Modified Files:
	realtimegraph updatelink.pl uptime_resolve.pl uptimes.pl 
Log Message:
sync-up


Index: realtimegraph
===================================================================
RCS file: /cvsroot/server/common/analysis/extra/realtimegraph,v
retrieving revision 1.3
retrieving revision 1.4
diff -u -d -r1.3 -r1.4
--- realtimegraph	23 Feb 2008 14:19:56 -0000	1.3
+++ realtimegraph	20 Oct 2008 22:36:31 -0000	1.4
@@ -1,64 +1,246 @@
 #!  /usr/bin/perl
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Source last modified: $Id$
+#
+# Portions Copyright (c) 1995-2008 RealNetworks, Inc. All Rights Reserved.
+#
+# The contents of this file, and the files included with this file,
+# are subject to the current version of the RealNetworks Public
+# Source License (the "RPSL") available at
+# http://www.helixcommunity.org/content/rpsl unless you have licensed
+# the file under the current version of the RealNetworks Community
+# Source License (the "RCSL") available at
+# http://www.helixcommunity.org/content/rcsl, in which case the RCSL
+# will apply. You may also obtain the license terms directly from
+# RealNetworks.  You may not use this file except in compliance with
+# the RPSL or, if you have a valid RCSL with RealNetworks applicable
+# to this file, the RCSL.  Please see the applicable RPSL or RCSL for
+# the rights, obligations and limitations governing use of the
+# contents of the file.
+#
+# This file is part of the Helix DNA Technology. RealNetworks is the
+# developer of the Original Code and owns the copyrights in the
+# portions it created.
+#
+# This file, and the files included with this file, is distributed
+# and made available on an 'AS IS' basis, WITHOUT WARRANTY OF ANY
+# KIND, EITHER EXPRESS OR IMPLIED, AND REALNETWORKS HEREBY DISCLAIMS
+# ALL SUCH WARRANTIES, INCLUDING WITHOUT LIMITATION, ANY WARRANTIES
+# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, QUIET
+# ENJOYMENT OR NON-INFRINGEMENT.
+#
+# Technology Compatibility Kit Test Suite(s) Location:
+#    http://www.helixcommunity.org/content/tck
+#
+# Contributor(s):
+#
+# ***** END LICENSE BLOCK *****
+#
+######################################################################
+#
+# realtimegraph - updates a directory used for realtime server/proxy graphing
+#
+# Note: This does not update the graphs themselves, that occurs
+# via 'updategraphs', which is typically called from the
+# 'uptimes.pl' script (aka 'go').  (Perhaps the name 'realtimegraph'
+# is a little misleading.)
+#
+######################################################################
 
-#if (!$ARGV[0] || ($ARGV[0] != "--resolve" || $ARGV[0] != "--noresolve"))
-#{
-#    print "Error: must specify '--resolve' or '--noresolve'.\n";
-#}
-
-$bResolve = 1;
-#$bResolve = 0;
-#if ($ARGV[0] == "--resolve")
-#{
-#    $bResolve = 1;
-#    shift;
-#}
+use Getopt::Long;
+use Pod::Usage;
 
-$bUpdateLink = 1;
-if ($ARGV[0] eq "--noupdatelink")
+#######################################################################
+#
+# Config
+#
+sub Config
 {
+    $| = 1; #flush stdout after writes
+
+    $szProgName = `basename $0`;
+    chomp($szProgName);
+    $ServGraph::szProgName = $szProgName;
+
+    ($szVerInfo) = '$Id$'
+        =~ /,v ([0-9.]+ [^\s]+ [^\s]+)/;
+    if (!$szVerInfo) { $szVerInfo = "0.0"; }
+    printf "%s version %s\n", $szProgName, $szVerInfo;
+
+    $szProgDir=`dirname $0`;
+    chomp $szProgDir;
+    require "$szProgDir/../servgraph.pm";
+
+
+    $bResolve = 1;
     $bUpdateLink = 1;
-    shift;
+    $nDelay = 60;
+    @deletefiles = ("log", "server-rss.dat", "res.txt",
+                    "unique.txt", "rmstdout-copy.txt",
+                    "rmstdout-summary.txt");
+
+    ($szVerInfo) = '$Id$'
+        =~ /,v ([0-9.]+ [^\s]+ [^\s]+)/;
+    if (!$szVerInfo) { $szVerInfo = "0.0"; }
+    printf "%s version %s\n", $szProgName, $szVerInfo;
 }
 
-if ($ARGV[0] eq "--updatelink")
+#######################################################################
+#
+# CommandLineHandler - Command-line parsing
+#
+sub CommandLineHandler
 {
-    shift;
+    GetOptions(
+        "resolve!"   => \$bResolve,
+        "updatelink!" => \$bUpdateLink,
+        "files=s"    => \$szDeleteFiles,
+        "delay=i"    => \$nDelay,
+        "once!"      => \$bOnce,
+        "help|?"     => \$bShowHelp,
+        "man"        => \$bShowMan,
+    ) || pod2usage(2);
+
+    pod2usage(1) if ($bShowHelp);
+    pod2usage("-exitstatus" => 0, "-verbose" => 2) if ($bShowMan);
+
+    if ($szDeleteFiles)
+    {
+        @deletefiles = split(/,/, $szDeleteFiles);
+    }
+
+    $szDir = $ARGV[0];
+    if (!$szDir)
+    {
+        ServGraph::ReportStatus("Error: must specify directory to watch.  See \
--help.\n"); +        exit;
+    }
 }
 
-if (!$ARGV[0])
+
+######################################################################
+#
+# CleanupFiles
+#
+sub CleanupFiles
 {
-    print "Error: must specify directory to watch.\n";
-    exit;
+    my (@files) = @_;
+
+    foreach $x (@files)
+    {
+        if (-e "$szDir/$x")
+        {
+            ServGraph::ReportStatus ("Init: unlinking '$szDir/$x'\n");
+            unlink "$szDir/$x";
+        }
+        else
+        {
+            ServGraph::ReportStatus ("Init: '$szDir/$x' not found, skipping\n");
+        }
+    }
 }
 
 
-$szDir = $ARGV[0];
+######################################################################
+#
+# MAIN
+#
+Config();
+CommandLineHandler();
+ServGraph::SetupSigHandler();
+CleanupFiles(@deletefiles);
 
-unlink "$szDir/log";
-unlink "$szDir/server-rss.dat";
-unlink "$szDir/res.txt";
-unlink "$szDir/unique.txt";
-unlink "$szDir/rmstdout-copy.txt";
-unlink "$szDir/rmstdout-summary.txt";
+ServGraph::ForkCmd ("updatelink.pl --dir $szDir >>log 2>&1");
 
 while (1)
 {
-    if ($bUpdateLink)
-    {
-        system "cd $szDir && updatelink.pl >>log";
-    }
-    #$x = `ls -l $szDir/current`;
-    #($x,$y) = $x =~ /([^\S]+)\S+->\S+([^\S]+)/;
-    #printf "$x --> $y\n";
-
     if ($bResolve)
     {
-        system "cd $szDir && exec logwatch --once rmstdout.txt uptime_resolve.pl \
>>log 2>&1"; +        ServGraph::RunCmd ("cd $szDir && logwatch --once rmstdout.txt \
> > -- uptime_resolve.pl >>log 2>&1");
     }
     else
     {
-        system "cd $szDir && exec logwatch --once rmstdout.txt logspit >>log 2>&1";
+        ServGraph::RunCmd ("cd $szDir && logwatch --once rmstdout.txt -- logspit \
>>log 2>&1");  }
 
-    sleep(60);
+    sleep($nDelay);
 }
+
+
+######################################################################
+#
+# DOCUMENTATION
+#
+__END__
+=head1 NAME
+
+realtimegraph - updates a directory used for real-time server/proxy graphing
+
+=head1 SYNOPSIS
+
+realtimegraph [options] [directory]
+
+ Options:
+   --[no]resolve      Resolve the stdout log (or not)
+   --[no]updatelink   Update the 'current' symlink (or not)
+   --delay [sec]      Frequency at which the directory will be scanned
+   --once             Try to update the link once, then exit
+   --help             brief help message
+   --man              full documentation
+   [directory]        The directory to update
+
+=head1 OPTIONS
+
+=over 8
+
+=item B<--resolve>
+
+Indicates whether or not to resolve the stdout log.  Default: enabled.
+
+=item B<--updatelink>
+
+Indicates whether or not to update the 'current' symlink in
+the monitored directory.  Default: enabled.
+
+=item B<--files>
+
+A comma-separated list of files (no whitespace) which will be
+removed each time the directory is refreshed.  Do not include
+spaces in this list unless they appear in the filenames, in
+which case this argument must be shell-quoted.
+This is for cleaning up the auto-generated files which will
+be replaced when the additional scripts are run.
+
+=item B<--delay>
+
+The frequency at which the directory will be scanned.
+The default is 60 seconds.
+
+=item B<--once>
+
+Perform updates once, then exit.  By default, the 
+directory is repeatedly scanned every 'delay' seconds
+until the script is terminated.
+
+=item B<--help>
+
+Print a brief help message and exits.
+
+=item B<--man>
+
+Prints the manual page and exits.
+
+=item B<[directory]>
+
+The directory to update.  This must exist as a subdirectory
+of the current working directory.
+
+=back
+
+=head1 DESCRIPTION
+
+B<realtimegraph> will update the directory used for generating
+realtime graphs.  It does not regenerate the graphs, this occurs via
+'updategraphs', which in turn is typically run from the 'uptimes.pl'
+script (aka 'go').

Index: updatelink.pl
===================================================================
RCS file: /cvsroot/server/common/analysis/extra/updatelink.pl,v
retrieving revision 1.2
retrieving revision 1.3
diff -u -d -r1.2 -r1.3
--- updatelink.pl	2 Mar 2008 03:06:23 -0000	1.2
+++ updatelink.pl	20 Oct 2008 22:36:31 -0000	1.3
@@ -1,8 +1,48 @@
 #!/usr/bin/perl
-# $Id$
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Source last modified: $Id$
+#
+# Portions Copyright (c) 1995-2008 RealNetworks, Inc. All Rights Reserved.
+#
+# The contents of this file, and the files included with this file,
+# are subject to the current version of the RealNetworks Public
+# Source License (the "RPSL") available at
+# http://www.helixcommunity.org/content/rpsl unless you have licensed
+# the file under the current version of the RealNetworks Community
+# Source License (the "RCSL") available at
+# http://www.helixcommunity.org/content/rcsl, in which case the RCSL
+# will apply. You may also obtain the license terms directly from
+# RealNetworks.  You may not use this file except in compliance with
+# the RPSL or, if you have a valid RCSL with RealNetworks applicable
+# to this file, the RCSL.  Please see the applicable RPSL or RCSL for
+# the rights, obligations and limitations governing use of the
+# contents of the file.
+#
+# This file is part of the Helix DNA Technology. RealNetworks is the
+# developer of the Original Code and owns the copyrights in the
+# portions it created.
+#
+# This file, and the files included with this file, is distributed
+# and made available on an 'AS IS' basis, WITHOUT WARRANTY OF ANY
+# KIND, EITHER EXPRESS OR IMPLIED, AND REALNETWORKS HEREBY DISCLAIMS
+# ALL SUCH WARRANTIES, INCLUDING WITHOUT LIMITATION, ANY WARRANTIES
+# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, QUIET
+# ENJOYMENT OR NON-INFRINGEMENT.
+#
+# Technology Compatibility Kit Test Suite(s) Location:
+#    http://www.helixcommunity.org/content/tck
+#
+# Contributor(s):
+#
+# ***** END LICENSE BLOCK *****
+#
+######################################################################
 #
 # updatelink.pl - regenerates the link for an uptime directory
 #
+######################################################################
+#
 # Note: An 'automate' softlink must exist, pointing to automate's home
 # directory on the uptime system.  This script then essentially does
 #    ls -ltr automate/*/Logs/rmstdout.txt
@@ -11,58 +51,73 @@
 # Intended for use with the uptimes, though maybe its useful in other
 # situations, too.
 #
-# TODO: The commented-out code at the bottom was intended to allow
-# this script to monitor this continuously, recreating the link
-# whenever a new set of server logs started to be updated.  The
-# rest of this has not been fully worked out so it is disabled.
-#
+######################################################################
 
 use Getopt::Long;
 use Pod::Usage;
 
 #######################################################################
-#Defaults
-$szTopDir = "automate";
-$nDelay = 60;
-$szLink = "current";
-$szLogPattern = "*/Logs/rmstdout.txt";
-$szProgName = `basename $0`;
-chomp($szProgName);
+#
+# Config
+#
+sub Config
+{
+    $| = 1; #flush stdout after writes
 
-#######################################################################
-#Command-line parsing
+    $szProgName = `basename $0`;
+    chomp($szProgName);
+    $ServGraph::szProgName = $szProgName;
 
-GetOptions(
-    "topdir=s"   => \$szTopDir,
-    "delay=i"    => \$nDelay,
-    "linkname=s" => \$szLink,
-    "pattern=s"  => \$szLogPattern,
-    "once"       => \$bOnce,
-    "help|?"     => \$bShowHelp,
-    "man"        => \$bShowMan,
-) || pod2usage(2);
+    ($szVerInfo) = '$Id$'
+        =~ /,v ([0-9.]+ [^\s]+ [^\s]+)/;
+    if (!$szVerInfo) { $szVerInfo = "0.0"; }
+    printf "%s version %s\n", $szProgName, $szVerInfo;
 
-pod2usage(1) if ($bShowHelp);
-pod2usage("-exitstatus" => 0, "-verbose" => 2) if ($bShowMan);
+    $szProgDir=`dirname $0`;
+    chomp $szProgDir;
+    require "$szProgDir/../servgraph.pm";
 
 
-sub ReportStatus
+    $szTopLink = "automate";
+    $nDelay = 60;
+    $szLink = "current";
+    $szLogPattern = "*/Logs/rmstdout.txt";
+}
+
+
+#######################################################################
+#
+# CommandLineHandler - Command-line parsing
+#
+sub CommandLineHandler
 {
-    my (@args) = @_;
-    $szTimestamp = `date`;
-    chomp $szTimestamp;
-    printf "$szTimestamp : $szProgName\[$$\]: ";
-    printf @args;
+    GetOptions(
+        "dir=s"      => \$szWorkDir,
+        "toplink=s"  => \$szTopLink,
+        "delay=i"    => \$nDelay,
+        "linkname=s" => \$szLink,
+        "pattern=s"  => \$szLogPattern,
+        "once!"      => \$bOnce,
+        "help|?"     => \$bShowHelp,
+        "man"        => \$bShowMan,
+    ) || pod2usage(2);
+
+    pod2usage(1) if ($bShowHelp);
+    pod2usage("-exitstatus" => 0, "-verbose" => 2) if ($bShowMan);
 }
 
 
+######################################################################
+#
+# GetCurrentDir
+#
 sub GetCurrentDir
 {
     my ($szFile, $szDir);
-    $szFile = `ls -tr $szTopDir/$szLogPattern 2>/dev/null | tail -1`;
+    $szFile = `ls -tr $szTopLink/$szLogPattern 2>/dev/null | tail -1`;
     if (!$szFile)
     {
-        ReportStatus("Logs directory not found\n");
+        ServGraph::ReportStatus("Logs directory not found\n");
         return "";
     }
 
@@ -75,11 +130,15 @@
     return $szDir;
 }
 
+######################################################################
+#
+# RecreateLogsLink
+#
 sub RecreateLogsLink
 {
     if (-e $szLink && ! -l $szLink)
     {
-        ReportStatus("$szLink is not a symlink, unable to recreate\n");
+        ServGraph::ReportStatus("$szLink is not a symlink, unable to recreate\n");
         return;
     }
 
@@ -87,11 +146,49 @@
     if ($szCurrentDir)
     {
         symlink $szCurrentDir, $szLink;
-        ReportStatus("Recreated symlink: $szLink --> $szCurrentDir\n");
+        ServGraph::ReportStatus("Recreated symlink: $szLink --> $szCurrentDir\n");
     }
 }
 
-ReportStatus("Monitoring '$szTopDir/$szLogPattern'...\n");
+######################################################################
+#
+# SetWorkDir
+#
+sub SetWorkDir
+{
+    if ($szWorkDir)
+    {
+        if (!($szWorkDir =~ /^\//))  #relative path
+        {
+            my ($x) = `pwd`;
+            chomp($x);
+            $szWorkDir = $x . "/" . $szWorkDir;
+            $szWorkDir =~ s/\/\.\//\//g; # change "/./" to "/"
+            $szWorkDir =~ s/\/\//\//g; # change "//" to "/"
+            $szWorkDir =~ s/\/[^\/]+\/\.\.//g; #delete "blah/.."
+        }
+   }
+   else
+   {
+       $szWorkDir = ".";
+   }
+   xxx
+}
+
+######################################################################
+#
+# MAIN
+#
+Config();
+CommandLineHandler();
+
+SetWorkDir();
+chdir $szWorkDir;
+$x = `pwd`;
+chomp $x;
+ServGraph::ReportStatus("Changing directories to: '$x'...\n");
+
+ServGraph::ReportStatus("Monitoring './$szTopLink/$szLogPattern'...\n");
 
 $szCurrentDir = ''; #undefined at first
 while (1)
@@ -113,6 +210,10 @@
 
 
 
+######################################################################
+#
+# DOCUMENTATION
+#
 __END__
 =head1 NAME
 
@@ -123,7 +224,8 @@
 updatelink.pl [options]
 
  Options:
-   --topdir [dir]     Name of symlink to top of directory tree.
+   --dir [dir]        Name of work directory.
+   --toplink [dir]    Name of symlink to top of directory tree.
    --delay [sec]      Frequency at which the directory will be scanned
    --linkname [name]  Name of symlink that will be created 
    --pattern [pat]    Wild-card pattern for finding log files 
@@ -135,7 +237,11 @@
 
 =over 8
 
-=item B<--topdir>
+=item B<--dir>
+
+Name of directory to perform work in.
+
+=item B<--toplink>
 
 The name of the symlink which points to the top directory to scan.  Typically this
 points to the user's home directory on the remote system.  For example, 'automate'
@@ -181,7 +287,7 @@
 This directory is typically a symlink to a user's home directory on a
 remote system.  The directory should contain server/proxy installation
 directories.  B<updatelink.pl> will scan  the subdirectories of this
-top-level directory, looking for topdir/*/Logs/rmstdout.txt files.
+top-level directory, looking for toplink/*/Logs/rmstdout.txt files.
 
 If it finds one or more of these, it will determine which has been
 updated the most recently.  If this is different than the location that

Index: uptime_resolve.pl
===================================================================
RCS file: /cvsroot/server/common/analysis/extra/uptime_resolve.pl,v
retrieving revision 1.1
retrieving revision 1.2
diff -u -d -r1.1 -r1.2
--- uptime_resolve.pl	7 Feb 2007 21:20:59 -0000	1.1
+++ uptime_resolve.pl	20 Oct 2008 22:36:31 -0000	1.2
@@ -1,233 +1,447 @@
 #!/usr/bin/perl
 #
-# uptime_resolve.pl
+# ***** BEGIN LICENSE BLOCK *****
+# Source last modified: $Id$
+#
+# Portions Copyright (c) 1995-2008 RealNetworks, Inc. All Rights Reserved.
+#
+# The contents of this file, and the files included with this file,
+# are subject to the current version of the RealNetworks Public
+# Source License (the "RPSL") available at
+# http://www.helixcommunity.org/content/rpsl unless you have licensed
+# the file under the current version of the RealNetworks Community
+# Source License (the "RCSL") available at
+# http://www.helixcommunity.org/content/rcsl, in which case the RCSL
+# will apply. You may also obtain the license terms directly from
+# RealNetworks.  You may not use this file except in compliance with
+# the RPSL or, if you have a valid RCSL with RealNetworks applicable
+# to this file, the RCSL.  Please see the applicable RPSL or RCSL for
+# the rights, obligations and limitations governing use of the
+# contents of the file.
+#
+# This file is part of the Helix DNA Technology. RealNetworks is the
+# developer of the Original Code and owns the copyrights in the
+# portions it created.
+#
+# This file, and the files included with this file, is distributed
+# and made available on an 'AS IS' basis, WITHOUT WARRANTY OF ANY
+# KIND, EITHER EXPRESS OR IMPLIED, AND REALNETWORKS HEREBY DISCLAIMS
+# ALL SUCH WARRANTIES, INCLUDING WITHOUT LIMITATION, ANY WARRANTIES
+# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, QUIET
+# ENJOYMENT OR NON-INFRINGEMENT.
+#
+# Technology Compatibility Kit Test Suite(s) Location:
+#    http://www.helixcommunity.org/content/tck
+#
+# Contributor(s):
+#
+# ***** END LICENSE BLOCK *****
 #
 ######################################################################
 #
-# XXXDC: 
-# This is a quick hack to:
-#    1) clean up and initialize the graphing work directory
-#    2) automatically pull down the symbols files
-#    3) tail the rmstdout.txt file creating res.txt
-#    4) tail the rmstdout.txt file creating rmstdout-summary.txt
-#    5) pass log to 'logspit' which in turn uses rss2tdf to create
-#           the tab-delimited RSS data.
+# uptime_resolve.pl - A resolve.pl wrapper used by graphing system.
 #
+######################################################################
 #
 # TODO: 
-#    * close the log between reads to ensure we don't keep the
-#          mount point blocked
-#    * fix the "v11" hardcoded string
 #    * pull down the individual win32 *.map files since this script
 #          incorrectly tries to pull down symbols.zip on win32.
+#          (Didn't we ever create zip'd symbol files??)
 #          Right now you need to manually create the win32 symbols
 #          directory before running this script for the rest to work.
 #    * Running resolve.pl twice is expensive but it's not clear
 #          how to avoid this to get the two types of output files.
-#    * misc cleanup
-#
-# NOTES:
-#
-#    This only works with horton builds for which symbols exist
-#    on the build farm.  
 #
-#    This is used in conjunciton with the Ploticus/updategraphs tools.
+######################################################################
+
+use Getopt::Long;
+use Pod::Usage;
+
+######################################################################
 #
-# Usage example:
-#    $ logwatch rmstdout.txt uptime_resolve.pl
+# Config
 #
-######################################################################
+sub Config
+{
+    $| = 1; #flush stdout after writes
 
+    $szProgName = `basename $0`;
+    chomp($szProgName);
+    $ServGraph::szProgName = $szProgName;
 
-#Cleanup
-unlink "res.txt";
-unlink "unique.txt";
-unlink "rmstdout-summary.txt";
-system "rm -rf symbols symbols.tgz";
+    ($szVerInfo) = '$Id$'
+        =~ /,v ([0-9.]+ [^\s]+ [^\s]+)/;
+    if (!$szVerInfo) { $szVerInfo = "0.0"; }
+    printf "%s version %s\n", $szProgName, $szVerInfo;
 
-#Setup
-$nReadSize = 32768;
-$bResolve = 0;
-$nDelay = 30;
+    $szProgDir=`dirname $0`;
+    chomp $szProgDir;
+    require "$szProgDir/../servgraph.pm";
 
 
-# Read the first hunk
-$nReadCount = sysread STDIN, $szLogText, $nReadSize;
+    @cleanupList = ("res.txt", "unique.txt", "rmstdout-summary.txt", "symbols", \
"symbols.tgz");  
-if (!$nReadCount)
-{
-    printf "error: read failed\n";
-    exit;
+    $nReadSize = 32768;
+    $bResolve = 0;
+    $nDelay = 30;
+    $szFilterCmd = "logspit";
+    $bCarbonCopy = 1;
 }
-#printf "Read %d bytes:\n--------------------%s\n---------------------\n", \
$nReadCount, $szLogText;  
-# Parse out the SYSTEM_ID
-($szSystemID) = $szLogText =~ /Platform: *([^\s]*)\s/s;
-if ($szSystemID eq "win32") { $szSystemID = "win32-i386-vc7"; } #FIXME
-printf "SystemID: '$szSystemID'\n";
-
-# Parse out the BuildID
-($szBuildID) = $szLogText =~ /Version:.*\(Build (\d+)/;
-printf "BuildID: '$szBuildID'\n";
+#######################################################################
+#
+# CommandLineHandler - Command-line parsing
+#
+sub CommandLineHandler
+{
+    GetOptions(
+        "filter=s"   => \$szFilterCmd,
+        "cc!"        => \$bCarbonCopy,
+        "delay=i"    => \$nDelay,
+        "debug!"     => \$ServGraph::bDebug,
+        "help|?"     => \$bShowHelp,
+        "man"        => \$bShowMan,
+    ) || pod2usage(2);
 
-# Pull down the horton build page for later parsing...
-$szBuildInfoPage = `wget -O - \
http://horton.dev.prognet.com/~build/report.cgi?id=$szBuildID 2>/dev/null`; +    \
pod2usage(1) if ($bShowHelp); +    pod2usage("-exitstatus" => 0, "-verbose" => 2) if \
($bShowMan); +}
 
-# Parse out the TAG
-($szBuildTag) = $szBuildInfoPage =~ /Tag:[^\s]+<td>([^<]*)<\/td>/s;
-printf "TAG: '$szBuildTag'\n";
+######################################################################
+# InitialCleanup
+#
+sub InitialCleanup
+{
+    my ($x);
+    foreach $x (@cleanuplist)
+    {
+        if (-d $x) { system "rm -rf $x"; }
+        else { unlink $x; }
+    }
+}
 
-#Parse out the Target:
-($szTarget) = $szBuildInfoPage =~ /Target:[^\s]+<td>([^<]*)<\/td>/s;
-printf "Target: '$szBuildTag'\n";
+######################################################################
+#
+# HandleFirstHunk
+#
+sub HandleFirstHunk
+{
+    # Read the first hunk
+    $nReadCount = sysread STDIN, $szLogText, $nReadSize;
 
-#Figure out the BRANCH
-($szBranch) = $szBuildInfoPage =~ /Branch:.*(SERVER[^\/]*)/s;
-printf "BRANCH: $szBranch\n";
+    if (!$nReadCount)
+    {
+        printf "error: read failed\n";
+        exit;
+    }
+    #printf "Read %d bytes:\n--------------------%s\n---------------------\n", \
$nReadCount, $szLogText;  
-#Parse out the build type:
-($szBuildType) = $szBuildInfoPage =~ /Type:[^\s]+<td>([^<]*)<\/td>/s;
-printf "Type: '$szBuildType'\n";
+    # Parse out the SYSTEM_ID
+    ($szSystemID) = $szLogText =~ /Platform: *([^\s]*)\s/s;
+    if ($szSystemID eq "win32") { $szSystemID = "win32-i386-vc7"; } #FIXME
+    printf "SystemID: '$szSystemID'\n";
 
+    # Parse out the BuildID
+    ($szBuildID) = $szLogText =~ /Version:.*\(Build (\d+)/;
+    printf "BuildID: '$szBuildID'\n";
 
-# Parse out the installer type
-if ($szLogText =~ /Helix Mobile Server/)
-{
-    $szInstaller = "servinst_mobile";
-    $szExeName = "rmserver";
-}
-elsif ($szLogText =~ /Helix Mobile Gateway/ ||
-       $szLogText =~ /Helix Mobile Proxy/)
-{
-    $szInstaller = "prxyinst_mobile";
-    $szExeName = "rmproxy";
-}
-elsif ($szLogText =~ /Helix Server/)
-{
-    $szInstaller = "servinst_retail";
-    $szExeName = "rmserver";
-}
-elsif ($szLogText =~ /Helix Proxy/)
-{
-    $szInstaller = "prxyinst_retail";
-    $szExeName = "rmproxy";
-}
-printf "Installer: $szInstaller\n";
+    # Pull down the horton build page for later parsing...
+    $szBuildInfoPage = `wget -O - \
http://horton.dev.prognet.com/~build/report.cgi?id=$szBuildID 2>/dev/null`;  
+    # Parse out the TAG
+    ($szBuildTag) = $szBuildInfoPage =~ /Tag:[^\s]+<td>([^<]*)<\/td>/s;
+    printf "TAG: '$szBuildTag'\n";
 
-$szExt = "tgz";
-if ($szSystemID =~ /win32/)
-{
-    $szExt = "zip";
-}
+    #Parse out the Target:
+    ($szTarget) = $szBuildInfoPage =~ /Target:[^\s]+<td>([^<]*)<\/td>/s;
+    printf "Target: '$szBuildTag'\n";
 
-#FIXME fix the "v11"
-$szSymbolsBaseDir = "../symbols";
-$szSymbolsFile = "v11-${szBuildTag}-${szInstaller}_${szSystemID}_sym.${szExt}";
-$szSymbolsDir = "v11-${szBuildTag}-${szInstaller}_${szSystemID}_sym";
-$szSymbolsURL = "http://horton.dev.prognet.com/~build/build/${szBranch}/${szTarget}/$ \
{szBuildTag}/${szSystemID}/${szBuildType}/${szBuildType}/${szInstaller}_${szSystemID}_sym.${szExt}";
 +    #Figure out the BRANCH
+    ($szBranch) = $szBuildInfoPage =~ /Branch:.*(SERVER[^\/]*)/s;
+    printf "BRANCH: $szBranch\n";
 
-#Build Farm Directory:
-$szBuildFarmDir = "/mnt/build/$szBranch/$szTarget/$szBuildTag/$szSystemID/$szBuildType/$szBuildType";
 +    #Parse out the build type:
+    ($szBuildType) = $szBuildInfoPage =~ /Type:[^\s]+<td>([^<]*)<\/td>/s;
+    printf "Type: '$szBuildType'\n";
 
-printf "looking for: $szBuildFarmDir\n";
 
-if (!($szSystemID =~ /win32/) && ! -e "$szSymbolsBaseDir/$szSymbolsFile")
-{
-    if (-e "$szBuildFarmDir/$szSymbolsFile")
+    # Parse out the installer type
+    if ($szLogText =~ /Helix Mobile Server/)
     {
-        printf "Copying symbols archive file from the build farm via SMBFS...\n";
-        system "cp $szBuildFarmDir/$szSymbolsFile $szSymbolsBaseDir/$szSymbolsFile";
+        $szInstaller = "servinst_mobile";
+        $szExeName = "rmserver";
     }
-    else
+    elsif ($szLogText =~ /Helix Mobile Gateway/ ||
+           $szLogText =~ /Helix Mobile Proxy/)
     {
-        printf "looking for: $szSymbolsURL\n";
-        printf "Getting symbols archive file from the build farm via HTTP...\n";
-        system "wget -O $szSymbolsBaseDir/$szSymbolsFile $szSymbolsURL 2> \
/dev/null"; +        $szInstaller = "prxyinst_mobile";
+        $szExeName = "rmproxy";
     }
-    #if it's zero length, nuke it:
-    if (! -s "$szSymbolsBaseDir/$szSymbolsFile") { unlink \
"$szSymbolsBaseDir/$szSymbolsFile"; } +    elsif ($szLogText =~ /Helix Server/)
+    {
+        $szInstaller = "servinst_retail";
+        $szExeName = "rmserver";
+    }
+    elsif ($szLogText =~ /Helix Proxy/)
+    {
+        $szInstaller = "prxyinst_retail";
+        $szExeName = "rmproxy";
+    }
+    printf "Installer: $szInstaller\n";
+
 }
 
-if (-e "$szSymbolsBaseDir/$szSymbolsFile" && ! -e "$szSymbolsBaseDir/$szSymbolsDir")
+######################################################################
+#
+# GetSymbols
+#
+sub GetSymbols
 {
-    printf "Extracting symbols from archive...\n";
-    mkdir "$szSymbolsBaseDir/$szSymbolsDir";
-    if ($szExt eq "tgz")
+    my ($szExt) = "tgz";
+    if ($szSystemID =~ /win32/)
     {
-        system "cd $szSymbolsBaseDir/$szSymbolsDir && tar xzvf ../$szSymbolsFile \
2>/dev/null"; +        $szExt = "zip";
     }
-    elsif ($szExt eq "zip")
+
+    $szSymbolsBaseDir = "../symbols";
+    $szSymbolsFile = "${szBuildTag}-${szInstaller}_${szSystemID}_sym.${szExt}";
+    $szSymbolsDir = "${szBuildTag}-${szInstaller}_${szSystemID}_sym";
+    $szSymbolsURL = \
"http://horton.dev.prognet.com/~build/build/${szBranch}/${szTarget}/${szBuildTag}/${sz \
SystemID}/${szBuildType}/${szBuildType}/${szInstaller}_${szSystemID}_sym.${szExt}"; +
+    #Build Farm Directory:
+    $szBuildFarmDir = \
"/mnt/build/$szBranch/$szTarget/$szBuildTag/$szSystemID/$szBuildType/$szBuildType"; +
+    printf "looking for: $szBuildFarmDir\n";
+
+    if (!($szSystemID =~ /win32/) && ! -e "$szSymbolsBaseDir/$szSymbolsFile")
     {
-        system "cd $szSymbolsBaseDir/$szSymbolsDir && unzip ../$szSymbolsFile \
2>/dev/null"; +        if (-e "$szBuildFarmDir/$szSymbolsFile")
+        {
+            printf "Copying symbols archive file from the build farm via \
SMBFS...\n"; +            system "cp $szBuildFarmDir/$szSymbolsFile \
$szSymbolsBaseDir/$szSymbolsFile"; +        }
+        else
+        {
+            printf "looking for: $szSymbolsURL\n";
+            printf "Getting symbols archive file from the build farm via HTTP...\n";
+            system "wget -O $szSymbolsBaseDir/$szSymbolsFile $szSymbolsURL 2> \
/dev/null"; +        }
+        #if it's zero length, nuke it:
+        if (! -s "$szSymbolsBaseDir/$szSymbolsFile") { unlink \
"$szSymbolsBaseDir/$szSymbolsFile"; }  }
-    system "cd $szSymbolsBaseDir/$szSymbolsDir && mv symbols/* .; rm -rf symbols";
-}
 
-if (($szSystemID =~ /win32/) && ! -e "$szSymbolsBaseDir/$szSymbolsDir" && -e \
                $szBuildFarmDir)
-{
-    printf "Copying *.map symbols files from build farm...\n";
-    mkdir "$szSymbolsBaseDir/$szSymbolsDir";
-    system "cp $szBuildFarmDir/*.map $szSymbolsBaseDir/$szSymbolsDir; ls \
                $szSymbolsBaseDir/$szSymbolsDir";
-}
+    if (-e "$szSymbolsBaseDir/$szSymbolsFile" && ! -e \
"$szSymbolsBaseDir/$szSymbolsDir") +    {
+        printf "Extracting symbols from archive...\n";
+        mkdir "$szSymbolsBaseDir/$szSymbolsDir";
+        if ($szExt eq "tgz")
+        {
+            system "cd $szSymbolsBaseDir/$szSymbolsDir && tar xzvf ../$szSymbolsFile \
2>/dev/null"; +        }
+        elsif ($szExt eq "zip")
+        {
+            system "cd $szSymbolsBaseDir/$szSymbolsDir && unzip ../$szSymbolsFile \
2>/dev/null"; +        }
+        system "cd $szSymbolsBaseDir/$szSymbolsDir && mv symbols/* .; rm -rf \
symbols"; +    }
+
+    if (($szSystemID =~ /win32/) && ! -e "$szSymbolsBaseDir/$szSymbolsDir" && -e \
$szBuildFarmDir) +    {
+        printf "Copying *.map symbols files from build farm...\n";
+        mkdir "$szSymbolsBaseDir/$szSymbolsDir";
+        system "cp $szBuildFarmDir/*.map $szSymbolsBaseDir/$szSymbolsDir; ls \
$szSymbolsBaseDir/$szSymbolsDir"; +    }
+
+    if (-e "$szSymbolsBaseDir/$szSymbolsFile")
+    {
+        symlink "$szSymbolsBaseDir/$szSymbolsFile", "symbols.$szExt";
+    }
+    if (-e "$szSymbolsBaseDir/$szSymbolsDir")
+    {
+        symlink "$szSymbolsBaseDir/$szSymbolsDir", "symbols";
+    }
 
-if (-e "$szSymbolsBaseDir/$szSymbolsFile")
-{
-    symlink "$szSymbolsBaseDir/$szSymbolsFile", "symbols.$szExt";
-}
-if (-e "$szSymbolsBaseDir/$szSymbolsDir")
-{
-    symlink "$szSymbolsBaseDir/$szSymbolsDir", "symbols";
 }
 
 
-# Assume's we're running from the uptime/{host} directory, and that
-# the symbols are available.
-# Will read stdin to get the files.
+######################################################################
 #
-
-if (-e "symbols" && `ls symbols/* 2>/dev/null`)
+# StartResolverProcesses
+#
+# Opens *two* resolve.pl instances.  
+# They resolve from stdin.
+#
+sub StartResolverProcesses
 {
-    $bResolve = 1;
+    # Assumes we're running from the uptime/{host} directory, and that
+    # the symbols are available.
+    # Will read stdin to get the files.
+    #
 
-    $szResolveCmd = "resolve.pl symbols - $szExeName 2>&1 | c++filt >res.txt";
-    printf "running: $szResolveCmd\n";
-    open RES, "| $szResolveCmd";
+    if (-e "symbols" && `ls symbols/* 2>/dev/null`)
+    {
+        $bResolve = 1;
 
-    $szSummaryCmd = "resolve.pl symbols - $szExeName --echoall 2>&1 | c++filt | \
                logsummary.pl > rmstdout-summary.txt 2>&1";
-    printf "running: $szSummaryCmd\n";
-    open SUM, "| $szSummaryCmd";
+        # Open the normal resolve.pl instance
+        $szResolveCmd = "resolve.pl symbols - $szExeName 2>&1 | c++filt >res.txt";
+        printf "running: $szResolveCmd\n";
+        open RES, "| $szResolveCmd";
+
+        # Open a second resolve.pl to generate the rmstdout-summary.txt file
+        $szSummaryCmd = "resolve.pl symbols - $szExeName --echoall 2>&1 | c++filt | \
logsummary.pl > rmstdout-summary.txt 2>&1"; +        printf "running: \
$szSummaryCmd\n"; +        open SUM, "| $szSummaryCmd";
+    }
 }
 
-printf "running: logspit\n";
-open RSS, "| logspit";
-open RSSCOPY, ">rmstdout-copy.txt";
 
+######################################################################
+#
+# StartFilterProcesses
+#
+# Opens *two* outputs:
+#  1) the RSS filter process
+#  2) a local carbon-copy of rmstdout.txt for easier debugging
+#
+sub StartFilterProcesses
+{
+    printf "running filter: $szFilterCmd\n";
+    open RSSFILTER, "| $szFilterCmd";
+
+    if ($bCarbonCopy)
+    {
+        open RSSCOPY, ">rmstdout-copy.txt";
+    }
+}
 
+######################################################################
+#
+# ProcessLogHunk
+#
 sub ProcessLogHunk
 {
-    syswrite RSS, $szLogText;
-    syswrite RSSCOPY, $szLogText;
+    syswrite(RSSFILTER, $szLogText);
+    syswrite(RSSCOPY, $szLogText) if $bCarbonCopy;
     if ($bResolve)
     {
-        syswrite RES, $szLogText;
-        syswrite SUM, $szLogText;
+        syswrite(RES, $szLogText);
+        syswrite(SUM, $szLogText);
     }
 }
 
 
-ProcessLogHunk();
-
-while (1)
+######################################################################
+#
+# LogFilterLoop
+#
+sub LogFilterLoop
 {
-    $nReadCount = sysread STDIN, $szLogText, $nReadSize;
-    if ($nReadCount)
-    {
-        ProcessLogHunk();
-    }
-    else
+    while (1)
     {
-        sleep $nDelay;
+        $nReadCount = sysread STDIN, $szLogText, $nReadSize;
+        if ($nReadCount)
+        {
+            ProcessLogHunk();
+        }
+        else
+        {
+            sleep $nDelay;
+        }
     }
-    
 }
+
+
+######################################################################
+#
+# MAIN
+#
+Config();
+CommandLineHandler();
+InitialCleanup();
+HandleFirstHunk();
+GetSymbols();
+StartResolverProcesses();
+StartFilterProcesses();
+ProcessLogHunk();  #Now resolve the first hunk we read earlier
+LogFilterLoop();
+
+
+######################################################################
+#
+# DOCUMENTATION
+#
+__END__
+=head1 NAME
+
+uptime_resolve.pl - A resolve.pl wrapper used by graphing system.
+
+=head1 SYNOPSIS
+
+uptime_resolve.pl [options]
+
+ Options:
+   --filter [cmd]     Output command filter.
+   --delay [n]        Input read delay, in seconds.
+   --nocc             Don't create a local carbon-copy of rmstdout.txt.
+   --debug            Enable extra messages for debugging.
+   --help             Brief help message.
+   --man              Full documentation.
+   [subdirs]          Subdirectories to graph, typically hostname-based.
+
+=head1 OPTIONS
+
+=over 8
+
+=item B<--filter>
+
+The output command filter.  By default, this is "logspit".
+
+=item B<--delay>
+
+Input read delay, in seconds.  When a file read returns no data (meaning
+the file has not been updated recently) B<uptime_resolve.pl> goes to
+sleep for this many seconds, after which it will attempt to read from
+the input source again.  The default inactivity delay is 30 seconds.
+
+=item B<--nocc>
+
+Don't create a local copy of the stdout log.  This log is created by default.
+It is only used for easier debugging and is entirely optional.
+
+=item B<--debug>
+
+Enable extra messages for debugging.
+
+=item B<--help>
+
+Print a brief help message and exits.
+
+=item B<--man>
+
+Prints the manual page and exits.
+
+=back
+
+=head1 DESCRIPTION
+
+B<uptime_resolve.pl> does the following:
+
+1) clean up and initialize the graphing work directory
+
+2) automatically pull down the symbols files
+
+3) tail the rmstdout.txt file creating res.txt
+
+4) tail the rmstdout.txt file creating rmstdout-summary.txt
+
+5) pass log to 'logspit' which in turn uses rss2tdf to create the tab-delimited RSS \
data. +
+=head1 NOTES
+
+This only works with horton builds for which symbols exist on the build farm.  
+
+This is used in conjunction with the Ploticus/updategraphs tools.
+
+Usage example:
+
+$ logwatch rmstdout.txt uptime_resolve.pl

Index: uptimes.pl
===================================================================
RCS file: /cvsroot/server/common/analysis/extra/uptimes.pl,v
retrieving revision 1.1
retrieving revision 1.2
diff -u -d -r1.1 -r1.2
--- uptimes.pl	7 Feb 2007 21:20:59 -0000	1.1
+++ uptimes.pl	20 Oct 2008 22:36:31 -0000	1.2
@@ -1,159 +1,260 @@
 #!/usr/bin/perl
 #
-# A wrapper for updategraphs used for graphing real-time uptime results
+# ***** BEGIN LICENSE BLOCK *****
+# Source last modified: $Id$
+#
+# Portions Copyright (c) 1995-2008 RealNetworks, Inc. All Rights Reserved.
+#
+# The contents of this file, and the files included with this file,
+# are subject to the current version of the RealNetworks Public
+# Source License (the "RPSL") available at
+# http://www.helixcommunity.org/content/rpsl unless you have licensed
+# the file under the current version of the RealNetworks Community
+# Source License (the "RCSL") available at
+# http://www.helixcommunity.org/content/rcsl, in which case the RCSL
+# will apply. You may also obtain the license terms directly from
+# RealNetworks.  You may not use this file except in compliance with
+# the RPSL or, if you have a valid RCSL with RealNetworks applicable
+# to this file, the RCSL.  Please see the applicable RPSL or RCSL for
+# the rights, obligations and limitations governing use of the
+# contents of the file.
+#
+# This file is part of the Helix DNA Technology. RealNetworks is the
+# developer of the Original Code and owns the copyrights in the
+# portions it created.
+#
+# This file, and the files included with this file, is distributed
+# and made available on an 'AS IS' basis, WITHOUT WARRANTY OF ANY
+# KIND, EITHER EXPRESS OR IMPLIED, AND REALNETWORKS HEREBY DISCLAIMS
+# ALL SUCH WARRANTIES, INCLUDING WITHOUT LIMITATION, ANY WARRANTIES
+# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, QUIET
+# ENJOYMENT OR NON-INFRINGEMENT.
+#
+# Technology Compatibility Kit Test Suite(s) Location:
+#    http://www.helixcommunity.org/content/tck
+#
+# Contributor(s):
+#
+# ***** END LICENSE BLOCK *****
 #
-
 ######################################################################
-# CONFIG
-# XXXDC - This needs to move to an external config file
 #
+# uptimes.pl - A wrapper for updategraphs used for graphing real-time uptime results
+#
+######################################################################
 
-$szPubDir = "/var/www/html/servgraph";
-
-@servers = (
-            #Solaris8    Solaris9      RHEL4       Win2k3
-            'bethany',   'milesdavis', 'champloo', 'rainy',  #Edge
-            'marsden',   'mingus',     'twisp',    'sunny',  #Proxy/Publisher
-            'maelstrom', 'coltrane',   'mugen',    'catsby', #Transmitter
-            );
-
-$nDelay = 120; #seconds btw checking logs
-$nHTMLRefresh = 300; #seconds btw web page refreshes
-$bOnce = 0; #if 1, don't continuously regenerate the graphs for this directory
+use Getopt::Long;
+use Pod::Usage;
+use File::stat;
 
-#
 ######################################################################
+#
+# Config
+#
+sub Config
+{
+    $| = 1; #flush stdout after writes
 
+    $szProgName = `basename $0`;
+    chomp($szProgName);
+    $ServGraph::szProgName = $szProgName;
 
-$szHelpText = "
-Usage: $0 [flags] [subdirs]
+    ($szVerInfo) = '$Id$'
+        =~ /,v ([0-9.]+ [^\s]+ [^\s]+)/;
+    if (!$szVerInfo) { $szVerInfo = "0.0"; }
+    printf "%s version %s\n", $szProgName, $szVerInfo;
 
-Options:
-    --update [n]   : Delay between log file scans to look for updates.
-    --refresh [n]  : HTTP auto-refresh frequency for generated web pages.
-    --once         : Perform a single log graph and exit (such as for
-                       a non-real-time log)
-    --pubdir [dir] : Root directory to copy generated graphs/html
-                       Each host will go in its own subdirectory.
+    $szProgDir=`dirname $0`;
+    chomp $szProgDir;
+    require "$szProgDir/../servgraph.pm";
 
-[subdirs]
-    These are subdirectories of the current directory, one per system,
-    each containing a rmstdout.txt file and related files.
-    These override the built-in defaults if provided, otherwise
-    the default set of systems will be used.  
+    $szPubDir = "/var/www/html/servgraph";
+    $nDelay = 120; #seconds btw checking logs
+    $nHTMLRefresh = 300; #seconds btw web page refreshes
 
-Note: The output is a little verbose, you may want to run it like this:
-    $ $0 > log 2>&1 &
-    $ tail -f log | grep ====
+    $szLogDir = `pwd`;
+    chomp($szLogDir);
+    $szLogDir .= "/graphlogs";
+}
 
-";
 
-while ($#ARGV >= 0)
+#######################################################################
+#
+# CommandLineHandler - Command-line parsing
+#
+sub CommandLineHandler
 {
-    if ($ARGV[0] eq "--help")
-    {
-        printf $szHelpText;
-        exit;
-    }
-    elsif ($ARGV[0] eq "--update")
-    {
-        $nDelay = $ARGV[1];
-        shift;
-        shift;
-    }
-    elsif ($ARGV[0] eq "--refresh")
-    {
-        $nHTMLRefresh = $ARGV[1];
-        shift;
-        shift;
-    }
-    elsif ($ARGV[0] eq "--once")
+    GetOptions(
+        "update=i"   => \$nDelay,  #should use '--delay' like other scripts
+        "refresh=i"  => \$nHTMLRefresh,
+        "pubdir=s"   => \$szPubDir,
+        "once!"      => \$bOnce,
+        "logdir=s"   => \$szLogDir,
+        "debug!"     => \$ServGraph::bDebug,
+        "help|?"     => \$bShowHelp,
+        "man"        => \$bShowMan,
+    ) || pod2usage(2);
+
+    pod2usage(1) if ($bShowHelp);
+    pod2usage("-exitstatus" => 0, "-verbose" => 2) if ($bShowMan);
+
+    if ($bOnce)
     {
-        $bOnce = 1;
         $nHTMLRefresh = 0;
-        shift;
     }
-    elsif ($ARGV[0] eq "--pubdir")
+
+    #Remaining command-line arguments are the servers to graph
+    if ($#ARGV >= 0)
     {
-        $szPubDir = $ARGV[1];
-        shift;
-        shift;
+        @servers = @ARGV;
     }
-    else
+
+    $szLogBase = "${szLogDir}/" . `date +%Y_%M_%d.%H:%M:%S`;
+    chomp($szLogBase);
+    $szLog = "${szLogBase}.log";
+    if (! -e $szLogDir)
     {
-        push @userservers, $ARGV[0];
-        shift;
+        mkdir $szLogDir, 0755 or die "Unable to create log directory '$szLogDir'!";
     }
+    ServGraph::ReportStatus("Logging to: $szLog\n");
 }
 
-if ($#userservers >= 0)
+
+######################################################################
+#
+# FileModified
+#
+# Note: return will always be true for a given file the first time it's checked.
+# It ignores files that go missing and does not report them as "modified".
+#
+sub FileModified
 {
-    @servers = @userservers;
-}
+    my ($szFile) = @_;
+    my $bFileModified = 0;
+    my ($sb);
 
+    if (-f $szFile)
+    {
+        $sb = stat($szFile);
+        if ($inodes{$szFile} != $sb->ino ||
+            $sizes{$szFile}  != $sb->size ||
+            $mtimes{$szFile} != $sb->mtime)
+        {
+            $bFileModified = 1;
+            $inodes{$szFile} = $sb->ino;
+            $sizes{$szFile}  = $sb->size;
+            $mtimes{$szFile} = $sb->mtime;
+        }
+    }
 
-sub GenerateGraphs
-{
-    my @hosts= @_;
+    printf ("FileModified('%s'): %s\n", $szFile, ($bFileModified ? "TRUE" : \
"FALSE")); +    return $bFileModified;
+}
 
-    #my $cmd = "nice updategraphs --refresh 60 --fast --pubdir $szPubDir";
-    #my $cmd = "nice updategraphs --daygraphs --refresh 300 --fast --pubdir \
                $szPubDir";
-    #my $cmd = "nice updategraphs --daygraphs --refresh $nHTMLRefresh --fast \
                --pubdir $szPubDir";
-    my $cmd, $dir, $x;
+#######################################################################
+#
+# UpdateFiles 
+#
+sub UpdateFiles
+{
+    my (@hosts) = @_;
+    my $host;
 
     if ($#hosts >= 0)
     {
-        printf "Found %d Changes, Updating...\n", $#hosts + 1;
-        foreach $x (@hosts)
+        ServGraph::ReportStatus("Found %d Changes, Updating...\n", $#hosts + 1);
+        foreach $host (@hosts)
         {
-            $szNow = localtime();
-            printf "Updating $x at %s\n", $szNow;
-            $cmd = "";
-            $dir = $szPubDir . "/" . $x;
-            $basename = $x;
-            $basename =~ s/\//_/g;
-            if (! -d $dir) { system "mkdir -m 0755 -p $dir; ln -s $basename.html \
                $dir/index.html"; }
-            if (-f "$x/res.txt") { $cmd .= "uniquestacks.pl < $x/res.txt > \
                $x/unique.txt; "; }
-            if (-f "$x/server-rss.dat") { $cmd .= "(cd $x; rss_summary > \
                rss_summary.txt); "; }
-            $cmd .= "nice updategraphs --daygraphs --refresh $nHTMLRefresh --fast \
                --pubdir $dir";
-            $cmd .= " " . $x;
-            print "executing: $cmd\n";
-            system $cmd;
+            ServGraph::ReportStatus("Updating Files for $host...\n");
+
+            if (FileModified("$host/res.txt"))
+            {
+                ServGraph::RunCmd("(uniquestacks.pl < $host/res.txt > \
$host/unique.txt)"); +            }
+
+            $bRegenerateSummary = 0;
+            if (-e "$host/server-rss.dat" && !-e "$host/rss_summary.txt")
+            {
+                $bRegenerateSummary = 1;
+            }
+            elsif (-e "$host/server-rss.dat" && -e "$host/rss_summary.txt")
+            {
+                $sb = stat("$host/server-rss.dat");
+                $mtime1 = $sb->mtime;
+                $sb = stat("$host/rss_summary.txt");
+                $mtime2 = $sb->mtime;
+                if ($mtime1 > $mtime2)
+                {
+                    $bRegenerateSummary = 1;
+                }
+            }
+            if ($bRegenerateSummary)
+            {
+                ServGraph::RunCmd("(cd $host; rss_summary > rss_summary.txt)");
+            }
         }
     }
 }
 
-
-#Note: return will always be true for a given file the first time it's checked.
-sub FileModified
+#######################################################################
+#
+# GenerateGraphs 
+#
+sub GenerateGraphs
 {
-    my ($szFile) = @_;
-    my $bFileModified = 0;
+    my (@hosts) = @_;
+    my $dir, $basename, $host;
 
-    if (-f $szFile)
+    if ($#hosts >= 0)
     {
-        my ($dev,$ino,$mode,$nlink,$uid,$gid,$rdev,$size,$atime,
-            $mtime,$ctime,$blksize,$blocks)
-                = stat($szFile);
-        if ($inodes{$szFile} != $ino ||
-            $sizes{$szFile}  != $size ||
-            $mtimes{$szFile} != $mtime)
+        foreach $host (@hosts)
         {
-            $bFileModified = 1;
-            $inodes{$szFile} = $ino;
-            $sizes{$szFile}  = $size;
-            $mtimes{$szFile} = $mtime;
+            ServGraph::ReportStatus("Updating Graphs for $host...\n");
+
+            $dir = $szPubDir . "/" . $host;
+            $basename = $host;
+            $basename =~ s/\//_/g;
+
+            if (! -d $dir)
+            {
+                ServGraph::RunCmd("mkdir -m 0755 -p $dir; ln -s $basename.html \
$dir/index.html"); +            }
+
+            if ($ServGraph::bDebug)
+            {
+                ServGraph::RunCmd("nice updategraphs --daygraphs --refresh \
$nHTMLRefresh --fast --pubdir $dir --debug $host"); +            }
+            else
+            {
+                ServGraph::RunCmd("nice updategraphs --daygraphs --refresh \
$nHTMLRefresh --fast --pubdir $dir $host"); +            }
         }
     }
+}
 
-    return $bFileModified;
+######################################################################
+#
+# MAIN
+#
+Config();
+CommandLineHandler();
+
+#Not yet needed, so skip:
+#ServGraph::SetupSigHandler();
+
+if ($bOnce)
+{
+    UpdateFiles(@servers);
+    GenerateGraphs(@servers);
+    exit;
 }
 
 
 while (1)
 {
-    $szNow = localtime();
-    printf "Checking Logs... (%s)\n", $szNow;
+    my ($bModified, @recentservers);
+
+    ServGraph::ReportStatus("Checking Logs...\n");
 
     @recentservers = ();
     foreach $x (@servers)
@@ -172,12 +273,93 @@
         }
     }
 
-    GenerateGraphs @recentservers;
-
-    if ($bOnce)
-    {
-        exit(0);
-    }
+    UpdateFiles(@recentservers);
+    GenerateGraphs(@recentservers);
 
     sleep($nDelay);
 }
+
+
+
+######################################################################
+#
+# DOCUMENTATION
+#
+__END__
+=head1 NAME
+
+uptimes.pl - A wrapper for updategraphs used for graphing real-time uptime results
+
+=head1 SYNOPSIS
+
+uptimes.pl [options] [subdirs]
+
+ Options:
+   --update [n]       Delay between log file scans to look for updates.
+   --refresh [n]      HTTP auto-refresh frequency for generated web pages.
+   --[no]once         Perform a single log graph for each of [subdirs] and exit.
+   --pubdir [dir]     Directory to publish (copy) the generated graphs.
+   --logdir [dir]     Directory to save debug logs for this script.
+   --debug            Enable extra messages for debugging.
+   --help             Brief help message.
+   --man              Full documentation.
+   [subdirs]          Subdirectories to graph, typically hostname-based.
+
+=head1 OPTIONS
+
+=over 8
+
+=item B<--update>
+
+The frequency at which the directories will be scanned.
+The default is 120 seconds.
+
+=item B<--refresh>
+
+The HTTP auto-refresh frequency to be used for the generated web pages.
+The default is 300 seconds.
+
+=item B<--once>
+
+Perform a single log graph for each of [subdirs] and exit
+(such as for a non-real-time log).
+
+=item B<--pubdir>
+
+Directory to publish (copy) the generated graphs.
+Default: "/var/www/html/servgraph".
+
+=item B<--logdir>
+
+Directory to save debug logs for this script.
+Default: "graphlogs" in the current directory.
+
+=item B<--debug>
+
+Enable extra messages for debugging.
+
+=item B<--help>
+
+Print a brief help message and exits.
+
+=item B<--man>
+
+Prints the manual page and exits.
+
+=back
+
+=head1 DESCRIPTION
+
+B<uptimes.pl> will regenerate the graphs for each subdirectory specified.
+It will then periodically regenerate the graphs using the most-recent data.
+
+These graphs consist of html pages and png images.  These will
+be published (copied) to the specified --pubdir output directory.
+They will be placed in subdirectories identically-named to the [subdirs]
+which are being monitored/graphed.
+
+Note: This script could be named better, and should not be confused with
+QA's "uptime.py".  Additionally, other scripts are used to update the data
+file this script uses, such as "realtimegraph".  Conventionally, this
+script is run via a symlink in the graphing work directory called 'go'
+(again, not the best name! :)


_______________________________________________
Server-cvs mailing list
Server-cvs@helixcommunity.org
http://lists.helixcommunity.org/mailman/listinfo/server-cvs


[prev in list] [next in list] [prev in thread] [next in thread] 

Configure | About | News | Add a list | Sponsored by KoreLogic