#!/usr/bin/env perl
#------------------------------------------------------------------------------
#
# PgCluu - PostgreSQL monitoring tool with statistics collector and grapher
#
# This program is open source, licensed under the PostgreSQL license.
# For license terms, see the LICENSE file.
#
# Author: Gilles Darold
# Copyright: (C) 2012-2015 Gilles Darold - All rights reserved.
#------------------------------------------------------------------------------
our ($VERSION, $PROGRAM);

# NOTE: 'refs' is intentionally absent from the strict list because metric
# collector subs are invoked through their names (symbolic references).
use strict qw(vars subs);

use File::Basename;
use POSIX;
use Getopt::Long qw(:config bundling no_ignore_case_always);
use POSIX qw(locale_h sys_wait_h);
setlocale(LC_ALL, 'C');

# Unbuffered output so log lines show up immediately.
$| = 1;

$VERSION = '2.4';
$PROGRAM = 'pgcluu_collectd';

# Default path to the external programs
# They can be specified into command line options
my $SAR_PROG    = 'sar';
my $PSQL_PROG   = 'psql';
my $CAT_PROG    = 'cat'; # trailing space removed; call sites add their own separator
my $SAR_FILE    = 'sar_stats.dat';
my $PIDFILE     = "/tmp/$PROGRAM.pid";
my $SQL_PROBE   = '';
my %DB_INFO     = ();
my $HELP        = 0;
my $DAEMONIZE   = 0;
my $DISABLE_SAR = 0;
my $INTERVAL    = 60; # wait 60 seconds between runs
my $DEBUG       = 0;
my $DBNAME      = '';
my $DBUSER      = '';
my $DBHOST      = '';
my $DBPORT      = '';
my $DBPASS      = '';
my @METRICS     = ();
my $LIST_METRIC = 0;
my $FILE_REDIR  = '>>';
my $PG_VERSION  = 0;
my $DBSERVICE   = '';
my $STAT_TYPE   = 'user';
my $KILL        = 0;
my $OS_INFO     = 0;
my $NOTABLESPACE= 0;
my $PGBOUNCER_ARGS = '';
# BUGFIX: was "my $INCLUDED_DB = ();" which assigns undef to a scalar
# (an empty list in scalar context); initialize to an empty string like
# the other comma-separated-list options ($SKIP_HOURS, $DBNAME, ...).
my $INCLUDED_DB = '';
my $SKIP_HOURS  = '';
my @SKIP_BEGIN  = ();
my @SKIP_END    = ();
my $USE_BUFFERCACHE = 0;
my $NO_STATEMENTS  = 0;
my $NO_SYSINFO  = 0;
my $SHOW_VER    = 0;
my $END_AFTER   = 0;
my $MAX_SIZE    = 0;
my $END_AFTER_COUNTER = 0;
my $NO_DATABASE = 0;
my $D_ROTATE    = 0;
my $H_ROTATE    = 0;
my $OUT_DIR     = '.';
my $COMPRESS    = 0;
my $OLD_OUT_DIR = '';
my $GZIP_PID    = 0;
my $GZIP_PROG   = '/bin/gzip';
my $INCREMENTAL = 0;
my $CAPTURE     = 0;
my $TAR_PROG    = 'tar -czf';
my $CAPTURE_DIR = '/tmp';

# Stores starting time
my $START_TIME = time();

# Variables related to remote ssh connection
my $USE_SSH = '';
my $SSH_COMMAND = '';
my $SSH_BIN = 'ssh';
my $SSH_IDENTITY = '';
my $SSH_USER = '';
my $SSH_TIMEOUT = 10;
my $SSH_OPTIONS = "-o ConnectTimeout=$SSH_TIMEOUT -o PreferredAuthentications=hostbased,publickey";
# Definition to collect metrics from the database.
# Each entry maps a metric name to:
#   output   - CSV file written under the current output directory
#   command  - name of the sub (called by symbolic reference) that returns
#              the SQL to run; for configuration_stats it performs the
#              collect itself and returns a file list
#   repeat   - metric is collected once per database instead of once per
#              cluster (see create_sql_script)
#   override - its output file is deleted at each loop iteration, so it is
#              rewritten instead of appended
my %METRICS_COMMANDS = (
	'database_stats' => {
		'output' => 'pg_stat_database.csv',
		'command' => 'dump_pgstatdatabase'
	},
	'tablespace_size_stats' => {
		'output' => 'pg_tablespace_size.csv',
		'command' => 'dump_pgtablespace_size'
	},
	'bgwriter_stats' => {
		'output' => 'pg_stat_bgwriter.csv',
		'command' => 'dump_pgstatbgwriter'
	},
	'conflict_stats' => {
		'output' => 'pg_stat_database_conflicts.csv',
		'command' => 'dump_pgstatdatabaseconflicts'
	},
	'replication_stats' => {
		'output' => 'pg_stat_replication.csv',
		'command' => 'dump_pgstatreplication'
	},
	'all_tables_stats' => {
		'output' => 'pg_stat_all_tables.csv',
		'command' => 'dump_pgstattables',
		'repeat'  => 1,
	},
	'user_tables_stats' => {
		'output' => 'pg_stat_user_tables.csv',
		'command' => 'dump_pgstattables_user',
		'repeat'  => 1,
	},
	'all_tables_io_stats' => {
		'output' => 'pg_statio_all_tables.csv',
		'command' => 'dump_pgstatiotables',
		'repeat'  => 1,
	},
	'user_tables_io_stats' => {
		'output' => 'pg_statio_user_tables.csv',
		'command' => 'dump_pgstatiotables_user',
		'repeat'  => 1,
	},
	'all_indexes_stats' => {
		'output' => 'pg_stat_all_indexes.csv',
		'command' => 'dump_pgstatindexes',
		'repeat'  => 1,
	},
	'user_indexes_stats' => {
		'output' => 'pg_stat_user_indexes.csv',
		'command' => 'dump_pgstatindexes_user',
		'repeat'  => 1,
	},
	'all_indexes_io_stats' => {
		'output' => 'pg_statio_all_indexes.csv',
		'command' => 'dump_pgstatioindexes',
		'repeat'  => 1,
	},
	'user_indexes_io_stats' => {
		'output' => 'pg_statio_user_indexes.csv',
		'command' => 'dump_pgstatioindexes_user',
		'repeat'  => 1,
	},
	'all_sequences_io_stats' => {
		'output' => 'pg_statio_all_sequences.csv',
		'command' => 'dump_pgstatiosequences',
		'repeat'  => 1,
	},
	'user_sequences_io_stats' => {
		'output' => 'pg_statio_user_sequences.csv',
		'command' => 'dump_pgstatiosequences_user',
		'repeat'  => 1,
	},
	'functions_stats' => {
		'output' => 'pg_stat_user_functions.csv',
		'command' => 'dump_pgstatuserfunctions',
		'repeat'  => 1,
	},
	'xact_functions_stats' => {
		'output' => 'pg_stat_xact_user_functions.csv',
		'command' => 'dump_pgstatxactuserfunctions',
		'repeat'  => 1,
	},
	'all_xact_tables_stats' => {
		'output' => 'pg_stat_xact_all_tables.csv',
		'command' => 'dump_pgstatxacttables',
		'repeat'  => 1,
	},
	'user_xact_tables_stats' => {
		'output' => 'pg_stat_xact_user_tables.csv',
		'command' => 'dump_pgstatxacttables_user',
		'repeat'  => 1,
	},
	'class_size_stats' => {
		'output'  => 'pg_class_size.csv',
		'command' => 'dump_pgclass_size',
		'repeat'  => 1,
		'override' => 1,
	},
	# NOTE(review): the three lock metrics share the same output file.
	'lock_types_stats' => {
		'output'  => 'pg_stat_locks.csv',
		'command' => 'dump_pgstatlocktypes',
		'repeat'  => 1,
	},
	'lock_modes_stats' => {
		'output'  => 'pg_stat_locks.csv',
		'command' => 'dump_pgstatlockmodes',
		'repeat'  => 1,
	},
	'lock_granted_stats' => {
		'output'  => 'pg_stat_locks.csv',
		'command' => 'dump_pgstatlockgranted',
		'repeat'  => 1,
	},
	'statements_stats' => {
		'output'   => 'pg_stat_statements.csv',
		'command'  => 'dump_pgstatstatements',
		'override'  => 1,
	},
	'xlog_stats' => {
		'output' => 'pg_xlog_stat.csv',
		'command' => 'dump_xlog_stat'
	},
	'database_size_stats' => {
		'output' => 'pg_database_size.csv',
		'command' => 'dump_pgdatabase_size'
	},
	'connections_stats' => {
		'output' => 'pg_stat_connections.csv',
		'command' => 'dump_pgstatconnections'
	},
	'pgbouncer_stats' => {
		'output' => 'pgbouncer_stats.csv',
		'command' => 'dump_pgbouncerpoolstats'
	},
	'pgbouncer_req_stats' => {
		'output' => 'pgbouncer_req_stats.csv',
		'command' => 'dump_pgbouncerquerystats'
	},
	'unused_indexes_stats' => {
		'output' => 'pg_stat_unused_indexes.csv',
		'command' => 'dump_unusedindexes',
		'repeat'  => 1,
		'override'  => 1,
	},
	'redundant_indexes_stats' => {
		'output' => 'pg_stat_redundant_indexes.csv',
		'command' => 'dump_redundantindexes',
		'repeat'  => 1,
		'override'  => 1,
	},
	'missing_fkindexes_stats' => {
		'output' => 'pg_stat_missing_fkindexes.csv',
		'command' => 'dump_missingfkindexes',
		'repeat'  => 1,
		'override'  => 1,
	},
	'pg_settings_stats' => {
		'output' => 'pg_settings.csv',
		'command' => 'dump_pgsettings',
		'repeat'  => 1,
		'override'  => 1,
	},
	'pg_db_role_setting_stats' => {
		'output' => 'pg_db_role_setting.csv',
		'command' => 'dump_pgdbrolesetting',
		'repeat'  => 1,
		'override'  => 1,
	},
	'database_buffercache_stats' => {
		'output' => 'pg_database_buffercache.csv',
		'command' => 'dump_pgdatabase_buffercache'
	},
	'database_usagecount_stats' => {
		'output' => 'pg_database_usagecount.csv',
		'command' => 'dump_pgdatabase_usercount'
	},
	'database_isdirty_stats' => {
		'output' => 'pg_database_isdirty.csv',
		'command' => 'dump_pgdatabase_isdirty'
	},
	'relation_buffercache_stats' => {
		'output' => 'pg_relation_buffercache.csv',
		'command' => 'dump_pgrelation_buffercache',
	},
	'archiver_stats' => {
		'output' => 'pg_stat_archiver.csv',
		'command' => 'dump_pgstatarchiver',
	},
	# Special case: no CSV output, copies configuration files instead.
	'configuration_stats' => {
		'output' => '',
		'command' => 'get_configuration_files',
	},
);

# Process command line options and look for an action keyword. There
# are no mandatory options.
my $result = GetOptions(
	"B|enable-buffercache!" => \$USE_BUFFERCACHE,
	"c|capture!"     => \$CAPTURE,
	"C|end-counter=s"=> \$END_AFTER_COUNTER,
	"d|dbname=s"     => \$DBNAME,
	"D|daemonize!"   => \$DAEMONIZE,
	"E|end-after=s"  => \$END_AFTER,
	"f|pid-file=s"   => \$PIDFILE,
	"h|host=s"       => \$DBHOST,
	"i|interval=i"   => \$INTERVAL,
	"I|incremental!" => \$INCREMENTAL,
	"k|kill!"        => \$KILL,
	"m|metric=s"     => \@METRICS,
	"M|max-size=s"   => \$MAX_SIZE,
	"p|port=i"       => \$DBPORT,
	"P|psql=s"       => \$PSQL_PROG,
	"Q|no-statement!"=> \$NO_STATEMENTS,
	"r|rotate-daily!"=> \$D_ROTATE,
	"R|rotate-hourly!"=> \$H_ROTATE,
	"s|sar=s"        => \$SAR_PROG,
	"S|disable-sar!" => \$DISABLE_SAR,
	"T|notablespace!"=> \$NOTABLESPACE,
	"U|dbuser=s"     => \$DBUSER,
	"v|verbose!"     => \$DEBUG,
	"V|version!"     => \$SHOW_VER,
	"W|password=s"   => \$DBPASS,
	"z|compress!"    => \$COMPRESS,
	"pgversion=s"    => \$PG_VERSION,
	"pgservice=s"    => \$DBSERVICE,
	"help!"          => \$HELP,
	"stat-type=s"    => \$STAT_TYPE,
	"sar-file=s"     => \$SAR_FILE,
	"list-metric!"   => \$LIST_METRIC,
	"pgbouncer-args=s" => \$PGBOUNCER_ARGS,
	"sysinfo!"       => \$OS_INFO,
	"no-sysinfo!"    => \$NO_SYSINFO,
	"no-database!"   => \$NO_DATABASE,
	"included-db=s"  => \$INCLUDED_DB,
	"exclude-time=s" => \$SKIP_HOURS,
	'enable-ssh!'    => \$USE_SSH,
	'ssh-command=s'  => \$SSH_COMMAND,
	'ssh-program=s'  => \$SSH_BIN,
	'ssh-identity=s' => \$SSH_IDENTITY,
	'ssh-option=s'   => \$SSH_OPTIONS,
	'ssh-user=s'     => \$SSH_USER,
	'ssh-timeout=i'  => \$SSH_TIMEOUT,
) or die &usage();

# Print version number to stdout
if ($SHOW_VER) {
	print "Version: $VERSION\n";
	exit 0;
}

# The daemon should be stopped: send SIGTERM (-15) to the pid recorded in
# the pid file; the running collector removes its own pid file on exit.
if ($KILL) {
	if (-e "$PIDFILE") {
		system("kill -15 `cat $PIDFILE`");
		if ($? == -1) {
			print "FATAL: failed to execute: $!\n";
		} elsif ($? & 127) {
			printf "ERROR: child died with signal %d, %s coredump\n", ($? & 127),  ($? & 128) ? 'with' : 'without';
		} else {
			printf "OK: pgcluu_collectd exited with value %d\n", $? >> 8;
		}
		exit 0;
	} else {
		die "ERROR: can't find $PIDFILE, is $PROGRAM running?\n";
	}
}

# Display usage if help is asked
&usage if $HELP;

# Capture mode runs a single collect loop: incompatible with daemonizing,
# and sar collection is turned off.
if ($CAPTURE && $DAEMONIZE) {
	die "ERROR: option -D (--daemonize) can not be used with option -c (--capture).\n";
} elsif ($CAPTURE) {
	$DISABLE_SAR = 1;
}

# Set the multi host information: -d/-p/-h/-U/-W each accept a
# comma-separated list; entry i of each list describes host i.
my @MULTI_HOST_INFO = ();
my @dbnames = split(/,/, $DBNAME);
my @dbports = split(/,/, $DBPORT);
my @dbhosts = split(/,/, $DBHOST);
my @dbusers = split(/,/, $DBUSER);
my @dbpass = split(/,/, $DBPASS);
for (my $i = 0; $i <= $#dbhosts; $i++) {
	push(@MULTI_HOST_INFO, { 'ip' => $dbhosts[$i] || 'localhost', 'port' => $dbports[$i] || 5432, 'db' => $dbnames[$i] || 'postgres', 'user' => $dbusers[$i] || 'postgres', 'password' => $dbpass[$i] || ''});
}

# Get the output dir from command line
my $OUTPUT_DIR = $ARGV[0] || '';
# Overwrite the output directory in capture mode
if ($CAPTURE) {
	$OUTPUT_DIR = 'pgcluu_capture';
	unless(mkdir "$CAPTURE_DIR/$OUTPUT_DIR") {
		die "FATAL: can not create capture temporary directory $CAPTURE_DIR/$OUTPUT_DIR: $!.\n";
	} else {
		print "INFO: creating temporary output directory: $CAPTURE_DIR/$OUTPUT_DIR\n";
	}
}

if ($USE_SSH && !$DBHOST) {
	die "FATAL: you must give an ipaddress for the remote host with -h | --host option to use sar remotely\n";
}

# List metrics and associated SQL commands, then exit
if ($LIST_METRIC) {

	# Detect PostgreSQL version
	&fetch_version();

	print "List of available metrics:\n\n";
	foreach my $c (sort {$a cmp $b} keys %METRICS_COMMANDS) {
		# user_*/all_* metrics are shown only for the selected --stat-type
		next if (($c =~ /^(user|all)_/) && ($c !~ /^$STAT_TYPE/)); 
		# $c is a copy of the key, so stripping the suffix for display
		# does not alter %METRICS_COMMANDS
		$c =~ s/_stats$//;
		print "\t$c\n";
	}
	print "\n";
	exit 0;
}

# Check if an other process is already running
if (-f $PIDFILE) {
	&dprint("FATAL: an other process is already started, see pid in $PIDFILE\n");
	exit 1;
}

# Check excluded times: --exclude-time takes whitespace-separated
# HH:MM-HH:MM ranges, stored as parallel HHMM begin/end arrays.
if ($SKIP_HOURS) {
	my $tmp = $SKIP_HOURS;
	my @timerange = split(/[\s\t]+/, $tmp);
	foreach $tmp (@timerange) {
		next if (!$tmp);
		if ($tmp =~ m#(\d{2}):(\d{2})-(\d{2}):(\d{2})#) {
			push(@SKIP_BEGIN, "$1$2");
			push(@SKIP_END, "$3$4");
		} else {
			# NOTE(review): despite the FATAL label this only logs and
			# keeps running, silently ignoring the malformed range.
			&dprint("FATAL: Bad time range: $tmp. Format of exclusion time must be: HH:MM-HH:MM\n");
		}
	}
}

# Normalize number of seconds before terminating; --end-after accepts an
# optional unit suffix: M (minutes), H (hours) or D (days).
if ($END_AFTER) {
	# BUGFIX: capture the unit suffix *before* stripping non-digits; the
	# original read $1 after a s///g that does not match on a pure-digit
	# value, leaving a stale capture variable.
	my ($unit) = ($END_AFTER =~ /(\D+)/);
	$END_AFTER =~ s/\D+//g;
	$END_AFTER ||= 0;
	if (defined $unit) {
		if ($unit =~ /^M/i) {
			$END_AFTER *= 60;
		} elsif ($unit =~ /^H/i) {
			$END_AFTER *= 3600;
		} elsif ($unit =~ /^D/i) {
			$END_AFTER *= 86400;
		}
	}
}

# Set max size of the output dir before terminating
$MAX_SIZE = &parse_pretty_size($MAX_SIZE);

# Validate action(s) to execute: each -m value (possibly comma-separated)
# must match a key of %METRICS_COMMANDS once the _stats suffix is added.
if ($#METRICS >= 0) {

	my @list_metric = ();
	push(@list_metric, split(/[,]+/, join(',', @METRICS)));
	@METRICS = ();
	foreach my $a (@list_metric) {
		if (!grep(/^${a}_stats$/, keys %METRICS_COMMANDS)) {
			&dprint("FATAL: metric $a does not exist. Use --list-metric to show the available metrics.\n");
			exit 1;
		} else {
			push(@METRICS, "${a}_stats");
		}
	}

} else {

	# Perform all metrics actions per default
	push(@METRICS, sort {$a cmp $b} keys %METRICS_COMMANDS);

}

# Check if output directory exists
if (!$OUTPUT_DIR) {
	&dprint("FATAL: no output directory\n");
	exit 1;
} elsif (!$CAPTURE) {
	# Ensure this is not a relative path
	if (dirname($OUTPUT_DIR) eq '.') {
		&dprint("FATAL: output directory ($OUTPUT_DIR) is not an absolute path.\n");
		exit 1;
	}
}

# Go to that directory if we are not in capture mode otherwise go to /tmp
if (!$CAPTURE) {
	unless (chdir($OUTPUT_DIR)) {
		&dprint("FATAL: can not change directory to $OUTPUT_DIR: $!\n");
		exit 1;
	} else {
		# Check if we can write in this directory by creating and
		# removing a probe file named "wtest".
		unless(open(OUT, ">wtest")) {
			&dprint("FATAL: can not write into output directory $OUTPUT_DIR\n");
			exit 1;
		}
		close(OUT);
		unlink("wtest");

	}
} else {
	unless(chdir("$CAPTURE_DIR")) {
		&dprint("FATAL: can not change directory to $CAPTURE_DIR: $!\n");
		exit 1;
	}
	$OUT_DIR = $OUTPUT_DIR;
}

# Generate the sar command: one 1-second sample of all activities,
# timestamped (-t) with pretty device names (-p), C locale for parsing.
my $def_sar_command = "LC_ALL=C $SAR_PROG -t -p -A 1 1";
my $sshcmd = '';
# Set command to execute sar remotely using ssh if necessary 
if ($USE_SSH) {
	# Force using the user defined ssh command
	if ($SSH_COMMAND) {
		$sshcmd = $SSH_COMMAND;
	# else compute command following the configuration parameters
	} else {
		$sshcmd = $SSH_BIN || 'ssh';
		$sshcmd .= " -i $SSH_IDENTITY" if ($SSH_IDENTITY);
		$sshcmd .= " $SSH_OPTIONS" if ($SSH_OPTIONS);
		if ($SSH_USER) {
			$sshcmd .= " $SSH_USER\@$DBHOST";
		} else {
			$sshcmd .= " $DBHOST";
		}
	}

}

# Get Os information and exit (--sysinfo mode)
if ($OS_INFO && !$NO_SYSINFO) {
	&grab_os_information();
	exit 0;
}

# Die cleanly on signal: remove the pid file, reap children, exit.
sub terminate
{
	# block signal while shutting down
	local $SIG{TERM} = 'IGNORE';
	local $SIG{INT}  = 'IGNORE';
	local $SIG{QUIT} = 'IGNORE';

	&dprint("LOG: Received terminating signal.\n");

	if (-f $PIDFILE) {
		unlink("$PIDFILE") or &dprint("ERROR: Unable to remove pid file $PIDFILE, $!\n");
	}

	# Wait for all child processes to die except for the logger
	&wait_all_childs();

	exit 0;
}

# Die on kill -2, -3 or -15 
$SIG{'INT'} = $SIG{'QUIT'} = $SIG{'TERM'} = 'terminate';

# Run in interactive mode if required
if (!$DAEMONIZE) {
	# Start in interactive mode
	print "\n*** $PROGRAM v$VERSION (pid:$$) started at " . localtime(time) . "\n";
	if ($CAPTURE) {
		print "Capture mode enabled, $PROGRAM will ended after one single loop.\n\n";
	} else {
		print "Type Ctrl+c to quit.\n\n";
	}
} else {
	# detach from terminal: fork, let the parent exit, start a new session
	my $pid = fork;
	exit 0 if ($pid);
	die "FATAL: Couldn't fork: $!" unless defined($pid);
	# BUGFIX: the original wrote "\$!" inside a double-quoted string, which
	# printed the literal characters '$!' instead of the system error.
	POSIX::setsid() or die "Can't detach: $!";
	&dprint("LOG: Detach from terminal with pid: $$\n");
	open(STDIN, "/dev/null");
	open(STDOUT, ">/dev/null");
	open(STDERR, ">/dev/null");
}


# Set name of the program without path (shows as "pgcluu_collectd" in ps)
my $orig_name = $0;
$0 = $PROGRAM;

# Create pid file
unless(open(OUT, ">$PIDFILE")) {
	die "FATAL: can't create pid file $PIDFILE, $!\n";
}
print OUT $$;
close(OUT);

# Generate the psql command: -A unaligned, -t tuples only, -q quiet,
# -F';' semicolon field separator, -f - read the SQL script from stdin.
$PSQL_PROG .= " -Atq -F';' -f - ";
# NOTE(review): the pgbouncer command is built *before* -h/-p/-U are
# appended below; its connection settings come only from --pgbouncer-args.
# Confirm this is intended when pgbouncer runs on the monitored host.
my $PGBOUNCER_PROG = $PSQL_PROG . ' ' . $PGBOUNCER_ARGS . ' pgbouncer';

if ($NO_DATABASE) {
	$PSQL_PROG = '';
} else {
	$PSQL_PROG .= " -h $DBHOST" if ($DBHOST);
	$PSQL_PROG .= " -p $DBPORT" if ($DBPORT);
	$PSQL_PROG .= " -U $DBUSER" if ($DBUSER);
	if ($#dbnames >= 0) {
		# Quote database names containing characters outside [a-z0-9_-]
		if ($dbnames[0] !~ /^[a-z0-9\_\-]+$/i) {
			$PSQL_PROG .= " -d '$dbnames[0]'";
		} else {
			$PSQL_PROG .= " -d $dbnames[0]";
		}
	}
	if ($DBPASS) {
		# Pass the password through the environment, not the command line
		$ENV{PGPASSWORD} = $DBPASS;
	}
}

# Try to grab the sysstat version
my $sysstat_version = '';
if (!$CAPTURE) {
	$sysstat_version = &sysstat_version() || '';
}

# Remove action that can't be run with the PostgreSQL version
my $PG_RET = '';
$PG_RET = &verify_action() if (!$NO_DATABASE);

# Look if we should limit statistic collect to some DB
my @included_dbs = split(/,/, $INCLUDED_DB);

# end counter (number of completed loops, for --end-counter)
my $tcounter = 0;

# Force rotation in incremental mode
if ($INCREMENTAL) {
	$D_ROTATE = 1;
	$H_ROTATE = 1;
}

# Main collect loop: one iteration per --interval seconds, a single
# iteration in capture mode. The label lets inner loops restart it.
MAINLOOP: while (1) {

	# Stores loop start time
	my $t0 = time;

	# Terminate when the --end-after wall-clock limit is reached
	if ($END_AFTER && ($t0 > ($START_TIME + $END_AFTER))) {
		&dprint("LOG: ending time reach, terminating.\n");
		if (-f $PIDFILE) {
			unlink("$PIDFILE") or &dprint("ERROR: Unable to remove pid file $PIDFILE, $!\n");
		}
		exit 0;
	}

	# If counter reached then exit
	$tcounter++;

	if (($END_AFTER_COUNTER > 0) && ($tcounter > $END_AFTER_COUNTER)) {
		&dprint("LOG: counter reached, terminating.\n");
		if (-f $PIDFILE) {
			unlink("$PIDFILE") or &dprint("ERROR: Unable to remove pid file $PIDFILE, $!\n");
		}
		exit 0;
	}

	# Build zero-padded date parts used for exclusion checks and rotation
	my ($sec , $min, $hour, $mday, $mon, $year, @other) = localtime(time);
	$min = "0$min" if ($min < 10);
	$hour = "0$hour" if ($hour < 10);
	$mday = "0$mday" if ($mday < 10);
	$mon++;
	$mon = "0$mon" if ($mon < 10);
	$year += 1900;
	# Some time range may not collect data (--exclude-time)
	if (!$CAPTURE) {
		for (my $i = 0; $i <= $#SKIP_BEGIN; $i++) {
			if ( $SKIP_BEGIN[$i] <= $SKIP_END[$i] ) {
				if ( ("$hour$min" >= $SKIP_BEGIN[$i]) && ("$hour$min" <= $SKIP_END[$i]) ) {
					# BUGFIX: a bare "next" only advanced the inner for
					# loop, so data was still collected inside an excluded
					# time range; restart the main loop instead.
					sleep($INTERVAL);
					next MAINLOOP;
				}
			} elsif ( $SKIP_BEGIN[$i] > $SKIP_END[$i] ) {
				# Range crossing midnight, e.g. 22:00-06:00
				if ( ("$hour$min" >= $SKIP_BEGIN[$i]) && ("$hour$min" < 2400) ||
						("$hour$min" <= $SKIP_END[$i]) && ("$hour$min" > 0)) {
					# BUGFIX: same as above, restart the main loop.
					sleep($INTERVAL);
					next MAINLOOP;
				}
			}
		}
	}

	####
	# Output dir is now relative to $OUTPUT_DIR because we have chdir to that directory.
	####

	# Compress previous daily directory if required
	if ($COMPRESS && $OLD_OUT_DIR && ($OLD_OUT_DIR ne "$year/$mon/$mday") && $D_ROTATE && !$H_ROTATE) {
		# Fork a process to compress old data files in parallel
		&spawn(sub {
			&compress_files($OLD_OUT_DIR);
		});
	}

	# Set daily rotation subdirectories 
	if ($D_ROTATE && !$H_ROTATE && !-d "$year/$mon/$mday") {
		mkdir "$year" if (!-d "$year");
		mkdir "$year/$mon" if (!-d "$year/$mon");
		mkdir "$year/$mon/$mday";
		# Set the new output directory
		$OUT_DIR = "$year/$mon/$mday";
	} elsif ($D_ROTATE) {
		# Set the new output directory
		$OUT_DIR = "$year/$mon/$mday";
	}

	# Compress previous hourly directory if required
	if ($COMPRESS && $OLD_OUT_DIR && ($OLD_OUT_DIR ne "$year/$mon/$mday") && $H_ROTATE) {
		# Fork a process to compress old data files in parallel
		&spawn(sub {
			&compress_files($OLD_OUT_DIR);
		});
	}

	# Set hourly rotation/incremental subdirectories 
	if ($H_ROTATE && !-d "$year/$mon/$mday/$hour") {
		mkdir "$year" if (!-d "$year");
		mkdir "$year/$mon" if (!-d "$year/$mon");
		mkdir "$year/$mon/$mday" if (!-d "$year/$mon/$mday");
		mkdir "$year/$mon/$mday/$hour";
		# Set the new output directory
		$OUT_DIR = "$year/$mon/$mday/$hour";
	} elsif ($H_ROTATE) {
		# Set the new output directory
		$OUT_DIR = "$year/$mon/$mday/$hour";
	}

	# stores previous directory to check when rotated files must compressed
	$OLD_OUT_DIR = $OUT_DIR;

	# Collect initial information about the cluster once per directory
	if (!-e "$OUT_DIR/sysinfo.txt") {
		if (!$NO_DATABASE && ($PG_RET >= 0)) {
			&get_initial_info();
		}

		# Get Os information
		if (!$NO_SYSINFO) {
			&grab_os_information();
		}
	}

	# Copy configuration files into output directory
	if (!$NO_DATABASE && ($#METRICS >= 0) && grep(/^configuration_stats$/, @METRICS)) {
		# 'command' is a sub name called through a symbolic reference
		my @conf_files = $METRICS_COMMANDS{'configuration_stats'}->{command}->();
		@conf_files = () if ($PG_RET < 0);
		my $outdir = $OUT_DIR;
		$outdir = "$CAPTURE_DIR/$OUT_DIR" if ($CAPTURE);
		foreach my $f (@conf_files, '/etc/pgbouncer/pgbouncer.ini') {
			my $filename = basename($f);
			next if (-e "$OUT_DIR/$filename"); # Do not copy file twice in same directory
			if ($sshcmd) {
				`$sshcmd "$CAT_PROG $f" >$outdir/$filename 2>/dev/null`;
			} else {
				`$CAT_PROG $f >$outdir/$filename 2>/dev/null`;
			}
			# remove file when empty
			unlink("$OUT_DIR/$filename" ) if (-z "$OUT_DIR/$filename");
		}
	}

	# Set the sar command, wrapped in ssh when collecting remotely
	my $sar_command = '';
	if (!$DISABLE_SAR) {
		$sar_command = ($sshcmd) ? $sshcmd . ' "' . $def_sar_command . ' "' : $def_sar_command;
		$sar_command .= " >>$OUT_DIR/$SAR_FILE";
	}

	# Remove files that must be overriden (metrics flagged 'override' are
	# rewritten each loop instead of appended)
	foreach my $type (sort {$a cmp $b} keys %METRICS_COMMANDS) {
		if (($METRICS_COMMANDS{$type}->{override}) && -e "$OUT_DIR/$METRICS_COMMANDS{$type}->{output}") {
			unlink("$OUT_DIR/$METRICS_COMMANDS{$type}->{output}");
		}
	}

	# Generating global statistics collector SQL script 
	my $script_sql = &create_sql_script();
	if (!$script_sql && ($#METRICS < 0)) {
		# No action can be perform with this PostgreSQL version
		if (-f $PIDFILE) {
			unlink("$PIDFILE") or &dprint("ERROR: Unable to remove pid file $PIDFILE, $!\n");
		}
		&dprint("FATAL: no action will be run on this PostgreSQL version $DB_INFO{major}.$DB_INFO{minor}, $!\n");
		exit 1;
	}

	if (!$NO_DATABASE) {
		# Collecting database statistics if there other than pgbouncer metrics to collect
		if ($PG_RET >= 0) {
			open(PSQL, "| $PSQL_PROG");
			print PSQL $script_sql, "\n";
			close(PSQL);

			# Get database list
			my @dblist = &get_databases();
			push(@dblist, $DBNAME) if (($#dblist == -1) && $DBNAME);

			# Generating per database statistics collector SQL script 
			foreach my $db (@dblist) {

				next if (($#included_dbs >= 0) && !grep(/^$db$/i, @included_dbs));

				my $script_repeat_sql = &create_sql_script($db);
				if (!$script_repeat_sql) {
					last;
				}
				# Point the psql command at this database, rewriting an
				# existing -d flag or appending a new (quoted) one
				my $LOCAL_PSQL_PROG = $PSQL_PROG;
				if ($db && ($LOCAL_PSQL_PROG !~ s/-d $dbnames[0]/-d $db/i)) {
					if ($db !~ /^[a-z0-9\_\-]+$/i) {
						$LOCAL_PSQL_PROG .= " -d '$db'";
					} else {
						$LOCAL_PSQL_PROG .= " -d $db";
					}
				}
				# Collecting database statistics
				open(PSQL, "| $LOCAL_PSQL_PROG");
				print PSQL $script_repeat_sql, "\n";
				close(PSQL);
			}
		}
	}

	# Collecting pgbouncer statistics
	if ($PGBOUNCER_ARGS) {
		foreach my $c (@METRICS) {
			next if ($c !~ /^pgbouncer_/);
			# create a timestamp to registrer pgbouncer statistics
			my $timestamp = &get_current_timestamp();

			my $sql = '';
			eval { $sql = &{$METRICS_COMMANDS{$c}->{command}}; };
			if (!$sql || $@) {
				if (-f $PIDFILE) {
					unlink("$PIDFILE") or &dprint("ERROR: Unable to remove pid file $PIDFILE, $!\n");
				}
				die "FATAL: no SQL with metric command pgbouncer_stats ($sql).\n" if (!$sql || $@);
			}
			`$PGBOUNCER_PROG -c "$sql" | sed 's/^/$timestamp;/' $FILE_REDIR $OUT_DIR/$METRICS_COMMANDS{$c}->{output}`;
			if ($? != 0) {
				&dprint("LOG: $PGBOUNCER_PROG -c \"$sql\" | sed 's/^/$timestamp;/' $FILE_REDIR $OUT_DIR/$METRICS_COMMANDS{$c}->{output}\n");
				&dprint("ERROR: pgbouncer pool query failure, $!\n");
			}
		}
	}

	# Collecting system statistics
	if (!$DISABLE_SAR && $sar_command && ($sysstat_version > -1)) {
		&dprint(`$sar_command`);
	}

	if (!$CAPTURE) {
		# Lookup if output dir size is upper than limit
		if ($MAX_SIZE) {
			# du output is "SIZE\tPATH"; numeric comparison reads the
			# leading size figure
			my $lsize = `du -s $OUTPUT_DIR`;
			chomp($lsize);
			if ($lsize > $MAX_SIZE) {
				&dprint("LOG: max size limit reach, terminating.\n");
				if (-f $PIDFILE) {
					unlink("$PIDFILE") or &dprint("ERROR: Unable to remove pid file $PIDFILE, $!\n");
				}
				exit 0;
			}
		}

		# Wait next run following the interval value
		my $t1 = time - $t0;
		if ($t1 >= $INTERVAL) {
			print "WARNING: loop took: $t1, you may consider increase the interval ($INTERVAL sec) using -i option.\n";
			# Wait next run following the interval value
			sleep($INTERVAL);
		} else {
			# Wait next run following the interval value minus the loop time
			sleep($INTERVAL - $t1);
		}
	}

	last if ($CAPTURE);
}

if ($COMPRESS) {
	# Wait for last child stop (forked gzip/compress workers)
	&wait_all_childs();
}

if ($CAPTURE) {
	# be sure to be in the parent directory
	chdir($CAPTURE_DIR);
	# Then create a tarball before removing the output directory
	print "INFO: creating archive $CAPTURE_DIR/pgcluu_capture.tar.gz of the capture directory $CAPTURE_DIR/$OUTPUT_DIR.\n";
	`$TAR_PROG pgcluu_capture.tar.gz $OUTPUT_DIR`;
	# Removing temporary directory and ensure that we are not removing anything else
	print "INFO: removing capture temporary directory: $CAPTURE_DIR/$OUTPUT_DIR\n";
	# Guard: only rm -rf the directory name we created ourselves
	if ($OUTPUT_DIR eq 'pgcluu_capture') {
		`rm -rf $OUTPUT_DIR`;
	}
}

if (-f $PIDFILE) {
	unlink("$PIDFILE") or &dprint("ERROR: Unable to remove pid file $PIDFILE, $!\n");
}

exit 0;

####
# Collect one-time cluster information (PG version, extensions, schemas,
# procedures, triggers per database) and append it to sysinfo.txt in the
# current output directory.
####
sub get_initial_info
{
	return if ($NO_DATABASE);

	# Get list of database in the PostgreSQL cluster
	my @alldbs = ();
	if ($#dbnames >= 0) {
		# from command line
		push(@alldbs, @dbnames);
	} else {
		# or by querying the database list
		@alldbs = &get_databases();
	}

	# Store PG version into sysinfo file
	&fetch_version(1);

	# Collect general information about each database
	if (!$NO_SYSINFO && ($#alldbs > -1)) {
		my @extensions = ();
		my @schemas = ();
		my @procs = ();
		my @trigs = ();
		unless(open(OUT, ">>$OUT_DIR/sysinfo.txt")) {
			&dprint("FATAL: can not write into file $OUT_DIR/sysinfo.txt\n");
			exit 1;
		}
		if (&backend_minimum_version(9, 1)) {
			# Look for all installed extensions (pg_extension exists from 9.1)
			print OUT "[EXTENSION]\n";
			foreach my $db (@alldbs) {
				my @ext = &get_extensions($db);
				push(@extensions, "$db=" . join(',', @ext)) if ($#ext >= 0);
			}
			foreach my $e (@extensions) {
				print OUT "$e\n";
			}
		}
		# Look for all schema in a database
		print OUT "[SCHEMA]\n";
		foreach my $db (@alldbs) {
			my @sch = &get_schemas($db);
			push(@schemas, "$db=" . join(',', @sch)) if ($#sch >= 0);
		}
		foreach my $s (@schemas) {
			print OUT "$s\n";
		}
		# Look for all stored procedures in a database
		print OUT "[PROCEDURE]\n";
		foreach my $db (@alldbs) {
			my @pro = &get_proc_name($db);
			push(@procs, "$db=" . join(',', @pro)) if ($#pro >= 0);
		}
		foreach my $p (@procs) {
			print OUT "$p\n";
		}
		# Look for triggers in each database (get_triggers returns a scalar)
		print OUT "[TRIGGER]\n";
		foreach my $db (@alldbs) {
			my $tgs = &get_triggers($db);
			push(@trigs, "$db=$tgs");
		}
		foreach my $t (@trigs) {
			print OUT "$t\n";
		}
		close(OUT);

	}
}

####
# Remove from %METRICS_COMMANDS every metric the monitored PostgreSQL
# version (or the current options) cannot support. Returns the value of
# fetch_version(): the version string, or -1 when no database is reachable.
####
sub verify_action
{
	# Detect PostgreSQL version
	my $ret = &fetch_version();

	# Only pgbouncer metrics and configuration
	if ($ret < 0) {
		foreach my $m (keys %METRICS_COMMANDS) {
			delete $METRICS_COMMANDS{$m} if (($m ne 'configuration_stats') && ($m !~ /pgbouncer/));
		}
		return $ret;
	}
	# Only keep setting and configuration collect
	if ($CAPTURE) {
		foreach my $m (keys %METRICS_COMMANDS) {
			delete $METRICS_COMMANDS{$m} if (($m ne 'configuration_stats') &&
							 ($m ne 'pg_settings_stats') &&
							 ($m ne 'pg_db_role_setting_stats') &&
							 ($m ne 'tablespace_size_stats') &&
							 ($m ne 'database_size_stats') &&
							 ($m ne 'replication_stats')
							);
		}
		return $ret;
	}
	# pg_stat_archiver appeared in 9.4
	if (!&backend_minimum_version(9, 4)) {
		delete $METRICS_COMMANDS{'archiver_stats'};
	}
	if (!&backend_minimum_version(8, 3)) {
		delete $METRICS_COMMANDS{'bgwriter_stats'};
		# NOTE(review): 'pg_buffercache_stats' is not a key of
		# %METRICS_COMMANDS; this delete is a harmless no-op.
		delete $METRICS_COMMANDS{'pg_buffercache_stats'};
	}
	# conflicts and replication views appeared in 9.1
	if (!&backend_minimum_version(9, 1)) {
		delete $METRICS_COMMANDS{'conflict_stats'};
		delete $METRICS_COMMANDS{'replication_stats'};
	}
	if (!&backend_minimum_version(9, 0)) {
		# NOTE(review): 'hot_standby_delay' is not a key of
		# %METRICS_COMMANDS; this delete is a harmless no-op.
		delete $METRICS_COMMANDS{'hot_standby_delay'};
		delete $METRICS_COMMANDS{'user_xact_tables_stats'};
		delete $METRICS_COMMANDS{'all_xact_tables_stats'};
		delete $METRICS_COMMANDS{'xact_functions_stats'};
		delete $METRICS_COMMANDS{'pg_db_role_setting_stats'};
	}
	if (!&backend_minimum_version(8, 4)) {
		delete $METRICS_COMMANDS{'functions_stats'};
		delete $METRICS_COMMANDS{'redundant_indexes_stats'};
		delete $METRICS_COMMANDS{'missing_fkindexes_stats'};
	}
	if (!&backend_minimum_version(8, 1)) {
		delete $METRICS_COMMANDS{'xlog_stats'};
	}
	$DB_INFO{pg_stat_statement} = &has_pgstatstatements();
	if ($NO_STATEMENTS || !$DB_INFO{pg_stat_statement}) {
		delete $METRICS_COMMANDS{'statements_stats'};
	}
	# xlog directory listing requires superuser privileges
	if (!&is_superuser($DBUSER)) {
		delete $METRICS_COMMANDS{'xlog_stats'};
	}
	if ($NOTABLESPACE) {
		delete $METRICS_COMMANDS{'tablespace_size_stats'};
	}

	# Check if the connection database has pg_buffercache installed
	if (!$USE_BUFFERCACHE || !&has_pg_buffercache()) {
		delete $METRICS_COMMANDS{'database_buffercache_stats'};
		delete $METRICS_COMMANDS{'database_usagecount_stats'};
		delete $METRICS_COMMANDS{'relation_buffercache_stats'};
		delete $METRICS_COMMANDS{'database_isdirty_stats'};
	} 

	return $ret;
}

####
# Build the SQL script piped to psql for one collect iteration.
# With no argument: cluster-wide metrics (those without the 'repeat' flag).
# With a database name: per-database metrics (those with 'repeat' set).
# Each metric redirects its COPY output (\o | cat) into its CSV file.
# Returns the script text, or nothing when a metric command yields no SQL.
####
sub create_sql_script
{
	my $db = shift;

	# Prepare the SQL script
	my $sql_queries = '';
	foreach my $type (sort {$a cmp $b} keys %METRICS_COMMANDS) {
		# Select cluster-wide vs per-database metrics depending on $db
		next if (!$db && $METRICS_COMMANDS{$type}->{repeat});
		next if ($db && !$METRICS_COMMANDS{$type}->{repeat});
		# pgbouncer and configuration collects are handled outside psql
		next if ($type =~ /^pgbouncer_|configuration_/);
		# user_*/all_* variants are filtered by --stat-type
		next if (($type =~ /^(user|all)_/) && ($type !~ /^$STAT_TYPE/)); 
		next if (($#METRICS >= 0) && !grep(/^$type$/, @METRICS));

		# 'command' holds a sub name, called by symbolic reference
		my $sql = $METRICS_COMMANDS{$type}->{command}->();
		if (!$sql) {
			return;
		}
		if (&backend_minimum_version(8, 2)) {
			$sql_queries .= <<EOF
-- $type
\\o | cat $FILE_REDIR $OUT_DIR/$METRICS_COMMANDS{$type}->{output}
COPY ($sql) TO STDOUT CSV DELIMITER ';';

EOF
		} else {
			# 8.1 and lower doesn't support select statement into COPY
			# we use a temporary table instead
			$sql_queries .= <<EOF
-- $type
\\o | cat $FILE_REDIR $OUT_DIR/$METRICS_COMMANDS{$type}->{output}
BEGIN;
CREATE TEMPORARY TABLE __pgcluu as $sql;
COPY __pgcluu TO STDOUT CSV DELIMITER ';';
ROLLBACK;

EOF
		}
	}

	# Tag the session so it is identifiable in pg_stat_activity
	if ($sql_queries && &backend_minimum_version(9, 0)) {
		$sql_queries = "SET application_name TO pgcluu;\n" . $sql_queries;
	}
	return $sql_queries
}

####
# Determine the PostgreSQL version, either from the --pgversion option or
# by querying the server. Fills $DB_INFO{major}/$DB_INFO{minor} and, when
# called with a true argument, appends the version banner to sysinfo.txt.
# Returns "major.minor" on success, -1 when the version cannot be obtained.
####
sub fetch_version
{
	my $save = shift;

	# A version forced on the command line short-circuits the lookup
	if ($PG_VERSION) {
		if ($PG_VERSION =~ /^(\d+)\.(\d+)/) {
			$DB_INFO{major} = $1;
			$DB_INFO{minor} = $2;
			return "$1.$2";
		}
	}

	return -1 if ($NO_DATABASE);

	my $pg_ver = `$PSQL_PROG -c "SELECT version();" 2>&1`;
	if ($? != 0) {
		# Only stop here if there's no pgbouncer querying
		if (!$PGBOUNCER_ARGS) {
			&dprint("FATAL: psql error. $pg_ver\n");
			# remove the pidfile
			if (-f $PIDFILE) {
				unlink $PIDFILE or &dprint("ERROR: Unable to remove pidfile: $!\n");
			}
			exit 0;
		} else {
			return -1;
		}
	}
	chomp($pg_ver);

	my ($major, $minor);
	if ($pg_ver =~ /^PostgreSQL (\d+)\.(\d+)/) {
		($major, $minor) = ($1, $2);
		$DB_INFO{major} = $major;
		$DB_INFO{minor} = $minor;
	}

	if (!$NO_SYSINFO && $save) {
		unless(open(OUT, ">>$OUT_DIR/sysinfo.txt")) {
			&dprint("FATAL: can not write into file $OUT_DIR/sysinfo.txt\n");
			exit 1;
		}
		print OUT "[PGVERSION]\n";
		print OUT "$pg_ver\n";
		close(OUT);
	}

	# BUGFIX: the original returned "$1.$2" unconditionally, which yields
	# stale capture variables when the version banner did not match the
	# regex above. Report failure (-1) instead, like the other error paths.
	return -1 unless (defined $major);
	return "$major.$minor";
}

####
# Retrieve installed extension for a given database.
####
sub get_extensions
{
	my $database = shift;

	# Clone the global psql command and point it at the requested
	# database: rewrite an existing -d flag or append a new one,
	# quoting names with characters outside [a-z0-9_-].
	my $psql_cmd = $PSQL_PROG;
	if ($database && ($psql_cmd !~ s/-d $dbnames[0]/-d $database/i)) {
		$psql_cmd .= ($database =~ /^[a-z0-9\_\-]+$/i)
				? " -d $database"
				: " -d '$database'";
	}
	my @db_extension = `$psql_cmd -c "SELECT extname||'-'||extversion FROM pg_extension;"`;
	unless ($? == 0) {
		&dprint("FATAL: psql error.\n");
		# remove the pidfile
		if (-f $PIDFILE) {
			unlink $PIDFILE or &dprint("ERROR: Unable to remove pidfile: $!\n");
		}
		exit 0;
	}
	chomp @db_extension;

	return @db_extension;
}

####
# Get the list of database that are not template and that allow connction
####
sub get_databases
{
	# Ask the server for every connectable, non-template database name.
	my @dbs = `$PSQL_PROG -c "SELECT datname FROM pg_database WHERE NOT datistemplate AND datallowconn;"`;
	unless ($? == 0) {
		&dprint("FATAL: psql error.\n");
		# remove the pidfile
		if (-f $PIDFILE) {
			unlink $PIDFILE or &dprint("ERROR: Unable to remove pidfile: $!\n");
		}
		exit 0;
	}
	chomp @dbs;

	return @dbs;
}

####
# Retrieve schemas for a given database.
####
sub get_schemas
{
	# Return the non-system schema names of the given database (or of
	# the default connection database when none is given).
	my $database = shift;

	my $LOCAL_PSQL_PROG = $PSQL_PROG;
	# Swap the default database on the psql command line for the wanted
	# one. BUG FIX: \Q..\E quotes the first database name, which comes
	# from user input and may contain regex metacharacters.
	if ($database && ($LOCAL_PSQL_PROG !~ s/-d \Q$dbnames[0]\E/-d $database/i)) {
		if ($database !~ /^[a-z0-9\_\-]+$/i) {
			$LOCAL_PSQL_PROG .= " -d '$database'";
		} else {
			$LOCAL_PSQL_PROG .= " -d $database";
		}
	}
	my @db_schema = `$LOCAL_PSQL_PROG -c "SELECT nspname FROM pg_namespace WHERE nspname !~ '^pg_' AND nspname <> 'information_schema' ORDER BY 1"`;
	if ($? != 0) {
		&dprint("FATAL: psql error.\n");
		# remove the pidfile
		if (-f $PIDFILE) {
			unlink $PIDFILE or &dprint("ERROR: Unable to remove pidfile: $!\n");
		}
		exit 0;
	}
	chomp(@db_schema);

	return @db_schema;
}

####
# Check if the database has the pg_buffercache extension
####
sub has_pg_buffercache
{
	# Probe for the pg_buffercache extension by looking up its backing
	# function in pg_proc; returns the function name (true) when
	# installed, an empty string otherwise.
	my $found = `$PSQL_PROG -c "SELECT proname FROM pg_proc WHERE proname = 'pg_buffercache_pages';"`;
	if ($? != 0) {
		&dprint("FATAL: psql error.\n");
		# drop the pidfile before bailing out
		if (-f $PIDFILE) {
			unlink $PIDFILE or &dprint("ERROR: Unable to remove pidfile: $!\n");
		}
		exit 0;
	}
	chomp($found);

	return $found;
}


####
# Check if the database has the pg_stat_statements extension
####
sub has_pgstatstatements
{
	# Probe for the pg_stat_statements extension; returns the
	# schema-qualified function name (true) when installed, an empty
	# string otherwise.
	my $found = `$PSQL_PROG -c "SELECT n.nspname||'.'||p.proname FROM pg_proc p, pg_namespace n WHERE p.proname='pg_stat_statements' AND p.pronamespace=n.oid;"`;
	if ($? != 0) {
		&dprint("FATAL: psql error.\n");
		# drop the pidfile before bailing out
		if (-f $PIDFILE) {
			unlink $PIDFILE or &dprint("ERROR: Unable to remove pidfile: $!\n");
		}
		exit 0;
	}
	chomp($found);

	return $found;
}

sub is_superuser
{
	# Check whether the given connection user is a superuser; caches the
	# answer in $DB_INFO{is_superuser} ('1' when superuser, '' otherwise).
	my $usr = shift;

	# Assume it is superuser per default. In any case this will raise a psql error.
	return 1 if (!$usr);

	# BUG FIX: escape single quotes so a user name containing a quote
	# cannot break out of the SQL string literal.
	my $quoted_usr = $usr;
	$quoted_usr =~ s/'/''/g;

	$DB_INFO{is_superuser} = `$PSQL_PROG -c "SELECT 1 FROM pg_user WHERE usename='$quoted_usr' AND usesuper"`;
	chomp($DB_INFO{is_superuser});

	return $DB_INFO{is_superuser};
}

sub sysstat_version
{
	# Return the sysstat major version number, or -1 when sar is
	# disabled, fails to run or reports an unexpected banner.
	return -1 if ($DISABLE_SAR);

	my $command = "LC_ALL=C $SAR_PROG -V 2>&1";
	# Run on the remote host when ssh collection is enabled.
	$command = $sshcmd . ' "' . $command . '"' if ($sshcmd);
	my $banner = `$command | grep "sysstat version"`;
	if ($?) {
		&dprint("ERROR: $SAR_PROG execution failure: $!\n");
		return -1;
	}
	return $1 if ($banner =~ m!^sysstat version (\d+)\.!);

	return -1;
}

sub grab_os_information
{
	# Collect hardware, kernel, filesystem and process information and
	# append it to $OUT_DIR/sysinfo.txt, one "[SECTION]" block per probe.
	# Each command is run through ssh when remote collection is enabled
	# ($sshcmd); output filtering is done in Perl so it always happens
	# locally, as before.

	# Look at CPU informations
	my @cpuinfo = &_os_cmd_output('sysctl hw.ncpu hw.model hw.cpuspeed 2>/dev/null');
	# Look at kernel informations
	my $kernel_info = join('', &_os_cmd_output('uname -a  2>/dev/null'));
	# Look at memory informations
	my @meminfo = &_os_cmd_output('cat /proc/meminfo 2>/dev/null');
	# Look at filesystem informations
	my @dfinfo = &_os_cmd_output('df -h 2>/dev/null');
	# Mount informations
	my @mountinfo = &_os_cmd_output('mount -l 2>/dev/null');
	# Fstab informations, comment lines stripped
	my @fstabinfo = grep(!/^#/, &_os_cmd_output('cat /etc/fstab 2>/dev/null'));
	# PCI information
	my @pciinfo = &_os_cmd_output('lspci 2>/dev/null');
	# Release informations. BUG FIX: this probe previously always ran
	# locally, even when every other probe was sent through ssh.
	my @releaseinfo = &_os_cmd_output('uname -srv 2>/dev/null');
	# Process list
	my @pslist = &_os_cmd_output('ps -faux 2>/dev/null');
	# System kernel tuning parameters of interest
	my @system = grep(/vm.nr_hugepages |vm.overcommit|vm.dirty_.*(ratio|bytes)|swappiness|zone_reclaim_mode|shmmax|shmall/, &_os_cmd_output('/sbin/sysctl -a 2>/dev/null'));
	unless(open(OUT, ">>$OUT_DIR/sysinfo.txt")) {
		&dprint("FATAL: can not write into file $OUT_DIR/sysinfo.txt\n");
		exit 1;
	}
	# Transparent huge pages settings are read directly from /sys.
	foreach my $thp ('enabled', 'defrag', 'khugepaged/defrag') {
		my $ret = join('', &_os_cmd_output("cat /sys/kernel/mm/*transparent_hugepage/$thp 2>/dev/null"));
		push(@system, "/sys/kernel/mm/transparent_hugepage/$thp: " . $ret);
	}
	print OUT "[CPU]\n";
	print OUT @cpuinfo;
	print OUT "[KERNEL]\n";
	print OUT $kernel_info;
	print OUT "[MEMORY]\n";
	print OUT @meminfo;
	print OUT "[DF]\n";
	print OUT @dfinfo;
	print OUT "[MOUNT]\n";
	print OUT @mountinfo;
	print OUT "[FSTAB]\n";
	print OUT @fstabinfo;
	print OUT "[PCI]\n";
	print OUT @pciinfo;
	print OUT "[RELEASE]\n";
	print OUT @releaseinfo;
	print OUT "[SYSTEM]\n";
	print OUT @system;
	print OUT "[PROCESS]\n";
	print OUT @pslist;

	close(OUT);
}

####
# Run a shell command, wrapped through ssh when $sshcmd is set, and
# return its output lines.
####
sub _os_cmd_output
{
	my $cmd = shift;

	$cmd = $sshcmd . ' "' . $cmd . '"' if ($sshcmd);
	return `$cmd`;
}

# Print the command line usage message and exit with status 1.
sub usage
{
    print qq{
usage: $PROGRAM [options] output_dir

	output_dir: full path to directory where pgcluu_collectd will
		    store statistics.

options:

  -B, --enable-buffercache enable buffercache statistics if pg_buffercache
			   extension is installed.
  -c, --capture            create a snapshot of the PostgreSQL installation
                           into tmp/pgcluu_capture.tar.gz.
  -C, --end-counter=NUM    terminate program after NUM reports.
  -d, --dbname=DATABASE    database name to connect to. Default to current user.
  -D, --daemonize          detach from console and enter in daemon mode.
  -E, --end-after=SECOND   self terminate program after a given number of seconds.
                           Can be written: 7200 or 120M or 2H, for days use 7D for
                           example to stop collecting data after seven days. 
  -f, --pid-file=FILE      path to pid file. Default: $PIDFILE.
  -h, --host=HOSTNAME      database server host or socket directory
  -i, --interval=NUM       time to wait between runs
  -k, --kill		   stop current $PROGRAM running daemon.
  -m, --metric=METRIC      set a comma separated list of metrics to perform.
  -M, --max-size=SIZE      self terminate program when the size of the output dir
                           exceed a given size. Can be written: 2GB or 2000MB. 
  -p, --port=PORT          database port(s) to connect to. Defaults to 5432.
  -P, --psql=BIN           path to the psql command. Default: $PSQL_PROG.
  -Q, --no-statement       do not collect queries statistics from pg_stat_statements.
  -r, --rotate-daily       force the daily rotation of data files.
  -R, --rotate-hourly      force the hourly rotation of data files.
  -s, --sar=BIN            path to sar sysstat command. Default: $SAR_PROG.
  -S, --disable-sar        disable collect of system statistics with sar.
  -T, --notablespace       disable lookup at tablespace when the connect user
			   is not superuser to avoid printing an error message.
  -U, --dbuser=USERNAME    database user to connect as. Default to current user.
  -v, --verbose            Print out debug informations.
  -V, --version            Show pgcluu_collectd version and exit.
  -W, --password=pass      database password.
  -z, --compress           force compression of rotated data files.
  --included-db=DATABASE   collect statistics only for those databases present
                           in a comma separated list of database names.
  --list-metric            list available metrics actions that can be performed.
  --sysinfo                get operating system information and exit (sysinfo.txt).
  --no-sysinfo             do not collect operating system information at all. 
  --no-database            do not collect database statistics at all.
  --pgbouncer-args=OPTIONS Option to used to connect to the pgbouncer system
			   database. Ex: -p 6432 -U postgres -h 192.168.1.100
                           You must at least give one parameter to enable
                           pgbouncer monitoring.
  --sar-file=FILE          path to sar output data file for sysstat stats
                           Default to output_dir/sar_stats.dat.
  --stat-type all|user     Set stats tables to read. Values: 'all' or 'user' to
			   look at pg_stat_(all|user) tables. Default: user.
  --pgversion X.Y          force the PostgreSQL version to the given value.
  --pgservice NAME         Name of service inside of the pg_service.conf file.
  --exclude-time RANGE     exclude a laps of time by giving the start and end
			   hours.
  --help                   print usage

Use those options to execute sar on the remote host defined by the -h option,
otherwise it will be executed locally:

  --enable-ssh             activate the use of ssh to run sysstat remotely.
  --ssh-program ssh        path to the ssh program to use. Default: ssh.
  --ssh-user username      connection login name. Default to running user.
  --ssh-identity file      path to the identity file to use.
  --ssh-timeout second     timeout to ssh connection failure. Default 10 seconds.
  --ssh-options  options   list of -o options to use for the ssh connection. Options
			   always used:
				 -o ConnectTimeout=\$ssh_timeout
				 -o PreferredAuthentications=hostbased,publickey

For example, as postgres user to monitor locally a full PostgreSQL cluster:

	mkdir /tmp/stat_db1/
	pgcluu_collectd -D -i 60 /tmp/stat_db1/

to collect statistics from pgbouncer too, and limit database statistics to a single
database:

	pgcluu_collectd -D -i 60 /tmp/stat_db1/ -h 10.10.1.1 -U postgres -d mydb \
		--pgbouncer-args='-p 5342'

to disable statistics collect between 22:30 and 06:30 the next day:

	pgcluu_collectd -D -i 60 /tmp/stat_db1/ --exclude-time "22:30-06:30"

to collect statistics from a remote server:

	pgcluu_collectd -D -i 60 /tmp/stat_db1/ -h 10.0.0.1 -U postgres --disable-sar

the same but with collecting system statistics using remote sar calls:

	pgcluu_collectd -D -i 60 /tmp/stat_db1/ -h 10.0.0.1 -U postgres --enable-ssh \
		--ssh-user postgres --ssh-identity /var/lib/postgresql/.ssh/id_rsa.pub

You may need a .pgpass and be able to establish passwordless ssh connections to be
able to collect statistics from remote hosts.

Then after some time and activities on the database, stop the daemon as follow:

        pgcluu_collectd -k

or by sending sigterm to the pgcluu_collectd's pid.
};
    exit 1;
}

sub dprint
{
	# Print each message; messages prefixed with "DEBUG:" are dropped
	# unless verbose mode ($DEBUG) is enabled.
	foreach my $msg (@_) {
		print $msg unless ($msg =~ /^DEBUG:/s && !$DEBUG);
	}
}

# Compare given major and minor numbers to the one of the connected server
sub backend_minimum_version
{
	my ($major, $minor) = @_;

	return if ($major eq '');
	return if ($minor eq '');

	return ($DB_INFO{major} > $major) || (($DB_INFO{major} == $major) && ($DB_INFO{minor} >= $minor));
}

sub dump_pgstatactivity
{
	# Build the pg_stat_activity snapshot query. Most columns appeared
	# (or were renamed) at different PostgreSQL releases, so each %s
	# placeholder is filled with a version-dependent fragment below, in
	# the same order as the placeholders. On 9.0+ the collector's own
	# session is excluded via application_name = 'pgcluu'.
	my $sql = sprintf(
		"SELECT date_trunc('seconds', now()), datid, datname, %s, " .
		"usesysid, usename, %s%s%s%s%s" .
		"date_trunc('seconds', query_start) AS query_start, " .
		"%s%s%s " .
		"FROM pg_stat_activity %s" .
		"ORDER BY %s",
		&backend_minimum_version(9, 2) ? "pid" : "procpid",
		&backend_minimum_version(9, 0) ? "application_name, " : "",
		&backend_minimum_version(8, 1) ? "client_addr, " : "",
		&backend_minimum_version(9, 1) ? "client_hostname, " : "",
		&backend_minimum_version(8, 1) ? "client_port, date_trunc('seconds', backend_start) AS backend_start, " : "",
		&backend_minimum_version(8, 3) ? "date_trunc('seconds', xact_start) AS xact_start, " : "",
		&backend_minimum_version(9, 2) ? "state_change, " : "",
		&backend_minimum_version(8, 2) ? "waiting, " : "",
		&backend_minimum_version(9, 2) ? "query" : "current_query",
		&backend_minimum_version(9, 0) ? "WHERE application_name != 'pgcluu' " : "",
		&backend_minimum_version(9, 2) ? "pid" : "procpid"
	);

	return $sql;
}

sub dump_pgstatbgwriter
{
	# Background writer / checkpoint activity counters; newer releases
	# add checkpoint timing (9.2) and backend fsync / stats_reset (9.1).
	my $ckpt_timing   = &backend_minimum_version(9, 2) ? "checkpoint_write_time, checkpoint_sync_time, " : "";
	my $backend_fsync = &backend_minimum_version(9, 1) ? "buffers_backend_fsync, " : "";
	my $stats_reset   = &backend_minimum_version(9, 1) ? ", date_trunc('seconds', stats_reset) AS stats_reset " : "";

	return "SELECT date_trunc('seconds', now()), checkpoints_timed, " .
		"checkpoints_req, ${ckpt_timing}buffers_checkpoint, buffers_clean, " .
		"maxwritten_clean, buffers_backend, ${backend_fsync}buffers_alloc$stats_reset " .
		"FROM pg_stat_bgwriter ";
}

sub dump_pgstatdatabase
{
	# Per-database activity counters; the optional column groups were
	# introduced in 8.3 (tuple counters), 9.1 (conflicts/stats_reset)
	# and 9.2 (temp files, deadlocks, block timing).
	my $tuple_cols    = &backend_minimum_version(8, 3) ? ", tup_returned, tup_fetched, tup_inserted, tup_updated, tup_deleted" : "";
	my $conflict_cols = &backend_minimum_version(9, 1) ? ", conflicts, date_trunc('seconds', stats_reset) AS stats_reset" : "";
	my $temp_cols     = &backend_minimum_version(9, 2) ? ", temp_files, temp_bytes, deadlocks, blk_read_time, blk_write_time" : "";

	return "SELECT date_trunc('seconds', now()), datid, datname, " .
		"numbackends, xact_commit, xact_rollback, blks_read, blks_hit" .
		"$tuple_cols$conflict_cols$temp_cols " .
		"FROM pg_stat_database " .
		"ORDER BY datname";
}

sub dump_pgtablespace_size
{
	# Per-tablespace size and on-disk location. The built-in pg_default
	# and pg_global tablespaces have no explicit location, so it is
	# derived from the data_directory setting instead. The location
	# accessor changed in 9.2 (pg_tablespace_location vs spclocation).
	my $loc_expr = &backend_minimum_version(9, 2) ? "pg_tablespace_location(oid)" : "spclocation";

	return "SELECT date_trunc('seconds', now()), spcname, " .
		"pg_tablespace_size(spcname), " .
		"CASE WHEN $loc_expr = '' THEN CASE WHEN spcname = 'pg_default' THEN (select setting from pg_settings where name='data_directory')||'/base' ELSE (select setting from pg_settings where name='data_directory')||'/global' END ELSE $loc_expr END as tablespace_location " .
		"FROM pg_tablespace " .
		"ORDER BY spcname";
}

sub dump_pgstatdatabaseconflicts
{
	# Recovery-conflict counters per database (9.1+ view).
	# sprintf() removed: the template contained no conversion, it was a
	# plain constant string.
	return "SELECT date_trunc('seconds', now()), * " .
		"FROM pg_stat_database_conflicts " .
		"ORDER BY datname";
}

sub dump_pgstatreplication
{
	# Streaming replication status as seen from the master, one row per
	# WAL sender, ordered by application_name. The process id column was
	# renamed from procpid to pid in 9.2.
	my $pid_column = &backend_minimum_version(9, 2) ? "pid" : "procpid";

	return "SELECT date_trunc('seconds', now()), $pid_column, usesysid, usename, " .
		"application_name, client_addr, client_hostname, client_port, " .
		"date_trunc('seconds', backend_start) AS backend_start, state, " .
		"pg_current_xlog_location() AS master_location, " .
		"sent_location, write_location, flush_location, replay_location, " .
		"sync_priority, " .
		"sync_state " .
		"FROM pg_stat_replication " .
		"ORDER BY application_name";
}

# Convenience wrapper restricted to user tables.
sub dump_pgstattables_user { return dump_pgstattables('user'); };

# Build the pg_stat_(all|user)_tables snapshot query. The optional
# column groups appeared progressively (8.2/8.3/9.1) and are spliced in
# via the %s placeholders, in order.
# $type: 'all' or 'user' table family; defaults to 'all'.
sub dump_pgstattables
{
	my $type = shift;

	$type ||= 'all';

	my $sql = sprintf(
		"SELECT date_trunc('seconds', now()), current_database(), relid, schemaname, relname, " .
		"seq_scan, seq_tup_read, idx_scan, idx_tup_fetch, n_tup_ins, " .
		"n_tup_upd, n_tup_del" .
		"%s" .
		"%s" .
		"%s" .
		" FROM pg_stat_${type}_tables " .
		"WHERE schemaname <> 'information_schema' " .
		"ORDER BY schemaname, relname",
		&backend_minimum_version(8, 3) ? ", n_tup_hot_upd, n_live_tup, n_dead_tup" : "",
		&backend_minimum_version(8, 2) ? ", date_trunc('seconds', last_vacuum) AS last_vacuum, date_trunc('seconds', last_autovacuum) AS last_autovacuum, date_trunc('seconds',last_analyze) AS last_analyze, date_trunc('seconds',last_autoanalyze) AS last_autoanalyze" : "",
		&backend_minimum_version(9, 1) ? ", vacuum_count, autovacuum_count, analyze_count, autoanalyze_count" : ""
	);

	return $sql;
}

# Convenience wrapper restricted to user indexes.
sub dump_pgstatindexes_user { return dump_pgstatindexes('user'); };

# Snapshot query for pg_stat_(all|user)_indexes, prefixed with the
# sample timestamp and current database name.
# $type: 'all' or 'user'; defaults to 'all'.
sub dump_pgstatindexes
{
	my $type = shift;

	$type ||= 'all';

	# sprintf() removed: the template contained no conversion, it was a
	# plain (interpolated) string.
	return "SELECT date_trunc('seconds', now()), current_database(), * " .
		"FROM pg_stat_${type}_indexes " .
		"WHERE schemaname <> 'information_schema' " .
		"ORDER BY schemaname, relname";
}

# Convenience wrapper restricted to user tables.
sub dump_pgstatiotables_user { return dump_pgstatiotables('user'); };

# Snapshot query for pg_statio_(all|user)_tables, prefixed with the
# sample timestamp and current database name.
# $type: 'all' or 'user'; defaults to 'all'.
sub dump_pgstatiotables
{
	my $type = shift;

	$type ||= 'all';

	# sprintf() removed: the template contained no conversion, it was a
	# plain (interpolated) string.
	return "SELECT date_trunc('seconds', now()), current_database(), * " .
		"FROM pg_statio_${type}_tables " .
		"WHERE schemaname <> 'information_schema' " .
		"ORDER BY schemaname, relname";
}

# Convenience wrapper restricted to user indexes.
sub dump_pgstatioindexes_user { return dump_pgstatioindexes('user'); };

# Snapshot query for pg_statio_(all|user)_indexes, prefixed with the
# sample timestamp and current database name.
# $type: 'all' or 'user'; defaults to 'all'.
sub dump_pgstatioindexes
{
	my $type = shift;

	$type ||= 'all';

	# sprintf() removed: the template contained no conversion, it was a
	# plain (interpolated) string.
	return "SELECT date_trunc('seconds', now()), current_database(), * " .
		"FROM pg_statio_${type}_indexes " .
		"WHERE schemaname <> 'information_schema' " .
		"ORDER BY schemaname, relname";
}

# Convenience wrapper restricted to user sequences.
sub dump_pgstatiosequences_user { return dump_pgstatiosequences('user'); };

# Snapshot query for pg_statio_(all|user)_sequences, prefixed with the
# sample timestamp and current database name.
# $type: 'all' or 'user'; defaults to 'all'.
sub dump_pgstatiosequences
{
	my $type = shift;

	$type ||= 'all';

	# sprintf() removed: the template contained no conversion, it was a
	# plain (interpolated) string.
	return "SELECT date_trunc('seconds', now()), current_database(), * " .
		"FROM pg_statio_${type}_sequences " .
		"WHERE schemaname <> 'information_schema' " .
		"ORDER BY schemaname, relname";
}

sub dump_pgstatuserfunctions
{
	# User-defined function call statistics snapshot.
	# sprintf() removed: the template contained no conversion, it was a
	# plain constant string.
	return "SELECT date_trunc('seconds', now()), current_database(), * " .
		"FROM pg_stat_user_functions " .
		"WHERE schemaname <> 'information_schema' " .
		"ORDER BY schemaname, funcname";
}

sub dump_pgclass_size
{
	# Per-relation tuple/page counts from pg_class for every non-system
	# schema; on 8.1+ the physical relation size is appended too.
	my $size_col = &backend_minimum_version(8, 1) ? ", pg_relation_size(c.oid)" : "";

	return "SELECT date_trunc('seconds', now()), current_database(), n.nspname, c.relname, c.relkind, c.reltuples, c.relpages$size_col " .
		"FROM pg_class c, pg_namespace n " .
		"WHERE n.oid=c.relnamespace AND n.nspname <> 'information_schema' AND n.nspname <> 'pg_catalog' " .
		"ORDER BY n.nspname, c.relname";
}

sub dump_pgstatstatements
{
	# Build the pg_stat_statements snapshot query. The query text is
	# flattened to a single line and embedded semicolons are replaced by
	# the #SMCLN# token so the delimited output file stays parseable.
	# $DB_INFO{pg_stat_statement} holds the schema-qualified view name
	# (presumably set from has_pgstatstatements() -- confirm in caller).
	my $sql = sprintf(
		"SELECT date_trunc('seconds', now()), r.rolname, d.datname, " .
		"regexp_replace(regexp_replace(query, E'[ \\n]+', ' ', 'g'), E';', '#SMCLN#', 'g'), calls, total_time, rows, " .
		"shared_blks_hit, shared_blks_read, shared_blks_written, " .
		"local_blks_hit, local_blks_read, local_blks_written, " .
		"temp_blks_read, temp_blks_written " .
		"FROM $DB_INFO{pg_stat_statement} q, pg_database d, pg_roles r " .
		"WHERE q.userid=r.oid and q.dbid=d.oid " .
		"ORDER BY r.rolname, d.datname"
	);

	return $sql;
}

sub dump_xlog_stat
{
	# Count WAL segment files in pg_xlog and split them into recycled
	# (kept for reuse) versus freshly written, together with the
	# theoretical maximum segment count derived from the checkpoint/WAL
	# settings of the running release.
	my $sql = '';

	if (&backend_minimum_version(8, 4)) {
		# 8.4+: window functions are available; a segment is considered
		# recycled when its name sorts after the most recently modified
		# segment within the window.
		$sql = sprintf(
			"SELECT date_trunc('seconds', now()), count(*) AS num_file, " .
			"pg_xlogfile_name(pg_current_xlog_location()) AS current, " .
			"sum(is_recycled::int) AS is_recycled, " .
			"sum((NOT is_recycled)::int) AS written, " .
			"%s " .
			"FROM ( " .
			"SELECT file > first_value(file) OVER w AS is_recycled " .
			"%s " .
			"FROM pg_ls_dir('pg_xlog') as file " .
			"WHERE file ~ '^[0-9A-F]{24}\$' " .
			"WINDOW w AS ( " .
			"ORDER BY (pg_stat_file('pg_xlog/'||file)).modification " .
			"DESC " .
			") " .
			") AS t " .
			"GROUP BY 6 ",
			&backend_minimum_version(9, 0) ? "CASE WHEN max_wal1 > max_wal2 THEN max_wal1 ELSE max_wal2 END AS max_wal" : "1 + ( current_setting('checkpoint_segments')::float4 * ( 2 + current_setting('checkpoint_completion_target')::float4 )) AS max_wal",
			&backend_minimum_version(9, 0) ? ",1 + ( current_setting('checkpoint_segments')::float4 * ( 2 + current_setting('checkpoint_completion_target')::float4 )) AS max_wal1, 1 + current_setting('wal_keep_segments')::float4 + current_setting('checkpoint_segments')::float4 AS max_wal2" : ""
		);
	} else {
		# Pre-8.4: same idea without window functions, using a scalar
		# subselect to locate the most recently modified segment.
		$sql = sprintf(
			"SELECT date_trunc('seconds', now()), count(*) AS num_file, " .
			"%s AS current, " .
			"sum(recycled::int) AS is_recycled, " .
			"sum((NOT recycled)::int) AS written, " .
			"%s " .
			"FROM ( " .
			"SELECT file, file > ( " .
			"SELECT s.f " .
			"FROM pg_ls_dir('pg_xlog') AS s(f) " .
			"ORDER BY (pg_stat_file('pg_xlog/'||s.f)).modification DESC " .
			"LIMIT 1 " .
			") AS recycled " .
			"FROM pg_ls_dir('pg_xlog') AS file " .
			"WHERE file ~ '^[0-9A-F]{24}\$' " .
			") AS t  ",
			&backend_minimum_version(8, 2) ? "pg_xlogfile_name(pg_current_xlog_location())" : "'-'::text",
			&backend_minimum_version(8, 3) ? "1 + ( current_setting('checkpoint_segments')::float4 * ( 2 + current_setting('checkpoint_completion_target')::float4 ))" : "1 + (current_setting('checkpoint_segments')::integer * 2)"
		);
	}

	return $sql;
}

sub dump_pgdatabase_size
{
	# Per-database on-disk size snapshot.
	# sprintf() removed: the template contained no conversion, it was a
	# plain constant string.
	return "SELECT date_trunc('seconds', now()), datid, datname, pg_database_size(datid) AS size " .
		"FROM pg_stat_database " .
		"ORDER BY datname";
}

sub dump_pgstatconnections
{
	# Per-database backend counts: total, active, waiting and idle in
	# transaction. The state column replaced current_query inspection in
	# 9.2 and pid was renamed from procpid; the collector's own backend
	# is excluded via pg_backend_pid(). The literal % characters live in
	# the sprintf ARGUMENTS (not the format), so they are emitted as-is.
	my $sql = sprintf(
		"SELECT date_trunc('seconds', now()), COUNT(*) AS total, coalesce(SUM((%s)::integer), 0) AS active, " .
		"%s AS waiting, " .
		"coalesce(SUM((%s)::integer), 0) AS idle_in_xact, pg_database.datname AS datname " .
		"FROM pg_stat_activity JOIN pg_database ON (pg_database.oid=pg_stat_activity.datid) " .
		"WHERE %s <> pg_backend_pid() GROUP BY pg_database.datname" ,
		&backend_minimum_version(9, 2) ? "state NOT LIKE 'idle%'" : "current_query NOT IN ('<IDLE>','<IDLE> in transaction')",
		&backend_minimum_version(8, 2) ? "coalesce(SUM(waiting::integer), 0)" : "0::integer",
		&backend_minimum_version(9, 2) ? "state = 'idle in transaction'" : "current_query = '<IDLE> in transaction'",
		&backend_minimum_version(9, 2) ? "pid" : "procpid" ,
	);

	return $sql;
}

# Lock counts grouped by lock type.
# sprintf() removed from the three lock dumpers below: the templates
# contained no conversion, they were plain constant strings.
sub dump_pgstatlocktypes
{
	return "SELECT date_trunc('seconds', now()), current_database(), 'lock_type'::text as label, " .
		"locktype, count(locktype) as count " .
		"FROM pg_locks GROUP BY locktype";
}

# Lock counts grouped by lock mode.
sub dump_pgstatlockmodes
{
	return "SELECT date_trunc('seconds', now()), current_database(), 'lock_mode'::text as label, " .
		"mode, count(mode) as count " .
		"FROM pg_locks GROUP BY mode";
}

# Lock counts grouped by granted flag.
sub dump_pgstatlockgranted
{
	return "SELECT date_trunc('seconds', now()), current_database(), 'lock_granted'::text as label, " .
		"granted, count(granted) as count " .
		"FROM pg_locks GROUP BY granted";
}





# pgbouncer admin-console command listing per-pool connection counts.
# sprintf() removed from both helpers: the templates contained no
# conversion, they were plain constant strings.
sub dump_pgbouncerpoolstats
{
	return "SHOW POOLS";
}

# pgbouncer admin-console command listing per-database traffic totals.
sub dump_pgbouncerquerystats
{
	return "SHOW STATS";
}



sub get_current_timestamp
{
	# Return the current local time formatted as "YYYY-MM-DD HH:MM:SS".
	# Uses POSIX::strftime (POSIX is already imported by this script)
	# instead of rebuilding the string from localtime() fields by hand.
	return strftime('%Y-%m-%d %H:%M:%S', localtime(time));
}

sub dump_pgstatxactuserfunctions
{
	# Current-transaction function call statistics (9.1+ view).
	# sprintf() removed: the template contained no conversion, it was a
	# plain constant string.
	return "SELECT date_trunc('seconds', now()), * " .
		"FROM pg_stat_xact_user_functions " .
		"WHERE schemaname <> 'information_schema' " .
		"ORDER BY 3, 4";
}

# Convenience wrapper restricted to user tables.
sub dump_pgstatxacttables_user { return dump_pgstatxacttables('user'); };

# Current-transaction table statistics from pg_stat_xact_(all|user)_tables.
# $type: 'all' or 'user'; defaults to 'all'.
sub dump_pgstatxacttables
{
	my $type = shift;

	$type ||= 'all';

	# sprintf() removed: the template contained no conversion, it was a
	# plain (interpolated) string.
	return "SELECT date_trunc('seconds', now()), * " .
		"FROM pg_stat_xact_${type}_tables " .
		"WHERE schemaname <> 'information_schema' " .
		"ORDER BY 3, 4";
}

sub get_proc_name
{
	# Return the schema-qualified names of all non-system functions in
	# the given database (or in the default connection database).
	my $database = shift;

	my $LOCAL_PSQL_PROG = $PSQL_PROG;
	# Swap the default database on the psql command line for the wanted
	# one. BUG FIX: \Q..\E quotes the first database name, which comes
	# from user input and may contain regex metacharacters.
	if ($database && ($LOCAL_PSQL_PROG !~ s/-d \Q$dbnames[0]\E/-d $database/i)) {
		if ($database !~ /^[a-z0-9\_\-]+$/i) {
			$LOCAL_PSQL_PROG .= " -d '$database'";
		} else {
			$LOCAL_PSQL_PROG .= " -d $database";
		}
	}
	my @db_proc = `$LOCAL_PSQL_PROG -c "SELECT n.nspname||'.'||p.proname FROM pg_proc p, pg_namespace n WHERE p.pronamespace=n.oid AND n.nspname NOT IN ('pg_catalog', 'information_schema');"`;
	if ($? != 0) {
		&dprint("FATAL: psql error.\n");
		# remove the pidfile
		if (-f $PIDFILE) {
			unlink $PIDFILE or &dprint("ERROR: Unable to remove pidfile: $!\n");
		}
		exit 0;
	}
	chomp(@db_proc);

	return @db_proc;
}

####
# Retrieve user triggers count for a given database.
####
sub get_triggers
{
	# Return the number of user (non-internal) triggers in the given
	# database (or in the default connection database).
	my $database = shift;

	my $LOCAL_PSQL_PROG = $PSQL_PROG;
	# Swap the default database on the psql command line for the wanted
	# one. BUG FIX: \Q..\E quotes the first database name, which comes
	# from user input and may contain regex metacharacters.
	if ($database && ($LOCAL_PSQL_PROG !~ s/-d \Q$dbnames[0]\E/-d $database/i)) {
		if ($database !~ /^[a-z0-9\_\-]+$/i) {
			$LOCAL_PSQL_PROG .= " -d '$database'";
		} else {
			$LOCAL_PSQL_PROG .= " -d $database";
		}
	}
	my $db_trigger = `$LOCAL_PSQL_PROG -c "SELECT count(tgname) FROM pg_trigger WHERE NOT tgisinternal;"`;
	if ($? != 0) {
		&dprint("FATAL: psql error.\n");
		# remove the pidfile
		if (-f $PIDFILE) {
			unlink $PIDFILE or &dprint("ERROR: Unable to remove pidfile: $!\n");
		}
		exit 0;
	}
	chomp($db_trigger);

	return $db_trigger;
}

####
# Get unused indexes in a database
####
sub dump_unusedindexes
{
	# Report non-unique user indexes that have never been scanned
	# (idx_scan = 0), skipping fk/pk-looking names.
	# sprintf() removed: the template contained no conversion, and the
	# literal %' sequences were only preserved because Perl passes
	# invalid conversions through as-is.
	return "SELECT date_trunc('seconds', now()), current_database(), " .
		"schemaname, relname, indexrelname, pg_get_indexdef(pg_stat_user_indexes.indexrelid) " .
		"FROM pg_stat_user_indexes " .
		"INNER JOIN pg_index ON pg_index.indexrelid = pg_stat_user_indexes.indexrelid " .
		"WHERE NOT indexrelname ILIKE 'fki%' " .
		"AND NOT indexrelname ILIKE 'pk%' " .
		"AND indisunique = FALSE AND idx_scan = 0 " .
		"ORDER BY schemaname, relname, indexrelname";
}

####
# Get redundant indexes in a database
####
sub dump_redundantindexes
{
	# Report indexes whose column list is a prefix of (or identical to)
	# another index on the same table, using a window over pg_index
	# ordered by the flattened column list; only pairs whose uniqueness
	# flags differ are reported.
	# sprintf() removed: the template contained no conversion, and the
	# literal %' sequence was only preserved because Perl passes invalid
	# conversions through as-is.
	return "SELECT date_trunc('seconds', now()), current_database(), " .
		"pg_get_indexdef(indexrelid) AS contained, " .
		"pg_get_indexdef(index_backward) AS container " .
		"FROM " .
		"( " .
		"  SELECT indexrelid, " .
		"    indrelid, " .
		"    array_to_string(indkey,'+') AS colindex, " .
		"    indisunique AS is_unique, " .
		"    lag(array_to_string(indkey,'+')) OVER search_window AS colindexbackward, " .
		"    lag(indexrelid) OVER search_window AS index_backward, " .
		"    lag(indisunique) OVER search_window AS is_unique_backward " .
		"  FROM pg_index " .
		"    WINDOW search_window AS (PARTITION BY indrelid " .
		"    ORDER BY array_to_string(indkey,'+') DESC) " .
		") AS tmp " .
		"WHERE (colindexbackward LIKE (colindex || '+%') OR colindexbackward = colindex) " .
		"  AND (is_unique_backward <> is_unique)";
}

####
# Get PostgreSQL settings
####
sub dump_pgsettings
{
	# Full pg_settings snapshot; unit appeared in 8.2, boot_val and
	# reset_val in 8.4 (empty text columns are substituted before that).
	my $unit_col  = &backend_minimum_version(8, 2) ? "unit" : "''::text as unit";
	my $boot_col  = &backend_minimum_version(8, 4) ? "boot_val" : "''::text as boot_val";
	my $reset_col = &backend_minimum_version(8, 4) ? "reset_val" : "''::text as reset_val";

	return "SELECT date_trunc('seconds', now()), category, name, " .
		" setting, $unit_col, context, source, $boot_col, $reset_col " .
		"FROM pg_settings " .
		"ORDER BY category,name";
}

####
# Get PostgreSQL database and role settings
####
sub dump_pgdbrolesetting
{
	# Per-database and per-role configuration overrides.
	# sprintf() removed: the template contained no conversion, it was a
	# plain constant string.
	return "SELECT date_trunc('seconds', now()), b.datname, c.rolname, a.setconfig " .
		"FROM pg_db_role_setting a LEFT JOIN pg_database b ON (a.setdatabase=b.oid) " .
		"LEFT JOIN pg_roles c ON (a.setrole=c.oid) " .
		"ORDER BY 2,3";
}


####
# Get path the PostgreSQL configuration files
####
sub get_configuration_files
{
	# Ask the server where its configuration files and data directory
	# live, then derive the recovery.conf and postgresql.auto.conf paths
	# from the data directory entry (the one that is not a *.conf file).
	my @cfiles = `$PSQL_PROG -c "SELECT setting FROM pg_settings WHERE name IN ('config_file','hba_file','ident_file','data_directory') ORDER BY name;"`;
	if ($? != 0) {
		&dprint("FATAL: psql error.\n");
		# remove the pidfile
		if (-f $PIDFILE) {
			unlink $PIDFILE or &dprint("ERROR: Unable to remove pidfile: $!\n");
		}
		exit 0;
	}
	chomp(@cfiles);

	# Add recovery.conf and postgresql.auto.conf to the file list
	my $alter_system_conf = '';
	for (my $i = 0; $i <= $#cfiles; $i++) {
		if ($cfiles[$i] !~ /\.conf$/) {
			$alter_system_conf = $cfiles[$i] . '/postgresql.auto.conf';
			$cfiles[$i] .= '/recovery.conf';
		}
	}
	# BUG FIX: do not append an empty element when no data directory
	# entry was found above.
	push(@cfiles, $alter_system_conf) if ($alter_system_conf ne '');

	return @cfiles;
}

####
# Get list of DDL to create indexes missing on foreign keys
####
sub dump_missingfkindexes
{
	# Generate CREATE INDEX DDL for foreign key constraints whose key
	# columns are not covered by any existing index.
	# sprintf() removed: the template contained no conversion, it was a
	# plain constant string.
	return "SELECT date_trunc('seconds', now()), current_database(), relname, " .
		"'CREATE INDEX idx_' || relname || '_' || " .
		"         array_to_string(column_name_list, '_') || ' ON ' || conrelid || " .
		"         ' (' || array_to_string(column_name_list, ',') || ')' AS ddl " .
		"FROM (SELECT DISTINCT conrelid, " .
		"       array_agg(attname) AS column_name_list, " .
		"       array_agg(attnum) AS column_list " .
		"     FROM pg_attribute " .
		"          JOIN (SELECT conrelid::regclass, conname, " .
		"                 unnest(conkey) AS column_index " .
		"                FROM (SELECT DISTINCT conrelid, conname, conkey " .
		"                      FROM pg_constraint " .
		"                        JOIN pg_class ON pg_class.oid = pg_constraint.conrelid " .
		"                        JOIN pg_namespace ON pg_namespace.oid = pg_class.relnamespace " .
		"                      WHERE nspname !~ '^pg_' AND nspname <> 'information_schema' AND pg_constraint.contype = 'f' " .
		"                      ) fkey " .
		"               ) fkey " .
		"               ON fkey.conrelid = pg_attribute.attrelid " .
		"                  AND fkey.column_index = pg_attribute.attnum " .
		"     GROUP BY conrelid, conname " .
		"     ) candidate_index " .
		"JOIN pg_class ON pg_class.oid = candidate_index.conrelid " .
		"LEFT JOIN pg_index ON pg_index.indrelid = conrelid " .
		"                      AND indkey::text = array_to_string(column_list, ' ') " .
		"WHERE pg_index.indrelid IS NULL ";
}

####
# Get per database shared buffers statistics with pg_buffercache
####
sub dump_pgdatabase_buffercache
{
	# Shared buffer usage per database via pg_buffercache.
	# date_trunc | datname | buffers | buffered | buffers % | database %
	# sprintf() removed: the template contained no conversion, and the
	# literal %" sequences were only preserved because Perl passes
	# invalid conversions through as-is.
	return "SELECT date_trunc('seconds', now()), d.datname, " .
		"count(*) AS buffers, count(*)*8192 as buffered, " .
		"round(100.0 * count(*) / (SELECT setting FROM pg_settings " .
		"  WHERE name='shared_buffers')::integer,1) AS \"buffers %\", " .
		"round(100.0 * count(*) * 8192 / pg_database_size(d.datname),1) AS \"database %\" " .
		"FROM pg_database d INNER JOIN pg_buffercache b ON b.reldatabase=d.oid " .
		"GROUP BY d.datname " .
		"ORDER BY 2,3 DESC";
}

####
# Get per relation shared buffers statistics with pg_buffercache
####
sub dump_pgrelation_buffercache
{
	# Shared buffer usage per relation via pg_buffercache; zero-size
	# relations are excluded to avoid division by zero.
	# date_trunc | datname | relname | buffers | relpages | buffered | buffers % | relation %
	# sprintf() removed: the template contained no conversion, and the
	# literal %" sequences were only preserved because Perl passes
	# invalid conversions through as-is.
	return "SELECT date_trunc('seconds', now()), d.datname, c.relname, " .
		"count(*) AS buffers, c.relpages, count(*)*8192 as buffered, " .
		"round(100.0 * count(*) / (SELECT setting FROM pg_settings " .
		"  WHERE name='shared_buffers')::integer,1) AS \"buffers %\", " .
		"round(100.0 * count(*) * 8192 / pg_relation_size(c.oid),1) AS \"relation %\" " .
		"FROM pg_class c INNER JOIN pg_buffercache b ON b.relfilenode=c.relfilenode " .
		"INNER JOIN pg_database d ON ( b.reldatabase=d.oid ) " .
		"WHERE pg_relation_size(c.oid) > 0 " .
#		"AND c.relname !~ '^pg_' " .
		"GROUP BY d.datname, c.relname, c.relpages, c.oid " .
		"ORDER BY 2,4 DESC";
}

####
# Get usagecount distribution in shared buffers statistics with pg_buffercache
####
sub dump_pgdatabase_usercount
{
	# Distribution of shared buffer usagecount values per database.
	# date_trunc | datname | usagecount | buffer | buffers %
	# sprintf() removed: the template contained no conversion, and the
	# literal %" sequence was only preserved because Perl passes invalid
	# conversions through as-is.
	return "SELECT date_trunc('seconds', now()), d.datname, usagecount, " .
		"count(*) AS buffers, round(100.0 * count(*) / (SELECT setting FROM pg_settings " .
		"  WHERE name='shared_buffers')::integer,1) AS \"buffers %\" " .
		"FROM pg_database d INNER JOIN pg_buffercache b ON b.reldatabase=d.oid " .
		"GROUP BY d.datname, usagecount " .
		"ORDER BY 2,3 DESC";
}

####
# Get dirty usagecount distribution in shared buffers statistics with pg_buffercache
####
sub dump_pgdatabase_isdirty
{
	# Distribution of dirty shared buffers per database and usagecount.
	# date_trunc | datname | usagecount | isdirty | buffer | buffers %
	# sprintf() removed: the template contained no conversion, and the
	# literal %" sequence was only preserved because Perl passes invalid
	# conversions through as-is.
	return "SELECT date_trunc('seconds', now()), d.datname, usagecount, isdirty, " .
		"count(*) AS buffers, round(100.0 * count(*) / (SELECT setting FROM pg_settings " .
		"  WHERE name='shared_buffers')::integer,1) AS \"buffers %\" " .
		"FROM pg_database d INNER JOIN pg_buffercache b ON b.reldatabase=d.oid " .
		"GROUP BY d.datname, usagecount, isdirty " .
		"ORDER BY 2,3,4 DESC";
}

####
# Get information about archive
####
sub dump_pgstatarchiver
{
	# WAL archiver statistics snapshot (pg_stat_archiver is 9.4+).
	# sprintf() removed: the template contained no conversion, it was a
	# plain constant string.
	return "SELECT date_trunc('seconds', now()), archived_count, last_archived_wal, " .
		"last_archived_time, failed_count, last_failed_wal, last_failed_time, stats_reset " .
		"FROM pg_stat_archiver";
}

sub parse_pretty_size
{
	# Convert a human readable size ("2GB", "1.5 MB", raw byte count)
	# into kilobytes, matching the unit reported by `du -s`.
	# Returns 0 for an empty or undefined value.
	# Generalized: fractional values such as "1.5GB" are now accepted
	# (previously only integer sizes matched).
	my $val = shift;
	return 0 if (!$val);

	$val =~ s/\s+//g;

	my %bytes_per_unit = (
		P => 1024**5,
		T => 1024**4,
		G => 1024**3,
		M => 1024**2,
		K => 1024,
	);
	if ($val =~ /^(\d+(?:\.\d+)?)([PTGMK])B/i) {
		$val = $1 * $bytes_per_unit{uc($2)};
	}

	# du -s command returns kb so do the same here
	return int($val/1024);
}


####
# method used to fork as many child as wanted
##
sub spawn
{
	# Fork and run the given code reference in the child process. The
	# parent records the child pid in the global $GZIP_PID and returns
	# immediately; the child exits with the coderef's return value.
	my $coderef = shift;

	# Exactly one argument is accepted and it must be a CODE reference.
	unless (@_ == 0 && $coderef && ref($coderef) eq 'CODE') {
		print "usage: spawn CODEREF";
		exit 0;
	}

	if (!defined($GZIP_PID = fork)) {
		print STDERR "Error: cannot fork: $!\n";
		return;
	} elsif ($GZIP_PID) {
		return; # the parent
	}
	# the child -- go spawn
	# Copy effective uid/gid over the real ones so a setuid/setgid
	# program does not keep elevated real ids in the child.
	$< = $>;
	$( = $); # suid progs only

	exit &$coderef();
}

sub wait_all_childs
{
	# Wait for the forked compression child (tracked in $GZIP_PID) to
	# terminate, polling once per second.
	while ($GZIP_PID) {
		my $kid = waitpid(-1, WNOHANG);
		# $kid > 0: a child was reaped; $kid == -1: there is no child
		# left to wait for (e.g. already reaped elsewhere). BUG FIX:
		# the -1 case previously made this loop spin forever; also stop
		# immediately instead of sleeping one more second.
		if ($kid != 0) {
			$GZIP_PID = 0;
			last;
		}
		sleep(1);
	}
}

sub compress_files
{
	# Gzip every file found in the given directory.
	# NOTE(review): the path is interpolated unquoted into a shell
	# command; a directory containing spaces or shell metacharacters
	# would misbehave -- confirm callers only pass safe paths.
	my $target_dir = shift;

	&dprint("LOG: compressing directory $target_dir using $GZIP_PROG.\n");

	`$GZIP_PROG $target_dir/*`;
}
