From cc6d7882dba89bce0a1f3a27d9fd3b399a2430b9 Mon Sep 17 00:00:00 2001
From: einarjh
Date: Sat, 10 Jun 2017 11:20:48 +0200
Subject: [PATCH 001/332] Strip all non-ASCII characters from hddtemp output
 (#136)

---
 agent-local/hddtemp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/agent-local/hddtemp b/agent-local/hddtemp
index e2b99d759..9098ec53a 100755
--- a/agent-local/hddtemp
+++ b/agent-local/hddtemp
@@ -32,7 +32,7 @@ if [ "${hddtemp}" != "" ]; then
 	else
 		output=`${hddtemp} -w -q ${disks} 2>/dev/null`
 	fi
-	content=`echo "$output" | awk -F": " 'BEGIN{ ORS="" }{ print "|"$1"|"$2"|"$3"|";} ' | sed 's/[° ]C|/|C|/g' | sed 's/[° ]F|/|F|/g'`
+	content=`echo "$output" | awk -F": " 'BEGIN{ ORS="" }{ print "|"$1"|"$2"|"$3"|";} ' | sed 's/[° ]C|/|C|/g' | sed 's/[° ]F|/|F|/g' | tr -cd '\12\14\40-\176'`
 	if [ "${content}" != "" ]; then
 		echo '<<<hddtemp>>>'
 		echo ${content}

From a4efb62466c58ee05b3c078283a2a9fecb7cd3ce Mon Sep 17 00:00:00 2001
From: Stefan Funke
Date: Wed, 28 Jun 2017 22:36:26 +0200
Subject: [PATCH 002/332] unnecessary use of wc while already calling grep
 (#137)

* useless call of wc while already calling grep

* move grep count call to CMD_GREP to stay in project style
---
 snmp/os-updates.sh | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/snmp/os-updates.sh b/snmp/os-updates.sh
index c4d296767..b015abb19 100755
--- a/snmp/os-updates.sh
+++ b/snmp/os-updates.sh
@@ -13,6 +13,7 @@
 BIN_AWK='/usr/bin/awk'
 BIN_WC='/usr/bin/wc'
 BIN_GREP='/bin/grep'
+CMD_GREP='-c'
 CMD_WC='-l'
 BIN_ZYPPER='/usr/bin/zypper'
 CMD_ZYPPER='lu'
@@ -52,7 +53,7 @@ if [ -f /etc/os-release ]; then
 			echo "0";
 		fi
 	elif [ $OS == "debian" ] || [ $OS == "devuan" ] || [ $OS == "ubuntu" ]; then
-		UPDATES=`$BIN_APT $CMD_APT | $BIN_GREP 'Inst' | $BIN_WC $CMD_WC`
+		UPDATES=`$BIN_APT $CMD_APT | $BIN_GREP $CMD_GREP 'Inst'`
		if [ $UPDATES -gt 1 ]; then
 			echo $UPDATES;
 		else

From 3f9dc0f5f02c1590d6e84ac10c6f7c973d54f771 Mon Sep 17 00:00:00 2001
From: RedChops
Date: Thu, 29 Jun 2017 16:11:26 -0400
Subject: [PATCH 003/332] Fix for bug https://github.com/librenms/librenms/issues/6821
 (#138)

---
 snmp/postgres | 1 +
 1 file changed, 1 insertion(+)

diff --git a/snmp/postgres b/snmp/postgres
index 5897d0618..c03b2ccb4 100755
--- a/snmp/postgres
+++ b/snmp/postgres
@@ -108,6 +108,7 @@ BEGIN{
 	toAdd=1;
 }
 END{
+	OFMT = "%.0f"
 	print backends;
 	print commits;
 	print rollbacks;

From 584fd645d470e85e30607b8be3102292b4a7b54e Mon Sep 17 00:00:00 2001
From: drid
Date: Wed, 12 Jul 2017 22:55:02 +0300
Subject: [PATCH 004/332] C.H.I.P. power values (#134)

* C.H.I.P. 
power values * Added attribution * Fix ACIN current calculation * Battery current fix --- snmp/chip.sh | 87 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 87 insertions(+) create mode 100644 snmp/chip.sh diff --git a/snmp/chip.sh b/snmp/chip.sh new file mode 100644 index 000000000..07012d906 --- /dev/null +++ b/snmp/chip.sh @@ -0,0 +1,87 @@ +#!/bin/bash +# Based on https://github.com/Photonicsguy/CHIP +# Enable ADC registers +i2cset -y -f 0 0x34 0x82 0xff + +## REGISTER 00 ## +REG=$(i2cget -y -f 0 0x34 0x00) +STATUS_ACIN=$(($(($REG&0x80))/128)) +STATUS_VBUS=$(($(($REG&0x20))/32)) +STATUS_CHG_DIR=$(($(($REG&0x04))/4)) + +REG=$(i2cget -y -f 0 0x34 0x01) +STATUS_CHARGING=$(($(($REG&0x40))/64)) +STATUS_BATCON=$(($(($REG&0x20))/32)) + +BAT_C=0 +BAT_D=0 + +if [ $STATUS_ACIN == 1 ]; then + # ACIN voltage + REG=`i2cget -y -f 0 0x34 0x56 w|awk '{print "0x"substr($0,5,2)substr($0,4,1)}'` + REG=`printf "%d" "$REG"` + ACIN=`echo "$REG*0.0017"|bc` + # ACIN Current + REG=`i2cget -y -f 0 0x34 0x58 w|awk '{print "0x"substr($0,5,2)substr($0,4,1)}'` + REG=`printf "%d" "$REG"` + ACIN_C=`echo "$REG*0.000625"|bc` +else + ACIN=0 + ACIN_C=0 +fi + +if [ $STATUS_VBUS == 1 ]; then + # VBUS voltage + REG=`i2cget -y -f 0 0x34 0x5A w|awk '{print "0x"substr($0,5,2)substr($0,4,1)}'` + REG=`printf "%d" "$REG"` + VBUS=`echo "$REG*0.0017"|bc` + + # VBUS Current + REG=`i2cget -y -f 0 0x34 0x5C w|awk '{print "0x"substr($0,5,2)substr($0,4,1)}'` + REG=`printf "%d" "$REG"` + VBUS_C=`echo "$REG*0.000375"|bc` +else + VBUS=0 + VBUS_C=0 +fi + +if [ $STATUS_BATCON == 1 ]; then + # Battery Voltage + REG=`i2cget -y -f 0 0x34 0x78 w|awk '{print "0x"substr($0,5,2)substr($0,4,1)}'` + REG=`printf "%d" "$REG"` + VBAT=`echo "$REG*0.0011"|bc` + + if [ $STATUS_CHG_DIR == 1 ]; then + # Battery Charging Current + REG=`i2cget -y -f 0 0x34 0x7A w|awk '{print "0x"substr($0,5,2)substr($0,4,1)}'` + REG_C=`printf "%d" "$REG"` + BAT_C=`echo "scale=2;$REG_C*0.001"|bc` + else + # Battery Discharge Current + REG=`i2cget -y -f 0 0x34 0x7C w|awk '{print "0x"substr($0,5,2)substr($0,4,1)}'` + REG_D=`printf "%d" "$REG"` + BAT_D=`echo "scale=2;$REG_D*0.001"|bc` + fi + # Battery % + REG=`i2cget -y -f 0 0x34 0xB9` + BAT_PERCENT=`printf "%d" "$REG"` +else + VBAT=0 + BATT_CUR=0 + BAT_PERCENT=0 +fi + +# Temperature +REG=`i2cget -y -f 0 0x34 0x5E w|awk '{print "0x"substr($0,5,2)substr($0,4,1)}'` +REG=`printf "%d" "$REG"` +THERM=`echo "($REG*0.1)-144.7"|bc` + +echo $THERM +echo $ACIN +echo $ACIN_C +echo $VBUS +echo $VBUS_C +echo $VBAT +echo $(echo "$BAT_C-$BAT_D"|bc) +echo $BAT_PERCENT +echo $STATUS_CHARGING From a50e1dffb89738814a1183e2e0560ab86daaf3f0 Mon Sep 17 00:00:00 2001 From: Neil Lathwood Date: Thu, 3 Aug 2017 17:11:26 +0100 Subject: [PATCH 005/332] Update raspberry.sh (#140) --- snmp/raspberry.sh | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/snmp/raspberry.sh b/snmp/raspberry.sh index 39840949f..575a6fb16 100755 --- a/snmp/raspberry.sh +++ b/snmp/raspberry.sh @@ -31,4 +31,17 @@ sudo $picmd $getStatusMPG2 | $pised 's/MPG2=//g' sudo $picmd $getStatusWVC1 | $pised 's/WVC1=//g' sudo $picmd $getStatusMPG4 | $pised 's/MPG4=//g' sudo $picmd $getStatusMJPG | $pised 's/MJPG=//g' -sudo $picmd $getStatusWMV9 | $pised 's/WMV9=//g' +sudo $picmd $getStatusWMV9 | $pised 's/enabled/2/g' +sudo $picmd $getStatusH264 | $pised 's/enabled/2/g' +sudo $picmd $getStatusMPG2 | $pised 's/enabled/2/g' +sudo $picmd $getStatusWVC1 | $pised 's/enabled/2/g' +sudo $picmd $getStatusMPG4 | $pised 's/enabled/2/g' +sudo $picmd 
$getStatusMJPG | $pised 's/enabled/2/g' +sudo $picmd $getStatusWMV9 | $pised 's/enabled/2/g' +sudo $picmd $getStatusWMV9 | $pised 's/enabled/2/g' +sudo $picmd $getStatusH264 | $pised 's/disabled/1/g' +sudo $picmd $getStatusMPG2 | $pised 's/disabled/1/g' +sudo $picmd $getStatusWVC1 | $pised 's/disabled/1/g' +sudo $picmd $getStatusMPG4 | $pised 's/disabled/1/g' +sudo $picmd $getStatusMJPG | $pised 's/disabled/1/g' +sudo $picmd $getStatusWMV9 | $pised 's/disabled/1/g' From 3380a85ff13f0dad706690b71b2bd8e9d9452926 Mon Sep 17 00:00:00 2001 From: Zucht Date: Sat, 12 Aug 2017 17:30:02 +0200 Subject: [PATCH 006/332] Update raspberry.sh (#143) Fix state WMV9 --- snmp/raspberry.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/raspberry.sh b/snmp/raspberry.sh index 575a6fb16..f5c57f827 100755 --- a/snmp/raspberry.sh +++ b/snmp/raspberry.sh @@ -31,7 +31,7 @@ sudo $picmd $getStatusMPG2 | $pised 's/MPG2=//g' sudo $picmd $getStatusWVC1 | $pised 's/WVC1=//g' sudo $picmd $getStatusMPG4 | $pised 's/MPG4=//g' sudo $picmd $getStatusMJPG | $pised 's/MJPG=//g' -sudo $picmd $getStatusWMV9 | $pised 's/enabled/2/g' +sudo $picmd $getStatusWMV9 | $pised 's/WMV9=//g' sudo $picmd $getStatusH264 | $pised 's/enabled/2/g' sudo $picmd $getStatusMPG2 | $pised 's/enabled/2/g' sudo $picmd $getStatusWVC1 | $pised 's/enabled/2/g' From 45478555df856af51e707c3cd6ace716c709e0fb Mon Sep 17 00:00:00 2001 From: arrmo Date: Sun, 27 Aug 2017 14:59:15 -0500 Subject: [PATCH 007/332] Update Distro, for Raspbian Support (#144) --- snmp/distro | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/snmp/distro b/snmp/distro index 5886e5cd0..639ac225b 100755 --- a/snmp/distro +++ b/snmp/distro @@ -47,6 +47,10 @@ elif [ "${OS}" = "Linux" ] ; then elif [ -f /etc/debian_version ] ; then DIST="Debian `cat /etc/debian_version`" REV="" + ID=`lsb_release -i | awk -F ':' '{print $2}' | sed 's/ //g'` + if [ "${ID}" = "Raspbian" ] ; then + DIST="Raspbian `cat /etc/debian_version`" + fi elif [ -f /etc/gentoo-release ] ; then DIST="Gentoo" From 1b03d2f9f74ca29b177e596c0ff2ba13a0e1292d Mon Sep 17 00:00:00 2001 From: Uwe Arzt Date: Wed, 6 Sep 2017 20:42:58 +0200 Subject: [PATCH 008/332] Add Oracle Linux Distribution to distro script (#146) * Add Oracle Linux to distro script * Revert local change --- snmp/distro | 2 ++ 1 file changed, 2 insertions(+) diff --git a/snmp/distro b/snmp/distro index 639ac225b..61ad2488c 100755 --- a/snmp/distro +++ b/snmp/distro @@ -28,6 +28,8 @@ elif [ "${OS}" = "Linux" ] ; then DIST="Mandriva" PSEUDONAME=`cat /etc/mandriva-release | sed s/.*\(// | sed s/\)//` REV=`cat /etc/mandriva-release | sed s/.*release\ // | sed s/\ .*//` + elif [ -f /etc/oracle-release ]; then + DIST="Oracle" else DIST="RedHat" fi From 6a40ca1e9cc4319e6b7363541feb9681dcf5bc5f Mon Sep 17 00:00:00 2001 From: tomarch Date: Wed, 20 Sep 2017 21:47:11 +0200 Subject: [PATCH 009/332] fix munin agent (#148) Without the full munin-scripts path, this script won't find munin file and return nothing. 
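For context, the agent looks for its plugins under $MUNIN_LIBDIR/munin-scripts;
a minimal sketch of that layout, with a made-up Munin-style plugin that the
loop in this script would then pick up and run (illustrative only, not part of
the patch):

    # path matches the MUNIN_LIBDIR default set in the agent script
    mkdir -p /usr/share/munin/munin-scripts
    cd /usr/share/munin/munin-scripts
    # a trivial plugin that emits one munin "name.value" line
    { echo '#!/bin/sh'; echo 'echo uptime.value 42'; } > uptime_test
    chmod +x uptime_test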
--- agent-local/munin | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/agent-local/munin b/agent-local/munin index 47e513fa5..a95f3f145 100755 --- a/agent-local/munin +++ b/agent-local/munin @@ -1,6 +1,6 @@ # Lokale Einzelchecks export MUNIN_LIBDIR=/usr/share/munin -if cd munin-scripts +if cd $MUNIN_LIBDIR/munin-scripts then for skript in $(ls) do From d0762871b4cfb0a7cbfcc5ba99bc1fe0b0c51cf3 Mon Sep 17 00:00:00 2001 From: Slashdoom <5092581+slashdoom@users.noreply.github.com> Date: Tue, 10 Oct 2017 08:02:05 +1300 Subject: [PATCH 010/332] os-update.sh: back to package management based and count fixes (#149) * Update os-updates.sh * Update os-updates.sh * Update os-updates.sh --- snmp/os-updates.sh | 84 +++++++++++++++++++++++----------------------- 1 file changed, 42 insertions(+), 42 deletions(-) diff --git a/snmp/os-updates.sh b/snmp/os-updates.sh index b015abb19..6986c1d8c 100755 --- a/snmp/os-updates.sh +++ b/snmp/os-updates.sh @@ -10,13 +10,12 @@ #--------------------------------------------------------------# # please make sure you have the path/binaries below # ################################################################ -BIN_AWK='/usr/bin/awk' BIN_WC='/usr/bin/wc' BIN_GREP='/bin/grep' CMD_GREP='-c' CMD_WC='-l' BIN_ZYPPER='/usr/bin/zypper' -CMD_ZYPPER='lu' +CMD_ZYPPER='-q lu' BIN_YUM='/usr/bin/yum' CMD_YUM='-q check-update' BIN_DNF='/usr/bin/dnf' @@ -29,45 +28,46 @@ CMD_PACMAN='-Sup' ################################################################ # Don't change anything unless you know what are you doing # ################################################################ -if [ -f /etc/os-release ]; then - OS=`$BIN_AWK -F= '/^ID=/{print $2}' /etc/os-release` - if [ $OS == "opensuse" ]; then - UPDATES=`$BIN_ZYPPER $CMD_ZYPPER | $BIN_WC $CMD_WC` - if [ $UPDATES -gt 3 ]; then - echo $(($UPDATES-3)); - else - echo "0"; - fi - elif [ $OS == "\"centos\"" ]; then - UPDATES=`$BIN_YUM $CMD_YUM | $BIN_WC $CMD_WC` - if [ $UPDATES -gt 6 ]; then - echo $(($UPDATES-6)); - else - echo "0"; - fi - elif [ $OS == "fedora" ]; then - UPDATES=`$BIN_DNF $CMD_DNF | $BIN_WC $CMD_WC` - if [ $UPDATES -gt 6 ]; then - echo $(($UPDATES-6)); - else - echo "0"; - fi - elif [ $OS == "debian" ] || [ $OS == "devuan" ] || [ $OS == "ubuntu" ]; then - UPDATES=`$BIN_APT $CMD_APT | $BIN_GREP $CMD_GREP 'Inst'` - if [ $UPDATES -gt 1 ]; then - echo $UPDATES; - else - echo "0"; - fi - elif [ $OS == "arch" ]; then - UPDATES=`$BIN_PACMAN $CMD_PACMAN | $BIN_WC $CMD_WC` - if [ $UPDATES -gt 1 ]; then - echo $(($UPDATES-1)); - else - echo "0"; - fi - fi +if [ -f $BIN_ZYPPER ]; then + # OpenSUSE + UPDATES=`$BIN_ZYPPER $CMD_ZYPPER | $BIN_WC $CMD_WC` + if [ $UPDATES -gt 2 ]; then + echo $(($UPDATES-2)); + else + echo "0"; + fi +elif [ -f $BIN_DNF ]; then + # Fedora + UPDATES=`$BIN_DNF $CMD_DNF | $BIN_WC $CMD_WC` + if [ $UPDATES -gt 1 ]; then + echo $(($UPDATES-1)); + else + echo "0"; + fi +elif [ -f $BIN_PACMAN ]; then + # Arch + UPDATES=`$BIN_PACMAN $CMD_PACMAN | $BIN_WC $CMD_WC` + if [ $UPDATES -gt 1 ]; then + echo $(($UPDATES-1)); + else + echo "0"; + fi +elif [ -f $BIN_YUM ]; then + # CentOS / Redhat + UPDATES=`$BIN_YUM $CMD_YUM | $BIN_WC $CMD_WC` + if [ $UPDATES -gt 1 ]; then + echo $(($UPDATES-1)); + else + echo "0"; + fi +elif [ -f $BIN_APT ]; then + # Debian / Devuan / Ubuntu + UPDATES=`$BIN_APT $CMD_APT | $BIN_GREP $CMD_GREP 'Inst'` + if [ $UPDATES -gt 1 ]; then + echo $UPDATES; + else + echo "0"; + fi else - echo "0"; + echo "0"; fi - From 2996ad88b00f24777c0e5629cb931b8b448dd515 Mon Sep 17 00:00:00 2001 
From: dragans
Date: Fri, 27 Oct 2017 07:39:09 +0200
Subject: [PATCH 011/332] fix: Update mysql (#127)

Update the mysql agent script based on the newest version of the Percona
Monitoring Plugins (Cacti template). The changes enable correct parsing of
status data for newer versions of MySQL/MariaDB database servers and should
be backward compatible with older versions.
---
 agent-local/mysql | 492 ++++++++++++++++++++++++++++++++--------------
 1 file changed, 342 insertions(+), 150 deletions(-)

diff --git a/agent-local/mysql b/agent-local/mysql
index 4db91f689..3b8b30427 100755
--- a/agent-local/mysql
+++ b/agent-local/mysql
@@ -7,24 +7,11 @@
 ### This script requires php-cli and php-mysql packages

 # ============================================================================
-# This is a script to retrieve information from a MySQL server for input to a
-# Cacti graphing process. It is hosted at
-# http://code.google.com/p/mysql-cacti-templates/.
-#
-# This program is copyright (c) 2007 Baron Schwartz. Feedback and improvements
-# are welcome.
-#
-# THIS PROGRAM IS PROVIDED "AS IS" AND WITHOUT ANY EXPRESS OR IMPLIED
-# WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
-# MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
-#
-# This program is free software; you can redistribute it and/or modify it under
-# the terms of the GNU General Public License as published by the Free Software
-# Foundation, version 2.
-#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc., 59 Temple
-# Place, Suite 330, Boston, MA 02111-1307 USA.
+# This program is part of Percona Monitoring Plugins
+# License: GPL License (see COPYING)
+# Copyright 2008-2016 Baron Schwartz, 2012-2016 Percona
+# Authors:
+#  Baron Schwartz, Roman Vynar
 # ============================================================================

 # ============================================================================
@@ -50,9 +37,19 @@
 $mysql_pass = '';
 $mysql_host = 'localhost';
 $mysql_port = 3306;
 $mysql_ssl  = FALSE; # Whether to use SSL to connect to MySQL.
+$mysql_ssl_key  = '/etc/pki/tls/certs/mysql/client-key.pem';
+$mysql_ssl_cert = '/etc/pki/tls/certs/mysql/client-cert.pem';
+$mysql_ssl_ca   = '/etc/pki/tls/certs/mysql/ca-cert.pem';
+$mysql_connection_timeout = 5;
+
+$heartbeat = FALSE; # Whether to use pt-heartbeat table for repl. delay calculation.
+$heartbeat_utc = FALSE; # Whether pt-heartbeat is run with --utc option.
+$heartbeat_server_id = 0; # Server id to associate with a heartbeat. Leave 0 if no preference.
+$heartbeat_table = 'percona.heartbeat'; # db.tbl.
+
-$heartbeat = ''; # db.tbl in case you use mk-heartbeat from Maatkit.
 $cache_dir = '/tmp'; # If set, this uses caching to avoid multiple calls.
+$timezone = null; # If not set, uses the system default. Example: "UTC"
 $cache_time = 30; # How long to cache data.

 $chk_options = array (
@@ -60,6 +57,7 @@
 	'master' => true, # Do you want to check binary logging?
 	'slave' => true, # Do you want to check slave status?
 	'procs' => true, # Do you want to check SHOW PROCESSLIST?
+	'get_qrt' => true, # Get query response times from Percona Server or MariaDB?
 );

 $use_ss = FALSE; # Whether to use the script server or not
@@ -78,6 +76,7 @@
 echo("<<<mysql>>>\n");
 if (file_exists(__FILE__ . '.cnf' ) ) {
 	require(__FILE__ . '.cnf');
+	debug('Found configuration file ' . __FILE__ . '.cnf');
 }
 else {
 	echo("No ".__FILE__ . 
".cnf found!\n"); exit(); @@ -115,6 +114,19 @@ function error_handler($errno, $errstr, $errfile, $errline) { # } #} +# ============================================================================ +# Set the default timezone either to the configured, system timezone, or the +# default set above in the script. +# ============================================================================ +if ( function_exists("date_default_timezone_set") + && function_exists("date_default_timezone_get") ) { + $tz = ($timezone ? $timezone : @date_default_timezone_get()); + if ( $tz ) { + @date_default_timezone_set($tz); + } +} + + # ============================================================================ # Make sure we can also be called as a script. # ============================================================================ @@ -172,7 +184,7 @@ if (!function_exists('array_change_key_case') ) { # ============================================================================ function validate_options($options) { debug($options); - $opts = array('items', 'user', 'pass', 'heartbeat', 'nocache', 'port'); + $opts = array('items', 'user', 'pass', 'heartbeat', 'nocache', 'port', 'server-id'); # Required command-line options foreach ( array() as $option ) { if (!isset($options[$option]) || !$options[$option] ) { @@ -190,21 +202,23 @@ function validate_options($options) { # Print out a brief usage summary # ============================================================================ function usage($message) { - global $mysql_host, $mysql_user, $mysql_pass, $mysql_port, $heartbeat; + global $mysql_host, $mysql_user, $mysql_pass, $mysql_port; $usage = << --items [OPTION] - - --host Hostname to connect to; use host:port syntax to specify a port - Use :/path/to/socket if you want to connect via a UNIX socket - --items Comma-separated list of the items whose data you want - --user MySQL username; defaults to $mysql_user if not given - --pass MySQL password; defaults to $mysql_pass if not given - --heartbeat MySQL heartbeat table; defaults to '$heartbeat' (see mk-heartbeat) - --nocache Do not cache results in a file - --port MySQL port; defaults to $mysql_port if not given - --mysql_ssl Add the MYSQL_CLIENT_SSL flag to mysql_connect() call +Usage: php ss_get_mysql_stats.php --host --items [OPTION] + + --host MySQL host + --items Comma-separated list of the items whose data you want + --user MySQL username + --pass MySQL password + --port MySQL port + --socket MySQL socket + --flags MySQL flags + --connection-timeout MySQL connection timeout + --server-id Server id to associate with a heartbeat if heartbeat usage is enabled + --nocache Do not cache results in a file + --help Show usage EOF; die($usage); @@ -256,8 +270,11 @@ function parse_cmdline( $args ) { # ============================================================================ function ss_get_mysql_stats( $options ) { # Process connection options and connect to MySQL. - global $debug, $mysql_user, $mysql_pass, $heartbeat, $cache_dir, $cache_time, - $chk_options, $mysql_host, $mysql_port, $mysql_ssl; + global $debug, $mysql_host, $mysql_user, $mysql_pass, $cache_dir, $poll_time, $chk_options, + $mysql_port, $mysql_socket, $mysql_flags, + $mysql_ssl, $mysql_ssl_key, $mysql_ssl_cert, $mysql_ssl_ca, + $mysql_connection_timeout, + $heartbeat, $heartbeat_table, $heartbeat_server_id, $heartbeat_utc; # Connect to MySQL. $user = isset($options['user']) ? 
$options['user'] : $mysql_user; @@ -265,26 +282,15 @@ function ss_get_mysql_stats( $options ) { $port = isset($options['port']) ? $options['port'] : $mysql_port; $host = isset($options['host']) ? $options['host'] : $mysql_host; - $heartbeat = isset($options['heartbeat']) ? $options['heartbeat'] : $heartbeat; + $socket = isset($options['socket']) ? $options['socket'] : $mysql_socket; + $flags = isset($options['flags']) ? $options['flags'] : $mysql_flags; + $connection_timeout = isset($options['connection-timeout']) ? $options['connection-timeout'] : $mysql_connection_timeout; + $heartbeat_server_id = isset($options['server-id']) ? $options['server-id'] : $heartbeat_server_id; + # If there is a port, or if it's a non-standard port, we add ":$port" to the # hostname. $host_str = $host.($port != 3306 ? ":$port" : ''); - debug(array('connecting to', $host_str, $user, $pass)); - if (!extension_loaded('mysqli') ) { - debug("The MySQL extension is not loaded"); - die("The MySQL extension is not loaded"); - } - if ($mysql_ssl || (isset($options['mysql_ssl']) && $options['mysql_ssl']) ) { - $conn = ((($GLOBALS["___mysqli_ston"] = mysqli_init()) && (mysqli_real_connect($GLOBALS["___mysqli_ston"], $host_str, - $user, $pass, NULL, 3306, NULL, MYSQLI_CLIENT_SSL))) ? $GLOBALS["___mysqli_ston"] : FALSE); - } - else { - $conn = ($GLOBALS["___mysqli_ston"] = mysqli_connect($host_str, $user, $pass)); - } - if (!$conn ) { - die("MySQL: " . ((is_object($GLOBALS["___mysqli_ston"])) ? mysqli_error($GLOBALS["___mysqli_ston"]) : - (($___mysqli_res = mysqli_connect_error()) ? $___mysqli_res : false))); - } + $sanitized_host = str_replace(array(":", "/"), array("", "_"), $host); $cache_file = "$cache_dir/agent-local-mysql"; @@ -292,12 +298,12 @@ function ss_get_mysql_stats( $options ) { # First, check the cache. $fp = null; - if (!isset($options['nocache']) ) { - if ($fp = fopen($cache_file, 'a+') ) { + if ( $cache_dir && !array_key_exists('nocache', $options) ) { + if ( $fp = fopen($cache_file, 'a+') ) { $locked = flock($fp, 1); # LOCK_SH - if ($locked ) { - if (filesize($cache_file) > 0 - && filectime($cache_file) + ($cache_time) > time() + if ( $locked ) { + if ( filesize($cache_file) > 0 + && filectime($cache_file) + ($poll_time/2) > time() && ($arr = file($cache_file)) ) {# The cache file is good to use. debug("Using the cache file"); @@ -307,12 +313,12 @@ function ss_get_mysql_stats( $options ) { else { debug("The cache file seems too small or stale"); # Escalate the lock to exclusive, so we can write to it. - if (flock($fp, 2) ) { # LOCK_EX + if ( flock($fp, 2) ) { # LOCK_EX # We might have blocked while waiting for that LOCK_EX, and # another process ran and updated it. Let's see if we can just # return the data now: - if (filesize($cache_file) > 0 - && filectime($cache_file) + ($cache_time) > time() + if ( filesize($cache_file) > 0 + && filectime($cache_file) + ($poll_time/2) > time() && ($arr = file($cache_file)) ) {# The cache file is good to use. debug("Using the cache file"); @@ -324,48 +330,79 @@ function ss_get_mysql_stats( $options ) { } } else { - debug("Couldn't lock the cache file, ignoring it."); $fp = null; + debug("Couldn't lock the cache file, ignoring it"); } } + else { + $fp = null; + debug("Couldn't open the cache file"); + } } else { - $fp = null; - debug("Couldn't open the cache file"); + debug("Caching is disabled."); } + # Connect to MySQL. 
+ debug(array('Connecting to', $host, $port, $user, $pass)); + if ( !extension_loaded('mysqli') ) { + debug("PHP MySQLi extension is not loaded"); + die("PHP MySQLi extension is not loaded"); + } + if ( $mysql_ssl ) { + $conn = mysqli_init(); + $conn->options(MYSQLI_OPT_CONNECT_TIMEOUT, $connection_timeout); + mysqli_ssl_set($conn, $mysql_ssl_key, $mysql_ssl_cert, $mysql_ssl_ca, NULL, NULL); + mysqli_real_connect($conn, $host, $user, $pass, NULL, $port, $socket, $flags); + } + else { + $conn = mysqli_init(); + $conn->options(MYSQLI_OPT_CONNECT_TIMEOUT, $connection_timeout); + mysqli_real_connect($conn, $host, $user, $pass, NULL, $port, $socket, $flags); + } + if ( mysqli_connect_errno() ) { + debug("MySQL connection failed: " . mysqli_connect_error()); + die("ERROR: " . mysqli_connect_error()); + } + + # MySQL server version. + # The form of this version number is main_version * 10000 + minor_version * 100 + sub_version + # i.e. version 5.5.44 is 50544. + $mysql_version = mysqli_get_server_version($conn); + debug("MySQL server version is " . $mysql_version); + # Set up variables. $status = array( # Holds the result of SHOW STATUS, SHOW INNODB STATUS, etc # Define some indexes so they don't cause errors with += operations. 'relay_log_space' => null, 'binary_log_space' => null, - 'current_transactions' => null, - 'locked_transactions' => null, - 'active_transactions' => null, - 'innodb_locked_tables' => null, - 'innodb_tables_in_use' => null, - 'innodb_lock_structs' => null, - 'innodb_lock_wait_secs' => null, - 'innodb_sem_waits' => null, - 'innodb_sem_wait_time_ms'=> null, + 'current_transactions' => 0, + 'locked_transactions' => 0, + 'active_transactions' => 0, + 'innodb_locked_tables' => 0, + 'innodb_tables_in_use' => 0, + 'innodb_lock_structs' => 0, + 'innodb_lock_wait_secs' => 0, + 'innodb_sem_waits' => 0, + 'innodb_sem_wait_time_ms'=> 0, # Values for the 'state' column from SHOW PROCESSLIST (converted to # lowercase, with spaces replaced by underscores) - 'State_closing_tables' => null, - 'State_copying_to_tmp_table' => null, - 'State_end' => null, - 'State_freeing_items' => null, - 'State_init' => null, - 'State_locked' => null, - 'State_login' => null, - 'State_preparing' => null, - 'State_reading_from_net' => null, - 'State_sending_data' => null, - 'State_sorting_result' => null, - 'State_statistics' => null, - 'State_updating' => null, - 'State_writing_to_net' => null, - 'State_none' => null, - 'State_other' => null, # Everything not listed above + 'State_closing_tables' => 0, + 'State_copying_to_tmp_table' => 0, + 'State_end' => 0, + 'State_freeing_items' => 0, + 'State_init' => 0, + 'State_locked' => 0, + 'State_login' => 0, + 'State_preparing' => 0, + 'State_reading_from_net' => 0, + 'State_sending_data' => 0, + 'State_sorting_result' => 0, + 'State_statistics' => 0, + 'State_updating' => 0, + 'State_writing_to_net' => 0, + 'State_none' => 0, + 'State_other' => 0, # Everything not listed above ); # Get SHOW STATUS and convert the name-value array into a simple @@ -382,8 +419,15 @@ function ss_get_mysql_stats( $options ) { } # Get SHOW SLAVE STATUS, and add it to the $status array. 
- if ($chk_options['slave'] ) { - $result = run_query("SHOW SLAVE STATUS", $conn); + if ( $chk_options['slave'] ) { + # Leverage lock-free SHOW SLAVE STATUS if available + $result = run_query("SHOW SLAVE STATUS NONBLOCKING", $conn); + if ( !$result ) { + $result = run_query("SHOW SLAVE STATUS NOLOCK", $conn); + if ( !$result ) { + $result = run_query("SHOW SLAVE STATUS", $conn); + } + } $slave_status_rows_gotten = 0; foreach ( $result as $row ) { $slave_status_rows_gotten++; @@ -394,23 +438,30 @@ function ss_get_mysql_stats( $options ) { $status['slave_lag'] = $row['seconds_behind_master']; # Check replication heartbeat, if present. - if ($heartbeat ) { + if ( $heartbeat ) { + if ( $heartbeat_utc ) { + $now_func = 'UNIX_TIMESTAMP(UTC_TIMESTAMP)'; + } + else { + $now_func = 'UNIX_TIMESTAMP()'; + } $result2 = run_query( - "SELECT GREATEST(0, UNIX_TIMESTAMP() - UNIX_TIMESTAMP(ts) - 1)" - . " AS delay FROM $heartbeat WHERE id = 1", $conn); + "SELECT MAX($now_func - ROUND(UNIX_TIMESTAMP(ts)))" + . " AS delay FROM $heartbeat_table" + . " WHERE $heartbeat_server_id = 0 OR server_id = $heartbeat_server_id", $conn); $slave_delay_rows_gotten = 0; foreach ( $result2 as $row2 ) { $slave_delay_rows_gotten++; - if ($row2 && is_array($row2) + if ( $row2 && is_array($row2) && array_key_exists('delay', $row2) ) { $status['slave_lag'] = $row2['delay']; } else { - debug("Couldn't get slave lag from $heartbeat"); + debug("Couldn't get slave lag from $heartbeat_table"); } } - if ($slave_delay_rows_gotten == 0 ) { + if ( $slave_delay_rows_gotten == 0 ) { debug("Got nothing from heartbeat query"); } } @@ -421,11 +472,11 @@ function ss_get_mysql_stats( $options ) { $status['slave_stopped'] = ($row['slave_sql_running'] == 'Yes') ? 0 : $status['slave_lag']; } - if ($slave_status_rows_gotten == 0 ) { + if ( $slave_status_rows_gotten == 0 ) { debug("Got nothing from SHOW SLAVE STATUS"); } } - + # Get SHOW MASTER STATUS, and add it to the $status array. if ($chk_options['master'] && array_key_exists('log_bin', $status) @@ -449,18 +500,22 @@ function ss_get_mysql_stats( $options ) { # Get SHOW PROCESSLIST and aggregate it by state, then add it to the array # too. - if ($chk_options['procs'] ) { + if ( $chk_options['procs'] ) { $result = run_query('SHOW PROCESSLIST', $conn); foreach ( $result as $row ) { $state = $row['State']; - if (is_null($state) ) { + if ( is_null($state) ) { $state = 'NULL'; } - if ($state == '' ) { + if ( $state == '' ) { $state = 'none'; } + # MySQL 5.5 replaces the 'Locked' state with a variety of "Waiting for + # X lock" types of statuses. Wrap these all back into "Locked" because + # we don't really care about the type of locking it is. + $state = preg_replace('/^(Table lock|Waiting for .*lock)$/', 'Locked', $state); $state = str_replace(' ', '_', strtolower($state)); - if (array_key_exists("State_$state", $status) ) { + if ( array_key_exists("State_$state", $status) ) { increment($status, "State_$state", 1); } else { @@ -469,15 +524,63 @@ function ss_get_mysql_stats( $options ) { } } + # Get SHOW ENGINES to be able to determine whether InnoDB is present. + $engines = array(); + $result = run_query("SHOW ENGINES", $conn); + foreach ( $result as $row ) { + $engines[$row[0]] = $row[1]; + } + # Get SHOW INNODB STATUS and extract the desired metrics from it, then add # those to the array too. 
if ($chk_options['innodb'] - && array_key_exists('have_innodb', $status) - && $status['have_innodb'] == 'YES' + && array_key_exists('InnoDB', $engines) + && $engines['InnoDB'] == 'YES' + || $engines['InnoDB'] == 'DEFAULT' ) { $result = run_query("SHOW /*!50000 ENGINE*/ INNODB STATUS", $conn); $istatus_text = $result[0]['Status']; - $istatus_vals = get_innodb_array($istatus_text); + $istatus_vals = get_innodb_array($istatus_text, $mysql_version); + + # Get response time histogram from Percona Server or MariaDB if enabled. + if ( $chk_options['get_qrt'] + && (( isset($status['have_response_time_distribution']) + && $status['have_response_time_distribution'] == 'YES') + || (isset($status['query_response_time_stats']) + && $status['query_response_time_stats'] == 'ON')) ) + { + debug('Getting query time histogram'); + $i = 0; + $result = run_query( + "SELECT `count`, ROUND(total * 1000000) AS total " + . "FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME " + . "WHERE `time` <> 'TOO LONG'", + $conn); + foreach ( $result as $row ) { + if ( $i > 13 ) { + # It's possible that the number of rows returned isn't 14. + # Don't add extra status counters. + break; + } + $count_key = sprintf("Query_time_count_%02d", $i); + $total_key = sprintf("Query_time_total_%02d", $i); + $status[$count_key] = $row['count']; + $status[$total_key] = $row['total']; + $i++; + } + # It's also possible that the number of rows returned is too few. + # Don't leave any status counters unassigned; it will break graphs. + while ( $i <= 13 ) { + $count_key = sprintf("Query_time_count_%02d", $i); + $total_key = sprintf("Query_time_total_%02d", $i); + $status[$count_key] = 0; + $status[$total_key] = 0; + $i++; + } + } + else { + debug('Not getting time histogram because it is not enabled'); + } # Override values from InnoDB parsing with values from SHOW STATUS, # because InnoDB status might not have everything and the SHOW STATUS is @@ -498,6 +601,8 @@ function ss_get_mysql_stats( $options ) { 'Innodb_rows_inserted' => 'rows_inserted', 'Innodb_rows_read' => 'rows_read', 'Innodb_rows_updated' => 'rows_updated', + 'Innodb_buffer_pool_reads' => 'pool_reads', + 'Innodb_buffer_pool_read_requests' => 'pool_read_requests', ); # If the SHOW STATUS value exists, override... @@ -540,9 +645,9 @@ function ss_get_mysql_stats( $options ) { } # Define the variables to output. I use shortened variable names so maybe - # it'll all fit in 1024 bytes for Cactid and Spine's benefit. This list must - # come right after the word MAGIC_VARS_DEFINITIONS. The Perl script parses - # it and uses it as a Perl variable. + # it'll all fit in 1024 bytes for Cactid and Spine's benefit. + # This list must come right after the word MAGIC_VARS_DEFINITIONS. The Perl script + # parses it and uses it as a Perl variable. 
$keys = array( 'Key_read_requests' => 'a0', 'Key_reads' => 'a1', @@ -654,7 +759,6 @@ function ss_get_mysql_stats( $options ) { 'binary_log_space' => 'cz', 'innodb_locked_tables' => 'd0', 'innodb_lock_structs' => 'd1', - 'State_closing_tables' => 'd2', 'State_copying_to_tmp_table' => 'd3', 'State_end' => 'd4', @@ -671,7 +775,6 @@ function ss_get_mysql_stats( $options ) { 'State_writing_to_net' => 'df', 'State_none' => 'dg', 'State_other' => 'dh', - 'Handler_commit' => 'di', 'Handler_delete' => 'dj', 'Handler_discover' => 'dk', @@ -713,6 +816,53 @@ function ss_get_mysql_stats( $options ) { 'key_buffer_size' => 'ei', 'Innodb_row_lock_time' => 'ej', 'Innodb_row_lock_waits' => 'ek', + + # Values not parsed by LibreNMS + 'Query_time_count_00' => 'ol', + 'Query_time_count_01' => 'om', + 'Query_time_count_02' => 'on', + 'Query_time_count_03' => 'oo', + 'Query_time_count_04' => 'op', + 'Query_time_count_05' => 'oq', + 'Query_time_count_06' => 'or', + 'Query_time_count_07' => 'os', + 'Query_time_count_08' => 'ot', + 'Query_time_count_09' => 'ou', + 'Query_time_count_10' => 'ov', + 'Query_time_count_11' => 'ow', + 'Query_time_count_12' => 'ox', + 'Query_time_count_13' => 'oy', + 'Query_time_total_00' => 'oz', + 'Query_time_total_01' => 'pg', + 'Query_time_total_02' => 'ph', + 'Query_time_total_03' => 'pi', + 'Query_time_total_04' => 'pj', + 'Query_time_total_05' => 'pk', + 'Query_time_total_06' => 'pl', + 'Query_time_total_07' => 'pm', + 'Query_time_total_08' => 'pn', + 'Query_time_total_09' => 'po', + 'Query_time_total_10' => 'pp', + 'Query_time_total_11' => 'pq', + 'Query_time_total_12' => 'pr', + 'Query_time_total_13' => 'ps', + 'wsrep_replicated_bytes' => 'pt', + 'wsrep_received_bytes' => 'pu', + 'wsrep_replicated' => 'pv', + 'wsrep_received' => 'pw', + 'wsrep_local_cert_failures' => 'px', + 'wsrep_local_bf_aborts' => 'py', + 'wsrep_local_send_queue' => 'pz', + 'wsrep_local_recv_queue' => 'qg', + 'wsrep_cluster_size' => 'qh', + 'wsrep_cert_deps_distance' => 'qi', + 'wsrep_apply_window' => 'qj', + 'wsrep_commit_window' => 'qk', + 'wsrep_flow_control_paused' => 'ql', + 'wsrep_flow_control_sent' => 'qm', + 'wsrep_flow_control_recv' => 'qn', + 'pool_reads' => 'qo', + 'pool_read_requests' => 'qp', ); # Return the output. @@ -741,7 +891,7 @@ function ss_get_mysql_stats( $options ) { # MySQL 5.0, and XtraDB or enhanced InnoDB from Percona if applicable. Note # that extra leading spaces are ignored due to trim(). 
# ============================================================================ -function get_innodb_array($text) { +function get_innodb_array($text, $mysql_version) { $results = array( 'spin_waits' => array(), 'spin_rounds' => array(), @@ -815,13 +965,26 @@ function get_innodb_array($text) { $results['spin_rounds'][] = to_int($row[5]); $results['os_waits'][] = to_int($row[8]); } - elseif (strpos($line, 'RW-shared spins') === 0 ) { + elseif (strpos($line, 'RW-shared spins') === 0 + && strpos($line, ';') > 0 ) { # RW-shared spins 3859028, OS waits 2100750; RW-excl spins 4641946, OS waits 1530310 $results['spin_waits'][] = to_int($row[2]); $results['spin_waits'][] = to_int($row[8]); $results['os_waits'][] = to_int($row[5]); $results['os_waits'][] = to_int($row[11]); } + elseif (strpos($line, 'RW-shared spins') === 0 && strpos($line, '; RW-excl spins') === FALSE) { + # Post 5.5.17 SHOW ENGINE INNODB STATUS syntax + # RW-shared spins 604733, rounds 8107431, OS waits 241268 + $results['spin_waits'][] = to_int($row[2]); + $results['os_waits'][] = to_int($row[7]); + } + elseif (strpos($line, 'RW-excl spins') === 0) { + # Post 5.5.17 SHOW ENGINE INNODB STATUS syntax + # RW-excl spins 604733, rounds 8107431, OS waits 241268 + $results['spin_waits'][] = to_int($row[2]); + $results['os_waits'][] = to_int($row[7]); + } elseif (strpos($line, 'seconds the semaphore:') > 0) { # --Thread 907205 has waited at handler/ha_innodb.cc line 7156 for 1.00 seconds the semaphore: increment($results, 'innodb_sem_waits', 1); @@ -830,18 +993,35 @@ function get_innodb_array($text) { } # TRANSACTIONS - elseif (strpos($line, 'Trx id counter') === 0 ) { + elseif ( strpos($line, 'Trx id counter') === 0 ) { # The beginning of the TRANSACTIONS section: start counting # transactions - # Trx id counter 0 1170664159 - # Trx id counter 861B144C - $results['innodb_transactions'] = make_bigint($row[3], $row[4]); + if ( $mysql_version < 50600 ) { + # For versions prior 5.6: two decimals or one hex + # Trx id counter 0 1170664159 + # Trx id counter 861B144C + $results['innodb_transactions'] = isset($row[4]) ? make_bigint( + $row[3], $row[4]) : base_convert($row[3], 16, 10); + } + else { + # For versions 5.6+ and MariaDB 10.x: one decimal + # Trx id counter 2903813 + $results['innodb_transactions'] = $row[3]; + } $txn_seen = TRUE; } - elseif (strpos($line, 'Purge done for trx') === 0 ) { - # Purge done for trx's n:o < 0 1170663853 undo n:o < 0 0 - # Purge done for trx's n:o < 861B135D undo n:o < 0 - $purged_to = make_bigint($row[6], $row[7] == 'undo' ? null : $row[7]); + elseif ( strpos($line, 'Purge done for trx') === 0 ) { + if ( $mysql_version < 50600 ) { + # For versions prior 5.6: two decimals or one hex + # Purge done for trx's n:o < 0 1170663853 undo n:o < 0 0 + # Purge done for trx's n:o < 861B135D undo n:o < 0 + $purged_to = $row[7] == 'undo' ? 
base_convert($row[6], 16, 10) : make_bigint($row[6], $row[7]); + } + else { + # For versions 5.6+ and MariaDB 10.x: one decimal + # Purge done for trx's n:o < 2903354 undo n:o < 0 state: running but idle + $purged_to = $row[6]; + } $results['unpurged_txns'] = big_sub($results['innodb_transactions'], $purged_to); } @@ -849,31 +1029,31 @@ function get_innodb_array($text) { # History list length 132 $results['history_list'] = to_int($row[3]); } - elseif ($txn_seen && strpos($line, '---TRANSACTION') === 0 ) { + elseif ( $txn_seen && strpos($line, '---TRANSACTION') === 0 ) { # ---TRANSACTION 0, not started, process no 13510, OS thread id 1170446656 increment($results, 'current_transactions', 1); - if (strpos($line, 'ACTIVE') > 0 ) { + if ( strpos($line, 'ACTIVE') > 0 ) { increment($results, 'active_transactions', 1); } } - elseif ($txn_seen && strpos($line, '------- TRX HAS BEEN') === 0 ) { + elseif ( $txn_seen && strpos($line, '------- TRX HAS BEEN') === 0 ) { # ------- TRX HAS BEEN WAITING 32 SEC FOR THIS LOCK TO BE GRANTED: increment($results, 'innodb_lock_wait_secs', to_int($row[5])); } - elseif (strpos($line, 'read views open inside InnoDB') > 0 ) { + elseif ( strpos($line, 'read views open inside InnoDB') > 0 ) { # 1 read views open inside InnoDB $results['read_views'] = to_int($row[0]); } - elseif (strpos($line, 'mysql tables in use') === 0 ) { + elseif ( strpos($line, 'mysql tables in use') === 0 ) { # mysql tables in use 2, locked 2 increment($results, 'innodb_tables_in_use', to_int($row[4])); increment($results, 'innodb_locked_tables', to_int($row[6])); } - elseif ($txn_seen && strpos($line, 'lock struct(s)') > 0 ) { + elseif ( $txn_seen && strpos($line, 'lock struct(s)') > 0 ) { # 23 lock struct(s), heap size 3024, undo log entries 27 # LOCK WAIT 12 lock struct(s), heap size 3024, undo log entries 5 # LOCK WAIT 2 lock struct(s), heap size 368 - if (strpos($line, 'LOCK WAIT') === 0 ) { + if ( strpos($line, 'LOCK WAIT') === 0 ) { increment($results, 'innodb_lock_structs', to_int($row[2])); increment($results, 'locked_transactions', 1); } @@ -900,7 +1080,7 @@ function get_innodb_array($text) { $results['pending_aio_log_ios'] = to_int($row[6]); $results['pending_aio_sync_ios'] = to_int($row[9]); } - elseif (strpos($line, 'Pending flushes (fsync)') === 0 ) { + elseif ( strpos($line, 'Pending flushes (fsync)') === 0 ) { # Pending flushes (fsync) log: 0; buffer pool: 0 $results['pending_log_flushes'] = to_int($row[4]); $results['pending_buf_pool_flushes'] = to_int($row[7]); @@ -921,6 +1101,16 @@ function get_innodb_array($text) { $results['ibuf_used_cells'] = to_int($row[2]); $results['ibuf_free_cells'] = to_int($row[6]); $results['ibuf_cell_count'] = to_int($row[9]); + if (strpos($line, 'merges')) { + $results['ibuf_merges'] = to_int($row[10]); + } + } + elseif (strpos($line, ', delete mark ') > 0 && strpos($prev_line, 'merged operations:') === 0 ) { + # Output of show engine innodb status has changed in 5.5 + # merged operations: + # insert 593983, delete mark 387006, delete 73092 + $results['ibuf_inserts'] = to_int($row[1]); + $results['ibuf_merged'] = to_int($row[1]) + to_int($row[4]) + to_int($row[6]); } elseif (strpos($line, ' merged recs, ') > 0 ) { # 19817685 inserts, 19817684 merged recs, 3552620 merges @@ -976,40 +1166,41 @@ function get_innodb_array($text) { } # BUFFER POOL AND MEMORY - elseif (strpos($line, "Total memory allocated") === 0 ) { + elseif (strpos($line, "Total memory allocated") === 0 && strpos($line, "in additional pool allocated") > 0 ) { # Total memory allocated 
29642194944; in additional pool allocated 0 + # Total memory allocated by read views 96 $results['total_mem_alloc'] = to_int($row[3]); $results['additional_pool_alloc'] = to_int($row[8]); } - elseif (strpos($line, 'Adaptive hash index ') === 0 ) { + elseif(strpos($line, 'Adaptive hash index ') === 0 ) { # Adaptive hash index 1538240664 (186998824 + 1351241840) $results['adaptive_hash_memory'] = to_int($row[3]); } - elseif (strpos($line, 'Page hash ') === 0 ) { + elseif(strpos($line, 'Page hash ') === 0 ) { # Page hash 11688584 $results['page_hash_memory'] = to_int($row[2]); } - elseif (strpos($line, 'Dictionary cache ') === 0 ) { + elseif(strpos($line, 'Dictionary cache ') === 0 ) { # Dictionary cache 145525560 (140250984 + 5274576) $results['dictionary_cache_memory'] = to_int($row[2]); } - elseif (strpos($line, 'File system ') === 0 ) { + elseif(strpos($line, 'File system ') === 0 ) { # File system 313848 (82672 + 231176) $results['file_system_memory'] = to_int($row[2]); } - elseif (strpos($line, 'Lock system ') === 0 ) { + elseif(strpos($line, 'Lock system ') === 0 ) { # Lock system 29232616 (29219368 + 13248) $results['lock_system_memory'] = to_int($row[2]); } - elseif (strpos($line, 'Recovery system ') === 0 ) { + elseif(strpos($line, 'Recovery system ') === 0 ) { # Recovery system 0 (0 + 0) $results['recovery_system_memory'] = to_int($row[2]); } - elseif (strpos($line, 'Threads ') === 0 ) { + elseif(strpos($line, 'Threads ') === 0 ) { # Threads 409336 (406936 + 2400) $results['thread_hash_memory'] = to_int($row[1]); } - elseif (strpos($line, 'innodb_io_pattern ') === 0 ) { + elseif(strpos($line, 'innodb_io_pattern ') === 0 ) { # innodb_io_pattern 0 (0 + 0) $results['innodb_io_pattern_memory'] = to_int($row[1]); } @@ -1057,6 +1248,7 @@ function get_innodb_array($text) { $results['queries_inside'] = to_int($row[0]); $results['queries_queued'] = to_int($row[4]); } + $prev_line = $line; } foreach ( array('spin_waits', 'spin_rounds', 'os_waits') as $key ) { @@ -1067,16 +1259,9 @@ function get_innodb_array($text) { $results['uncheckpointed_bytes'] = big_sub($results['log_bytes_written'], $results['last_checkpoint']); - -# foreach ($results as $key => $value) { -# echo(strtolower($key).":".strtolower($value)."\n"); -# } - - return $results; } - # ============================================================================ # Returns a bigint from two ulint or a single hex number. This is tested in # t/mysql_stats.php and copied, without tests, to ss_get_by_ssh.php. @@ -1121,27 +1306,34 @@ function to_int ( $str ) { # ============================================================================ # Wrap mysql_query in error-handling, and instead of returning the result, # return an array of arrays in the result. +# ============================================================================ + # ============================================================================ function run_query($sql, $conn) { global $debug; debug($sql); - $result = @mysqli_query( $conn, $sql); - if ($debug ) { - $error = @((is_object($conn)) ? mysqli_error($conn) : (($___mysqli_res = mysqli_connect_error()) ? 
$___mysqli_res : false));
-      if ($error ) {
+   $result = @mysqli_query($conn, $sql);
+   if ( $debug && strpos($sql, 'SHOW SLAVE STATUS ') === false ) {
+      $error = @mysqli_error($conn);
+      if ( $error ) {
          debug(array($sql, $error));
          die("SQLERR $error in $sql");
       }
    }
    $array = array();
-   while ( $row = @mysqli_fetch_array($result) ) {
-      $array[] = $row;
+   $count = @mysqli_num_rows($result);
+   if ( $count > 10000 ) {
+      debug('Abnormal number of rows returned: ' . $count);
+   }
+   else {
+      while ( $row = @mysqli_fetch_array($result) ) {
+         $array[] = $row;
+      }
    }
    debug(array($sql, $array));
    return $array;
 }

-# ============================================================================
 # Safely increments a value that might be null.
 # ============================================================================
 function increment(&$arr, $key, $howmuch) {

From 7fb48df8579a8e113153c1439a4fa92829847d9f Mon Sep 17 00:00:00 2001
From: Daniel Bull
Date: Fri, 27 Oct 2017 06:41:05 +0100
Subject: [PATCH 012/332] Fix: Apache SNMP extend IndexError (#116)

See issue for more information:
https://github.com/librenms/librenms-agent/issues/95
---
 snmp/apache-stats.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/snmp/apache-stats.py b/snmp/apache-stats.py
index 9fb62e644..378d858e8 100755
--- a/snmp/apache-stats.py
+++ b/snmp/apache-stats.py
@@ -60,7 +60,7 @@
     elif fields[0] == 'Total kBytes':
         # turn into base (byte) value
         params[fields[0]] = int(fields[1])*1024
-    else:
+    elif len(fields) > 1:
         # just store everything else
         params[fields[0]] = fields[1]

From 3b9d632a8d6dbd6ac3f42f75ba36faa235ef4440 Mon Sep 17 00:00:00 2001
From: arrmo
Date: Mon, 4 Dec 2017 14:11:17 -0600
Subject: [PATCH 013/332] hddtemp, ignore devices not supporting SMART (#153)

---
 agent-local/hddtemp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/agent-local/hddtemp b/agent-local/hddtemp
index 9098ec53a..9f776829c 100755
--- a/agent-local/hddtemp
+++ b/agent-local/hddtemp
@@ -32,7 +32,7 @@ if [ "${hddtemp}" != "" ]; then
 	else
 		output=`${hddtemp} -w -q ${disks} 2>/dev/null`
 	fi
-	content=`echo "$output" | awk -F": " 'BEGIN{ ORS="" }{ print "|"$1"|"$2"|"$3"|";} ' | sed 's/[° ]C|/|C|/g' | sed 's/[° ]F|/|F|/g' | tr -cd '\12\14\40-\176'`
+	content=`echo "$output" | awk '{ if ($0 !~ /not available/) { print $0 } }' | awk -F": " 'BEGIN{ ORS="" }{ print "|"$1"|"$2"|"$3"|";} ' | sed 's/[° ]C|/|C|/g' | sed 's/[° ]F|/|F|/g' | tr -cd '\12\14\40-\176'`
 	if [ "${content}" != "" ]; then
 		echo '<<<hddtemp>>>'
 		echo ${content}
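To make the new filter concrete, a sample run with made-up device names and
temperatures (the "device: model: temp" layout is the hddtemp output format
the awk/sed pipeline above assumes):

    output='/dev/sda: SAMPLE DISK: 38°C
    /dev/sdb: SAMPLE SSD: S.M.A.R.T. not available'
    echo "$output" \
        | awk '{ if ($0 !~ /not available/) { print $0 } }' \
        | awk -F": " 'BEGIN{ ORS="" }{ print "|"$1"|"$2"|"$3"|";} ' \
        | sed 's/[° ]C|/|C|/g'
    # prints: |/dev/sda|SAMPLE DISK|38|C|
    # the device without SMART support is silently dropped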
From 8920cd3f290e8c13a3bb7db96ceb8db05845869d Mon Sep 17 00:00:00 2001
From: Slashdoom <5092581+slashdoom@users.noreply.github.com>
Date: Wed, 13 Dec 2017 16:13:10 +1300
Subject: [PATCH 014/332] freeradius.sh: new agent for incoming main PR (#151)

* Update os-updates.sh

* Update os-updates.sh

* Update os-updates.sh

* Create freeradius.sh

* Update freeradius.sh

* Update freeradius.sh
---
 snmp/freeradius.sh | 64 ++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 64 insertions(+)
 create mode 100644 snmp/freeradius.sh

diff --git a/snmp/freeradius.sh b/snmp/freeradius.sh
new file mode 100644
index 000000000..088acf3c1
--- /dev/null
+++ b/snmp/freeradius.sh
@@ -0,0 +1,64 @@
+#!/usr/bin/env bash
+
+# Set 0 for SNMP extend; set to 1 for Check_MK agent
+AGENT=0
+
+# Set FreeRADIUS status_server details
+RADIUS_SERVER='localhost'
+RADIUS_PORT='18121'
+RADIUS_KEY='adminsecret'
+
+# Default radclient access request, shouldn't need to be changed
+RADIUS_STATUS_CMD='Message-Authenticator = 0x00, FreeRADIUS-Statistics-Type = 31, Response-Packet-Type = Access-Accept'
+
+# Paths for grep and radclient executables, should work if within PATH
+BIN_GREP="$(command -v grep)"
+BIN_RADCLIENT="$(command -v radclient)"
+
+if [ $AGENT == 1 ]; then
+  echo "<<<freeradius>>>"
+fi
+
+RESULT=`echo "$RADIUS_STATUS_CMD" | $BIN_RADCLIENT -x $RADIUS_SERVER:$RADIUS_PORT status $RADIUS_KEY`
+
+echo $RESULT | grep -o 'FreeRADIUS-Total-Access-Requests = [[:digit:]]*'
+echo $RESULT | grep -o 'FreeRADIUS-Total-Access-Accepts = [[:digit:]]*'
+echo $RESULT | grep -o 'FreeRADIUS-Total-Access-Rejects = [[:digit:]]*'
+echo $RESULT | grep -o 'FreeRADIUS-Total-Access-Challenges = [[:digit:]]*'
+echo $RESULT | grep -o 'FreeRADIUS-Total-Auth-Responses = [[:digit:]]*'
+echo $RESULT | grep -o 'FreeRADIUS-Total-Auth-Duplicate-Requests = [[:digit:]]*'
+echo $RESULT | grep -o 'FreeRADIUS-Total-Auth-Malformed-Requests = [[:digit:]]*'
+echo $RESULT | grep -o 'FreeRADIUS-Total-Auth-Invalid-Requests = [[:digit:]]*'
+echo $RESULT | grep -o 'FreeRADIUS-Total-Auth-Dropped-Requests = [[:digit:]]*'
+echo $RESULT | grep -o 'FreeRADIUS-Total-Auth-Unknown-Types = [[:digit:]]*'
+echo $RESULT | grep -o 'FreeRADIUS-Total-Accounting-Requests = [[:digit:]]*'
+echo $RESULT | grep -o 'FreeRADIUS-Total-Accounting-Responses = [[:digit:]]*'
+echo $RESULT | grep -o 'FreeRADIUS-Total-Acct-Duplicate-Requests = [[:digit:]]*'
+echo $RESULT | grep -o 'FreeRADIUS-Total-Acct-Malformed-Requests = [[:digit:]]*'
+echo $RESULT | grep -o 'FreeRADIUS-Total-Acct-Invalid-Requests = [[:digit:]]*'
+echo $RESULT | grep -o 'FreeRADIUS-Total-Acct-Dropped-Requests = [[:digit:]]*'
+echo $RESULT | grep -o 'FreeRADIUS-Total-Acct-Unknown-Types = [[:digit:]]*'
+echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Access-Requests = [[:digit:]]*'
+echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Access-Accepts = [[:digit:]]*'
+echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Access-Rejects = [[:digit:]]*'
+echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Access-Challenges = [[:digit:]]*'
+echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Auth-Responses = [[:digit:]]*'
+echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Auth-Duplicate-Requests = [[:digit:]]*'
+echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Auth-Malformed-Requests = [[:digit:]]*'
+echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Auth-Invalid-Requests = [[:digit:]]*'
+echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Auth-Dropped-Requests = [[:digit:]]*'
+echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Auth-Unknown-Types = [[:digit:]]*'
+echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Accounting-Requests = [[:digit:]]*'
+echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Accounting-Responses = [[:digit:]]*'
+echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Acct-Duplicate-Requests = [[:digit:]]*'
+echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Acct-Malformed-Requests = [[:digit:]]*'
+echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Acct-Invalid-Requests = [[:digit:]]*'
+echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Acct-Dropped-Requests = [[:digit:]]*'
+echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Acct-Unknown-Types = [[:digit:]]*'
+echo $RESULT | grep -o 'FreeRADIUS-Queue-Len-Internal = [[:digit:]]*'
+echo $RESULT | grep -o 'FreeRADIUS-Queue-Len-Proxy = [[:digit:]]*'
+echo $RESULT | grep -o 'FreeRADIUS-Queue-Len-Auth = [[:digit:]]*'
+echo $RESULT | grep -o 'FreeRADIUS-Queue-Len-Acct = [[:digit:]]*'
+echo $RESULT | grep -o 'FreeRADIUS-Queue-Len-Detail = [[:digit:]]*'
+echo $RESULT | grep -o 'FreeRADIUS-Queue-PPS-In = [[:digit:]]*'
+echo $RESULT | grep -o 'FreeRADIUS-Queue-PPS-Out = [[:digit:]]*'
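Before wiring the new agent up, the status query it issues can be run by hand
to confirm FreeRADIUS answers; the host, port and secret below are the
script's defaults and must match your status_server virtual server:

    echo 'Message-Authenticator = 0x00, FreeRADIUS-Statistics-Type = 31, Response-Packet-Type = Access-Accept' \
        | radclient -x localhost:18121 status adminsecret

For SNMP polling, the script is then typically referenced from snmpd.conf
with an extend line along the lines of "extend freeradius
/etc/snmp/freeradius.sh" (exact name and path depend on your install).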
From c7cae0765e0f5072fdf3dd224f357290e2697fb5 Mon Sep 17 00:00:00 2001
From: VVelox
Date: Sat, 30 Dec 2017 05:39:36 -0600
Subject: [PATCH 015/332] update the fail2ban stuff (#155)

Dropping firewall checking as the new fail2ban uses pf and anchors on
FreeBSD, which, while esoteric as fuck, works nicely and is reliable.
---
 snmp/fail2ban | 250 +++++++++++++++++++++++++++++---------------------
 1 file changed, 146 insertions(+), 104 deletions(-)

diff --git a/snmp/fail2ban b/snmp/fail2ban
index 95cf9e31c..117d2c162 100644
--- a/snmp/fail2ban
+++ b/snmp/fail2ban
@@ -1,68 +1,103 @@
 #!/usr/bin/env perl
+# Author: Zane C. Bowers-Hadley

-# Add this to your snmpd.conf as below.
-# extend fail2ban /etc/snmp/fail2ban
-#
-# Then add to your cron tab, if you wish to use caching.
-# */3 * * * * /etc/snmp/fail2ban.pl -u
+# https://docs.librenms.org/#Extensions/Applications/#fail2ban
+# See the above for additional information not documented in the POD below.

-#make sure this path is correct
-my $f2bc="/usr/bin/env fail2ban-client";
+=head1 DESCRIPTION

-#make sure this path is correct
-my $iptablesPath="/usr/bin/env iptables";
+A basic SNMP extend for polling fail2ban for LibreNMS.

-# The cache file to use, if using caching.
-my $cache='/var/cache/fail2ban';
+=head1 SWITCHES
+
+=head2 -c
+
+Prints the cache file.
+
+=head2 -C
+
+Uses the specified file as the cache file.
+
+If not specified, /var/cache/fail2ban is used.
+
+=head2 -f
+
+This is the path to the fail2ban-client if needed.
+
+If not specified, "/usr/bin/env fail2ban-client" is used.
+
+=head2 -u
+
+Updates the cache.
+
+=head2 -U
+
+When used with -c, allows attempted cache updating if the file is older
+than 360 seconds or does not exist.
+
+=head1 CRON EXAMPLE
+
+    */3 * * * * /etc/snmp/fail2ban -u
+
+or
+
+    */3 * * * * /etc/snmp/fail2ban -u -C /foo/bar/cache
+
+3 minutes is used as LibreNMS runs every 5 minutes; this helps ensure it
+is most likely up to date between runs.
+
+
+=head1 SNMPD SETUP EXAMPLES
+
+    extend fail2ban /etc/snmp/fail2ban

+The above will set it up for basic uncached usage.
+
+This is likely fine for most configurations.
+
+    extend fail2ban /etc/snmp/fail2ban -c
+
+Will use the cache.
+
+    extend fail2ban /etc/snmp/fail2ban -c -U
+
+Will use the cache and update if needed.
+
+    extend fail2ban /etc/snmp/fail2ban -f /foo/bin/fail2ban-client
+
+Run it with fail2ban installed under /foo, with the path to
+fail2ban-client being /foo/bin/fail2ban-client.
+
+=cut

-
-##
-## you should not have to touch anything below this
-##
 use strict;
 use warnings;
 use Getopt::Std;

+#fail2ban-client path
+my $f2bc="/usr/bin/env fail2ban-client";
+
+#the path to the cache
+my $cache='/var/cache/fail2ban';
+
 $Getopt::Std::STANDARD_HELP_VERSION = 1;
 sub main::VERSION_MESSAGE {
-	print "fail2ban-client SNMP extend 0.0.0\n";
+	print "fail2ban-client SNMP extend 1.0.0\n";
 };
-
 sub main::HELP_MESSAGE {
 	print "\n".
-		"-u   Update '".$cache."'\n";
+		"-c   Print from the cache.\n".
+		"-C   Use this as the cache file.\n".
+		"-f   The fail2ban-client path if needed.".
+		"-u   Update the cache, '".$cache."'\n".
+		"-U   When used with -c, allow update of the cache file if it does not exist or is older than 360 seconds.".
+		"\n".
+ "Unless -c or -u is given, it just talks to fail2ban-client and prints the results.\n"; } -#gets the options -my %opts=(); -getopts('u', \%opts); - -#if set to 1, no cache will be written and it will be printed instead -my $noWrite=0; - -if ( ! defined( $opts{u} ) ){ - my ($dev,$ino,$mode,$nlink,$uid,$gid,$rdev,$size, - $atime,$mtime,$ctime,$blksize,$blocks) = stat($cache); - - if (( -f $cache ) && defined( $mtime ) && ( (time-$mtime) < 360 )){ - my $old=''; - open(my $readfh, "<", $cache) or die "Can't open '".$cache."'"; - # if this is over 2048, something is most likely wrong - read($readfh , $old , 10240); - close($readfh); - print $old; - }else{ - $opts{u}=1; - $noWrite=1; - } -} - -if (defined( $opts{u} )){ - +#generats stats +sub stats{ #gets a list of jails my $jailsOutput=`$f2bc status`; my @jailsOutputA=split(/\n/, $jailsOutput); @@ -91,69 +126,76 @@ if (defined( $opts{u} )){ $int++; } - ## - ## process the firewall - ## - - my $os=`uname`; + return $total."\n".$toReturn; +} - my $firewalled=0; - - if ( $os =~ '^FreeBSD' ){ - $firewalled=`/sbin/pfctl -t $freebsdPFtable -T show | /usr/bin/grep -c .`; - chomp($firewalled); - }; +#updates $cache +sub cacheUpdate{ + my $stats=stats; + + open(my $writefh, ">", $cache) or die "Can't open '".$cache."'"; + print $writefh $stats; + close($writefh); +} + +#prints $cache +sub cachePrint{ + my $old=''; + open(my $readfh, "<", $cache) or die "Can't open '".$cache."'"; + # if this is over 2048, something is most likely wrong + read($readfh , $old , 10240); + close($readfh); + print $old; +} + +#gets the options +my %opts=(); +getopts('uUcC:f:', \%opts); + +#use custom cache file if needed +if ( defined( $opts{C} ) ){ + $cache=$opts{C}; +} + +#use custom fail2ban location if needed +if ( defined( $opts{f} ) ){ + $f2bc=$opts{f}; +} + +#use the cache +if ( defined( $opts{c} ) ){ + my ($dev,$ino,$mode,$nlink,$uid,$gid,$rdev,$size, + $atime,$mtime,$ctime,$blksize,$blocks) = stat($cache); - if ( $os =~ '^Linux' ){ - my $iptables=`$iptablesPath -L -n`; - my @iptablesA=split( /\n/, $iptables ); - - #check each line - my $int=0; - my $count=0; - while( defined( $iptablesA[$int] ) ){ - my $line=$iptablesA[$int]; - - #stop counting if we have a blank line - if ( $line =~ /^$/ ){ - $count=0; - } - - #count /^REJECT/ lines, if we are counting - if ( ( $line =~ /^REJECT/ ) && ( $count ) ){ - $firewalled++; - } - - #check if this is a chain we should count - if ( $line =~ /^Chain/ ){ - my $linuxChainsInt=0; - # check if any of the specified names hit and if so start counting - while( defined( $linuxChains[$linuxChainsInt] ) ){ - my $chain=$linuxChains[$linuxChainsInt]; - if ( $line =~ /^Chain $chain/ ){ - $count=1; - } - - $linuxChainsInt++; - } - } - - $int++; + if (( -f $cache ) && defined( $mtime ) && ( (time-$mtime) < 360 )){ + #cache exists and time is fine + cachePrint; + exit 0; + }else{ + #cache does not exist or is old + if ( $opts{U} ){ + #allowed to update it via -U + cacheUpdate; + cachePrint; + exit 0; + }else{ + #-U not given + warn("'".$cache."' does not exist or is to old and -U was not given"); + exit 1; } - } + warn('we should never get here...'); + exit 2; +} - ## - ## render the output - ## - if ( ! 
$noWrite ){ - open(my $writefh, ">", $cache) or die "Can't open '".$cache."'"; - print $writefh $total."\n".$firewalled."\n".$toReturn; - close($writefh); - }else{ - print $total."\n".$firewalled."\n".$toReturn; - } - +#update the cache +if (defined( $opts{u} )){ + cacheUpdate; exit 0; } + +#no cache opions given, just print it +print &stats; + +exit 0; From bacaca0be4104cc003222b941e433d5470cae76d Mon Sep 17 00:00:00 2001 From: VVelox Date: Sat, 30 Dec 2017 05:42:37 -0600 Subject: [PATCH 016/332] ZFS SNMP agent :3 <3 (#156) * Add it as it currently is. Needs to be moved over to JSON * rename it to zfs-freebsd as it is FreeBSD specific now uses JSON * misc. updates and document it all * minor spelling correction --- snmp/zfs-freebsd | 266 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 266 insertions(+) create mode 100755 snmp/zfs-freebsd diff --git a/snmp/zfs-freebsd b/snmp/zfs-freebsd new file mode 100755 index 000000000..db6f33b2d --- /dev/null +++ b/snmp/zfs-freebsd @@ -0,0 +1,266 @@ +#!/usr/bin/env perl + +=head1 DESCRIPTION + +This is a SNMP extend for ZFS and FreeBSD for use with LibreNMS. + +For more information, see L. + +=head1 SWITCHES + +=head2 -p + +Pretty print the JSON. + +=head1 SNMPD SETUP EXAMPLES + + extend zfs /etc/snmp/zfs-freebsd + +=cut + +#Copyright (c) 2017, Zane C. Bowers-Hadley +#All rights reserved. +# +#Redistribution and use in source and binary forms, with or without modification, +#are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +#ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +#WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +#IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, +#INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +#BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +#LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +#OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +#THE POSSIBILITY OF SUCH DAMAGE. + +# Many thanks to Ben Rockwood, Jason J. Hellenthal, and Martin Matuska +# for zfs-stats and figuring out the math for all the stats + +use strict; +use warnings; +use JSON; +use Getopt::Std; + +$Getopt::Std::STANDARD_HELP_VERSION = 1; +sub main::VERSION_MESSAGE { + print "FreeBSD ZFS stats extend 0.0.0\n"; +} + +sub main::HELP_MESSAGE { + +} + +#this will be dumped to json at the end +my %tojson; + +#gets the options +my %opts=(); +getopts('p', \%opts); + +my $sysctls; +my @to_pull=( + 'kstat.zfs', + 'vfs.zfs', + ); +my @sysctls_pull = `/sbin/sysctl -q @to_pull`; +foreach my $stat (@sysctls_pull) { + chomp( $stat ); + my ( $var, $val ) = split(/:/, $stat, 2); + + $val =~ s/^ //; + $sysctls->{$var}=$val; +} + +# does not seem to exist for me, but some of these don't seem to be created till needed +if ( ! 
defined( $sysctls->{"kstat.zfs.misc.arcstats.recycle_miss"} ) ) { + $sysctls->{"kstat.zfs.misc.arcstats.recycle_miss"}=0; +} + +## +## ARC misc +## +$tojson{deleted}=$sysctls->{"kstat.zfs.misc.arcstats.deleted"}; +$tojson{evict_skip}=$sysctls->{"kstat.zfs.misc.arcstats.evict_skip"}; +$tojson{mutex_skip}=$sysctls->{'kstat.zfs.misc.arcstats.mutex_miss'}; +$tojson{recycle_miss}=$sysctls->{"kstat.zfs.misc.arcstats.recycle_miss"}; + +## +## ARC size +## +my $target_size_percent = $sysctls->{"kstat.zfs.misc.arcstats.c"} / $sysctls->{"kstat.zfs.misc.arcstats.c_max"} * 100; +my $arc_size_percent = $sysctls->{"kstat.zfs.misc.arcstats.size"} / $sysctls->{"kstat.zfs.misc.arcstats.c_max"} * 100; +my $target_size_adaptive_ratio = $sysctls->{"kstat.zfs.misc.arcstats.c"} / $sysctls->{"kstat.zfs.misc.arcstats.c_max"}; +my $min_size_percent = $sysctls->{"kstat.zfs.misc.arcstats.c_min"} / $sysctls->{"kstat.zfs.misc.arcstats.c_max"} * 100; + +$tojson{arc_size}=$sysctls->{"kstat.zfs.misc.arcstats.size"}; +$tojson{target_size_max}=$sysctls->{"kstat.zfs.misc.arcstats.c_max"}; +$tojson{target_size_min}=$sysctls->{"kstat.zfs.misc.arcstats.c_min"}; +$tojson{target_size}=$sysctls->{"kstat.zfs.misc.arcstats.c"}; +$tojson{target_size_per}=$target_size_percent; +$tojson{arc_size_per}=$arc_size_percent; +$tojson{target_size_arat}=$target_size_adaptive_ratio; +$tojson{min_size_per}=$min_size_percent; + +## +## ARC size breakdown +## +my $mfu_size; +my $recently_used_percent; +my $frequently_used_percent; +if ( $sysctls->{"kstat.zfs.misc.arcstats.size"} >= $sysctls->{"kstat.zfs.misc.arcstats.size"} ){ + $mfu_size = $sysctls->{"kstat.zfs.misc.arcstats.size"} - $sysctls->{"kstat.zfs.misc.arcstats.p"}; + $recently_used_percent = $sysctls->{"kstat.zfs.misc.arcstats.p"} / $sysctls->{"kstat.zfs.misc.arcstats.size"} * 100; + $frequently_used_percent = $mfu_size / $sysctls->{"kstat.zfs.misc.arcstats.size"} * 100; +}else{ + $mfu_size = $sysctls->{"kstat.zfs.misc.arcstats.c"} - $sysctls->{"kstat.zfs.misc.arcstats.p"}; + $recently_used_percent = $sysctls->{"kstat.zfs.misc.arcstats.p"} / $sysctls->{"kstat.zfs.misc.arcstats.c"} * 100; + $frequently_used_percent = $mfu_size / $sysctls->{"kstat.zfs.misc.arcstats.c"} * 100; +} + +$tojson{mfu_size}=$mfu_size; +$tojson{p}=$sysctls->{"kstat.zfs.misc.arcstats.p"}; +$tojson{rec_used_per}=$recently_used_percent; +$tojson{freq_used_per}=$frequently_used_percent; + +## +## ARC efficiency +## +my $arc_hits = $sysctls->{"kstat.zfs.misc.arcstats.hits"}; +my $arc_misses = $sysctls->{"kstat.zfs.misc.arcstats.misses"}; +my $demand_data_hits = $sysctls->{"kstat.zfs.misc.arcstats.demand_data_hits"}; +my $demand_data_misses = $sysctls->{"kstat.zfs.misc.arcstats.demand_data_misses"}; +my $demand_metadata_hits = $sysctls->{"kstat.zfs.misc.arcstats.demand_metadata_hits"}; +my $demand_metadata_misses = $sysctls->{"kstat.zfs.misc.arcstats.demand_metadata_misses"}; +my $mfu_ghost_hits = $sysctls->{"kstat.zfs.misc.arcstats.mfu_ghost_hits"}; +my $mfu_hits = $sysctls->{"kstat.zfs.misc.arcstats.mfu_hits"}; +my $mru_ghost_hits = $sysctls->{"kstat.zfs.misc.arcstats.mru_ghost_hits"}; +my $mru_hits = $sysctls->{"kstat.zfs.misc.arcstats.mru_hits"}; +my $prefetch_data_hits = $sysctls->{"kstat.zfs.misc.arcstats.prefetch_data_hits"}; +my $prefetch_data_misses = $sysctls->{"kstat.zfs.misc.arcstats.prefetch_data_misses"}; +my $prefetch_metadata_hits = $sysctls->{"kstat.zfs.misc.arcstats.prefetch_metadata_hits"}; +my $prefetch_metadata_misses = $sysctls->{"kstat.zfs.misc.arcstats.prefetch_metadata_misses"}; + +my $anon_hits 
= $arc_hits - ($mfu_hits + $mru_hits + $mfu_ghost_hits + $mru_ghost_hits); +my $arc_accesses_total = $arc_hits + $arc_misses; +my $demand_data_total = $demand_data_hits + $demand_data_misses; +my $prefetch_data_total = $prefetch_data_hits + $prefetch_data_misses; +my $real_hits = $mfu_hits + $mru_hits; + +my $cache_hit_percent = $arc_hits / $arc_accesses_total * 100; +my $cache_miss_percent = $arc_misses / $arc_accesses_total * 100; +my $actual_hit_percent = $real_hits / $arc_accesses_total * 100; +my $data_demand_percent = $demand_data_hits / $demand_data_total * 100; + +my $data_prefetch_percent; +if ( $prefetch_data_total != 0 ) { + $data_prefetch_percent = $prefetch_data_hits / $prefetch_data_total * 100; +}else{ + $data_prefetch_percent = 0; +} + +my $anon_hits_percent; +if ( $anon_hits != 0 ) { + $anon_hits_percent = $anon_hits / $arc_hits * 100; +}else{ + $anon_hits_percent=0; +} + +my $mru_percent = $mru_hits / $arc_hits * 100; +my $mfu_percent = $mfu_hits / $arc_hits * 100; +my $mru_ghost_percent = $mru_ghost_hits / $arc_hits * 100; +my $mfu_ghost_percent = $mfu_ghost_hits / $arc_hits * 100; + +my $demand_hits_percent = $demand_data_hits / $arc_hits * 100; +my $prefetch_hits_percent = $prefetch_data_hits / $arc_hits * 100; +my $metadata_hits_percent = $demand_metadata_hits / $arc_hits * 100; +my $prefetch_metadata_hits_percent = $prefetch_metadata_hits / $arc_hits * 100; + +my $demand_misses_percent = $demand_data_misses / $arc_misses * 100; +my $prefetch_misses_percent = $prefetch_data_misses / $arc_misses * 100; +my $metadata_misses_percent = $demand_metadata_misses / $arc_misses * 100; +my $prefetch_metadata_misses_percent = $prefetch_metadata_misses / $arc_misses * 100; + +# ARC misc. efficient stats +$tojson{arc_hits}=$arc_hits; +$tojson{arc_misses}=$arc_misses; +$tojson{demand_data_hits}=$demand_data_hits; +$tojson{demand_data_misses}=$demand_data_misses; +$tojson{demand_meta_hits}=$demand_metadata_hits; +$tojson{demand_meta_misses}=$demand_metadata_misses; +$tojson{mfu_ghost_hits}=$mfu_ghost_hits; +$tojson{mfu_hits}=$mfu_hits; +$tojson{mru_ghost_hits}=$mru_ghost_hits; +$tojson{mru_hits}=$mru_hits; +$tojson{pre_data_hits}=$prefetch_data_hits; +$tojson{pre_data_misses}=$prefetch_data_misses; +$tojson{pre_meta_hits}=$prefetch_metadata_hits; +$tojson{pre_meta_misses}=$prefetch_metadata_misses; +$tojson{anon_hits}=$anon_hits; +$tojson{arc_accesses_total}=$arc_accesses_total; +$tojson{demand_data_total}=$demand_data_total; +$tojson{pre_data_total}=$prefetch_data_total; +$tojson{real_hits}=$real_hits; + +# ARC efficient percents +$tojson{cache_hits_per}=$cache_hit_percent; +$tojson{cache_miss_per}=$cache_miss_percent; +$tojson{actual_hit_per}=$actual_hit_percent; +$tojson{data_demand_per}=$data_demand_percent; +$tojson{data_pre_per}=$data_prefetch_percent; +$tojson{anon_hits_per}=$anon_hits_percent; +$tojson{mru_per}=$mru_percent; +$tojson{mfu_per}=$mfu_percent; +$tojson{mru_ghost_per}=$mru_ghost_percent; +$tojson{mfu_ghost_per}=$mfu_ghost_percent; +$tojson{demand_hits_per}=$demand_hits_percent; +$tojson{pre_hits_per}=$prefetch_hits_percent; +$tojson{meta_hits_per}=$metadata_hits_percent; +$tojson{pre_meta_hits_per}=$prefetch_metadata_hits_percent; +$tojson{demand_misses_per}=$demand_misses_percent; +$tojson{pre_misses_per}=$prefetch_misses_percent; +$tojson{meta_misses_per}=$metadata_misses_percent; +$tojson{pre_meta_misses_per}=$prefetch_metadata_misses_percent; + +#process each pool and shove them into JSON +my $zpool_output=`/sbin/zpool list -pH`; +my @pools=split( /\n/, 
$zpool_output ); +my $pools_int=0; +my @toShoveIntoJSON; +while ( defined( $pools[$pools_int] ) ) { + my %newPool; + + my $pool=$pools[$pools_int]; + $pool =~ s/\t/,/g; + $pool =~ s/\,\-\,/\,0\,/g; + $pool =~ s/\%//g; + $pool =~ s/\,([0-1\.]*)x\,/,$1,/; + + ( $newPool{name}, $newPool{size}, $newPool{alloc}, $newPool{free}, $newPool{expandsz}, $newPool{frag}, $newPool{cap}, $newPool{dedup} )=split(/\,/, $pool); + + push(@toShoveIntoJSON, \%newPool); + + $pools_int++; +} +$tojson{pools}=\@toShoveIntoJSON; + +my $j=JSON->new; + +if ( $opts{p} ){ + $j->pretty(1); +} + +print $j->encode( \%tojson ); + +if (! $opts{p} ){ + print "\n"; +} + +exit 0; From fd9fd178a4b43feafb414822167b3033693c8efc Mon Sep 17 00:00:00 2001 From: crcro Date: Sat, 6 Jan 2018 22:06:45 +0200 Subject: [PATCH 017/332] extend: powerdns-dnsdist (#158) * powerdns-dnsdist app * fix script in help * removed local data manipulation * again name of file in script help * removed personal api info --- snmp/powerdns-dnsdist | 165 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 165 insertions(+) create mode 100644 snmp/powerdns-dnsdist diff --git a/snmp/powerdns-dnsdist b/snmp/powerdns-dnsdist new file mode 100644 index 000000000..87eda58bd --- /dev/null +++ b/snmp/powerdns-dnsdist @@ -0,0 +1,165 @@ +#!/usr/bin/env bash +set -euo pipefail +IFS=$'\n\t' + +API_AUTH_USER="admin" +API_AUTH_PASS="" +API_URL="" +API_STATS="jsonstat?command=stats" +TMP_FILE="/tmp/dnsdist_current.stats" + +#/ Description: BASH script to get PowerDNS dnsdist stats +#/ Examples: ./powerdns-dnsdist +#/ Options: +#/ --help: Display this help message +#/ --debug: Brief check of system env and script vars + +usage() { + grep '^#/' "$0" | cut -c4- ; + exit 0 ; +} + +debug() { + if [ -z "$API_AUTH_USER" ]; then + echo '[error] var API_AUTH_USER is not set' + else + echo '[ok] var API_AUTH_USER is set' + fi + + if [ -z "$API_AUTH_PASS" ]; then + echo '[error] var API_AUTH_PASS is not set' + else + echo '[ok] var API_AUTH_PASS is set' + fi + + if [ -z "$API_URL" ]; then + echo '[error] var API_URL is not set' + else + echo '[ok] var API_URL is set' + fi + + if [ -z "$API_STATS" ]; then + echo '[error] var API_STATS is not set' + else + echo '[ok] var API_STATS is set' + fi + + if ! [ -x "$(command -v curl)" ]; then + echo '[error] bin curl not available, please install it' + else + echo '[ok] bin curl' + fi + + if ! [ -x "$(command -v jq)" ]; then + echo '[error] bin jq not available, please install it' + else + echo '[ok] bin jq' + fi + + if ! [ -x "$(command -v cat)" ]; then + echo '[error] bin cat not available, please install it' + else + echo '[ok] bin cat' + fi +} + +exportdata() { + # get current data + curl -s -u$API_AUTH_USER:$API_AUTH_PASS $API_URL$API_STATS | jq '.' 
> $TMP_FILE + + # generate export values + JSON_VALUES=$(cat $TMP_FILE) + + STAT_CACHE_HIT=$(echo $JSON_VALUES | jq '."cache-hits"') + echo $STAT_CACHE_HIT + + STAT_CACHE_MISS=$(echo $JSON_VALUES | jq '."cache-misses"') + echo $STAT_CACHE_MISS + + STAT_DOWNSTREAM_ERR=$(echo $JSON_VALUES | jq '."downstream-send-errors"') + echo $STAT_DOWNSTREAM_ERR + + STAT_DOWNSTREAM_TIMEOUT=$(echo $JSON_VALUES | jq '."downstream-timeouts"') + echo $STAT_DOWNSTREAM_TIMEOUT + + STAT_DYNAMIC_BLOCK_SIZE=$(echo $JSON_VALUES | jq '."dyn-block-nmg-size"') + echo $STAT_DYNAMIC_BLOCK_SIZE + + STAT_DYNAMIC_BLOCK=$(echo $JSON_VALUES | jq '."dyn-blocked"') + echo $STAT_DYNAMIC_BLOCK + + STAT_QUERIES_COUNT=$(echo $JSON_VALUES | jq '.queries') + echo $STAT_QUERIES_COUNT + + STAT_QUERIES_RECURSIVE=$(echo $JSON_VALUES | jq '.rdqueries') + echo $STAT_QUERIES_RECURSIVE + + STAT_QUERIES_EMPTY=$(echo $JSON_VALUES | jq '."empty-queries"') + echo $STAT_QUERIES_EMPTY + + STAT_QUERIES_DROP_NO_POLICY=$(echo $JSON_VALUES | jq '."no-policy"') + echo $STAT_QUERIES_DROP_NO_POLICY + + STAT_QUERIES_DROP_NC=$(echo $JSON_VALUES | jq '."noncompliant-queries"') + echo $STAT_QUERIES_DROP_NC + + STAT_QUERIES_DROP_NC_ANSWER=$(echo $JSON_VALUES | jq '."noncompliant-responses"') + echo $STAT_QUERIES_DROP_NC_ANSWER + + STAT_QUERIES_SELF_ANSWER=$(echo $JSON_VALUES | jq '."self-answered"') + echo $STAT_QUERIES_SELF_ANSWER + + STAT_QUERIES_SERVFAIL=$(echo $JSON_VALUES | jq '."servfail-responses"') + echo $STAT_QUERIES_SERVFAIL + + STAT_QUERIES_FAILURE=$(echo $JSON_VALUES | jq '."trunc-failures"') + echo $STAT_QUERIES_FAILURE + + STAT_QUERIES_ACL_DROPS=$(echo $JSON_VALUES | jq '."acl-drops"') + echo $STAT_QUERIES_ACL_DROPS + + STAT_RULE_DROP=$(echo $JSON_VALUES | jq '."rule-drop"') + echo $STAT_RULE_DROP + + STAT_RULE_NXDOMAIN=$(echo $JSON_VALUES | jq '."rule-nxdomain"') + echo $STAT_RULE_NXDOMAIN + + STAT_RULE_REFUSED=$(echo $JSON_VALUES | jq '."rule-refused"') + echo $STAT_RULE_REFUSED + + STAT_LATENCY_AVG_100=$(echo $JSON_VALUES | jq '."latency-avg100"') + echo $STAT_LATENCY_AVG_100 + + STAT_LATENCY_AVG_1000=$(echo $JSON_VALUES | jq '."latency-avg1000"') + echo $STAT_LATENCY_AVG_1000 + + STAT_LATENCY_AVG_10000=$(echo $JSON_VALUES | jq '."latency-avg10000"') + echo $STAT_LATENCY_AVG_10000 + + STAT_LATENCY_AVG_1000000=$(echo $JSON_VALUES | jq '."latency-avg1000000"') + echo $STAT_LATENCY_AVG_1000000 + + STAT_LATENCY_SLOW=$(echo $JSON_VALUES | jq '."latency-slow"') + echo $STAT_LATENCY_SLOW + + STAT_LATENCY_0_1=$(echo $JSON_VALUES | jq '."latency0-1"') + echo $STAT_LATENCY_0_1 + + STAT_LATENCY_1_10=$(echo $JSON_VALUES | jq '."latency1-10"') + echo $STAT_LATENCY_1_10 + + STAT_LATENCY_10_50=$(echo $JSON_VALUES | jq '."latency10-50"') + echo $STAT_LATENCY_10_50 + + STAT_LATENCY_50_100=$(echo $JSON_VALUES | jq '."latency50-100"') + echo $STAT_LATENCY_50_100 + + STAT_LATENCY_100_1000=$(echo $JSON_VALUES | jq '."latency100-1000"') + echo $STAT_LATENCY_100_1000 +} + +if [ -z $* ]; then + exportdata +fi +expr "$*" : ".*--help" > /dev/null && usage +expr "$*" : ".*--debug" > /dev/null && debug From bbd3b1309aaa3ecaf6f502e92718719539715c58 Mon Sep 17 00:00:00 2001 From: endofline Date: Sun, 18 Feb 2018 22:33:42 +0200 Subject: [PATCH 018/332] Fix Command_Timeout missing from SMART output (#163) --- snmp/smart | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/smart b/snmp/smart index 35e059916..ab07690c3 100755 --- a/snmp/smart +++ b/snmp/smart @@ -299,7 +299,7 @@ while ( defined($disks[$int]) ) { $total=$total+$rawA[$rawAint]; $rawAint++; 
} - + $IDs{$id}=$total; } # 190, airflow temp From 3a8462461595535a53554b0ad66bc922118e83d1 Mon Sep 17 00:00:00 2001 From: endofline Date: Tue, 27 Feb 2018 23:10:35 +0200 Subject: [PATCH 019/332] Replace disk identifier with disk serial in S.M.A.R.T snmp script (#164) --- snmp/smart | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/snmp/smart b/snmp/smart index ab07690c3..567e509ba 100755 --- a/snmp/smart +++ b/snmp/smart @@ -239,6 +239,7 @@ my $toReturn=''; my $int=0; while ( defined($disks[$int]) ) { my $disk=$disks[$int]; + my $disk_sn=$disk; my $output=`$smartctl -A /dev/$disk`; my %IDs=( '5'=>'null', @@ -329,8 +330,13 @@ while ( defined($disks[$int]) ) { my $conveyance=scalar grep(/Conveyance/, @outputA); my $selective=scalar grep(/Selective/, @outputA); + # get the drive serial number + while (`$smartctl -i /dev/$disk` =~ /Serial Number:(.*)/g) { + $disk_sn = $1; + $disk_sn =~ s/^\s+|\s+$//g; + } - $toReturn=$toReturn.$disk.','.$IDs{'5'}.','.$IDs{'10'}.','.$IDs{'173'}.','.$IDs{'177'}.','.$IDs{'183'}.','.$IDs{'184'}.','.$IDs{'187'}.','.$IDs{'188'} + $toReturn=$toReturn.$disk_sn.','.$IDs{'5'}.','.$IDs{'10'}.','.$IDs{'173'}.','.$IDs{'177'}.','.$IDs{'183'}.','.$IDs{'184'}.','.$IDs{'187'}.','.$IDs{'188'} .','.$IDs{'190'} .','.$IDs{'194'}.','.$IDs{'196'}.','.$IDs{'197'}.','.$IDs{'198'}.','.$IDs{'199'}.','.$IDs{'231'}.','.$IDs{'233'}.','. $completed.','.$interrupted.','.$read_failure.','.$unknown_failure.','.$extended.','.$short.','.$conveyance.','.$selective."\n"; From 90fd6f60f3aed5f71140d23a8d022ae9909e7473 Mon Sep 17 00:00:00 2001 From: Dylan Underwood Date: Fri, 23 Mar 2018 11:24:02 -0500 Subject: [PATCH 020/332] Should be greater than or equal to (#167) --- snmp/os-updates.sh | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/snmp/os-updates.sh b/snmp/os-updates.sh index 6986c1d8c..33e1f9c62 100755 --- a/snmp/os-updates.sh +++ b/snmp/os-updates.sh @@ -31,7 +31,7 @@ CMD_PACMAN='-Sup' if [ -f $BIN_ZYPPER ]; then # OpenSUSE UPDATES=`$BIN_ZYPPER $CMD_ZYPPER | $BIN_WC $CMD_WC` - if [ $UPDATES -gt 2 ]; then + if [ $UPDATES -ge 2 ]; then echo $(($UPDATES-2)); else echo "0"; @@ -39,7 +39,7 @@ if [ -f $BIN_ZYPPER ]; then elif [ -f $BIN_DNF ]; then # Fedora UPDATES=`$BIN_DNF $CMD_DNF | $BIN_WC $CMD_WC` - if [ $UPDATES -gt 1 ]; then + if [ $UPDATES -ge 1 ]; then echo $(($UPDATES-1)); else echo "0"; @@ -47,7 +47,7 @@ elif [ -f $BIN_DNF ]; then elif [ -f $BIN_PACMAN ]; then # Arch UPDATES=`$BIN_PACMAN $CMD_PACMAN | $BIN_WC $CMD_WC` - if [ $UPDATES -gt 1 ]; then + if [ $UPDATES -ge 1 ]; then echo $(($UPDATES-1)); else echo "0"; @@ -55,7 +55,7 @@ elif [ -f $BIN_PACMAN ]; then elif [ -f $BIN_YUM ]; then # CentOS / Redhat UPDATES=`$BIN_YUM $CMD_YUM | $BIN_WC $CMD_WC` - if [ $UPDATES -gt 1 ]; then + if [ $UPDATES -ge 1 ]; then echo $(($UPDATES-1)); else echo "0"; @@ -63,7 +63,7 @@ elif [ -f $BIN_YUM ]; then elif [ -f $BIN_APT ]; then # Debian / Devuan / Ubuntu UPDATES=`$BIN_APT $CMD_APT | $BIN_GREP $CMD_GREP 'Inst'` - if [ $UPDATES -gt 1 ]; then + if [ $UPDATES -ge 1 ]; then echo $UPDATES; else echo "0"; From 3ddb1d6be6b4a4a0cd006251b497bb1ccf8170e8 Mon Sep 17 00:00:00 2001 From: VVelox Date: Tue, 10 Apr 2018 22:04:07 -0500 Subject: [PATCH 021/332] correct arc size breakdown --- snmp/zfs-freebsd | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/zfs-freebsd b/snmp/zfs-freebsd index db6f33b2d..cea6e1e95 100755 --- a/snmp/zfs-freebsd +++ b/snmp/zfs-freebsd @@ -115,7 +115,7 @@ $tojson{min_size_per}=$min_size_percent; my $mfu_size; my 
$recently_used_percent; my $frequently_used_percent; -if ( $sysctls->{"kstat.zfs.misc.arcstats.size"} >= $sysctls->{"kstat.zfs.misc.arcstats.size"} ){ +if ( $sysctls->{"kstat.zfs.misc.arcstats.size"} >= $sysctls->{"kstat.zfs.misc.arcstats.c"} ){ $mfu_size = $sysctls->{"kstat.zfs.misc.arcstats.size"} - $sysctls->{"kstat.zfs.misc.arcstats.p"}; $recently_used_percent = $sysctls->{"kstat.zfs.misc.arcstats.p"} / $sysctls->{"kstat.zfs.misc.arcstats.size"} * 100; $frequently_used_percent = $mfu_size / $sysctls->{"kstat.zfs.misc.arcstats.size"} * 100; From 8ec6017246edc9784e670d84bd8b52ec094dbb82 Mon Sep 17 00:00:00 2001 From: VVelox Date: Wed, 11 Apr 2018 02:34:39 -0500 Subject: [PATCH 022/332] correct arc size breakdown (#171) --- snmp/zfs-freebsd | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/zfs-freebsd b/snmp/zfs-freebsd index db6f33b2d..cea6e1e95 100755 --- a/snmp/zfs-freebsd +++ b/snmp/zfs-freebsd @@ -115,7 +115,7 @@ $tojson{min_size_per}=$min_size_percent; my $mfu_size; my $recently_used_percent; my $frequently_used_percent; -if ( $sysctls->{"kstat.zfs.misc.arcstats.size"} >= $sysctls->{"kstat.zfs.misc.arcstats.size"} ){ +if ( $sysctls->{"kstat.zfs.misc.arcstats.size"} >= $sysctls->{"kstat.zfs.misc.arcstats.c"} ){ $mfu_size = $sysctls->{"kstat.zfs.misc.arcstats.size"} - $sysctls->{"kstat.zfs.misc.arcstats.p"}; $recently_used_percent = $sysctls->{"kstat.zfs.misc.arcstats.p"} / $sysctls->{"kstat.zfs.misc.arcstats.size"} * 100; $frequently_used_percent = $mfu_size / $sysctls->{"kstat.zfs.misc.arcstats.size"} * 100; From a56adb467a1cdf9785f977420dd07a48335f41b3 Mon Sep 17 00:00:00 2001 From: Serphentas Date: Wed, 11 Apr 2018 10:39:32 +0200 Subject: [PATCH 023/332] add zfs support for linux (#170) * add zfs support for linux * fix pools and anon_hits_per * strip percent sign for pool cap * fix anon_hits json key typo * fix demand_data_hits json key typo * fix comparison as in #169 * fix min_size_percent --- snmp/zfs-linux | 178 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 178 insertions(+) create mode 100644 snmp/zfs-linux diff --git a/snmp/zfs-linux b/snmp/zfs-linux new file mode 100644 index 000000000..f8bc5a3e1 --- /dev/null +++ b/snmp/zfs-linux @@ -0,0 +1,178 @@ +#!/usr/bin/python3 +import json +import subprocess + +def main(args): + res = {} + + ARCSTATS = open('/proc/spl/kstat/zfs/arcstats', 'r') + LINES = ARCSTATS.readlines() + LINES = [x.strip() for x in LINES] + + STATS = {} + for line in LINES[2:]: + splitline = line.split() + STATS[splitline[0]] = int(splitline[2]) + + # ARC misc + DELETED = STATS['deleted'] + EVICT_SKIP = STATS['evict_skip'] + MUTEX_SKIP = STATS['mutex_miss'] + RECYCLE_MISS = STATS['recycle_miss'] if 'recycle_miss' in STATS else 0 + + # ARC size + ARC_SIZE = STATS['size'] + TARGET_SIZE_MAX = STATS['c_max'] + TARGET_SIZE_MIN = STATS['c_min'] + TARGET_SIZE = STATS['c'] + + TARGET_SIZE_PERCENT = TARGET_SIZE / TARGET_SIZE_MAX * 100 + ARC_SIZE_PERCENT = ARC_SIZE / TARGET_SIZE_MAX * 100 + TARGET_SIZE_ADAPTIVE_RATIO = TARGET_SIZE / TARGET_SIZE_MAX + MIN_SIZE_PERCENT = TARGET_SIZE_MIN / TARGET_SIZE_MAX * 100 + + # ARC size breakdown + MFU_SIZE = 0 + RECENTLY_USED_PERCENT = 0 + FREQUENTLY_USED_PERCENT = 0 + P = STATS['p'] + + if ARC_SIZE >= TARGET_SIZE: + MFU_SIZE = ARC_SIZE - P + RECENTLY_USED_PERCENT = P / ARC_SIZE * 100 + FREQUENTLY_USED_PERCENT = MFU_SIZE / ARC_SIZE * 100 + else: + MFU_SIZE = TARGET_SIZE - P + RECENTLY_USED_PERCENT = P / TARGET_SIZE * 100 + FREQUENTLY_USED_PERCENT = MFU_SIZE / TARGET_SIZE * 100 + + + # ARC 
misc. efficient stats + ARC_HITS = STATS['hits'] + ARC_MISSES = STATS['misses'] + DEMAND_DATA_HITS = STATS['demand_data_hits'] + DEMAND_DATA_MISSES = STATS['demand_data_misses'] + DEMAND_METADATA_HITS = STATS['demand_metadata_hits'] + DEMAND_METADATA_MISSES = STATS['demand_metadata_misses'] + MFU_GHOST_HITS = STATS['mfu_ghost_hits'] + MFU_HITS = STATS['mfu_hits'] + MRU_GHOST_HITS = STATS['mru_ghost_hits'] + MRU_HITS = STATS['mru_hits'] + PREFETCH_DATA_HITS = STATS['prefetch_data_hits'] + PREFETCH_DATA_MISSES = STATS['prefetch_data_misses'] + PREFETCH_METADATA_HITS = STATS['prefetch_metadata_hits'] + PREFETCH_METADATA_MISSES = STATS['prefetch_metadata_misses'] + + ANON_HITS = ARC_HITS - (MFU_HITS + MRU_HITS + MFU_GHOST_HITS + MRU_GHOST_HITS) + ARC_ACCESSES_TOTAL = ARC_HITS + ARC_MISSES + DEMAND_DATA_TOTAL = DEMAND_DATA_HITS + DEMAND_DATA_MISSES + PREFETCH_DATA_TOTAL = PREFETCH_DATA_HITS + PREFETCH_DATA_MISSES + REAL_HITS = MFU_HITS + MRU_HITS + + # ARC efficiency percentages + CACHE_HIT_PERCENT = ARC_HITS / ARC_ACCESSES_TOTAL * 100 + CACHE_MISS_PERCENT = ARC_MISSES / ARC_ACCESSES_TOTAL * 100 + ACTUAL_HIT_PERCENT = REAL_HITS / ARC_ACCESSES_TOTAL * 100 + DATA_DEMAND_PERCENT = DEMAND_DATA_HITS / DEMAND_DATA_TOTAL * 100 + + DATA_PREFETCH_PERCENT = PREFETCH_DATA_HITS / PREFETCH_DATA_TOTAL * 100 if PREFETCH_DATA_TOTAL != 0 else 0 + + ANON_HITS_PERCENT = ANON_HITS / ARC_HITS * 100 if ANON_HITS != 0 else 0 + + MRU_PERCENT = MRU_HITS / ARC_HITS * 100 + MFU_PERCENT = MFU_HITS / ARC_HITS * 100 + MRU_GHOST_PERCENT = MRU_GHOST_HITS / ARC_HITS * 100 + MFU_GHOST_PERCENT = MFU_GHOST_HITS / ARC_HITS * 100 + + DEMAND_HITS_PERCENT = DEMAND_DATA_HITS / ARC_HITS * 100 + PREFETCH_HITS_PERCENT = PREFETCH_DATA_HITS / ARC_HITS * 100 + METADATA_HITS_PERCENT = DEMAND_METADATA_HITS / ARC_HITS * 100 + PREFETCH_METADATA_HITS_PERCENT = PREFETCH_METADATA_HITS / ARC_HITS * 100 + + DEMAND_MISSES_PERCENT = DEMAND_DATA_MISSES / ARC_MISSES * 100 + PREFETCH_MISSES_PERCENT = PREFETCH_DATA_MISSES / ARC_MISSES * 100 + METADATA_MISSES_PERCENT = DEMAND_METADATA_MISSES / ARC_MISSES * 100 + PREFETCH_METADATA_MISSES_PERCENT = PREFETCH_METADATA_MISSES / ARC_MISSES * 100 + + # pools + proc = subprocess.run(['/sbin/zpool', 'list', '-pH'], stdout=subprocess.PIPE, universal_newlines=True) + if proc.returncode != 0: + return proc.returncode + + pools = [] + FIELDS = ['name', 'size', 'alloc', 'free', 'expandsz', 'frag', 'cap', 'dedup'] + for line in proc.stdout.splitlines(): + info = dict(zip(FIELDS, line.split('\t'))) + + info['expandsz'] = 0 if info['expandsz'] == '-' else info['expandsz'] + info['frag'] = info['frag'].rstrip('%') + info['frag'] = 0 if info['frag'] == '-' else info['frag'] + info['dedup'] = info['dedup'].rstrip('x') + info['cap'] = info['cap'].rstrip('%') + + pools.append(info) + + res = { + 'deleted': DELETED, # ARC misc + 'evict_skip': EVICT_SKIP, + 'mutex_skip': MUTEX_SKIP, + 'recycle_miss': RECYCLE_MISS, + 'arc_size': ARC_SIZE, # ARC size + 'target_size_max': TARGET_SIZE_MAX, + 'target_size_min': TARGET_SIZE_MIN, + 'target_size': TARGET_SIZE, + 'target_size_per': TARGET_SIZE_PERCENT, + 'arc_size_per': ARC_SIZE_PERCENT, + 'target_size_arat': TARGET_SIZE_ADAPTIVE_RATIO, + 'min_size_per': MIN_SIZE_PERCENT, + 'mfu_size': MFU_SIZE, # ARC size breakdown + 'p': P, + 'rec_used_per': RECENTLY_USED_PERCENT, + 'freq_used_per': FREQUENTLY_USED_PERCENT, + 'arc_hits': ARC_HITS, # ARC efficiency + 'arc_misses': ARC_MISSES, + 'demand_data_hits': DEMAND_DATA_HITS, + 'demand_data_misses': DEMAND_DATA_MISSES, + 'demand_meta_hits':
DEMAND_METADATA_HITS, + 'demand_meta_misses': DEMAND_METADATA_MISSES, + 'mfu_ghost_hits': MFU_GHOST_HITS, + 'mfu_hits': MFU_HITS, + 'mru_ghost_hits': MRU_GHOST_HITS, + 'mru_hits': MRU_HITS, + 'pre_data_hits': PREFETCH_DATA_HITS, + 'pre_data_misses': PREFETCH_DATA_MISSES, + 'pre_meta_hits': PREFETCH_METADATA_HITS, + 'pre_meta_misses': PREFETCH_METADATA_MISSES, + 'anon_hits': ANON_HITS, + 'arc_accesses_total': ARC_ACCESSES_TOTAL, + 'demand_data_total': DEMAND_DATA_TOTAL, + 'pre_data_total': PREFETCH_DATA_TOTAL, + 'real_hits': REAL_HITS, + 'cache_hits_per': CACHE_HIT_PERCENT, # ARC efficiency percentages + 'cache_miss_per': CACHE_MISS_PERCENT, + 'actual_hit_per': ACTUAL_HIT_PERCENT, + 'data_demand_per': DATA_DEMAND_PERCENT, + 'data_pre_per': DATA_PREFETCH_PERCENT, + 'anon_hits_per': ANON_HITS_PERCENT, + 'mru_per': MRU_PERCENT, + 'mfu_per': MFU_PERCENT, + 'mru_ghost_per': MRU_GHOST_PERCENT, + 'mfu_ghost_per': MFU_GHOST_PERCENT, + 'demand_hits_per': DEMAND_HITS_PERCENT, + 'pre_hits_per': PREFETCH_HITS_PERCENT, + 'meta_hits_per': METADATA_HITS_PERCENT, + 'pre_meta_hits_per': PREFETCH_METADATA_HITS_PERCENT, + 'demand_misses_per': DEMAND_MISSES_PERCENT, + 'pre_misses_per': PREFETCH_MISSES_PERCENT, + 'meta_misses_per': METADATA_MISSES_PERCENT, + 'pre_meta_misses_per': PREFETCH_METADATA_MISSES_PERCENT, + 'pools': pools + } + + print(json.dumps(res)) + + return 0 + +if __name__ == '__main__': + import sys + sys.exit(main(sys.argv[1:])) From 385d466eee1adc06eecd4a84cfd6615f2e4ba2ec Mon Sep 17 00:00:00 2001 From: Sander Steffann Date: Fri, 13 Apr 2018 17:42:27 +0100 Subject: [PATCH 024/332] Add random entropy monitoring (#173) --- snmp/entropy.sh | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 snmp/entropy.sh diff --git a/snmp/entropy.sh b/snmp/entropy.sh new file mode 100644 index 000000000..08bd801ca --- /dev/null +++ b/snmp/entropy.sh @@ -0,0 +1,2 @@ +#!/usr/bin/env bash +cat /proc/sys/kernel/random/entropy_avail From 41d36dc97f6886bae4ae6e8ba928892ef9d3c8c3 Mon Sep 17 00:00:00 2001 From: VVelox Date: Fri, 27 Apr 2018 16:46:57 -0500 Subject: [PATCH 025/332] make using SN or device name selectable for SMART reporting (#168) * make using SN or device name selectable * change the default to SN --- snmp/smart | 33 ++++++++++++++++++++++----------- 1 file changed, 22 insertions(+), 11 deletions(-) diff --git a/snmp/smart b/snmp/smart index 567e509ba..44b7a31e7 100755 --- a/snmp/smart +++ b/snmp/smart @@ -42,7 +42,8 @@ line without a = or # is treated as a disk. #This is a comment cache=/var/cache/smart - smartctl=/usr/bin/env smartctl + smartctl=/usr/local/sbin/smartctl + useSN=0 ada0 ada1 @@ -50,6 +51,8 @@ The variables are as below. cache = The path to the cache file to use. Default: /var/cache/smart smartctl = The path to use for smartctl. Default: /usr/bin/env smartctl + useSN = If set to 1, it will use the disk's SN for reporting instead of the device name. + 1 is the default. 0 will use the device name. If you want to guess at the configuration, call it with -g and it will print out what it thinks it should be. @@ -59,14 +62,15 @@ it should be. ## ## You should not need to touch anything below here.
## -my $cache='/var/cache/smart'; -my $smartctl='/usr/bin/env smartctl'; -my @disks; - use warnings; use strict; use Getopt::Std; +my $cache='/var/cache/smart'; +my $smartctl='/usr/bin/env smartctl'; +my @disks; +my $useSN=1; + $Getopt::Std::STANDARD_HELP_VERSION = 1; sub main::VERSION_MESSAGE { print "SMART SNMP extend 0.0.0\n"; @@ -169,7 +173,7 @@ if ( defined( $opts{g} ) ){ $matches_int++; } - print 'smartctl='.$smartctl."\n". + print "useSN=0\n".'smartctl='.$smartctl."\n". $cache. join( "\n", keys(%found_disks) )."\n"; @@ -209,6 +213,10 @@ while ( defined( $configA[$configA_int] ) ){ $smartctl=$val; } + if ( $var eq 'useSN' ){ + $useSN=$val; + } + if ( !defined( $val ) ){ push(@disks, $var); } @@ -330,13 +338,16 @@ while ( defined($disks[$int]) ) { my $conveyance=scalar grep(/Conveyance/, @outputA); my $selective=scalar grep(/Selective/, @outputA); - # get the drive serial number - while (`$smartctl -i /dev/$disk` =~ /Serial Number:(.*)/g) { - $disk_sn = $1; - $disk_sn =~ s/^\s+|\s+$//g; + # get the drive serial number, if needed + my $disk_id=$disk; + if ( $useSN ){ + while (`$smartctl -i /dev/$disk` =~ /Serial Number:(.*)/g) { + $disk_id = $1; + $disk_id =~ s/^\s+|\s+$//g; + } } - $toReturn=$toReturn.$disk_sn.','.$IDs{'5'}.','.$IDs{'10'}.','.$IDs{'173'}.','.$IDs{'177'}.','.$IDs{'183'}.','.$IDs{'184'}.','.$IDs{'187'}.','.$IDs{'188'} + $toReturn=$toReturn.$disk_id.','.$IDs{'5'}.','.$IDs{'10'}.','.$IDs{'173'}.','.$IDs{'177'}.','.$IDs{'183'}.','.$IDs{'184'}.','.$IDs{'187'}.','.$IDs{'188'} .','.$IDs{'190'} .','.$IDs{'194'}.','.$IDs{'196'}.','.$IDs{'197'}.','.$IDs{'198'}.','.$IDs{'199'}.','.$IDs{'231'}.','.$IDs{'233'}.','. $completed.','.$interrupted.','.$read_failure.','.$unknown_failure.','.$extended.','.$short.','.$conveyance.','.$selective."\n"; From b5d77f1a999c5e0f08bc02550fd24e7c37b759c7 Mon Sep 17 00:00:00 2001 From: VVelox Date: Mon, 28 May 2018 07:22:09 -0500 Subject: [PATCH 026/332] convert fail2ban-client to JSON (#172) * convert to JSON * add version return * change the version number of the returned data to 1 --- snmp/fail2ban | 88 ++++++++++++++++++++++++++++++++++++--------------- 1 file changed, 62 insertions(+), 26 deletions(-) diff --git a/snmp/fail2ban b/snmp/fail2ban index 117d2c162..f965c558d 100644 --- a/snmp/fail2ban +++ b/snmp/fail2ban @@ -26,6 +26,10 @@ This is the path to the fail2ban-client if needed. If not specified, "/usr/bin/env fail2ban-client" is used. +=head2 -p + +Pretty prints the JSON. + =head2 -u Updates the cache. @@ -73,6 +77,7 @@ fail2ban-cleint being /foo/bin/fail2ban-client. use strict; use warnings; use Getopt::Std; +use JSON; #fail2ban-client path my $f2bc="/usr/bin/env fail2ban-client"; @@ -90,6 +95,7 @@ sub main::HELP_MESSAGE { "-c Print from the cache.\n". "-C Use this as the cache file.\n". "-f The fail2ban-client path if needed.". + "-p Pretty prints the JSON.\n". "-u Update the cache, '".$cache."'\n". "-U When used with -c, allow update of the cache file if it does not exist or is older than 360 seconds.". "\n". 
@@ -98,40 +104,70 @@ sub main::HELP_MESSAGE { #generats stats sub stats{ + my %toReturn; + $toReturn{total}=0; # total number in jails + $toReturn{jails}={}; # each jail + $toReturn{error}=0; # error code, 0 if good + $toReturn{errorString}=''; # detailed description of any errors + $toReturn{version}='1'; # format version of the returned data + #gets a list of jails my $jailsOutput=`$f2bc status`; - my @jailsOutputA=split(/\n/, $jailsOutput); - my ( $jailsS )=grep( /Jail\ list/, @jailsOutputA ); - $jailsS=~s/.*\://; - $jailsS=~s/\s//g; - my @jails=split(/\,/, $jailsS); + $toReturn{error}=$?; - #process jail - my $int=0; - my $total=0; - my $toReturn=''; - while(defined($jails[$int])){ + if ( $? == -1){ + $toReturn{errorString}='failed to run fail2ban-client'; + } + elsif ($? & 127) { + $toReturn{errorString}= sprintf "fail2ban-client died with signal %d, %s coredump\n", + ($? & 127), ($? & 128) ? 'with' : 'without'; + } + else { + $toReturn{error}=$? >> 8; + $toReturn{errorString}="fail2ban-client exited with ".$toReturn{error}; + } + + if ( $toReturn{error} == 0 ){ - #get the total for this jail - my $jailStatusOutput=`$f2bc status $jails[$int]`; - my @jailStatusOutputA=split(/\n/, $jailStatusOutput); - my ( $jailTotal )=grep(/Currently\ banned\:/, @jailStatusOutputA); - $jailTotal=~s/.*\://; - $jailTotal=~s/\s//g; + my @jailsOutputA=split(/\n/, $jailsOutput); + my ( $jailsS )=grep( /Jail\ list/, @jailsOutputA ); + $jailsS=~s/.*\://; + $jailsS=~s/\s//g; + my @jails=split(/\,/, $jailsS); - #tally the total and add this jail to the list - $total=$total+$jailTotal; - $toReturn=$toReturn.$jails[$int].' '.$jailTotal."\n"; + #process jails + my $int=0; + while(defined($jails[$int])){ + + #get the total for this jail + my $jailStatusOutput=`$f2bc status $jails[$int]`; + my @jailStatusOutputA=split(/\n/, $jailStatusOutput); + my ( $jailTotal )=grep(/Currently\ banned\:/, @jailStatusOutputA); + $jailTotal=~s/.*\://; + $jailTotal=~s/\s//g; + + #tally the total and add this jail to the list + $toReturn{total} = $toReturn{total} + $jailTotal; + $toReturn{jails}{ $jails[$int] } = $jailTotal; + + $int++; + } - $int++; + } + + my $j=JSON->new; + + if ( $_[0] ){ + $j->pretty(1); + return $j->encode( \%toReturn ); } - return $total."\n".$toReturn; + return $j->encode( \%toReturn )."\n"; } #updates $cache sub cacheUpdate{ - my $stats=stats; + my $stats=stats($_[0]); open(my $writefh, ">", $cache) or die "Can't open '".$cache."'"; print $writefh $stats; @@ -150,7 +186,7 @@ sub cachePrint{ #gets the options my %opts=(); -getopts('uUcC:f:', \%opts); +getopts('puUcC:f:', \%opts); #use custom cache file if needed if ( defined( $opts{C} ) ){ @@ -175,7 +211,7 @@ if ( defined( $opts{c} ) ){ #cache does not exist or is old if ( $opts{U} ){ #allowed to update it via -U - cacheUpdate; + cacheUpdate( $opts{p} ); cachePrint; exit 0; }else{ @@ -190,12 +226,12 @@ if ( defined( $opts{c} ) ){ #update the cache if (defined( $opts{u} )){ - cacheUpdate; + cacheUpdate( $opts{p} ); exit 0; } #no cache opions given, just print it -print &stats; +print &stats( $opts{p} ); exit 0; From 7e55d1cd5db04019de09aff7b134a85df71e901a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Istv=C3=A1n=20S=C3=A1r=C3=A1ndi?= Date: Mon, 25 Jun 2018 16:10:00 +0200 Subject: [PATCH 027/332] Update fail2ban extend script to new JSON format (#181) As seen at [this location](https://github.com/librenms/librenms/blob/7fab99cfc13b80a543fb779d68c659b52fc074b1/includes/polling/functions.inc.php#L768) the JSON output needs to contain a `data` field. 
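For illustration only, the JSON envelope being described would look something like this (jail names and counts made up):

    {"data":{"total":2,"jails":{"sshd":2,"postfix":0}},"error":0,"errorString":"","version":"1"}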
The poller php script actually also extracts this `data` field as one of the first steps, see at [this line](https://github.com/librenms/librenms/blob/c3007b483a12758042e5d0c6009a8ef48e3e1a39/includes/polling/applications/fail2ban.inc.php#L36). Before I changed these parts the graph didn't show up because the RRD files simply weren't generated as an exception occurred in the poller. This fixes this problem. --- snmp/fail2ban | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/snmp/fail2ban b/snmp/fail2ban index f965c558d..85640021b 100644 --- a/snmp/fail2ban +++ b/snmp/fail2ban @@ -105,8 +105,9 @@ sub main::HELP_MESSAGE { #generats stats sub stats{ my %toReturn; - $toReturn{total}=0; # total number in jails - $toReturn{jails}={}; # each jail + $toReturn{data}={}; + $toReturn{data}{total}=0; # total number in jails + $toReturn{data}{jails}={}; # each jail $toReturn{error}=0; # error code, 0 if good $toReturn{errorString}=''; # detailed description of any errors $toReturn{version}='1'; # format version of the returned data @@ -147,8 +148,8 @@ sub stats{ $jailTotal=~s/\s//g; #tally the total and add this jail to the list - $toReturn{total} = $toReturn{total} + $jailTotal; - $toReturn{jails}{ $jails[$int] } = $jailTotal; + $toReturn{data}{total} = $toReturn{data}{total} + $jailTotal; + $toReturn{data}{jails}{ $jails[$int] } = $jailTotal; $int++; } From c535b1286c7701a2cefcd10ffd799fba65e56dd2 Mon Sep 17 00:00:00 2001 From: TheGreatDoc <32565115+TheGreatDoc@users.noreply.github.com> Date: Thu, 19 Jul 2018 22:39:08 +0200 Subject: [PATCH 028/332] Asterisk Script (#183) Asterisk App support. - Channels - Calls - Total SIP Peers - Monitored Online - Monitored Offline - Unmonitored Online - Unmonitored Offline --- snmp/asterisk | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) create mode 100644 snmp/asterisk diff --git a/snmp/asterisk b/snmp/asterisk new file mode 100644 index 000000000..109aec4a4 --- /dev/null +++ b/snmp/asterisk @@ -0,0 +1,19 @@ +#!/bin/bash + +ASCLI=/usr/sbin/asterisk + +if [ -f $ASCLI ]; +then + $ASCLI -rx "core show uptime" > /dev/null + if [ $? -ne 0 ]; then + # Asterisk not running, silently exit. 
+ exit 0 + fi + + echo "<<>>" + $ASCLI -rx "core show channels" | awk '/active calls/ { print "Calls=" $1 } /active channels/ { print "Channels=" $1}' + $ASCLI -rx 'sip show peers' | awk '/sip peers/ { print "SipPeers=" $1 "\nSipMonOnline=" $5 "\nSipMonOffline=" $7 "\nSipUnMonOnline=" $10 "\nSipUnMonOffline=" $12}' + +else + exit 0 +fi From c772ac97d3f5b805c311fd13d924513b4561d10b Mon Sep 17 00:00:00 2001 From: crcro Date: Fri, 10 Aug 2018 00:44:02 +0300 Subject: [PATCH 029/332] added rockstor nas distro detection (#187) --- snmp/distro | 2 ++ 1 file changed, 2 insertions(+) diff --git a/snmp/distro b/snmp/distro index 61ad2488c..d13af0629 100755 --- a/snmp/distro +++ b/snmp/distro @@ -30,6 +30,8 @@ elif [ "${OS}" = "Linux" ] ; then REV=`cat /etc/mandriva-release | sed s/.*release\ // | sed s/\ .*//` elif [ -f /etc/oracle-release ]; then DIST="Oracle" + elif [ -f /etc/rockstor-release ]; then + DIST="Rockstor" else DIST="RedHat" fi From 99ad80740cb2fcea1c33e59caf1c05af5a53a14f Mon Sep 17 00:00:00 2001 From: VVelox Date: Sun, 19 Aug 2018 17:47:07 -0500 Subject: [PATCH 030/332] update for the new json_app_get stuff (#179) --- snmp/zfs-freebsd | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/snmp/zfs-freebsd b/snmp/zfs-freebsd index cea6e1e95..d78658c2d 100755 --- a/snmp/zfs-freebsd +++ b/snmp/zfs-freebsd @@ -51,7 +51,7 @@ use Getopt::Std; $Getopt::Std::STANDARD_HELP_VERSION = 1; sub main::VERSION_MESSAGE { - print "FreeBSD ZFS stats extend 0.0.0\n"; + print "FreeBSD ZFS stats extend 0.1.0\n"; } sub main::HELP_MESSAGE { @@ -251,13 +251,19 @@ while ( defined( $pools[$pools_int] ) ) { } $tojson{pools}=\@toShoveIntoJSON; +my %head_hash; +$head_hash{'data'}=\%tojson; +$head_hash{'version'}=1; +$head_hash{'error'}=0; +$head_hash{'errorString'}=''; + my $j=JSON->new; if ( $opts{p} ){ $j->pretty(1); } -print $j->encode( \%tojson ); +print $j->encode( \%head_hash ); if (! $opts{p} ){ print "\n"; From 7c173b160c5be401fa36d85edf15add61a3146d7 Mon Sep 17 00:00:00 2001 From: VVelox Date: Mon, 27 Aug 2018 04:03:01 -0500 Subject: [PATCH 031/332] convert all the NTP stuff to JSON (#174) This requires https://github.com/librenms/librenms/pull/8571 and is for https://github.com/librenms/librenms/pull/8608 . Also converted this to regular sh instead of bash, so it will work on more systems with less dependencies. Has been tested as working on DD-WRT and FreeBSD. --- snmp/ntp-client.sh | 65 +++++++++++++++++--------- snmp/ntp-server.sh | 112 +++++++++++++++++++++++++++++++-------------- 2 files changed, 121 insertions(+), 56 deletions(-) diff --git a/snmp/ntp-client.sh b/snmp/ntp-client.sh index aa56f810d..c04e8b680 100755 --- a/snmp/ntp-client.sh +++ b/snmp/ntp-client.sh @@ -1,25 +1,48 @@ -#!/usr/bin/env bash -################################################################ -# copy this script to somewhere like /opt and make chmod +x it # -# edit your snmpd.conf and include # -# extend ntp-client /opt/ntp-client.sh # -# restart snmpd and activate the app for desired host # -# please make sure you have the path/binaries below # -################################################################ -# Binaries and paths required # -################################################################ -BIN_NTPQ="$(command -v ntpq)" -BIN_GREP="$(command -v grep)" -BIN_TR="$(command -v tr)" -BIN_CUT="$(command -v cut)" +#!/bin/sh +# Please make sure the paths below are correct. 
+# Alternatively you can put them in $0.conf, meaning if you've named +# this script ntp-client.sh then it must go in ntp-client.sh.conf . +# +# NTPQV output version of "ntpq -c rv" +# p1 DD-WRT and some other outdated linux distros +# p11 FreeBSD 11 and any linux distro that is up to date +# +# If you are unsure, which to set, run this script and make sure that +# the JSON output variables match that in "ntpq -c rv". +# +BIN_NTPQ='/usr/bin/env ntpq' +BIN_GREP='/usr/bin/env grep' +BIN_SED="/usr/bin/env sed" +BIN_AWK='/usr/bin/env awk' +NTPQV="p11" ################################################################ # Don't change anything unless you know what are you doing # ################################################################ -CMD1=`$BIN_NTPQ -c rv | $BIN_GREP 'jitter' | $BIN_TR '\n' ' '` -IFS=', ' read -r -a array <<< "$CMD1" - -for value in 2 3 4 5 6 -do - echo ${array["$value"]} | $BIN_CUT -d "=" -f 2 -done +CONFIG=$0".conf" +if [ -f $CONFIG ]; then + . $CONFIG +fi +VERSION=1 +#error and errorString are hardcoded as if the above fails bad json will be generated +RAW=`$BIN_NTPQ -c rv | $BIN_GREP jitter | $BIN_SED 's/[[:alpha:]=,_]/ /g'` +if [ $NTPQV = "p11" ]; then + echo $RAW | $BIN_AWK -F ' ' '{print "{\"data\":{\"offset\":\""$3\ + "\",\"frequency\":\""$4\ + "\",\"sys_jitter\":\""$5\ + "\",\"clk_jitter\":\""$6\ + "\",\"clk_wander\":\""$7\ + "\"},\"version\":\""'$VERSION'"\",\"error\":\"0\",\"errorString\":\"\"}" + }' + exit 0 +fi +if [ $NTPQV = "p1" ]; then + echo $RAW | $BIN_AWK -F ' ' '{print "{\"data\":{\"offset\":\""$2\ + "\",\"frequency\":\""$3\ + "\",\"sys_jitter\":\""$4\ + "\",\"clk_jitter\":\""$5\ + "\",\"clk_wander\":\""$6\ + "\"},\"version\":\""'$VERSION'"\",\"error\":\"0\",\"errorString\":\"\"}" + }' + exit 0 +fi diff --git a/snmp/ntp-server.sh b/snmp/ntp-server.sh index 506b2f8d3..bbf5c737a 100755 --- a/snmp/ntp-server.sh +++ b/snmp/ntp-server.sh @@ -1,47 +1,89 @@ -#!/usr/bin/env bash -################################################################ -# copy this script to somewhere like /opt and make chmod +x it # -# edit your snmpd.conf and include # -# extend ntp-server /opt/ntp-server.sh # -# restart snmpd and activate the app for desired host # -# please make sure you have the path/binaries below # -################################################################ -# Binaries and paths required # -################################################################ -BIN_NTPD='/usr/sbin/ntpd' -BIN_NTPQ='/usr/sbin/ntpq' -BIN_NTPDC='/usr/sbin/ntpdc' -BIN_GREP='/usr/bin/grep' -BIN_TR='/usr/bin/tr' -BIN_CUT='/usr/bin/cut' -BIN_SED='/usr/bin/sed' +#!/bin/sh +# Please make sure the paths below are correct. +# Alternatively you can put them in $0.conf, meaning if you've named +# this script ntp-client.sh then it must go in ntp-client.sh.conf . +# +# NTPQV output version of "ntpq -c rv" +# p1 DD-WRT and some other outdated linux distros +# p11 FreeBSD 11 and any linux distro that is up to date +# +# If you are unsure, which to set, run this script and make sure that +# the JSON output variables match that in "ntpq -c rv". 
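+# For example, the "jitter" line grepped out of "ntpq -c rv" looks
+# something like this (values illustrative; the leading fields differ
+# between the p1 and p11 formats, which is what NTPQV selects between):
+# offset=0.281, frequency=-6.721, sys_jitter=0.187, clk_jitter=0.224, clk_wander=0.010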
+# +BIN_NTPD='/usr/bin/env ntpd' +BIN_NTPQ='/usr/bin/env ntpq' +BIN_NTPDC='/usr/bin/env ntpdc' +BIN_GREP='/usr/bin/env grep' +BIN_TR='/usr/bin/env tr' +BIN_CUT='/usr/bin/env cut' +BIN_SED="/usr/bin/env sed" +BIN_AWK='/usr/bin/env awk' +NTPQV="p11" ################################################################ # Don't change anything unless you know what are you doing # ################################################################ -VER=`$BIN_NTPD --version` - -CMD0=`$BIN_NTPQ -c rv | $BIN_GREP -Eow "stratum=[0-9]+" | $BIN_CUT -d "=" -f 2` -echo $CMD0 +CONFIG=$0".conf" +if [ -f $CONFIG ]; then + . $CONFIG +fi +VERSION=1 -CMD1=`$BIN_NTPQ -c rv | $BIN_GREP 'jitter' | $BIN_TR '\n' ' '` -IFS=', ' read -r -a array <<< "$CMD1" +STRATUM=`$BIN_NTPQ -c rv | $BIN_GREP -Eow "stratum=[0-9]+" | $BIN_CUT -d "=" -f 2` -for value in 2 3 4 5 6 -do - echo ${array["$value"]} | $BIN_CUT -d "=" -f 2 -done +# parse the ntpq info that requires version specific info +NTPQ_RAW=`$BIN_NTPQ -c rv | $BIN_GREP jitter | $BIN_SED 's/[[:alpha:]=,_]/ /g'` +if [ $NTPQV = "p11" ]; then + OFFSET=`echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $3}'` + FREQUENCY=`echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $4}'` + SYS_JITTER=`echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $5}'` + CLK_JITTER=`echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $6}'` + CLK_WANDER=`echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $7}'` +fi +if [ $NTPQV = "p1" ]; then + OFFSET=`echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $2}'` + FREQUENCY=`echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $3}'` + SYS_JITTER=`echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $4}'` + CLK_JITTER=`echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $5}'` + CLK_WANDER=`echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $6}'` +fi -if [[ "$VER" =~ '4.2.6p5' ]] -then +VER=`$BIN_NTPD --version` +if [ "$VER" = '4.2.6p5' ]; then USECMD=`echo $BIN_NTPDC -c iostats` else USECMD=`echo $BIN_NTPQ -c iostats localhost` fi -CMD2=`$USECMD | $BIN_TR -d ' ' | $BIN_TR '\n' ','` +CMD2=`$USECMD | $BIN_TR -d ' ' | $BIN_CUT -d : -f 2 | $BIN_TR '\n' ' '` -IFS=',' read -r -a array <<< "$CMD2" +TIMESINCERESET=`echo $CMD2 | $BIN_AWK -F ' ' '{print $1}'` +RECEIVEDBUFFERS=`echo $CMD2 | $BIN_AWK -F ' ' '{print $2}'` +FREERECEIVEBUFFERS=`echo $CMD2 | $BIN_AWK -F ' ' '{print $3}'` +USEDRECEIVEBUFFERS=`echo $CMD2 | $BIN_AWK -F ' ' '{print $4}'` +LOWWATERREFILLS=`echo $CMD2 | $BIN_AWK -F ' ' '{print $5}'` +DROPPEDPACKETS=`echo $CMD2 | $BIN_AWK -F ' ' '{print $6}'` +IGNOREDPACKETS=`echo $CMD2 | $BIN_AWK -F ' ' '{print $7}'` +RECEIVEDPACKETS=`echo $CMD2 | $BIN_AWK -F ' ' '{print $8}'` +PACKETSSENT=`echo $CMD2 | $BIN_AWK -F ' ' '{print $9}'` +PACKETSENDFAILURES=`echo $CMD2 | $BIN_AWK -F ' ' '{print $10}'` +INPUTWAKEUPS=`echo $CMD2 | $BIN_AWK -F ' ' '{print $11}'` +USEFULINPUTWAKEUPS=`echo $CMD2 | $BIN_AWK -F ' ' '{print $12}'` -for value in 0 1 2 3 5 6 7 8 -do - echo ${array["$value"]} | $BIN_SED -e 's/[^0-9]/ /g' -e 's/^ *//g' -e 's/ *$//g' -done +echo '{"data":{"offset":"'$OFFSET\ +'","frequency":"'$FREQUENCY\ +'","sys_jitter":"'$SYS_JITTER\ +'","clk_jitter":"'$CLK_JITTER\ +'","clk_wander":"'$CLK_WANDER\ +'","stratum":"'$STRATUM\ +'","time_since_reset":"'$TIMESINCERESET\ +'","receive_buffers":"'$RECEIVEDBUFFERS\ +'","free_receive_buffers":"'$FREERECEIVEBUFFERS\ +'","used_receive_buffers":"'$USEDRECEIVEBUFFERS\ +'","low_water_refills":"'$LOWWATERREFILLS\ +'","dropped_packets":"'$DROPPEDPACKETS\ +'","ignored_packets":"'$IGNOREDPACKETS\ +'","received_packets":"'$RECEIVEDPACKETS\ +'","packets_sent":"'$PACKETSSENT\ +'","packet_send_failures":"'$PACKETSENDFAILURES\ 
+'","input_wakeups":"'$PACKETSENDFAILURES\ +'","useful_input_wakeups":"'$USEFULINPUTWAKEUPS\ +'"},"error":"0","errorString":"","version":"'$VERSION'"}' From 7542bd26f4c883c7e622056a1a34909d1dc9aa2c Mon Sep 17 00:00:00 2001 From: Allison Date: Tue, 18 Sep 2018 20:20:23 -0700 Subject: [PATCH 032/332] Update distro (#194) Adding full detection for ASUSWRT-Merlin --- snmp/distro | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/snmp/distro b/snmp/distro index d13af0629..d5bd53754 100755 --- a/snmp/distro +++ b/snmp/distro @@ -97,6 +97,11 @@ elif [ "${OS}" = "Linux" ] ; then DIST="dd-wrt" fi + if [ "`uname -a | awk '{print $(NF)}'`" = "ASUSWRT-Merlin" ] ; then + DIST="ASUSWRT-Merlin" + REV=`nvram show | grep buildno= | egrep -o '[0-9].[0-9].[0-9]'` > /dev/null 2>&1 + fi + if [ -n "${REV}" ] then OSSTR="${DIST} ${REV}" From ca7a5cdafe6dd603538aad8f63bc624143f98377 Mon Sep 17 00:00:00 2001 From: Brock Alberry Date: Wed, 19 Sep 2018 09:09:04 -0400 Subject: [PATCH 033/332] PhotonOS distro detection (#193) * PhotonOS distro detection Detection before `/etc/os-release` since that is present yet missing the build number. * awk detection combining https://github.com/librenms/librenms-agent/pull/193 and https://github.com/librenms/librenms-agent/pull/194 --- snmp/distro | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/snmp/distro b/snmp/distro index d5bd53754..d833a0e25 100755 --- a/snmp/distro +++ b/snmp/distro @@ -64,6 +64,11 @@ elif [ "${OS}" = "Linux" ] ; then DIST="Arch Linux" REV="" # Omit version since Arch Linux uses rolling releases IGNORE_LSB=1 # /etc/lsb-release would overwrite $REV with "rolling" + + elif [ -f /etc/photon-release ] ; then + DIST=$(head -1 < /etc/photon-release) + REV=$(sed -n -e 's/^.*PHOTON_BUILD_NUMBER=//p' /etc/photon-release) + IGNORE_LSB=1 # photon os does not have /etc/lsb-release nor lsb_release elif [ -f /etc/os-release ] ; then DIST=$(grep '^NAME=' /etc/os-release | cut -d= -f2- | tr -d '"') @@ -93,13 +98,14 @@ elif [ "${OS}" = "Linux" ] ; then fi fi - if [ "`uname -a | awk '{print $(NF)}'`" = "DD-WRT" ] ; then - DIST="dd-wrt" - fi - - if [ "`uname -a | awk '{print $(NF)}'`" = "ASUSWRT-Merlin" ] ; then - DIST="ASUSWRT-Merlin" - REV=`nvram show | grep buildno= | egrep -o '[0-9].[0-9].[0-9]'` > /dev/null 2>&1 + if [ -x "$(command -v awk)" ]; then # some distros do not ship with awk + if [ "`uname -a | awk '{print $(NF)}'`" = "DD-WRT" ] ; then + DIST="dd-wrt" + fi + if [ "`uname -a | awk '{print $(NF)}'`" = "ASUSWRT-Merlin" ] ; then + DIST="ASUSWRT-Merlin" + REV=`nvram show | grep buildno= | egrep -o '[0-9].[0-9].[0-9]'` > /dev/null 2>&1 + fi fi if [ -n "${REV}" ] From 28a2f8ae55db7ca773f881560017b4890bc4bbce Mon Sep 17 00:00:00 2001 From: voxnil <14983067+voxnil@users.noreply.github.com> Date: Mon, 15 Oct 2018 13:00:16 -0700 Subject: [PATCH 034/332] Update zfs-linux to use env for python --- snmp/zfs-linux | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/zfs-linux b/snmp/zfs-linux index f8bc5a3e1..e9c19e1c6 100644 --- a/snmp/zfs-linux +++ b/snmp/zfs-linux @@ -1,4 +1,4 @@ -#!/usr/bin/python3 +#!/usr/bin/env python3 import json import subprocess From 1c61a96344317c13fce90811c11c0fa4cb7efb36 Mon Sep 17 00:00:00 2001 From: sparknsh <38894705+sparknsh@users.noreply.github.com> Date: Wed, 17 Oct 2018 12:26:45 -0400 Subject: [PATCH 035/332] ntp-client data correction (#196) NTP was not displaying data right for linux servers. It was putting the frequency data into the offset data. 
This was giving bad graphs in the UI. Tested the correction on both RHEL and Debian based operating systems and all passes. Remove the .sh to simplify for configuration management orchestration scripts. --- snmp/ntp-client | 35 +++++++++++++++++++++++++++++++++ snmp/ntp-client.sh | 48 ---------------------------------------------- 2 files changed, 35 insertions(+), 48 deletions(-) create mode 100755 snmp/ntp-client delete mode 100755 snmp/ntp-client.sh diff --git a/snmp/ntp-client b/snmp/ntp-client new file mode 100755 index 000000000..04db80655 --- /dev/null +++ b/snmp/ntp-client @@ -0,0 +1,35 @@ +#!/bin/sh +# Please make sure the paths below are correct. +# Alternatively you can put them in $0.conf, meaning if you've named +# this script ntp-client then it must go in ntp-client.conf . +# +# NTPQV output version of "ntpq -c rv" +# Version 4 is the most common and up to date version. +# +# If you are unsure, which to set, run this script and make sure that +# the JSON output variables match that in "ntpq -c rv". +# +################################################################ +# Don't change anything unless you know what are you doing # +################################################################ +BIN_NTPQ='/usr/bin/env ntpq' +BIN_NTPD='/usr/bin/env ntpd' +BIN_GREP='/usr/bin/env grep' +BIN_AWK='/usr/bin/env awk' +BIN_HEAD='/usr/bin/env head' + +CONFIG=$0".conf" +if [ -f $CONFIG ]; then + . $CONFIG +fi + +NTP_OFFSET=`$BIN_NTPQ -c rv | $BIN_GREP "offset" | $BIN_AWK -Foffset= '{print $2}' | $BIN_AWK -F, '{print $1}'` +NTP_FREQUENCY=`$BIN_NTPQ -c rv | $BIN_GREP "frequency" | $BIN_AWK -Ffrequency= '{print $2}' | $BIN_AWK -F, '{print $1}'` +NTP_SYS_JITTER=`$BIN_NTPQ -c rv | $BIN_GREP "sys_jitter" | $BIN_AWK -Fsys_jitter= '{print $2}' | $BIN_AWK -F, '{print $1}'` +NTP_CLK_JITTER=`$BIN_NTPQ -c rv | $BIN_GREP "clk_jitter" | $BIN_AWK -Fclk_jitter= '{print $2}' | $BIN_AWK -F, '{print $1}'` +NTP_WANDER=`$BIN_NTPQ -c rv | $BIN_GREP "clk_wander" | $BIN_AWK -Fclk_wander= '{print $2}' | $BIN_AWK -F, '{print $1}'` +NTP_VERSION=`$BIN_NTPD --version | $BIN_AWK -F'ntpd ' '{print $2}' | $BIN_HEAD -c 1` + +echo '{"data":{"offset":"'$NTP_OFFSET'","frequency":"'$NTP_FREQUENCY'","sys_jitter":"'$NTP_SYS_JITTER'","clk_jitter":"'$NTP_CLK_JITTER'","clk_wander":"'$NTP_WANDER'"},"version":"'$NTP_VERSION'","error":"0","errorString":""}' + +exit 0 diff --git a/snmp/ntp-client.sh b/snmp/ntp-client.sh deleted file mode 100755 index c04e8b680..000000000 --- a/snmp/ntp-client.sh +++ /dev/null @@ -1,48 +0,0 @@ -#!/bin/sh -# Please make sure the paths below are correct. -# Alternatively you can put them in $0.conf, meaning if you've named -# this script ntp-client.sh then it must go in ntp-client.sh.conf . -# -# NTPQV output version of "ntpq -c rv" -# p1 DD-WRT and some other outdated linux distros -# p11 FreeBSD 11 and any linux distro that is up to date -# -# If you are unsure, which to set, run this script and make sure that -# the JSON output variables match that in "ntpq -c rv". -# -BIN_NTPQ='/usr/bin/env ntpq' -BIN_GREP='/usr/bin/env grep' -BIN_SED="/usr/bin/env sed" -BIN_AWK='/usr/bin/env awk' -NTPQV="p11" -################################################################ -# Don't change anything unless you know what are you doing # -################################################################ -CONFIG=$0".conf" -if [ -f $CONFIG ]; then - . 
$CONFIG -fi -VERSION=1 -#error and errorString are hardcoded as if the above fails bad json will be generated -RAW=`$BIN_NTPQ -c rv | $BIN_GREP jitter | $BIN_SED 's/[[:alpha:]=,_]/ /g'` -if [ $NTPQV = "p11" ]; then - echo $RAW | $BIN_AWK -F ' ' '{print "{\"data\":{\"offset\":\""$3\ - "\",\"frequency\":\""$4\ - "\",\"sys_jitter\":\""$5\ - "\",\"clk_jitter\":\""$6\ - "\",\"clk_wander\":\""$7\ - "\"},\"version\":\""'$VERSION'"\",\"error\":\"0\",\"errorString\":\"\"}" - }' - exit 0 -fi - -if [ $NTPQV = "p1" ]; then - echo $RAW | $BIN_AWK -F ' ' '{print "{\"data\":{\"offset\":\""$2\ - "\",\"frequency\":\""$3\ - "\",\"sys_jitter\":\""$4\ - "\",\"clk_jitter\":\""$5\ - "\",\"clk_wander\":\""$6\ - "\"},\"version\":\""'$VERSION'"\",\"error\":\"0\",\"errorString\":\"\"}" - }' - exit 0 -fi From e0dcd4a064cedb09241e4af17198bf61e8fd1bf3 Mon Sep 17 00:00:00 2001 From: sparknsh <38894705+sparknsh@users.noreply.github.com> Date: Wed, 17 Oct 2018 12:27:21 -0400 Subject: [PATCH 036/332] nginx script clean up (#197) - Change script name for simplify of configuration management orchestration scripts. - Change 172.0.0.1 to localhost for better nginx handling. --- snmp/nginx | 34 ++++++++++++++++++++++++++++++++++ snmp/nginx-stats | 37 ------------------------------------- 2 files changed, 34 insertions(+), 37 deletions(-) create mode 100755 snmp/nginx delete mode 100755 snmp/nginx-stats diff --git a/snmp/nginx b/snmp/nginx new file mode 100755 index 000000000..19f16592f --- /dev/null +++ b/snmp/nginx @@ -0,0 +1,34 @@ +#!/usr/bin/env python +import urllib2 +import re + +data = urllib2.urlopen('http://localhost/nginx-status').read() + +params = {} + +for line in data.split("\n"): + smallstat = re.match(r"\s?Reading:\s(.*)\sWriting:\s(.*)\sWaiting:\s(.*)$", line) + req = re.match(r"\s+(\d+)\s+(\d+)\s+(\d+)", line) + if smallstat: + params["Reading"] = smallstat.group(1) + params["Writing"] = smallstat.group(2) + params["Waiting"] = smallstat.group(3) + elif req: + params["Requests"] = req.group(3) + else: + pass + +dataorder = [ + "Active", + "Reading", + "Writing", + "Waiting", + "Requests" + ] + +for param in dataorder: + if param == "Active": + Active = int(params["Reading"]) + int(params["Writing"]) + int(params["Waiting"]) + print Active + else: + print params[param] diff --git a/snmp/nginx-stats b/snmp/nginx-stats deleted file mode 100755 index 1cedca5ba..000000000 --- a/snmp/nginx-stats +++ /dev/null @@ -1,37 +0,0 @@ -#!/usr/bin/env python2 -import urllib2 -import re - - -data = urllib2.urlopen('http://127.0.0.1/nginx-status').read() - -params = {} - -for line in data.split("\n"): - smallstat = re.match(r"\s?Reading:\s(.*)\sWriting:\s(.*)\sWaiting:\s(.*)$", line) - req = re.match(r"\s+(\d+)\s+(\d+)\s+(\d+)", line) - if smallstat: - params["Reading"] = smallstat.group(1) - params["Writing"] = smallstat.group(2) - params["Waiting"] = smallstat.group(3) - elif req: - params["Requests"] = req.group(3) - else: - pass - - -dataorder = [ - "Active", - "Reading", - "Writing", - "Waiting", - "Requests" - ] - - -for param in dataorder: - if param == "Active": - Active = int(params["Reading"]) + int(params["Writing"]) + int(params["Waiting"]) - print Active - else: - print params[param] From f0f34b4a2d1a36836f6bffe4307d5d51524009b4 Mon Sep 17 00:00:00 2001 From: sparknsh <38894705+sparknsh@users.noreply.github.com> Date: Wed, 17 Oct 2018 12:28:07 -0400 Subject: [PATCH 037/332] phpfpmsf script clean up (#198) - Change script name for simplify of configuration management orchestration scripts. - Update code syntax. 
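For context, the pool status page this script word-splits looks something like the following (values illustrative), which is why the parsing indexes into phpfpm_response by position:

    pool:                 www
    process manager:      dynamic
    start time:           17/Oct/2018:12:26:45 -0400
    start since:          3600
    accepted conn:        12073
    listen queue:         0
    max listen queue:     1
    listen queue len:     128
    idle processes:       2
    active processes:     1
    total processes:      3
    max active processes: 2
    max children reached: 0
    slow requests:        0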
--- snmp/{phpfpm-sp => phpfpmsp} | 102 +++++++++++++++++------------------ 1 file changed, 50 insertions(+), 52 deletions(-) rename snmp/{phpfpm-sp => phpfpmsp} (52%) diff --git a/snmp/phpfpm-sp b/snmp/phpfpmsp similarity index 52% rename from snmp/phpfpm-sp rename to snmp/phpfpmsp index 2ae5a5e95..3eb0e0c50 100644 --- a/snmp/phpfpm-sp +++ b/snmp/phpfpmsp @@ -78,60 +78,58 @@ phpfpm_slow_requests=0 # local opts="${1}" url="${2}" - phpfpm_response=($(curl -Ss ${opts} "${url}")) - [ $? -ne 0 -o "${#phpfpm_response[@]}" -eq 0 ] && exit 1 + phpfpm_response=($(curl -Ss ${opts} "${url}")) + [ $? -ne 0 -o "${#phpfpm_response[@]}" -eq 0 ] && exit 1 - if [[ "${phpfpm_response[0]}" != "pool:" \ - || "${phpfpm_response[2]}" != "process" \ - || "${phpfpm_response[5]}" != "start" \ - || "${phpfpm_response[12]}" != "accepted" \ - || "${phpfpm_response[15]}" != "listen" \ - || "${phpfpm_response[16]}" != "queue:" \ - || "${phpfpm_response[26]}" != "idle" \ - || "${phpfpm_response[29]}" != "active" \ - || "${phpfpm_response[32]}" != "total" \ - ]] - then - echo "invalid response from phpfpm status server: ${phpfpm_response[*]}" - exit 1; - fi + if [[ "${phpfpm_response[0]}" != "pool:" \ + || "${phpfpm_response[2]}" != "process" \ + || "${phpfpm_response[5]}" != "start" \ + || "${phpfpm_response[12]}" != "accepted" \ + || "${phpfpm_response[15]}" != "listen" \ + || "${phpfpm_response[16]}" != "queue:" \ + || "${phpfpm_response[26]}" != "idle" \ + || "${phpfpm_response[29]}" != "active" \ + || "${phpfpm_response[32]}" != "total" \ + ]] + then + echo "invalid response from phpfpm status server: ${phpfpm_response[*]}" + exit 1; + fi - phpfpm_pool="${phpfpm_response[1]}" - phpfpm_start_time="${phpfpm_response[7]} ${phpfpm_response[8]}" - phpfpm_start_since="${phpfpm_response[11]}" - phpfpm_accepted_conn="${phpfpm_response[14]}" - phpfpm_listen_queue="${phpfpm_response[17]}" - phpfpm_max_listen_queue="${phpfpm_response[21]}" - phpfpm_listen_queue_len="${phpfpm_response[25]}" - phpfpm_idle_processes="${phpfpm_response[28]}" - phpfpm_active_processes="${phpfpm_response[31]}" - phpfpm_total_processes="${phpfpm_response[34]}" - phpfpm_max_active_processes="${phpfpm_response[38]}" - phpfpm_max_children_reached="${phpfpm_response[42]}" - if [ "${phpfpm_response[43]}" == "slow" ] - then - phpfpm_slow_requests="${phpfpm_response[45]}" - else - phpfpm_slow_requests="-1" - fi - - if [[ -z "${phpfpm_pool}" \ - || -z "${phpfpm_start_time}" \ - || -z "${phpfpm_start_since}" \ - || -z "${phpfpm_accepted_conn}" \ - || -z "${phpfpm_listen_queue}" \ - || -z "${phpfpm_max_listen_queue}" \ - || -z "${phpfpm_listen_queue_len}" \ - || -z "${phpfpm_idle_processes}" \ - || -z "${phpfpm_active_processes}" \ - || -z "${phpfpm_total_processes}" \ - || -z "${phpfpm_max_active_processes}" \ - || -z "${phpfpm_max_children_reached}" \ - ]] - then - echo "empty values got from phpfpm status server: ${phpfpm_response[*]}" - exit 1 - fi + phpfpm_pool="${phpfpm_response[1]}" + phpfpm_start_time="${phpfpm_response[7]} ${phpfpm_response[8]}" + phpfpm_start_since="${phpfpm_response[11]}" + phpfpm_accepted_conn="${phpfpm_response[14]}" + phpfpm_listen_queue="${phpfpm_response[17]}" + phpfpm_max_listen_queue="${phpfpm_response[21]}" + phpfpm_listen_queue_len="${phpfpm_response[25]}" + phpfpm_idle_processes="${phpfpm_response[28]}" + phpfpm_active_processes="${phpfpm_response[31]}" + phpfpm_total_processes="${phpfpm_response[34]}" + phpfpm_max_active_processes="${phpfpm_response[38]}" + phpfpm_max_children_reached="${phpfpm_response[42]}" + if [ 
"${phpfpm_response[43]}" == "slow" ]; then + phpfpm_slow_requests="${phpfpm_response[45]}" + else + phpfpm_slow_requests="-1" + fi + + if [[ -z "${phpfpm_pool}" \ + || -z "${phpfpm_start_time}" \ + || -z "${phpfpm_start_since}" \ + || -z "${phpfpm_accepted_conn}" \ + || -z "${phpfpm_listen_queue}" \ + || -z "${phpfpm_max_listen_queue}" \ + || -z "${phpfpm_listen_queue_len}" \ + || -z "${phpfpm_idle_processes}" \ + || -z "${phpfpm_active_processes}" \ + || -z "${phpfpm_total_processes}" \ + || -z "${phpfpm_max_active_processes}" \ + || -z "${phpfpm_max_children_reached}" \ + ]]; then + echo "empty values got from phpfpm status server: ${phpfpm_response[*]}" + exit 1 + fi echo $phpfpm_pool echo $phpfpm_start_time From ccb244aa09de36e4e4dd85120702580144e86383 Mon Sep 17 00:00:00 2001 From: sparknsh <38894705+sparknsh@users.noreply.github.com> Date: Wed, 17 Oct 2018 12:28:39 -0400 Subject: [PATCH 038/332] osupdate script clean up (#199) - Change script name for simplify of configuration management orchestration scripts. - Update code syntax. --- snmp/{os-updates.sh => osupdate} | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) rename snmp/{os-updates.sh => osupdate} (85%) diff --git a/snmp/os-updates.sh b/snmp/osupdate similarity index 85% rename from snmp/os-updates.sh rename to snmp/osupdate index 33e1f9c62..e4185d408 100755 --- a/snmp/os-updates.sh +++ b/snmp/osupdate @@ -1,28 +1,28 @@ #!/usr/bin/env bash ################################################################ # copy this script to /etc/snmp/ and make it executable: # -# chmod +x /etc/snmp/os-updates.sh # +# chmod +x /etc/snmp/osupdate # # ------------------------------------------------------------ # # edit your snmpd.conf and include: # -# extend osupdate /opt/os-updates.sh # +# extend osupdate /etc/snmp/osupdate # #--------------------------------------------------------------# # restart snmpd and activate the app for desired host # #--------------------------------------------------------------# # please make sure you have the path/binaries below # -################################################################ -BIN_WC='/usr/bin/wc' -BIN_GREP='/bin/grep' +################################################################ +BIN_WC='/usr/bin/env wc' +BIN_GREP='/usr/bin/env grep' CMD_GREP='-c' CMD_WC='-l' -BIN_ZYPPER='/usr/bin/zypper' +BIN_ZYPPER='/usr/bin/env zypper' CMD_ZYPPER='-q lu' -BIN_YUM='/usr/bin/yum' +BIN_YUM='/usr/bin/env yum' CMD_YUM='-q check-update' -BIN_DNF='/usr/bin/dnf' +BIN_DNF='/usr/bin/env dnf' CMD_DNF='-q check-update' -BIN_APT='/usr/bin/apt-get' +BIN_APT='/usr/bin/env apt-get' CMD_APT='-qq -s upgrade' -BIN_PACMAN='/usr/bin/pacman' +BIN_PACMAN='/usr/bin/env pacman' CMD_PACMAN='-Sup' ################################################################ From 3dada041e433318592e137678d24c32dd1a134b4 Mon Sep 17 00:00:00 2001 From: sparknsh <38894705+sparknsh@users.noreply.github.com> Date: Thu, 18 Oct 2018 10:37:10 -0400 Subject: [PATCH 039/332] Fix binary operator expected error (#203) --- snmp/osupdate | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/snmp/osupdate b/snmp/osupdate index e4185d408..ed8e68888 100755 --- a/snmp/osupdate +++ b/snmp/osupdate @@ -28,7 +28,7 @@ CMD_PACMAN='-Sup' ################################################################ # Don't change anything unless you know what are you doing # ################################################################ -if [ -f $BIN_ZYPPER ]; then +if [ -f "$BIN_ZYPPER" ]; then # OpenSUSE UPDATES=`$BIN_ZYPPER 
$CMD_ZYPPER | $BIN_WC $CMD_WC` if [ $UPDATES -ge 2 ]; then @@ -36,7 +36,7 @@ if [ -f $BIN_ZYPPER ]; then else echo "0"; fi -elif [ -f $BIN_DNF ]; then +elif [ -f "$BIN_DNF" ]; then # Fedora UPDATES=`$BIN_DNF $CMD_DNF | $BIN_WC $CMD_WC` if [ $UPDATES -ge 1 ]; then @@ -44,7 +44,7 @@ elif [ -f $BIN_DNF ]; then else echo "0"; fi -elif [ -f $BIN_PACMAN ]; then +elif [ -f "$BIN_PACMAN" ]; then # Arch UPDATES=`$BIN_PACMAN $CMD_PACMAN | $BIN_WC $CMD_WC` if [ $UPDATES -ge 1 ]; then @@ -52,7 +52,7 @@ elif [ -f $BIN_PACMAN ]; then else echo "0"; fi -elif [ -f $BIN_YUM ]; then +elif [ -f "$BIN_YUM" ]; then # CentOS / Redhat UPDATES=`$BIN_YUM $CMD_YUM | $BIN_WC $CMD_WC` if [ $UPDATES -ge 1 ]; then @@ -60,7 +60,7 @@ elif [ -f $BIN_YUM ]; then else echo "0"; fi -elif [ -f $BIN_APT ]; then +elif [ -f "$BIN_APT" ]; then # Debian / Devuan / Ubuntu UPDATES=`$BIN_APT $CMD_APT | $BIN_GREP $CMD_GREP 'Inst'` if [ $UPDATES -ge 1 ]; then From 381cc2466af521772607c682a9a707471a38ff4b Mon Sep 17 00:00:00 2001 From: sparknsh <38894705+sparknsh@users.noreply.github.com> Date: Tue, 23 Oct 2018 08:51:12 -0400 Subject: [PATCH 040/332] fix nginx script indentation (#205) --- snmp/nginx | 40 +++++++++++++++++----------------------- 1 file changed, 17 insertions(+), 23 deletions(-) diff --git a/snmp/nginx b/snmp/nginx index 19f16592f..06efab6e6 100755 --- a/snmp/nginx +++ b/snmp/nginx @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python2 import urllib2 import re @@ -7,28 +7,22 @@ data = urllib2.urlopen('http://localhost/nginx-status').read() params = {} for line in data.split("\n"): - smallstat = re.match(r"\s?Reading:\s(.*)\sWriting:\s(.*)\sWaiting:\s(.*)$", line) - req = re.match(r"\s+(\d+)\s+(\d+)\s+(\d+)", line) - if smallstat: - params["Reading"] = smallstat.group(1) - params["Writing"] = smallstat.group(2) - params["Waiting"] = smallstat.group(3) - elif req: - params["Requests"] = req.group(3) - else: - pass + smallstat = re.match(r"\s?Reading:\s(.*)\sWriting:\s(.*)\sWaiting:\s(.*)$", line) + req = re.match(r"\s+(\d+)\s+(\d+)\s+(\d+)", line) + if smallstat: + params["Reading"] = smallstat.group(1) + params["Writing"] = smallstat.group(2) + params["Waiting"] = smallstat.group(3) + elif req: + params["Requests"] = req.group(3) + else: + pass -dataorder = [ - "Active", - "Reading", - "Writing", - "Waiting", - "Requests" - ] +dataorder = ["Active", "Reading", "Writing", "Waiting", "Requests"] for param in dataorder: - if param == "Active": - Active = int(params["Reading"]) + int(params["Writing"]) + int(params["Waiting"]) - print Active - else: - print params[param] + if param == "Active": + Active = int(params["Reading"]) + int(params["Writing"]) + int(params["Waiting"]) + print Active + else: + print params[param] From d49fe954dfdeffbeee091051f1f0c515d020f281 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?F=C3=A9lim=20Whiteley?= Date: Tue, 23 Oct 2018 17:46:54 +0100 Subject: [PATCH 041/332] Add divide by zero check (#191) On several servers (Ubuntu 18.04) DEMAND_DATA_TOTAL is 0 currently and is causing an error Traceback (most recent call last): File "/usr/local/bin/zfs-linux", line 178, in sys.exit(main(sys.argv[1:])) File "/usr/local/bin/zfs-linux", line 76, in main DATA_DEMAND_PERCENT = DEMAND_DATA_HITS / DEMAND_DATA_TOTAL * 100 ZeroDivisionError: division by zero --- snmp/zfs-linux | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/zfs-linux b/snmp/zfs-linux index e9c19e1c6..c5f36256c 100644 --- a/snmp/zfs-linux +++ b/snmp/zfs-linux @@ -73,7 +73,7 @@ def main(args): CACHE_HIT_PERCENT = 
ARC_HITS / ARC_ACCESSES_TOTAL * 100 CACHE_MISS_PERCENT = ARC_MISSES / ARC_ACCESSES_TOTAL * 100 ACTUAL_HIT_PERCENT = REAL_HITS / ARC_ACCESSES_TOTAL * 100 - DATA_DEMAND_PERCENT = DEMAND_DATA_HITS / DEMAND_DATA_TOTAL * 100 + DATA_DEMAND_PERCENT = DEMAND_DATA_HITS / DEMAND_DATA_TOTAL * 100 if DEMAND_DATA_TOTAL != 0 else 0 DATA_PREFETCH_PERCENT = PREFETCH_DATA_HITS / PREFETCH_DATA_TOTAL * 100 if PREFETCH_DATA_TOTAL != 0 else 0 From 8d66211adc47d3bad5dd042e3ddbc59a23a28819 Mon Sep 17 00:00:00 2001 From: sparknsh <38894705+sparknsh@users.noreply.github.com> Date: Thu, 25 Oct 2018 07:17:42 -0400 Subject: [PATCH 042/332] Fix package manager detection (#204) * Fix package manager detection * use release file for os detection * Use command to to validate package manager type * check if exists and the execute permission is granted * make script more portable --- snmp/osupdate | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/snmp/osupdate b/snmp/osupdate index ed8e68888..f45493dc4 100755 --- a/snmp/osupdate +++ b/snmp/osupdate @@ -28,7 +28,7 @@ CMD_PACMAN='-Sup' ################################################################ # Don't change anything unless you know what are you doing # ################################################################ -if [ -f "$BIN_ZYPPER" ]; then +if command -v zypper &>/dev/null ; then # OpenSUSE UPDATES=`$BIN_ZYPPER $CMD_ZYPPER | $BIN_WC $CMD_WC` if [ $UPDATES -ge 2 ]; then @@ -36,7 +36,7 @@ if [ -f "$BIN_ZYPPER" ]; then else echo "0"; fi -elif [ -f "$BIN_DNF" ]; then +elif command -v dnf &>/dev/null ; then # Fedora UPDATES=`$BIN_DNF $CMD_DNF | $BIN_WC $CMD_WC` if [ $UPDATES -ge 1 ]; then @@ -44,7 +44,7 @@ elif [ -f "$BIN_DNF" ]; then else echo "0"; fi -elif [ -f "$BIN_PACMAN" ]; then +elif command -v pacman &>/dev/null ; then # Arch UPDATES=`$BIN_PACMAN $CMD_PACMAN | $BIN_WC $CMD_WC` if [ $UPDATES -ge 1 ]; then @@ -52,7 +52,7 @@ elif [ -f "$BIN_PACMAN" ]; then else echo "0"; fi -elif [ -f "$BIN_YUM" ]; then +elif command -v yum &>/dev/null ; then # CentOS / Redhat UPDATES=`$BIN_YUM $CMD_YUM | $BIN_WC $CMD_WC` if [ $UPDATES -ge 1 ]; then @@ -60,7 +60,7 @@ elif [ -f "$BIN_YUM" ]; then else echo "0"; fi -elif [ -f "$BIN_APT" ]; then +elif command -v apt-get &>/dev/null ; then # Debian / Devuan / Ubuntu UPDATES=`$BIN_APT $CMD_APT | $BIN_GREP $CMD_GREP 'Inst'` if [ $UPDATES -ge 1 ]; then From a827734c0ec0e0cdf5e2a04730ec68dbad3fd477 Mon Sep 17 00:00:00 2001 From: gardar Date: Thu, 25 Oct 2018 19:19:20 +0000 Subject: [PATCH 043/332] CloudLinux distro detection (#208) Added CloudLinux distro detection, previously CloudLinux got identified as RedHat --- snmp/distro | 2 ++ 1 file changed, 2 insertions(+) diff --git a/snmp/distro b/snmp/distro index d833a0e25..ce146801c 100755 --- a/snmp/distro +++ b/snmp/distro @@ -24,6 +24,8 @@ elif [ "${OS}" = "Linux" ] ; then DIST=$(cat /etc/redhat-release | awk '{print $1}') if [ "${DIST}" = "CentOS" ]; then DIST="CentOS" + elif [ "${DIST}" = "CloudLinux" ]; then + DIST="CloudLinux" elif [ "${DIST}" = "Mandriva" ]; then DIST="Mandriva" PSEUDONAME=`cat /etc/mandriva-release | sed s/.*\(// | sed s/\)//` From ff124a1358755ceddc0ae6a4187d358da0d54d06 Mon Sep 17 00:00:00 2001 From: VVelox Date: Thu, 22 Nov 2018 09:04:58 -0600 Subject: [PATCH 044/332] add portactivity SNMP extend (#159) * add portactivity SNMP extend in its initial form * update for the current json_app_get * add version to the returned JSON * add basic POD documentation --- snmp/portactivity | 352 ++++++++++++++++++++++++++++++++++++++++++++++ 1 
file changed, 352 insertions(+)
 create mode 100755 snmp/portactivity

diff --git a/snmp/portactivity b/snmp/portactivity
new file mode 100755
index 000000000..430ae5190
--- /dev/null
+++ b/snmp/portactivity
@@ -0,0 +1,352 @@
+#!/usr/bin/env perl
+
+#Copyright (c) 2018, Zane C. Bowers-Hadley
+#All rights reserved.
+#
+#Redistribution and use in source and binary forms, with or without modification,
+#are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice,
+#   this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+#   this list of conditions and the following disclaimer in the documentation
+#   and/or other materials provided with the distribution.
+#
+#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+#ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+#WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+#IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+#INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+#BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+#LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+#OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+#THE POSSIBILITY OF SUCH DAMAGE.
+
+# FreeBSD /usr/include/netinet/tcp_fsm.h
+# Linux netstat(8)
+# FreeBSD       -->  Linux
+# LISTEN        -->  LISTEN
+# CLOSED        -->  CLOSED
+# SYN_SENT      -->  SYN_SENT
+# SYN_RECEIVED  -->  SYN_RECV
+# ESTABLISHED   -->  ESTABLISHED
+# CLOSE_WAIT    -->  CLOSE_WAIT
+# FIN_WAIT_1    -->  FIN_WAIT1
+# CLOSING       -->  CLOSING
+# LAST_ACK      -->  LAST_ACK
+# FIN_WAIT_2    -->  FIN_WAIT2
+# TIME_WAIT     -->  TIME_WAIT
+# ((no equivalent)) --> UNKNOWN
+#
+# UNKNOWN is regarded as a valid state for all and will be used on OSes that support it.
+# The names returned by default are those used by FreeBSD.

+=head1 NAME

+portactivity - Generates JSON output based on netstat data for the specified TCP services.

+=head1 SYNOPSIS

+portactivity [B<-P>] B<-p>

+=head1 USAGE

+This is meant to be used as a SNMP extend for use with json_app_get in LibreNMS.

+Below is an example of its usage with netsnmpd, checking HTTP and SSH.

+    extend portactivity /etc/snmp/portactivity -p http,ssh

+=head1 SWITCHES

+=head2 B<-P>

+Prints the JSON in an easily human-readable format.

+=head2 B<-p>

+This is a comma-separated list of TCP services to check.

+=head1 SERVICES

+NSS is used to resolve the TCP service protocol names. All the ones listed with -p
+must be resolvable that way or it will error.

+If you are running something on a non-standard port and want to check for it, you either
+have to use the name of the port it is on, add it to the database, or change it in the
+database (if it is already there under an undesired name).

+In general the file in question on most systems is going to be '/etc/services' and you
+will need to run services_mkdb(8) after updating it. But for specifics you will want to
+consult services(5).
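As a purely hypothetical illustration, a daemon on a non-standard port becomes checkable once it has an entry NSS can resolve; the service name and port here are made up:

    # /etc/services (hypothetical entry; run services_mkdb(8) afterwards on FreeBSD)
    myhttp          8080/tcp

    # confirm NSS resolves it, then reference it in the extend
    $ getent services myhttp
    myhttp                8080/tcp

    extend portactivity /etc/snmp/portactivity -p http,ssh,myhttp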
+
+=cut
+
+use strict;
+use warnings;
+use JSON;
+use Getopt::Std;
+use Parse::Netstat qw(parse_netstat);
+
+$Getopt::Std::STANDARD_HELP_VERSION = 1;
+sub main::VERSION_MESSAGE {
+    print "Port Activity SNMP stats extend 0.0.0\n";
+}
+
+sub main::HELP_MESSAGE {
+    print "\n".
+        "-p   A comma-separated list of TCP protocols to check for in netstat.\n".
+        "-P   Print the output in a human-readable manner.\n";
+}
+
+#returns a new hash with all zeroed values for a new protocol
+sub newProto{
+
+    return {
+        'total_conns'=>0,
+        'total_to'=>0,
+        'total_from'=>0,
+        'total'=>{
+            'LISTEN'=>0,
+            'CLOSED'=>0,
+            'SYN_SENT'=>0,
+            'SYN_RECEIVED'=>0,
+            'ESTABLISHED'=>0,
+            'CLOSE_WAIT'=>0,
+            'FIN_WAIT_1'=>0,
+            'CLOSING'=>0,
+            'LAST_ACK'=>0,
+            'FIN_WAIT_2'=>0,
+            'TIME_WAIT'=>0,
+            'UNKNOWN'=>0,
+            'other'=>0,
+        },
+        'to'=>{
+            'LISTEN'=>0,
+            'CLOSED'=>0,
+            'SYN_SENT'=>0,
+            'SYN_RECEIVED'=>0,
+            'ESTABLISHED'=>0,
+            'CLOSE_WAIT'=>0,
+            'FIN_WAIT_1'=>0,
+            'CLOSING'=>0,
+            'LAST_ACK'=>0,
+            'FIN_WAIT_2'=>0,
+            'TIME_WAIT'=>0,
+            'UNKNOWN'=>0,
+            'other'=>0,
+        },
+        'from'=>{
+            'LISTEN'=>0,
+            'CLOSED'=>0,
+            'SYN_SENT'=>0,
+            'SYN_RECEIVED'=>0,
+            'ESTABLISHED'=>0,
+            'CLOSE_WAIT'=>0,
+            'FIN_WAIT_1'=>0,
+            'CLOSING'=>0,
+            'LAST_ACK'=>0,
+            'FIN_WAIT_2'=>0,
+            'TIME_WAIT'=>0,
+            'UNKNOWN'=>0,
+            'other'=>0,
+        },
+    }
+    ;
+}
+
+#returns the json output
+sub return_json{
+    my %to_return;
+    if(defined($_[0])){
+        %to_return= %{$_[0]};
+    }
+    my $pretty=$_[1];
+
+    if (!defined( $to_return{data} ) ){
+        $to_return{data}={};
+    }
+
+    my $j=JSON->new;
+
+    if ( $pretty ){
+        $j->pretty(1);
+    }
+
+    print $j->encode( \%to_return );
+
+    if ( ! $pretty ){
+        print "\n";
+    }
+}
+
+my %valid_states=(
+    'LISTEN'=>1,
+    'CLOSED'=>1,
+    'SYN_SENT'=>1,
+    'SYN_RECEIVED'=>1,
+    'ESTABLISHED'=>1,
+    'CLOSE_WAIT'=>1,
+    'FIN_WAIT_1'=>1,
+    'CLOSING'=>1,
+    'LAST_ACK'=>1,
+    'FIN_WAIT_2'=>1,
+    'TIME_WAIT'=>1,
+    'UNKNOWN'=>1,
+    );
+
+#gets the options
+my %opts=();
+getopts('p:P', \%opts);
+
+#what will be returned
+my %to_return;
+$to_return{error}='0';
+$to_return{errorString}='';
+$to_return{version}=1;
+
+if (! defined( $opts{p} ) ){
+    $to_return{errorString}='No services specified to check for';
+    $to_return{error}=1;
+    return_json(\%to_return, $opts{P});
+    exit 1;
+}
+
+#the list of protocols to check for
+my @protos_array=split(/\,/, $opts{p});
+
+#holds the various protocol hashes
+my %protos;
+
+#make sure each one specified is defined and build the hash that will be returned
+my $protos_array_int=0;
+while ( defined( $protos_array[$protos_array_int] ) ){
+    $protos{ $protos_array[$protos_array_int] }=newProto;
+
+    #check if it exists
+    my $port=getservbyname( $protos_array[$protos_array_int] , 'tcp' );
+
+    # if it is not defined, then we error
+    if ( !defined( $port ) ){
+        $to_return{errorString}='"'.$protos_array[$protos_array_int].'" is not a known service; either add it or double-check your spelling';
+        $to_return{error}=4;
+        return_json(\%to_return, $opts{P});
+        exit 4;
+    }
+
+    $protos_array_int++;
+}
+
+my $os=$^O;
+
+my $netstat;
+
+#make sure this is a supported OS
+if ( $os eq 'freebsd' ){
+    $netstat='netstat -S -p tcp'
+}elsif( $os eq 'linux' ){
+    $netstat='netstat -n'
+}else{
+    $to_return{errorString}=$os.'
is not a supported OS as of currently'; + $to_return{error}=3; + return_json(\%to_return, $opts{P}); + exit 3; +} + +my $res = parse_netstat(output => join("", `$netstat`), flavor=>$os); + +#check to make sure that it was able to parse the output +if ( + (!defined( $res->[1] )) || + ($res->[1] ne 'OK' ) + ){ + $to_return{errorString}='Unable to parse netstat output'; + $to_return{error}=2; + return_json(\%to_return, $opts{P}); + exit 2; +} + +#chew through each connection +my $active_conns_int=0; +while ( defined( $res->[2]{'active_conns'}[$active_conns_int] ) ){ + my $conn=$res->[2]{active_conns}[$active_conns_int]; + + #we only care about TCP currently + if ( $conn->{proto} =~ /^[Tt][Cc][Pp]/ ){ + $protos_array_int=0; + my $service; + while( + ( defined( $protos_array[ $protos_array_int ] ) ) && + ( !defined( $service ) ) #stop once we find it + ){ + #check if this matches either ports + if ( + ( $protos_array[ $protos_array_int ] eq $conn->{'local_port'} ) || + ( $protos_array[ $protos_array_int ] eq $conn->{'foreign_port'} ) + ){ + $service=$protos_array[ $protos_array_int ]; + } + + $protos_array_int++; + } + + #only handle it if is a service we are watching for + if ( defined( $service ) ){ + my $processed=0; + + my $state=$conn->{'state'}; + #translate the state names + if ( $os eq 'linux' ){ + if ( $state eq 'SYN_RECV' ){ + $state='SYN_RECEIVED'; + }elsif( $state eq 'FIN_WAIT1' ){ + $state='FIN_WAIT_1'; + }elsif( $state eq 'FIN_WAIT2' ){ + $state='FIN_WAIT_2' + } + } + + #only count the state towards the total if not listening + if ( $state ne 'LISTEN' ){ + $protos{$service}{'total_conns'}++; + } + + #make sure the state is a valid one + # if it is not a valid one, set it to other, meaning something unexpected was set for the state that should not be + if ( ! defined( $valid_states{$state} ) ){ + $state='other'; + } + + #increment the total state + $protos{$service}{'total'}{$state}++; + + if ( + ( $conn->{'foreign_port'} eq $service ) && + ( $state ne 'LISTEN' ) + ){ + $protos{$service}{'total_from'}++; + $protos{$service}{'from'}{$state}++; + $processed=1; + } + + if ( + ( $conn->{'local_port'} eq $service ) && + ( $state ne 'LISTEN' ) && + ( ! 
$processed )
+            ){
+            $protos{$service}{'total_to'}++;
+            $protos{$service}{'to'}{$state}++;
+            }
+
+        }
+
+    }
+
+    $active_conns_int++;
+}
+
+#return the finished product
+$to_return{data}=\%protos;
+return_json(\%to_return, $opts{P});
+exit 0;

From 43ab324f6597b83ac2dd99444f1b8f60bc0a74c3 Mon Sep 17 00:00:00 2001
From: Kovrinic
Date: Wed, 28 Nov 2018 21:22:16 -0600
Subject: [PATCH 045/332] Added Ubuntu 14.04 zfs support

---
 snmp/zfs-linux | 40 +++++++++++++++++++++++++++++++++++++---
 1 file changed, 37 insertions(+), 3 deletions(-)
 mode change 100644 => 100755 snmp/zfs-linux

diff --git a/snmp/zfs-linux b/snmp/zfs-linux
old mode 100644
new mode 100755
index c5f36256c..87677d0b5
--- a/snmp/zfs-linux
+++ b/snmp/zfs-linux
@@ -2,6 +2,13 @@
 import json
 import subprocess
 
+def proc_err(cmd, proc):
+    # output process error and first line of error code
+    return "{}{}".format(
+        subprocess.CalledProcessError(proc.returncode, cmd, proc.stderr),
+        " ({})".format(proc.stderr.splitlines()[0]) if proc.stderr.splitlines() else ""
+    )
+
 def main(args):
     res = {}
 
@@ -95,9 +102,24 @@ def main(args):
     PREFETCH_METADATA_MISSES_PERCENT = DEMAND_METADATA_MISSES / ARC_MISSES * 100
 
     # pools
-    proc = subprocess.run(['/sbin/zpool', 'list', '-pH'], stdout=subprocess.PIPE, universal_newlines=True)
-    if proc.returncode != 0:
-        return proc.returncode
+    exact_size = True
+    zpool_cmd = ['/sbin/zpool']
+    zpool_cmd_list = zpool_cmd + ['list', '-p', '-H']
+    std = {'stdout': subprocess.PIPE, 'stderr': subprocess.PIPE, 'universal_newlines': True}
+
+    ## account for variations between ZoL zfs versions
+    proc = subprocess.run(zpool_cmd_list, **std)
+    if (proc.returncode == 1) and (('root' in proc.stderr) or ('admin' in proc.stderr)):
+        zpool_cmd = ['sudo'] + zpool_cmd  # elevate zpool with sudo
+        zpool_cmd_list = zpool_cmd + ['list', '-p', '-H']
+        proc = subprocess.run(zpool_cmd_list, **std)
+    if (proc.returncode == 2):
+        # -p option is not present in older versions
+        del zpool_cmd_list[zpool_cmd_list.index('-p')]  # try removing -p to fix the issue
+        proc = subprocess.run(zpool_cmd_list, **std)
+        exact_size = False
+    if (proc.returncode != 0):
+        return proc_err(zpool_cmd_list, proc)
 
     pools = []
     FIELDS = ['name', 'size', 'alloc', 'free', 'expandsz', 'frag', 'cap', 'dedup']
@@ -110,6 +132,18 @@ def main(args):
         info['dedup'] = info['dedup'].rstrip('x')
         info['cap'] = info['cap'].rstrip('%')
 
+        # zfs-0.6.5.11 fix
+        if not exact_size:
+            zpool_cmd_get = zpool_cmd + ['get', '-pH', 'size,alloc,free', info['name']]
+            proc2 = subprocess.run(zpool_cmd_get, **std)
+            if (proc2.returncode != 0):
+                return proc_err(zpool_cmd_get, proc2)
+
+            info2 = dict([tuple(s.split('\t')[1:3]) for s in proc2.stdout.splitlines()])
+            info['size'] = info2['size']
+            info['alloc'] = info2['allocated']
+            info['free'] = info2['free']
+
         pools.append(info)
 
     res = {

From 5b53ab54c8a6d9f3b81abf42725b5da2b3ebec3d Mon Sep 17 00:00:00 2001
From: dsgagi
Date: Wed, 12 Dec 2018 16:09:25 +0100
Subject: [PATCH 046/332] Update distro

---
 snmp/distro | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/snmp/distro b/snmp/distro
index ce146801c..f8926acca 100755
--- a/snmp/distro
+++ b/snmp/distro
@@ -50,7 +50,7 @@ elif [ "${OS}" = "Linux" ] ; then
        DIST="Devuan `cat /etc/devuan_version`"
        REV=""
 
-    elif [ -f /etc/debian_version ] ; then
+    elif [ -f /etc/debian_version -a -f /usr/bin/lsb_release ] ; then
        DIST="Debian `cat /etc/debian_version`"
        REV=""
        ID=`lsb_release -i | awk -F ':' '{print $2}' | sed 's/ //g'`
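For context on the two distro patches around this point: the ID extraction only works when lsb_release is installed and prints its usual banner, roughly like this (the exact padding between the colon and the value varies by system):

    $ lsb_release -i
    Distributor ID: Raspbian

    $ lsb_release -i | awk -F ':' '{print $2}' | sed 's/ //g'
    Raspbian

The awk/sed pair takes everything after the colon and strips the padding so the later comparison against "Raspbian" can match; guarding it behind the -f /usr/bin/lsb_release test keeps Debian systems without the tool from erroring.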
From 456d2e7672d8532af4df7f6da2b5c18b02778bf7 Mon Sep 17 00:00:00 2001
From: dsgagi
Date: Fri, 14 Dec 2018 18:47:54 +0100
Subject: [PATCH 047/332] Update distro

Minor changes to the code for better output.
---
 snmp/distro | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/snmp/distro b/snmp/distro
index f8926acca..88cc60d64 100755
--- a/snmp/distro
+++ b/snmp/distro
@@ -50,12 +50,14 @@ elif [ "${OS}" = "Linux" ] ; then
        DIST="Devuan `cat /etc/devuan_version`"
        REV=""
 
-    elif [ -f /etc/debian_version -a -f /usr/bin/lsb_release ] ; then
+    elif [ -f /etc/debian_version ] ; then
        DIST="Debian `cat /etc/debian_version`"
        REV=""
-       ID=`lsb_release -i | awk -F ':' '{print $2}' | sed 's/ //g'`
+       if [ -f /usr/bin/lsb_release ] ; then
+           ID=`lsb_release -i | awk -F ':' '{print $2}' | sed 's/ //g'`
+       fi
        if [ "${ID}" = "Raspbian" ] ; then
-           DIST="Raspbian `cat /etc/debian_version`"
+           DIST="Raspbian `cat /etc/debian_version`"
        fi
 
    elif [ -f /etc/gentoo-release ] ; then

From dc3d2673ddc86d02ca2cd8d93bbf2fd53ca43c55 Mon Sep 17 00:00:00 2001
From: dsgagi
Date: Fri, 14 Dec 2018 18:49:58 +0100
Subject: [PATCH 048/332] Update distro

Remove extra white spaces.
---
 snmp/distro | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/snmp/distro b/snmp/distro
index 88cc60d64..0d6578c37 100755
--- a/snmp/distro
+++ b/snmp/distro
@@ -54,10 +54,10 @@ elif [ "${OS}" = "Linux" ] ; then
        DIST="Debian `cat /etc/debian_version`"
        REV=""
        if [ -f /usr/bin/lsb_release ] ; then
-          ID=`lsb_release -i | awk -F ':' '{print $2}' | sed 's/ //g'`
+           ID=`lsb_release -i | awk -F ':' '{print $2}' | sed 's/ //g'`
        fi
        if [ "${ID}" = "Raspbian" ] ; then
-          DIST="Raspbian `cat /etc/debian_version`"
+           DIST="Raspbian `cat /etc/debian_version`"
        fi
 
    elif [ -f /etc/gentoo-release ] ; then

From 433d744953fa800ce49fa060b141c10663c0b952 Mon Sep 17 00:00:00 2001
From: Jason Cheng <30381035+jasoncheng7115@users.noreply.github.com>
Date: Sun, 16 Dec 2018 22:21:00 +0800
Subject: [PATCH 049/332] Added FreeNAS Version support (#215)

Hi, I added FreeNAS version information support, as shown in the figure:

![2018-12-15 11 53 31](https://user-images.githubusercontent.com/30381035/50044886-2329a580-00c5-11e9-817c-b89a8374270d.png)
![2018-12-15 11 53 49](https://user-images.githubusercontent.com/30381035/50044887-2329a580-00c5-11e9-93b4-b140809f84a3.png)
---
 snmp/distro | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/snmp/distro b/snmp/distro
index 0d6578c37..9f1e9d442 100755
--- a/snmp/distro
+++ b/snmp/distro
@@ -125,7 +125,12 @@ elif [ "${OS}" = "Darwin" ] ; then
    fi
 
 elif [ "${OS}" = "FreeBSD" ] ; then
-   OSSTR=`/usr/bin/uname -mior`
+   DIST=$(cat /etc/version | cut -d'-' -f 1)
+   if [ "${DIST}" = "FreeNAS" ]; then
+       OSSTR=`cat /etc/version | cut -d' ' -f 1`
+   else
+       OSSTR=`/usr/bin/uname -mior`
+   fi
 fi
 
 echo ${OSSTR}

From 107d72e862c2e2a53870272859252a5d39bf8c72 Mon Sep 17 00:00:00 2001
From: Jason Cheng <30381035+jasoncheng7115@users.noreply.github.com>
Date: Tue, 25 Dec 2018 09:15:22 +0800
Subject: [PATCH 050/332] Added Proxmox VE Version support

---
 snmp/distro | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/snmp/distro b/snmp/distro
index 9f1e9d442..cd9e814bf 100755
--- a/snmp/distro
+++ b/snmp/distro
@@ -59,7 +59,10 @@ elif [ "${OS}" = "Linux" ] ; then
        if [ "${ID}" = "Raspbian" ] ; then
            DIST="Raspbian `cat /etc/debian_version`"
        fi
-
+       if [ -f /usr/bin/pveversion ]; then
+           DIST="${DIST}/PVE `/usr/bin/pveversion | cut -d '/' -f 2`"
+       fi
+
    elif [ -f /etc/gentoo-release ] ; then
        DIST="Gentoo"
        REV=$(tr -d '[[:alpha:]]' </etc/gentoo-release)
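The PVE detection above leans on the slash-delimited banner that pveversion prints; the version numbers here are illustrative:

    $ /usr/bin/pveversion
    pve-manager/5.3-5/97ae681d (running kernel: 4.15.18-9-pve)

    $ /usr/bin/pveversion | cut -d '/' -f 2
    5.3-5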
Date: Fri, 28 Dec 2018 20:08:46 -0600
Subject: [PATCH 051/332] JSON SNMP extend for UPS-APC app. (#189)

* add snmp/ups-apcups, a Perl rewrite of snmp/ups-apcups.sh to support JSON
* finish documenting it
* add version and remove units from the returned values
---
 snmp/ups-apcups | 133 ++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 133 insertions(+)
 create mode 100755 snmp/ups-apcups

diff --git a/snmp/ups-apcups b/snmp/ups-apcups
new file mode 100755
index 000000000..f3f45d7df
--- /dev/null
+++ b/snmp/ups-apcups
@@ -0,0 +1,133 @@
+#!/usr/bin/env perl
+# Author: Zane C. Bowers-Hadley
+
+# https://docs.librenms.org/#Extensions/Applications/#ups-apcups
+# See the above for additional information not documented in the POD below.
+
+=head1 DESCRIPTION
+
+This is a SNMP extend for apcupsd for use with LibreNMS.
+
+For more information, see L<https://docs.librenms.org/#Extensions/Applications/#ups-apcups>.
+
+=head1 SWITCHES
+
+=head2 -p
+
+Pretty print the JSON.
+
+=head1 SNMPD SETUP EXAMPLES
+
+Below is a basic example of setting it up in snmpd.conf for NetSNMP.
+
+    extend ups-apcups /etc/snmp/ups-apcups
+
+Now if, for example, apcaccess is not in the PATH environment variable that snmpd is running
+with, you may need to do something like the below.
+
+    extend ups-apcups /usr/bin/env PATH=/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin /etc/snmp/ups-apcups
+
+=cut
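For reference, apcaccess prints one "NAME : value units" pair per line; the parser below splits each line on the colon and then drops everything from the first space in the value onward, so the units never reach LibreNMS. Sample lines (illustrative values):

    $ apcaccess
    ...
    LINEV    : 230.0 Volts
    LOADPCT  : 24.0 Percent
    BCHARGE  : 100.0 Percent
    TIMELEFT : 46.5 Minutes
    BATTV    : 27.3 Volts
    NOMINV   : 230 Volts
    NOMBATTV : 24.0 Volts
    ...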
+
+#Copyright (c) 2018, Zane C. Bowers-Hadley
+#All rights reserved.
+#
+#Redistribution and use in source and binary forms, with or without modification,
+#are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice,
+#   this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+#   this list of conditions and the following disclaimer in the documentation
+#   and/or other materials provided with the distribution.
+#
+#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+#ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+#WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+#IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+#INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+#BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+#LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+#OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+#THE POSSIBILITY OF SUCH DAMAGE.
+
+use strict;
+use warnings;
+use Getopt::Std;
+use JSON;
+
+# should be no reason to change this
+# better to use env to make sure it is in your path when you run this
+my $apcaccess='apcaccess';
+
+$Getopt::Std::STANDARD_HELP_VERSION = 1;
+sub main::VERSION_MESSAGE {
+    print "ups-apcups SNMP extend 0.0.0\n";
+};
+
+sub main::HELP_MESSAGE {
+    print "\n";
+}
+
+#gets the options
+my %opts=();
+getopts('p', \%opts);
+
+#holds what will be returned
+my %data;
+my %toReturn;
+$toReturn{version}=1;
+
+# get the current status from apcupsd
+my $apcaccess_output=`$apcaccess`;
+$toReturn{error}=$?;
+
+# check for bad exit codes
+if ( $? == -1){
+    $toReturn{errorString}='failed to run apcaccess';
+}
+elsif ($? & 127) {
+    $toReturn{errorString}= sprintf "apcaccess died with signal %d, %s coredump\n",
+    ($? & 127), ($? & 128) ? 'with' : 'without';
+} else {
+    $toReturn{error}=$? >> 8;
+    $toReturn{errorString}="apcaccess exited with ".$toReturn{error};
+}
+
+# if no bad exit codes, we can process $apcaccess_output
+if ( $toReturn{error} == 0 ){
+    # holds the found data for the apcupsd status
+    my %status;
+
+    # pulls apart the output
+    my @lines=split(/\n/, $apcaccess_output);
+    foreach my $line ( @lines ){
+        my ( $var, $val )=split(/\ *\:\ */, $line, 2);
+        $val=~s/\ .*//;
+        $status{$var}=$val;
+    }
+
+    #pull the desired variables from the output
+    $data{charge}=$status{BCHARGE};
+    $data{time_remaining}=$status{TIMELEFT};
+    $data{battery_nominal}=$status{NOMBATTV};
+    $data{battery_voltage}=$status{BATTV};
+    $data{input_voltage}=$status{LINEV};
+    $data{nominal_voltage}=$status{NOMINV};
+    $data{load}=$status{LOADPCT};
+}
+
+# add the data to be returned to the return hash
+$toReturn{data}=\%data;
+
+# convert $toReturn to JSON and pretty print if asked to
+my $j=JSON->new;
+if ( $opts{p} ){
+    $j->pretty(1);
+}
+print $j->encode( \%toReturn );
+if (! $opts{p} ){
+    print "\n";
+}
+exit 0;

From 1b90904f61c6d4078f2b427e17c82cf1f8b926ba Mon Sep 17 00:00:00 2001
From: VVelox
Date: Fri, 28 Dec 2018 20:10:13 -0600
Subject: [PATCH 052/332] convert the FreeBSD NFS stuff over to JSON and add
 in lots of sanity (#190)

* convert fbsdnfsclient over to JSON
* Convert the server stuff to JSON and fix the output of the client extend.
* misc. stuff
* lots of cleanup and sanity added to the FreeBSD NFS scripts
* fix the #! line
* update the docs at the top
---
 snmp/fbsdnfsclient | 275 +++++++++++++++++++++++----------------------
 snmp/fbsdnfsserver | 225 +++++++++++++++++++++----------------
 2 files changed, 271 insertions(+), 229 deletions(-)

diff --git a/snmp/fbsdnfsclient b/snmp/fbsdnfsclient
index f41c7b606..7e3d57722 100644
--- a/snmp/fbsdnfsclient
+++ b/snmp/fbsdnfsclient
@@ -1,135 +1,174 @@
-#!/usr/local/bin/perl
+#!/usr/bin/env perl
 
-# Add this to snmpd.conf as below.
-# extend fbsdnfsclient /etc/snmp/fbsdnfsclient
+=head1 DESCRIPTION
+
+This is a SNMP extend for FreeBSD NFS client stats for use with LibreNMS.
+
+For more information, see L.
+
+=head1 SWITCHES
+
+=head2 -p
+
+Pretty print the JSON.
+
+=head1 SNMPD SETUP EXAMPLES
+
+Below is a basic example of setting it up in snmpd.conf for NetSNMP.
+
+    extend fbsdnfsclient /etc/snmp/fbsdnfsclient
+
+=cut
+
+#Copyright (c) 2018, Zane C. Bowers-Hadley
+#All rights reserved.
+#
+#Redistribution and use in source and binary forms, with or without modification,
+#are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice,
+#   this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+#   this list of conditions and the following disclaimer in the documentation
+#   and/or other materials provided with the distribution.
+#
+#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+#ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+#WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+#IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, +#INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +#BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +#LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +#OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +#THE POSSIBILITY OF SUCH DAMAGE. + +use strict; +use warnings; +use Getopt::Std; +use JSON; + +#the version of returned data +my $VERSION=1; + +#gets the options +my %opts=(); +getopts('p', \%opts); + +$Getopt::Std::STANDARD_HELP_VERSION = 1; +sub main::VERSION_MESSAGE { + print "fbsdnfsclient SNMP extend 0.0.0\n"; +} + +sub main::HELP_MESSAGE { + print "\n". + "-p Print the JSON in a pretty manner.\n"; + exit 0; +} + +#the data to return +my %to_return; +$to_return{'version'}=$VERSION; +$to_return{'error'}='0'; +$to_return{'errorString'}=''; my $nfsstatOutput=`/usr/bin/nfsstat`; -my @nfsstatOutputA=split( /\n/, $nfsstatOutput ); -my $int=0; +$to_return{error}=$?; -my ( - $Getattr, - $Setattr, - $Lookup, - $Readlink, - $Read, - $Write, - $Create, - $Remove, - $Rename, - $Link, - $Symlink, - $Mkdir, - $Rmdir, - $Readdir, - $RdirPlus, - $Access, - $Mknod, - $Fsstat, - $Fsinfo, - $PathConf, - $Commit, - $TimedOut, - $Invalid, - $XReplies, - $Retries, - $Requests, - $AttrHits, - $AttrMisses, - $LkupHits, - $LkupMisses, - $BioRHits, - $BioRMisses, - $BioWHits, - $BioWMisses, - $BioRLHits, - $BioRLMisses, - $BioDHits, - $BioDMisses, - $DirEHits, - $DirEMisses, - $AccsHits, - $AccsMisses, - ); +# check for bad exit codes +if ( $? == -1){ + $to_return{errorString}='failed to run nfsstat'; +} +elsif ($? & 127) { + $to_return{errorString}= sprintf "nfsstat died with signal %d, %s coredump\n", + ($? & 127), ($? & 128) ? 'with' : 'without'; +} else { + $to_return{error}=$? 
>> 8; + $to_return{errorString}="nfsstat exited with ".$to_return{error}; +} +# pull the output of nfssetat appart +my %data; +my @nfsstatOutputA=split( /\n/, $nfsstatOutput ); +my $int=0; while( defined( $nfsstatOutputA[$int] ) ){ $nfsstatOutputA[$int]=~s/^ +//; $nfsstatOutputA[$int]=~s/ +/ /g; if ( $int == 3 ){ ( - $Getattr, - $Setattr, - $Lookup, - $Readlink, - $Read, - $Write, - $Create, - $Remove, + $data{Getattr}, + $data{Setattr}, + $data{Lookup}, + $data{Readlink}, + $data{Read}, + $data{Write}, + $data{Create}, + $data{Remove}, )=split( /\ /, $nfsstatOutputA[$int] ); } if ( $int == 5 ){ ( - $Rename, - $Link, - $Symlink, - $Mkdir, - $Rmdir, - $Readdir, - $RdirPlus, - $Access, + $data{Rename}, + $data{Link}, + $data{Symlink}, + $data{Mkdir}, + $data{Rmdir}, + $data{Readdir}, + $data{RdirPlus}, + $data{Access}, )=split( /\ /, $nfsstatOutputA[$int] ); } if ( $int == 7 ){ ( - $Mknod, - $Fsstat, - $Fsinfo, - $PathConf, - $Commit, + $data{Mknod}, + $data{Fsstat}, + $data{Fsinfo}, + $data{PathConf}, + $data{Commit}, )=split( /\ /, $nfsstatOutputA[$int] ); } if ( $int == 10 ){ ( - $TimedOut, - $Invalid, - $XReplies, - $Retries, - $Requests, + $data{TimedOut}, + $data{Invalid}, + $data{XReplies}, + $data{Retries}, + $data{Requests}, )=split( /\ /, $nfsstatOutputA[$int] ); } if ( $int == 13 ){ ( - $AttrHits, - $AttrMisses, - $LkupHits, - $LkupMisses, - $BioRHits, - $BioRMisses, - $BioWHits, - $BioWMisses, + $data{AttrHits}, + $data{AttrMisses}, + $data{LkupHits}, + $data{LkupMisses}, + $data{BioRHits}, + $data{BioRMisses}, + $data{BioWHits}, + $data{BioWMisses}, )=split( /\ /, $nfsstatOutputA[$int] ); } if ( $int == 15 ){ ( - $BioRLHits, - $BioRLMisses, - $BioDHits, - $BioDMisses, - $DirEHits, - $DirEMisses, - $AccsHits, - $AccsMisses, + $data{BioRLHits}, + $data{BioRLMisses}, + $data{BioDHits}, + $data{BioDMisses}, + $data{DirEHits}, + $data{DirEMisses}, + $data{AccsHits}, + $data{AccsMisses}, )=split( /\ /, $nfsstatOutputA[$int] ); } @@ -137,45 +176,15 @@ while( defined( $nfsstatOutputA[$int] ) ){ $int++; } -print $Getattr."\n". - $Setattr."\n". - $Lookup."\n". - $Readlink."\n". - $Read."\n". - $Write."\n". - $Create."\n". - $Remove."\n". - $Rename."\n". - $Link."\n". - $Symlink."\n". - $Mkdir."\n". - $Rmdir."\n". - $Readdir."\n". - $RdirPlus."\n". - $Access."\n". - $Mknod."\n". - $Fsstat."\n". - $Fsinfo."\n". - $PathConf."\n". - $Commit."\n". - $TimedOut."\n". - $Invalid."\n". - $XReplies."\n". - $Retries."\n". - $Requests."\n". - $AttrHits."\n". - $AttrMisses."\n". - $LkupHits."\n". - $LkupMisses."\n". - $BioRHits."\n". - $BioRMisses."\n". - $BioWHits."\n". - $BioWMisses."\n". - $BioRLHits."\n". - $BioRLMisses."\n". - $BioDHits."\n". - $BioDMisses."\n". - $DirEHits."\n". - $DirEMisses."\n". - $AccsHits."\n". - $AccsMisses."\n"; +#add the data has to the return hash +$to_return{data}=\%data; + +#finally render the JSON +my $j=JSON->new; +if ( $opts{p} ){ + $j->pretty(1); +} +print $j->encode( \%to_return ); +if ( ! $opts{p} ){ + print "\n"; +} diff --git a/snmp/fbsdnfsserver b/snmp/fbsdnfsserver index 4664cfa61..e9402b01a 100644 --- a/snmp/fbsdnfsserver +++ b/snmp/fbsdnfsserver @@ -1,117 +1,168 @@ #!/usr/local/bin/perl -# Add this to snmpd.conf as below. -# extend fbsdnfsserver /etc/snmp/fbsdnfsserver +=head1 DESCRIPTION + +This is a SNMP extend for FreeBSD NFS server stats for use with LibreNMS. + +For more information, see L. + +=head1 SWITCHES + +=head2 -p + +Pretty print the JSON. + +=head1 SNMPD SETUP EXAMPLES + +Below is a basic example of setting it up snmpd.conf for NetSNMP. 
+
+    extend fbsdnfsserver /etc/snmp/fbsdnfsserver
+
+=cut
+
+#Copyright (c) 2018, Zane C. Bowers-Hadley
+#All rights reserved.
+#
+#Redistribution and use in source and binary forms, with or without modification,
+#are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice,
+#   this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+#   this list of conditions and the following disclaimer in the documentation
+#   and/or other materials provided with the distribution.
+#
+#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+#ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+#WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+#IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+#INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+#BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+#LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+#OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+#THE POSSIBILITY OF SUCH DAMAGE.
+
+
+use strict;
+use warnings;
+use JSON;
+use Getopt::Std;
+
+#gets the options
+my %opts=();
+getopts('p', \%opts);
+
+$Getopt::Std::STANDARD_HELP_VERSION = 1;
+sub main::VERSION_MESSAGE {
+    print "fbsdnfsserver SNMP extend 0.0.0\n";
+}
+
+sub main::HELP_MESSAGE {
+    print "\n".
+        "-p   Print the JSON in a pretty manner.\n";
+    exit 0;
+}
+
+my $VERSION=1;
+
+#the data to return
+my %to_return;
+$to_return{'version'}=$VERSION;
+$to_return{'error'}='0';
+$to_return{'errorString'}='';
 
 my $nfsstatOutput=`/usr/bin/nfsstat`;
-my @nfsstatOutputA=split( /\n/, $nfsstatOutput );
-my $int=0;
+$to_return{error}=$?;
+
+# check for bad exit codes
+if ( $? == -1){
+    $to_return{errorString}='failed to run nfsstat';
+}
+elsif ($? & 127) {
+    $to_return{errorString}= sprintf "nfsstat died with signal %d, %s coredump\n",
+    ($? & 127), ($? & 128) ? 'with' : 'without';
+} else {
+    $to_return{error}=$?
>> 8; + $to_return{errorString}="nfsstat exited with ".$to_return{error}; +} -my ( - $Getattr, - $Setattr, - $Lookup, - $Readlink, - $Read, - $Write, - $Create, - $Remove, - $Rename, - $Link, - $Symlink, - $Mkdir, - $Rmdir, - $Readdir, - $RdirPlus, - $Access, - $Mknod, - $Fsstat, - $Fsinfo, - $PathConf, - $Commit, - $RetFailed, - $Faults, - $Inprog, - $Idem, - $Nonidem, - $Misses, - $WriteOps, - $WriteRPC, - $Opsaved - ); +my @nfsstatOutputA=split( /\n/, $nfsstatOutput ); +my $int=0; +my %data; while( defined( $nfsstatOutputA[$int] ) ){ $nfsstatOutputA[$int]=~s/^ +//; $nfsstatOutputA[$int]=~s/ +/ /g; if ( $int == 19 ){ ( - $Getattr, - $Setattr, - $Lookup, - $Readlink, - $Read, - $Write, - $Create, - $Remove, + $data{'Getattr'}, + $data{'Setattr'}, + $data{'Lookup'}, + $data{'Readlink'}, + $data{'Read'}, + $data{'Write'}, + $data{'Create'}, + $data{'Remove'}, )=split( /\ /, $nfsstatOutputA[$int] ); } if ( $int == 21 ){ ( - $Rename, - $Link, - $Symlink, - $Mkdir, - $Rmdir, - $Readdir, - $RdirPlus, - $Access + $data{'Rename'}, + $data{'Link'}, + $data{'Symlink'}, + $data{'Mkdir'}, + $data{'Rmdir'}, + $data{'Readdir'}, + $data{'RdirPlus'}, + $data{'Access'} )=split( /\ /, $nfsstatOutputA[$int] ); } if ( $int == 23 ){ ( - $Mknod, - $Fsstat, - $Fsinfo, - $PathConf, - $Commit + $data{'Mknod'}, + $data{'Fsstat'}, + $data{'Fsinfo'}, + $data{'PathConf'}, + $data{'Commit'} )=split( /\ /, $nfsstatOutputA[$int] ); } if ( $int == 25 ){ ( - $RetFailed + $data{'RetFailed'} )=split( /\ /, $nfsstatOutputA[$int] ); } if ( $int == 27 ){ ( - $Faults + $data{'Faults'} )=split( /\ /, $nfsstatOutputA[$int] ); } if ( $int == 30 ){ ( - $Inprog, - $Idem, - $Nonidem, - $Misses + $data{'Inprog'}, + $data{'Idem'}, + $data{'Nonidem'}, + $data{'Misses'} )=split( /\ /, $nfsstatOutputA[$int] ); } if ( $int == 33 ){ ( - $WriteOps, - $WriteRPC, - $Opsaved + $data{'WriteOps'}, + $data{'WriteRPC'}, + $data{'Opsaved'} )=split( /\ /, $nfsstatOutputA[$int] ); } @@ -119,33 +170,15 @@ while( defined( $nfsstatOutputA[$int] ) ){ $int++; } -print $Getattr."\n". - $Setattr."\n". - $Lookup."\n". - $Readlink."\n". - $Read."\n". - $Write."\n". - $Create."\n". - $Remove."\n". - $Rename."\n". - $Link."\n". - $Symlink."\n". - $Mkdir."\n". - $Rmdir."\n". - $Readdir."\n". - $RdirPlus."\n". - $Access."\n". - $Mknod."\n". - $Fsstat."\n". - $Fsinfo."\n". - $PathConf."\n". - $Commit."\n". - $RetFailed."\n". - $Faults."\n". - $Inprog."\n". - $Idem."\n". - $Nonidem."\n". - $Misses."\n". - $WriteOps."\n". - $WriteRPC."\n". - $Opsaved."\n"; +#add the data has to the return hash +$to_return{data}=\%data; + +#finally render the JSON +my $j=JSON->new; +if ( $opts{p} ){ + $j->pretty(1); +} +print $j->encode( \%to_return ); +if ( ! $opts{p} ){ + print "\n"; +} From 6fdaffa1b2ba8c49ed8bd38fb6445335b3146329 Mon Sep 17 00:00:00 2001 From: Mike Centola Date: Thu, 10 Jan 2019 00:35:28 -0500 Subject: [PATCH 053/332] Added gpsd script for SNMP Extend (#217) Fixed Typos Fixed another typo --- snmp/gpsd | 45 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) create mode 100755 snmp/gpsd diff --git a/snmp/gpsd b/snmp/gpsd new file mode 100755 index 000000000..48f1be4ad --- /dev/null +++ b/snmp/gpsd @@ -0,0 +1,45 @@ +#!/usr/bin/env bash +# +# Copyright (c) 2019 Mike Centola +# +# Please make sure the paths below are correct. +# Alternatively you can put them in $0.conf, meaning if you've named +# this script gpsd.sh then it must go in gpsd.sh.conf . 
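For reference, gpspipe -w emits one JSON object per line and the python one-liners in the gpsd script below each pull a single field out of the first matching line; a capture looks roughly like this (device path, coordinates, and versions are illustrative):

    $ gpspipe -w -n 10
    {"class":"VERSION","release":"3.17","rev":"3.17","proto_major":3,"proto_minor":12}
    {"class":"TPV","device":"/dev/ttyS0","mode":3,"lat":40.123456,"lon":-75.654321,"alt":102.5}
    {"class":"SKY","device":"/dev/ttyS0","hdop":0.88,"vdop":1.12,"satellites":[{"PRN":5,"used":true}]}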
+# +# +################################################################ +# Don't change anything unless you know what are you doing # +################################################################ + +BIN_GPIPE='/usr/bin/env gpspipe' +BIN_GREP='/usr/bin/env grep' +BIN_PYTHON='/usr/bin/env python' + +# Check for config file +CONFIG=$0".conf" +if [ -f $CONFIG ]; then + . $CONFIG +fi + +# Create Temp File +TMPFILE=$(mktemp) +trap "rm -f $TMPFILE" 0 2 3 15 + +# Write GPSPIPE Data to Temp File +$BIN_GPIPE -w -n 10 > $TMPFILE + +# Parse Temp file for GPSD Data +VERSION=`cat $TMPFILE | $BIN_GREP -m 1 "VERSION" | $BIN_PYTHON -c 'import sys,json;print json.load(sys.stdin)["rev"]'` +GPSDMODE=`cat $TMPFILE | $BIN_GREP -m 1 "mode" | $BIN_PYTHON -c 'import sys,json;print json.load(sys.stdin)["mode"]'` +HDOP=`cat $TMPFILE | $BIN_GREP -m 1 "hdop" | $BIN_PYTHON -c 'import sys,json;print json.load(sys.stdin)["hdop"]'` +VDOP=`cat $TMPFILE | $BIN_GREP -m 1 "vdop" | $BIN_PYTHON -c 'import sys,json;print json.load(sys.stdin)["vdop"]'` +LAT=`cat $TMPFILE | $BIN_GREP -m 1 "lat" | $BIN_PYTHON -c 'import sys,json;print json.load(sys.stdin)["lat"]'` +LONG=`cat $TMPFILE | $BIN_GREP -m 1 "lon" | $BIN_PYTHON -c 'import sys,json;print json.load(sys.stdin)["lon"]'` +ALT=`cat $TMPFILE | $BIN_GREP -m 1 "alt" | $BIN_PYTHON -c 'import sys,json;print json.load(sys.stdin)["alt"]'` +SATS=`cat $TMPFILE | $BIN_GREP -m 1 "SKY" | $BIN_PYTHON -c 'import sys,json;print len(json.load(sys.stdin)["satellites"])'` +SATSUSED=`cat $TMPFILE | $BIN_GREP -m 1 "SKY" | $BIN_PYTHON -c 'import sys,json;print len([sat for sat in json.load(sys.stdin)["satellites"] if sat["used"]])'` + +# Output info for SNMP Extend +echo '{"data":{"mode":"'$GPSDMODE'", "hdop":"'$HDOP'", "vdop":"'$VDOP'", "latitude":"'$LAT'", "longitude":"'$LONG'", "altitude":"'$ALT'", "satellites":"'$SATS'", "satellites_used":"'$SATSUSED'"}, "error":"0", "errorString":"", "version":"'$VERSION'"}' + +rm $TMPFILE \ No newline at end of file From c40606140114b9059409f17a21b06fe8655b760e Mon Sep 17 00:00:00 2001 From: Slashdoom <5092581+slashdoom@users.noreply.github.com> Date: Thu, 10 Jan 2019 18:40:40 +1300 Subject: [PATCH 054/332] Fix: InnoDB stat support for MariaDB v10+ (#211) * mariadb innodb support for v10+ * fix newer innodb insert buffers * agent mysql to snmp extend --- snmp/mysql | 503 +++++++++++++++++++++++++++++++++++++---------------- 1 file changed, 351 insertions(+), 152 deletions(-) diff --git a/snmp/mysql b/snmp/mysql index 8a2d05a97..e08ed6a7d 100755 --- a/snmp/mysql +++ b/snmp/mysql @@ -1,28 +1,17 @@ #!/usr/bin/env php true, # Do you want to check binary logging? 'slave' => true, # Do you want to check slave status? 'procs' => true, # Do you want to check SHOW PROCESSLIST? + 'get_qrt' => true, # Get query response times from Percona Server or MariaDB? ); $use_ss = FALSE; # Whether to use the script server or not @@ -71,9 +73,13 @@ $version = "1.1.7"; # ============================================================================ # Include settings from an external config file (issue 39). # ============================================================================ +if ($check_mk) { + echo("<<>>\n"); +} if (file_exists(__FILE__ . '.cnf' ) ) { require(__FILE__ . '.cnf'); + debug('Found configuration file ' . __FILE__ . '.cnf'); } else { echo("No ".__FILE__ . 
".cnf found!\n"); exit(); @@ -111,6 +117,19 @@ function error_handler($errno, $errstr, $errfile, $errline) { # } #} +# ============================================================================ +# Set the default timezone either to the configured, system timezone, or the +# default set above in the script. +# ============================================================================ +if ( function_exists("date_default_timezone_set") + && function_exists("date_default_timezone_get") ) { + $tz = ($timezone ? $timezone : @date_default_timezone_get()); + if ( $tz ) { + @date_default_timezone_set($tz); + } +} + + # ============================================================================ # Make sure we can also be called as a script. # ============================================================================ @@ -168,7 +187,7 @@ if (!function_exists('array_change_key_case') ) { # ============================================================================ function validate_options($options) { debug($options); - $opts = array('items', 'user', 'pass', 'heartbeat', 'nocache', 'port'); + $opts = array('items', 'user', 'pass', 'heartbeat', 'nocache', 'port', 'server-id'); # Required command-line options foreach ( array() as $option ) { if (!isset($options[$option]) || !$options[$option] ) { @@ -186,21 +205,23 @@ function validate_options($options) { # Print out a brief usage summary # ============================================================================ function usage($message) { - global $mysql_host, $mysql_user, $mysql_pass, $mysql_port, $heartbeat; + global $mysql_host, $mysql_user, $mysql_pass, $mysql_port; $usage = << --items [OPTION] - - --host Hostname to connect to; use host:port syntax to specify a port - Use :/path/to/socket if you want to connect via a UNIX socket - --items Comma-separated list of the items whose data you want - --user MySQL username; defaults to $mysql_user if not given - --pass MySQL password; defaults to $mysql_pass if not given - --heartbeat MySQL heartbeat table; defaults to '$heartbeat' (see mk-heartbeat) - --nocache Do not cache results in a file - --port MySQL port; defaults to $mysql_port if not given - --mysql_ssl Add the MYSQL_CLIENT_SSL flag to mysql_connect() call +Usage: php ss_get_mysql_stats.php --host --items [OPTION] + + --host MySQL host + --items Comma-separated list of the items whose data you want + --user MySQL username + --pass MySQL password + --port MySQL port + --socket MySQL socket + --flags MySQL flags + --connection-timeout MySQL connection timeout + --server-id Server id to associate with a heartbeat if heartbeat usage is enabled + --nocache Do not cache results in a file + --help Show usage EOF; die($usage); @@ -252,8 +273,11 @@ function parse_cmdline( $args ) { # ============================================================================ function ss_get_mysql_stats( $options ) { # Process connection options and connect to MySQL. - global $debug, $mysql_user, $mysql_pass, $heartbeat, $cache_dir, $cache_time, - $chk_options, $mysql_host, $mysql_port, $mysql_ssl; + global $debug, $mysql_host, $mysql_user, $mysql_pass, $cache_dir, $poll_time, $chk_options, + $mysql_port, $mysql_socket, $mysql_flags, + $mysql_ssl, $mysql_ssl_key, $mysql_ssl_cert, $mysql_ssl_ca, + $mysql_connection_timeout, + $heartbeat, $heartbeat_table, $heartbeat_server_id, $heartbeat_utc; # Connect to MySQL. $user = isset($options['user']) ? 
$options['user'] : $mysql_user; @@ -261,26 +285,15 @@ function ss_get_mysql_stats( $options ) { $port = isset($options['port']) ? $options['port'] : $mysql_port; $host = isset($options['host']) ? $options['host'] : $mysql_host; - $heartbeat = isset($options['heartbeat']) ? $options['heartbeat'] : $heartbeat; + $socket = isset($options['socket']) ? $options['socket'] : $mysql_socket; + $flags = isset($options['flags']) ? $options['flags'] : $mysql_flags; + $connection_timeout = isset($options['connection-timeout']) ? $options['connection-timeout'] : $mysql_connection_timeout; + $heartbeat_server_id = isset($options['server-id']) ? $options['server-id'] : $heartbeat_server_id; + # If there is a port, or if it's a non-standard port, we add ":$port" to the # hostname. $host_str = $host.($port != 3306 ? ":$port" : ''); - debug(array('connecting to', $host_str, $user, $pass)); - if (!extension_loaded('mysqli') ) { - debug("The MySQL extension is not loaded"); - die("The MySQL extension is not loaded"); - } - if ($mysql_ssl || (isset($options['mysql_ssl']) && $options['mysql_ssl']) ) { - $conn = ((($GLOBALS["___mysqli_ston"] = mysqli_init()) && (mysqli_real_connect($GLOBALS["___mysqli_ston"], $host_str, - $user, $pass, NULL, 3306, NULL, MYSQLI_CLIENT_SSL))) ? $GLOBALS["___mysqli_ston"] : FALSE); - } - else { - $conn = ($GLOBALS["___mysqli_ston"] = mysqli_connect($host_str, $user, $pass)); - } - if (!$conn ) { - die("MySQL: " . ((is_object($GLOBALS["___mysqli_ston"])) ? mysqli_error($GLOBALS["___mysqli_ston"]) : - (($___mysqli_res = mysqli_connect_error()) ? $___mysqli_res : false))); - } + $sanitized_host = str_replace(array(":", "/"), array("", "_"), $host); $cache_file = "$cache_dir/agent-local-mysql"; @@ -288,12 +301,12 @@ function ss_get_mysql_stats( $options ) { # First, check the cache. $fp = null; - if (!isset($options['nocache']) ) { - if ($fp = fopen($cache_file, 'a+') ) { + if ( $cache_dir && !array_key_exists('nocache', $options) ) { + if ( $fp = fopen($cache_file, 'a+') ) { $locked = flock($fp, 1); # LOCK_SH - if ($locked ) { - if (filesize($cache_file) > 0 - && filectime($cache_file) + ($cache_time) > time() + if ( $locked ) { + if ( filesize($cache_file) > 0 + && filectime($cache_file) + ($poll_time/2) > time() && ($arr = file($cache_file)) ) {# The cache file is good to use. debug("Using the cache file"); @@ -303,12 +316,12 @@ function ss_get_mysql_stats( $options ) { else { debug("The cache file seems too small or stale"); # Escalate the lock to exclusive, so we can write to it. - if (flock($fp, 2) ) { # LOCK_EX + if ( flock($fp, 2) ) { # LOCK_EX # We might have blocked while waiting for that LOCK_EX, and # another process ran and updated it. Let's see if we can just # return the data now: - if (filesize($cache_file) > 0 - && filectime($cache_file) + ($cache_time) > time() + if ( filesize($cache_file) > 0 + && filectime($cache_file) + ($poll_time/2) > time() && ($arr = file($cache_file)) ) {# The cache file is good to use. debug("Using the cache file"); @@ -320,48 +333,79 @@ function ss_get_mysql_stats( $options ) { } } else { - debug("Couldn't lock the cache file, ignoring it."); $fp = null; + debug("Couldn't lock the cache file, ignoring it"); } } + else { + $fp = null; + debug("Couldn't open the cache file"); + } } else { - $fp = null; - debug("Couldn't open the cache file"); + debug("Caching is disabled."); } + # Connect to MySQL. 
+ debug(array('Connecting to', $host, $port, $user, $pass)); + if ( !extension_loaded('mysqli') ) { + debug("PHP MySQLi extension is not loaded"); + die("PHP MySQLi extension is not loaded"); + } + if ( $mysql_ssl ) { + $conn = mysqli_init(); + $conn->options(MYSQLI_OPT_CONNECT_TIMEOUT, $connection_timeout); + mysqli_ssl_set($conn, $mysql_ssl_key, $mysql_ssl_cert, $mysql_ssl_ca, NULL, NULL); + mysqli_real_connect($conn, $host, $user, $pass, NULL, $port, $socket, $flags); + } + else { + $conn = mysqli_init(); + $conn->options(MYSQLI_OPT_CONNECT_TIMEOUT, $connection_timeout); + mysqli_real_connect($conn, $host, $user, $pass, NULL, $port, $socket, $flags); + } + if ( mysqli_connect_errno() ) { + debug("MySQL connection failed: " . mysqli_connect_error()); + die("ERROR: " . mysqli_connect_error()); + } + + # MySQL server version. + # The form of this version number is main_version * 10000 + minor_version * 100 + sub_version + # i.e. version 5.5.44 is 50544. + $mysql_version = mysqli_get_server_version($conn); + debug("MySQL server version is " . $mysql_version); + # Set up variables. $status = array( # Holds the result of SHOW STATUS, SHOW INNODB STATUS, etc # Define some indexes so they don't cause errors with += operations. 'relay_log_space' => null, 'binary_log_space' => null, - 'current_transactions' => null, - 'locked_transactions' => null, - 'active_transactions' => null, - 'innodb_locked_tables' => null, - 'innodb_tables_in_use' => null, - 'innodb_lock_structs' => null, - 'innodb_lock_wait_secs' => null, - 'innodb_sem_waits' => null, - 'innodb_sem_wait_time_ms'=> null, + 'current_transactions' => 0, + 'locked_transactions' => 0, + 'active_transactions' => 0, + 'innodb_locked_tables' => 0, + 'innodb_tables_in_use' => 0, + 'innodb_lock_structs' => 0, + 'innodb_lock_wait_secs' => 0, + 'innodb_sem_waits' => 0, + 'innodb_sem_wait_time_ms'=> 0, # Values for the 'state' column from SHOW PROCESSLIST (converted to # lowercase, with spaces replaced by underscores) - 'State_closing_tables' => null, - 'State_copying_to_tmp_table' => null, - 'State_end' => null, - 'State_freeing_items' => null, - 'State_init' => null, - 'State_locked' => null, - 'State_login' => null, - 'State_preparing' => null, - 'State_reading_from_net' => null, - 'State_sending_data' => null, - 'State_sorting_result' => null, - 'State_statistics' => null, - 'State_updating' => null, - 'State_writing_to_net' => null, - 'State_none' => null, - 'State_other' => null, # Everything not listed above + 'State_closing_tables' => 0, + 'State_copying_to_tmp_table' => 0, + 'State_end' => 0, + 'State_freeing_items' => 0, + 'State_init' => 0, + 'State_locked' => 0, + 'State_login' => 0, + 'State_preparing' => 0, + 'State_reading_from_net' => 0, + 'State_sending_data' => 0, + 'State_sorting_result' => 0, + 'State_statistics' => 0, + 'State_updating' => 0, + 'State_writing_to_net' => 0, + 'State_none' => 0, + 'State_other' => 0, # Everything not listed above ); # Get SHOW STATUS and convert the name-value array into a simple @@ -378,8 +422,15 @@ function ss_get_mysql_stats( $options ) { } # Get SHOW SLAVE STATUS, and add it to the $status array. 
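
# ============================================================================
# A quick worked sketch of the version encoding noted above, assuming a
# plain "major.minor.sub" version string: mysqli_get_server_version() packs
# it as major*10000 + minor*100 + sub, so "5.5.44" becomes 50544 and
# "5.6.21" becomes 50621. The helper below only illustrates the arithmetic;
# the script itself relies on the API call.
# ============================================================================
function encode_mysql_version_sketch($version_string) {
   # intval() also tolerates suffixes such as "44-MariaDB"
   list($major, $minor, $sub)
      = array_map('intval', explode('.', $version_string, 3));
   return $major * 10000 + $minor * 100 + $sub;   # "5.5.44" -> 50544
}
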
- if ($chk_options['slave'] ) { - $result = run_query("SHOW SLAVE STATUS", $conn); + if ( $chk_options['slave'] ) { + # Leverage lock-free SHOW SLAVE STATUS if available + $result = run_query("SHOW SLAVE STATUS NONBLOCKING", $conn); + if ( !$result ) { + $result = run_query("SHOW SLAVE STATUS NOLOCK", $conn); + if ( !$result ) { + $result = run_query("SHOW SLAVE STATUS", $conn); + } + } $slave_status_rows_gotten = 0; foreach ( $result as $row ) { $slave_status_rows_gotten++; @@ -390,23 +441,30 @@ function ss_get_mysql_stats( $options ) { $status['slave_lag'] = $row['seconds_behind_master']; # Check replication heartbeat, if present. - if ($heartbeat ) { + if ( $heartbeat ) { + if ( $heartbeat_utc ) { + $now_func = 'UNIX_TIMESTAMP(UTC_TIMESTAMP)'; + } + else { + $now_func = 'UNIX_TIMESTAMP()'; + } $result2 = run_query( - "SELECT GREATEST(0, UNIX_TIMESTAMP() - UNIX_TIMESTAMP(ts) - 1)" - . " AS delay FROM $heartbeat WHERE id = 1", $conn); + "SELECT MAX($now_func - ROUND(UNIX_TIMESTAMP(ts)))" + . " AS delay FROM $heartbeat_table" + . " WHERE $heartbeat_server_id = 0 OR server_id = $heartbeat_server_id", $conn); $slave_delay_rows_gotten = 0; foreach ( $result2 as $row2 ) { $slave_delay_rows_gotten++; - if ($row2 && is_array($row2) + if ( $row2 && is_array($row2) && array_key_exists('delay', $row2) ) { $status['slave_lag'] = $row2['delay']; } else { - debug("Couldn't get slave lag from $heartbeat"); + debug("Couldn't get slave lag from $heartbeat_table"); } } - if ($slave_delay_rows_gotten == 0 ) { + if ( $slave_delay_rows_gotten == 0 ) { debug("Got nothing from heartbeat query"); } } @@ -417,11 +475,11 @@ function ss_get_mysql_stats( $options ) { $status['slave_stopped'] = ($row['slave_sql_running'] == 'Yes') ? 0 : $status['slave_lag']; } - if ($slave_status_rows_gotten == 0 ) { + if ( $slave_status_rows_gotten == 0 ) { debug("Got nothing from SHOW SLAVE STATUS"); } } - + # Get SHOW MASTER STATUS, and add it to the $status array. if ($chk_options['master'] && array_key_exists('log_bin', $status) @@ -445,18 +503,22 @@ function ss_get_mysql_stats( $options ) { # Get SHOW PROCESSLIST and aggregate it by state, then add it to the array # too. - if ($chk_options['procs'] ) { + if ( $chk_options['procs'] ) { $result = run_query('SHOW PROCESSLIST', $conn); foreach ( $result as $row ) { $state = $row['State']; - if (is_null($state) ) { + if ( is_null($state) ) { $state = 'NULL'; } - if ($state == '' ) { + if ( $state == '' ) { $state = 'none'; } + # MySQL 5.5 replaces the 'Locked' state with a variety of "Waiting for + # X lock" types of statuses. Wrap these all back into "Locked" because + # we don't really care about the type of locking it is. + $state = preg_replace('/^(Table lock|Waiting for .*lock)$/', 'Locked', $state); $state = str_replace(' ', '_', strtolower($state)); - if (array_key_exists("State_$state", $status) ) { + if ( array_key_exists("State_$state", $status) ) { increment($status, "State_$state", 1); } else { @@ -465,15 +527,63 @@ function ss_get_mysql_stats( $options ) { } } + # Get SHOW ENGINES to be able to determine whether InnoDB is present. + $engines = array(); + $result = run_query("SHOW ENGINES", $conn); + foreach ( $result as $row ) { + $engines[$row[0]] = $row[1]; + } + # Get SHOW INNODB STATUS and extract the desired metrics from it, then add # those to the array too. 
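
# ============================================================================
# The fallback chain above, restated as a sketch: try each SHOW SLAVE STATUS
# variant in order and keep the first that returns rows. This leans on the
# run_query() change later in this patch, which deliberately skips error
# handling for SHOW SLAVE STATUS queries, so an unsupported variant simply
# comes back empty instead of aborting the script.
# ============================================================================
function get_slave_status_sketch($conn) {
   $variants = array(
      'SHOW SLAVE STATUS NONBLOCKING',   # lock-free variant (Percona Server)
      'SHOW SLAVE STATUS NOLOCK',        # older lock-free spelling
      'SHOW SLAVE STATUS',               # standard form; may block on locks
   );
   foreach ( $variants as $sql ) {
      $result = run_query($sql, $conn);
      if ( $result ) {
         return $result;                 # first variant that yielded rows wins
      }
   }
   return array();                       # not a slave, or nothing available
}
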
if ($chk_options['innodb'] - && array_key_exists('have_innodb', $status) - && $status['have_innodb'] == 'YES' + && array_key_exists('InnoDB', $engines) + && ( $engines['InnoDB'] == 'YES' + || $engines['InnoDB'] == 'DEFAULT' ) ) { $result = run_query("SHOW /*!50000 ENGINE*/ INNODB STATUS", $conn); $istatus_text = $result[0]['Status']; - $istatus_vals = get_innodb_array($istatus_text); + $istatus_vals = get_innodb_array($istatus_text, $mysql_version); + + # Get response time histogram from Percona Server or MariaDB if enabled. + if ( $chk_options['get_qrt'] + && (( isset($status['have_response_time_distribution']) + && $status['have_response_time_distribution'] == 'YES') + || (isset($status['query_response_time_stats']) + && $status['query_response_time_stats'] == 'ON')) ) + { + debug('Getting query time histogram'); + $i = 0; + $result = run_query( + "SELECT `count`, ROUND(total * 1000000) AS total " + . "FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME " + . "WHERE `time` <> 'TOO LONG'", + $conn); + foreach ( $result as $row ) { + if ( $i > 13 ) { + # It's possible that the number of rows returned isn't 14. + # Don't add extra status counters. + break; + } + $count_key = sprintf("Query_time_count_%02d", $i); + $total_key = sprintf("Query_time_total_%02d", $i); + $status[$count_key] = $row['count']; + $status[$total_key] = $row['total']; + $i++; + } + # It's also possible that the number of rows returned is too few. + # Don't leave any status counters unassigned; it will break graphs. + while ( $i <= 13 ) { + $count_key = sprintf("Query_time_count_%02d", $i); + $total_key = sprintf("Query_time_total_%02d", $i); + $status[$count_key] = 0; + $status[$total_key] = 0; + $i++; + } + } + else { + debug('Not getting time histogram because it is not enabled'); + } # Override values from InnoDB parsing with values from SHOW STATUS, # because InnoDB status might not have everything and the SHOW STATUS is @@ -494,6 +604,8 @@ function ss_get_mysql_stats( $options ) { 'Innodb_rows_inserted' => 'rows_inserted', 'Innodb_rows_read' => 'rows_read', 'Innodb_rows_updated' => 'rows_updated', + 'Innodb_buffer_pool_reads' => 'pool_reads', + 'Innodb_buffer_pool_read_requests' => 'pool_read_requests', ); # If the SHOW STATUS value exists, override... @@ -536,9 +648,9 @@ function ss_get_mysql_stats( $options ) { } # Define the variables to output. I use shortened variable names so maybe - # it'll all fit in 1024 bytes for Cactid and Spine's benefit. This list must - # come right after the word MAGIC_VARS_DEFINITIONS. The Perl script parses - # it and uses it as a Perl variable. + # it'll all fit in 1024 bytes for Cactid and Spine's benefit. + # This list must come right after the word MAGIC_VARS_DEFINITIONS. The Perl script + # parses it and uses it as a Perl variable. 
$keys = array( 'Key_read_requests' => 'a0', 'Key_reads' => 'a1', @@ -650,7 +762,6 @@ function ss_get_mysql_stats( $options ) { 'binary_log_space' => 'cz', 'innodb_locked_tables' => 'd0', 'innodb_lock_structs' => 'd1', - 'State_closing_tables' => 'd2', 'State_copying_to_tmp_table' => 'd3', 'State_end' => 'd4', @@ -667,7 +778,6 @@ function ss_get_mysql_stats( $options ) { 'State_writing_to_net' => 'df', 'State_none' => 'dg', 'State_other' => 'dh', - 'Handler_commit' => 'di', 'Handler_delete' => 'dj', 'Handler_discover' => 'dk', @@ -709,6 +819,53 @@ function ss_get_mysql_stats( $options ) { 'key_buffer_size' => 'ei', 'Innodb_row_lock_time' => 'ej', 'Innodb_row_lock_waits' => 'ek', + + # Values not parsed by LibreNMS + 'Query_time_count_00' => 'ol', + 'Query_time_count_01' => 'om', + 'Query_time_count_02' => 'on', + 'Query_time_count_03' => 'oo', + 'Query_time_count_04' => 'op', + 'Query_time_count_05' => 'oq', + 'Query_time_count_06' => 'or', + 'Query_time_count_07' => 'os', + 'Query_time_count_08' => 'ot', + 'Query_time_count_09' => 'ou', + 'Query_time_count_10' => 'ov', + 'Query_time_count_11' => 'ow', + 'Query_time_count_12' => 'ox', + 'Query_time_count_13' => 'oy', + 'Query_time_total_00' => 'oz', + 'Query_time_total_01' => 'pg', + 'Query_time_total_02' => 'ph', + 'Query_time_total_03' => 'pi', + 'Query_time_total_04' => 'pj', + 'Query_time_total_05' => 'pk', + 'Query_time_total_06' => 'pl', + 'Query_time_total_07' => 'pm', + 'Query_time_total_08' => 'pn', + 'Query_time_total_09' => 'po', + 'Query_time_total_10' => 'pp', + 'Query_time_total_11' => 'pq', + 'Query_time_total_12' => 'pr', + 'Query_time_total_13' => 'ps', + 'wsrep_replicated_bytes' => 'pt', + 'wsrep_received_bytes' => 'pu', + 'wsrep_replicated' => 'pv', + 'wsrep_received' => 'pw', + 'wsrep_local_cert_failures' => 'px', + 'wsrep_local_bf_aborts' => 'py', + 'wsrep_local_send_queue' => 'pz', + 'wsrep_local_recv_queue' => 'qg', + 'wsrep_cluster_size' => 'qh', + 'wsrep_cert_deps_distance' => 'qi', + 'wsrep_apply_window' => 'qj', + 'wsrep_commit_window' => 'qk', + 'wsrep_flow_control_paused' => 'ql', + 'wsrep_flow_control_sent' => 'qm', + 'wsrep_flow_control_recv' => 'qn', + 'pool_reads' => 'qo', + 'pool_read_requests' => 'qp', ); # Return the output. @@ -737,7 +894,7 @@ function ss_get_mysql_stats( $options ) { # MySQL 5.0, and XtraDB or enhanced InnoDB from Percona if applicable. Note # that extra leading spaces are ignored due to trim(). 
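
# ============================================================================
# For context, a sketch of how the short keys above are consumed: the output
# step pairs each short name with its statistic as "a0:VALUE a1:VALUE ...",
# which is what keeps the whole line small enough for Cactid/Spine. The -1
# placeholder for missing statistics is an assumption here, not something
# taken from this diff.
# ============================================================================
function render_short_keys_sketch($status, $keys) {
   $fields = array();
   foreach ( $keys as $name => $short ) {
      # keep the field count constant even when a statistic is absent
      $val = isset($status[$name]) ? $status[$name] : -1;
      $fields[] = "$short:$val";
   }
   return implode(' ', $fields);
}
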
# ============================================================================ -function get_innodb_array($text) { +function get_innodb_array($text, $mysql_version) { $results = array( 'spin_waits' => array(), 'spin_rounds' => array(), @@ -811,13 +968,26 @@ function get_innodb_array($text) { $results['spin_rounds'][] = to_int($row[5]); $results['os_waits'][] = to_int($row[8]); } - elseif (strpos($line, 'RW-shared spins') === 0 ) { + elseif (strpos($line, 'RW-shared spins') === 0 + && strpos($line, ';') > 0 ) { # RW-shared spins 3859028, OS waits 2100750; RW-excl spins 4641946, OS waits 1530310 $results['spin_waits'][] = to_int($row[2]); $results['spin_waits'][] = to_int($row[8]); $results['os_waits'][] = to_int($row[5]); $results['os_waits'][] = to_int($row[11]); } + elseif (strpos($line, 'RW-shared spins') === 0 && strpos($line, '; RW-excl spins') === FALSE) { + # Post 5.5.17 SHOW ENGINE INNODB STATUS syntax + # RW-shared spins 604733, rounds 8107431, OS waits 241268 + $results['spin_waits'][] = to_int($row[2]); + $results['os_waits'][] = to_int($row[7]); + } + elseif (strpos($line, 'RW-excl spins') === 0) { + # Post 5.5.17 SHOW ENGINE INNODB STATUS syntax + # RW-excl spins 604733, rounds 8107431, OS waits 241268 + $results['spin_waits'][] = to_int($row[2]); + $results['os_waits'][] = to_int($row[7]); + } elseif (strpos($line, 'seconds the semaphore:') > 0) { # --Thread 907205 has waited at handler/ha_innodb.cc line 7156 for 1.00 seconds the semaphore: increment($results, 'innodb_sem_waits', 1); @@ -826,18 +996,35 @@ function get_innodb_array($text) { } # TRANSACTIONS - elseif (strpos($line, 'Trx id counter') === 0 ) { + elseif ( strpos($line, 'Trx id counter') === 0 ) { # The beginning of the TRANSACTIONS section: start counting # transactions - # Trx id counter 0 1170664159 - # Trx id counter 861B144C - $results['innodb_transactions'] = make_bigint($row[3], $row[4]); + if ( $mysql_version < 50600 ) { + # For versions prior 5.6: two decimals or one hex + # Trx id counter 0 1170664159 + # Trx id counter 861B144C + $results['innodb_transactions'] = isset($row[4]) ? make_bigint( + $row[3], $row[4]) : base_convert($row[3], 16, 10); + } + else { + # For versions 5.6+ and MariaDB 10.x: one decimal + # Trx id counter 2903813 + $results['innodb_transactions'] = $row[3]; + } $txn_seen = TRUE; } - elseif (strpos($line, 'Purge done for trx') === 0 ) { - # Purge done for trx's n:o < 0 1170663853 undo n:o < 0 0 - # Purge done for trx's n:o < 861B135D undo n:o < 0 - $purged_to = make_bigint($row[6], $row[7] == 'undo' ? null : $row[7]); + elseif ( strpos($line, 'Purge done for trx') === 0 ) { + if ( $mysql_version < 50600 ) { + # For versions prior 5.6: two decimals or one hex + # Purge done for trx's n:o < 0 1170663853 undo n:o < 0 0 + # Purge done for trx's n:o < 861B135D undo n:o < 0 + $purged_to = $row[7] == 'undo' ? 
base_convert($row[6], 16, 10) : make_bigint($row[6], $row[7]); + } + else { + # For versions 5.6+ and MariaDB 10.x: one decimal + # Purge done for trx's n:o < 2903354 undo n:o < 0 state: running but idle + $purged_to = $row[6]; + } $results['unpurged_txns'] = big_sub($results['innodb_transactions'], $purged_to); } @@ -845,31 +1032,31 @@ function get_innodb_array($text) { # History list length 132 $results['history_list'] = to_int($row[3]); } - elseif ($txn_seen && strpos($line, '---TRANSACTION') === 0 ) { + elseif ( $txn_seen && strpos($line, '---TRANSACTION') === 0 ) { # ---TRANSACTION 0, not started, process no 13510, OS thread id 1170446656 increment($results, 'current_transactions', 1); - if (strpos($line, 'ACTIVE') > 0 ) { + if ( strpos($line, 'ACTIVE') > 0 ) { increment($results, 'active_transactions', 1); } } - elseif ($txn_seen && strpos($line, '------- TRX HAS BEEN') === 0 ) { + elseif ( $txn_seen && strpos($line, '------- TRX HAS BEEN') === 0 ) { # ------- TRX HAS BEEN WAITING 32 SEC FOR THIS LOCK TO BE GRANTED: increment($results, 'innodb_lock_wait_secs', to_int($row[5])); } - elseif (strpos($line, 'read views open inside InnoDB') > 0 ) { + elseif ( strpos($line, 'read views open inside InnoDB') > 0 ) { # 1 read views open inside InnoDB $results['read_views'] = to_int($row[0]); } - elseif (strpos($line, 'mysql tables in use') === 0 ) { + elseif ( strpos($line, 'mysql tables in use') === 0 ) { # mysql tables in use 2, locked 2 increment($results, 'innodb_tables_in_use', to_int($row[4])); increment($results, 'innodb_locked_tables', to_int($row[6])); } - elseif ($txn_seen && strpos($line, 'lock struct(s)') > 0 ) { + elseif ( $txn_seen && strpos($line, 'lock struct(s)') > 0 ) { # 23 lock struct(s), heap size 3024, undo log entries 27 # LOCK WAIT 12 lock struct(s), heap size 3024, undo log entries 5 # LOCK WAIT 2 lock struct(s), heap size 368 - if (strpos($line, 'LOCK WAIT') === 0 ) { + if ( strpos($line, 'LOCK WAIT') === 0 ) { increment($results, 'innodb_lock_structs', to_int($row[2])); increment($results, 'locked_transactions', 1); } @@ -896,7 +1083,7 @@ function get_innodb_array($text) { $results['pending_aio_log_ios'] = to_int($row[6]); $results['pending_aio_sync_ios'] = to_int($row[9]); } - elseif (strpos($line, 'Pending flushes (fsync)') === 0 ) { + elseif ( strpos($line, 'Pending flushes (fsync)') === 0 ) { # Pending flushes (fsync) log: 0; buffer pool: 0 $results['pending_log_flushes'] = to_int($row[4]); $results['pending_buf_pool_flushes'] = to_int($row[7]); @@ -917,6 +1104,16 @@ function get_innodb_array($text) { $results['ibuf_used_cells'] = to_int($row[2]); $results['ibuf_free_cells'] = to_int($row[6]); $results['ibuf_cell_count'] = to_int($row[9]); + if (strpos($line, 'merges')) { + $results['ibuf_merges'] = to_int($row[10]); + } + } + elseif (strpos($line, ', delete mark ') > 0 && strpos($prev_line, 'merged operations:') === 0 ) { + # Output of show engine innodb status has changed in 5.5 + # merged operations: + # insert 593983, delete mark 387006, delete 73092 + $results['ibuf_inserts'] = to_int($row[1]); + $results['ibuf_merged'] = to_int($row[1]) + to_int($row[4]) + to_int($row[6]); } elseif (strpos($line, ' merged recs, ') > 0 ) { # 19817685 inserts, 19817684 merged recs, 3552620 merges @@ -972,40 +1169,41 @@ function get_innodb_array($text) { } # BUFFER POOL AND MEMORY - elseif (strpos($line, "Total memory allocated") === 0 ) { + elseif (strpos($line, "Total memory allocated") === 0 && strpos($line, "in additional pool allocated") > 0 ) { # Total memory allocated 
29642194944; in additional pool allocated 0 + # Total memory allocated by read views 96 $results['total_mem_alloc'] = to_int($row[3]); $results['additional_pool_alloc'] = to_int($row[8]); } - elseif (strpos($line, 'Adaptive hash index ') === 0 ) { + elseif(strpos($line, 'Adaptive hash index ') === 0 ) { # Adaptive hash index 1538240664 (186998824 + 1351241840) $results['adaptive_hash_memory'] = to_int($row[3]); } - elseif (strpos($line, 'Page hash ') === 0 ) { + elseif(strpos($line, 'Page hash ') === 0 ) { # Page hash 11688584 $results['page_hash_memory'] = to_int($row[2]); } - elseif (strpos($line, 'Dictionary cache ') === 0 ) { + elseif(strpos($line, 'Dictionary cache ') === 0 ) { # Dictionary cache 145525560 (140250984 + 5274576) $results['dictionary_cache_memory'] = to_int($row[2]); } - elseif (strpos($line, 'File system ') === 0 ) { + elseif(strpos($line, 'File system ') === 0 ) { # File system 313848 (82672 + 231176) $results['file_system_memory'] = to_int($row[2]); } - elseif (strpos($line, 'Lock system ') === 0 ) { + elseif(strpos($line, 'Lock system ') === 0 ) { # Lock system 29232616 (29219368 + 13248) $results['lock_system_memory'] = to_int($row[2]); } - elseif (strpos($line, 'Recovery system ') === 0 ) { + elseif(strpos($line, 'Recovery system ') === 0 ) { # Recovery system 0 (0 + 0) $results['recovery_system_memory'] = to_int($row[2]); } - elseif (strpos($line, 'Threads ') === 0 ) { + elseif(strpos($line, 'Threads ') === 0 ) { # Threads 409336 (406936 + 2400) $results['thread_hash_memory'] = to_int($row[1]); } - elseif (strpos($line, 'innodb_io_pattern ') === 0 ) { + elseif(strpos($line, 'innodb_io_pattern ') === 0 ) { # innodb_io_pattern 0 (0 + 0) $results['innodb_io_pattern_memory'] = to_int($row[1]); } @@ -1053,6 +1251,7 @@ function get_innodb_array($text) { $results['queries_inside'] = to_int($row[0]); $results['queries_queued'] = to_int($row[4]); } + $prev_line = $line; } foreach ( array('spin_waits', 'spin_rounds', 'os_waits') as $key ) { @@ -1063,16 +1262,9 @@ function get_innodb_array($text) { $results['uncheckpointed_bytes'] = big_sub($results['log_bytes_written'], $results['last_checkpoint']); - -# foreach ($results as $key => $value) { -# echo(strtolower($key).":".strtolower($value)."\n"); -# } - - return $results; } - # ============================================================================ # Returns a bigint from two ulint or a single hex number. This is tested in # t/mysql_stats.php and copied, without tests, to ss_get_by_ssh.php. @@ -1117,27 +1309,34 @@ function to_int ( $str ) { # ============================================================================ # Wrap mysql_query in error-handling, and instead of returning the result, # return an array of arrays in the result. +# ============================================================================ + # ============================================================================ function run_query($sql, $conn) { global $debug; debug($sql); - $result = @mysqli_query( $conn, $sql); - if ($debug ) { - $error = @((is_object($conn)) ? mysqli_error($conn) : (($___mysqli_res = mysqli_connect_error()) ? 
$___mysqli_res : false)); - if ($error ) { + $result = @mysqli_query($conn, $sql); + if ( $debug && strpos($sql, 'SHOW SLAVE STATUS ') === false ) { + $error = @mysqli_error($conn); + if ( $error ) { debug(array($sql, $error)); die("SQLERR $error in $sql"); } } $array = array(); - while ( $row = @mysqli_fetch_array($result) ) { - $array[] = $row; + $count = @mysqli_num_rows($result); + if ( $count > 10000 ) { + debug('Abnormal number of rows returned: ' . $count); + } + else { + while ( $row = @mysqli_fetch_array($result) ) { + $array[] = $row; + } } debug(array($sql, $array)); return $array; } -# ============================================================================ # Safely increments a value that might be null. # ============================================================================ function increment(&$arr, $key, $howmuch) { From ad300c035a2be4a55553c2994d5ce7ba69d57432 Mon Sep 17 00:00:00 2001 From: VVelox Date: Wed, 9 Jan 2019 23:41:39 -0600 Subject: [PATCH 055/332] various misc fixes for the postfix poller (#112) * update postfix * move a few things to reduce the number of changed lines * move mself to the end * white space cleanup and another small cleanup of $chr * use $chrNew instead of $chrC when writing the current values * more white space cleanup * replace one more missed instance of iuoscp --- snmp/postfixdetailed | 29 ++++++++++++++++------------- 1 file changed, 16 insertions(+), 13 deletions(-) mode change 100644 => 100755 snmp/postfixdetailed diff --git a/snmp/postfixdetailed b/snmp/postfixdetailed old mode 100644 new mode 100755 index 9bf71e9c7..e6cb1e9ea --- a/snmp/postfixdetailed +++ b/snmp/postfixdetailed @@ -86,10 +86,10 @@ my ( $received, $rardnf, $rarnfqa, $iuscp, - $msefl, $sce, $scp, - $urr) = split ( /\n/, $old ); + $urr, + $msefl) = split ( /\n/, $old ); if ( ! defined( $received ) ){ $received=0; } if ( ! defined( $delivered ) ){ $delivered=0; } @@ -142,7 +142,6 @@ my $recipientsC=0; my $recipienthdC=0; my $deferralcrC=0; my $deferralhidC=0; -my $chrC=0; my $hcrnfqhC=0; my $sardnfC=0; my $sarnobuC=0; @@ -195,12 +194,13 @@ sub newValue{ my $output=`$pflogsumm /var/log/maillog`; -#holds client host rejected values till the end when it is compared to the old one -my $chrNew=0; - #holds RBL values till the end when it is compared to the old one my $buNew=0; + +#holds client host rejected values till the end when it is compared to the old one +my $chrNew=0; + # holds recipient address rejected values till the end when it is compared to the old one my $raruuNew=0; @@ -353,6 +353,7 @@ while ( defined( $outputA[$int] ) ){ # deferrals Host is down if ( ( $line =~ /Host is down$/ ) && ( ! $handled ) ){ $line=~s/ .*//; + $deferralcrC=$line; $deferralhidC=$line; $deferralhid=newValue( $deferralhid, $line ); $handled=1; @@ -429,8 +430,8 @@ while ( defined( $outputA[$int] ) ){ #Improper use of SMTP command pipelining if ( ( $line =~ /Improper use of SMTP command pipelining/ ) && ( ! $handled ) ){ $line=~s/.*\: //g; - $iuoscpC=$line; - $iuoscp=newValue( $iuoscp, $line ); + $iuscpC=$line; + $iuscp=newValue( $iuscp, $line ); } #Message size exceeds fixed limit @@ -453,16 +454,18 @@ while ( defined( $outputA[$int] ) ){ $scpC=$line; $scp=newValue( $scp, $line ); } - + #unknown reject reason if ( ( $line =~ /unknown reject reason/ ) && ( ! $handled ) ){ $line=~s/.*\: //g; $urrC=$line; $urr=newValue( $urr, $line ); } + $int++; } + # final client host rejected total $chr=newValue( $chr, $chrNew ); @@ -502,8 +505,8 @@ my $data=$received."\n". $iuscp."\n". $sce."\n". 
$scp."\n". - $urr."\n"; - $msefl."\n". + $urr."\n". + $msefl."\n"; print $data; @@ -535,10 +538,10 @@ my $current=$receivedC."\n". $rardnfC."\n". $rarnfqaC."\n". $iuscpC."\n". - $mseflC."\n". $sceC."\n". $scpC."\n". - $urrC."\n"; + $urrC."\n". + $mseflC."\n"; open(my $fh, ">", $cache) or die "Can't open '".$cache."'"; print $fh $current; From 3a407e3f721b7677fb2724af736ea87838d4dcc5 Mon Sep 17 00:00:00 2001 From: Tony Murray Date: Thu, 17 Jan 2019 11:44:02 -0600 Subject: [PATCH 056/332] Update powerdns script to json (#218) --- snmp/powerdns.php | 76 ----------------------------------------------- snmp/powerdns.py | 26 ++++++++++++++++ 2 files changed, 26 insertions(+), 76 deletions(-) delete mode 100755 snmp/powerdns.php create mode 100755 snmp/powerdns.py diff --git a/snmp/powerdns.php b/snmp/powerdns.php deleted file mode 100755 index 14103124c..000000000 --- a/snmp/powerdns.php +++ /dev/null @@ -1,76 +0,0 @@ -#!/usr/bin/env php - - -// START SETTINGS /// -$pdnscontrol = '/usr/bin/pdns_control'; -// END SETTINGS /// - -// DO NOT EDIT UNDER THIS LINE -// -$cmd = shell_exec($pdnscontrol.' show \*'); -$vars = array(); -$vars = explode(',', $cmd); - - -function doSNMP($vars) { - foreach ($vars as $item => $value) { - $value = trim($value); - if (!empty($value)) { - echo $value."\n"; - } - } - -}//end doSNMP() - -function doSNMPv2($vars) { - $pdns = array(); - foreach ($vars as $item => $value) { - if (!empty($value)) { - $temp = explode('=', $value); - if (isset($temp[1])) { - $pdns[$temp[0]] = $temp[1]; - } - } - } - - $var = array(); - $var['corrupt-packets'] = (isset($pdns['corrupt-packets']) ? $pdns['corrupt-packets'] : 'U'); - $var['deferred-cache-inserts'] = (isset($pdns['deferred-cache-inserts']) ? $pdns['deferred-cache-inserts'] : 'U'); - $var['deferred-cache-lookup'] = (isset($pdns['deferred-cache-lookup']) ? $pdns['deferred-cache-lookup'] : 'U'); - $var['latency'] = (isset($pdns['latency']) ? $pdns['latency'] : 'U'); - $var['packetcache-hit'] = (isset($pdns['packetcache-hit']) ? $pdns['packetcache-hit'] : 'U'); - $var['packetcache-miss'] = (isset($pdns['packetcache-miss']) ? $pdns['packetcache-miss'] : 'U'); - $var['packetcache-size'] = (isset($pdns['packetcache-size']) ? $pdns['packetcache-size'] : 'U'); - $var['qsize-q'] = (isset($pdns['qsize-q']) ? $pdns['qsize-q'] : 'U'); - $var['query-cache-hit'] = (isset($pdns['query-cache-hit']) ? $pdns['query-cache-hit'] : 'U'); - $var['query-cache-miss'] = (isset($pdns['query-cache-miss']) ? $pdns['query-cache-miss'] : 'U'); - $var['recursing-answers'] = (isset($pdns['recursing-answers']) ? $pdns['recursing-answers'] : 'U'); - $var['recursing-questions'] = (isset($pdns['recursing-questions']) ? $pdns['recursing-questions'] : 'U'); - $var['servfail-packets'] = (isset($pdns['servfail-packets']) ? $pdns['servfail-packets'] : 'U'); - $var['tcp-answers'] = (isset($pdns['tcp-answers']) ? $pdns['tcp-answers'] : 'U'); - $var['tcp-queries'] = (isset($pdns['tcp-queries']) ? $pdns['tcp-queries'] : 'U'); - $var['timedout-packets'] = (isset($pdns['timedout-packets']) ? $pdns['timedout-packets'] : 'U'); - $var['udp-answers'] = (isset($pdns['udp-answers']) ? $pdns['udp-answers'] : 'U'); - $var['udp-queries'] = (isset($pdns['udp-queries']) ? $pdns['udp-queries'] : 'U'); - $var['udp4-answers'] = (isset($pdns['udp4-answers']) ? $pdns['udp4-answers'] : 'U'); - $var['udp4-queries'] = (isset($pdns['udp4-queries']) ? $pdns['udp4-queries'] : 'U'); - $var['udp6-answers'] = (isset($pdns['udp6-answers']) ? 
$pdns['udp6-answers'] : 'U'); - $var['udp6-queries'] = (isset($pdns['udp6-queries']) ? $pdns['udp6-queries'] : 'U'); - foreach ($var as $item => $count) { - echo $count."\n"; - } - -}//end doSNMPv2() - - -doSNMPv2($vars); diff --git a/snmp/powerdns.py b/snmp/powerdns.py new file mode 100755 index 000000000..75cc1fae8 --- /dev/null +++ b/snmp/powerdns.py @@ -0,0 +1,26 @@ +#!/usr/bin/env python + +import json +import subprocess + +pdnscontrol = '/usr/bin/pdns_control' + +process = subprocess.Popen([pdnscontrol, 'show', '*'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) +input = process.communicate() +stdout = input[0].decode() +stderr = input[1].decode() + +data = {} +for var in stdout.split(','): + if '=' in var: + key, value = var.split('=') + data[key] = value + +output = { + 'version': 1, + 'error': process.returncode, + 'errorString': stderr, + 'data': data +} + +print(json.dumps(output)) From c9a0d2893e44f89f7c8c9450a9d42438eff1404d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Felici=C3=A1n=20Hopp=C3=A1l?= Date: Mon, 11 Feb 2019 23:06:57 +0100 Subject: [PATCH 057/332] Fix: zpool list output changed, incorrect values (#219) * fix zpool data, output of zpool list -pH changed in freebsd 11 * fix zpool data, output of zpool list -pH changed in freebsd 11 * bump version * version dump to 2 --- snmp/zfs-freebsd | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/snmp/zfs-freebsd b/snmp/zfs-freebsd index d78658c2d..12600e1e1 100755 --- a/snmp/zfs-freebsd +++ b/snmp/zfs-freebsd @@ -51,7 +51,7 @@ use Getopt::Std; $Getopt::Std::STANDARD_HELP_VERSION = 1; sub main::VERSION_MESSAGE { - print "FreeBSD ZFS stats extend 0.1.0\n"; + print "FreeBSD ZFS stats extend 0.2.0\n"; } sub main::HELP_MESSAGE { @@ -236,14 +236,14 @@ my $pools_int=0; my @toShoveIntoJSON; while ( defined( $pools[$pools_int] ) ) { my %newPool; - + my $pool=$pools[$pools_int]; $pool =~ s/\t/,/g; - $pool =~ s/\,\-\,/\,0\,/g; + $pool =~ s/\,\-\,\-\,/\,0\,0\,/g; $pool =~ s/\%//g; $pool =~ s/\,([0-1\.]*)x\,/,$1,/; - ( $newPool{name}, $newPool{size}, $newPool{alloc}, $newPool{free}, $newPool{expandsz}, $newPool{frag}, $newPool{cap}, $newPool{dedup} )=split(/\,/, $pool); + ( $newPool{name}, $newPool{size}, $newPool{alloc}, $newPool{free}, $newPool{ckpoint}, $newPool{expandsz}, $newPool{frag}, $newPool{cap}, $newPool{dedup} )=split(/\,/, $pool); push(@toShoveIntoJSON, \%newPool); @@ -253,7 +253,7 @@ $tojson{pools}=\@toShoveIntoJSON; my %head_hash; $head_hash{'data'}=\%tojson; -$head_hash{'version'}=1; +$head_hash{'version'}=2; $head_hash{'error'}=0; $head_hash{'errorString'}=''; From 147cb67824b213045826677946166c8ee807f23c Mon Sep 17 00:00:00 2001 From: Tony Murray Date: Tue, 12 Feb 2019 20:33:05 -0600 Subject: [PATCH 058/332] Use os-release whenever possible for the distro script (#220) Except centos... 
https://bugs.centos.org/view.php?id=8359 --- snmp/distro | 37 +++++++++++++++++++------------------ 1 file changed, 19 insertions(+), 18 deletions(-) diff --git a/snmp/distro b/snmp/distro index cd9e814bf..75fa74d2a 100755 --- a/snmp/distro +++ b/snmp/distro @@ -24,6 +24,7 @@ elif [ "${OS}" = "Linux" ] ; then DIST=$(cat /etc/redhat-release | awk '{print $1}') if [ "${DIST}" = "CentOS" ]; then DIST="CentOS" + IGNORE_OS_RELEASE=1 # https://bugs.centos.org/view.php?id=8359 elif [ "${DIST}" = "CloudLinux" ]; then DIST="CloudLinux" elif [ "${DIST}" = "Mandriva" ]; then @@ -77,10 +78,6 @@ elif [ "${OS}" = "Linux" ] ; then REV=$(sed -n -e 's/^.*PHOTON_BUILD_NUMBER=//p' /etc/photon-release) IGNORE_LSB=1 # photon os does not have /etc/lsb-release nor lsb_release - elif [ -f /etc/os-release ] ; then - DIST=$(grep '^NAME=' /etc/os-release | cut -d= -f2- | tr -d '"') - REV=$(grep '^VERSION_ID=' /etc/os-release | cut -d= -f2- | tr -d '"') - elif [ -f /etc/openwrt_version ] ; then DIST="OpenWrt" REV=$(cat /etc/openwrt_version) @@ -94,29 +91,33 @@ elif [ "${OS}" = "Linux" ] ; then REV=$(echo SP$(grep PATCHLEVEL /etc/SuSE-release | cut -d = -f 2 | tr -d " ")) fi - if [ -f /etc/lsb-release -a "${IGNORE_LSB}" != 1 ] ; then - LSB_DIST=$(lsb_release -si) - LSB_REV=$(lsb_release -sr) - if [ "$LSB_DIST" != "" ] ; then - DIST=$LSB_DIST - fi - if [ "$LSB_REV" != "" ] ; then - REV=$LSB_REV - fi - fi - if [ -x "$(command -v awk)" ]; then # some distros do not ship with awk if [ "`uname -a | awk '{print $(NF)}'`" = "DD-WRT" ] ; then DIST="dd-wrt" - fi + fi if [ "`uname -a | awk '{print $(NF)}'`" = "ASUSWRT-Merlin" ] ; then DIST="ASUSWRT-Merlin" REV=`nvram show | grep buildno= | egrep -o '[0-9].[0-9].[0-9]'` > /dev/null 2>&1 fi fi - if [ -n "${REV}" ] - then + # try standardized os version methods + if [ -f /etc/os-release -a "${IGNORE_OS_RELEASE}" != 1 ] ; then + source /etc/os-release + STD_DIST="$NAME" + STD_REV="$VERSION_ID" + elif [ -f /etc/lsb-release -a "${IGNORE_LSB}" != 1 ] ; then + STD_DIST=$(lsb_release -si) + STD_REV=$(lsb_release -sr) + fi + if [ -n "${STD_DIST}" ]; then + DIST="${STD_DIST}" + fi + if [ -n "${STD_REV}" ]; then + REV="${STD_REV}" + fi + + if [ -n "${REV}" ]; then OSSTR="${DIST} ${REV}" else OSSTR="${DIST}" From 8114d88f9124eae43cbd91bfc8bc49ec73c78ad6 Mon Sep 17 00:00:00 2001 From: "Shao Yu-Lung (Allen)" Date: Sat, 16 Mar 2019 14:33:31 +0800 Subject: [PATCH 059/332] remove duplicate code --- agent-local/mysql | 11 +++-------- snmp/mysql | 11 +++-------- 2 files changed, 6 insertions(+), 16 deletions(-) diff --git a/agent-local/mysql b/agent-local/mysql index 3b8b30427..9277efc91 100755 --- a/agent-local/mysql +++ b/agent-local/mysql @@ -349,17 +349,12 @@ function ss_get_mysql_stats( $options ) { debug("PHP MySQLi extension is not loaded"); die("PHP MySQLi extension is not loaded"); } + $conn = mysqli_init(); + $conn->options(MYSQLI_OPT_CONNECT_TIMEOUT, $connection_timeout); if ( $mysql_ssl ) { - $conn = mysqli_init(); - $conn->options(MYSQLI_OPT_CONNECT_TIMEOUT, $connection_timeout); mysqli_ssl_set($conn, $mysql_ssl_key, $mysql_ssl_cert, $mysql_ssl_ca, NULL, NULL); - mysqli_real_connect($conn, $host, $user, $pass, NULL, $port, $socket, $flags); - } - else { - $conn = mysqli_init(); - $conn->options(MYSQLI_OPT_CONNECT_TIMEOUT, $connection_timeout); - mysqli_real_connect($conn, $host, $user, $pass, NULL, $port, $socket, $flags); } + @mysqli_real_connect($conn, $host, $user, $pass, NULL, $port, $socket, $flags); if ( mysqli_connect_errno() ) { debug("MySQL connection failed: " . 
mysqli_connect_error());
         die("ERROR: " . mysqli_connect_error());
diff --git a/snmp/mysql b/snmp/mysql
index e08ed6a7d..27833e016 100755
--- a/snmp/mysql
+++ b/snmp/mysql
@@ -352,17 +352,12 @@ function ss_get_mysql_stats( $options ) {
       debug("PHP MySQLi extension is not loaded");
       die("PHP MySQLi extension is not loaded");
    }
+   $conn = mysqli_init();
+   $conn->options(MYSQLI_OPT_CONNECT_TIMEOUT, $connection_timeout);
    if ( $mysql_ssl ) {
-      $conn = mysqli_init();
-      $conn->options(MYSQLI_OPT_CONNECT_TIMEOUT, $connection_timeout);
       mysqli_ssl_set($conn, $mysql_ssl_key, $mysql_ssl_cert, $mysql_ssl_ca, NULL, NULL);
-      mysqli_real_connect($conn, $host, $user, $pass, NULL, $port, $socket, $flags);
-   }
-   else {
-      $conn = mysqli_init();
-      $conn->options(MYSQLI_OPT_CONNECT_TIMEOUT, $connection_timeout);
-      mysqli_real_connect($conn, $host, $user, $pass, NULL, $port, $socket, $flags);
    }
+   @mysqli_real_connect($conn, $host, $user, $pass, NULL, $port, $socket, $flags);
    if ( mysqli_connect_errno() ) {
       debug("MySQL connection failed: " . mysqli_connect_error());
       die("ERROR: " . mysqli_connect_error());

From 3ce06d6defc63f200f2bbfec7718748c8ec9e832 Mon Sep 17 00:00:00 2001
From: "Zane C. Bowers-Hadley"
Date: Sun, 17 Mar 2019 05:55:33 -0500
Subject: [PATCH 060/332] freshly initialized ZFS pools that are not in use
 don't have a $data_demand_total

---
 snmp/zfs-freebsd | 21 +++++++++++++++------
 1 file changed, 15 insertions(+), 6 deletions(-)

diff --git a/snmp/zfs-freebsd b/snmp/zfs-freebsd
index cea6e1e95..ee185b6a6 100755
--- a/snmp/zfs-freebsd
+++ b/snmp/zfs-freebsd
@@ -51,7 +51,7 @@ use Getopt::Std;
 $Getopt::Std::STANDARD_HELP_VERSION = 1;

 sub main::VERSION_MESSAGE {
-	print "FreeBSD ZFS stats extend 0.0.0\n";
+	print "FreeBSD ZFS stats extend 0.2.0\n";
 }

 sub main::HELP_MESSAGE {
@@ -157,7 +157,10 @@ my $real_hits = $mfu_hits + $mru_hits;
 my $cache_hit_percent = $arc_hits / $arc_accesses_total * 100;
 my $cache_miss_percent = $arc_misses / $arc_accesses_total * 100;
 my $actual_hit_percent = $real_hits / $arc_accesses_total * 100;
-my $data_demand_percent = $demand_data_hits / $demand_data_total * 100;
+my $data_demand_percent = 'U';
+if ( $demand_data_total != 0 ){
+	$data_demand_percent = $demand_data_hits / $demand_data_total * 100;
+}

 my $data_prefetch_percent;
 if ( $prefetch_data_total != 0 ) {
@@ -236,14 +239,14 @@ my $pools_int=0;
 my @toShoveIntoJSON;
 while ( defined( $pools[$pools_int] ) ) {
 	my %newPool;
-	
+
 	my $pool=$pools[$pools_int];
 	$pool =~ s/\t/,/g;
-	$pool =~ s/\,\-\,/\,0\,/g;
+	$pool =~ s/\,\-\,\-\,/\,0\,0\,/g;
 	$pool =~ s/\%//g;
 	$pool =~ s/\,([0-1\.]*)x\,/,$1,/;
-	( $newPool{name}, $newPool{size}, $newPool{alloc}, $newPool{free}, $newPool{expandsz}, $newPool{frag}, $newPool{cap}, $newPool{dedup} )=split(/\,/, $pool);
+	( $newPool{name}, $newPool{size}, $newPool{alloc}, $newPool{free}, $newPool{ckpoint}, $newPool{expandsz}, $newPool{frag}, $newPool{cap}, $newPool{dedup} )=split(/\,/, $pool);

 	push(@toShoveIntoJSON, \%newPool);

@@ -251,13 +254,19 @@ while ( defined( $pools[$pools_int] ) ) {
 }
 $tojson{pools}=\@toShoveIntoJSON;

+my %head_hash;
+$head_hash{'data'}=\%tojson;
+$head_hash{'version'}=2;
+$head_hash{'error'}=0;
+$head_hash{'errorString'}='';
+
 my $j=JSON->new;

 if ( $opts{p} ){
 	$j->pretty(1);
 }

-print $j->encode( \%tojson );
+print $j->encode( \%head_hash );

 if (! $opts{p} ){
 	print "\n";

From 656412830564593cfefeee5dceeae89bfa371000 Mon Sep 17 00:00:00 2001
From: "Zane C. Bowers-Hadley"
Date: Sun, 17 Mar 2019 06:02:43 -0500
Subject: [PATCH 061/332] remove unneeded else statement and re-apply patch

---
 snmp/zfs-freebsd | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)
 mode change 100755 => 100644 snmp/zfs-freebsd

diff --git a/snmp/zfs-freebsd b/snmp/zfs-freebsd
old mode 100755
new mode 100644
index ee185b6a6..842c255f9
--- a/snmp/zfs-freebsd
+++ b/snmp/zfs-freebsd
@@ -157,16 +157,15 @@ my $real_hits = $mfu_hits + $mru_hits;
 my $cache_hit_percent = $arc_hits / $arc_accesses_total * 100;
 my $cache_miss_percent = $arc_misses / $arc_accesses_total * 100;
 my $actual_hit_percent = $real_hits / $arc_accesses_total * 100;
-my $data_demand_percent = 'U';
+
+my $data_demand_percent = 0;
 if ( $demand_data_total != 0 ){
-    $data_demand_percent = $demand_data_hits / $demand_data_total * 100;
+	$data_demand_percent = $demand_data_hits / $demand_data_total * 100;
 }

-my $data_prefetch_percent;
+my $data_prefetch_percent=0;
 if ( $prefetch_data_total != 0 ) {
 	$data_prefetch_percent = $prefetch_data_hits / $prefetch_data_total * 100;
-}else{
-	$data_prefetch_percent = 0;
 }

 my $anon_hits_percent;

From af32f56a74e0d9915b4beb419a28814e9bf058d8 Mon Sep 17 00:00:00 2001
From: "Zane C. Bowers-Hadley"
Date: Sun, 17 Mar 2019 06:07:59 -0500
Subject: [PATCH 062/332] merge... and update version

---
 snmp/zfs-freebsd | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/snmp/zfs-freebsd b/snmp/zfs-freebsd
index 842c255f9..93e162476 100644
--- a/snmp/zfs-freebsd
+++ b/snmp/zfs-freebsd
@@ -51,7 +51,7 @@ use Getopt::Std;
 $Getopt::Std::STANDARD_HELP_VERSION = 1;

 sub main::VERSION_MESSAGE {
-	print "FreeBSD ZFS stats extend 0.2.0\n";
+	print "FreeBSD ZFS stats extend 0.2.1\n";
 }

 sub main::HELP_MESSAGE {

From 38acc2bd3d8e81414b4bfc2cb2bb3e955877fbc1 Mon Sep 17 00:00:00 2001
From: "Zane C. Bowers-Hadley"
Date: Mon, 18 Mar 2019 03:39:17 -0500
Subject: [PATCH 063/332] actually make this work on systems other than
 FreeBSD and deal with the bug where a connection may not have a protocol

---
 snmp/portactivity | 47 ++++++++++++++++++++---------------------------
 1 file changed, 20 insertions(+), 27 deletions(-)

diff --git a/snmp/portactivity b/snmp/portactivity
index 430ae5190..9965e0d69 100755
--- a/snmp/portactivity
+++ b/snmp/portactivity
@@ -1,6 +1,6 @@
 #!/usr/bin/env perl

-#Copyright (c) 2018, Zane C. Bowers-Hadley
+#Copyright (c) 2019, Zane C. Bowers-Hadley
 #All rights reserved.
 #
 #Redistribution and use in source and binary forms, with or without modification,
@@ -91,7 +91,7 @@ use Parse::Netstat qw(parse_netstat);
 $Getopt::Std::STANDARD_HELP_VERSION = 1;

 sub main::VERSION_MESSAGE {
-	print "Port Activity SNMP stats extend 0.0.0\n";
+	print "Port Activity SNMP stats extend 0.0.1\n";
 }

 sub main::HELP_MESSAGE {
@@ -175,7 +175,7 @@ sub return_json{
 	}

 	print $j->encode( \%to_return );
-	
+
 	if ( ! $pretty ){
 		print "\n";
 	}
@@ -218,6 +218,7 @@ my @protos_array=split(/\,/, $opts{p});

 #holds the various protocol hashes
 my %protos;
+my %proto_lookup;

 #make sure each one specified is defined and build the hash that will be returned
 my $protos_array_int=0;
@@ -234,27 +235,17 @@ while ( defined( $protos_array[$protos_array_int] ) ){
 		return_json(\%to_return, $opts{P});
 		exit 4;
 	}
-	
+
+	$proto_lookup{ $port } = $protos_array [$protos_array_int ];
+
 	$protos_array_int++;
 }

-my $os=$^O;
+my $netstat='netstat -n';

-my $netstat;
-
-#make sure this is a supported OS
-if ( $os eq 'freebsd' ){
-	$netstat='netstat -S -p tcp'
-}elsif( $os eq 'linux' ){
-	$netstat='netstat -n'
-}else{
-	$to_return{errorString}=$os.'
is not a supported OS as of currently'; - $to_return{error}=3; - return_json(\%to_return, $opts{P}); - exit 3; -} +my $os=$^O; -my $res = parse_netstat(output => join("", `$netstat`), flavor=>$os); +my $res = parse_netstat(output => join("", `$netstat`), flavor=>$os, udp=>0, unix=>0); #check to make sure that it was able to parse the output if ( @@ -273,7 +264,9 @@ while ( defined( $res->[2]{'active_conns'}[$active_conns_int] ) ){ my $conn=$res->[2]{active_conns}[$active_conns_int]; #we only care about TCP currently - if ( $conn->{proto} =~ /^[Tt][Cc][Pp]/ ){ + if ( defined( $conn->{proto} ) && + ( $conn->{proto} =~ /^[Tt][Cc][Pp]/ ) + ){ $protos_array_int=0; my $service; while( @@ -282,8 +275,8 @@ while ( defined( $res->[2]{'active_conns'}[$active_conns_int] ) ){ ){ #check if this matches either ports if ( - ( $protos_array[ $protos_array_int ] eq $conn->{'local_port'} ) || - ( $protos_array[ $protos_array_int ] eq $conn->{'foreign_port'} ) + ( defined($proto_lookup{ $conn->{'local_port'} }) ) || + ( defined($proto_lookup{ $conn->{'foreign_port'} }) ) ){ $service=$protos_array[ $protos_array_int ]; } @@ -294,7 +287,7 @@ while ( defined( $res->[2]{'active_conns'}[$active_conns_int] ) ){ #only handle it if is a service we are watching for if ( defined( $service ) ){ my $processed=0; - + my $state=$conn->{'state'}; #translate the state names if ( $os eq 'linux' ){ @@ -311,7 +304,7 @@ while ( defined( $res->[2]{'active_conns'}[$active_conns_int] ) ){ if ( $state ne 'LISTEN' ){ $protos{$service}{'total_conns'}++; } - + #make sure the state is a valid one # if it is not a valid one, set it to other, meaning something unexpected was set for the state that should not be if ( ! defined( $valid_states{$state} ) ){ @@ -338,11 +331,11 @@ while ( defined( $res->[2]{'active_conns'}[$active_conns_int] ) ){ $protos{$service}{'total_to'}++; $protos{$service}{'to'}{$state}++; } - + } - + } - + $active_conns_int++; } From bdfd0ceea948382684a2bd96659731f9ac5f15b1 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Tue, 19 Mar 2019 00:40:06 -0500 Subject: [PATCH 064/332] update the guessing to only use smartctl --scan-open and generate with more complex options --- snmp/smart | 134 +++++++++++++++++++++++++---------------------------- 1 file changed, 64 insertions(+), 70 deletions(-) diff --git a/snmp/smart b/snmp/smart index 44b7a31e7..f6c681350 100755 --- a/snmp/smart +++ b/snmp/smart @@ -73,10 +73,9 @@ my $useSN=1; $Getopt::Std::STANDARD_HELP_VERSION = 1; sub main::VERSION_MESSAGE { - print "SMART SNMP extend 0.0.0\n"; + print "SMART SNMP extend 0.1.0\n"; }; - sub main::HELP_MESSAGE { print "\n". "-u Update '".$cache."'\n". @@ -108,75 +107,70 @@ if ( defined( $opts{g} ) ){ $cache='cache='.$cache."\n"; } - my %found_disks; - - #check for drives named /dev/sd* - my @matches=glob('/dev/sd*'); - @matches=grep(!/[0-9]/, @matches); - my $matches_int=0; - while ( defined( $matches[$matches_int] ) ){ - my $device=$matches[$matches_int]; - system( $smartctl.' -A '.$device.' > /dev/null' ); - if ( $? == 0 ){ - $device =~ s/\/dev\///; - $found_disks{$device}=1; - } - - $matches_int++; - } - - #check for drives named /dev/ada* - @matches=glob('/dev/ada*'); - @matches=grep(!/[ps]/, @matches); - $matches_int=0; - while ( defined( $matches[$matches_int] ) ){ - my $device=$matches[$matches_int]; - system( $smartctl.' -A '.$device.' > /dev/null' ); - if ( $? 
== 0 ){
-			$device =~ s/\/dev\///;
-			$found_disks{$device}=1;
-		}
-		
-		$matches_int++;
-	}
-	
-	#check for drives named /dev/da*
-	@matches=glob('/dev/da*');
-	@matches=grep(!/[ps]/, @matches);
-	$matches_int=0;
-	while ( defined( $matches[$matches_int] ) ){
-		my $device=$matches[$matches_int];
-		system( $smartctl.' -A '.$device.' > /dev/null' );
-		if ( $? == 0 ){
-			$device =~ s/\/dev\///;
-			$found_disks{$device}=1;
-		}
-		
-		$matches_int++;
-	}
+	# used for checking if a disk has been found more than once
+	my %found_disks_names;
+	my @argumentsA;

 	#have smartctl scan and see if it finds anything not yet found
 	my $scan_output=`$smartctl --scan-open`;
 	my @scan_outputA=split(/\n/, $scan_output);
+
+	# remove non-SMART devices sometimes returned
 	@scan_outputA=grep(!/ses[0-9]/, @scan_outputA); # not a disk, but may or may not have SMART attributes
 	@scan_outputA=grep(!/pass[0-9]/, @scan_outputA); # very likely a duplicate and a disk under another name
-	$matches_int=0;
-	while ( defined( $scan_outputA[$matches_int] ) ){
-		my $device=$scan_outputA[$matches_int];
-		$device =~ s/ .*//;
-		system( $smartctl.' -A '.$device.' > /dev/null' );
-		if ( $? == 0 ){
-			$device =~ s/\/dev\///;
-			$found_disks{$device}=1;
+	@scan_outputA=grep(!/cd[0-9]/, @scan_outputA); # CD drive
+	if ( $^O eq 'freebsd' ){
+		@scan_outputA=grep(!/sa[0-9]/, @scan_outputA); # tape drive
+		@scan_outputA=grep(!/ctl[0-9]/, @scan_outputA); # CAM target layer
+	}elsif( $^O eq 'linux' ){
+		@scan_outputA=grep(!/st[0-9]/, @scan_outputA); # SCSI tape drive
+		@scan_outputA=grep(!/ht[0-9]/, @scan_outputA); # ATA tape drive
+	}
+
+	# make the first pass, figuring out what all we have and trimming comments
+	foreach my $arguments ( @scan_outputA ){
+		my $name = $arguments;
+
+		$arguments =~ s/ \#.*//; # trim the comment out of the argument
+		$name =~ s/ .*//;
+		$name =~ s/\/dev\///;
+		if (defined( $found_disks_names{$name} )){
+			$found_disks_names{$name}++;
+		}else{
+			$found_disks_names{$name}=0;
 		}
-
-		$matches_int++;
+
+		push( @argumentsA, $arguments );
+
 	}
-
+
+	# second pass, putting the lines together
+	my %current_disk;
+	my $drive_lines='';
+	foreach my $arguments ( @argumentsA ){
+		my $name = $arguments;
+		$name =~ s/ .*//;
+		$name =~ s/\/dev\///;
+
+		if ( $found_disks_names{$name} == 0 ){
+			# If no other devices, just name it after the base device.
+			$drive_lines=$drive_lines.$name." ".$arguments."\n";
+		}else{
+			# if more than one, start at zero and increment, appending a comma and number to the base device name
+			if (defined( $current_disk{$name} )){
+				$current_disk{$name}++;
+			}else{
+				$current_disk{$name}=0;
+			}
+			$drive_lines=$drive_lines.$name.",".$current_disk{$name}." ".$arguments."\n";
+		}
+
+	}
+
 	print "useSN=0\n".'smartctl='.$smartctl."\n".
-	$cache.
-	join( "\n", keys(%found_disks) )."\n";
-	
+	$cache.
+		$drive_lines;
+	
 	exit 0;
 }

@@ -208,7 +202,7 @@ while ( defined( $configA[$configA_int] ) ){
 	if ( $var eq 'cache' ){
 		$cache=$val;
 	}
-	
+
 	if ( $var eq 'smartctl' ){
 		$smartctl=$val;
 	}
@@ -216,11 +210,11 @@ while ( defined( $configA[$configA_int] ) ){
 	if ( $var eq 'useSN' ){
 		$useSN=$val;
 	}
-	
+
 	if ( !defined( $val ) ){
 		push(@disks, $var);
 	}
-	
+
 	$configA_int++;
 }

From 503fb9f7389d8307074ed856f96a870a0d26dd72 Mon Sep 17 00:00:00 2001
From: "Zane C. Bowers-Hadley"
Date: Tue, 19 Mar 2019 02:25:17 -0500
Subject: [PATCH 065/332] tested and it appears to work properly...
 documentation updated

---
 snmp/smart | 43 +++++++++++++++++++++++++++++++++----------
 1 file changed, 33 insertions(+), 10 deletions(-)

diff --git a/snmp/smart b/snmp/smart
index f6c681350..2fd3c145a 100755
--- a/snmp/smart
+++ b/snmp/smart
@@ -38,14 +38,17 @@ will be /etc/snmp/smart.config. Alternatively you can also specify a config via

 Anything starting with a # is a comment. The format for variables is $variable=$value.
 Empty lines are ignored. Spaces and tabs at either the start or end of a line are ignored. Any
-line with out a = or # are treated as a disk.
+line without a matched variable or # is treated as a disk.

     #This is a comment
     cache=/var/cache/smart
     smartctl=/usr/local/sbin/smartctl
     useSN=0
     ada0
-    ada1
+    da5 /dev/da5 -d sat
+    twl0,0 /dev/twl0 -d 3ware,0
+    twl0,1 /dev/twl0 -d 3ware,1
+    twl0,2 /dev/twl0 -d 3ware,2

 The variables are as below.

@@ -54,8 +57,13 @@ The variables are as below.
 useSN = If set to 1, it will use the disks SN for reporting instead of the device name.
         1 is the default. 0 will use the device name.

+A disk line can be as simple as just a disk name under /dev/, as in the config above:
+the line "ada0" would resolve to "/dev/ada0" and would be called with no special argument. If
+a line has a space in it, everything before the space is treated as the disk name and is what
+is used for reporting, and everything after that is used as the argument to be passed to smartctl.
+
 If you want to guess at the configuration, call it with -g and it will print out what it thinks
-it should be.
+it should be.

=cut

@@ -194,25 +202,30 @@ my @configA=split(/\n/, $config_file);
 my $configA_int=0;
 while ( defined( $configA[$configA_int] ) ){
 	my $line=$configA[$configA_int];
+	chomp($line);
 	$line=~s/^[\t\s]+//;
 	$line=~s/[\t\s]+$//;

 	my ( $var, $val )=split(/=/, $line, 2);
+	my $matched;
 	if ( $var eq 'cache' ){
 		$cache=$val;
+		$matched=1;
 	}

 	if ( $var eq 'smartctl' ){
 		$smartctl=$val;
+		$matched=1;
 	}

 	if ( $var eq 'useSN' ){
 		$useSN=$val;
+		$matched=1;
 	}

 	if ( !defined( $val ) ){
-		push(@disks, $var);
+		push(@disks, $line);
 	}

 	$configA_int++;
@@ -238,11 +251,22 @@ if ( !
defined( $opts{u} ) ){ } my $toReturn=''; -my $int=0; -while ( defined($disks[$int]) ) { - my $disk=$disks[$int]; +foreach my $line ( @disks ){ + my $disk; + my $name; + if ( $line =~ /\ / ){ + ($name, $disk)=split(/\ /, $line, 2); + }else{ + $disk=$line; + $name=$line; + } my $disk_sn=$disk; - my $output=`$smartctl -A /dev/$disk`; + my $output; + if ( $disk =~ /\// ){ + $output=`$smartctl -A $disk`; + }else{ + $output=`$smartctl -A /dev/$disk`; + } my %IDs=( '5'=>'null', '10'=>'null', @@ -333,7 +357,7 @@ while ( defined($disks[$int]) ) { my $selective=scalar grep(/Selective/, @outputA); # get the drive serial number, if needed - my $disk_id=$disk; + my $disk_id=$name; if ( $useSN ){ while (`$smartctl -i /dev/$disk` =~ /Serial Number:(.*)/g) { $disk_id = $1; @@ -345,7 +369,6 @@ while ( defined($disks[$int]) ) { .','.$IDs{'190'} .','.$IDs{'194'}.','.$IDs{'196'}.','.$IDs{'197'}.','.$IDs{'198'}.','.$IDs{'199'}.','.$IDs{'231'}.','.$IDs{'233'}.','. $completed.','.$interrupted.','.$read_failure.','.$unknown_failure.','.$extended.','.$short.','.$conveyance.','.$selective."\n"; - $int++; } if ( ! $noWrite ){ From 544fd8bd6e525b3c29d9965c2b405b39ba49a98d Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Tue, 19 Mar 2019 02:58:30 -0500 Subject: [PATCH 066/332] update the date --- snmp/smart | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/smart b/snmp/smart index 2fd3c145a..1f68ccb33 100755 --- a/snmp/smart +++ b/snmp/smart @@ -1,5 +1,5 @@ #!/usr/bin/env perl -#Copyright (c) 2017, Zane C. Bowers-Hadley +#Copyright (c) 2019, Zane C. Bowers-Hadley #All rights reserved. # #Redistribution and use in source and binary forms, with or without modification, From 166d1022f3671815af7d2d2cd6ae42b6d5d7f2d0 Mon Sep 17 00:00:00 2001 From: Kovrinic Date: Tue, 9 Apr 2019 22:04:26 -0500 Subject: [PATCH 067/332] Moved sudo command into the snmpd.conf. Added notes for settings to change if using older ZoL zfs. --- snmp/zfs-linux | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/snmp/zfs-linux b/snmp/zfs-linux index 87677d0b5..122eb9bbe 100755 --- a/snmp/zfs-linux +++ b/snmp/zfs-linux @@ -109,12 +109,12 @@ def main(args): ## account for variations between ZoL zfs versions proc = subprocess.run(zpool_cmd_list, **std) - if (proc.returncode == 1) and (('root' in proc.stderr) or ('admin' in proc.stderr)): - zpool_cmd = ['sudo'] + zpool_cmd # elevate zpool with sudo - zpool_cmd_list = zpool_cmd + ['list', '-p', '-H'] - proc = subprocess.run(zpool_cmd_list, **std) if (proc.returncode == 2): # -p option is not present in older versions + # edit snmpd.conf zfs extend section to the following: + # extend zfs /usr/bin/sudo /etc/snmp/zfs-linux + # make sure to edit your sudo users (usually visudo) and add at the bottom: + # snmp ALL=(ALL) NOPASSWD: /etc/snmp/zfs-linux del zpool_cmd_list[zpool_cmd_list.index('-p')] # try removing -p to fix the issue proc = subprocess.run(zpool_cmd_list, **std) exact_size = False From f69b396c48652d7781c45020a14efbfdf51febcf Mon Sep 17 00:00:00 2001 From: Munzy Date: Tue, 16 Apr 2019 13:50:16 -0700 Subject: [PATCH 068/332] Added Wrapper for SAS drives. --- snmp/smart | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/snmp/smart b/snmp/smart index 1f68ccb33..31afbdc31 100755 --- a/snmp/smart +++ b/snmp/smart @@ -339,8 +339,42 @@ foreach my $line ( @disks ){ $IDs{$id}=$temp; } + + + } + # SAS Wrapping + # Section by Cameron Munroe (munroenet[at]gmail.com) + + # Elements in Grown Defect List. 
+ # Marking as 5 Reallocated_Sector_Ct + + if ($line =~ "Elements in grown defect list:"){ + + my @lineA=split(/\ /, $line, 10); + my $raw=$lineA[5]; + + # Reallocated Sector Count ID + $IDs{5}=$raw; + + } + + # Current Drive Temperature + # Marking as 194 Temperature_Celsius + + if ($line =~ "Current Drive Temperature:"){ + + my @lineA=split(/\ /, $line, 10); + my $raw=$lineA[3]; + + # Temperature C ID + $IDs{194}=$raw; + + } + + # End of SAS Wrapper + $outputAint++; } From 935373ee3c53be5005bf1f9c27d41fab4024f409 Mon Sep 17 00:00:00 2001 From: Munzy Date: Tue, 16 Apr 2019 13:55:34 -0700 Subject: [PATCH 069/332] Removed Excess Spacing --- snmp/smart | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/snmp/smart b/snmp/smart index 31afbdc31..6fde05255 100755 --- a/snmp/smart +++ b/snmp/smart @@ -338,10 +338,7 @@ foreach my $line ( @disks ){ my ( $temp )=split(/\ /, $raw); $IDs{$id}=$temp; } - - - - + } # SAS Wrapping From 59938039cb08fe60e8f0272178597ea2a6909802 Mon Sep 17 00:00:00 2001 From: Munzy Date: Tue, 16 Apr 2019 13:57:30 -0700 Subject: [PATCH 070/332] Spacing Fixes --- snmp/smart | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/smart b/snmp/smart index 6fde05255..e31fd8838 100755 --- a/snmp/smart +++ b/snmp/smart @@ -338,7 +338,7 @@ foreach my $line ( @disks ){ my ( $temp )=split(/\ /, $raw); $IDs{$id}=$temp; } - + } # SAS Wrapping From 230c53a3f3efd785ca2f02b13c6781e62b4a88e9 Mon Sep 17 00:00:00 2001 From: SourceDoctor Date: Sun, 5 May 2019 23:14:58 +0200 Subject: [PATCH 071/332] dhcp-status - seperate binary path configuration from code --- snmp/dhcp-status.sh | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/snmp/dhcp-status.sh b/snmp/dhcp-status.sh index 20bf2b66b..972214718 100755 --- a/snmp/dhcp-status.sh +++ b/snmp/dhcp-status.sh @@ -4,6 +4,7 @@ # edit your snmpd.conf add the below line and restart snmpd # # extend dhcpstats /opt/dhcp-status.sh # ################################################################ + FILE_DHCP='/var/lib/dhcp/db/dhcpd.leases' BIN_CAT='/usr/bin/cat' BIN_GREP='/usr/bin/grep' @@ -11,6 +12,12 @@ BIN_TR='/usr/bin/tr' BIN_SED='/usr/bin/sed' BIN_SORT='/usr/bin/sort' BIN_WC='/usr/bin/wc' + +CONFIGFILE=dhcp-status.conf +if [ -f $CONFIGFILE ] ; then + . dhcp-status.conf +fi + DHCP_LEASES='^lease' DHCP_ACTIVE='^lease|binding state active' DHCP_EXPIRED='^lease|binding state expired' From dadc369c1cbf94f590114a9c2edd506c5eeb0902 Mon Sep 17 00:00:00 2001 From: SourceDoctor Date: Wed, 1 May 2019 14:55:54 +0200 Subject: [PATCH 072/332] add mdadm support --- qq | 2069 ++++++++++++++++++++++++++++++++++++++++++++++++++++ snmp/mdadm | 104 +++ 2 files changed, 2173 insertions(+) create mode 100644 qq create mode 100755 snmp/mdadm diff --git a/qq b/qq new file mode 100644 index 000000000..58a21ca28 --- /dev/null +++ b/qq @@ -0,0 +1,2069 @@ +commit 3361bf4c3ae5868b00d09215e10359f58a36ac12 +Author: SourceDoctor +Date: Wed May 1 14:55:54 2019 +0200 + + add mdadm support + +commit d822c899a78bdfd1e7d9f4df2bd5cd512b1696bd +Merge: 8fbfbd5 544fd8b +Author: VVelox +Date: Sun Mar 24 03:56:16 2019 -0500 + + Merge pull request #226 from VVelox/smart-update + + SMART monitoring update adding RAID support + +commit 544fd8bd6e525b3c29d9965c2b405b39ba49a98d +Author: Zane C. 
Bowers-Hadley +Date: Tue Mar 19 02:58:30 2019 -0500 + + update the date + +commit 8fbfbd5b39bbc22ca606327813c4fe54b38e4d30 +Merge: cb04f8c 38acc2b +Author: VVelox +Date: Tue Mar 19 02:53:30 2019 -0500 + + Merge pull request #225 from VVelox/pa-fix + + portactivity fixes + +commit 503fb9f7389d8307074ed856f96a870a0d26dd72 +Author: Zane C. Bowers-Hadley +Date: Tue Mar 19 02:25:17 2019 -0500 + + tested and it appears to work properly... documentation updated + +commit bdfd0ceea948382684a2bd96659731f9ac5f15b1 +Author: Zane C. Bowers-Hadley +Date: Tue Mar 19 00:40:06 2019 -0500 + + update the guessing to only use smartctl --scan-open and generate with more complex options + +commit 38acc2bd3d8e81414b4bfc2cb2bb3e955877fbc1 +Author: Zane C. Bowers-Hadley +Date: Mon Mar 18 03:39:17 2019 -0500 + + actually make this work on system not FreeBSD and deal with the bug where a connection may not have a protocol + +commit cb04f8c0ac148cb2b250d0a408f672db22e99ed5 +Merge: 147cb67 af32f56 +Author: VVelox +Date: Sun Mar 17 23:27:46 2019 -0500 + + Merge pull request #224 from VVelox/zfs-fix + + ZFS-FreeBSD divide by zero fix + +commit af32f56a74e0d9915b4beb419a28814e9bf058d8 +Author: Zane C. Bowers-Hadley +Date: Sun Mar 17 06:07:59 2019 -0500 + + merge... and update version + +commit 658c3c6ead712837bbb763c6b9ecdd782b043629 +Merge: 6564128 147cb67 +Author: Zane C. Bowers-Hadley +Date: Sun Mar 17 06:06:57 2019 -0500 + + Merge branch 'zfs-fix' of https://github.com/VVelox/librenms-agent into zfs-fix + +commit 656412830564593cfefeee5dceeae89bfa371000 +Author: Zane C. Bowers-Hadley +Date: Sun Mar 17 06:02:43 2019 -0500 + + remove unneeded else statement and re-apply patch + +commit 3ce06d6defc63f200f2bbfec7718748c8ec9e832 +Author: Zane C. Bowers-Hadley +Date: Sun Mar 17 05:55:33 2019 -0500 + + freshly initilized ZFS pulls that are not in use don't have a $data_demand_total + +commit 147cb67824b213045826677946166c8ee807f23c +Author: Tony Murray +Date: Tue Feb 12 20:33:05 2019 -0600 + + Use os-release whenever possible for the distro script (#220) + + Except centos... 
https://bugs.centos.org/view.php?id=8359 + +commit c9a0d2893e44f89f7c8c9450a9d42438eff1404d +Author: Felicián Hoppál +Date: Mon Feb 11 23:06:57 2019 +0100 + + Fix: zpool list output changed, incorrect values (#219) + + * fix zpool data, output of zpool list -pH changed in freebsd 11 + + * fix zpool data, output of zpool list -pH changed in freebsd 11 + + * bump version + + * version dump to 2 + +commit 3a407e3f721b7677fb2724af736ea87838d4dcc5 +Author: Tony Murray +Date: Thu Jan 17 11:44:02 2019 -0600 + + Update powerdns script to json (#218) + +commit ad300c035a2be4a55553c2994d5ce7ba69d57432 +Author: VVelox +Date: Wed Jan 9 23:41:39 2019 -0600 + + various misc fixes for the postfix poller (#112) + + * update postfix + + * move a few things to reduce the number of changed lines + + * move mself to the end + + * white space cleanup and another small cleanup of $chr + + * use $chrNew instead of $chrC when writing the current values + + * more white space cleanup + + * replace one more missed instance of iuoscp + +commit c40606140114b9059409f17a21b06fe8655b760e +Author: Slashdoom <5092581+slashdoom@users.noreply.github.com> +Date: Thu Jan 10 18:40:40 2019 +1300 + + Fix: InnoDB stat support for MariaDB v10+ (#211) + + * mariadb innodb support for v10+ + + * fix newer innodb insert buffers + + * agent mysql to snmp extend + +commit 6fdaffa1b2ba8c49ed8bd38fb6445335b3146329 +Author: Mike Centola +Date: Thu Jan 10 00:35:28 2019 -0500 + + Added gpsd script for SNMP Extend (#217) + + Fixed Typos + + Fixed another typo + +commit f54c442d06abd7d2112dc4dc5db315524030308c +Merge: 1b90904 107d72e +Author: CrazyMax +Date: Sat Dec 29 22:17:13 2018 +0100 + + Merge pull request #216 from jasoncheng7115/patch-2 + + Added Proxmox VE Versoin support + +commit 1b90904f61c6d4078f2b427e17c82cf1f8b926ba +Author: VVelox +Date: Fri Dec 28 20:10:13 2018 -0600 + + convert the FreeBSD NFS stuff over to JSON and add in lots of sanity (#190) + + * convert fbsdnfsclient over to JSON + + * Convert the server stuff to JSON and fix the output of the client extend. + + * misc. stuff + + * lots of cleanup and sanity added to the FreeBSD NFS scripts + + * fix the #! line + + * update the docs at the top + +commit 5be1b168ba4e03ba3a58b3833a26587474ff7b29 +Author: VVelox +Date: Fri Dec 28 20:08:46 2018 -0600 + + JSON SNMP extend for UPS-APC app. 
(#189) + + * add snmp/ups-apcups, a Perl rewrite of snmp/ups-apcups.sh to support JSON + + * finish documenting it + + * add version and remove units from the returned values + +commit 107d72e862c2e2a53870272859252a5d39bf8c72 +Author: Jason Cheng <30381035+jasoncheng7115@users.noreply.github.com> +Date: Tue Dec 25 09:15:22 2018 +0800 + + Added Proxmox VE Versoin support + +commit 433d744953fa800ce49fa060b141c10663c0b952 +Author: Jason Cheng <30381035+jasoncheng7115@users.noreply.github.com> +Date: Sun Dec 16 22:21:00 2018 +0800 + + Added FreeNAS Version support (#215) + + Hi, + + I added FreeNAS version information support, as shown in the figure: + + ![2018-12-15 11 53 31](https://user-images.githubusercontent.com/30381035/50044886-2329a580-00c5-11e9-817c-b89a8374270d.png) + ![2018-12-15 11 53 49](https://user-images.githubusercontent.com/30381035/50044887-2329a580-00c5-11e9-93b4-b140809f84a3.png) + +commit 3c4511d987c2058bd6e8605bb0e87c6fc1d36861 +Merge: ff124a1 dc3d267 +Author: CrazyMax +Date: Fri Dec 14 19:03:01 2018 +0100 + + Merge pull request #214 from dsgagi/patch-1 + + Fix Debian detection on Proxmox - lsb_release binary doesn't exist + +commit dc3d2673ddc86d02ca2cd8d93bbf2fd53ca43c55 +Author: dsgagi +Date: Fri Dec 14 18:49:58 2018 +0100 + + Update distro + + Remove extra white spaces. + +commit 456d2e7672d8532af4df7f6da2b5c18b02778bf7 +Author: dsgagi +Date: Fri Dec 14 18:47:54 2018 +0100 + + Update distro + + Minor changes to the code, for better output. + +commit 5b53ab54c8a6d9f3b81abf42725b5da2b3ebec3d +Author: dsgagi +Date: Wed Dec 12 16:09:25 2018 +0100 + + Update distro + +commit ff124a1358755ceddc0ae6a4187d358da0d54d06 +Author: VVelox +Date: Thu Nov 22 09:04:58 2018 -0600 + + add portactivity SNMP extend (#159) + + * add portactivity SNMP extend in its initial form + + * update for the current json_app_get + + * add version to the returned JSON + + * add basic POD documentation + +commit a827734c0ec0e0cdf5e2a04730ec68dbad3fd477 +Author: gardar +Date: Thu Oct 25 19:19:20 2018 +0000 + + CloudLinux distro detection (#208) + + Added CloudLinux distro detection, previously CloudLinux got identified as RedHat + +commit 8d66211adc47d3bad5dd042e3ddbc59a23a28819 +Author: sparknsh <38894705+sparknsh@users.noreply.github.com> +Date: Thu Oct 25 07:17:42 2018 -0400 + + Fix package manager detection (#204) + + * Fix package manager detection + + * use release file for os detection + + * Use command to to validate package manager type + + * check if exists and the execute permission is granted + + * make script more portable + +commit d49fe954dfdeffbeee091051f1f0c515d020f281 +Author: Félim Whiteley +Date: Tue Oct 23 17:46:54 2018 +0100 + + Add divide by zero check (#191) + + On several servers (Ubuntu 18.04) DEMAND_DATA_TOTAL is 0 currently and is causing an error + + Traceback (most recent call last): + File "/usr/local/bin/zfs-linux", line 178, in + sys.exit(main(sys.argv[1:])) + File "/usr/local/bin/zfs-linux", line 76, in main + DATA_DEMAND_PERCENT = DEMAND_DATA_HITS / DEMAND_DATA_TOTAL * 100 + ZeroDivisionError: division by zero + +commit 381cc2466af521772607c682a9a707471a38ff4b +Author: sparknsh <38894705+sparknsh@users.noreply.github.com> +Date: Tue Oct 23 08:51:12 2018 -0400 + + fix nginx script indentation (#205) + +commit 3dada041e433318592e137678d24c32dd1a134b4 +Author: sparknsh <38894705+sparknsh@users.noreply.github.com> +Date: Thu Oct 18 10:37:10 2018 -0400 + + Fix binary operator expected error (#203) + +commit ccb244aa09de36e4e4dd85120702580144e86383 +Author: sparknsh 
<38894705+sparknsh@users.noreply.github.com> +Date: Wed Oct 17 12:28:39 2018 -0400 + + osupdate script clean up (#199) + + - Change script name for simplify of configuration management orchestration scripts. + - Update code syntax. + +commit f0f34b4a2d1a36836f6bffe4307d5d51524009b4 +Author: sparknsh <38894705+sparknsh@users.noreply.github.com> +Date: Wed Oct 17 12:28:07 2018 -0400 + + phpfpmsf script clean up (#198) + + - Change script name for simplify of configuration management orchestration scripts. + - Update code syntax. + +commit e0dcd4a064cedb09241e4af17198bf61e8fd1bf3 +Author: sparknsh <38894705+sparknsh@users.noreply.github.com> +Date: Wed Oct 17 12:27:21 2018 -0400 + + nginx script clean up (#197) + + - Change script name for simplify of configuration management orchestration scripts. + - Change 172.0.0.1 to localhost for better nginx handling. + +commit 1c61a96344317c13fce90811c11c0fa4cb7efb36 +Author: sparknsh <38894705+sparknsh@users.noreply.github.com> +Date: Wed Oct 17 12:26:45 2018 -0400 + + ntp-client data correction (#196) + + NTP was not displaying data right for linux servers. It was putting the frequency data into the offset data. This was giving bad graphs in the UI. Tested the correction on both RHEL and Debian based operating systems and all passes. + + Remove the .sh to simplify for configuration management orchestration scripts. + +commit 28a2f8ae55db7ca773f881560017b4890bc4bbce +Author: voxnil <14983067+voxnil@users.noreply.github.com> +Date: Mon Oct 15 13:00:16 2018 -0700 + + Update zfs-linux to use env for python + +commit ca7a5cdafe6dd603538aad8f63bc624143f98377 +Author: Brock Alberry +Date: Wed Sep 19 09:09:04 2018 -0400 + + PhotonOS distro detection (#193) + + * PhotonOS distro detection + + Detection before `/etc/os-release` since that is present yet missing the build number. + + * awk detection + + combining https://github.com/librenms/librenms-agent/pull/193 and https://github.com/librenms/librenms-agent/pull/194 + +commit 7542bd26f4c883c7e622056a1a34909d1dc9aa2c +Author: Allison +Date: Tue Sep 18 20:20:23 2018 -0700 + + Update distro (#194) + + Adding full detection for ASUSWRT-Merlin + +commit 7c173b160c5be401fa36d85edf15add61a3146d7 +Author: VVelox +Date: Mon Aug 27 04:03:01 2018 -0500 + + convert all the NTP stuff to JSON (#174) + + This requires https://github.com/librenms/librenms/pull/8571 and is for https://github.com/librenms/librenms/pull/8608 . + + Also converted this to regular sh instead of bash, so it will work on more systems with less dependencies. + + Has been tested as working on DD-WRT and FreeBSD. + +commit 99ad80740cb2fcea1c33e59caf1c05af5a53a14f +Author: VVelox +Date: Sun Aug 19 17:47:07 2018 -0500 + + update for the new json_app_get stuff (#179) + +commit c772ac97d3f5b805c311fd13d924513b4561d10b +Author: crcro +Date: Fri Aug 10 00:44:02 2018 +0300 + + added rockstor nas distro detection (#187) + +commit c535b1286c7701a2cefcd10ffd799fba65e56dd2 +Author: TheGreatDoc <32565115+TheGreatDoc@users.noreply.github.com> +Date: Thu Jul 19 22:39:08 2018 +0200 + + Asterisk Script (#183) + + Asterisk App support. 
+ - Channels + - Calls + - Total SIP Peers + - Monitored Online + - Monitored Offline + - Unmonitored Online + - Unmonitored Offline + +commit 7e55d1cd5db04019de09aff7b134a85df71e901a +Author: István Sárándi +Date: Mon Jun 25 16:10:00 2018 +0200 + + Update fail2ban extend script to new JSON format (#181) + + As seen at [this location](https://github.com/librenms/librenms/blob/7fab99cfc13b80a543fb779d68c659b52fc074b1/includes/polling/functions.inc.php#L768) the JSON output needs to contain a `data` field. The poller php script actually also extracts this `data` field as one of the first steps, see at [this line](https://github.com/librenms/librenms/blob/c3007b483a12758042e5d0c6009a8ef48e3e1a39/includes/polling/applications/fail2ban.inc.php#L36). + Before I changed these parts the graph didn't show up because the RRD files simply weren't generated as an exception occurred in the poller. This fixes this problem. + +commit b5d77f1a999c5e0f08bc02550fd24e7c37b759c7 +Author: VVelox +Date: Mon May 28 07:22:09 2018 -0500 + + convert fail2ban-client to JSON (#172) + + * convert to JSON + + * add version return + + * change the version number of the returned data to 1 + +commit 41d36dc97f6886bae4ae6e8ba928892ef9d3c8c3 +Author: VVelox +Date: Fri Apr 27 16:46:57 2018 -0500 + + make using SN or device name selectable for SMART reporting (#168) + + * make using SN or device name selectable + + * change the default to SN + +commit 385d466eee1adc06eecd4a84cfd6615f2e4ba2ec +Author: Sander Steffann +Date: Fri Apr 13 17:42:27 2018 +0100 + + Add random entropy monitoring (#173) + +commit a56adb467a1cdf9785f977420dd07a48335f41b3 +Author: Serphentas +Date: Wed Apr 11 10:39:32 2018 +0200 + + add zfs support for linux (#170) + + * add zfs support for linux + + * fix pools and anon_hits_per + + * strip percent sign for pool cap + + * fix anon_hits json key typo + + * fix demand_data_hits json key typo + + * fix comparison as in #169 + + * fix min_size_percent + +commit 8ec6017246edc9784e670d84bd8b52ec094dbb82 +Author: VVelox +Date: Wed Apr 11 02:34:39 2018 -0500 + + correct arc size breakdown (#171) + +commit 3ddb1d6be6b4a4a0cd006251b497bb1ccf8170e8 +Author: VVelox +Date: Tue Apr 10 22:04:07 2018 -0500 + + correct arc size breakdown + +commit 90fd6f60f3aed5f71140d23a8d022ae9909e7473 +Author: Dylan Underwood +Date: Fri Mar 23 11:24:02 2018 -0500 + + Should be greater than or equal to (#167) + +commit 3a8462461595535a53554b0ad66bc922118e83d1 +Author: endofline +Date: Tue Feb 27 23:10:35 2018 +0200 + + Replace disk identifier with disk serial in S.M.A.R.T snmp script (#164) + +commit bbd3b1309aaa3ecaf6f502e92718719539715c58 +Author: endofline +Date: Sun Feb 18 22:33:42 2018 +0200 + + Fix Command_Timeout missing from SMART output (#163) + +commit fd9fd178a4b43feafb414822167b3033693c8efc +Author: crcro +Date: Sat Jan 6 22:06:45 2018 +0200 + + extend: powerdns-dnsdist (#158) + + * powerdns-dnsdist app + + * fix script in help + + * removed local data manipulation + + * again name of file in script help + + * removed personal api info + +commit bacaca0be4104cc003222b941e433d5470cae76d +Author: VVelox +Date: Sat Dec 30 05:42:37 2017 -0600 + + ZFS SNMP agent :3 <3 (#156) + + * Add it as it currently is. Needs to be moved over to JSON + + * rename it to zfs-freebsd as it is FreeBSD specific + + now uses JSON + + * misc. 
updates and document it all + + * minor spelling correction + +commit c7cae0765e0f5072fdf3dd224f357290e2697fb5 +Author: VVelox +Date: Sat Dec 30 05:39:36 2017 -0600 + + update the fail2ban stuff (#155) + + Dropping firewall checking as the new fail2ban uses pf and anchors on + FreeBSD, which while esoteric as fuck works nicely and is reliable. + +commit 8920cd3f290e8c13a3bb7db96ceb8db05845869d +Author: Slashdoom <5092581+slashdoom@users.noreply.github.com> +Date: Wed Dec 13 16:13:10 2017 +1300 + + freeradius.sh: new agent for incoming main PR (#151) + + * Update os-updates.sh + + * Update os-updates.sh + + * Update os-updates.sh + + * Create freeradius.sh + + * Update freeradius.sh + + * Update freeradius.sh + +commit 3b9d632a8d6dbd6ac3f42f75ba36faa235ef4440 +Author: arrmo +Date: Mon Dec 4 14:11:17 2017 -0600 + + hddtemp, ignore devices not supporting SMART (#153) + +commit 7fb48df8579a8e113153c1439a4fa92829847d9f +Author: Daniel Bull +Date: Fri Oct 27 06:41:05 2017 +0100 + + Fix: Apache SNMP extend IndexError (#116) + + See issue for more information: + https://github.com/librenms/librenms-agent/issues/95 + +commit 2996ad88b00f24777c0e5629cb931b8b448dd515 +Author: dragans +Date: Fri Oct 27 07:39:09 2017 +0200 + + fix: Update mysql (#127) + + Update mysql agent script based on updated changes in newest version of Percona Monitoring Plugins (Cacti template). + + Changes enable correct parsing of status data for newer versions of MySQL/MariaDB database servers and should be backward compatible with older versions. + +commit d0762871b4cfb0a7cbfcc5ba99bc1fe0b0c51cf3 +Author: Slashdoom <5092581+slashdoom@users.noreply.github.com> +Date: Tue Oct 10 08:02:05 2017 +1300 + + os-update.sh: back to package management based and count fixes (#149) + + * Update os-updates.sh + + * Update os-updates.sh + + * Update os-updates.sh + +commit 6a40ca1e9cc4319e6b7363541feb9681dcf5bc5f +Author: tomarch +Date: Wed Sep 20 21:47:11 2017 +0200 + + fix munin agent (#148) + + Without the full munin-scripts path, this script won't find munin file and return nothing. + +commit 1b03d2f9f74ca29b177e596c0ff2ba13a0e1292d +Author: Uwe Arzt +Date: Wed Sep 6 20:42:58 2017 +0200 + + Add Oracle Linux Distribution to distro script (#146) + + * Add Oracle Linux to distro script + + * Revert local change + +commit 45478555df856af51e707c3cd6ace716c709e0fb +Author: arrmo +Date: Sun Aug 27 14:59:15 2017 -0500 + + Update Distro, for Raspbian Support (#144) + +commit 3380a85ff13f0dad706690b71b2bd8e9d9452926 +Author: Zucht +Date: Sat Aug 12 17:30:02 2017 +0200 + + Update raspberry.sh (#143) + + Fix state WMV9 + +commit a50e1dffb89738814a1183e2e0560ab86daaf3f0 +Author: Neil Lathwood +Date: Thu Aug 3 17:11:26 2017 +0100 + + Update raspberry.sh (#140) + +commit 584fd645d470e85e30607b8be3102292b4a7b54e +Author: drid +Date: Wed Jul 12 22:55:02 2017 +0300 + + C.H.I.P. power values (#134) + + * C.H.I.P. 
power values + + * Added attribution + + * Fix ACIN current calculation + + * Battery current fix + +commit 3f9dc0f5f02c1590d6e84ac10c6f7c973d54f771 +Author: RedChops +Date: Thu Jun 29 16:11:26 2017 -0400 + + Fix for bug https://github.com/librenms/librenms/issues/6821 (#138) + +commit a4efb62466c58ee05b3c078283a2a9fecb7cd3ce +Author: Stefan Funke +Date: Wed Jun 28 22:36:26 2017 +0200 + + unnecessary use of wc while already calling grep (#137) + + * useless call of wc while already calling grep + + * move grep count call to CMD_GREP to stay in project style + +commit cc6d7882dba89bce0a1f3a27d9fd3b399a2430b9 +Author: einarjh +Date: Sat Jun 10 11:20:48 2017 +0200 + + Strip all non-ASCII characters from hddtemp output (#136) + +commit 3903f431f7f56ef4f48bd50d28c05aec8e795bc0 +Author: crcro +Date: Tue Jun 6 01:00:29 2017 +0300 + + bash script for pi-hole app (#135) + +commit 84630dfb84539936efa47bfe3b13638f809a82c5 +Author: Félim Whiteley +Date: Wed May 31 22:23:38 2017 +0100 + + Fix for first line as localhost (#130) + + An example output like below where the first line of output is just "localhost" so it causes the splitting to cause an out of index error. + + Example: + cat /tmp/apache-snmp + localhost + ServerVersion: Apache/2.4.25 (Ubuntu) PHP/5.6.30-5+deb.sury.org~trusty+2 + ServerMPM: prefork + Server Built: 2016-12-21T00:00:00 + CurrentTime: Thursday, 18-May-2017 19:26:43 EDT + RestartTime: Thursday, 18-May-2017 11:35:48 EDT + ParentServerConfigGeneration: 2 + ParentServerMPMGeneration: 1 + ServerUptimeSeconds: 28255 + ServerUptime: 7 hours 50 minutes 55 seconds + Load1: 0.04 + Load5: 0.05 + Load15: 0.10 + Total Accesses: 5367 + Total kBytes: 61432 + CPUUser: 19.69 + CPUSystem: 1.05 + CPUChildrenUser: 0 + CPUChildrenSystem: 0 + CPULoad: .0734029 + Uptime: 28255 + ReqPerSec: .189949 + BytesPerSec: 2226.38 + BytesPerReq: 11721 + BusyWorkers: 1 + IdleWorkers: 6 + Scoreboard: ___....._.__.W........................................................................................................................................ + +commit 16178c6ac31ed2511243ccfab5b25b69b031d3fa +Author: Aldemir Akpinar +Date: Thu Jun 1 00:23:07 2017 +0300 + + Added devuan support for os-updates.sh and removed code repitition (#131) + +commit f473c5e30ca0649baa590dd5a7f041ce91f57e73 +Author: BlackDex +Date: Tue May 23 14:44:05 2017 +0200 + + Added try-except checks for global values. (#107) + + Fixed an error which prevented output. + It seems some ceph version probably use different values or something. This is a quick fix to have the script output the correct values. + +commit 6fdcc91f7041ad49cbb906b814a1b5ecf8fd2e4c +Author: Karl Shea +Date: Thu May 4 02:06:32 2017 -0500 + + Fix bind config file read (#125) + +commit e3dad6cfc9c6549e1f5cfef41ef2cf20a9827352 +Author: VVelox +Date: Wed May 3 09:23:40 2017 -0500 + + BIND cleanup and expansion (#108) + + * add BIND named SNMP extend + + * nolonger piss the entire stats across the wire, but crunch them and return them + + * more work on bind + + * more misc. 
updates + + * add proper agent support as well as optional zeroing + + * add -m + +commit 69eee9fb898bd521e3f4ab5d2d93cf5b34949e1d +Author: Aldemir Akpinar +Date: Tue May 2 12:22:19 2017 +0300 + + Added Devuan GNU/Linux support (#124) + +commit eaa6af235978405418d8e6d6e0beb04f761a578b +Author: crcro +Date: Thu Apr 27 22:54:55 2017 +0300 + + snmp-extend: sdfsinfo (#122) + + * sdfsinfo app snmp extend + + * rewrite script to bash + + * more vars + +commit 69e1ace889cfee6963cc6506a5e96fb30cabac1b +Author: RedChops +Date: Sat Apr 22 19:29:00 2017 -0400 + + Include missing SMART ids in the output (#120) + +commit 705cc0f3fe62e4837ecf4be86dec95558ca07ff3 +Author: Svennd +Date: Tue Apr 18 22:34:05 2017 +0200 + + add support for SGE/rocks job tracker (#118) + +commit d7085e001cebf0bf086b84ac0c65cad54f90ee38 +Author: Chris Putnam +Date: Tue Apr 18 13:32:41 2017 -0700 + + hddtemp: parallelize calls to hddtemp for performance (#117) + + This poll script runs hddtemp with a list of all drives as arguments and reads the output. hddtemp scans each drive's SMART status serially, which scales poorly with a large number of drives. + + In lieu of a patch to the actual hddtemp project, optionally use GNU parallel when available to parallelize the call to hddtemp. + + In testing a machine with 58 drives I went from a runtime of about 5 seconds per run to 0.5s, a performance improvement of 10x. + +commit 5f47aad492a679a81da0a19f2649f60d6637e199 +Author: Chris Putnam +Date: Fri Apr 7 01:45:56 2017 -0500 + + hddtemp: improve detection of drives (#115) + + Previously, this script was only able to find 26 drives (sda-sdz) due to the use of globbing. + + A better strategy for detecting drives would be to use lsblk on systems that support it, failing over to globbing. + + This patch adds support both for lsblk and a more comprehensive glob solution with find that will at least catch 26^2 drives. + +commit 67bae5a86cfe47c90ade541c1e613f7e5e788cfd +Author: Philip Rosenberg-Watt +Date: Thu Apr 6 03:24:36 2017 -0600 + + fix: Update proxmox agent to use new Perl module (#88) + + PVE::API2Client is deprecated in Proxmox 4.4-6. Agent now requires + installation of libpve-apiclient-perl via apt. + + This commit fixes #81. 
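
A minimal sketch of satisfying that dependency on a Debian-based Proxmox node (only the package name is taken from the message above; the exact command is illustrative):

    # install the Perl API client the proxmox agent now depends on
    apt-get install libpve-apiclient-perl
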
+ +commit a7fe1f8e6f98640463a93f934ac2580311db09ca +Author: Tony Murray +Date: Wed Mar 29 19:11:23 2017 -0500 + + Copy powerdns-recursor to snmp and remove <<>> header (#111) + +commit 74faec281c13928e60c140d85bb3138e7297fa79 +Author: Florian Beer +Date: Thu Mar 30 00:00:26 2017 +0200 + + Postfix app bug fixes (#105) + + * Postfix app bug fixes + + - add missing DS + - fix some totals + + * Move new variable to end of output + +commit 1e7762fb4eb832ed9d7530994804a284028c9c7c +Author: VVelox +Date: Wed Mar 22 09:28:57 2017 -0500 + + add SMART SNMP extend script (#101) + + * add SMART SNMP extend + + * cleanup default disk examples + + * correct a small typo + + * add option caching support + + * add checking selftest log and nolonger zeros non-existent IDs + + * now uses a config file + + * add the ability to guess at the config + + * properly remove device entries with partitions now and avoid adding dupes in a better manner + + * now have smartctl scan as well to see if it missed anything + + * note why ses and pass are ignored + + * properly use the cache file in the config now + + * actually use the cache now + +commit 94aa0feacdfc71b6d8044c66992069538071ca39 +Author: VVelox +Date: Sun Mar 19 13:03:59 2017 -0500 + + add unbound SNMP extend script (#102) + +commit 495f46afb431a0ef29fe58c40a01c7f9d352c3d5 +Author: Tony Murray +Date: Fri Mar 10 06:29:19 2017 -0600 + + Update mysql script to php7 version... (#104) + + * Update mysql script to php7 version... + + * Update mysql + +commit 61579bf0ace0a963f6ffbf9ca263910c5f6614fe +Author: Tuxis Internet Engineering V.O.F +Date: Wed Mar 8 09:51:04 2017 +0100 + + Enable ipv6 in Xinetd (#100) + + * Fix indenting and enable IPv6 in Xinetd + + * Fix changelog + + * Typo + +commit 7f79fc4167adac967d89d0ee6277f78886a5c7b9 +Author: Tony Murray +Date: Tue Mar 7 23:48:15 2017 -0600 + + Update mysql + +commit 1b1d8b491f842edc3e04c5405ae13de4f60a6751 +Author: VVelox +Date: Tue Mar 7 23:40:09 2017 -0600 + + clean up snmp/mysql_stats.php and make it a proper snmpd extend script now (#99) + + * cleanup and make it something that can properly be invoked via cli + + * blank the user/pass/host bits increasing the chances it will work out of the box + + * Update mysql_stats.php + + * Update mysql_stats.php + + * Update mysql_stats.php + + * Update mysql_stats.php + + * Rename mysql_stats.php to mysql + +commit e7c331070100290b3780ba6907add81be82165c6 +Author: VVelox +Date: Fri Mar 3 14:41:38 2017 -0600 + + add Nvidia SNMP extend poller (#94) + + * add Nvidia SNMP extend + + * update the extend path + + * now support more than 4 GPUs + + this will now support how ever many GPUs are installed on a system... + + Just double checked and it appears nvidia-smi dmon only reports up to 4 GPUs at a time... 
so if we have more than 4, begin checking they exist and if so print them + +commit 2308481188f72bbad12d7d94ebd941a73fc97655 +Author: VVelox +Date: Fri Mar 3 12:55:55 2017 -0600 + + add squid snmp extend (#93) + +commit 2700598925c8481641def507a4bf902a27cb01af +Author: VVelox +Date: Fri Mar 3 08:49:15 2017 -0600 + + FreeBSD NFS extends (#90) + + * add the FreeBSD NFS client and server extends + + * white space cleanup + + * white space cleanup + +commit db3b5c7cec8fa35832739e742c84fa61e465bd9f +Author: VVelox +Date: Wed Mar 1 17:46:13 2017 -0600 + + add Postgres SNMP extend (#91) + + * add Postgres SNMP extend + + * minor comment cleanups + + * use env for check_postgres.pl + + * quote the string + +commit 42e488743917fd39019ac9300caf391a5a8120c8 +Author: VVelox +Date: Wed Mar 1 12:35:06 2017 -0600 + + add detailed Postfix poller (#92) + + * add detailed postfix poller + + * env perl + +commit c4101c9ef2a8e8dffbfaee55f067c7c89fe18e27 +Merge: bb4c67b 8343e7f +Author: Tony Murray +Date: Fri Feb 24 11:10:43 2017 -0600 + + Merge pull request #84 from VVelox/master + + add a snmpd extend script for fail2ban + +commit 8343e7f34e1c382051f65bb9d7cf5bad454b934e +Author: Tony Murray +Date: Fri Feb 24 11:09:21 2017 -0600 + + Update fail2ban + +commit 4fcce9f01dd5b0c7979a2ebc95298ff40239bfd9 +Author: Tony Murray +Date: Fri Feb 24 11:02:19 2017 -0600 + + Redefining $firewalled + +commit 8bfbce68e503b2ddcdcc9619307d168b1c332df3 +Author: VVelox +Date: Thu Feb 23 09:54:38 2017 -0600 + + if cache older than 360 seconds, don't use it + +commit 0a78888889d1e67e5696bb59e2c8fff4fd76f9ff +Author: VVelox +Date: Thu Feb 23 09:13:59 2017 -0600 + + use $f2bc for getting jail status now and not just only overall status + +commit 1e160b86e46ff7023ea13d8de13fe98e52e3b270 +Author: VVelox +Date: Thu Feb 23 08:46:18 2017 -0600 + + don't reuse the variable $iptables + +commit 4b53918a7d09dc705c761c6eba3d0b68caca7159 +Author: VVelox +Date: Thu Feb 23 08:45:04 2017 -0600 + + poke the user about setting a iptables path as well + +commit 90620a8558e0b164fb2a714c007b14b1ba1b1567 +Author: VVelox +Date: Thu Feb 23 08:40:59 2017 -0600 + + misc. 
path cleanups + +commit 5ee0faa2c38e887b61b34fd4140ae23a8583d350 +Author: VVelox +Date: Wed Feb 22 21:58:03 2017 -0600 + + make caching optional + +commit 4ffd86f8bdbe8825ac0793c1cf0b86a886656f34 +Author: VVelox +Date: Wed Feb 22 21:42:53 2017 -0600 + + Update fail2ban + +commit 0227094c6fc9cf31d7d5f9a45a63e093b6e38aa5 +Author: VVelox +Date: Mon Feb 20 13:18:50 2017 -0600 + + track both firewall and fail2ban-client + +commit 3932875ce04c1b51b8bf4c43c9934f2b29800acb +Author: VVelox +Date: Mon Feb 20 03:50:59 2017 -0600 + + correct a comment + +commit c367e9ff9d61f9cee619c19278a2bdc6d8fc7637 +Author: VVelox +Date: Mon Feb 20 03:49:50 2017 -0600 + + now requires cron usage + +commit d90f3e879200108794beb7a2a4cc047f2938899e +Author: VVelox +Date: Sun Feb 19 23:41:51 2017 -0600 + + use fail2ban-client instead + +commit 710f38e8ff7cee520f9c7cc8ada421b6f32684c5 +Author: VVelox +Date: Sat Feb 18 00:09:12 2017 -0600 + + move this over to perl and properly check iptables + +commit 6f76427952194ca6036181c31402887e72317308 +Author: VVelox +Date: Fri Feb 17 23:08:53 2017 -0600 + + remove pointless exit + +commit 4b600ad2b41be4f338f1745320b3dbd64c5f5ba9 +Author: VVelox +Date: Fri Feb 17 23:08:25 2017 -0600 + + whoops, forgot to remove \ as well + +commit bb4c67b217fc6f553c36861d4da0c5edfd61913c +Merge: ff6ee0e e3f3bd3 +Author: Tony Murray +Date: Fri Feb 17 22:42:57 2017 -0600 + + Merge pull request #86 from florianbeer/patch-1 + + Update shebang + +commit 6955e5d410f87be4423ac86111841721292911fd +Author: VVelox +Date: Fri Feb 17 10:33:02 2017 -0600 + + don't assume it appends the jail name + +commit 8b78f863d34f24858ca3d061df02efa6213d3b3b +Author: VVelox +Date: Fri Feb 17 10:32:27 2017 -0600 + + update to check fail2ban and f2b + +commit e3f3bd3efb36ee391430d61d363afa1e8d322ae3 +Author: Florian Beer +Date: Fri Feb 17 16:37:00 2017 +0100 + + Update shebang + + With the original shebang this script didn't work on Debian and Ubuntu machines. Using `/usr/bin/env bash` makes the script more portable. + +commit ff6ee0e2bc0e84ce1b0c4276713f8cb70d3154a2 +Author: Robert Verspuy +Date: Fri Feb 17 01:46:13 2017 +0100 + + Fixed correct amount of pipeline symbols when degrees symbol is missing. (#85) + + When the script is called through xinetd/check_mk (on my system), there is no degree symbol, but a space. 
+ Changed the script to handle both correctly + +commit 21c953d11af41e1256ecf92070fc36b999b1e084 +Merge: 1ec8f20 58d1726 +Author: kitsune +Date: Thu Feb 16 11:30:03 2017 -0600 + + Merge branch 'master' of https://github.com/librenms/librenms-agent + +commit 1ec8f204ee0c96ca0a9cf77dff7bdb0f79402462 +Author: VVelox +Date: Thu Feb 16 10:50:46 2017 -0600 + + add fail2ban snmpd extend script + +commit 58d17264c7d57978a408b800084f153857d3b3f9 +Author: rockyluke +Date: Thu Feb 16 16:12:01 2017 +0100 + + Use command -v instead binary path (#80) + +commit 60becc9b3a9429a42faae18440821b90ac6586fc +Author: VVelox +Date: Thu Feb 16 09:10:53 2017 -0600 + + add a single pool php-fpm net-snmpd extend poller (#83) + +commit 677bd4187e90211a70419e01fe97a809c6cabfd0 +Author: VVelox +Date: Wed Feb 15 11:37:18 2017 -0600 + + add a single pool php-fpm net-snmpd extend poller + +commit 575956cae3ea5fcb014db3777a83e52026f95baa +Author: crcro +Date: Fri Jan 20 10:47:30 2017 +0200 + + snmp: exim-stats (#79) + + * exim-stats frozen mails + + * added total queue info + +commit d090686b722a1b0d8ded3ebfedec5c3b0f8a46a3 +Merge: ae43e5f dc60463 +Author: Tony Murray +Date: Wed Dec 14 16:39:59 2016 -0600 + + Merge pull request #75 from bungojungo/master + + Added fedora support to distro/os-updates + +commit ae43e5f493941aab81c96e3dc9378da434b55ce6 +Merge: 6c130ea de1f177 +Author: Tony Murray +Date: Wed Dec 14 16:39:47 2016 -0600 + + Merge pull request #73 from paulgear/master + + Make ups-nut work on Debian Jessie + +commit 6c130ea65e191d76a12b7d6d31d4726937b0f3e4 +Merge: e527768 3d061d2 +Author: Tony Murray +Date: Wed Dec 14 16:34:10 2016 -0600 + + Merge pull request #76 from murrant/powerdns-python26 + + Support python2.6 for powerdns scripts + +commit 3d061d24079d0dcb7458a75b3d83d5aaba43acc9 +Author: Tony Murray +Date: Wed Dec 14 16:27:15 2016 -0600 + + Support python2.6 for powerdns scripts + fixes #67 + +commit dc604636bccd8779bd261b013af4872cad14e1f0 +Author: Jason Scalia +Date: Wed Dec 7 22:11:48 2016 -0500 + + added fedora/dnf support + +commit 8b3ca2dac293ef132f1e48afa871b7158d692d90 +Author: Jason Scalia +Date: Wed Dec 7 21:48:22 2016 -0500 + + Added fedora support + +commit de1f1775cc26aacb931141182c212de706b80b5f +Author: Paul Gear +Date: Sat Dec 3 14:16:54 2016 +1000 + + Restore previous default UPS name + +commit 465ec12dd4757baa95560b11f89a433f05fb7454 +Author: Paul Gear +Date: Sat Dec 3 14:07:02 2016 +1000 + + Make ups-nut work on Debian Jessie + + This script was broken on Debian Jessie (and probably + all other Debian-based distros, including Ubuntu). + This commit removes the hard-coding of paths and uses + $PATH per normal bash practice, and should work on a + wider range of shell variants. + +commit e52776889cea5e3379422ce4ffb7171bba4fbdf1 +Author: arrmo +Date: Sat Nov 26 02:12:41 2016 -0600 + + Update to Distro (to support dd-wrt) (#72) + +commit c5fea261dea71cc9600936455bdf357cc062b220 +Author: Mathias B +Date: Thu Nov 17 09:31:56 2016 +0100 + + Add Debian support (#71) + + Before that only Ubuntu was supported, now Debian users can use this nice script too! 
+ +commit 36ed3f008c6f2a0cc0be0cdb1ce9199a6e495dbc +Author: Karl Shea +Date: Sat Oct 8 15:26:07 2016 -0500 + + Agent: script to collect data from GPSD (#69) + +commit 91c251fd94d73f44e8757b242db82ed240f80a1d +Author: Tuxis Internet Engineering V.O.F +Date: Wed Oct 5 11:06:48 2016 +0200 + + fix: a dirty hack to prevent failing of stats when the cluster is rebuilding (#68) + + because Ceph returns '-inf' which the json decompiler doesn't seem to get.. + +commit dd365168a5eedf655d87e34e89664b191f855a15 +Author: crcro +Date: Mon Oct 3 21:27:56 2016 +0300 + + fix conflict (#66) + +commit 58e16b794a0e33d0dd71d8c1f936bc8b25ad7ced +Author: crcro +Date: Sun Sep 25 16:28:37 2016 +0300 + + snmp-extend: os-updates (#65) + + * reverted back to os-release checks, added arch pacman + + * fixed file name + +commit 2699cde73fcbca9e556a762dcfd90c81e5561d26 +Author: crcro +Date: Sun Sep 25 16:28:00 2016 +0300 + + snmp-extend: ups-apcups (#58) + + * snmp-extend-ups-apcups + + * rewrite of apc ups + + * header fix + + * header fix + +commit fa308bfe3f388f110e9df083d6b2c649fa69472e +Author: crcro +Date: Sat Sep 24 20:30:09 2016 +0300 + + snmp-extend: ups-nut update 1 (#63) + + * new code for better matching, snmp-extend compliance + + * removed unused vars + + * extra fixes + + * removed the need of tmp file + + * removed charge_low, deemed useless by user + + * removed values that are not plottable + + * readded ds + +commit f63c4ab7bea382b08d0450b42a374db082ccd0ef +Merge: c1c537e d9f36a8 +Author: Tony Murray +Date: Mon Sep 12 22:01:51 2016 -0500 + + Merge pull request #61 from crcro/app-ntp-server-update-1 + + app: ntp-server update 1 + +commit c1c537eea11fde70435e88b28b17292dc7c72f75 +Merge: 9a2716d 11a9fce +Author: Tony Murray +Date: Mon Sep 12 22:01:24 2016 -0500 + + Merge pull request #57 from crcro/snmp-extend-ups-nut + + snmp-extend: ups-nut + +commit 9a2716dc83ad11462495e5ee804fb122eb402faa +Merge: 87cc835 85ae77c +Author: Tony Murray +Date: Mon Sep 12 19:33:07 2016 -0500 + + Merge pull request #60 from crcro/remove-ntp-php + + remove obsolete ntp scripts + +commit d9f36a84b13dd42361d24df11d6cb60c7b71f260 +Author: crcro +Date: Mon Sep 12 12:48:17 2016 +0300 + + cleaner code + +commit 28cae5cff3b87532fd145c55de5b22aa0f4c6d05 +Author: crcro +Date: Mon Sep 12 11:52:13 2016 +0300 + + better handling default case + +commit aeecb1621c8ed5863d5c7563ffc96047909b8cfa +Author: crcro +Date: Mon Sep 12 11:45:14 2016 +0300 + + header fix + +commit f48f4cc6e513773fac094d6b3115954deaeacbc7 +Author: crcro +Date: Mon Sep 12 11:43:34 2016 +0300 + + update 1 ntp server + +commit 87cc835096ffdd4f8310b51e684f63aa7726d14d +Author: crcro +Date: Sat Sep 10 19:08:03 2016 +0300 + + os-updates.sh clean (#59) + +commit 85ae77c01c28308dd1f58b897aa7c8efe5b87386 +Author: crcro +Date: Sat Sep 10 04:50:33 2016 +0300 + + remove obsolete ntpd-server.php + +commit 262f798a9737a5b62bef0ab7a657782a934b86ac +Author: crcro +Date: Sat Sep 10 04:48:55 2016 +0300 + + remove obsolete ntp-client.php script + +commit 11a9fcef62571e12168b8c1e9d1ac604b65c227d +Author: crcro +Date: Fri Sep 9 15:36:01 2016 +0300 + + snmp-extend-ups-nut + +commit 6128dc3c7133802ff66b199bc99289fb07761d6e +Author: vectr0n +Date: Fri Sep 9 02:16:28 2016 -0400 + + Update hddtemp to include hddtemp -w option (#56) + + hddtemp gives inconsistent values in it's current state, after some debugging I was able to resolve the issue by passing -w to the hddtemp command, this will wake-up the drive if it is in a sleep state to gather information. 
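
A minimal sketch of the invocation described above, assuming hddtemp is installed and /dev/sda and /dev/sdb are SMART-capable drives:

    # -w wakes a sleeping drive first, so the reported temperature is consistent
    hddtemp -w /dev/sda /dev/sdb
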
+ +commit 42bc0a07aab450e242471e271380fc29642b34e7 +Author: crcro +Date: Wed Sep 7 22:37:31 2016 +0300 + + ntp-client app using shell only, tested with ntpq 4.2.8p8 (#54) + +commit 718d627cfdbad19848a384fc8eaba332dcaef504 +Author: crcro +Date: Wed Sep 7 22:37:23 2016 +0300 + + app: ntp-server (#55) + + * ntp-server app using shell only, tested with 4.2.8p8 + + * fix for higher stratum value + + * change the description in comment to reflect latest webui push + +commit 351e5aa7bc6f1a79d51b1bd098cace659c1b0e9f +Author: Tatermen +Date: Sun Aug 28 20:06:04 2016 +0100 + + Freeswitch (#53) + + feature: Added freeswitch support + +commit 839b518358d2acb488c3d7709e12392ee2b4c224 +Merge: 6a84755 561efa4 +Author: Neil Lathwood +Date: Tue Aug 23 21:48:08 2016 +0100 + + Merge pull request #52 from murrant/move-scripts + + SNMP extend scripts from the main repo + +commit 561efa41be5e22614912300ac9242582340e0662 +Author: Tony Murray +Date: Mon Aug 22 21:35:13 2016 -0500 + + SNMP extend scripts from the main repo + +commit 6a84755105f651d03939310b4bd5a3cd85dc90dd +Merge: c2e4c33 deb3683 +Author: Tony Murray +Date: Sun Aug 21 19:58:13 2016 -0500 + + Merge pull request #51 from crcro/dhcp_pretty + + rewrite dhcp-stats with loop + +commit deb36833f17d31ddd6176aa7dfc3767817e7c446 +Author: crcro +Date: Mon Aug 22 01:45:23 2016 +0300 + + @paulgear recomandation + +commit c2e4c33abf5edbc0b7a5a00f8871f87d4d0f0513 +Merge: 672918c 9cd81f1 +Author: Tony Murray +Date: Wed Aug 17 09:59:11 2016 -0500 + + Merge pull request #50 from OpcaoTelecom/unbound + + Added unbound stats script + +commit 9cd81f1b930e2ed777ecf3bf6c7deff65df6e564 +Author: Alan Gregory +Date: Wed Aug 17 09:36:39 2016 -0300 + + Added unbound stats script + +commit 672918c40fd87455398267cbf744a52362f738a7 +Merge: 9fe5444 87584e7 +Author: Tony Murray +Date: Tue Aug 16 12:43:10 2016 -0500 + + Merge pull request #48 from crcro/raspberry-sensors + + raspberry sensors + +commit 9fe5444738d086b1d33f92ca0e5905a14cd9c8a0 +Merge: c3afbf3 b6bdb9e +Author: Tony Murray +Date: Mon Aug 15 22:57:52 2016 -0500 + + Merge pull request #49 from murrant/ntp + + Copy ntp scripts from the main repo. + +commit b6bdb9ea45d579becc8f858090e8b7d3e4c809ea +Author: Tony Murray +Date: Mon Aug 15 22:56:31 2016 -0500 + + Copy ntp scripts from the main repo. + +commit 87584e7ef79996db60cd62e64dd4cbaf53a0bac8 +Author: crcro +Date: Sun Aug 14 17:43:27 2016 +0300 + + added snmp extend to get raspberry sensors + +commit c3afbf35bd81bff0dbcdb67e6657dd042ae67588 +Merge: 9623342 aa59548 +Author: Neil Lathwood +Date: Tue Aug 9 19:47:51 2016 +0100 + + Merge pull request #45 from murrant/os-updates + + Do not detect os, detect package managers + +commit 9623342554317ba55f7a987d18250e941a0a7c1f +Merge: 0f5a115 7828777 +Author: Tony Murray +Date: Tue Aug 9 13:08:41 2016 -0500 + + Merge pull request #46 from murrant/distro + + Update distro to match the main repo file + +commit aa59548e0c3d6e5462cd2342ca671dc72430c3f1 +Author: Tony Murray +Date: Tue Aug 9 12:50:23 2016 -0500 + + Do not detect os, detect package managers. + Add pacman support. 
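
A minimal sketch of that approach in bash, with pacman as the example; the query flags are illustrative and not taken from the commit:

    # probe for the package manager itself rather than guessing the distro
    if command -v pacman >/dev/null 2>&1; then
        UPDATES=$(pacman -Qu | wc -l)
    fi
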
+ +commit 78287777696f6569dfe575770f1c47553fddd5a9 +Author: Tony Murray +Date: Tue Aug 9 11:40:01 2016 -0500 + + Update distro to match the main repo file + +commit 0f5a1150f373371fc508e160e58c56cea5adbb99 +Merge: d6308e4 05fe3f8 +Author: Neil Lathwood +Date: Thu Aug 4 18:53:10 2016 +0100 + + Merge pull request #40 from florianbeer/patch-1 + + Add Debian and make update call more robust + +commit d6308e4e1c04d69688d724c7c5c04ab0a3c94fbc +Merge: 3740f3e 2accc28 +Author: Neil Lathwood +Date: Wed Aug 3 21:09:08 2016 +0100 + + Merge pull request #42 from crcro/app-dhcp-stats + + app-dhcp-stats snmp extend + +commit 2accc2848c44f8c2c33a455eb1a2e4ffe801921c +Author: crcro +Date: Wed Aug 3 22:09:55 2016 +0300 + + app-dhcp-stats snmp extend + +commit 05fe3f8cc195b797f69b0599ca2a2e198f0b5d0c +Author: Florian Beer +Date: Wed Aug 3 12:16:22 2016 +0200 + + Remove update call as this requires root + + See discussion here https://github.com/librenms/librenms-agent/pull/40#issuecomment-237198796 + +commit fac01628a07cf8083f91d9924ab8d63a9d4141db +Author: Florian Beer +Date: Wed Aug 3 04:51:35 2016 +0200 + + Add Debian and make update call more robust + + - Debian based systems need to update the index before being able to report upgradable packages. + - Debian old-stable doesn't have `apt` yet and Ubuntu 14.04 emits the following warning when using `apt` in a script: + `WARNING: /usr/bin/apt does not have a stable CLI interface yet. Use with caution in scripts.` + + By using `apt-get`, issuing a `update` call first and then counting the result of `grep 'Inst'`, this script now works on Debian 7, Debian 8, Ubuntu 14.04 and Ubuntu 16.04. + +commit 3740f3e147d7d97e10e4b8e77757ab67deb2bb84 +Merge: fb678cb 1964aec +Author: Tony Murray +Date: Tue Aug 2 20:35:16 2016 -0500 + + Merge pull request #38 from crcro/master + + app: nfs-v3-stats + +commit fb678cb58df6277be2176e8a45a08af1d8dcb8d5 +Merge: 1d4c452 8d7e0df +Author: Tony Murray +Date: Mon Aug 1 11:26:27 2016 -0500 + + Merge pull request #39 from xbeaudouin/fix_distro_freebsd + + Add FreeBSD detection to distro script + +commit 8d7e0df4eb1e35b776aa17d2e6c2ea202cc021a7 +Author: xavier.beaudouin +Date: Mon Aug 1 11:15:52 2016 +0200 + + Add FreeBSD detection to distro script + +commit 1d4c4529ae907b343b7ffcb6eaeb94563ad2cb69 +Merge: dde18e9 760f9de +Author: Paul Gear +Date: Sat Jul 30 14:14:39 2016 +1000 + + Merge pull request #37 from xbeaudouin/master + + Fix some bash scripts to work with FreeBSD + +commit 1964aece5e421391cc6cb589c668da0b5f2eeaee +Author: crcro +Date: Fri Jul 29 20:22:35 2016 +0300 + + added snmp extend script for os-updates application + +commit 57b6224254eb3992e09358df2d867573512f6809 +Author: crcro +Date: Fri Jul 29 20:19:41 2016 +0300 + + added snmp extend script for nfs-v3-stats application + +commit 760f9de567a2876b0ad793979754661946b92c5c +Author: xavier.beaudouin +Date: Fri Jul 29 13:23:20 2016 +0200 + + /bin/bash => /usr/bin/env bash to allow freebsd agent work without patching each files + +commit dde18e98954c83fb52ae89083214814b5515a6c1 +Merge: 18f4006 9a3846c +Author: Neil Lathwood +Date: Tue Jul 26 20:46:20 2016 +0100 + + Merge pull request #36 from murrant/powerdns-recursor + + PowerDNS Recursor agent + +commit 18f4006e09a1436013eee8ed77927585f714fc43 +Merge: f75fc9f fc07e27 +Author: Neil Lathwood +Date: Tue Jul 26 20:45:38 2016 +0100 + + Merge pull request #33 from murrant/mysql-php7 + + Use mysqli instead of mysql + +commit f75fc9fce5a82c47e1303f5514eb0c421ad5cf93 +Merge: bfdf71d c70d12c +Author: Tony Murray +Date: Fri Jul 
22 21:13:58 2016 -0500 + + Merge pull request #35 from murrant/duplicate-nfsstats + + Remove duplicate nfsstats file + +commit 9a3846cac30515a7a01a44ecc9fc6e08e78df1f5 +Author: Tony Murray +Date: Fri Jul 22 15:33:16 2016 -0500 + + PowerDNS Recursor agent + +commit c70d12c83c00e180da8a7e8281acdbd8e4741fa1 +Author: Tony Murray +Date: Fri Jul 22 15:22:48 2016 -0500 + + Remove duplicate nfsstats file + +commit bfdf71d6995ced14ebd1e25042a60c7107a57dc0 +Merge: 41cb583 9501c2f +Author: Tony Murray +Date: Thu Jul 21 22:30:29 2016 -0500 + + Merge pull request #34 from murrant/nfs + + Copy nfsstats script from main repo. + +commit 9501c2f4ffd4649982521c387b3d9dcab1de83d9 +Author: Tony Murray +Date: Thu Jul 21 22:28:41 2016 -0500 + + Copy nfsstats script from main repo. + Send PR to remove scripts from the main repo. + +commit fc07e27c37c74d47c61aeac3cb966062f8da63a2 +Author: Tony Murray +Date: Thu Jul 21 22:26:02 2016 -0500 + + Fix permissions + +commit 41cb5835ff3b0ca41a6392f19e43d590bd08d785 +Merge: db44c10 9bad4df +Author: Tony Murray +Date: Thu Jul 21 21:48:27 2016 -0500 + + Merge pull request #32 from tuxis-ie/proxmox-issue-28 + + Proxmox issue 28 + +commit e80b025818f2f993f4443be3100c5bcd1331812a +Author: Tony Murray +Date: Thu Jul 21 21:31:25 2016 -0500 + + Use mysqli instead of mysql + +commit 9bad4dfb3e586d7892709284cccf17417cf5ec03 +Author: Mark Schouten +Date: Wed Jul 13 15:06:57 2016 +0200 + + Something like this @einarjh ? + +commit 6d27c7edb3f4972a89fbf5641c4ece106b5dbc09 +Author: Mark Schouten +Date: Mon Jul 11 17:06:14 2016 +0200 + + Wrap these calls in an eval to prevent it from dying if its a container instead of a qemu vm. Fixes #28 + +commit db44c1070950c2e06565a39395bb09f09a023b4a +Merge: d00ce4a 5b21301 +Author: Neil Lathwood +Date: Sat Jul 9 19:12:59 2016 +0100 + + Merge pull request #31 from librenms/nfsstats + + Added nfsstats.sh file + +commit 5b21301ecdb761fa0e32f9295c8ea60aef44f3a7 +Author: Neil Lathwood +Date: Sat Jul 9 19:12:13 2016 +0100 + + Added nfsstats.sh file + +commit d00ce4a15a6b52753d108d1aeb2a768e7bfafe36 +Merge: c996b54 ca5a5a1 +Author: Neil Lathwood +Date: Thu Jun 30 08:57:07 2016 +0100 + + Merge pull request #29 from murrant/powerdns-python3 + + Python3 fixes for powerdns agent. Compatible with python2. + +commit ca5a5a12c065eb67e48410ed09ff97630a76f6b8 +Author: Tony Murray +Date: Wed Jun 29 19:52:10 2016 -0500 + + Python3 fixes for powerdns agent. Compatible with python2. + +commit c996b54e79b317785c58963abb6f71c31e61ba10 +Merge: fb7912b 8328d71 +Author: Neil Lathwood +Date: Thu Jun 9 11:38:43 2016 +0100 + + Merge pull request #27 from murrant/rrdcached + + Local script to collect stats from rrdcached + +commit 8328d71c0995fa8f6dc7c50de940fbe9b242fc41 +Author: Tony Murray +Date: Wed Jun 8 20:35:19 2016 -0500 + + Local script to collect stats from rrdcached + Being able to connect to local unix sockets is the primary advantage of this. + +commit fb7912beda4181b23d8cbbbf500a1e7ed4527001 +Merge: 601ac84 8d856e2 +Author: Daniel Preussker +Date: Thu May 5 13:32:02 2016 +0200 + + Merge pull request #25 from Exa-Omicron/master + + Improved hddtemp agent module + +commit 8d856e27648b6df2d89af852ad1cd912319a965f +Author: Robert Verspuy +Date: Thu May 5 10:27:30 2016 +0200 + + Improved hddtemp agent module + + I had some issues with the netcat / daemon implementation of the module. + netcat was stallingor sometimes netcat did not return the full output of hddtemp. + Running hddtemp directly without running it as a daemon is much more stable for me. 
+ + This new version also does not give any stdout output when hddtemp is not installed or when no disks can be found. + Running the script manually on a server does give stderr output for easy debugging. + +commit 601ac843c303d29b8149142a3fac967aaa4a2638 +Merge: 21817b6 1c13779 +Author: Tony Murray +Date: Thu Apr 21 09:46:49 2016 -0500 + + Merge pull request #23 from librenms/freebsd-agent + + Create check_mk_agent_freebsd + +commit 1c1377958e6c8cfd8ca7fd1fd4fcafdae92e1a1b +Author: Neil Lathwood +Date: Thu Apr 21 15:41:06 2016 +0100 + + Update check_mk_agent_freebsd + +commit cdd235a12a0bd4d0cbffe330048fd476aa5fddd5 +Author: Neil Lathwood +Date: Thu Apr 21 15:39:59 2016 +0100 + + Create check_mk_agent_freebsd + + Added freebsd agent + +commit 21817b6b36692bdca8fac8f3ee4a0258a2d2bcee +Author: Tony Murray +Date: Tue Mar 29 08:29:02 2016 -0500 + + Fix wording for systemd unit + +commit 88c4b00b19370bea3e597770793d90b24f24b10b +Merge: dd2b95d 50a3c25 +Author: Neil Lathwood +Date: Tue Mar 29 09:51:00 2016 +0100 + + Merge pull request #22 from murrant/master + + Add systemd unit files + +commit 50a3c25115e501db4bd9fc97a8a8e3b7d81a635e +Author: Tony Murray +Date: Mon Mar 28 12:56:26 2016 -0500 + + Add systemd unit files + +commit dd2b95d8d2eb35bf1b3f0aea34d843af33f1c28e +Merge: 6d0babe ff2bbe6 +Author: Neil Lathwood +Date: Wed Nov 25 13:37:25 2015 +0000 + + Merge pull request #17 from f0o/upstream-snapshot + + Snapshot upstream changes + +commit ff2bbe6882a9b79b93883980b0360f780fc24d76 +Author: f0o +Date: Wed Nov 25 13:26:26 2015 +0000 + + Snapshot upstream changes + +commit 6d0babe0973d5cb8e2d35fd33e2f45e96ae96c15 +Merge: 8e847b9 12e31c1 +Author: Daniel Preussker +Date: Wed Nov 25 13:28:17 2015 +0000 + + Merge pull request #16 from tuxis-ie/powerdns-support + + Powerdns support + +commit 12e31c16c3c42e6d1c73a196978acf18e554e4b0 +Author: Mark Schouten +Date: Mon Nov 23 14:10:17 2015 +0100 + + Add PowerDNS Authoritative Agent + +commit d16462bb5ac978cfd5b7cb213359989b2aabc791 +Author: Mark Schouten +Date: Mon Nov 23 14:10:15 2015 +0100 + + Add PowerDNS Authoritative Agent + +commit 8e847b986aa3af50eb6c2302c3d1f0df158a47bd +Merge: da7e40c 66d5028 +Author: Neil Lathwood +Date: Wed Nov 11 17:17:24 2015 -0400 + + Merge pull request #15 from SaaldjorMike/mysql1 + + Moved mysql tag a bit up and added a newline to error msg. + +commit 66d502837d2643c59d7f87af076fd851b0ba12c1 +Author: Mike Rostermund +Date: Wed Nov 11 14:21:49 2015 +0100 + + Moved mysql tag a bit up and added a newline to error msg. + +commit da7e40c43eb3155d3253c1eb695a78a0d9362a51 +Merge: f6f0079 0cc7b49 +Author: Neil Lathwood +Date: Tue Nov 10 08:08:34 2015 -0400 + + Merge pull request #14 from tuxis-ie/ceph-support + + Ceph support + +commit 0cc7b493978c06f0f3e73749bac1fbadf56c1be8 +Author: Mark Schouten +Date: Tue Nov 10 11:00:58 2015 +0100 + + Add support for Ceph + +commit 9b4c3b34009a441df579051336bf3ea0647fe73c +Author: Mark Schouten +Date: Tue Nov 10 10:58:24 2015 +0100 + + Add support for Ceph + +commit f6f0079c6620ee3d75adf7511006006353903dd3 +Merge: d90957a 30b7651 +Author: Daniel Preussker +Date: Wed Nov 4 13:42:29 2015 +0000 + + Merge pull request #13 from tuxis-ie/master + + Crap, forgot this line... + +commit 30b7651e0142826202276a7bf9a31343d759c68a +Author: Mark Schouten +Date: Wed Nov 4 14:40:19 2015 +0100 + + Crap, forgot this line... 
+ +commit d90957a0bc9e484056eaf26b206672b940fc7a9f +Merge: 25fcd5a 6554087 +Author: Daniel Preussker +Date: Wed Nov 4 13:35:33 2015 +0000 + + Merge pull request #12 from tuxis-ie/master + + Fix the proxmox-agent for Proxmox VE 4.0 + +commit 65540872e7a1215cfdca1d4b480670a67cf50a77 +Author: Mark Schouten +Date: Wed Nov 4 14:30:21 2015 +0100 + + Fix the proxmox-agent for Proxmox VE 4.0 + +commit 25fcd5ae76682006ed61aa09212738381968208f +Merge: 20e2d22 b6bfbba +Author: Paul Gear +Date: Mon Oct 26 09:39:15 2015 +1000 + + Merge pull request #10 from librenms/laf-patch-1 + + Update distro to use env + +commit b6bfbbaf2c99945aceb92e9c7f950a53196c26fc +Author: Neil Lathwood +Date: Sun Oct 25 21:51:43 2015 +0000 + + Update distro to use env + +commit 20e2d220bde9e4edec76d00551c955274d06130c +Merge: 87a20db 2b96259 +Author: Daniel Preussker +Date: Fri Aug 28 09:07:49 2015 +0000 + + Merge pull request #7 from tuxis-ie/master + + Add a proxmox-agent + +commit 2b9625953240ade30cf5ccef22a9293a016b819b +Author: Mark Schouten +Date: Fri Aug 28 10:52:04 2015 +0200 + + Add license + +commit d6795c60a171eba023b8c0e5b151376c6bcfa0d1 +Author: Mark Schouten +Date: Fri Aug 28 10:49:24 2015 +0200 + + Add proxmox-agent + +commit fee2ed820bedb4613871aa9747b40121e3ae7879 +Author: Mark Schouten +Date: Fri Aug 28 10:49:19 2015 +0200 + + Add proxmox-agent + +commit 87a20db845517070fdb2eec70d264e18bfde2871 +Merge: 8ae2b15 6493263 +Author: Daniel Preussker +Date: Thu Aug 20 17:14:11 2015 +0000 + + Merge pull request #5 from tuxis-ie/master + + Add files to create a Debian-package + +commit 64932630f0b67e876d0859df491705b11a71aa07 +Author: Mark Schouten +Date: Thu Aug 20 14:18:10 2015 +0200 + + Do not include the README in the repodir + +commit 77864124dc119b0d89b1c852090e5f283b02123a +Author: Mark Schouten +Date: Thu Aug 20 10:34:50 2015 +0200 + + Add license + +commit 8ae2b1520b9e75583b87977427415c90256473e1 +Merge: 69551b0 63d3166 +Author: Daniel Preussker +Date: Tue Aug 18 15:14:00 2015 +0000 + + Merge pull request #6 from librenms/f0o-mysql-host-logic + + Fix MySQL Host Logic + +commit 63d31665cea2afaeadb8c8ba1b58b37605597b80 +Author: Daniel Preussker +Date: Tue Aug 18 15:08:50 2015 +0000 + + Fix MySQL Host Logic + +commit 51270e24c19bed95030a41e3ab7828bb2330d68d +Author: Mark Schouten +Date: Mon Aug 17 16:58:33 2015 +0200 + + Also include distro in this package + +commit 2b4d17280dd4cbff1b497e2f6ffc17bf75020ea9 +Author: Mark Schouten +Date: Mon Aug 17 16:57:48 2015 +0200 + + Strip comments (on Qemu boxes, this pollutes a lot + +commit 2833310e228e185e78ddbb96589f63e9d2d7b852 +Author: Mark Schouten +Date: Mon Aug 17 16:50:26 2015 +0200 + + Enable dpkg and dmi by default + +commit 3cd06768b5487261ddde819aad6428a3183ffbbf +Author: Mark Schouten +Date: Mon Aug 17 16:48:22 2015 +0200 + + Place all plugins in a repo-dir and add mk_enplug to enable plugins + +commit 7954d5a085f0ffe31fa1becb6d3132ca63b46942 +Author: Mark Schouten +Date: Mon Aug 17 16:19:04 2015 +0200 + + Add Conflicts/Provides and fix location for xinetd.d + +commit a7df28415a4645293835c79d15201539376be11d +Author: Mark Schouten +Date: Mon Aug 17 15:12:12 2015 +0200 + + Add files to create a Debian-package + +commit 69551b05e2673c899077a4539d1b6a6ec95b4290 +Merge: cfec5ec 4683c68 +Author: Daniel Preussker +Date: Tue Jul 28 20:11:44 2015 +0000 + + Merge pull request #4 from alangregory/master + + Added Snmpd.conf example and distro executable + +commit 4683c68d1d23f63ff9977c8a11543004cd4b8a34 +Author: Alan Gregory +Date: Tue Jul 28 15:58:29 2015 -0300 + + 
Added Snmpd.conf example and distro executable + +commit cfec5ec65dc93a6bc9260eb4f1d3f9379d1c7287 +Author: Daniel Preussker +Date: Tue Jun 9 17:34:00 2015 +0000 + + Delete README.md + +commit f1c9d6578a9f5df51047e5246624a96e55e043d4 +Merge: a47d95b 195a46c +Author: Daniel Preussker +Date: Mon May 18 13:07:29 2015 +0200 + + Merge pull request #1 from f0o/master + + Initial commit + +commit 195a46c1e377f6729acf38f294153ef40147d2ff +Author: f0o +Date: Mon May 18 10:57:45 2015 +0000 + + Initial commit + +commit a47d95b58cc05e32a3feaa7f0022857da80ba58a +Author: Daniel Preussker +Date: Mon May 18 09:28:15 2015 +0000 + + Initial commit diff --git a/snmp/mdadm b/snmp/mdadm new file mode 100755 index 000000000..fdd40b983 --- /dev/null +++ b/snmp/mdadm @@ -0,0 +1,104 @@ +#!/bin/bash + +CAT=/bin/cat +LS=/bin/ls + +CONFIGFILE=$0.conf +if [ -f $CONFIGFILE ] ; then + . $CONFIGFILE +fi + +VERSION=1 +ERROR_CODE=0 +ERROR_STRING="" + +OUTPUT_DATA='['\ + +if [ -d /dev/md ] ; then + for RAID in /sys/block/md* ; do + + # ignore arrays with no slaves + if [ -z "$($LS -1 $RAID/slaves)" ] ; then + continue + fi + # ignore "non existing" arrays + if [ ! -f "$RAID/md/degraded" ] ; then + continue + fi + + RAID_NAME=$(basename $RAID) + RAID_DEV_LIST=$($LS $RAID/slaves/) + RAID_LEVEL=$($CAT $RAID/md/level) + RAID_DISC_COUNT=$($CAT $RAID/md/raid_disks) + RAID_STATE=$($CAT $RAID/md/array_state) + RAID_ACTION=$($CAT $RAID/md/sync_action) + RAID_DEGRADED=$($CAT $RAID/md/degraded) + + if [ "$RAID_SYNC_SPEED" = "none" ] ; then + RAID_SYNC_SPEED=0 + else + let "RAID_SYNC_SPEED=$($CAT $RAID/md/sync_speed)*1024" + fi + + if [ "$($CAT $RAID/md/sync_completed)" = "none" ] ; then + RAID_SYNC_COMPLETED=100 + else + let "RAID_SYNC_COMPLETED=100*$($CAT $RAID/md/sync_completed)" + fi + + # divide with 2 to size like in /proc/mdstat + # and multiply with 1024 to get size in bytes + let "RAID_SIZE=$($CAT $RAID/size)*1024/2" + + RAID_DEVICE_LIST='[' + ALL_DEVICE_COUNT=0 + for D in $RAID_DEV_LIST ; do + RAID_DEVICE_LIST=$RAID_DEVICE_LIST'"'$D'",' + let "ALL_DEVICE_COUNT+=1" + done + if [ ${#RAID_DEVICE_LIST} -gt 3 ] ; then + RAID_DEVICE_LIST=${RAID_DEVICE_LIST: : -1} + fi + RAID_DEVICE_LIST=$RAID_DEVICE_LIST']' + + RAID_MISSING_DEVICES='[' + for D in $RAID_DEV_LIST ; do + if [ -L $RAID/slaves/$D ] && [ -f $RAID/slaves/$D ] ; then + RAID_MISSING_DEVICES=$RAID_MISSING_DEVICES'"'$D'",' + fi + done + if [ ${#RAID_MISSING_DEVICES} -gt 3 ] ; then + RAID_MISSING_DEVICES=${RAID_MISSING_DEVICES: : -1} + fi + RAID_MISSING_DEVICES=$RAID_MISSING_DEVICES']' + + let "RAID_HOTSPARE_COUNT=ALL_DEVICE_COUNT-RAID_DISC_COUNT" + + ARRAY_DATA='{'\ +'"name":"'$RAID_NAME\ +'","level":"'$RAID_LEVEL\ +'","size":"'$RAID_SIZE\ +'","disc_count":"'$RAID_DISC_COUNT\ +'","hotspare_count":"'$RAID_HOTSPARE_COUNT\ +'","device_list":'$RAID_DEVICE_LIST\ +',"missing_device_list":'$RAID_MISSING_DEVICES\ +',"state":"'$RAID_STATE\ +'","action":"'$RAID_ACTION\ +'","degraded":"'$RAID_DEGRADED\ +'","sync_speed":"'$RAID_SYNC_SPEED\ +'","sync_completed":"'$RAID_SYNC_COMPLETED\ +'"},' + + OUTPUT_DATA=$OUTPUT_DATA$ARRAY_DATA + done +fi + +OUTPUT_DATA=${OUTPUT_DATA: : -1}']' + +OUTPUT='{"data":'$OUTPUT_DATA\ +',"error":"'$ERROR_CODE\ +'","errorString":"'$ERROR_STRING\ +'","version":"'$VERSION'"}' + +echo $OUTPUT + From b0da8f7ab7e4f849a7b97b73e4e51bb965eae04f Mon Sep 17 00:00:00 2001 From: SourceDoctor Date: Thu, 9 May 2019 22:32:54 +0200 Subject: [PATCH 073/332] fix configfile declaration --- snmp/dhcp-status.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/snmp/dhcp-status.sh b/snmp/dhcp-status.sh index 972214718..458cd02f5 100755 --- a/snmp/dhcp-status.sh +++ b/snmp/dhcp-status.sh @@ -13,7 +13,7 @@ BIN_SED='/usr/bin/sed' BIN_SORT='/usr/bin/sort' BIN_WC='/usr/bin/wc' -CONFIGFILE=dhcp-status.conf +CONFIGFILE=/etc/snmp/dhcp-status.conf if [ -f $CONFIGFILE ] ; then . dhcp-status.conf fi From e5d5915868f79094d44ed285bb9d39fefbb45359 Mon Sep 17 00:00:00 2001 From: SourceDoctor Date: Tue, 14 May 2019 00:55:02 +0200 Subject: [PATCH 074/332] mdadm config file fix (#233) --- snmp/mdadm | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/mdadm b/snmp/mdadm index fdd40b983..f6340d487 100755 --- a/snmp/mdadm +++ b/snmp/mdadm @@ -3,7 +3,7 @@ CAT=/bin/cat LS=/bin/ls -CONFIGFILE=$0.conf +CONFIGFILE=/etc/snmp/mdadm.conf if [ -f $CONFIGFILE ] ; then . $CONFIGFILE fi From 3ecdc4dfc1417c5323bb1f8a111f7b0381cbb4d9 Mon Sep 17 00:00:00 2001 From: SourceDoctor Date: Thu, 23 May 2019 23:57:56 +0200 Subject: [PATCH 075/332] enhance smart to show power_on_hours also --- snmp/smart | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/snmp/smart b/snmp/smart index e31fd8838..309d8f81f 100755 --- a/snmp/smart +++ b/snmp/smart @@ -267,7 +267,6 @@ foreach my $line ( @disks ){ }else{ $output=`$smartctl -A /dev/$disk`; } - my %IDs=( '5'=>'null', '10'=>'null', '173'=>'null', @@ -284,6 +283,7 @@ foreach my $line ( @disks ){ '199'=>'null', '231'=>'null', '233'=>'null', + '9'=>'null', ); my @outputA=split( /\n/, $output ); @@ -317,6 +317,12 @@ foreach my $line ( @disks ){ $IDs{$id}=$raw; } + # 9, power on hours + if ( $id == 9 ) { + my @runtime=split(/\ /, $raw); + $IDs{$id}=$runtime[0]; + } + # 188, Command_Timeout if ( $id == 188 ) { my $total=0; @@ -331,14 +337,13 @@ foreach my $line ( @disks ){ # 190, airflow temp # 194, temp - if ( + if ( ( $id == 190 ) || ( $id == 194 ) ) { my ( $temp )=split(/\ /, $raw); $IDs{$id}=$temp; } - } # SAS Wrapping @@ -398,7 +403,7 @@ foreach my $line ( @disks ){ $toReturn=$toReturn.$disk_id.','.$IDs{'5'}.','.$IDs{'10'}.','.$IDs{'173'}.','.$IDs{'177'}.','.$IDs{'183'}.','.$IDs{'184'}.','.$IDs{'187'}.','.$IDs{'188'} .','.$IDs{'190'} .','.$IDs{'194'}.','.$IDs{'196'}.','.$IDs{'197'}.','.$IDs{'198'}.','.$IDs{'199'}.','.$IDs{'231'}.','.$IDs{'233'}.','. 
- $completed.','.$interrupted.','.$read_failure.','.$unknown_failure.','.$extended.','.$short.','.$conveyance.','.$selective."\n"; + $completed.','.$interrupted.','.$read_failure.','.$unknown_failure.','.$extended.','.$short.','.$conveyance.','.$selective.','.$IDs{'9'}."\n"; } From e0381ec7f6f4ad1c5a65dfaafc161c859ed0c16f Mon Sep 17 00:00:00 2001 From: Shao Yu Lung Date: Mon, 10 Jun 2019 10:58:35 +0800 Subject: [PATCH 076/332] add nginx agent use python3 --- snmp/nginx-python3.py | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) create mode 100755 snmp/nginx-python3.py diff --git a/snmp/nginx-python3.py b/snmp/nginx-python3.py new file mode 100755 index 000000000..4bfb3a402 --- /dev/null +++ b/snmp/nginx-python3.py @@ -0,0 +1,28 @@ +#!/usr/bin/env python3 +from urllib.request import urlopen +import re + +data = urlopen('http://localhost/nginx-status').read() + +params = {} + +for line in data.decode().split("\n"): + smallstat = re.match(r"\s?Reading:\s(.*)\sWriting:\s(.*)\sWaiting:\s(.*)$", line) + req = re.match(r"\s+(\d+)\s+(\d+)\s+(\d+)", line) + if smallstat: + params["Reading"] = smallstat.group(1) + params["Writing"] = smallstat.group(2) + params["Waiting"] = smallstat.group(3) + elif req: + params["Requests"] = req.group(3) + else: + pass + +dataorder = ["Active", "Reading", "Writing", "Waiting", "Requests"] + +for param in dataorder: + if param == "Active": + Active = int(params["Reading"]) + int(params["Writing"]) + int(params["Waiting"]) + print(Active) + else: + print(params[param]) From 8a355ccb5bfcf858604f303fc6e9211d37b669b7 Mon Sep 17 00:00:00 2001 From: Shao Yu Lung Date: Mon, 10 Jun 2019 12:27:53 +0800 Subject: [PATCH 077/332] reformat --- snmp/nginx-python3.py | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/snmp/nginx-python3.py b/snmp/nginx-python3.py index 4bfb3a402..e2a64118d 100755 --- a/snmp/nginx-python3.py +++ b/snmp/nginx-python3.py @@ -7,22 +7,22 @@ params = {} for line in data.decode().split("\n"): - smallstat = re.match(r"\s?Reading:\s(.*)\sWriting:\s(.*)\sWaiting:\s(.*)$", line) - req = re.match(r"\s+(\d+)\s+(\d+)\s+(\d+)", line) - if smallstat: - params["Reading"] = smallstat.group(1) - params["Writing"] = smallstat.group(2) - params["Waiting"] = smallstat.group(3) - elif req: - params["Requests"] = req.group(3) - else: - pass + smallstat = re.match(r"\s?Reading:\s(.*)\sWriting:\s(.*)\sWaiting:\s(.*)$", line) + req = re.match(r"\s+(\d+)\s+(\d+)\s+(\d+)", line) + if smallstat: + params["Reading"] = smallstat.group(1) + params["Writing"] = smallstat.group(2) + params["Waiting"] = smallstat.group(3) + elif req: + params["Requests"] = req.group(3) + else: + pass dataorder = ["Active", "Reading", "Writing", "Waiting", "Requests"] for param in dataorder: - if param == "Active": - Active = int(params["Reading"]) + int(params["Writing"]) + int(params["Waiting"]) - print(Active) - else: - print(params[param]) + if param == "Active": + Active = int(params["Reading"]) + int(params["Writing"]) + int(params["Waiting"]) + print(Active) + else: + print(params[param]) From 6e644272b43b4133ffb10f942ae584dc66cefc96 Mon Sep 17 00:00:00 2001 From: Shao Yu Lung Date: Mon, 10 Jun 2019 12:28:13 +0800 Subject: [PATCH 078/332] add nginx agent use python3 --- agent-local/nginx-python3.py | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) create mode 100755 agent-local/nginx-python3.py diff --git a/agent-local/nginx-python3.py b/agent-local/nginx-python3.py new file mode 100755 index 
000000000..2464f89d1 --- /dev/null +++ b/agent-local/nginx-python3.py @@ -0,0 +1,30 @@ +#!/usr/bin/env python3 +from urllib.request import urlopen +import re + +data = urlopen('http://127.0.0.1/nginx-status').read() + +params = {} + +for line in data.decode().split("\n"): + smallstat = re.match(r"\s?Reading:\s(.*)\sWriting:\s(.*)\sWaiting:\s(.*)$", line) + req = re.match(r"\s+(\d+)\s+(\d+)\s+(\d+)", line) + if smallstat: + params["Reading"] = smallstat.group(1) + params["Writing"] = smallstat.group(2) + params["Waiting"] = smallstat.group(3) + elif req: + params["Requests"] = req.group(3) + else: + pass + +dataorder = ["Active", "Reading", "Writing", "Waiting", "Requests"] + +print("<<>>\n") + +for param in dataorder: + if param == "Active": + Active = int(params["Reading"]) + int(params["Writing"]) + int(params["Waiting"]) + print(Active) + else: + print(params[param]) From 13ce5d35777b3bf7c959ce2126f09360a5d940ff Mon Sep 17 00:00:00 2001 From: SourceDoctor Date: Sat, 15 Jun 2019 14:40:55 +0200 Subject: [PATCH 079/332] fixing configfile call (#235) --- snmp/dhcp-status.sh | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/snmp/dhcp-status.sh b/snmp/dhcp-status.sh index 458cd02f5..a629d0a32 100755 --- a/snmp/dhcp-status.sh +++ b/snmp/dhcp-status.sh @@ -4,7 +4,6 @@ # edit your snmpd.conf add the below line and restart snmpd # # extend dhcpstats /opt/dhcp-status.sh # ################################################################ - FILE_DHCP='/var/lib/dhcp/db/dhcpd.leases' BIN_CAT='/usr/bin/cat' BIN_GREP='/usr/bin/grep' @@ -15,7 +14,7 @@ BIN_WC='/usr/bin/wc' CONFIGFILE=/etc/snmp/dhcp-status.conf if [ -f $CONFIGFILE ] ; then - . dhcp-status.conf + . $CONFIGFILE fi DHCP_LEASES='^lease' From 97bc5d62f3141f561af8f838d187a1d873869b72 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Tue, 25 Jun 2019 02:13:05 -0500 Subject: [PATCH 080/332] fix occasionally random ordering --- snmp/ups-apcups | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/snmp/ups-apcups b/snmp/ups-apcups index f3f45d7df..dd377048b 100755 --- a/snmp/ups-apcups +++ b/snmp/ups-apcups @@ -103,9 +103,14 @@ if ( $toReturn{error} == 0 ){ # pulls apart the output my @lines=split(/\n/, $apcaccess_output); foreach my $line ( @lines ){ - my ( $var, $val )=split(/\ *\:\ */, $line, 2); - $val=~s/\ .*//; - $status{$var}=$val; + my ( $var, $val )=split(/\:\ */, $line, 2); + if ( + defined( $var ) && defined( $val ) + ){ + $var=~s/\ .*//; + $val=~s/\ .*//; + $status{$var}=$val; + } } #pull the desired variables from the output @@ -123,6 +128,7 @@ $toReturn{data}=\%data; # convert $toReturn to JSON and pretty print if asked to my $j=JSON->new; +$j->canonical(1); if ( $opts{p} ){ $j->pretty(1); } From e1855aeb8d3e226aa12a341be331b338e9452302 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Tue, 25 Jun 2019 02:17:47 -0500 Subject: [PATCH 081/332] indent fix --- snmp/ups-apcups | 27 +++++++++++++-------------- 1 file changed, 13 insertions(+), 14 deletions(-) diff --git a/snmp/ups-apcups b/snmp/ups-apcups index dd377048b..5cd682c04 100755 --- a/snmp/ups-apcups +++ b/snmp/ups-apcups @@ -84,29 +84,28 @@ my $apcaccess_output=`$apcaccess`; $toReturn{error}=$?; # check for bad exit codes -if ( $? == -1){ +if ( $? == -1) { $toReturn{errorString}='failed to run apcaccess'; -} -elsif ($? & 127) { +} elsif ($? & 127) { $toReturn{errorString}= sprintf "apcaccess died with signal %d, %s coredump\n", - ($? & 127), ($? & 128) ? 'with' : 'without'; + ($? & 127), ($? & 128) ? 
'with' : 'without'; } else { - $toReturn{error}=$? >> 8; - $toReturn{errorString}="apcaccess exited with ".$toReturn{error}; + $toReturn{error}=$? >> 8; + $toReturn{errorString}="apcaccess exited with ".$toReturn{error}; } # if no bad exit codes, we can process $apcaccess_output -if ( $toReturn{error} == 0 ){ +if ( $toReturn{error} == 0 ) { # holds the found data for the apcupsd status my %status; # pulls apart the output my @lines=split(/\n/, $apcaccess_output); - foreach my $line ( @lines ){ + foreach my $line ( @lines ) { my ( $var, $val )=split(/\:\ */, $line, 2); if ( - defined( $var ) && defined( $val ) - ){ + defined( $var ) && defined( $val ) + ) { $var=~s/\ .*//; $val=~s/\ .*//; $status{$var}=$val; @@ -129,11 +128,11 @@ $toReturn{data}=\%data; # convert $toReturn to JSON and pretty print if asked to my $j=JSON->new; $j->canonical(1); -if ( $opts{p} ){ - $j->pretty(1); +if ( $opts{p} ) { + $j->pretty(1); } print $j->encode( \%toReturn ); -if (! $opts{p} ){ - print "\n"; +if (! $opts{p} ) { + print "\n"; } exit 0; From bcd3a6573e886688c93caac16f3e1302cdfd37b4 Mon Sep 17 00:00:00 2001 From: SourceDoctor Date: Sun, 30 Jun 2019 22:50:29 +0200 Subject: [PATCH 082/332] mdadm name independent mdadm array detection --- snmp/mdadm | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/snmp/mdadm b/snmp/mdadm index f6340d487..ed291bccd 100755 --- a/snmp/mdadm +++ b/snmp/mdadm @@ -2,6 +2,8 @@ CAT=/bin/cat LS=/bin/ls +BASENAME=/usr/bin/basename +REALPATH=/usr/bin/realpath CONFIGFILE=/etc/snmp/mdadm.conf if [ -f $CONFIGFILE ] ; then @@ -15,7 +17,8 @@ ERROR_STRING="" OUTPUT_DATA='['\ if [ -d /dev/md ] ; then - for RAID in /sys/block/md* ; do + for ARRAY_BLOCKDEVICE in $(ls -1 /dev/md/*) ; do + RAID="/sys/block/"$($BASENAME $($REALPATH $ARRAY_BLOCKDEVICE)) # ignore arrays with no slaves if [ -z "$($LS -1 $RAID/slaves)" ] ; then @@ -26,7 +29,11 @@ if [ -d /dev/md ] ; then continue fi - RAID_NAME=$(basename $RAID) + if [[ $($BASENAME $ARRAY_BLOCKDEVICE) = [[:digit:]] ]]; then + RAID_NAME=$($BASENAME $RAID) + else + RAID_NAME=$($BASENAME $ARRAY_BLOCKDEVICE) + fi RAID_DEV_LIST=$($LS $RAID/slaves/) RAID_LEVEL=$($CAT $RAID/md/level) RAID_DISC_COUNT=$($CAT $RAID/md/raid_disks) From 94294d12cbe24bafcf7f5da14e90c012482cadad Mon Sep 17 00:00:00 2001 From: Zmegolaz Date: Mon, 1 Jul 2019 22:00:41 +0200 Subject: [PATCH 083/332] Split SMART power on hours on h too, not only space --- snmp/smart | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/smart b/snmp/smart index 309d8f81f..75a02ff0e 100755 --- a/snmp/smart +++ b/snmp/smart @@ -319,7 +319,7 @@ foreach my $line ( @disks ){ # 9, power on hours if ( $id == 9 ) { - my @runtime=split(/\ /, $raw); + my @runtime=split(/[\ h]/, $raw); $IDs{$id}=$runtime[0]; } From 0b4e95a096e2483c469c2c5bdf8b823dd72412f7 Mon Sep 17 00:00:00 2001 From: Mark Schouten Date: Wed, 3 Jul 2019 11:10:29 +0200 Subject: [PATCH 084/332] Detect current Ceph version and change statistics commands based on those versions. 
Fixes #186 --- agent-local/ceph | 23 +++++++++++++++++++---- 1 file changed, 19 insertions(+), 4 deletions(-) diff --git a/agent-local/ceph b/agent-local/ceph index 29298510a..9a83d3989 100755 --- a/agent-local/ceph +++ b/agent-local/ceph @@ -17,6 +17,11 @@ from subprocess import check_output import json +def cephversion(): + cephv = check_output(["/usr/bin/ceph", "version"]).replace('ceph version ', '') + major, minor = cephv.split('.')[0:2] + return [int(major), int(minor)] + def cephdf(): cephdf = check_output(["/usr/bin/ceph", "-f", "json", "df"]).replace('-inf', '0') @@ -44,12 +49,18 @@ def cephdf(): def osdperf(): + global major osdperf = check_output(["/usr/bin/ceph", "-f", "json", "osd", "perf"]).replace('-inf', '0') - for o in json.loads(osdperf)['osd_perf_infos']: - print("osd.%s:%i:%i" % (o['id'], o['perf_stats']['apply_latency_ms'], o['perf_stats']['commit_latency_ms'])) + if major > 13: + for o in json.loads(osdperf)['osdstats']['osd_perf_infos']: + print("osd.%s:%i:%i" % (o['id'], o['perf_stats']['apply_latency_ms'], o['perf_stats']['commit_latency_ms'])) + else: + for o in json.loads(osdperf)['osd_perf_infos']: + print("osd.%s:%i:%i" % (o['id'], o['perf_stats']['apply_latency_ms'], o['perf_stats']['commit_latency_ms'])) def poolstats(): + global major poolstats = check_output(["/usr/bin/ceph", "-f", "json", "osd", "pool", "stats"]).replace('-inf', '0') for p in json.loads(poolstats): @@ -62,12 +73,17 @@ def poolstats(): except: w = 0 try: - o = p['client_io_rate']['op_per_sec'] + if major > 11: + o = p['client_io_rate']['read_op_per_sec'] + p['client_io_rate']['write_op_per_sec'] + else: + o = p['client_io_rate']['op_per_sec'] except: o = 0 print("%s:%i:%i:%i" % (p['pool_name'], o, w, r)) +major, minor = cephversion() + print "<<>>" print "" poolstats() @@ -75,4 +91,3 @@ print "" osdperf() print "" cephdf() - From 05fbd40a4c7158d1171859fe3a87e1030df038f9 Mon Sep 17 00:00:00 2001 From: Mark Schouten Date: Wed, 3 Jul 2019 12:05:14 +0200 Subject: [PATCH 085/332] Copy snmp script inside the package as well --- Makefile | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 31d16d2df..ac4690ce5 100644 --- a/Makefile +++ b/Makefile @@ -4,7 +4,9 @@ install: mkdir -p $(PREFIX)/usr/lib/check_mk_agent/plugins mkdir -p $(PREFIX)/usr/lib/check_mk_agent/repo mkdir -p $(PREFIX)/usr/lib/check_mk_agent/local - cp -r agent-local/* $(PREFIX)/usr/lib/check_mk_agent/repo/ + mkdir -p $(PREFIX)/usr/share/librenms-agent/snmp + cp -rL agent-local/* $(PREFIX)/usr/lib/check_mk_agent/repo/ + cp -rL snmp/* $(PREFIX)/usr/share/librenms-agent/snmp rm $(PREFIX)/usr/lib/check_mk_agent/repo/README mkdir -p $(PREFIX)/usr/bin install -m 0750 check_mk_agent $(PREFIX)/usr/bin/check_mk_agent From c019417a44481d7a8545839d844ddd89d746c018 Mon Sep 17 00:00:00 2001 From: Mark Schouten Date: Wed, 3 Jul 2019 12:06:15 +0200 Subject: [PATCH 086/332] New debian changelog --- debian/changelog | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/debian/changelog b/debian/changelog index 222de759c..8fcd34695 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,12 @@ +librenms-agent (1.1.0) stable; urgency=low + + - New upstream versions + - Include SNMP scripts + - Fix Ceph scripts + - Fix nginx scripts + + -- Mark Schouten Wed, 03 Jul 2019 12:06:00 +0200 + librenms-agent (1.0.7) stable; urgency=low - New upstream versions From ca9b5ad17deec93abe69911344a1a13f5e95c40e Mon Sep 17 00:00:00 2001 From: "Shao Yu-Lung (Allen)" Date: Wed, 10 Jul 2019 11:48:49 +0800 Subject: 
[PATCH 087/332] add zfs support for freebsd use python 3 copy from #166. --- snmp/zfs-freebsd.py | 116 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 116 insertions(+) create mode 100644 snmp/zfs-freebsd.py diff --git a/snmp/zfs-freebsd.py b/snmp/zfs-freebsd.py new file mode 100644 index 000000000..3ef92c889 --- /dev/null +++ b/snmp/zfs-freebsd.py @@ -0,0 +1,116 @@ +#!/usr/local/bin/python3 + +# FreeNAS 11.1 not support #!/usr/bin/env python3 + +import json +import subprocess + +def percent(numerator, denominator, default=0): + try: + return numerator / denominator * 100 + except ZeroDivisionError: + return default + +def main(args): + p = subprocess.run(['/sbin/sysctl', '-q', 'kstat.zfs', 'vfs.zfs'], stdout=subprocess.PIPE, universal_newlines=True) + + if p.returncode != 0: + return p.returncode + + def chomp(line): + bits = [b.strip() for b in line.split(':')] + return bits[0], int(bits[1]) + stats = dict(chomp(l) for l in p.stdout.splitlines()) + if 'kstat.zfs.misc.arcstats.recycle_miss' not in stats: + stats['kstat.zfs.misc.arcstats.recycle_miss'] = 0 + + output = dict() + + # ARC misc + output['deleted'] = stats['kstat.zfs.misc.arcstats.deleted'] + output['evict_skip'] = stats['kstat.zfs.misc.arcstats.evict_skip'] + output['mutex_skip'] = stats['kstat.zfs.misc.arcstats.mutex_miss'] + output['recycle_miss'] = stats['kstat.zfs.misc.arcstats.recycle_miss'] + + # ARC size + output['target_size_per'] = stats['kstat.zfs.misc.arcstats.c'] / stats['kstat.zfs.misc.arcstats.c_max'] * 100 + output['arc_size_per'] = stats['kstat.zfs.misc.arcstats.size'] / stats['kstat.zfs.misc.arcstats.c_max'] * 100 + output['target_size_arat'] = stats['kstat.zfs.misc.arcstats.c'] / stats['kstat.zfs.misc.arcstats.c_max'] + output['min_size_per'] = stats['kstat.zfs.misc.arcstats.c_min'] / stats['kstat.zfs.misc.arcstats.c_max'] * 100 + + output['arc_size'] = stats['kstat.zfs.misc.arcstats.size'] + output['target_size_max'] = stats['kstat.zfs.misc.arcstats.c_max'] + output['target_size_min'] = stats['kstat.zfs.misc.arcstats.c_min'] + output['target_size'] = stats['kstat.zfs.misc.arcstats.c'] + + # ARC size breakdown + output['mfu_size'] = stats['kstat.zfs.misc.arcstats.size'] - stats['kstat.zfs.misc.arcstats.p'] + output['p'] = stats['kstat.zfs.misc.arcstats.p'] + output['rec_used_per'] = stats['kstat.zfs.misc.arcstats.p'] / stats['kstat.zfs.misc.arcstats.size'] * 100 + output['freq_used_per'] = output['mfu_size'] / stats['kstat.zfs.misc.arcstats.size'] * 100 + + # ARC misc efficiency stats + output['arc_hits'] = stats['kstat.zfs.misc.arcstats.hits'] + output['arc_misses'] = stats['kstat.zfs.misc.arcstats.misses'] + output['demand_data_hits'] = stats['kstat.zfs.misc.arcstats.demand_data_hits'] + output['demand_data_misses'] = stats['kstat.zfs.misc.arcstats.demand_data_misses'] + output['demand_meta_hits'] = stats['kstat.zfs.misc.arcstats.demand_metadata_hits'] + output['demand_meta_misses'] = stats['kstat.zfs.misc.arcstats.demand_metadata_misses'] + output['mfu_ghost_hits'] = stats['kstat.zfs.misc.arcstats.mfu_ghost_hits'] + output['mfu_hits'] = stats['kstat.zfs.misc.arcstats.mfu_hits'] + output['mru_ghost_hits'] = stats['kstat.zfs.misc.arcstats.mru_ghost_hits'] + output['mru_hits'] = stats['kstat.zfs.misc.arcstats.mru_hits'] + output['pre_data_hits'] = stats['kstat.zfs.misc.arcstats.prefetch_data_hits'] + output['pre_data_misses'] = stats['kstat.zfs.misc.arcstats.prefetch_data_misses'] + output['pre_meta_hits'] = stats['kstat.zfs.misc.arcstats.prefetch_metadata_hits'] + output['pre_meta_misses'] = 
stats['kstat.zfs.misc.arcstats.prefetch_metadata_misses'] + + output['anon_hits'] = output['arc_hits'] - (output['mfu_hits'] + output['mru_hits'] + output['mfu_ghost_hits'] + output['mru_ghost_hits']) + output['arc_accesses_total'] = output['arc_hits'] + output['arc_misses'] + output['demand_data_total'] = output['demand_data_hits'] + output['demand_data_misses'] + output['pre_data_total'] = output['pre_data_hits'] + output['pre_data_misses'] + output['real_hits'] = output['mfu_hits'] + output['mru_hits'] + + # ARC efficiency percents + output['cache_hits_per'] = percent(output['arc_hits'], output['arc_accesses_total']) + output['cache_miss_per'] = percent(output['arc_misses'], output['arc_accesses_total']) + output['actual_hit_per'] = percent(output['real_hits'], output['arc_accesses_total']) + output['data_demand_per'] = percent(output['demand_data_hits'], output['demand_data_total']) + output['data_pre_per'] = percent(output['pre_data_hits'], output['pre_data_total']) + output['anon_hits_per'] = percent(output['anon_hits'], output['arc_hits']) + output['mru_per'] = percent(output['mru_hits'], output['arc_hits']) + output['mfu_per'] = percent(output['mfu_hits'], output['arc_hits']) + output['mru_ghost_per'] = percent(output['mru_ghost_hits'], output['arc_hits']) + output['mfu_ghost_per'] = percent(output['mfu_ghost_hits'], output['arc_hits']) + output['demand_hits_per'] = percent(output['demand_data_hits'], output['arc_hits']) + output['pre_hits_per'] = percent(output['pre_data_hits'], output['arc_hits']) + output['meta_hits_per'] = percent(output['demand_meta_hits'], output['arc_hits']) + output['pre_meta_hits_per'] = percent(output['pre_meta_hits'], output['arc_hits']) + output['demand_misses_per'] = percent(output['demand_data_misses'], output['arc_misses']) + output['pre_misses_per'] = percent(output['pre_data_misses'], output['arc_misses']) + output['meta_misses_per'] = percent(output['demand_meta_misses'], output['arc_misses']) + output['pre_meta_misses_per'] = percent(output['pre_meta_misses'], output['arc_misses']) + + # pools + p = subprocess.run(['/sbin/zpool', 'list', '-pH'], stdout=subprocess.PIPE, universal_newlines=True) + if p.returncode != 0: + return p.returncode + output['pools'] = [] + fields = ['name', 'size', 'alloc', 'free', 'expandsz', 'frag', 'cap', 'dedup'] + for l in p.stdout.splitlines(): + p = dict(zip(fields, l.split('\t'))) + if p['expandsz'] == '-': + p['expandsz'] = 0 + p['frag'] = p['frag'].rstrip('%') + if p['frag'] == '-': + p['frag'] = 0 + p['dedup'] = p['dedup'].rstrip('x') + output['pools'].append(p) + + print(json.dumps(output)) + + return 0 + +if __name__ == '__main__': + import sys + sys.exit(main(sys.argv[1:])) From d609e23600d7a5137d92dabe296a8cb2d60df8d8 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Thu, 8 Aug 2019 21:18:06 -0500 Subject: [PATCH 088/332] /tmp fix (#250) * convert the snmp scripts using tmp files over to use mktemp * reverse this... joy... 
not a temp file but cache file ;( * moved cache file from under /tmp to /var/cache/librenms * fix mysql tmp usage --- agent-local/apache | 2 +- agent-local/dpkg | 2 +- agent-local/mysql | 8 ++++---- agent-local/nfsstats | 4 ++-- agent-local/rpm | 2 +- snmp/apache-stats | 2 +- snmp/apache-stats.py | 2 +- snmp/mysql | 8 ++++---- snmp/nfs-stats.sh | 6 +++--- snmp/powerdns-dnsdist | 2 +- 10 files changed, 19 insertions(+), 19 deletions(-) diff --git a/agent-local/apache b/agent-local/apache index c0d9a795e..a5f0927cd 100755 --- a/agent-local/apache +++ b/agent-local/apache @@ -14,7 +14,7 @@ use LWP::Simple; $CACHETIME = 30; -$CACHEFILE = '/tmp/agent-local-apache'; +$CACHEFILE = '/var/cache/librenms/agent-local-apache'; # check for cache file newer CACHETIME seconds ago if ( -f $CACHEFILE && time - (stat( $CACHEFILE ))[9] < $CACHETIME) { diff --git a/agent-local/dpkg b/agent-local/dpkg index 1c3d08011..70917ba72 100755 --- a/agent-local/dpkg +++ b/agent-local/dpkg @@ -4,7 +4,7 @@ # We cache because it is a 1sec delay, which is painful for the poller if [ -x /usr/bin/dpkg-query ]; then DATE=$(date +%s) - FILE=/tmp/agent-local-dpkg + FILE=/var/cache/librenms/agent-local-dpkg if [ ! -e $FILE ]; then dpkg-query -W --showformat='${Status} ${Package} ${Version} ${Architecture} ${Installed-Size}\n'|grep " installed "|cut -d\ -f4- > $FILE diff --git a/agent-local/mysql b/agent-local/mysql index 9277efc91..0b9419fd0 100755 --- a/agent-local/mysql +++ b/agent-local/mysql @@ -27,7 +27,7 @@ if (!array_key_exists('SCRIPT_FILENAME', $_SERVER) # ============================================================================ # CONFIGURATION # ============================================================================ -# Define MySQL connection constants in config.php. Instead of defining +# Define MySQL connection constants in config.php. Instead of defining # parameters here, you can define them in another file named the same as this # file, with a .cnf extension. # ============================================================================ @@ -48,7 +48,7 @@ $heartbeat_server_id = 0; # Server id to associate with a heartbeat. Leave 0 if $heartbeat_table = 'percona.heartbeat'; # db.tbl. -$cache_dir = '/tmp'; # If set, this uses caching to avoid multiple calls. +$cache_dir = '/var/cache/librenms'; # If set, this uses caching to avoid multiple calls. $timezone = null; # If not set, uses the system default. Example: "UTC" $cache_time = 30; # How long to cache data. @@ -286,7 +286,7 @@ function ss_get_mysql_stats( $options ) { $flags = isset($options['flags']) ? $options['flags'] : $mysql_flags; $connection_timeout = isset($options['connection-timeout']) ? $options['connection-timeout'] : $mysql_connection_timeout; $heartbeat_server_id = isset($options['server-id']) ? $options['server-id'] : $heartbeat_server_id; - + # If there is a port, or if it's a non-standard port, we add ":$port" to the # hostname. $host_str = $host.($port != 3306 ? ":$port" : ''); @@ -471,7 +471,7 @@ function ss_get_mysql_stats( $options ) { debug("Got nothing from SHOW SLAVE STATUS"); } } - + # Get SHOW MASTER STATUS, and add it to the $status array. 
if ($chk_options['master'] && array_key_exists('log_bin', $status) diff --git a/agent-local/nfsstats b/agent-local/nfsstats index 02a834cdb..404e2fd06 100755 --- a/agent-local/nfsstats +++ b/agent-local/nfsstats @@ -15,8 +15,8 @@ BIN_GREP='/usr/bin/grep' BIN_PASTE='/usr/bin/paste' BIN_RM='/usr/bin/rm' BIN_MV='/usr/bin/mv' -LOG_OLD='/tmp/nfsstats_old' -LOG_NEW='/tmp/nfsstats_new' +LOG_OLD='/var/cache/librenms/nfsstats_old' +LOG_NEW='/var/cache/librenms/nfsstats_new' $BIN_NFSSTAT -$CFG_NFSVER -n -l | $BIN_TR -s " " | $BIN_CUT -d ' ' -f 5 | $BIN_GREP -v '^$' > $LOG_NEW 2>&1 diff --git a/agent-local/rpm b/agent-local/rpm index 88483be1d..b27c0c44e 100755 --- a/agent-local/rpm +++ b/agent-local/rpm @@ -4,7 +4,7 @@ # We cache because it is a 1sec delay, which is painful for the poller if [ -x /bin/rpm ]; then DATE=$(date +%s) - FILE=/tmp/agent-local-rpm + FILE=/var/cache/librenms/agent-local-rpm if [ ! -e $FILE ]; then /bin/rpm -q --all --queryformat '%{N} %{V} %{R} %{ARCH} %{SIZE}\n' > $FILE fi diff --git a/snmp/apache-stats b/snmp/apache-stats index 863514aea..ef6574639 100755 --- a/snmp/apache-stats +++ b/snmp/apache-stats @@ -14,7 +14,7 @@ use LWP::Simple; $CACHETIME = 30; -$CACHEFILE = '/tmp/snmp-cache-apache'; +$CACHEFILE = '/var/cache/librenms/snmp-cache-apache'; # check for cache file newer CACHETIME seconds ago if ( -f $CACHEFILE && time - (stat( $CACHEFILE ))[9] < $CACHETIME) { diff --git a/snmp/apache-stats.py b/snmp/apache-stats.py index 378d858e8..f098a8c55 100755 --- a/snmp/apache-stats.py +++ b/snmp/apache-stats.py @@ -19,7 +19,7 @@ # CACHETIME = 30 -CACHEFILE = '/tmp/apache-snmp' +CACHEFILE = '/var/cache/librenms/apache-snmp' # check for cache file newer CACHETIME seconds ago import os diff --git a/snmp/mysql b/snmp/mysql index 27833e016..44e31e289 100755 --- a/snmp/mysql +++ b/snmp/mysql @@ -27,7 +27,7 @@ if (!array_key_exists('SCRIPT_FILENAME', $_SERVER) # ============================================================================ # CONFIGURATION # ============================================================================ -# Define MySQL connection constants in config.php. Instead of defining +# Define MySQL connection constants in config.php. Instead of defining # parameters here, you can define them in another file named the same as this # file, with a .cnf extension. # ============================================================================ @@ -49,7 +49,7 @@ $heartbeat_utc = FALSE; # Whether pt-heartbeat is run with --utc option. $heartbeat_server_id = 0; # Server id to associate with a heartbeat. Leave 0 if no preference. $heartbeat_table = 'percona.heartbeat'; # db.tbl. -$cache_dir = '/tmp'; # If set, this uses caching to avoid multiple calls. +$cache_dir = '/var/cache/librenms'; # If set, this uses caching to avoid multiple calls. $timezone = null; # If not set, uses the system default. Example: "UTC" $cache_time = 30; # How long to cache data. @@ -289,7 +289,7 @@ function ss_get_mysql_stats( $options ) { $flags = isset($options['flags']) ? $options['flags'] : $mysql_flags; $connection_timeout = isset($options['connection-timeout']) ? $options['connection-timeout'] : $mysql_connection_timeout; $heartbeat_server_id = isset($options['server-id']) ? $options['server-id'] : $heartbeat_server_id; - + # If there is a port, or if it's a non-standard port, we add ":$port" to the # hostname. $host_str = $host.($port != 3306 ? 
":$port" : ''); @@ -474,7 +474,7 @@ function ss_get_mysql_stats( $options ) { debug("Got nothing from SHOW SLAVE STATUS"); } } - + # Get SHOW MASTER STATUS, and add it to the $status array. if ($chk_options['master'] && array_key_exists('log_bin', $status) diff --git a/snmp/nfs-stats.sh b/snmp/nfs-stats.sh index 25bbb6b1e..22d8d3f8f 100755 --- a/snmp/nfs-stats.sh +++ b/snmp/nfs-stats.sh @@ -12,9 +12,9 @@ BIN_TR='/usr/bin/tr' BIN_PASTE='/usr/bin/paste' BIN_RM='/usr/bin/rm' BIN_MV='/usr/bin/mv' -LOG_OLD='/tmp/nfsio_old' -LOG_NEW='/tmp/nfsio_new' -LOG_FIX='/tmp/nfsio_fix' +LOG_OLD='/var/cache/librenms/nfsio_old' +LOG_NEW='/var/cache/librenms/nfsio_new' +LOG_FIX='/var/cache/librenms/nfsio_fix' #get reply cache (rc - values: hits, misses, nocache) $BIN_CAT $CFG_NFSFILE | $BIN_SED -n 1p | $BIN_AWK '{print $2,$3,$4}' | $BIN_TR " " "\n" > $LOG_NEW diff --git a/snmp/powerdns-dnsdist b/snmp/powerdns-dnsdist index 87eda58bd..0572fb5cd 100644 --- a/snmp/powerdns-dnsdist +++ b/snmp/powerdns-dnsdist @@ -6,7 +6,7 @@ API_AUTH_USER="admin" API_AUTH_PASS="" API_URL="" API_STATS="jsonstat?command=stats" -TMP_FILE="/tmp/dnsdist_current.stats" +TMP_FILE=`/usr/bin/mktemp` #/ Description: BASH script to get PowerDNS dnsdist stats #/ Examples: ./powerdns-dnsdist From 944a209d7f91ee8f2aeec20c97070d1a76ffdeb4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fr=C3=A9d=C3=A9ric=20Bourqui?= Date: Sun, 11 Aug 2019 09:59:26 +0200 Subject: [PATCH 089/332] Add support for BSD and Illumos kstat --- snmp/zfs-linux | 47 ++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 40 insertions(+), 7 deletions(-) diff --git a/snmp/zfs-linux b/snmp/zfs-linux index c5f36256c..3d1ab28cb 100644 --- a/snmp/zfs-linux +++ b/snmp/zfs-linux @@ -3,17 +3,45 @@ import json import subprocess def main(args): + LINUX = '/proc/spl/kstat/zfs/arcstats' + BSD1 = 'sysctl' + BSD2 = 'kstat.zfs.misc.arcstats' + ILLUMOS = 'kstat -n arcstats' + COLUMN = 1 + SPLIT = None res = {} - ARCSTATS = open('/proc/spl/kstat/zfs/arcstats', 'r') - LINES = ARCSTATS.readlines() + try: + LINES = open(LINUX, 'r').readlines() + COLUMN = 2 + + except IOError as e1: + try: + proc = subprocess.run([BSD1, BSD2], stdout=subprocess.PIPE, universal_newlines=True) + LINES = proc.stdout.splitlines() + LINES = [x[len(BSD2)+1:] for x in LINES] + SPLIT = ':' + except FileNotFoundError as e2: + try: + proc = subprocess.run(ILLUMOS.split(), stdout=subprocess.PIPE, universal_newlines=True) + LINES = proc.stdout.splitlines() + except FileNotFoundError as e3: + print('Linux :', e1) + print('BSD :', e2) + print('Illumos:', e3) + return 1 + LINES = [x.strip() for x in LINES] - + STATS = {} for line in LINES[2:]: - splitline = line.split() - STATS[splitline[0]] = int(splitline[2]) - + splitline = line.split(SPLIT) + try: + STATS[splitline[0]] = int(splitline[COLUMN]) + # Skip non int value like Illumos crtime, empty line at the end + except: + continue + # ARC misc DELETED = STATS['deleted'] EVICT_SKIP = STATS['evict_skip'] @@ -100,7 +128,10 @@ def main(args): return proc.returncode pools = [] - FIELDS = ['name', 'size', 'alloc', 'free', 'expandsz', 'frag', 'cap', 'dedup'] + FIELDS = ['name', 'size', 'alloc', 'free', 'ckpoint', 'expandsz', 'frag', 'cap', 'dedup', 'health', 'altroot'] + if len(proc.stdout.splitlines()[0].split('\t')) == 10: + FIELDS.remove('ckpoint') + for line in proc.stdout.splitlines(): info = dict(zip(FIELDS, line.split('\t'))) @@ -109,6 +140,8 @@ def main(args): info['frag'] = 0 if info['frag'] == '-' else info['frag'] info['dedup'] = info['dedup'].rstrip('x') info['cap'] 
= info['cap'].rstrip('%') + if 'ckpoint' in info: + info['ckpoint'] = 0 if info['ckpoint'] == '-' else info['ckpoint'] pools.append(info) From f9f031912cf8cf571c18c581cc20263f49760d5d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fr=C3=A9d=C3=A9ric=20Bourqui?= Date: Sun, 11 Aug 2019 10:08:20 +0200 Subject: [PATCH 090/332] DEMAND_ used instead of PREFETCH_METADATA_MISSES --- snmp/zfs-linux | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/zfs-linux b/snmp/zfs-linux index 3d1ab28cb..e193c5ea2 100644 --- a/snmp/zfs-linux +++ b/snmp/zfs-linux @@ -120,7 +120,7 @@ def main(args): DEMAND_MISSES_PERCENT = DEMAND_DATA_MISSES / ARC_MISSES * 100 PREFETCH_MISSES_PERCENT = PREFETCH_DATA_MISSES / ARC_MISSES * 100 METADATA_MISSES_PERCENT = DEMAND_METADATA_MISSES / ARC_MISSES * 100 - PREFETCH_METADATA_MISSES_PERCENT = DEMAND_METADATA_MISSES / ARC_MISSES * 100 + PREFETCH_METADATA_MISSES_PERCENT = PREFETCH_METADATA_MISSES / ARC_MISSES * 100 # pools proc = subprocess.run(['/sbin/zpool', 'list', '-pH'], stdout=subprocess.PIPE, universal_newlines=True) From e3fdfc5defeded642d2b0ca31d6ce6cb45ec69ef Mon Sep 17 00:00:00 2001 From: SourceDoctor Date: Mon, 21 Oct 2019 04:03:13 +0200 Subject: [PATCH 091/332] Seafile Monitoring (#249) --- snmp/seafile.py | 211 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 211 insertions(+) create mode 100755 snmp/seafile.py diff --git a/snmp/seafile.py b/snmp/seafile.py new file mode 100755 index 000000000..c34cf6e6e --- /dev/null +++ b/snmp/seafile.py @@ -0,0 +1,211 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +# https://download.seafile.com/published/web-api/v2.1-admin + +# user -> libraries (count) +# user -> trash-libraries (count) +# user -> space consumption (count) +# user -> is activated (bool) + +# connected_devices (count) +# groups (count) + +# Clients -> plattform (count) +# Clients -> version (count) + +import requests +import json + +# Configfile content example: +# {"url": "https://seafile.mydomain.org", +# "username": "some_admin_login@mail.address", +# "password": "password", +# "account_identifier": "name", +# "hide_monitoring_account": true +# } + +CONFIGFILE='/etc/snmp/seafile.json' +error = 0 +error_string = '' +version = 1 + + +def get_data(url_path, data=None, token=None): + complete_url = "%s/%s" % (url, url_path) + headers = {'Accept': 'application/json'} + if token: + headers['Authorization'] = "Token %s" % token + + try: + if token: + r = requests.get(complete_url, data=data, headers=headers) + else: + r = requests.post(complete_url, data=data, headers=headers) + try: + return r.json() + except json.decoder.JSONDecodeError: + return 'no valid json returned - url correct?' 
+ except requests.exceptions.RequestException as err: + return str(err) + + +def get_devices(): + # get all devices + url_path = 'api/v2.1/admin/devices/' + return get_data(url_path, token=token) + + +def get_groups(): + # get all groups + url_path = 'api/v2.1/admin/groups/' + return get_data(url_path, token=token) + + +def get_sysinfo(): + # get all groups + url_path = 'api/v2.1/admin/sysinfo/' + return get_data(url_path, token=token) + + +def get_account_information(): + # get all accounts withs details + account_list = [] + for account in get_data('api2/accounts/', token=token): + + # get account details + url_path = 'api2/accounts/%s/' % account['email'] + account_data = get_data(url_path, token=token) + + # get libraries by owner + url_path = 'api/v2.1/admin/libraries/?owner=%s' % account['email'] + account_data['repos'] = get_data(url_path, token=token)['repos'] + + # get deleted libraries by owner + url_path = 'api/v2.1/admin/trash-libraries/?owner=%s' % account['email'] + account_data['trash_repos'] = get_data(url_path, token=token)['repos'] + + account_list.append(account_data) + return account_list + + +def resort_devices(device_list): + data = {} + platform = {} + client_version = {} + for device in device_list: + # don't list information assigned to monitor account + if hide_monitoring_account: + if device['user'] == configfile['username']: + continue + + if device['platform'] not in platform.keys(): + platform[device['platform']] = 1 + else: + platform[device['platform']] += 1 + + if device['client_version'] not in client_version.keys(): + client_version[device['client_version']] = 1 + else: + client_version[device['client_version']] += 1 + + data['platform'] = [] + for k, v in platform.items(): + data['platform'].append({'os_name': k, + 'clients':v}) + data['client_version'] = [] + for k, v in client_version.items(): + data['client_version'].append({'client_version': k, + 'clients':v}) + + return data + + +def resort_groups(group_list): + data = {'count': len(group_list)} + return data + + +def resort_accounts(account_list): + if account_identifier in ['name', 'email']: + identifier = account_identifier + else: + identifier = 'name' + + accepted_key_list = ['is_active', 'usage'] + + data = [] + for user_account in account_list: + # don't list information assigned to monitor account + if hide_monitoring_account: + if user_account['email'] == configfile['username']: + continue + + new_account = {} + new_account['owner'] = user_account[identifier] + new_account['repos'] = len(user_account['repos']) + new_account['trash_repos'] = len(user_account['trash_repos']) + + for k in user_account.keys(): + if k not in accepted_key_list: + continue + new_account[k] = user_account[k] + data.append(new_account) + + return sorted(data, key=lambda k: k['owner'].lower()) + + +# ------------------------ MAIN -------------------------------------------------------- +with open(CONFIGFILE, 'r') as json_file: + try: + configfile = json.load(json_file) + except json.decoder.JSONDecodeError as e: + error = 1 + error_string = "Configfile Error: '%s'" % e + +if not error: + url = configfile['url'] + username = configfile['username'] + password = configfile['password'] + try: + account_identifier = configfile['account_identifier'] + except KeyError: + account_identifier = None + try: + hide_monitoring_account = configfile['hide_monitoring_account'] + except KeyError: + hide_monitoring_account = False + + # get token + login_data = {'username': username, 'password': password} + ret = 
get_data('api2/auth-token/', data=login_data) + if type(ret) != str: + if 'token' in ret.keys(): + token = ret['token'] + else: + error = 1 + try: + error_string = json.dumps(ret) + except: + error_string = ret + else: + error = 1 + error_string = ret + +data = {} +if not error: + ret= get_account_information() +if not error: + data['accounts'] = resort_accounts(ret) + data['devices'] = resort_devices(get_devices()['devices']) + data['groups'] = resort_groups(get_groups()['groups']) + data['sysinfo'] = get_sysinfo() + +output = {'error': error, + 'errorString': error_string, + 'version': version, + 'data': data + } + +print(json.dumps(output)) + From 4937e15c14be77462282d71ed91988e63b2f464b Mon Sep 17 00:00:00 2001 From: sparknsh <38894705+sparknsh@users.noreply.github.com> Date: Mon, 21 Oct 2019 11:18:08 -0400 Subject: [PATCH 092/332] Verion fix for NTP SNMP (#256) The version variable was not pulled right on centos based distros. This fixes the issue. --- snmp/ntp-client | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/ntp-client b/snmp/ntp-client index 04db80655..925155abe 100755 --- a/snmp/ntp-client +++ b/snmp/ntp-client @@ -28,7 +28,7 @@ NTP_FREQUENCY=`$BIN_NTPQ -c rv | $BIN_GREP "frequency" | $BIN_AWK -Ffrequency= ' NTP_SYS_JITTER=`$BIN_NTPQ -c rv | $BIN_GREP "sys_jitter" | $BIN_AWK -Fsys_jitter= '{print $2}' | $BIN_AWK -F, '{print $1}'` NTP_CLK_JITTER=`$BIN_NTPQ -c rv | $BIN_GREP "clk_jitter" | $BIN_AWK -Fclk_jitter= '{print $2}' | $BIN_AWK -F, '{print $1}'` NTP_WANDER=`$BIN_NTPQ -c rv | $BIN_GREP "clk_wander" | $BIN_AWK -Fclk_wander= '{print $2}' | $BIN_AWK -F, '{print $1}'` -NTP_VERSION=`$BIN_NTPD --version | $BIN_AWK -F'ntpd ' '{print $2}' | $BIN_HEAD -c 1` +NTP_VERSION=`$BIN_NTPQ -c rv | $BIN_GREP "version" | $BIN_AWK -F'ntpd ' '{print $2}' | $BIN_AWK -F. 
'{print $1}'` echo '{"data":{"offset":"'$NTP_OFFSET'","frequency":"'$NTP_FREQUENCY'","sys_jitter":"'$NTP_SYS_JITTER'","clk_jitter":"'$NTP_CLK_JITTER'","clk_wander":"'$NTP_WANDER'"},"version":"'$NTP_VERSION'","error":"0","errorString":""}' From 8481de55fcc16661e15e41bb079d0e56f113299d Mon Sep 17 00:00:00 2001 From: SourceDoctor Date: Tue, 5 Nov 2019 22:55:41 +0100 Subject: [PATCH 093/332] snmp extend for monitoring certificate file validity (#239) --- snmp/certificate.py | 79 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 79 insertions(+) create mode 100755 snmp/certificate.py diff --git a/snmp/certificate.py b/snmp/certificate.py new file mode 100755 index 000000000..8957b7c9f --- /dev/null +++ b/snmp/certificate.py @@ -0,0 +1,79 @@ +#!/usr/bin/env python3 + +import socket +import ssl +import datetime +import json + + +CONFIGFILE='/etc/snmp/certificate.json' +# {"domains": [ +# {"fqdn": "www.mydomain.com"}, +# {"fqdn": "www2.mydomain.com"} +# ] +# } + + +def get_certificate_data(domain, port=443): + + context = ssl.create_default_context() + conn = context.wrap_socket( + socket.socket(socket.AF_INET), + server_hostname=domain, + ) + # 3 second timeout because Lambda has runtime limitations + conn.settimeout(3.0) + + try: + conn.connect((domain, port)) + error_msg = None + except ConnectionRefusedError as e: + error_msg = e + ssl_info = conn.getpeercert() + return ssl_info, error_msg + + +output = {} +output['error'] = 0 +output['errorString'] = "" +output['version'] = 1 + +with open(CONFIGFILE, 'r') as json_file: + try: + configfile = json.load(json_file) + except json.decoder.JSONDecodeError as e: + output['error'] = 1 + output['errorString'] = "Configfile Error: '%s'" % e + +if not output['error']: + output_data_list = [] + for domain in configfile['domains']: + output_data = {} + + if 'port' not in domain.keys(): + domain['port'] = 443 + certificate_data, error_msg = get_certificate_data(domain['fqdn'], domain['port']) + + output_data['cert_name'] = domain['fqdn'] + + if not error_msg: + ssl_date_format = r'%b %d %H:%M:%S %Y %Z' + validity_end = datetime.datetime.strptime(certificate_data['notAfter'], ssl_date_format) + validity_start = datetime.datetime.strptime(certificate_data['notBefore'], ssl_date_format) + cert_age = datetime.datetime.now() - validity_start + cert_still_valid = validity_end - datetime.datetime.now() + + output_data['age'] = cert_age.days + output_data['remaining_days'] = cert_still_valid.days + + else: + output_data['age'] = None + output_data['remaining_days'] = None + output['error'] = 1 + output['errorString'] = "%s: %s" % (domain['fqdn'], error_msg) + + output_data_list.append(output_data) + + output['data'] = output_data_list + +print(json.dumps(output)) From dd27e7936d3cff94af59291cb0eb3a244db8ba9f Mon Sep 17 00:00:00 2001 From: Svennd Date: Mon, 25 Nov 2019 11:48:03 +0100 Subject: [PATCH 094/332] Update zfs-linux Traceback (most recent call last): File "./zfs-linux", line 178, in sys.exit(main(sys.argv[1:])) File "./zfs-linux", line 92, in main DEMAND_MISSES_PERCENT = DEMAND_DATA_MISSES / ARC_MISSES * 100 ZeroDivisionError: division by zero --- snmp/zfs-linux | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/snmp/zfs-linux b/snmp/zfs-linux index c5f36256c..87543f576 100644 --- a/snmp/zfs-linux +++ b/snmp/zfs-linux @@ -79,20 +79,20 @@ def main(args): ANON_HITS_PERCENT = ANON_HITS / ARC_HITS * 100 if ANON_HITS != 0 else 0 - MRU_PERCENT = MRU_HITS / ARC_HITS * 100 - MFU_PERCENT = MFU_HITS / ARC_HITS 
* 100 - MRU_GHOST_PERCENT = MRU_GHOST_HITS / ARC_HITS * 100 - MFU_GHOST_PERCENT = MFU_GHOST_HITS / ARC_HITS * 100 - - DEMAND_HITS_PERCENT = DEMAND_DATA_HITS / ARC_HITS * 100 - PREFETCH_HITS_PERCENT = PREFETCH_DATA_HITS / ARC_HITS * 100 - METADATA_HITS_PERCENT = DEMAND_METADATA_HITS / ARC_HITS * 100 - PREFETCH_METADATA_HITS_PERCENT = DEMAND_METADATA_HITS / ARC_HITS * 100 - - DEMAND_MISSES_PERCENT = DEMAND_DATA_MISSES / ARC_MISSES * 100 - PREFETCH_MISSES_PERCENT = PREFETCH_DATA_MISSES / ARC_MISSES * 100 - METADATA_MISSES_PERCENT = DEMAND_METADATA_MISSES / ARC_MISSES * 100 - PREFETCH_METADATA_MISSES_PERCENT = DEMAND_METADATA_MISSES / ARC_MISSES * 100 + MRU_PERCENT = MRU_HITS / ARC_HITS * 100 if ARC_HITS != 0 else 0 + MFU_PERCENT = MFU_HITS / ARC_HITS * 100 if ARC_HITS != 0 else 0 + MRU_GHOST_PERCENT = MRU_GHOST_HITS / ARC_HITS * 100 if ARC_HITS != 0 else 0 + MFU_GHOST_PERCENT = MFU_GHOST_HITS / ARC_HITS * 100 if ARC_HITS != 0 else 0 + + DEMAND_HITS_PERCENT = DEMAND_DATA_HITS / ARC_HITS * 100 if ARC_HITS != 0 else 0 + PREFETCH_HITS_PERCENT = PREFETCH_DATA_HITS / ARC_HITS * 100 if ARC_HITS != 0 else 0 + METADATA_HITS_PERCENT = DEMAND_METADATA_HITS / ARC_HITS * 100 if ARC_HITS != 0 else 0 + PREFETCH_METADATA_HITS_PERCENT = DEMAND_METADATA_HITS / ARC_HITS * 100 if ARC_HITS != 0 else 0 + + DEMAND_MISSES_PERCENT = DEMAND_DATA_MISSES / ARC_MISSES * 100 if ARC_MISSES != 0 else 0 + PREFETCH_MISSES_PERCENT = PREFETCH_DATA_MISSES / ARC_MISSES * 100 if ARC_MISSES != 0 else 0 + METADATA_MISSES_PERCENT = DEMAND_METADATA_MISSES / ARC_MISSES * 100 if ARC_MISSES != 0 else 0 + PREFETCH_METADATA_MISSES_PERCENT = DEMAND_METADATA_MISSES / ARC_MISSES * 100 if ARC_MISSES != 0 else 0 # pools proc = subprocess.run(['/sbin/zpool', 'list', '-pH'], stdout=subprocess.PIPE, universal_newlines=True) From bafc0ac1541bb2b92f7c8fc98498d3956d667203 Mon Sep 17 00:00:00 2001 From: Svennd Date: Tue, 10 Dec 2019 14:39:07 +0100 Subject: [PATCH 095/332] Update zfs-linux --- snmp/zfs-linux | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/snmp/zfs-linux b/snmp/zfs-linux index 37d747593..09b24cb11 100644 --- a/snmp/zfs-linux +++ b/snmp/zfs-linux @@ -117,10 +117,10 @@ def main(args): METADATA_HITS_PERCENT = DEMAND_METADATA_HITS / ARC_HITS * 100 if ARC_HITS != 0 else 0 PREFETCH_METADATA_HITS_PERCENT = DEMAND_METADATA_HITS / ARC_HITS * 100 if ARC_HITS != 0 else 0 - DEMAND_MISSES_PERCENT = DEMAND_DATA_MISSES / ARC_MISSES * 100 if ARC_HITS != 0 else 0 - PREFETCH_MISSES_PERCENT = PREFETCH_DATA_MISSES / ARC_MISSES * 100 if ARC_HITS != 0 else 0 - METADATA_MISSES_PERCENT = DEMAND_METADATA_MISSES / ARC_MISSES * 100 if ARC_HITS != 0 else 0 - PREFETCH_METADATA_MISSES_PERCENT = PREFETCH_METADATA_MISSES / ARC_MISSES * 100 if ARC_HITS != 0 else 0 + DEMAND_MISSES_PERCENT = DEMAND_DATA_MISSES / ARC_MISSES * 100 if ARC_MISSES != 0 else 0 + PREFETCH_MISSES_PERCENT = PREFETCH_DATA_MISSES / ARC_MISSES * 100 if ARC_MISSES != 0 else 0 + METADATA_MISSES_PERCENT = DEMAND_METADATA_MISSES / ARC_MISSES * 100 if ARC_MISSES != 0 else 0 + PREFETCH_METADATA_MISSES_PERCENT = PREFETCH_METADATA_MISSES / ARC_MISSES * 100 if ARC_MISSES != 0 else 0 # pools proc = subprocess.run(['/sbin/zpool', 'list', '-pH'], stdout=subprocess.PIPE, universal_newlines=True) From 44c798f6dcc232ae77acc7e59d1e9ffc721bdbdb Mon Sep 17 00:00:00 2001 From: Dan Langille Date: Tue, 24 Dec 2019 14:56:52 -0500 Subject: [PATCH 096/332] Repair error When running this script, I get: Useless use of multiplication (*) in void context at ./zfs-freebsd line 163 --- snmp/zfs-freebsd 
| 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/zfs-freebsd b/snmp/zfs-freebsd index 93e162476..f503bef13 100644 --- a/snmp/zfs-freebsd +++ b/snmp/zfs-freebsd @@ -160,7 +160,7 @@ my $actual_hit_percent = $real_hits / $arc_accesses_total * 100; my $data_demand_percent = 0; if ( $demand_data_total != 0 ){ - $demand_data_hits / $demand_data_total * 100; + $demand_data_hits /= $demand_data_total * 100; } my $data_prefetch_percent=0; From 8e5583ac7821e28b85660812b5310fee531407b2 Mon Sep 17 00:00:00 2001 From: Dmkaz Date: Mon, 13 Jan 2020 12:00:09 -0500 Subject: [PATCH 097/332] Fix zfs-freebsd.py Capacity Output 'zpool-list -pH' returns capacity as a percentage which needs to be stripped as well as account for null (-) values. Additionally, the output now also includes 'CKPOINT' which needs to be added to the fields array so it splits correctly. --- snmp/zfs-freebsd.py | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/snmp/zfs-freebsd.py b/snmp/zfs-freebsd.py index 3ef92c889..276717aa6 100644 --- a/snmp/zfs-freebsd.py +++ b/snmp/zfs-freebsd.py @@ -96,16 +96,21 @@ def chomp(line): if p.returncode != 0: return p.returncode output['pools'] = [] - fields = ['name', 'size', 'alloc', 'free', 'expandsz', 'frag', 'cap', 'dedup'] - for l in p.stdout.splitlines(): - p = dict(zip(fields, l.split('\t'))) - if p['expandsz'] == '-': - p['expandsz'] = 0 - p['frag'] = p['frag'].rstrip('%') - if p['frag'] == '-': - p['frag'] = 0 - p['dedup'] = p['dedup'].rstrip('x') - output['pools'].append(p) + fields = ['name', 'size', 'alloc', 'free', 'ckpoint', 'expandsz', 'frag', 'cap', 'dedup'] + for l in p.stdout.splitlines(): + p = dict(zip(fields, l.split('\t'))) + if p['ckpoint'] == '-': + p['ckpoint'] = 0 + if p['expandsz'] == '-': + p['expandsz'] = 0 + p['frag'] = p['frag'].rstrip('%') + if p['frag'] == '-': + p['frag'] = 0 + p['cap'] = p['cap'].rstrip('%') + if p['cap'] == '-': + p['cap'] = 0 + p['dedup'] = p['dedup'].rstrip('x') + output['pools'].append(p) print(json.dumps(output)) From 4e97a51a65aaeeb19be372b68d6b745dfde661ca Mon Sep 17 00:00:00 2001 From: SourceDoctor Date: Sat, 18 Jan 2020 19:37:09 +0100 Subject: [PATCH 098/332] Hotspare Count should not go below zero --- snmp/mdadm | 3 +++ 1 file changed, 3 insertions(+) diff --git a/snmp/mdadm b/snmp/mdadm index ed291bccd..8f8cffd5d 100755 --- a/snmp/mdadm +++ b/snmp/mdadm @@ -80,6 +80,9 @@ if [ -d /dev/md ] ; then RAID_MISSING_DEVICES=$RAID_MISSING_DEVICES']' let "RAID_HOTSPARE_COUNT=ALL_DEVICE_COUNT-RAID_DISC_COUNT" + if [ $RAID_HOTSPARE_COUNT -lt 0 ]; then + RAID_HOTSPARE_COUNT=0 + fi ARRAY_DATA='{'\ '"name":"'$RAID_NAME\ From 3ea93ab4d21dff963e2d4c2ba940dea9dd37708a Mon Sep 17 00:00:00 2001 From: SourceDoctor Date: Sat, 18 Jan 2020 21:50:12 +0100 Subject: [PATCH 099/332] adding space --- snmp/mdadm | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/mdadm b/snmp/mdadm index 8f8cffd5d..bc1d28291 100755 --- a/snmp/mdadm +++ b/snmp/mdadm @@ -80,7 +80,7 @@ if [ -d /dev/md ] ; then RAID_MISSING_DEVICES=$RAID_MISSING_DEVICES']' let "RAID_HOTSPARE_COUNT=ALL_DEVICE_COUNT-RAID_DISC_COUNT" - if [ $RAID_HOTSPARE_COUNT -lt 0 ]; then + if [ $RAID_HOTSPARE_COUNT -lt 0 ] ; then RAID_HOTSPARE_COUNT=0 fi From 9f461a5e16e33b448944692b77bdc9db62f37546 Mon Sep 17 00:00:00 2001 From: PipoCanaja <38363551+PipoCanaja@users.noreply.github.com> Date: Sun, 19 Jan 2020 10:40:39 +0100 Subject: [PATCH 100/332] fix $data_demand_percent calculation --- snmp/zfs-freebsd | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/snmp/zfs-freebsd b/snmp/zfs-freebsd index f503bef13..e4d27cf80 100644 --- a/snmp/zfs-freebsd +++ b/snmp/zfs-freebsd @@ -160,7 +160,7 @@ my $actual_hit_percent = $real_hits / $arc_accesses_total * 100; my $data_demand_percent = 0; if ( $demand_data_total != 0 ){ - $demand_data_hits /= $demand_data_total * 100; + $data_demand_percent = $demand_data_hits / $demand_data_total * 100; } my $data_prefetch_percent=0; From 74d6ac09b89b5306f74946d50f872d29f216647e Mon Sep 17 00:00:00 2001 From: PipoCanaja <38363551+PipoCanaja@users.noreply.github.com> Date: Sun, 19 Jan 2020 10:47:30 +0100 Subject: [PATCH 101/332] Update zfs-freebsd.py --- snmp/zfs-freebsd.py | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/snmp/zfs-freebsd.py b/snmp/zfs-freebsd.py index 276717aa6..2227598df 100644 --- a/snmp/zfs-freebsd.py +++ b/snmp/zfs-freebsd.py @@ -97,20 +97,20 @@ def chomp(line): return p.returncode output['pools'] = [] fields = ['name', 'size', 'alloc', 'free', 'ckpoint', 'expandsz', 'frag', 'cap', 'dedup'] - for l in p.stdout.splitlines(): - p = dict(zip(fields, l.split('\t'))) - if p['ckpoint'] == '-': - p['ckpoint'] = 0 - if p['expandsz'] == '-': - p['expandsz'] = 0 - p['frag'] = p['frag'].rstrip('%') - if p['frag'] == '-': - p['frag'] = 0 - p['cap'] = p['cap'].rstrip('%') - if p['cap'] == '-': - p['cap'] = 0 - p['dedup'] = p['dedup'].rstrip('x') - output['pools'].append(p) + for l in p.stdout.splitlines(): + p = dict(zip(fields, l.split('\t'))) + if p['ckpoint'] == '-': + p['ckpoint'] = 0 + if p['expandsz'] == '-': + p['expandsz'] = 0 + p['frag'] = p['frag'].rstrip('%') + if p['frag'] == '-': + p['frag'] = 0 + p['cap'] = p['cap'].rstrip('%') + if p['cap'] == '-': + p['cap'] = 0 + p['dedup'] = p['dedup'].rstrip('x') + output['pools'].append(p) print(json.dumps(output)) From 4e64a6284457bfeabce29928bec9142ba9be3850 Mon Sep 17 00:00:00 2001 From: SourceDoctor Date: Wed, 22 Jan 2020 01:52:20 +0100 Subject: [PATCH 102/332] pureftp snmpd extension (#269) --- snmp/pureftpd.py | 66 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 66 insertions(+) create mode 100755 snmp/pureftpd.py diff --git a/snmp/pureftpd.py b/snmp/pureftpd.py new file mode 100755 index 000000000..6f10770b0 --- /dev/null +++ b/snmp/pureftpd.py @@ -0,0 +1,66 @@ +#!/usr/bin/env python3 + +import os +import json + +CONFIGFILE = '/etc/snmp/pureftpd.json' + +pureftpwho_cmd = '/usr/sbin/pure-ftpwho' +pureftpwho_args = '-v -s -n' + + +output_data = {} +output_data['version'] = 1 +output_data['errorString'] = "" +output_data['error'] = 0 + + +if os.path.isfile(CONFIGFILE): + with open(CONFIGFILE, 'r') as json_file: + try: + configfile = json.load(json_file) + except json.decoder.JSONDecodeError as e: + output_data['error'] = 1 + output_data['errorString'] = "Configfile Error: '%s'" % e +else: + configfile = None + +if not output_data['error'] and configfile: + try: + if 'pureftpwho_cmd' in configfile.keys(): + pureftpwho_cmd = configfile['pureftpwho_cmd'] + except KeyError: + output_data['error'] = 1 + output_data['errorString'] = "Configfile Error: '%s'" % e + + +output = os.popen('sudo ' + pureftpwho_cmd + ' ' + pureftpwho_args).read() + +data = {} + +for line in output.split('\n'): + if not len(line): + continue + + pid, acct, time, state, file, peer, local, port, transfered, total, percent, bandwidth = line.split('|') + + if "IDLE" in state: + state = "IDLE" + elif "DL" in state: + state = "DL" + elif "UL" in state: + state = "UL" + + if acct 
not in data.keys(): + data[acct] = {} + if state not in data[acct]: + data[acct][state] = {'bitrate': 0, + 'connections': 0 + } + bandwidth_bit = int(bandwidth) * 1024 + data[acct][state]['bitrate'] += bandwidth_bit + data[acct][state]['connections'] += 1 + +output_data['data'] = data + +print (json.dumps(output_data)) From 3ae29bb40ea5460968cc881234ce66d54f9b89af Mon Sep 17 00:00:00 2001 From: fbourqui Date: Wed, 22 Jan 2020 11:35:58 +0100 Subject: [PATCH 103/332] fix useSN, selftest with disk defined as argument was resulting in /dev/dev.... parameter to smartctl -l and -i --- snmp/smart | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/snmp/smart b/snmp/smart index 75a02ff0e..3d8befdfe 100755 --- a/snmp/smart +++ b/snmp/smart @@ -260,13 +260,11 @@ foreach my $line ( @disks ){ $disk=$line; $name=$line; } - my $disk_sn=$disk; my $output; - if ( $disk =~ /\// ){ - $output=`$smartctl -A $disk`; - }else{ - $output=`$smartctl -A /dev/$disk`; + if ( $disk !~ /\// ){ + $disk = '/dev/'.$disk; } + $output=`$smartctl -A $disk`; my %IDs=( '5'=>'null', '10'=>'null', '173'=>'null', @@ -381,7 +379,7 @@ foreach my $line ( @disks ){ } #get the selftest logs - $output=`$smartctl -l selftest /dev/$disk`; + $output=`$smartctl -l selftest $disk`; @outputA=split( /\n/, $output ); my $completed=scalar grep(/Completed without error/, @outputA); my $interrupted=scalar grep(/Interrupted/, @outputA); @@ -395,7 +393,7 @@ foreach my $line ( @disks ){ # get the drive serial number, if needed my $disk_id=$name; if ( $useSN ){ - while (`$smartctl -i /dev/$disk` =~ /Serial Number:(.*)/g) { + while (`$smartctl -i $disk` =~ /Serial Number:(.*)/g) { $disk_id = $1; $disk_id =~ s/^\s+|\s+$//g; } From 6520489ecca33159a00a1fc6d54922be2c4d81f7 Mon Sep 17 00:00:00 2001 From: fbourqui Date: Thu, 30 Jan 2020 13:03:04 +0100 Subject: [PATCH 104/332] Store Crucial SSD #202 in generic #231 (#272) --- snmp/smart | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/snmp/smart b/snmp/smart index 75a02ff0e..652d55758 100755 --- a/snmp/smart +++ b/snmp/smart @@ -298,6 +298,12 @@ foreach my $line ( @disks ){ my $raw=$lineA[9]; my $id=$lineA[0]; + # Crucial SSD + # 202, Percent_Lifetime_Remain, same as 231, SSD Life Left + if ( $id == 202 ) { + $IDs{231}=$raw; + } + # single int raw values if ( ( $id == 5 ) || From 8330f0a654ffa9c4478321530e5a15ab2a4cdd58 Mon Sep 17 00:00:00 2001 From: Joseph Tingiris Date: Mon, 3 Feb 2020 12:46:36 -0500 Subject: [PATCH 105/332] asterisk add iax2 stats (#274) * asterisk add iax2 stats * fix iax2 unmonitored variable; removed unnecessary newline --- snmp/asterisk | 1 + 1 file changed, 1 insertion(+) diff --git a/snmp/asterisk b/snmp/asterisk index 109aec4a4..7c6b7a19a 100644 --- a/snmp/asterisk +++ b/snmp/asterisk @@ -13,6 +13,7 @@ then echo "<<>>" $ASCLI -rx "core show channels" | awk '/active calls/ { print "Calls=" $1 } /active channels/ { print "Channels=" $1}' $ASCLI -rx 'sip show peers' | awk '/sip peers/ { print "SipPeers=" $1 "\nSipMonOnline=" $5 "\nSipMonOffline=" $7 "\nSipUnMonOnline=" $10 "\nSipUnMonOffline=" $12}' + $ASCLI -rx 'iax2 show peers' | awk '/iax2 peers/ { gsub("\\[",""); gsub("\\]",""); print "Iax2Peers=" $1 "\nIax2Online=" $4 "\nIax2Offline=" $6 "\nIax2Unmonitored=" $8}' else exit 0 From 652ae6289992200bc9f461ff127fb9f08f6d8416 Mon Sep 17 00:00:00 2001 From: SourceDoctor Date: Wed, 5 Feb 2020 23:34:42 +0100 Subject: [PATCH 106/332] set MDADM sync complete to 0 if on degraded array --- snmp/mdadm | 8 +++++--- 1 file changed, 5 insertions(+), 3 
deletions(-)

diff --git a/snmp/mdadm b/snmp/mdadm
index bc1d28291..5265fe69f 100755
--- a/snmp/mdadm
+++ b/snmp/mdadm
@@ -47,10 +47,12 @@ if [ -d /dev/md ] ; then
         let "RAID_SYNC_SPEED=$($CAT $RAID/md/sync_speed)*1024"
     fi
 
-    if [ "$($CAT $RAID/md/sync_completed)" = "none" ] ; then
-        RAID_SYNC_COMPLETED=100
-    else
+    if [ "$($CAT $RAID/md/sync_completed)" != "none" ] ; then
         let "RAID_SYNC_COMPLETED=100*$($CAT $RAID/md/sync_completed)"
+    elif [ $RAID_DEGRADED -eq 1 ] ; then
+        RAID_SYNC_COMPLETED=0
+    else
+        RAID_SYNC_COMPLETED=100
     fi
 
     # divide with 2 to size like in /proc/mdstat

From 9f781d8549d64e381a5a3af2ccf616345afe6a05 Mon Sep 17 00:00:00 2001
From: SourceDoctor
Date: Wed, 12 Feb 2020 08:26:12 +0100
Subject: [PATCH 107/332] puppet agent monitoring script (#258)

---
 snmp/puppet_agent.py | 106 +++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 106 insertions(+)
 create mode 100755 snmp/puppet_agent.py

diff --git a/snmp/puppet_agent.py b/snmp/puppet_agent.py
new file mode 100755
index 000000000..9d0f343cb
--- /dev/null
+++ b/snmp/puppet_agent.py
@@ -0,0 +1,106 @@
+#!/usr/bin/env python3
+
+import json
+import yaml
+from os.path import isfile
+from time import time
+
+
+output = {}
+output['error'] = 0
+output['errorString'] = ""
+output['version'] = 1
+
+CONFIGFILE = '/etc/snmp/puppet.json'
+# optional config file
+# {
+#   "agent": {
+#     "summary_file": "/my/custom/path/to/summary_file"
+#   }
+# }
+
+
+summary_files = ['/var/cache/puppet/state/last_run_summary.yaml',
+                 '/opt/puppetlabs/puppet/cache/state/last_run_summary.yaml']
+
+
+def parse_yaml_file(filename):
+    try:
+        yaml_data = yaml.load(open(filename, 'r'))
+        msg = None
+    except yaml.scanner.ScannerError as e:
+        yaml_data = []
+        msg = str(e)
+    except yaml.parser.ParserError as e:
+        yaml_data = []
+        msg = str(e)
+
+    return msg, yaml_data
+
+
+def time_processing(data):
+    new_data = {}
+
+    for k in data.keys():
+        if k == 'last_run':
+            # generate difference to last run (seconds)
+            new_data[k] = round(time() - data[k])
+            continue
+        new_data[k] = round(data[k], 2)
+
+    return new_data
+
+
+def processing(data):
+    new_data = {}
+    for k in ['changes', 'events', 'resources', 'version']:
+        new_data[k] = data[k]
+
+    new_data['time'] = time_processing(data['time'])
+
+    return new_data
+
+
+# extend last_run_summary_file list with optional custom file
+if isfile(CONFIGFILE):
+    with open(CONFIGFILE, 'r') as json_file:
+        try:
+            configfile = json.load(json_file)
+        except json.decoder.JSONDecodeError as e:
+            output['error'] = 1
+            output['errorString'] = "Configfile Error: '%s'" % e
+else:
+    configfile = None
+
+if not output['error'] and configfile:
+    try:
+        if 'agent' in configfile.keys():
+            custom_summary_file = configfile['agent']['summary_file']
+            summary_files.insert(0, custom_summary_file)
+    except KeyError as e:
+        output['error'] = 1
+        output['errorString'] = "Configfile Error: '%s'" % e
+
+# search existing summary file from list
+if not output['error']:
+    summary_file = None
+    for sum_file in summary_files:
+        if isfile(sum_file):
+            summary_file = sum_file
+            break
+
+    if not summary_file:
+        output['error'] = 1
+        output['errorString'] = "no puppet agent run summary file found"
+
+# open summary file
+if not output['error']:
+    msg, data = parse_yaml_file(summary_file)
+
+    if msg:
+        output['error'] = 1
+        output['errorString'] = msg
+
+output['data'] = processing(data) if not output['error'] else {}
+
+print (json.dumps(output))
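The puppet_agent.py extend above reads an optional /etc/snmp/puppet.json before falling back to the built-in summary-file locations, so the only host-side wiring is the snmpd extend entry and, if needed, that config file. A minimal sketch of both, assuming the script is installed as /etc/snmp/puppet_agent.py and that the extend name matches what the LibreNMS puppet-agent application polls (both the install path and the extend name are assumptions to adjust locally, not taken from this patch):

    # /etc/snmp/snmpd.conf -- register the script as an SNMP extend
    extend puppet-agent /etc/snmp/puppet_agent.py

    # /etc/snmp/puppet.json -- optional, only for a non-default summary file
    {
        "agent": {
            "summary_file": "/my/custom/path/to/summary_file"
        }
    }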
From 2820e53dbc3b6053f28890fdd2d05b4ab9ee085d Mon Sep 17 00:00:00 2001
From: Alex R
Date: Thu, 13 Feb 2020 13:27:12 +0100
Subject: [PATCH 108/332] Added a check for whether sudo is required, altering
 $picmd to become "sudo $picmd" (#273)

This will enable the script to run on LibreELEC images on a Raspberry Pi,
for example.
---
 snmp/raspberry.sh | 54 ++++++++++++++++++++++++-----------------------
 1 file changed, 28 insertions(+), 26 deletions(-)

diff --git a/snmp/raspberry.sh b/snmp/raspberry.sh
index f5c57f827..72ec0616f 100755
--- a/snmp/raspberry.sh
+++ b/snmp/raspberry.sh
@@ -19,29 +19,31 @@ getStatusMPG4='codec_enabled MPG4'
 getStatusMJPG='codec_enabled MJPG'
 getStatusWMV9='codec_enabled WMV9'
 
-sudo $picmd $getTemp | $pised 's|[^0-9.]||g'
-sudo $picmd $getVoltsCore | $pised 's|[^0-9.]||g'
-sudo $picmd $getVoltsRamC | $pised 's|[^0-9.]||g'
-sudo $picmd $getVoltsRamI | $pised 's|[^0-9.]||g'
-sudo $picmd $getVoltsRamP | $pised 's|[^0-9.]||g'
-sudo $picmd $getFreqArm | $pised 's/frequency(45)=//g'
-sudo $picmd $getFreqCore | $pised 's/frequency(1)=//g'
-sudo $picmd $getStatusH264 | $pised 's/H264=//g'
-sudo $picmd $getStatusMPG2 | $pised 's/MPG2=//g'
-sudo $picmd $getStatusWVC1 | $pised 's/WVC1=//g'
-sudo $picmd $getStatusMPG4 | $pised 's/MPG4=//g'
-sudo $picmd $getStatusMJPG | $pised 's/MJPG=//g'
-sudo $picmd $getStatusWMV9 | $pised 's/WMV9=//g'
-sudo $picmd $getStatusH264 | $pised 's/enabled/2/g'
-sudo $picmd $getStatusMPG2 | $pised 's/enabled/2/g'
-sudo $picmd $getStatusWVC1 | $pised 's/enabled/2/g'
-sudo $picmd $getStatusMPG4 | $pised 's/enabled/2/g'
-sudo $picmd $getStatusMJPG | $pised 's/enabled/2/g'
-sudo $picmd $getStatusWMV9 | $pised 's/enabled/2/g'
-sudo $picmd $getStatusWMV9 | $pised 's/enabled/2/g'
-sudo $picmd $getStatusH264 | $pised 's/disabled/1/g'
-sudo $picmd $getStatusMPG2 | $pised 's/disabled/1/g'
-sudo $picmd $getStatusWVC1 | $pised 's/disabled/1/g'
-sudo $picmd $getStatusMPG4 | $pised 's/disabled/1/g'
-sudo $picmd $getStatusMJPG | $pised 's/disabled/1/g'
-sudo $picmd $getStatusWMV9 | $pised 's/disabled/1/g'
+[ $(id -u) -eq 0 ] || picmd="sudo $picmd"
+
+$picmd $getTemp | $pised 's|[^0-9.]||g'
+$picmd $getVoltsCore | $pised 's|[^0-9.]||g'
+$picmd $getVoltsRamC | $pised 's|[^0-9.]||g'
+$picmd $getVoltsRamI | $pised 's|[^0-9.]||g'
+$picmd $getVoltsRamP | $pised 's|[^0-9.]||g'
+$picmd $getFreqArm | $pised 's/frequency(45)=//g'
+$picmd $getFreqCore | $pised 's/frequency(1)=//g'
+$picmd $getStatusH264 | $pised 's/H264=//g'
+$picmd $getStatusMPG2 | $pised 's/MPG2=//g'
+$picmd $getStatusWVC1 | $pised 's/WVC1=//g'
+$picmd $getStatusMPG4 | $pised 's/MPG4=//g'
+$picmd $getStatusMJPG | $pised 's/MJPG=//g'
+$picmd $getStatusWMV9 | $pised 's/WMV9=//g'
+$picmd $getStatusH264 | $pised 's/enabled/2/g'
+$picmd $getStatusMPG2 | $pised 's/enabled/2/g'
+$picmd $getStatusWVC1 | $pised 's/enabled/2/g'
+$picmd $getStatusMPG4 | $pised 's/enabled/2/g'
+$picmd $getStatusMJPG | $pised 's/enabled/2/g'
+$picmd $getStatusWMV9 | $pised 's/enabled/2/g'
+$picmd $getStatusWMV9 | $pised 's/enabled/2/g'
+$picmd $getStatusH264 | $pised 's/disabled/1/g'
+$picmd $getStatusMPG2 | $pised 's/disabled/1/g'
+$picmd $getStatusWVC1 | $pised 's/disabled/1/g'
+$picmd $getStatusMPG4 | $pised 's/disabled/1/g'
+$picmd $getStatusMJPG | $pised 's/disabled/1/g'
+$picmd $getStatusWMV9 | $pised 's/disabled/1/g'
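Since the extend now prepends sudo only when it is not already running as root, a stock snmpd running under its own user still needs a matching sudoers entry for the underlying command. A minimal sketch, assuming $picmd resolves to /usr/bin/vcgencmd and that snmpd runs as the snmp user (neither is shown in this hunk, so adjust both to the actual install):

    # /etc/sudoers.d/snmp -- let snmpd run the Pi firmware query tool
    snmp ALL=(ALL) NOPASSWD: /usr/bin/vcgencmd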
From 257aa598f580531567b6b77eb07c57de8e5e3749 Mon Sep 17 00:00:00 2001
From: crcro
Date: Thu, 13 Feb 2020 14:27:36 +0200
Subject: [PATCH 109/332] mailcow-dockerized postfix stats (#270)

* initial release

* added requirements info
---
 snmp/mailcow-dockerized-postfix | 90 +++++++++++++++++++++++++++++++++
 1 file changed, 90 insertions(+)
 create mode 100644 snmp/mailcow-dockerized-postfix

diff --git a/snmp/mailcow-dockerized-postfix b/snmp/mailcow-dockerized-postfix
new file mode 100644
index 000000000..354f14fbd
--- /dev/null
+++ b/snmp/mailcow-dockerized-postfix
@@ -0,0 +1,90 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2020 Cercel Valentin-Adrian
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# mailcow-dockerized postfix stats
+# please adjust librenms_poller_interval according to your LibreNMS setup - defaults to 5 minutes
+# requirements: mailcow-dockerized and pflogsumm
+#

+import subprocess
+import re
+import json
+
+# LibreNMS poller interval
+librenms_poller_interval = 300
+
+
+def libre_to_mcd_postfix(libre_seconds):
+    return str(int(libre_seconds / 60))
+
+
+def cli_get_docker_container():
+    return subprocess.check_output("docker ps -qf name=postfix-mailcow", shell=True).decode('utf8').strip()
+
+
+def cli_command():
+    cli_part = "docker logs --since " + libre_to_mcd_postfix(librenms_poller_interval) \
+               + "m " + cli_get_docker_container() + "| pflogsumm --smtpd-stats"
+    return cli_part
+
+
+def get_output():
+    return subprocess.check_output(cli_command(), shell=True).decode('utf8')
+
+
+def output_cleaning(raw):
+    output = re.split('\n', raw)
+    return list(filter(None, output))
+
+
+def entry_generator(raw):
+    entry = re.sub(' +', ':', raw.strip())
+    return entry.split(':')
+
+
+# limit our needed output
+mcd_postfix_data = get_output().split('messages')
+data = mcd_postfix_data[1].split('smtpd')
+
+# postfix stats only
+mcd_postfix_info = data[0]
+# smtpd stats only
+mcd_smtpd_info = data[1].split('Per-Hour Traffic Summary')[0]
+
+# postfix stats export
+mcd_postfix = output_cleaning(mcd_postfix_info)
+
+points_data = []
+points_label = []
+for entry in mcd_postfix:
+    data_labels = entry_generator(entry)
+
+    if data_labels[0].find('k') == -1:
+        points_data.append(data_labels[0])
+    else:
+        data_point = data_labels[0].replace('k', '', 1)
+        data_point = int(data_point) * 1024
+        points_data.append(data_point)
+
+    points_label.append(re.sub('[^a-zA-Z]+', '', data_labels[1]))
+
+entries = dict(zip(points_label, points_data))
+export = {"data": entries, "error": "0", "errorString": "", "version": "1"}
+data = re.sub(' ', '', json.dumps(export))
+print(data)

From a4599dee561102a81dec64f4538636ca4bdc5f24 Mon Sep 17 00:00:00 2001
From: Joseph Tingiris
Date: Thu, 13 Feb 2020 07:29:10 -0500
Subject: [PATCH 110/332] apache-stats.sh: shell script that produces LibreNMS
 apache application output. The only dependency is curl. 
(#263) --- snmp/apache-stats.sh | 213 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 213 insertions(+) create mode 100755 snmp/apache-stats.sh diff --git a/snmp/apache-stats.sh b/snmp/apache-stats.sh new file mode 100755 index 000000000..b1a9a06db --- /dev/null +++ b/snmp/apache-stats.sh @@ -0,0 +1,213 @@ +#!/usr/bin/env sh + +# This script produces LibreNMS apache-stats output. The only dependency is curl. + +# 20200102, joseph.tingiris@gmail.com + +PATH=/sbin:/bin:/usr/sbin:/usr/bin + +# +# Functions +# + +function debugecho() { + if [ ${#Debug} -gt 0 ]; then + echo debug: $@ + fi +} + +# +# Globals +# + +Tmp_File=/tmp/apache_status + +# Debug=on; use environment, i.e. Debug=on apache-stats.sh +if [ "${DEBUG}" != "" ]; then + Debug=${DEBUG} +else + if [ "${Debug}" != "" ]; then + Debug=${Debug} + fi +fi + +# set default values to U; not all apache's have all stats +Total_Accesses="U" +Total_kBytes="U" +CPULoad="U" +Uptime="U" +ReqPerSec="U" +BytesPerSec="U" +BytesPerReq="U" +BusyWorkers="U" +IdleWorkers="U" +Scoreboard="U" + +# set default scoreboard counters to 0 +let Scoreboard_=0 +let ScoreboardDot=0 +let ScoreboardC=0 +let ScoreboardD=0 +let ScoreboardG=0 +let ScoreboardI=0 +let ScoreboardK=0 +let ScoreboardL=0 +let ScoreboardR=0 +let ScoreboardS=0 +let ScoreboardW=0 + +# +# Main +# + +curl --silent --fail "http://localhost/server-status?auto" -o ${Tmp_File} &> /dev/null +if [ $? -ne 0 ]; then + # curl failed + exit 1 +fi + +if [ ! -s ${Tmp_File} ]; then + # empty output + exit 1 +fi + +while read Line; do + Field=${Line%:*} + Value=${Line#*: } + + debugecho "Line: ${Line}" + debugecho "Field: ${Field}" + debugecho "Value: ${Value}" + debugecho + + if [ "${Field}" == "Total Accesses" ]; then + Total_Accesses=${Value} + fi + + if [ "${Field}" == "Total kBytes" ]; then + Total_kBytes=${Value} + fi + + if [ "${Field}" == "CPULoad" ]; then + CPULoad=${Value} + fi + + if [ "${Field}" == "Uptime" ]; then + Uptime=${Value} + fi + + if [ "${Field}" == "ReqPerSec" ]; then + ReqPerSec=${Value} + fi + + if [ "${Field}" == "BytesPerSec" ]; then + BytesPerSec=${Value} + fi + + if [ "${Field}" == "BytesPerReq" ]; then + BytesPerReq=${Value} + fi + + if [ "${Field}" == "BusyWorkers" ]; then + BusyWorkers=${Value} + fi + + if [ "${Field}" == "IdleWorkers" ]; then + IdleWorkers=${Value} + fi + + if [ "${Field}" == "Scoreboard" ]; then + Scoreboard=${Value} + fi + +done < ${Tmp_File} + +# value output order must be this ... +echo "${Total_Accesses}" +echo "${Total_kBytes}" +echo "${CPULoad}" +echo "${Uptime}" +echo "${ReqPerSec}" +echo "${BytesPerSec}" +echo "${BytesPerReq}" +echo "${BusyWorkers}" +echo "${IdleWorkers}" + +debugecho "Scoreboard = ${Scoreboard}" +for (( c=0; c<${#Scoreboard}; c++ )); do + + if [ "${Scoreboard:$c:1}" == "_" ]; then + let Scoreboard_=${Scoreboard_}+1 + continue + fi + + if [ "${Scoreboard:$c:1}" == "." 
]; then
+        let ScoreboardDot=${ScoreboardDot}+1
+        continue
+    fi
+
+    if [ "${Scoreboard:$c:1}" == "C" ]; then
+        let ScoreboardC=${ScoreboardC}+1
+        continue
+    fi
+
+    if [ "${Scoreboard:$c:1}" == "D" ]; then
+        let ScoreboardD=${ScoreboardD}+1
+        continue
+    fi
+
+    if [ "${Scoreboard:$c:1}" == "G" ]; then
+        let ScoreboardG=${ScoreboardG}+1
+        continue
+    fi
+
+    if [ "${Scoreboard:$c:1}" == "I" ]; then
+        let ScoreboardI=${ScoreboardI}+1
+        continue
+    fi
+
+    if [ "${Scoreboard:$c:1}" == "K" ]; then
+        let ScoreboardK=${ScoreboardK}+1
+        continue
+    fi
+
+    if [ "${Scoreboard:$c:1}" == "L" ]; then
+        let ScoreboardL=${ScoreboardL}+1
+        continue
+    fi
+
+    if [ "${Scoreboard:$c:1}" == "R" ]; then
+        let ScoreboardR=${ScoreboardR}+1
+        continue
+    fi
+
+    if [ "${Scoreboard:$c:1}" == "S" ]; then
+        let ScoreboardS=${ScoreboardS}+1
+        continue
+    fi
+
+    if [ "${Scoreboard:$c:1}" == "W" ]; then
+        let ScoreboardW=${ScoreboardW}+1
+        continue
+    fi
+
+    debugecho "${Scoreboard:$c:1}"
+done
+
+# scoreboard output order must be this ...
+echo ${Scoreboard_}
+echo ${ScoreboardS}
+echo ${ScoreboardR}
+echo ${ScoreboardW}
+echo ${ScoreboardK}
+echo ${ScoreboardD}
+echo ${ScoreboardC}
+echo ${ScoreboardL}
+echo ${ScoreboardG}
+echo ${ScoreboardI}
+echo ${ScoreboardDot}
+
+# clean up
+if [ -f ${Tmp_File} ]; then
+    rm -f ${Tmp_File} &> /dev/null
+fi

From 0c74ab10ceb4120904c278492088ffe8454689a2 Mon Sep 17 00:00:00 2001
From: "David P. Discher"
Date: Wed, 19 Feb 2020 00:08:57 -0800
Subject: [PATCH 111/332] Completely breaks librenms metric collections, let's
 actually do the assignment. (#276)

Perl error "Useless use of multiplication (*) in void context at
/etc/snmp.d/zfs-freebsd line 163."
---
 snmp/zfs-freebsd | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/snmp/zfs-freebsd b/snmp/zfs-freebsd
index 93e162476..e4d27cf80 100644
--- a/snmp/zfs-freebsd
+++ b/snmp/zfs-freebsd
@@ -160,7 +160,7 @@ my $actual_hit_percent = $real_hits / $arc_accesses_total * 100;
 
 my $data_demand_percent = 0;
 if ( $demand_data_total != 0 ){
-	$demand_data_hits / $demand_data_total * 100;
+	$data_demand_percent = $demand_data_hits / $demand_data_total * 100;
 }
 
 my $data_prefetch_percent=0;

From 80c5bf33e34d4934f89b5d9430ddac83ff21e465 Mon Sep 17 00:00:00 2001
From: SourceDoctor
Date: Fri, 28 Feb 2020 21:33:27 +0100
Subject: [PATCH 112/332] remove unneeded Squid parsing script

---
 snmp/squid | 74 ------------------------------------------------------
 1 file changed, 74 deletions(-)
 delete mode 100644 snmp/squid

diff --git a/snmp/squid b/snmp/squid
deleted file mode 100644
index 633db4170..000000000
--- a/snmp/squid
+++ /dev/null
@@ -1,74 +0,0 @@
-#!/bin/sh
-
-# Add this to snmpd.conf as below.
-# extend squid /etc/snmp/squid
-
-# To get this working smoothly and securely, you can add the items below to your squid.conf. 
-# acl snmppublic snmp_community public -# snmp_port 3401 -# snmp_access allow snmppublic localhost -# snmp_access deny all - - -# set this as being equal to the value of 'acl snmppublic snmp_community' in squid.conf -community='public' - -# set this as being equal to the value of 'snmp_port' in squid.conf -port='3401' - -# the full path to snmpwalk -snmpwalk='/usr/bin/env snmpwalk' - -## -## Nothing Should Need Changed Below Here -## - -# cacheMemMaxSize Integer32 -# cacheSwapMaxSize Integer32 -# cacheSwapHighWM Integer32 -# cacheSwapLowWM Integer32 -$snmpwalk -O qv -c $community -v 2c localhost:$port 1.3.6.1.4.1.3495.1.2.5 - -# cacheSysPageFaults Counter32 -# cacheSysNumReads Counter32 -# cacheMemUsage Integer32 -# cacheCpuTime Integer32 -# cacheCpuUsage Integer32 -# cacheMaxResSize Integer32 -# cacheNumObjCount Gauge32 -# cacheCurrentLRUExpiration Timeticks -# Storage LRU Expiration Age -# cacheCurrentUnlinkRequests Gauge32 -# cacheCurrentUnusedFDescrCnt Gauge32 -# cacheCurrentResFileDescrCnt Gauge32 -# cacheCurrentFileDescrCnt Gauge32 -# cacheCurrentFileDescrMax Gauge32 -$snmpwalk -O qv -c $community -v 2c localhost:$port 1.3.6.1.4.1.3495.1.3.1 - -# cacheProtoClientHttpRequests Counter32 -# cacheHttpHits Counter32 -# cacheHttpErrors Counter32 -# cacheHttpInKb Counter32 -# cacheHttpOutKb Counter32 -# cacheIcpPktsSent Counter32 -# cacheIcpPktsRecv Counter32 -# cacheIcpKbSent Counter32 -# cacheIcpKbRecv Counter32 -# cacheServerRequests Integer32 -# cacheServerErrors Integer32 -# cacheServerInKb Counter32 -# cacheServerOutKb Counter32 -# cacheCurrentSwapSize Gauge32 -# cacheClients Gauge32 -$snmpwalk -O qv -c $community -v 2c localhost:$port 1.3.6.1.4.1.3495.1.3.2.1 - -# cacheRequestHitRatio.1 Integer32 -# cacheRequestHitRatio.5 Integer32 -# cacheRequestHitRatio.60 Integer32 -$snmpwalk -O qv -c $community -v 2c localhost:$port 1.3.6.1.4.1.3495.1.3.2.2.1.9 - -# cacheRequestByteRatio.1 Integer32 -# cacheRequestByteRatio.5 Integer32 -# cacheRequestByteRatio.60 Integer32 -$snmpwalk -O qv -c $community -v 2c localhost:$port 1.3.6.1.4.1.3495.1.3.2.2.1.10 - From 5d9bf9f22593fcbc70250d887872cc9c175641fd Mon Sep 17 00:00:00 2001 From: SourceDoctor Date: Sat, 21 Mar 2020 22:13:22 +0100 Subject: [PATCH 113/332] Pureftpd Byte to bit --- snmp/pureftpd.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/pureftpd.py b/snmp/pureftpd.py index 6f10770b0..1c768e1ba 100755 --- a/snmp/pureftpd.py +++ b/snmp/pureftpd.py @@ -57,7 +57,7 @@ data[acct][state] = {'bitrate': 0, 'connections': 0 } - bandwidth_bit = int(bandwidth) * 1024 + bandwidth_bit = int(bandwidth) * 1024 * 8 data[acct][state]['bitrate'] += bandwidth_bit data[acct][state]['connections'] += 1 From bb1d61e2d942fdab265b77cf1c2c21974adfca97 Mon Sep 17 00:00:00 2001 From: Anael Mobilia Date: Wed, 8 Apr 2020 11:40:41 +0200 Subject: [PATCH 114/332] Fix ssl.SSLCertVerificationError If the certificate is already expired, the script crash. 
Fix #286 --- snmp/certificate.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/snmp/certificate.py b/snmp/certificate.py index 8957b7c9f..e81502bdf 100755 --- a/snmp/certificate.py +++ b/snmp/certificate.py @@ -24,12 +24,14 @@ def get_certificate_data(domain, port=443): # 3 second timeout because Lambda has runtime limitations conn.settimeout(3.0) + ssl_info = None try: conn.connect((domain, port)) error_msg = None - except ConnectionRefusedError as e: + ssl_info = conn.getpeercert() + except (ConnectionRefusedError, ssl.SSLCertVerificationError) as e: error_msg = e - ssl_info = conn.getpeercert() + return ssl_info, error_msg From a19293219b81d8e11404e7328c313e662059e4d0 Mon Sep 17 00:00:00 2001 From: Anael Mobilia Date: Wed, 8 Apr 2020 17:59:00 +0200 Subject: [PATCH 115/332] Return values to librenms application in order to log the error --- snmp/certificate.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/snmp/certificate.py b/snmp/certificate.py index e81502bdf..e5ed5ea71 100755 --- a/snmp/certificate.py +++ b/snmp/certificate.py @@ -24,13 +24,21 @@ def get_certificate_data(domain, port=443): # 3 second timeout because Lambda has runtime limitations conn.settimeout(3.0) - ssl_info = None + error_msg = None + ssl_info = {} try: conn.connect((domain, port)) error_msg = None ssl_info = conn.getpeercert() - except (ConnectionRefusedError, ssl.SSLCertVerificationError) as e: + except ConnectionRefusedError as e: error_msg = e + # Manage expired certificates + except ssl.SSLCertVerificationError as e: + # Arbitrary start date + ssl_info['notBefore'] = "Jan 1 00:00:00 2020 GMT" + # End date is now (we don't have the real one but the certificate is expired) + one_minute_further = datetime.datetime.now() + datetime.timedelta(minutes=1) + ssl_info['notAfter'] = one_minute_further.strftime('%b %d %H:%M:%S %Y GMT') return ssl_info, error_msg From 359a7b56309c8a2ed7d41c07b2d1d7d4ae28cbc7 Mon Sep 17 00:00:00 2001 From: Anael Mobilia Date: Wed, 8 Apr 2020 17:59:44 +0200 Subject: [PATCH 116/332] Fix redundant line --- snmp/certificate.py | 1 - 1 file changed, 1 deletion(-) diff --git a/snmp/certificate.py b/snmp/certificate.py index e5ed5ea71..d97f66f12 100755 --- a/snmp/certificate.py +++ b/snmp/certificate.py @@ -28,7 +28,6 @@ def get_certificate_data(domain, port=443): ssl_info = {} try: conn.connect((domain, port)) - error_msg = None ssl_info = conn.getpeercert() except ConnectionRefusedError as e: error_msg = e From 7e6b19f26c05ee48fb93db98b8efdb53e209c4bb Mon Sep 17 00:00:00 2001 From: FingerlessGloves Date: Sun, 12 Apr 2020 17:17:16 +0100 Subject: [PATCH 117/332] Make python3 version of Nginx extend the default (#284) * Rename nginx to nginx-python2, as python2 is EOL Rename Nginx to Nginx-python2, as python2 is EOL. Shall make python3 version the default version now. Keeping python2 version for legacy. 
* Rename nginx-python3.py to nginx --- snmp/nginx | 38 +++++++++++++++++++------------------- snmp/nginx-python2 | 28 ++++++++++++++++++++++++++++ snmp/nginx-python3.py | 28 ---------------------------- 3 files changed, 47 insertions(+), 47 deletions(-) create mode 100755 snmp/nginx-python2 delete mode 100755 snmp/nginx-python3.py diff --git a/snmp/nginx b/snmp/nginx index 06efab6e6..e2a64118d 100755 --- a/snmp/nginx +++ b/snmp/nginx @@ -1,28 +1,28 @@ -#!/usr/bin/env python2 -import urllib2 +#!/usr/bin/env python3 +from urllib.request import urlopen import re -data = urllib2.urlopen('http://localhost/nginx-status').read() +data = urlopen('http://localhost/nginx-status').read() params = {} -for line in data.split("\n"): - smallstat = re.match(r"\s?Reading:\s(.*)\sWriting:\s(.*)\sWaiting:\s(.*)$", line) - req = re.match(r"\s+(\d+)\s+(\d+)\s+(\d+)", line) - if smallstat: - params["Reading"] = smallstat.group(1) - params["Writing"] = smallstat.group(2) - params["Waiting"] = smallstat.group(3) - elif req: - params["Requests"] = req.group(3) - else: - pass +for line in data.decode().split("\n"): + smallstat = re.match(r"\s?Reading:\s(.*)\sWriting:\s(.*)\sWaiting:\s(.*)$", line) + req = re.match(r"\s+(\d+)\s+(\d+)\s+(\d+)", line) + if smallstat: + params["Reading"] = smallstat.group(1) + params["Writing"] = smallstat.group(2) + params["Waiting"] = smallstat.group(3) + elif req: + params["Requests"] = req.group(3) + else: + pass dataorder = ["Active", "Reading", "Writing", "Waiting", "Requests"] for param in dataorder: - if param == "Active": - Active = int(params["Reading"]) + int(params["Writing"]) + int(params["Waiting"]) - print Active - else: - print params[param] + if param == "Active": + Active = int(params["Reading"]) + int(params["Writing"]) + int(params["Waiting"]) + print(Active) + else: + print(params[param]) diff --git a/snmp/nginx-python2 b/snmp/nginx-python2 new file mode 100755 index 000000000..06efab6e6 --- /dev/null +++ b/snmp/nginx-python2 @@ -0,0 +1,28 @@ +#!/usr/bin/env python2 +import urllib2 +import re + +data = urllib2.urlopen('http://localhost/nginx-status').read() + +params = {} + +for line in data.split("\n"): + smallstat = re.match(r"\s?Reading:\s(.*)\sWriting:\s(.*)\sWaiting:\s(.*)$", line) + req = re.match(r"\s+(\d+)\s+(\d+)\s+(\d+)", line) + if smallstat: + params["Reading"] = smallstat.group(1) + params["Writing"] = smallstat.group(2) + params["Waiting"] = smallstat.group(3) + elif req: + params["Requests"] = req.group(3) + else: + pass + +dataorder = ["Active", "Reading", "Writing", "Waiting", "Requests"] + +for param in dataorder: + if param == "Active": + Active = int(params["Reading"]) + int(params["Writing"]) + int(params["Waiting"]) + print Active + else: + print params[param] diff --git a/snmp/nginx-python3.py b/snmp/nginx-python3.py deleted file mode 100755 index e2a64118d..000000000 --- a/snmp/nginx-python3.py +++ /dev/null @@ -1,28 +0,0 @@ -#!/usr/bin/env python3 -from urllib.request import urlopen -import re - -data = urlopen('http://localhost/nginx-status').read() - -params = {} - -for line in data.decode().split("\n"): - smallstat = re.match(r"\s?Reading:\s(.*)\sWriting:\s(.*)\sWaiting:\s(.*)$", line) - req = re.match(r"\s+(\d+)\s+(\d+)\s+(\d+)", line) - if smallstat: - params["Reading"] = smallstat.group(1) - params["Writing"] = smallstat.group(2) - params["Waiting"] = smallstat.group(3) - elif req: - params["Requests"] = req.group(3) - else: - pass - -dataorder = ["Active", "Reading", "Writing", "Waiting", "Requests"] - -for param in dataorder: - if 
param == "Active": - Active = int(params["Reading"]) + int(params["Writing"]) + int(params["Waiting"]) - print(Active) - else: - print(params[param]) From 3b7d73b7b3cf98d23818a621cf8c431b16eb02b2 Mon Sep 17 00:00:00 2001 From: Kees van Veen Date: Tue, 14 Apr 2020 16:42:33 +0200 Subject: [PATCH 118/332] added option to use portno in arguments --- snmp/portactivity | 83 ++++++++++++++++++++++++++++++++--------------- 1 file changed, 57 insertions(+), 26 deletions(-) diff --git a/snmp/portactivity b/snmp/portactivity index 9965e0d69..c44142de9 100755 --- a/snmp/portactivity +++ b/snmp/portactivity @@ -96,7 +96,7 @@ sub main::VERSION_MESSAGE { sub main::HELP_MESSAGE { print "\n". - "-p A comma seperated list of TCP protocols to check for in netstat.\n". + "-p A comma seperated list of TCP protocols (names or numbers) to check for in netstat.\n". "-P Print the output in a human readable manner.\n"; } @@ -196,11 +196,11 @@ my %valid_states=( 'UNKNOWN'=>1, ); -#gets the options +# gets the options my %opts=(); getopts('p:P', \%opts); -#what will be returned +# what will be returned my %to_return; $to_return{error}='0'; $to_return{errorString}=''; @@ -213,34 +213,42 @@ if (! defined( $opts{p} ) ){ exit 1; } -#the list of protocols to check for -my @protos_array=split(/\,/, $opts{p}); +# the list of arguments to check for +my @argument_array=split(/\,/, $opts{p}); +# the discovered protocols +my @protos_array=(); -#holds the various protocol hashes +# holds the various protocol hashes my %protos; my %proto_lookup; -#make sure each one specificied is defined and build the hash that will be returned +# the final data to present +my %data_collection; + +# make sure each one specificied is defined and build the hash that will be returned my $protos_array_int=0; -while ( defined( $protos_array[$protos_array_int] ) ){ - $protos{ $protos_array[$protos_array_int] }=newProto; - - #check if it exists - my $port=getservbyname( $protos_array[$protos_array_int] , 'tcp' ); - - # if it is not defined, then we error - if ( !defined( $port ) ){ - $to_return{errorString}='"'.$protos_array[$protos_array_int].'" is not a known service either add it or double check your spelling'; - $to_return{error}=4; - return_json(\%to_return, $opts{P}); - exit 4; +while ( defined( $argument_array[$protos_array_int] ) ){ + + # collect the argument + my $arg = $argument_array[$protos_array_int]; + + # Check if it is a port number .. 
+ if ( $arg !~ /^-?\d+$/) { + # no it is a service name + $arg = getservbyname( $arg , 'tcp' ); } - $proto_lookup{ $port } = $protos_array [$protos_array_int ]; + # get a new protos online, by portno + $protos{ $arg } = newProto; + # add it to the "protos_array" && "proto_lookup" + push(@protos_array,$arg); + $proto_lookup{ $arg } = $arg; $protos_array_int++; } + + my $netstat='netstat -n'; my $os=$^O; @@ -269,15 +277,16 @@ while ( defined( $res->[2]{'active_conns'}[$active_conns_int] ) ){ ){ $protos_array_int=0; my $service; + while( ( defined( $protos_array[ $protos_array_int ] ) ) && ( !defined( $service ) ) #stop once we find it ){ + #check if this matches either ports - if ( - ( defined($proto_lookup{ $conn->{'local_port'} }) ) || - ( defined($proto_lookup{ $conn->{'foreign_port'} }) ) - ){ + if ( $conn->{'local_port'} eq int ( $protos_array[ $protos_array_int ] ) ) { + $service=$protos_array[ $protos_array_int ]; + } elsif ( $conn->{'foreign_port'} eq int ( $protos_array[ $protos_array_int ] ) ) { $service=$protos_array[ $protos_array_int ]; } @@ -291,6 +300,7 @@ while ( defined( $res->[2]{'active_conns'}[$active_conns_int] ) ){ my $state=$conn->{'state'}; #translate the state names if ( $os eq 'linux' ){ + if ( $state eq 'SYN_RECV' ){ $state='SYN_RECEIVED'; }elsif( $state eq 'FIN_WAIT1' ){ @@ -324,7 +334,7 @@ while ( defined( $res->[2]{'active_conns'}[$active_conns_int] ) ){ } if ( - ( $conn->{'local_port'} eq $service ) && + ( $conn->{'local_port'} eq $service ) && ( $state ne 'LISTEN' ) && ( ! $processed ) ){ @@ -339,7 +349,28 @@ while ( defined( $res->[2]{'active_conns'}[$active_conns_int] ) ){ $active_conns_int++; } +# try to lookup a name for the portnumber used in the data +my($portno,$dataset); +while ( ($portno,$dataset) = each(%protos) ) { + + my $servicename = $portno; + + if ( $portno =~ /^-?\d+$/) { + my $servicename_test = getservbyport($portno,"tcp"); + + if ( ( not defined $servicename_test ) || ( $servicename_test eq '' ) ){ + $servicename = $portno; + } else { + $servicename = $servicename_test; + } + } + + # add to "data_collection" + $data_collection{$servicename} = $dataset; +} + + #return the finished product -$to_return{data}=\%protos; +$to_return{data}=\%data_collection; return_json(\%to_return, $opts{P}); exit 0; From 103cd5b293b306292f709ef7584c72450c5c4e01 Mon Sep 17 00:00:00 2001 From: AltiUP <44464440+AltiUP@users.noreply.github.com> Date: Sun, 19 Apr 2020 00:02:53 +0200 Subject: [PATCH 119/332] Change stats file location The stats file should not be placed in /var/run/named but in /var/cache/bind. In run there will be permissions problems. --- snmp/bind | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/snmp/bind b/snmp/bind index 1d66d2d18..390738132 100644 --- a/snmp/bind +++ b/snmp/bind @@ -39,7 +39,7 @@ The variables are as below. rndc = The path to rndc. Default: /usr/bin/env rndc call_rndc = A 0/1 boolean on weather to call rndc stats. Suggest to set to 0 if using netdata. Default: 1 - stats_file = The path to the named stats file. Default: /var/run/named/stats + stats_file = The path to the named stats file. Default: /var/cache/bind/stats agent = A 0/1 boolean for if this is being used as a LibreNMS agent or not. Default: 0 zero_stats = A 0/1 boolean for if the stats file should be zeroed first. Default: 0 (1 if guessed) @@ -53,7 +53,7 @@ it should be. 
## my $call_rndc=1; my $rndc='/usr/bin/env rndc'; -my $stats_file='/var/run/named/stats'; +my $stats_file='/var/cache/bind/stats'; my $zero_stats=0; my $agent=0; my $missing=0; From 174d94777f8629434e3de7df8651248c53938a78 Mon Sep 17 00:00:00 2001 From: Konstantin V Bekreyev Date: Tue, 21 Apr 2020 21:30:37 +0400 Subject: [PATCH 120/332] add FreeBSD to osupdate (#288) * Update osupdate osupdate for FreeBSD * Update osupdate --- snmp/osupdate | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/snmp/osupdate b/snmp/osupdate index f45493dc4..8a391fa2b 100755 --- a/snmp/osupdate +++ b/snmp/osupdate @@ -24,6 +24,8 @@ BIN_APT='/usr/bin/env apt-get' CMD_APT='-qq -s upgrade' BIN_PACMAN='/usr/bin/env pacman' CMD_PACMAN='-Sup' +BIN_PKG='/usr/sbin/pkg' +CMD_PKG=' audit -q -F' ################################################################ # Don't change anything unless you know what are you doing # @@ -68,6 +70,14 @@ elif command -v apt-get &>/dev/null ; then else echo "0"; fi +elif command -v pkg &>/dev/null ; then + # FreeBSD + UPDATES=`$BIN_PKG $CMD_PKG | $BIN_WC $CMD_WC` + if [ $UPDATES -ge 1 ]; then + echo $UPDATES; + else + echo "0"; + fi else echo "0"; fi From 660ded7ec06044b33ea168436ca60790e6951da6 Mon Sep 17 00:00:00 2001 From: priiduonu Date: Thu, 23 Apr 2020 05:51:17 +0300 Subject: [PATCH 121/332] fix: removed double entry for WMV9 codec (#292) --- snmp/raspberry.sh | 1 - 1 file changed, 1 deletion(-) diff --git a/snmp/raspberry.sh b/snmp/raspberry.sh index 72ec0616f..a2f924102 100755 --- a/snmp/raspberry.sh +++ b/snmp/raspberry.sh @@ -40,7 +40,6 @@ $picmd $getStatusWVC1 | $pised 's/enabled/2/g' $picmd $getStatusMPG4 | $pised 's/enabled/2/g' $picmd $getStatusMJPG | $pised 's/enabled/2/g' $picmd $getStatusWMV9 | $pised 's/enabled/2/g' -$picmd $getStatusWMV9 | $pised 's/enabled/2/g' $picmd $getStatusH264 | $pised 's/disabled/1/g' $picmd $getStatusMPG2 | $pised 's/disabled/1/g' $picmd $getStatusWVC1 | $pised 's/disabled/1/g' From 5c5b32dcd8cb80bab0ec2c711854c281bb985643 Mon Sep 17 00:00:00 2001 From: Tobias Frederick Date: Thu, 30 Apr 2020 21:00:25 +0200 Subject: [PATCH 122/332] Fix apache-stats.sh --- snmp/apache-stats.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/apache-stats.sh b/snmp/apache-stats.sh index b1a9a06db..275d81586 100755 --- a/snmp/apache-stats.sh +++ b/snmp/apache-stats.sh @@ -1,4 +1,4 @@ -#!/usr/bin/env sh +#!/usr/bin/env bash # This script produces LibreNMS apache-stats output. The only dependency is curl. 
From c8f31e749e07a1d6eac3fccd2a1542986f7ba450 Mon Sep 17 00:00:00 2001 From: SourceDoctor Date: Thu, 14 May 2020 23:27:47 +0200 Subject: [PATCH 123/332] Redis Application Agent --- snmp/redis.py | 58 +++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 58 insertions(+) create mode 100755 snmp/redis.py diff --git a/snmp/redis.py b/snmp/redis.py new file mode 100755 index 000000000..59905c63c --- /dev/null +++ b/snmp/redis.py @@ -0,0 +1,58 @@ +#!/usr/bin/env python3 + +import subprocess +import json + +shell_cmd = "redis-cli info" +all_data = subprocess.Popen(shell_cmd, shell=True, stdout=subprocess.PIPE).stdout.read().split(b'\n') + +version = 1 +error = 0 +error_string = "" +redis_data = {} + +# stdout list to json +try: + category = '' + for d in all_data: + d = d.replace(b'\r', b'') + + if d in [b'']: + continue + + if d.startswith(b'#'): + category = d.replace(b'# ', b'').decode("utf-8") + redis_data[category] = {} + continue + + if not len(category): + error = 2 + error_string = 'category not defined' + break + + k, v = d.split(b':') + k = k.decode("utf-8") + v = v.decode("utf-8") + + # convert string to int/float, if possible +# try: +# if '.' in v: +# v = float(v) +# else: +# v = int(v) +# except ValueError: +# pass + + redis_data[category][k] = v + +except: + error = 1 + error_string = 'data extracting error' + +output = {'version': version, + 'error': error, + 'errorString': error_string, + 'data': redis_data} + +#print (json.dumps(output, indent=4, sort_keys=True)) +print (json.dumps(output)) From cdaf20e2c8ebf0bd63455713e3eb3ea97c4ba0ae Mon Sep 17 00:00:00 2001 From: SourceDoctor Date: Fri, 15 May 2020 05:18:33 +0200 Subject: [PATCH 124/332] Remove comments --- snmp/redis.py | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/snmp/redis.py b/snmp/redis.py index 59905c63c..097dda78c 100755 --- a/snmp/redis.py +++ b/snmp/redis.py @@ -34,15 +34,6 @@ k = k.decode("utf-8") v = v.decode("utf-8") - # convert string to int/float, if possible -# try: -# if '.' in v: -# v = float(v) -# else: -# v = int(v) -# except ValueError: -# pass - redis_data[category][k] = v except: @@ -54,5 +45,4 @@ 'errorString': error_string, 'data': redis_data} -#print (json.dumps(output, indent=4, sort_keys=True)) print (json.dumps(output)) From e6892ea76dcbb27a59f9011e3c4881ea839c76ee Mon Sep 17 00:00:00 2001 From: Karl Shea Date: Fri, 15 May 2020 01:15:20 -0500 Subject: [PATCH 125/332] Update gpsd 4s max time limit was causing some timeouts, especially given the two 1s sleeps. Especially with a lot of sentences coming back from the GPS chip it was probably not enough to always catch the right variables. 
--- agent-local/gpsd | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/agent-local/gpsd b/agent-local/gpsd index cfb0b0b97..a774c794f 100755 --- a/agent-local/gpsd +++ b/agent-local/gpsd @@ -4,8 +4,8 @@ $server = 'localhost'; $port = 2947; -set_time_limit(4); -ini_set('max_execution_time', 4); +set_time_limit(6); +ini_set('max_execution_time', 6); $sock = @fsockopen($server, $port, $errno, $errstr, 2); @@ -50,4 +50,4 @@ if ($resp) { function satellite_used($sat) { return $sat->used; -} \ No newline at end of file +} From c91ba4d29d74e1328f5d78717d940a8c9f6b768f Mon Sep 17 00:00:00 2001 From: "Chris (Someguy123)" Date: Fri, 22 May 2020 07:48:00 +0100 Subject: [PATCH 126/332] Remove absolute exe for powerdns + use python3 - `agent-local/powerdns` - Replaced the absolute path `/usr/bin/pdns_control` with `pdns_control`, since pdns_control can also be in `/usr/sbin` or `/usr/local/bin`. Python can find the executable using PATH just fine. - Changed `vars` to `kvars` to avoid conflicting with the reserved `vars` Python symbol - Changed shebang to use `python3` instead of `python` - as Python 2 is EOL. - `agent-local/powerdns-recursor` - Changed shebang to use `/usr/bin/env` instead of a hardcoded path to Python - Changed shebang to use `python3` instead of `python` - as Python 2 is EOL. **NOTE:** As per https://pythonclock.org/ - Python 2 is end-of-life, and is no longer included by default on modern Linux distros, along with macOS (OS X). I would recommend adjusting all Python-based agents to use Python 3 by default, instead of Python 2. --- agent-local/powerdns | 22 ++++++++++++---------- agent-local/powerdns-recursor | 2 +- 2 files changed, 13 insertions(+), 11 deletions(-) diff --git a/agent-local/powerdns b/agent-local/powerdns index 913abcf90..d4fc6bb30 100755 --- a/agent-local/powerdns +++ b/agent-local/powerdns @@ -1,16 +1,17 @@ -#!/usr/bin/env python - +#!/usr/bin/env python3 from subprocess import Popen, PIPE -vars = [ 'corrupt-packets', 'deferred-cache-inserts', 'deferred-cache-lookup', -'latency', 'packetcache-hit', 'packetcache-miss', 'packetcache-size', -'qsize-q', 'query-cache-hit', 'query-cache-miss', 'recursing-answers', -'recursing-questions', 'servfail-packets', 'tcp-answers', 'tcp-queries', -'timedout-packets', 'udp-answers', 'udp-queries', 'udp4-answers', -'udp4-queries', 'udp6-answers', 'udp6-queries' ] +kvars = [ + 'corrupt-packets', 'deferred-cache-inserts', 'deferred-cache-lookup', + 'latency', 'packetcache-hit', 'packetcache-miss', 'packetcache-size', + 'qsize-q', 'query-cache-hit', 'query-cache-miss', 'recursing-answers', + 'recursing-questions', 'servfail-packets', 'tcp-answers', 'tcp-queries', + 'timedout-packets', 'udp-answers', 'udp-queries', 'udp4-answers', + 'udp4-queries', 'udp6-answers', 'udp6-queries' +] rvars = {} -cmd = ['/usr/bin/pdns_control', 'show', '*'] +cmd = ['pdns_control', 'show', '*'] for l in Popen(cmd, stdout=PIPE).communicate()[0].decode().rstrip().split(','): v = l.split('=') @@ -19,5 +20,6 @@ for l in Popen(cmd, stdout=PIPE).communicate()[0].decode().rstrip().split(','): print("<<>>") -for k in vars: +for k in kvars: print(rvars[k]) + diff --git a/agent-local/powerdns-recursor b/agent-local/powerdns-recursor index 0ac290489..6949c7f44 100755 --- a/agent-local/powerdns-recursor +++ b/agent-local/powerdns-recursor @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python3 import json, subprocess from subprocess import Popen, PIPE From 4e1230c10ec1bcfb6f0cec118172be58e5771304 Mon Sep 17 00:00:00 2001 From: SourceDoctor 
Date: Sat, 30 May 2020 18:10:16 +0200 Subject: [PATCH 127/332] remove unneeded file --- qq | 2069 ------------------------------------------------------------ 1 file changed, 2069 deletions(-) delete mode 100644 qq diff --git a/qq b/qq deleted file mode 100644 index 58a21ca28..000000000 --- a/qq +++ /dev/null @@ -1,2069 +0,0 @@ -commit 3361bf4c3ae5868b00d09215e10359f58a36ac12 -Author: SourceDoctor -Date: Wed May 1 14:55:54 2019 +0200 - - add mdadm support - -commit d822c899a78bdfd1e7d9f4df2bd5cd512b1696bd -Merge: 8fbfbd5 544fd8b -Author: VVelox -Date: Sun Mar 24 03:56:16 2019 -0500 - - Merge pull request #226 from VVelox/smart-update - - SMART monitoring update adding RAID support - -commit 544fd8bd6e525b3c29d9965c2b405b39ba49a98d -Author: Zane C. Bowers-Hadley -Date: Tue Mar 19 02:58:30 2019 -0500 - - update the date - -commit 8fbfbd5b39bbc22ca606327813c4fe54b38e4d30 -Merge: cb04f8c 38acc2b -Author: VVelox -Date: Tue Mar 19 02:53:30 2019 -0500 - - Merge pull request #225 from VVelox/pa-fix - - portactivity fixes - -commit 503fb9f7389d8307074ed856f96a870a0d26dd72 -Author: Zane C. Bowers-Hadley -Date: Tue Mar 19 02:25:17 2019 -0500 - - tested and it appears to work properly... documentation updated - -commit bdfd0ceea948382684a2bd96659731f9ac5f15b1 -Author: Zane C. Bowers-Hadley -Date: Tue Mar 19 00:40:06 2019 -0500 - - update the guessing to only use smartctl --scan-open and generate with more complex options - -commit 38acc2bd3d8e81414b4bfc2cb2bb3e955877fbc1 -Author: Zane C. Bowers-Hadley -Date: Mon Mar 18 03:39:17 2019 -0500 - - actually make this work on system not FreeBSD and deal with the bug where a connection may not have a protocol - -commit cb04f8c0ac148cb2b250d0a408f672db22e99ed5 -Merge: 147cb67 af32f56 -Author: VVelox -Date: Sun Mar 17 23:27:46 2019 -0500 - - Merge pull request #224 from VVelox/zfs-fix - - ZFS-FreeBSD divide by zero fix - -commit af32f56a74e0d9915b4beb419a28814e9bf058d8 -Author: Zane C. Bowers-Hadley -Date: Sun Mar 17 06:07:59 2019 -0500 - - merge... and update version - -commit 658c3c6ead712837bbb763c6b9ecdd782b043629 -Merge: 6564128 147cb67 -Author: Zane C. Bowers-Hadley -Date: Sun Mar 17 06:06:57 2019 -0500 - - Merge branch 'zfs-fix' of https://github.com/VVelox/librenms-agent into zfs-fix - -commit 656412830564593cfefeee5dceeae89bfa371000 -Author: Zane C. Bowers-Hadley -Date: Sun Mar 17 06:02:43 2019 -0500 - - remove unneeded else statement and re-apply patch - -commit 3ce06d6defc63f200f2bbfec7718748c8ec9e832 -Author: Zane C. Bowers-Hadley -Date: Sun Mar 17 05:55:33 2019 -0500 - - freshly initilized ZFS pulls that are not in use don't have a $data_demand_total - -commit 147cb67824b213045826677946166c8ee807f23c -Author: Tony Murray -Date: Tue Feb 12 20:33:05 2019 -0600 - - Use os-release whenever possible for the distro script (#220) - - Except centos... 
https://bugs.centos.org/view.php?id=8359 - -commit c9a0d2893e44f89f7c8c9450a9d42438eff1404d -Author: Felicián Hoppál -Date: Mon Feb 11 23:06:57 2019 +0100 - - Fix: zpool list output changed, incorrect values (#219) - - * fix zpool data, output of zpool list -pH changed in freebsd 11 - - * fix zpool data, output of zpool list -pH changed in freebsd 11 - - * bump version - - * version dump to 2 - -commit 3a407e3f721b7677fb2724af736ea87838d4dcc5 -Author: Tony Murray -Date: Thu Jan 17 11:44:02 2019 -0600 - - Update powerdns script to json (#218) - -commit ad300c035a2be4a55553c2994d5ce7ba69d57432 -Author: VVelox -Date: Wed Jan 9 23:41:39 2019 -0600 - - various misc fixes for the postfix poller (#112) - - * update postfix - - * move a few things to reduce the number of changed lines - - * move mself to the end - - * white space cleanup and another small cleanup of $chr - - * use $chrNew instead of $chrC when writing the current values - - * more white space cleanup - - * replace one more missed instance of iuoscp - -commit c40606140114b9059409f17a21b06fe8655b760e -Author: Slashdoom <5092581+slashdoom@users.noreply.github.com> -Date: Thu Jan 10 18:40:40 2019 +1300 - - Fix: InnoDB stat support for MariaDB v10+ (#211) - - * mariadb innodb support for v10+ - - * fix newer innodb insert buffers - - * agent mysql to snmp extend - -commit 6fdaffa1b2ba8c49ed8bd38fb6445335b3146329 -Author: Mike Centola -Date: Thu Jan 10 00:35:28 2019 -0500 - - Added gpsd script for SNMP Extend (#217) - - Fixed Typos - - Fixed another typo - -commit f54c442d06abd7d2112dc4dc5db315524030308c -Merge: 1b90904 107d72e -Author: CrazyMax -Date: Sat Dec 29 22:17:13 2018 +0100 - - Merge pull request #216 from jasoncheng7115/patch-2 - - Added Proxmox VE Versoin support - -commit 1b90904f61c6d4078f2b427e17c82cf1f8b926ba -Author: VVelox -Date: Fri Dec 28 20:10:13 2018 -0600 - - convert the FreeBSD NFS stuff over to JSON and add in lots of sanity (#190) - - * convert fbsdnfsclient over to JSON - - * Convert the server stuff to JSON and fix the output of the client extend. - - * misc. stuff - - * lots of cleanup and sanity added to the FreeBSD NFS scripts - - * fix the #! line - - * update the docs at the top - -commit 5be1b168ba4e03ba3a58b3833a26587474ff7b29 -Author: VVelox -Date: Fri Dec 28 20:08:46 2018 -0600 - - JSON SNMP extend for UPS-APC app. 
(#189) - - * add snmp/ups-apcups, a Perl rewrite of snmp/ups-apcups.sh to support JSON - - * finish documenting it - - * add version and remove units from the returned values - -commit 107d72e862c2e2a53870272859252a5d39bf8c72 -Author: Jason Cheng <30381035+jasoncheng7115@users.noreply.github.com> -Date: Tue Dec 25 09:15:22 2018 +0800 - - Added Proxmox VE Versoin support - -commit 433d744953fa800ce49fa060b141c10663c0b952 -Author: Jason Cheng <30381035+jasoncheng7115@users.noreply.github.com> -Date: Sun Dec 16 22:21:00 2018 +0800 - - Added FreeNAS Version support (#215) - - Hi, - - I added FreeNAS version information support, as shown in the figure: - - ![2018-12-15 11 53 31](https://user-images.githubusercontent.com/30381035/50044886-2329a580-00c5-11e9-817c-b89a8374270d.png) - ![2018-12-15 11 53 49](https://user-images.githubusercontent.com/30381035/50044887-2329a580-00c5-11e9-93b4-b140809f84a3.png) - -commit 3c4511d987c2058bd6e8605bb0e87c6fc1d36861 -Merge: ff124a1 dc3d267 -Author: CrazyMax -Date: Fri Dec 14 19:03:01 2018 +0100 - - Merge pull request #214 from dsgagi/patch-1 - - Fix Debian detection on Proxmox - lsb_release binary doesn't exist - -commit dc3d2673ddc86d02ca2cd8d93bbf2fd53ca43c55 -Author: dsgagi -Date: Fri Dec 14 18:49:58 2018 +0100 - - Update distro - - Remove extra white spaces. - -commit 456d2e7672d8532af4df7f6da2b5c18b02778bf7 -Author: dsgagi -Date: Fri Dec 14 18:47:54 2018 +0100 - - Update distro - - Minor changes to the code, for better output. - -commit 5b53ab54c8a6d9f3b81abf42725b5da2b3ebec3d -Author: dsgagi -Date: Wed Dec 12 16:09:25 2018 +0100 - - Update distro - -commit ff124a1358755ceddc0ae6a4187d358da0d54d06 -Author: VVelox -Date: Thu Nov 22 09:04:58 2018 -0600 - - add portactivity SNMP extend (#159) - - * add portactivity SNMP extend in its initial form - - * update for the current json_app_get - - * add version to the returned JSON - - * add basic POD documentation - -commit a827734c0ec0e0cdf5e2a04730ec68dbad3fd477 -Author: gardar -Date: Thu Oct 25 19:19:20 2018 +0000 - - CloudLinux distro detection (#208) - - Added CloudLinux distro detection, previously CloudLinux got identified as RedHat - -commit 8d66211adc47d3bad5dd042e3ddbc59a23a28819 -Author: sparknsh <38894705+sparknsh@users.noreply.github.com> -Date: Thu Oct 25 07:17:42 2018 -0400 - - Fix package manager detection (#204) - - * Fix package manager detection - - * use release file for os detection - - * Use command to to validate package manager type - - * check if exists and the execute permission is granted - - * make script more portable - -commit d49fe954dfdeffbeee091051f1f0c515d020f281 -Author: Félim Whiteley -Date: Tue Oct 23 17:46:54 2018 +0100 - - Add divide by zero check (#191) - - On several servers (Ubuntu 18.04) DEMAND_DATA_TOTAL is 0 currently and is causing an error - - Traceback (most recent call last): - File "/usr/local/bin/zfs-linux", line 178, in - sys.exit(main(sys.argv[1:])) - File "/usr/local/bin/zfs-linux", line 76, in main - DATA_DEMAND_PERCENT = DEMAND_DATA_HITS / DEMAND_DATA_TOTAL * 100 - ZeroDivisionError: division by zero - -commit 381cc2466af521772607c682a9a707471a38ff4b -Author: sparknsh <38894705+sparknsh@users.noreply.github.com> -Date: Tue Oct 23 08:51:12 2018 -0400 - - fix nginx script indentation (#205) - -commit 3dada041e433318592e137678d24c32dd1a134b4 -Author: sparknsh <38894705+sparknsh@users.noreply.github.com> -Date: Thu Oct 18 10:37:10 2018 -0400 - - Fix binary operator expected error (#203) - -commit ccb244aa09de36e4e4dd85120702580144e86383 -Author: sparknsh 
<38894705+sparknsh@users.noreply.github.com> -Date: Wed Oct 17 12:28:39 2018 -0400 - - osupdate script clean up (#199) - - - Change script name for simplify of configuration management orchestration scripts. - - Update code syntax. - -commit f0f34b4a2d1a36836f6bffe4307d5d51524009b4 -Author: sparknsh <38894705+sparknsh@users.noreply.github.com> -Date: Wed Oct 17 12:28:07 2018 -0400 - - phpfpmsf script clean up (#198) - - - Change script name for simplify of configuration management orchestration scripts. - - Update code syntax. - -commit e0dcd4a064cedb09241e4af17198bf61e8fd1bf3 -Author: sparknsh <38894705+sparknsh@users.noreply.github.com> -Date: Wed Oct 17 12:27:21 2018 -0400 - - nginx script clean up (#197) - - - Change script name for simplify of configuration management orchestration scripts. - - Change 172.0.0.1 to localhost for better nginx handling. - -commit 1c61a96344317c13fce90811c11c0fa4cb7efb36 -Author: sparknsh <38894705+sparknsh@users.noreply.github.com> -Date: Wed Oct 17 12:26:45 2018 -0400 - - ntp-client data correction (#196) - - NTP was not displaying data right for linux servers. It was putting the frequency data into the offset data. This was giving bad graphs in the UI. Tested the correction on both RHEL and Debian based operating systems and all passes. - - Remove the .sh to simplify for configuration management orchestration scripts. - -commit 28a2f8ae55db7ca773f881560017b4890bc4bbce -Author: voxnil <14983067+voxnil@users.noreply.github.com> -Date: Mon Oct 15 13:00:16 2018 -0700 - - Update zfs-linux to use env for python - -commit ca7a5cdafe6dd603538aad8f63bc624143f98377 -Author: Brock Alberry -Date: Wed Sep 19 09:09:04 2018 -0400 - - PhotonOS distro detection (#193) - - * PhotonOS distro detection - - Detection before `/etc/os-release` since that is present yet missing the build number. - - * awk detection - - combining https://github.com/librenms/librenms-agent/pull/193 and https://github.com/librenms/librenms-agent/pull/194 - -commit 7542bd26f4c883c7e622056a1a34909d1dc9aa2c -Author: Allison -Date: Tue Sep 18 20:20:23 2018 -0700 - - Update distro (#194) - - Adding full detection for ASUSWRT-Merlin - -commit 7c173b160c5be401fa36d85edf15add61a3146d7 -Author: VVelox -Date: Mon Aug 27 04:03:01 2018 -0500 - - convert all the NTP stuff to JSON (#174) - - This requires https://github.com/librenms/librenms/pull/8571 and is for https://github.com/librenms/librenms/pull/8608 . - - Also converted this to regular sh instead of bash, so it will work on more systems with less dependencies. - - Has been tested as working on DD-WRT and FreeBSD. - -commit 99ad80740cb2fcea1c33e59caf1c05af5a53a14f -Author: VVelox -Date: Sun Aug 19 17:47:07 2018 -0500 - - update for the new json_app_get stuff (#179) - -commit c772ac97d3f5b805c311fd13d924513b4561d10b -Author: crcro -Date: Fri Aug 10 00:44:02 2018 +0300 - - added rockstor nas distro detection (#187) - -commit c535b1286c7701a2cefcd10ffd799fba65e56dd2 -Author: TheGreatDoc <32565115+TheGreatDoc@users.noreply.github.com> -Date: Thu Jul 19 22:39:08 2018 +0200 - - Asterisk Script (#183) - - Asterisk App support. 
-    - Channels
-    - Calls
-    - Total SIP Peers
-    - Monitored Online
-    - Monitored Offline
-    - Unmonitored Online
-    - Unmonitored Offline
-
-commit 7e55d1cd5db04019de09aff7b134a85df71e901a
-Author: István Sárándi
-Date: Mon Jun 25 16:10:00 2018 +0200
-
-    Update fail2ban extend script to new JSON format (#181)
-
-    As seen at [this location](https://github.com/librenms/librenms/blob/7fab99cfc13b80a543fb779d68c659b52fc074b1/includes/polling/functions.inc.php#L768) the JSON output needs to contain a `data` field. The poller php script actually also extracts this `data` field as one of the first steps, see at [this line](https://github.com/librenms/librenms/blob/c3007b483a12758042e5d0c6009a8ef48e3e1a39/includes/polling/applications/fail2ban.inc.php#L36).
-    Before I changed these parts the graph didn't show up because the RRD files simply weren't generated as an exception occurred in the poller. This fixes this problem.
-
-commit b5d77f1a999c5e0f08bc02550fd24e7c37b759c7
-Author: VVelox
-Date: Mon May 28 07:22:09 2018 -0500
-
-    convert fail2ban-client to JSON (#172)
-
-    * convert to JSON
-
-    * add version return
-
-    * change the version number of the returned data to 1
-
-commit 41d36dc97f6886bae4ae6e8ba928892ef9d3c8c3
-Author: VVelox
-Date: Fri Apr 27 16:46:57 2018 -0500
-
-    make using SN or device name selectable for SMART reporting (#168)
-
-    * make using SN or device name selectable
-
-    * change the default to SN
-
-commit 385d466eee1adc06eecd4a84cfd6615f2e4ba2ec
-Author: Sander Steffann
-Date: Fri Apr 13 17:42:27 2018 +0100
-
-    Add random entropy monitoring (#173)
-
-commit a56adb467a1cdf9785f977420dd07a48335f41b3
-Author: Serphentas
-Date: Wed Apr 11 10:39:32 2018 +0200
-
-    add zfs support for linux (#170)
-
-    * add zfs support for linux
-
-    * fix pools and anon_hits_per
-
-    * strip percent sign for pool cap
-
-    * fix anon_hits json key typo
-
-    * fix demand_data_hits json key typo
-
-    * fix comparison as in #169
-
-    * fix min_size_percent
-
-commit 8ec6017246edc9784e670d84bd8b52ec094dbb82
-Author: VVelox
-Date: Wed Apr 11 02:34:39 2018 -0500
-
-    correct arc size breakdown (#171)
-
-commit 3ddb1d6be6b4a4a0cd006251b497bb1ccf8170e8
-Author: VVelox
-Date: Tue Apr 10 22:04:07 2018 -0500
-
-    correct arc size breakdown
-
-commit 90fd6f60f3aed5f71140d23a8d022ae9909e7473
-Author: Dylan Underwood
-Date: Fri Mar 23 11:24:02 2018 -0500
-
-    Should be greater than or equal to (#167)
-
-commit 3a8462461595535a53554b0ad66bc922118e83d1
-Author: endofline
-Date: Tue Feb 27 23:10:35 2018 +0200
-
-    Replace disk identifier with disk serial in S.M.A.R.T snmp script (#164)
-
-commit bbd3b1309aaa3ecaf6f502e92718719539715c58
-Author: endofline
-Date: Sun Feb 18 22:33:42 2018 +0200
-
-    Fix Command_Timeout missing from SMART output (#163)
-
-commit fd9fd178a4b43feafb414822167b3033693c8efc
-Author: crcro
-Date: Sat Jan 6 22:06:45 2018 +0200
-
-    extend: powerdns-dnsdist (#158)
-
-    * powerdns-dnsdist app
-
-    * fix script in help
-
-    * removed local data manipulation
-
-    * again name of file in script help
-
-    * removed personal api info
-
-commit bacaca0be4104cc003222b941e433d5470cae76d
-Author: VVelox
-Date: Sat Dec 30 05:42:37 2017 -0600
-
-    ZFS SNMP agent :3 <3 (#156)
-
-    * Add it as it currently is. Needs to be moved over to JSON
-
-    * rename it to zfs-freebsd as it is FreeBSD specific
-
-    now uses JSON
-
-    * misc. updates and document it all
-
-    * minor spelling correction
-
-commit c7cae0765e0f5072fdf3dd224f357290e2697fb5
-Author: VVelox
-Date: Sat Dec 30 05:39:36 2017 -0600
-
-    update the fail2ban stuff (#155)
-
-    Dropping firewall checking as the new fail2ban uses pf and anchors on
-    FreeBSD, which while esoteric as fuck works nicely and is reliable.
-
-commit 8920cd3f290e8c13a3bb7db96ceb8db05845869d
-Author: Slashdoom <5092581+slashdoom@users.noreply.github.com>
-Date: Wed Dec 13 16:13:10 2017 +1300
-
-    freeradius.sh: new agent for incoming main PR (#151)
-
-    * Update os-updates.sh
-
-    * Update os-updates.sh
-
-    * Update os-updates.sh
-
-    * Create freeradius.sh
-
-    * Update freeradius.sh
-
-    * Update freeradius.sh
-
-commit 3b9d632a8d6dbd6ac3f42f75ba36faa235ef4440
-Author: arrmo
-Date: Mon Dec 4 14:11:17 2017 -0600
-
-    hddtemp, ignore devices not supporting SMART (#153)
-
-commit 7fb48df8579a8e113153c1439a4fa92829847d9f
-Author: Daniel Bull
-Date: Fri Oct 27 06:41:05 2017 +0100
-
-    Fix: Apache SNMP extend IndexError (#116)
-
-    See issue for more information:
-    https://github.com/librenms/librenms-agent/issues/95
-
-commit 2996ad88b00f24777c0e5629cb931b8b448dd515
-Author: dragans
-Date: Fri Oct 27 07:39:09 2017 +0200
-
-    fix: Update mysql (#127)
-
-    Update mysql agent script based on updated changes in newest version of Percona Monitoring Plugins (Cacti template).
-
-    Changes enable correct parsing of status data for newer versions of MySQL/MariaDB database servers and should be backward compatible with older versions.
-
-commit d0762871b4cfb0a7cbfcc5ba99bc1fe0b0c51cf3
-Author: Slashdoom <5092581+slashdoom@users.noreply.github.com>
-Date: Tue Oct 10 08:02:05 2017 +1300
-
-    os-update.sh: back to package management based and count fixes (#149)
-
-    * Update os-updates.sh
-
-    * Update os-updates.sh
-
-    * Update os-updates.sh
-
-commit 6a40ca1e9cc4319e6b7363541feb9681dcf5bc5f
-Author: tomarch
-Date: Wed Sep 20 21:47:11 2017 +0200
-
-    fix munin agent (#148)
-
-    Without the full munin-scripts path, this script won't find munin file and return nothing.
-
-commit 1b03d2f9f74ca29b177e596c0ff2ba13a0e1292d
-Author: Uwe Arzt
-Date: Wed Sep 6 20:42:58 2017 +0200
-
-    Add Oracle Linux Distribution to distro script (#146)
-
-    * Add Oracle Linux to distro script
-
-    * Revert local change
-
-commit 45478555df856af51e707c3cd6ace716c709e0fb
-Author: arrmo
-Date: Sun Aug 27 14:59:15 2017 -0500
-
-    Update Distro, for Raspbian Support (#144)
-
-commit 3380a85ff13f0dad706690b71b2bd8e9d9452926
-Author: Zucht
-Date: Sat Aug 12 17:30:02 2017 +0200
-
-    Update raspberry.sh (#143)
-
-    Fix state WMV9
-
-commit a50e1dffb89738814a1183e2e0560ab86daaf3f0
-Author: Neil Lathwood
-Date: Thu Aug 3 17:11:26 2017 +0100
-
-    Update raspberry.sh (#140)
-
-commit 584fd645d470e85e30607b8be3102292b4a7b54e
-Author: drid
-Date: Wed Jul 12 22:55:02 2017 +0300
-
-    C.H.I.P. power values (#134)
-
-    * C.H.I.P. power values
-
-    * Added attribution
-
-    * Fix ACIN current calculation
-
-    * Battery current fix
-
-commit 3f9dc0f5f02c1590d6e84ac10c6f7c973d54f771
-Author: RedChops
-Date: Thu Jun 29 16:11:26 2017 -0400
-
-    Fix for bug https://github.com/librenms/librenms/issues/6821 (#138)
-
-commit a4efb62466c58ee05b3c078283a2a9fecb7cd3ce
-Author: Stefan Funke
-Date: Wed Jun 28 22:36:26 2017 +0200
-
-    unnecessary use of wc while already calling grep (#137)
-
-    * useless call of wc while already calling grep
-
-    * move grep count call to CMD_GREP to stay in project style
-
-commit cc6d7882dba89bce0a1f3a27d9fd3b399a2430b9
-Author: einarjh
-Date: Sat Jun 10 11:20:48 2017 +0200
-
-    Strip all non-ASCII characters from hddtemp output (#136)
-
-commit 3903f431f7f56ef4f48bd50d28c05aec8e795bc0
-Author: crcro
-Date: Tue Jun 6 01:00:29 2017 +0300
-
-    bash script for pi-hole app (#135)
-
-commit 84630dfb84539936efa47bfe3b13638f809a82c5
-Author: Félim Whiteley
-Date: Wed May 31 22:23:38 2017 +0100
-
-    Fix for first line as localhost (#130)
-
-    An example output like below where the first line of output is just "localhost" so it causes the splitting to cause an out of index error.
-
-    Example:
-    cat /tmp/apache-snmp
-    localhost
-    ServerVersion: Apache/2.4.25 (Ubuntu) PHP/5.6.30-5+deb.sury.org~trusty+2
-    ServerMPM: prefork
-    Server Built: 2016-12-21T00:00:00
-    CurrentTime: Thursday, 18-May-2017 19:26:43 EDT
-    RestartTime: Thursday, 18-May-2017 11:35:48 EDT
-    ParentServerConfigGeneration: 2
-    ParentServerMPMGeneration: 1
-    ServerUptimeSeconds: 28255
-    ServerUptime: 7 hours 50 minutes 55 seconds
-    Load1: 0.04
-    Load5: 0.05
-    Load15: 0.10
-    Total Accesses: 5367
-    Total kBytes: 61432
-    CPUUser: 19.69
-    CPUSystem: 1.05
-    CPUChildrenUser: 0
-    CPUChildrenSystem: 0
-    CPULoad: .0734029
-    Uptime: 28255
-    ReqPerSec: .189949
-    BytesPerSec: 2226.38
-    BytesPerReq: 11721
-    BusyWorkers: 1
-    IdleWorkers: 6
-    Scoreboard: ___....._.__.W........................................................................................................................................
-
-commit 16178c6ac31ed2511243ccfab5b25b69b031d3fa
-Author: Aldemir Akpinar
-Date: Thu Jun 1 00:23:07 2017 +0300
-
-    Added devuan support for os-updates.sh and removed code repitition (#131)
-
-commit f473c5e30ca0649baa590dd5a7f041ce91f57e73
-Author: BlackDex
-Date: Tue May 23 14:44:05 2017 +0200
-
-    Added try-except checks for global values. (#107)
-
-    Fixed an error which prevented output.
-    It seems some ceph version probably use different values or something. This is a quick fix to have the script output the correct values.
-
-commit 6fdcc91f7041ad49cbb906b814a1b5ecf8fd2e4c
-Author: Karl Shea
-Date: Thu May 4 02:06:32 2017 -0500
-
-    Fix bind config file read (#125)
-
-commit e3dad6cfc9c6549e1f5cfef41ef2cf20a9827352
-Author: VVelox
-Date: Wed May 3 09:23:40 2017 -0500
-
-    BIND cleanup and expansion (#108)
-
-    * add BIND named SNMP extend
-
-    * nolonger piss the entire stats across the wire, but crunch them and return them
-
-    * more work on bind
-
-    * more misc. updates
-
-    * add proper agent support as well as optional zeroing
-
-    * add -m
-
-commit 69eee9fb898bd521e3f4ab5d2d93cf5b34949e1d
-Author: Aldemir Akpinar
-Date: Tue May 2 12:22:19 2017 +0300
-
-    Added Devuan GNU/Linux support (#124)
-
-commit eaa6af235978405418d8e6d6e0beb04f761a578b
-Author: crcro
-Date: Thu Apr 27 22:54:55 2017 +0300
-
-    snmp-extend: sdfsinfo (#122)
-
-    * sdfsinfo app snmp extend
-
-    * rewrite script to bash
-
-    * more vars
-
-commit 69e1ace889cfee6963cc6506a5e96fb30cabac1b
-Author: RedChops
-Date: Sat Apr 22 19:29:00 2017 -0400
-
-    Include missing SMART ids in the output (#120)
-
-commit 705cc0f3fe62e4837ecf4be86dec95558ca07ff3
-Author: Svennd
-Date: Tue Apr 18 22:34:05 2017 +0200
-
-    add support for SGE/rocks job tracker (#118)
-
-commit d7085e001cebf0bf086b84ac0c65cad54f90ee38
-Author: Chris Putnam
-Date: Tue Apr 18 13:32:41 2017 -0700
-
-    hddtemp: parallelize calls to hddtemp for performance (#117)
-
-    This poll script runs hddtemp with a list of all drives as arguments and reads the output. hddtemp scans each drive's SMART status serially, which scales poorly with a large number of drives.
-
-    In lieu of a patch to the actual hddtemp project, optionally use GNU parallel when available to parallelize the call to hddtemp.
-
-    In testing a machine with 58 drives I went from a runtime of about 5 seconds per run to 0.5s, a performance improvement of 10x.
-
-commit 5f47aad492a679a81da0a19f2649f60d6637e199
-Author: Chris Putnam
-Date: Fri Apr 7 01:45:56 2017 -0500
-
-    hddtemp: improve detection of drives (#115)
-
-    Previously, this script was only able to find 26 drives (sda-sdz) due to the use of globbing.
-
-    A better strategy for detecting drives would be to use lsblk on systems that support it, failing over to globbing.
-
-    This patch adds support both for lsblk and a more comprehensive glob solution with find that will at least catch 26^2 drives.
-
-commit 67bae5a86cfe47c90ade541c1e613f7e5e788cfd
-Author: Philip Rosenberg-Watt
-Date: Thu Apr 6 03:24:36 2017 -0600
-
-    fix: Update proxmox agent to use new Perl module (#88)
-
-    PVE::API2Client is deprecated in Proxmox 4.4-6. Agent now requires
-    installation of libpve-apiclient-perl via apt.
-
-    This commit fixes #81.
-
-commit a7fe1f8e6f98640463a93f934ac2580311db09ca
-Author: Tony Murray
-Date: Wed Mar 29 19:11:23 2017 -0500
-
-    Copy powerdns-recursor to snmp and remove <<>> header (#111)
-
-commit 74faec281c13928e60c140d85bb3138e7297fa79
-Author: Florian Beer
-Date: Thu Mar 30 00:00:26 2017 +0200
-
-    Postfix app bug fixes (#105)
-
-    * Postfix app bug fixes
-
-    - add missing DS
-    - fix some totals
-
-    * Move new variable to end of output
-
-commit 1e7762fb4eb832ed9d7530994804a284028c9c7c
-Author: VVelox
-Date: Wed Mar 22 09:28:57 2017 -0500
-
-    add SMART SNMP extend script (#101)
-
-    * add SMART SNMP extend
-
-    * cleanup default disk examples
-
-    * correct a small typo
-
-    * add option caching support
-
-    * add checking selftest log and nolonger zeros non-existent IDs
-
-    * now uses a config file
-
-    * add the ability to guess at the config
-
-    * properly remove device entries with partitions now and avoid adding dupes in a better manner
-
-    * now have smartctl scan as well to see if it missed anything
-
-    * note why ses and pass are ignored
-
-    * properly use the cache file in the config now
-
-    * actually use the cache now
-
-commit 94aa0feacdfc71b6d8044c66992069538071ca39
-Author: VVelox
-Date: Sun Mar 19 13:03:59 2017 -0500
-
-    add unbound SNMP extend script (#102)
-
-commit 495f46afb431a0ef29fe58c40a01c7f9d352c3d5
-Author: Tony Murray
-Date: Fri Mar 10 06:29:19 2017 -0600
-
-    Update mysql script to php7 version... (#104)
-
-    * Update mysql script to php7 version...
-
-    * Update mysql
-
-commit 61579bf0ace0a963f6ffbf9ca263910c5f6614fe
-Author: Tuxis Internet Engineering V.O.F
-Date: Wed Mar 8 09:51:04 2017 +0100
-
-    Enable ipv6 in Xinetd (#100)
-
-    * Fix indenting and enable IPv6 in Xinetd
-
-    * Fix changelog
-
-    * Typo
-
-commit 7f79fc4167adac967d89d0ee6277f78886a5c7b9
-Author: Tony Murray
-Date: Tue Mar 7 23:48:15 2017 -0600
-
-    Update mysql
-
-commit 1b1d8b491f842edc3e04c5405ae13de4f60a6751
-Author: VVelox
-Date: Tue Mar 7 23:40:09 2017 -0600
-
-    clean up snmp/mysql_stats.php and make it a proper snmpd extend script now (#99)
-
-    * cleanup and make it something that can properly be invoked via cli
-
-    * blank the user/pass/host bits increasing the chances it will work out of the box
-
-    * Update mysql_stats.php
-
-    * Update mysql_stats.php
-
-    * Update mysql_stats.php
-
-    * Update mysql_stats.php
-
-    * Rename mysql_stats.php to mysql
-
-commit e7c331070100290b3780ba6907add81be82165c6
-Author: VVelox
-Date: Fri Mar 3 14:41:38 2017 -0600
-
-    add Nvidia SNMP extend poller (#94)
-
-    * add Nvidia SNMP extend
-
-    * update the extend path
-
-    * now support more than 4 GPUs
-
-    this will now support how ever many GPUs are installed on a system...
-
-    Just double checked and it appears nvidia-smi dmon only reports up to 4 GPUs at a time... so if we have more than 4, begin checking they exist and if so print them
-
-commit 2308481188f72bbad12d7d94ebd941a73fc97655
-Author: VVelox
-Date: Fri Mar 3 12:55:55 2017 -0600
-
-    add squid snmp extend (#93)
-
-commit 2700598925c8481641def507a4bf902a27cb01af
-Author: VVelox
-Date: Fri Mar 3 08:49:15 2017 -0600
-
-    FreeBSD NFS extends (#90)
-
-    * add the FreeBSD NFS client and server extends
-
-    * white space cleanup
-
-    * white space cleanup
-
-commit db3b5c7cec8fa35832739e742c84fa61e465bd9f
-Author: VVelox
-Date: Wed Mar 1 17:46:13 2017 -0600
-
-    add Postgres SNMP extend (#91)
-
-    * add Postgres SNMP extend
-
-    * minor comment cleanups
-
-    * use env for check_postgres.pl
-
-    * quote the string
-
-commit 42e488743917fd39019ac9300caf391a5a8120c8
-Author: VVelox
-Date: Wed Mar 1 12:35:06 2017 -0600
-
-    add detailed Postfix poller (#92)
-
-    * add detailed postfix poller
-
-    * env perl
-
-commit c4101c9ef2a8e8dffbfaee55f067c7c89fe18e27
-Merge: bb4c67b 8343e7f
-Author: Tony Murray
-Date: Fri Feb 24 11:10:43 2017 -0600
-
-    Merge pull request #84 from VVelox/master
-
-    add a snmpd extend script for fail2ban
-
-commit 8343e7f34e1c382051f65bb9d7cf5bad454b934e
-Author: Tony Murray
-Date: Fri Feb 24 11:09:21 2017 -0600
-
-    Update fail2ban
-
-commit 4fcce9f01dd5b0c7979a2ebc95298ff40239bfd9
-Author: Tony Murray
-Date: Fri Feb 24 11:02:19 2017 -0600
-
-    Redefining $firewalled
-
-commit 8bfbce68e503b2ddcdcc9619307d168b1c332df3
-Author: VVelox
-Date: Thu Feb 23 09:54:38 2017 -0600
-
-    if cache older than 360 seconds, don't use it
-
-commit 0a78888889d1e67e5696bb59e2c8fff4fd76f9ff
-Author: VVelox
-Date: Thu Feb 23 09:13:59 2017 -0600
-
-    use $f2bc for getting jail status now and not just only overall status
-
-commit 1e160b86e46ff7023ea13d8de13fe98e52e3b270
-Author: VVelox
-Date: Thu Feb 23 08:46:18 2017 -0600
-
-    don't reuse the variable $iptables
-
-commit 4b53918a7d09dc705c761c6eba3d0b68caca7159
-Author: VVelox
-Date: Thu Feb 23 08:45:04 2017 -0600
-
-    poke the user about setting a iptables path as well
-
-commit 90620a8558e0b164fb2a714c007b14b1ba1b1567
-Author: VVelox
-Date: Thu Feb 23 08:40:59 2017 -0600
-
-    misc. path cleanups
-
-commit 5ee0faa2c38e887b61b34fd4140ae23a8583d350
-Author: VVelox
-Date: Wed Feb 22 21:58:03 2017 -0600
-
-    make caching optional
-
-commit 4ffd86f8bdbe8825ac0793c1cf0b86a886656f34
-Author: VVelox
-Date: Wed Feb 22 21:42:53 2017 -0600
-
-    Update fail2ban
-
-commit 0227094c6fc9cf31d7d5f9a45a63e093b6e38aa5
-Author: VVelox
-Date: Mon Feb 20 13:18:50 2017 -0600
-
-    track both firewall and fail2ban-client
-
-commit 3932875ce04c1b51b8bf4c43c9934f2b29800acb
-Author: VVelox
-Date: Mon Feb 20 03:50:59 2017 -0600
-
-    correct a comment
-
-commit c367e9ff9d61f9cee619c19278a2bdc6d8fc7637
-Author: VVelox
-Date: Mon Feb 20 03:49:50 2017 -0600
-
-    now requires cron usage
-
-commit d90f3e879200108794beb7a2a4cc047f2938899e
-Author: VVelox
-Date: Sun Feb 19 23:41:51 2017 -0600
-
-    use fail2ban-client instead
-
-commit 710f38e8ff7cee520f9c7cc8ada421b6f32684c5
-Author: VVelox
-Date: Sat Feb 18 00:09:12 2017 -0600
-
-    move this over to perl and properly check iptables
-
-commit 6f76427952194ca6036181c31402887e72317308
-Author: VVelox
-Date: Fri Feb 17 23:08:53 2017 -0600
-
-    remove pointless exit
-
-commit 4b600ad2b41be4f338f1745320b3dbd64c5f5ba9
-Author: VVelox
-Date: Fri Feb 17 23:08:25 2017 -0600
-
-    whoops, forgot to remove \ as well
-
-commit bb4c67b217fc6f553c36861d4da0c5edfd61913c
-Merge: ff6ee0e e3f3bd3
-Author: Tony Murray
-Date: Fri Feb 17 22:42:57 2017 -0600
-
-    Merge pull request #86 from florianbeer/patch-1
-
-    Update shebang
-
-commit 6955e5d410f87be4423ac86111841721292911fd
-Author: VVelox
-Date: Fri Feb 17 10:33:02 2017 -0600
-
-    don't assume it appends the jail name
-
-commit 8b78f863d34f24858ca3d061df02efa6213d3b3b
-Author: VVelox
-Date: Fri Feb 17 10:32:27 2017 -0600
-
-    update to check fail2ban and f2b
-
-commit e3f3bd3efb36ee391430d61d363afa1e8d322ae3
-Author: Florian Beer
-Date: Fri Feb 17 16:37:00 2017 +0100
-
-    Update shebang
-
-    With the original shebang this script didn't work on Debian and Ubuntu machines. Using `/usr/bin/env bash` makes the script more portable.
-
-commit ff6ee0e2bc0e84ce1b0c4276713f8cb70d3154a2
-Author: Robert Verspuy
-Date: Fri Feb 17 01:46:13 2017 +0100
-
-    Fixed correct amount of pipeline symbols when degrees symbol is missing. (#85)
-
-    When the script is called through xinetd/check_mk (on my system), there is no degree symbol, but a space.
-    Changed the script to handle both correctly
-
-commit 21c953d11af41e1256ecf92070fc36b999b1e084
-Merge: 1ec8f20 58d1726
-Author: kitsune
-Date: Thu Feb 16 11:30:03 2017 -0600
-
-    Merge branch 'master' of https://github.com/librenms/librenms-agent
-
-commit 1ec8f204ee0c96ca0a9cf77dff7bdb0f79402462
-Author: VVelox
-Date: Thu Feb 16 10:50:46 2017 -0600
-
-    add fail2ban snmpd extend script
-
-commit 58d17264c7d57978a408b800084f153857d3b3f9
-Author: rockyluke
-Date: Thu Feb 16 16:12:01 2017 +0100
-
-    Use command -v instead binary path (#80)
-
-commit 60becc9b3a9429a42faae18440821b90ac6586fc
-Author: VVelox
-Date: Thu Feb 16 09:10:53 2017 -0600
-
-    add a single pool php-fpm net-snmpd extend poller (#83)
-
-commit 677bd4187e90211a70419e01fe97a809c6cabfd0
-Author: VVelox
-Date: Wed Feb 15 11:37:18 2017 -0600
-
-    add a single pool php-fpm net-snmpd extend poller
-
-commit 575956cae3ea5fcb014db3777a83e52026f95baa
-Author: crcro
-Date: Fri Jan 20 10:47:30 2017 +0200
-
-    snmp: exim-stats (#79)
-
-    * exim-stats frozen mails
-
-    * added total queue info
-
-commit d090686b722a1b0d8ded3ebfedec5c3b0f8a46a3
-Merge: ae43e5f dc60463
-Author: Tony Murray
-Date: Wed Dec 14 16:39:59 2016 -0600
-
-    Merge pull request #75 from bungojungo/master
-
-    Added fedora support to distro/os-updates
-
-commit ae43e5f493941aab81c96e3dc9378da434b55ce6
-Merge: 6c130ea de1f177
-Author: Tony Murray
-Date: Wed Dec 14 16:39:47 2016 -0600
-
-    Merge pull request #73 from paulgear/master
-
-    Make ups-nut work on Debian Jessie
-
-commit 6c130ea65e191d76a12b7d6d31d4726937b0f3e4
-Merge: e527768 3d061d2
-Author: Tony Murray
-Date: Wed Dec 14 16:34:10 2016 -0600
-
-    Merge pull request #76 from murrant/powerdns-python26
-
-    Support python2.6 for powerdns scripts
-
-commit 3d061d24079d0dcb7458a75b3d83d5aaba43acc9
-Author: Tony Murray
-Date: Wed Dec 14 16:27:15 2016 -0600
-
-    Support python2.6 for powerdns scripts
-    fixes #67
-
-commit dc604636bccd8779bd261b013af4872cad14e1f0
-Author: Jason Scalia
-Date: Wed Dec 7 22:11:48 2016 -0500
-
-    added fedora/dnf support
-
-commit 8b3ca2dac293ef132f1e48afa871b7158d692d90
-Author: Jason Scalia
-Date: Wed Dec 7 21:48:22 2016 -0500
-
-    Added fedora support
-
-commit de1f1775cc26aacb931141182c212de706b80b5f
-Author: Paul Gear
-Date: Sat Dec 3 14:16:54 2016 +1000
-
-    Restore previous default UPS name
-
-commit 465ec12dd4757baa95560b11f89a433f05fb7454
-Author: Paul Gear
-Date: Sat Dec 3 14:07:02 2016 +1000
-
-    Make ups-nut work on Debian Jessie
-
-    This script was broken on Debian Jessie (and probably
-    all other Debian-based distros, including Ubuntu).
-    This commit removes the hard-coding of paths and uses
-    $PATH per normal bash practice, and should work on a
-    wider range of shell variants.
-
-commit e52776889cea5e3379422ce4ffb7171bba4fbdf1
-Author: arrmo
-Date: Sat Nov 26 02:12:41 2016 -0600
-
-    Update to Distro (to support dd-wrt) (#72)
-
-commit c5fea261dea71cc9600936455bdf357cc062b220
-Author: Mathias B
-Date: Thu Nov 17 09:31:56 2016 +0100
-
-    Add Debian support (#71)
-
-    Before that only Ubuntu was supported, now Debian users can use this nice script too!
-
-commit 36ed3f008c6f2a0cc0be0cdb1ce9199a6e495dbc
-Author: Karl Shea
-Date: Sat Oct 8 15:26:07 2016 -0500
-
-    Agent: script to collect data from GPSD (#69)
-
-commit 91c251fd94d73f44e8757b242db82ed240f80a1d
-Author: Tuxis Internet Engineering V.O.F
-Date: Wed Oct 5 11:06:48 2016 +0200
-
-    fix: a dirty hack to prevent failing of stats when the cluster is rebuilding (#68)
-
-    because Ceph returns '-inf' which the json decompiler doesn't seem to get..
-
-commit dd365168a5eedf655d87e34e89664b191f855a15
-Author: crcro
-Date: Mon Oct 3 21:27:56 2016 +0300
-
-    fix conflict (#66)
-
-commit 58e16b794a0e33d0dd71d8c1f936bc8b25ad7ced
-Author: crcro
-Date: Sun Sep 25 16:28:37 2016 +0300
-
-    snmp-extend: os-updates (#65)
-
-    * reverted back to os-release checks, added arch pacman
-
-    * fixed file name
-
-commit 2699cde73fcbca9e556a762dcfd90c81e5561d26
-Author: crcro
-Date: Sun Sep 25 16:28:00 2016 +0300
-
-    snmp-extend: ups-apcups (#58)
-
-    * snmp-extend-ups-apcups
-
-    * rewrite of apc ups
-
-    * header fix
-
-    * header fix
-
-commit fa308bfe3f388f110e9df083d6b2c649fa69472e
-Author: crcro
-Date: Sat Sep 24 20:30:09 2016 +0300
-
-    snmp-extend: ups-nut update 1 (#63)
-
-    * new code for better matching, snmp-extend compliance
-
-    * removed unused vars
-
-    * extra fixes
-
-    * removed the need of tmp file
-
-    * removed charge_low, deemed useless by user
-
-    * removed values that are not plottable
-
-    * readded ds
-
-commit f63c4ab7bea382b08d0450b42a374db082ccd0ef
-Merge: c1c537e d9f36a8
-Author: Tony Murray
-Date: Mon Sep 12 22:01:51 2016 -0500
-
-    Merge pull request #61 from crcro/app-ntp-server-update-1
-
-    app: ntp-server update 1
-
-commit c1c537eea11fde70435e88b28b17292dc7c72f75
-Merge: 9a2716d 11a9fce
-Author: Tony Murray
-Date: Mon Sep 12 22:01:24 2016 -0500
-
-    Merge pull request #57 from crcro/snmp-extend-ups-nut
-
-    snmp-extend: ups-nut
-
-commit 9a2716dc83ad11462495e5ee804fb122eb402faa
-Merge: 87cc835 85ae77c
-Author: Tony Murray
-Date: Mon Sep 12 19:33:07 2016 -0500
-
-    Merge pull request #60 from crcro/remove-ntp-php
-
-    remove obsolete ntp scripts
-
-commit d9f36a84b13dd42361d24df11d6cb60c7b71f260
-Author: crcro
-Date: Mon Sep 12 12:48:17 2016 +0300
-
-    cleaner code
-
-commit 28cae5cff3b87532fd145c55de5b22aa0f4c6d05
-Author: crcro
-Date: Mon Sep 12 11:52:13 2016 +0300
-
-    better handling default case
-
-commit aeecb1621c8ed5863d5c7563ffc96047909b8cfa
-Author: crcro
-Date: Mon Sep 12 11:45:14 2016 +0300
-
-    header fix
-
-commit f48f4cc6e513773fac094d6b3115954deaeacbc7
-Author: crcro
-Date: Mon Sep 12 11:43:34 2016 +0300
-
-    update 1 ntp server
-
-commit 87cc835096ffdd4f8310b51e684f63aa7726d14d
-Author: crcro
-Date: Sat Sep 10 19:08:03 2016 +0300
-
-    os-updates.sh clean (#59)
-
-commit 85ae77c01c28308dd1f58b897aa7c8efe5b87386
-Author: crcro
-Date: Sat Sep 10 04:50:33 2016 +0300
-
-    remove obsolete ntpd-server.php
-
-commit 262f798a9737a5b62bef0ab7a657782a934b86ac
-Author: crcro
-Date: Sat Sep 10 04:48:55 2016 +0300
-
-    remove obsolete ntp-client.php script
-
-commit 11a9fcef62571e12168b8c1e9d1ac604b65c227d
-Author: crcro
-Date: Fri Sep 9 15:36:01 2016 +0300
-
-    snmp-extend-ups-nut
-
-commit 6128dc3c7133802ff66b199bc99289fb07761d6e
-Author: vectr0n
-Date: Fri Sep 9 02:16:28 2016 -0400
-
-    Update hddtemp to include hddtemp -w option (#56)
-
-    hddtemp gives inconsistent values in it's current state, after some debugging I was able to resolve the issue by passing -w to the hddtemp command, this will wake-up the drive if it is in a sleep state to gather information.
-
-commit 42bc0a07aab450e242471e271380fc29642b34e7
-Author: crcro
-Date: Wed Sep 7 22:37:31 2016 +0300
-
-    ntp-client app using shell only, tested with ntpq 4.2.8p8 (#54)
-
-commit 718d627cfdbad19848a384fc8eaba332dcaef504
-Author: crcro
-Date: Wed Sep 7 22:37:23 2016 +0300
-
-    app: ntp-server (#55)
-
-    * ntp-server app using shell only, tested with 4.2.8p8
-
-    * fix for higher stratum value
-
-    * change the description in comment to reflect latest webui push
-
-commit 351e5aa7bc6f1a79d51b1bd098cace659c1b0e9f
-Author: Tatermen
-Date: Sun Aug 28 20:06:04 2016 +0100
-
-    Freeswitch (#53)
-
-    feature: Added freeswitch support
-
-commit 839b518358d2acb488c3d7709e12392ee2b4c224
-Merge: 6a84755 561efa4
-Author: Neil Lathwood
-Date: Tue Aug 23 21:48:08 2016 +0100
-
-    Merge pull request #52 from murrant/move-scripts
-
-    SNMP extend scripts from the main repo
-
-commit 561efa41be5e22614912300ac9242582340e0662
-Author: Tony Murray
-Date: Mon Aug 22 21:35:13 2016 -0500
-
-    SNMP extend scripts from the main repo
-
-commit 6a84755105f651d03939310b4bd5a3cd85dc90dd
-Merge: c2e4c33 deb3683
-Author: Tony Murray
-Date: Sun Aug 21 19:58:13 2016 -0500
-
-    Merge pull request #51 from crcro/dhcp_pretty
-
-    rewrite dhcp-stats with loop
-
-commit deb36833f17d31ddd6176aa7dfc3767817e7c446
-Author: crcro
-Date: Mon Aug 22 01:45:23 2016 +0300
-
-    @paulgear recomandation
-
-commit c2e4c33abf5edbc0b7a5a00f8871f87d4d0f0513
-Merge: 672918c 9cd81f1
-Author: Tony Murray
-Date: Wed Aug 17 09:59:11 2016 -0500
-
-    Merge pull request #50 from OpcaoTelecom/unbound
-
-    Added unbound stats script
-
-commit 9cd81f1b930e2ed777ecf3bf6c7deff65df6e564
-Author: Alan Gregory
-Date: Wed Aug 17 09:36:39 2016 -0300
-
-    Added unbound stats script
-
-commit 672918c40fd87455398267cbf744a52362f738a7
-Merge: 9fe5444 87584e7
-Author: Tony Murray
-Date: Tue Aug 16 12:43:10 2016 -0500
-
-    Merge pull request #48 from crcro/raspberry-sensors
-
-    raspberry sensors
-
-commit 9fe5444738d086b1d33f92ca0e5905a14cd9c8a0
-Merge: c3afbf3 b6bdb9e
-Author: Tony Murray
-Date: Mon Aug 15 22:57:52 2016 -0500
-
-    Merge pull request #49 from murrant/ntp
-
-    Copy ntp scripts from the main repo.
-
-commit b6bdb9ea45d579becc8f858090e8b7d3e4c809ea
-Author: Tony Murray
-Date: Mon Aug 15 22:56:31 2016 -0500
-
-    Copy ntp scripts from the main repo.
-
-commit 87584e7ef79996db60cd62e64dd4cbaf53a0bac8
-Author: crcro
-Date: Sun Aug 14 17:43:27 2016 +0300
-
-    added snmp extend to get raspberry sensors
-
-commit c3afbf35bd81bff0dbcdb67e6657dd042ae67588
-Merge: 9623342 aa59548
-Author: Neil Lathwood
-Date: Tue Aug 9 19:47:51 2016 +0100
-
-    Merge pull request #45 from murrant/os-updates
-
-    Do not detect os, detect package managers
-
-commit 9623342554317ba55f7a987d18250e941a0a7c1f
-Merge: 0f5a115 7828777
-Author: Tony Murray
-Date: Tue Aug 9 13:08:41 2016 -0500
-
-    Merge pull request #46 from murrant/distro
-
-    Update distro to match the main repo file
-
-commit aa59548e0c3d6e5462cd2342ca671dc72430c3f1
-Author: Tony Murray
-Date: Tue Aug 9 12:50:23 2016 -0500
-
-    Do not detect os, detect package managers.
-    Add pacman support.
-
-commit 78287777696f6569dfe575770f1c47553fddd5a9
-Author: Tony Murray
-Date: Tue Aug 9 11:40:01 2016 -0500
-
-    Update distro to match the main repo file
-
-commit 0f5a1150f373371fc508e160e58c56cea5adbb99
-Merge: d6308e4 05fe3f8
-Author: Neil Lathwood
-Date: Thu Aug 4 18:53:10 2016 +0100
-
-    Merge pull request #40 from florianbeer/patch-1
-
-    Add Debian and make update call more robust
-
-commit d6308e4e1c04d69688d724c7c5c04ab0a3c94fbc
-Merge: 3740f3e 2accc28
-Author: Neil Lathwood
-Date: Wed Aug 3 21:09:08 2016 +0100
-
-    Merge pull request #42 from crcro/app-dhcp-stats
-
-    app-dhcp-stats snmp extend
-
-commit 2accc2848c44f8c2c33a455eb1a2e4ffe801921c
-Author: crcro
-Date: Wed Aug 3 22:09:55 2016 +0300
-
-    app-dhcp-stats snmp extend
-
-commit 05fe3f8cc195b797f69b0599ca2a2e198f0b5d0c
-Author: Florian Beer
-Date: Wed Aug 3 12:16:22 2016 +0200
-
-    Remove update call as this requires root
-
-    See discussion here https://github.com/librenms/librenms-agent/pull/40#issuecomment-237198796
-
-commit fac01628a07cf8083f91d9924ab8d63a9d4141db
-Author: Florian Beer
-Date: Wed Aug 3 04:51:35 2016 +0200
-
-    Add Debian and make update call more robust
-
-    - Debian based systems need to update the index before being able to report upgradable packages.
-    - Debian old-stable doesn't have `apt` yet and Ubuntu 14.04 emits the following warning when using `apt` in a script:
-    `WARNING: /usr/bin/apt does not have a stable CLI interface yet. Use with caution in scripts.`
-
-    By using `apt-get`, issuing a `update` call first and then counting the result of `grep 'Inst'`, this script now works on Debian 7, Debian 8, Ubuntu 14.04 and Ubuntu 16.04.
-
-commit 3740f3e147d7d97e10e4b8e77757ab67deb2bb84
-Merge: fb678cb 1964aec
-Author: Tony Murray
-Date: Tue Aug 2 20:35:16 2016 -0500
-
-    Merge pull request #38 from crcro/master
-
-    app: nfs-v3-stats
-
-commit fb678cb58df6277be2176e8a45a08af1d8dcb8d5
-Merge: 1d4c452 8d7e0df
-Author: Tony Murray
-Date: Mon Aug 1 11:26:27 2016 -0500
-
-    Merge pull request #39 from xbeaudouin/fix_distro_freebsd
-
-    Add FreeBSD detection to distro script
-
-commit 8d7e0df4eb1e35b776aa17d2e6c2ea202cc021a7
-Author: xavier.beaudouin
-Date: Mon Aug 1 11:15:52 2016 +0200
-
-    Add FreeBSD detection to distro script
-
-commit 1d4c4529ae907b343b7ffcb6eaeb94563ad2cb69
-Merge: dde18e9 760f9de
-Author: Paul Gear
-Date: Sat Jul 30 14:14:39 2016 +1000
-
-    Merge pull request #37 from xbeaudouin/master
-
-    Fix some bash scripts to work with FreeBSD
-
-commit 1964aece5e421391cc6cb589c668da0b5f2eeaee
-Author: crcro
-Date: Fri Jul 29 20:22:35 2016 +0300
-
-    added snmp extend script for os-updates application
-
-commit 57b6224254eb3992e09358df2d867573512f6809
-Author: crcro
-Date: Fri Jul 29 20:19:41 2016 +0300
-
-    added snmp extend script for nfs-v3-stats application
-
-commit 760f9de567a2876b0ad793979754661946b92c5c
-Author: xavier.beaudouin
-Date: Fri Jul 29 13:23:20 2016 +0200
-
-    /bin/bash => /usr/bin/env bash to allow freebsd agent work without patching each files
-
-commit dde18e98954c83fb52ae89083214814b5515a6c1
-Merge: 18f4006 9a3846c
-Author: Neil Lathwood
-Date: Tue Jul 26 20:46:20 2016 +0100
-
-    Merge pull request #36 from murrant/powerdns-recursor
-
-    PowerDNS Recursor agent
-
-commit 18f4006e09a1436013eee8ed77927585f714fc43
-Merge: f75fc9f fc07e27
-Author: Neil Lathwood
-Date: Tue Jul 26 20:45:38 2016 +0100
-
-    Merge pull request #33 from murrant/mysql-php7
-
-    Use mysqli instead of mysql
-
-commit f75fc9fce5a82c47e1303f5514eb0c421ad5cf93
-Merge: bfdf71d c70d12c
-Author: Tony Murray
-Date: Fri Jul 22 21:13:58 2016 -0500
-
-    Merge pull request #35 from murrant/duplicate-nfsstats
-
-    Remove duplicate nfsstats file
-
-commit 9a3846cac30515a7a01a44ecc9fc6e08e78df1f5
-Author: Tony Murray
-Date: Fri Jul 22 15:33:16 2016 -0500
-
-    PowerDNS Recursor agent
-
-commit c70d12c83c00e180da8a7e8281acdbd8e4741fa1
-Author: Tony Murray
-Date: Fri Jul 22 15:22:48 2016 -0500
-
-    Remove duplicate nfsstats file
-
-commit bfdf71d6995ced14ebd1e25042a60c7107a57dc0
-Merge: 41cb583 9501c2f
-Author: Tony Murray
-Date: Thu Jul 21 22:30:29 2016 -0500
-
-    Merge pull request #34 from murrant/nfs
-
-    Copy nfsstats script from main repo.
-
-commit 9501c2f4ffd4649982521c387b3d9dcab1de83d9
-Author: Tony Murray
-Date: Thu Jul 21 22:28:41 2016 -0500
-
-    Copy nfsstats script from main repo.
-    Send PR to remove scripts from the main repo.
-
-commit fc07e27c37c74d47c61aeac3cb966062f8da63a2
-Author: Tony Murray
-Date: Thu Jul 21 22:26:02 2016 -0500
-
-    Fix permissions
-
-commit 41cb5835ff3b0ca41a6392f19e43d590bd08d785
-Merge: db44c10 9bad4df
-Author: Tony Murray
-Date: Thu Jul 21 21:48:27 2016 -0500
-
-    Merge pull request #32 from tuxis-ie/proxmox-issue-28
-
-    Proxmox issue 28
-
-commit e80b025818f2f993f4443be3100c5bcd1331812a
-Author: Tony Murray
-Date: Thu Jul 21 21:31:25 2016 -0500
-
-    Use mysqli instead of mysql
-
-commit 9bad4dfb3e586d7892709284cccf17417cf5ec03
-Author: Mark Schouten
-Date: Wed Jul 13 15:06:57 2016 +0200
-
-    Something like this @einarjh ?
-
-commit 6d27c7edb3f4972a89fbf5641c4ece106b5dbc09
-Author: Mark Schouten
-Date: Mon Jul 11 17:06:14 2016 +0200
-
-    Wrap these calls in an eval to prevent it from dying if its a container instead of a qemu vm. Fixes #28
-
-commit db44c1070950c2e06565a39395bb09f09a023b4a
-Merge: d00ce4a 5b21301
-Author: Neil Lathwood
-Date: Sat Jul 9 19:12:59 2016 +0100
-
-    Merge pull request #31 from librenms/nfsstats
-
-    Added nfsstats.sh file
-
-commit 5b21301ecdb761fa0e32f9295c8ea60aef44f3a7
-Author: Neil Lathwood
-Date: Sat Jul 9 19:12:13 2016 +0100
-
-    Added nfsstats.sh file
-
-commit d00ce4a15a6b52753d108d1aeb2a768e7bfafe36
-Merge: c996b54 ca5a5a1
-Author: Neil Lathwood
-Date: Thu Jun 30 08:57:07 2016 +0100
-
-    Merge pull request #29 from murrant/powerdns-python3
-
-    Python3 fixes for powerdns agent. Compatible with python2.
-
-commit ca5a5a12c065eb67e48410ed09ff97630a76f6b8
-Author: Tony Murray
-Date: Wed Jun 29 19:52:10 2016 -0500
-
-    Python3 fixes for powerdns agent. Compatible with python2.
-
-commit c996b54e79b317785c58963abb6f71c31e61ba10
-Merge: fb7912b 8328d71
-Author: Neil Lathwood
-Date: Thu Jun 9 11:38:43 2016 +0100
-
-    Merge pull request #27 from murrant/rrdcached
-
-    Local script to collect stats from rrdcached
-
-commit 8328d71c0995fa8f6dc7c50de940fbe9b242fc41
-Author: Tony Murray
-Date: Wed Jun 8 20:35:19 2016 -0500
-
-    Local script to collect stats from rrdcached
-    Being able to connect to local unix sockets is the primary advantage of this.
-
-commit fb7912beda4181b23d8cbbbf500a1e7ed4527001
-Merge: 601ac84 8d856e2
-Author: Daniel Preussker
-Date: Thu May 5 13:32:02 2016 +0200
-
-    Merge pull request #25 from Exa-Omicron/master
-
-    Improved hddtemp agent module
-
-commit 8d856e27648b6df2d89af852ad1cd912319a965f
-Author: Robert Verspuy
-Date: Thu May 5 10:27:30 2016 +0200
-
-    Improved hddtemp agent module
-
-    I had some issues with the netcat / daemon implementation of the module.
-    netcat was stallingor sometimes netcat did not return the full output of hddtemp.
-    Running hddtemp directly without running it as a daemon is much more stable for me.
-
-    This new version also does not give any stdout output when hddtemp is not installed or when no disks can be found.
-    Running the script manually on a server does give stderr output for easy debugging.
-
-commit 601ac843c303d29b8149142a3fac967aaa4a2638
-Merge: 21817b6 1c13779
-Author: Tony Murray
-Date: Thu Apr 21 09:46:49 2016 -0500
-
-    Merge pull request #23 from librenms/freebsd-agent
-
-    Create check_mk_agent_freebsd
-
-commit 1c1377958e6c8cfd8ca7fd1fd4fcafdae92e1a1b
-Author: Neil Lathwood
-Date: Thu Apr 21 15:41:06 2016 +0100
-
-    Update check_mk_agent_freebsd
-
-commit cdd235a12a0bd4d0cbffe330048fd476aa5fddd5
-Author: Neil Lathwood
-Date: Thu Apr 21 15:39:59 2016 +0100
-
-    Create check_mk_agent_freebsd
-
-    Added freebsd agent
-
-commit 21817b6b36692bdca8fac8f3ee4a0258a2d2bcee
-Author: Tony Murray
-Date: Tue Mar 29 08:29:02 2016 -0500
-
-    Fix wording for systemd unit
-
-commit 88c4b00b19370bea3e597770793d90b24f24b10b
-Merge: dd2b95d 50a3c25
-Author: Neil Lathwood
-Date: Tue Mar 29 09:51:00 2016 +0100
-
-    Merge pull request #22 from murrant/master
-
-    Add systemd unit files
-
-commit 50a3c25115e501db4bd9fc97a8a8e3b7d81a635e
-Author: Tony Murray
-Date: Mon Mar 28 12:56:26 2016 -0500
-
-    Add systemd unit files
-
-commit dd2b95d8d2eb35bf1b3f0aea34d843af33f1c28e
-Merge: 6d0babe ff2bbe6
-Author: Neil Lathwood
-Date: Wed Nov 25 13:37:25 2015 +0000
-
-    Merge pull request #17 from f0o/upstream-snapshot
-
-    Snapshot upstream changes
-
-commit ff2bbe6882a9b79b93883980b0360f780fc24d76
-Author: f0o
-Date: Wed Nov 25 13:26:26 2015 +0000
-
-    Snapshot upstream changes
-
-commit 6d0babe0973d5cb8e2d35fd33e2f45e96ae96c15
-Merge: 8e847b9 12e31c1
-Author: Daniel Preussker
-Date: Wed Nov 25 13:28:17 2015 +0000
-
-    Merge pull request #16 from tuxis-ie/powerdns-support
-
-    Powerdns support
-
-commit 12e31c16c3c42e6d1c73a196978acf18e554e4b0
-Author: Mark Schouten
-Date: Mon Nov 23 14:10:17 2015 +0100
-
-    Add PowerDNS Authoritative Agent
-
-commit d16462bb5ac978cfd5b7cb213359989b2aabc791
-Author: Mark Schouten
-Date: Mon Nov 23 14:10:15 2015 +0100
-
-    Add PowerDNS Authoritative Agent
-
-commit 8e847b986aa3af50eb6c2302c3d1f0df158a47bd
-Merge: da7e40c 66d5028
-Author: Neil Lathwood
-Date: Wed Nov 11 17:17:24 2015 -0400
-
-    Merge pull request #15 from SaaldjorMike/mysql1
-
-    Moved mysql tag a bit up and added a newline to error msg.
-
-commit 66d502837d2643c59d7f87af076fd851b0ba12c1
-Author: Mike Rostermund
-Date: Wed Nov 11 14:21:49 2015 +0100
-
-    Moved mysql tag a bit up and added a newline to error msg.
-
-commit da7e40c43eb3155d3253c1eb695a78a0d9362a51
-Merge: f6f0079 0cc7b49
-Author: Neil Lathwood
-Date: Tue Nov 10 08:08:34 2015 -0400
-
-    Merge pull request #14 from tuxis-ie/ceph-support
-
-    Ceph support
-
-commit 0cc7b493978c06f0f3e73749bac1fbadf56c1be8
-Author: Mark Schouten
-Date: Tue Nov 10 11:00:58 2015 +0100
-
-    Add support for Ceph
-
-commit 9b4c3b34009a441df579051336bf3ea0647fe73c
-Author: Mark Schouten
-Date: Tue Nov 10 10:58:24 2015 +0100
-
-    Add support for Ceph
-
-commit f6f0079c6620ee3d75adf7511006006353903dd3
-Merge: d90957a 30b7651
-Author: Daniel Preussker
-Date: Wed Nov 4 13:42:29 2015 +0000
-
-    Merge pull request #13 from tuxis-ie/master
-
-    Crap, forgot this line...
-
-commit 30b7651e0142826202276a7bf9a31343d759c68a
-Author: Mark Schouten
-Date: Wed Nov 4 14:40:19 2015 +0100
-
-    Crap, forgot this line...
-
-commit d90957a0bc9e484056eaf26b206672b940fc7a9f
-Merge: 25fcd5a 6554087
-Author: Daniel Preussker
-Date: Wed Nov 4 13:35:33 2015 +0000
-
-    Merge pull request #12 from tuxis-ie/master
-
-    Fix the proxmox-agent for Proxmox VE 4.0
-
-commit 65540872e7a1215cfdca1d4b480670a67cf50a77
-Author: Mark Schouten
-Date: Wed Nov 4 14:30:21 2015 +0100
-
-    Fix the proxmox-agent for Proxmox VE 4.0
-
-commit 25fcd5ae76682006ed61aa09212738381968208f
-Merge: 20e2d22 b6bfbba
-Author: Paul Gear
-Date: Mon Oct 26 09:39:15 2015 +1000
-
-    Merge pull request #10 from librenms/laf-patch-1
-
-    Update distro to use env
-
-commit b6bfbbaf2c99945aceb92e9c7f950a53196c26fc
-Author: Neil Lathwood
-Date: Sun Oct 25 21:51:43 2015 +0000
-
-    Update distro to use env
-
-commit 20e2d220bde9e4edec76d00551c955274d06130c
-Merge: 87a20db 2b96259
-Author: Daniel Preussker
-Date: Fri Aug 28 09:07:49 2015 +0000
-
-    Merge pull request #7 from tuxis-ie/master
-
-    Add a proxmox-agent
-
-commit 2b9625953240ade30cf5ccef22a9293a016b819b
-Author: Mark Schouten
-Date: Fri Aug 28 10:52:04 2015 +0200
-
-    Add license
-
-commit d6795c60a171eba023b8c0e5b151376c6bcfa0d1
-Author: Mark Schouten
-Date: Fri Aug 28 10:49:24 2015 +0200
-
-    Add proxmox-agent
-
-commit fee2ed820bedb4613871aa9747b40121e3ae7879
-Author: Mark Schouten
-Date: Fri Aug 28 10:49:19 2015 +0200
-
-    Add proxmox-agent
-
-commit 87a20db845517070fdb2eec70d264e18bfde2871
-Merge: 8ae2b15 6493263
-Author: Daniel Preussker
-Date: Thu Aug 20 17:14:11 2015 +0000
-
-    Merge pull request #5 from tuxis-ie/master
-
-    Add files to create a Debian-package
-
-commit 64932630f0b67e876d0859df491705b11a71aa07
-Author: Mark Schouten
-Date: Thu Aug 20 14:18:10 2015 +0200
-
-    Do not include the README in the repodir
-
-commit 77864124dc119b0d89b1c852090e5f283b02123a
-Author: Mark Schouten
-Date: Thu Aug 20 10:34:50 2015 +0200
-
-    Add license
-
-commit 8ae2b1520b9e75583b87977427415c90256473e1
-Merge: 69551b0 63d3166
-Author: Daniel Preussker
-Date: Tue Aug 18 15:14:00 2015 +0000
-
-    Merge pull request #6 from librenms/f0o-mysql-host-logic
-
-    Fix MySQL Host Logic
-
-commit 63d31665cea2afaeadb8c8ba1b58b37605597b80
-Author: Daniel Preussker
-Date: Tue Aug 18 15:08:50 2015 +0000
-
-    Fix MySQL Host Logic
-
-commit 51270e24c19bed95030a41e3ab7828bb2330d68d
-Author: Mark Schouten
-Date: Mon Aug 17 16:58:33 2015 +0200
-
-    Also include distro in this package
-
-commit 2b4d17280dd4cbff1b497e2f6ffc17bf75020ea9
-Author: Mark Schouten
-Date: Mon Aug 17 16:57:48 2015 +0200
-
-    Strip comments (on Qemu boxes, this pollutes a lot
-
-commit 2833310e228e185e78ddbb96589f63e9d2d7b852
-Author: Mark Schouten
-Date: Mon Aug 17 16:50:26 2015 +0200
-
-    Enable dpkg and dmi by default
-
-commit 3cd06768b5487261ddde819aad6428a3183ffbbf
-Author: Mark Schouten
-Date: Mon Aug 17 16:48:22 2015 +0200
-
-    Place all plugins in a repo-dir and add mk_enplug to enable plugins
-
-commit 7954d5a085f0ffe31fa1becb6d3132ca63b46942
-Author: Mark Schouten
-Date: Mon Aug 17 16:19:04 2015 +0200
-
-    Add Conflicts/Provides and fix location for xinetd.d
-
-commit a7df28415a4645293835c79d15201539376be11d
-Author: Mark Schouten
-Date: Mon Aug 17 15:12:12 2015 +0200
-
-    Add files to create a Debian-package
-
-commit 69551b05e2673c899077a4539d1b6a6ec95b4290
-Merge: cfec5ec 4683c68
-Author: Daniel Preussker
-Date: Tue Jul 28 20:11:44 2015 +0000
-
-    Merge pull request #4 from alangregory/master
-
-    Added Snmpd.conf example and distro executable
-
-commit 4683c68d1d23f63ff9977c8a11543004cd4b8a34
-Author: Alan Gregory
-Date: Tue Jul 28 15:58:29 2015 -0300
-
-    Added Snmpd.conf example and distro executable
-
-commit cfec5ec65dc93a6bc9260eb4f1d3f9379d1c7287
-Author: Daniel Preussker
-Date: Tue Jun 9 17:34:00 2015 +0000
-
-    Delete README.md
-
-commit f1c9d6578a9f5df51047e5246624a96e55e043d4
-Merge: a47d95b 195a46c
-Author: Daniel Preussker
-Date: Mon May 18 13:07:29 2015 +0200
-
-    Merge pull request #1 from f0o/master
-
-    Initial commit
-
-commit 195a46c1e377f6729acf38f294153ef40147d2ff
-Author: f0o
-Date: Mon May 18 10:57:45 2015 +0000
-
-    Initial commit
-
-commit a47d95b58cc05e32a3feaa7f0022857da80ba58a
-Author: Daniel Preussker
-Date: Mon May 18 09:28:15 2015 +0000
-
-    Initial commit

From 6b96466473805951a35ad43738d559e8a3a9728e Mon Sep 17 00:00:00 2001
From: Anael Mobilia
Date: Tue, 2 Jun 2020 17:37:23 +0200
Subject: [PATCH 128/332] Add backupninja snmp script (#264)

* Add backupninja snmp script

* jsonify output

* Better codestyle & apply JSON requirements

* Optimize file using redis.py example

* Properly use of the LibreNMS json format

* typo
---
 snmp/backupninja.py | 39 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 39 insertions(+)
 create mode 100644 snmp/backupninja.py

diff --git a/snmp/backupninja.py b/snmp/backupninja.py
new file mode 100644
index 000000000..ce9408d67
--- /dev/null
+++ b/snmp/backupninja.py
@@ -0,0 +1,39 @@
+#!/usr/bin/env python3
+import io
+import re
+import os
+import json
+
+version = 1
+error = 0
+error_string = ''
+
+logfile = '/var/log/backupninja.log'
+
+backupninja_datas = {
+    'last_actions': 0,
+    'last_fatal': 0,
+    'last_error': 0,
+    'last_warning': 0}
+
+if not os.path.isfile(logfile):
+    error_string = 'file unavailable'
+    error = 1
+else:
+
+    with io.open(logfile, 'r') as f:
+        for line in reversed(list(f)):
+            match = re.search('^(.*) [a-zA-Z]*: FINISHED: ([0-9]+) actions run. ([0-9]+) fatal. ([0-9]+) error. ([0-9]+) warning.$', line)
+            if match:
+                backupninja_datas['last_actions'] = int(match.group(2))
+                backupninja_datas['last_fatal'] = int(match.group(3))
+                backupninja_datas['last_error'] = int(match.group(4))
+                backupninja_datas['last_warning'] = int(match.group(5))
+                break
+
+output = {'version': version,
+          'error': error,
+          'errorString': error_string,
+          'data': backupninja_datas}
+
+print(json.dumps(output))
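Note on the patch above: the missing-logfile branch originally ended in a bare "break" at module level, which is a Python syntax error; it is rewritten here as an if/else so the script still emits the error JSON. A quick way to sanity-check the FINISHED pattern from the shell, using a made-up log line (the exact backupninja log format is an assumption here, not taken from a live install):

    # snmpd.conf entry would be something like: extend backupninja /etc/snmp/backupninja.py  (path assumed)
    echo 'Jun 02 03:00:01 Info: FINISHED: 4 actions run. 0 fatal. 0 error. 1 warning.' \
      | grep -Ec ': FINISHED: [0-9]+ actions run. [0-9]+ fatal. [0-9]+ error. [0-9]+ warning.$'
    # prints 1 when the line matches the script's regex, 0 otherwise
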
From dc9695641b0e2b9ff160bf3b4d6916896e704c91 Mon Sep 17 00:00:00 2001
From: SourceDoctor
Date: Sun, 14 Jun 2020 23:46:58 +0200
Subject: [PATCH 129/332] Application sudo correction (#306)

---
 snmp/pureftpd.py  | 2 +-
 snmp/raspberry.sh | 2 --
 2 files changed, 1 insertion(+), 3 deletions(-)

diff --git a/snmp/pureftpd.py b/snmp/pureftpd.py
index 1c768e1ba..a2c75672e 100755
--- a/snmp/pureftpd.py
+++ b/snmp/pureftpd.py
@@ -34,7 +34,7 @@
         output_data['errorString'] = "Configfile Error: '%s'" % e
 
 
-output = os.popen('sudo ' + pureftpwho_cmd + ' ' + pureftpwho_args).read()
+output = os.popen(pureftpwho_cmd + ' ' + pureftpwho_args).read()
 
 data = {}
 
diff --git a/snmp/raspberry.sh b/snmp/raspberry.sh
index a2f924102..df4ffcea4 100755
--- a/snmp/raspberry.sh
+++ b/snmp/raspberry.sh
@@ -19,8 +19,6 @@ getStatusMPG4='codec_enabled MPG4'
 getStatusMJPG='codec_enabled MJPG'
 getStatusWMV9='codec_enabled WMV9'
 
-[ $(id -u) -eq 0 ] || picmd="sudo $picmd"
-
 $picmd $getTemp | $pised 's|[^0-9.]||g'
 $picmd $getVoltsCore | $pised 's|[^0-9.]||g'
 $picmd $getVoltsRamC | $pised 's|[^0-9.]||g'

From 2cf11d0484ee80ebd64ac7e6edc414c89efad058 Mon Sep 17 00:00:00 2001
From: Slashdoom <5092581+slashdoom@users.noreply.github.com>
Date: Wed, 17 Jun 2020 09:26:08 +1200
Subject: [PATCH 130/332] add check_mrpe script (#192)

* add check_mrpe script

* recommit

Co-authored-by: slashdoom
---
 agent-local/check_mrpe | 60 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 60 insertions(+)
 create mode 100644 agent-local/check_mrpe

diff --git a/agent-local/check_mrpe b/agent-local/check_mrpe
new file mode 100644
index 000000000..e80c62d70
--- /dev/null
+++ b/agent-local/check_mrpe
@@ -0,0 +1,60 @@
+#!/usr/bin/env bash
+
+VERSION="0.1 (beta)"
+
+IFS=$'\n\t'
+GLOBIGNORE="*"
+
+BIN_NC="$(command -v nc)"
+BIN_SED="$(command -v sed)"
+
+pval="6556"
+
+EXITCODE=0
+
+while getopts "Vha:H:p:" opt; do
+  case $opt in
+    V) printf "check_mrpe v$VERSION\n"
+       exit $EXITCODE
+       ;;
+    h) hflag=1
+       ;;
+    a) aflag=1
+       aval="$OPTARG"
+       ;;
+    H) Hflag=1
+       Hval="$OPTARG"
+       ;;
+    p) pflag=1
+       pval="$OPTARG"
+       ;;
+    \?) hflag=1
+        EXITCODE=1
+        ;;
+  esac
+done
+
+if ! [ "$Hflag" ] && ! [ "$hflag" ]; then
+  printf "Error: Host not specified\n"
+  hflag=1
+  EXITCODE=1
+fi
+
+if [ "$hflag" ]; then
+  printf "check_mrpe:\n"
+  printf "Usage:\n"
+  printf "  check_mrpe [-H host] [-p port] [-a app]\n"
+  exit $EXITCODE
+fi
+
+if [ "$aflag" ];
+then
+  SED_CMD="s/\((.*) $aval [0-9] \)\(.*\)/\2/p"
+else
+  SED_CMD="s/(.*) \(.*\) [0-9] \(.*\)/\1 \2/p"
+fi
+
+for i in `$BIN_NC --recv-only $Hval $pval 2>&1 | $BIN_SED '/^<<
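(The tail of check_mrpe and the start of the next patch header were lost where the <<<...>>> section markers were stripped from the source; the surviving text is kept as-is above.) The getopts block gives the tool a nagios-style CLI; a typical invocation against an agent listening on the default check_mk port, with a placeholder host address:

    ./check_mrpe -H 192.0.2.10 -p 6556 -a apache
    # -V prints the version, -h the usage text; omitting -H is an error
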
[ "$hflag" ]; then + printf "Error: Host not specified\n" + hflag=1 + EXITCODE=1 +fi + +if [ "$hflag" ]; then + printf "check_mrpe:\n" + printf "Usage:\n" + printf " check_mrpe [-H host] [-p port] [-a app]\n" + exit $EXITCODE +fi + +if [ "$aflag" ]; +then + SED_CMD="s/\((.*) $aval [0-9] \)\(.*\)/\2/p" +else + SED_CMD="s/(.*) \(.*\) [0-9] \(.*\)/\1 \2/p" +fi + +for i in `$BIN_NC --recv-only $Hval $pval 2>&1 | $BIN_SED '/^<<>>/,/^<< Date: Wed, 17 Jun 2020 00:04:29 +0200 Subject: [PATCH 131/332] DHCP Agent Update (#303) --- snmp/dhcp-status.sh | 36 ------------ snmp/dhcp.py | 135 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 135 insertions(+), 36 deletions(-) delete mode 100755 snmp/dhcp-status.sh create mode 100755 snmp/dhcp.py diff --git a/snmp/dhcp-status.sh b/snmp/dhcp-status.sh deleted file mode 100755 index a629d0a32..000000000 --- a/snmp/dhcp-status.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/bash -################################################################ -# copy this script to somewhere like /opt and make chmod +x it # -# edit your snmpd.conf add the below line and restart snmpd # -# extend dhcpstats /opt/dhcp-status.sh # -################################################################ -FILE_DHCP='/var/lib/dhcp/db/dhcpd.leases' -BIN_CAT='/usr/bin/cat' -BIN_GREP='/usr/bin/grep' -BIN_TR='/usr/bin/tr' -BIN_SED='/usr/bin/sed' -BIN_SORT='/usr/bin/sort' -BIN_WC='/usr/bin/wc' - -CONFIGFILE=/etc/snmp/dhcp-status.conf -if [ -f $CONFIGFILE ] ; then - . $CONFIGFILE -fi - -DHCP_LEASES='^lease' -DHCP_ACTIVE='^lease|binding state active' -DHCP_EXPIRED='^lease|binding state expired' -DHCP_RELEASED='^lease|binding state released' -DHCP_ABANDONED='^lease|binding state abandoned' -DHCP_RESET='^lease|binding state reset' -DHCP_BOOTP='^lease|binding state bootp' -DHCP_BACKUP='^lease|binding state backup' -DHCP_FREE='^lease|binding state free' -NO_ERROR='[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3} binding' - -$BIN_CAT $FILE_DHCP | $BIN_GREP $DHCP_LEASES | $BIN_SORT -u | $BIN_WC -l - -for state in "$DHCP_ACTIVE" "$DHCP_EXPIRED" "$DHCP_RELEASED" "$DHCP_ABANDONED" "$DHCP_RESET" "$DHCP_BOOTP" "$DHCP_BACKUP" "$DHCP_FREE" -do - $BIN_GREP -E "$state" $FILE_DHCP | $BIN_TR '\n' '|' | $BIN_SED 's/ {| //g' | $BIN_TR '|' '\n' | $BIN_GREP -E "$NO_ERROR" | $BIN_SORT -u | $BIN_WC -l -done diff --git a/snmp/dhcp.py b/snmp/dhcp.py new file mode 100755 index 000000000..12937370e --- /dev/null +++ b/snmp/dhcp.py @@ -0,0 +1,135 @@ +#!/usr/bin/env python3 + +import subprocess +import json +from os.path import isfile + +CONFIGFILE = '/etc/snmp/dhcp.json' + +# Configfile is needed /etc/snmp/dhcp.json +# +# {"leasefile": "/var/lib/dhcp/dhcpd.leases" +# } +# + +error = 0 +error_string = '' +version = 2 + +with open(CONFIGFILE, 'r') as json_file: + try: + configfile = json.load(json_file) + except json.decoder.JSONDecodeError as e: + error = 1 + error_string = "Configfile Error: '%s'" % e + + +if not error: + leases = {'total': 0, + 'active': 0, + 'expired': 0, + 'released': 0, + 'abandoned': 0, + 'reset': 0, + 'bootp': 0, + 'backup': 0, + 'free': 0, + } + if not isfile(configfile['leasefile']): + error = 1 + error_string = 'Lease File not found' + else: + with open(configfile['leasefile']) as fp: + line = fp.readline() + while line: + line = fp.readline() + + if 'rewind' not in line: + if line.startswith('lease'): + leases['total'] += 1 + elif 'binding state active' in line: + leases['active'] += 1 + elif 'binding state expired' in line: + leases['expired'] += 1 + elif 'binding state released' in line: + 
From 5770fac54ba8fa3887b017dc5d8e83caff517485 Mon Sep 17 00:00:00 2001
From: N
Date: Wed, 17 Jun 2020 22:22:13 +0100
Subject: [PATCH 132/332] Fix up smart script to also parse some values from
 NVMe drives (#308)

---
 snmp/smart | 185 ++++++++++++++++++++++++++++++-----------------------
 1 file changed, 106 insertions(+), 79 deletions(-)

diff --git a/snmp/smart b/snmp/smart
index 605340fb9..5793b90db 100755
--- a/snmp/smart
+++ b/snmp/smart
@@ -283,105 +283,132 @@ foreach my $line ( @disks ){
 		'233'=>'null',
 		'9'=>'null',
 		);
+
+	my @outputA;
+
+	if($output =~ /NVMe Log/)
+	{
+		# we have an NVMe drive with annoyingly different output
+		my %mappings=(
+			'Temperature' => 194,
+			'Power Cycles' => 12,
+			'Power On Hours' => 9,
+		);
+		foreach(split(/\n/, $output ))
+		{
+			if(/:/)
+			{
+				my ($key, $val) = split(/:/);
+				$val =~ s/^\s+|\s+$|\D+//g;
+				if(exists($mappings{$key}))
+				{
+					$IDs{$mappings{$key}} = $val;
+				}
+			}
+		}
 
-	my @outputA=split( /\n/, $output );
-	my $outputAint=0;
-	while ( defined($outputA[$outputAint]) ) {
-		my $line=$outputA[$outputAint];
-		$line=~s/^ +//;
-		$line=~s/ +/ /g;
-
-		if ( $line =~ /^[0123456789]+ / ) {
-			my @lineA=split(/\ /, $line, 10);
-			my $raw=$lineA[9];
-			my $id=$lineA[0];
-
-			# Crucial SSD
-			# 202, Percent_Lifetime_Remain, same as 231, SSD Life Left
-			if ( $id == 202 ) {
-				$IDs{231}=$raw;
-			}
+	}
+	else
+	{
+		@outputA=split( /\n/, $output );
+		my $outputAint=0;
+		while ( defined($outputA[$outputAint]) ) {
+			my $line=$outputA[$outputAint];
+			$line=~s/^ +//;
+			$line=~s/ +/ /g;
+
+			if ( $line =~ /^[0123456789]+ / ) {
+				my @lineA=split(/\ /, $line, 10);
+				my $raw=$lineA[9];
+				my $id=$lineA[0];
+
+				# Crucial SSD
+				# 202, Percent_Lifetime_Remain, same as 231, SSD Life Left
+				if ( $id == 202 ) {
+					$IDs{231}=$raw;
+				}
 
-			# single int raw values
-			if (
-				( $id == 5 ) ||
-				( $id == 10 ) ||
-				( $id == 173 ) ||
-				( $id == 177 ) ||
-				( $id == 183 ) ||
-				( $id == 184 ) ||
-				( $id == 187 ) ||
-				( $id == 196 ) ||
-				( $id == 197 ) ||
-				( $id == 198 ) ||
-				( $id == 199 ) ||
-				( $id == 231 ) ||
-				( $id == 233 )
-			) {
-				$IDs{$id}=$raw;
-			}
+				# single int raw values
+				if (
+					( $id == 5 ) ||
+					( $id == 10 ) ||
+					( $id == 173 ) ||
+					( $id == 177 ) ||
+					( $id == 183 ) ||
+					( $id == 184 ) ||
+					( $id == 187 ) ||
+					( $id == 196 ) ||
+					( $id == 197 ) ||
+					( $id == 198 ) ||
+					( $id == 199 ) ||
+					( $id == 231 ) ||
+					( $id == 233 )
+				) {
+					$IDs{$id}=$raw;
+				}
 
-			# 9, power on hours
-			if ( $id == 9 ) {
-				my @runtime=split(/[\ h]/, $raw);
-				$IDs{$id}=$runtime[0];
-			}
+				# 9, power on hours
+				if ( $id == 9 ) {
+					my @runtime=split(/[\ h]/, $raw);
+					$IDs{$id}=$runtime[0];
+				}
 
-			# 188, Command_Timeout
-			if ( $id == 188 ) {
-				my $total=0;
-				my @rawA=split( /\ /, $raw );
-				my $rawAint=0;
-				while ( defined( $rawA[$rawAint] ) ) {
-					$total=$total+$rawA[$rawAint];
-					$rawAint++;
-				}
-				$IDs{$id}=$total;
-			}
+				# 188, Command_Timeout
+				if ( $id == 188 ) {
+					my $total=0;
+					my @rawA=split( /\ /, $raw );
+					my $rawAint=0;
+					while ( defined( $rawA[$rawAint] ) ) {
+						$total=$total+$rawA[$rawAint];
+						$rawAint++;
+					}
+					$IDs{$id}=$total;
+				}
 
-			# 190, airflow temp
-			# 194, temp
-			if (
-				( $id == 190 ) ||
-				( $id == 194 )
-			) {
-				my ( $temp )=split(/\ /, $raw);
-				$IDs{$id}=$temp;
+				# 190, airflow temp
+				# 194, temp
+				if (
+					( $id == 190 ) ||
+					( $id == 194 )
+				) {
+					my ( $temp )=split(/\ /, $raw);
+					$IDs{$id}=$temp;
+				}
 			}
-		}
 
-		# SAS Wrapping
-		# Section by Cameron Munroe (munroenet[at]gmail.com)
+			# SAS Wrapping
+			# Section by Cameron Munroe (munroenet[at]gmail.com)
 
-		# Elements in Grown Defect List.
-		# Marking as 5 Reallocated_Sector_Ct
+			# Elements in Grown Defect List.
+			# Marking as 5 Reallocated_Sector_Ct
 
-		if ($line =~ "Elements in grown defect list:"){
+			if ($line =~ "Elements in grown defect list:"){
 
-			my @lineA=split(/\ /, $line, 10);
-			my $raw=$lineA[5];
+				my @lineA=split(/\ /, $line, 10);
+				my $raw=$lineA[5];
 
-			# Reallocated Sector Count ID
-			$IDs{5}=$raw;
+				# Reallocated Sector Count ID
+				$IDs{5}=$raw;
 
-		}
+			}
 
-		# Current Drive Temperature
-		# Marking as 194 Temperature_Celsius
+			# Current Drive Temperature
+			# Marking as 194 Temperature_Celsius
 
-		if ($line =~ "Current Drive Temperature:"){
+			if ($line =~ "Current Drive Temperature:"){
 
-			my @lineA=split(/\ /, $line, 10);
-			my $raw=$lineA[3];
+				my @lineA=split(/\ /, $line, 10);
+				my $raw=$lineA[3];
 
-			# Temperature C ID
-			$IDs{194}=$raw;
+				# Temperature C ID
+				$IDs{194}=$raw;
 
-		}
+			}
 
-		# End of SAS Wrapper
+			# End of SAS Wrapper
 
-		$outputAint++;
+			$outputAint++;
+		}
 	}
 
 	#get the selftest logs
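The NVMe branch above keys on the literal "NVMe Log" marker and then strips everything non-numeric from the value side of each "key: value" line (the s/^\s+|\s+$|\D+//g). The same normalization can be sketched in shell against made-up smartctl-style output, just to illustrate what survives the strip:

    printf 'Temperature: 38 Celsius\nPower Cycles: 27\nPower On Hours: 1,874\n' \
      | awk -F': *' '{ gsub(/[^0-9]/, "", $2); print $1, "->", $2 }'
    # Temperature -> 38, Power Cycles -> 27, Power On Hours -> 1874 (comma removed)
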
From a6380230cc10463ae5c6db07050d79270656e46a Mon Sep 17 00:00:00 2001
From: yon2004
Date: Thu, 18 Jun 2020 17:42:28 +1000
Subject: [PATCH 133/332] Update ups-nut.sh (#298)

* Update ups-nut.sh

Added Support for the ups.status and the 14 supported states as per https://networkupstools.org/docs/developer-guide.chunked/ar01s04.html#_status_data

* Update ups-nut.sh
---
 snmp/ups-nut.sh | 15 +++++++++++++++
 1 file changed, 15 insertions(+)

diff --git a/snmp/ups-nut.sh b/snmp/ups-nut.sh
index cc04c8468..18f431469 100755
--- a/snmp/ups-nut.sh
+++ b/snmp/ups-nut.sh
@@ -23,3 +23,18 @@ do
 		echo "Unknown"
 	fi
 done
+
+for value in "ups\.status:[A-Z ]{0,}OL" "ups\.status:[A-Z ]{0,}OB" "ups\.status:[A-Z ]{0,}LB" "ups\.status:[A-Z ]{0,}HB" "ups\.status:[A-Z ]{0,}RB" "ups\.status:[A-Z ]{0,}CHRG" "ups\.status:[A-Z ]{0,}DISCHRG" "ups\.status:[A-Z ]{0,}BYPASS" "ups\.status:[A-Z ]{0,}CAL" "ups\.status:[A-Z ]{0,}OFF" "ups\.status:[A-Z ]{0,}OVER" "ups\.status:[A-Z ]{0,}TRIM" "ups\.status:[A-Z ]{0,}BOOST" "ups\.status:[A-Z ]{0,}FSD"
+do
+	UNKNOWN=$(echo $TMP | grep -Eo "ups\.status:")
+	if [ -z "$UNKNOWN" ]; then
+		echo "Unknown"
+	else
+		OUT=$(echo $TMP | grep -Eo "$value")
+		if [ -n "$OUT" ]; then
+			echo "1"
+		else
+			echo "0"
+		fi
+	fi
+done
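Each regex in the list above turns one NUT status token into a 0/1 gauge, always emitted in the same order so the poller can map lines to flags. The logic can be exercised without a UPS by faking the collected status line ($TMP normally comes from upsc earlier in the script; the sample value here is hypothetical):

    TMP='ups.status:OL CHRG'
    echo $TMP | grep -Eo 'ups\.status:[A-Z ]{0,}OL' >/dev/null && echo 1 || echo 0   # -> 1 (on line)
    echo $TMP | grep -Eo 'ups\.status:[A-Z ]{0,}OB' >/dev/null && echo 1 || echo 0   # -> 0 (not on battery)
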
grep -Eo "ups\.status:") + if [ -z "$UNKNOWN" ]; then + echo "Unknown" + else + OUT=$(echo $TMP | grep -Eo "$value") + if [ -n "$OUT" ]; then + echo "1" + else + echo "0" + fi + fi +done From a8cb5b0112848010660cca4ebca26313fc625caf Mon Sep 17 00:00:00 2001 From: Mark Westerterp Date: Tue, 30 Jun 2020 23:56:49 +0200 Subject: [PATCH 134/332] Make this script Python 3 compatible (#311) --- agent-local/ceph | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/agent-local/ceph b/agent-local/ceph index 9a83d3989..1301f79ec 100755 --- a/agent-local/ceph +++ b/agent-local/ceph @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # Copyright (C) 2015 Mark Schouten # @@ -18,12 +18,12 @@ from subprocess import check_output import json def cephversion(): - cephv = check_output(["/usr/bin/ceph", "version"]).replace('ceph version ', '') + cephv = check_output(["/usr/bin/ceph", "version"]).decode("utf-8").replace('ceph version ', '') major, minor = cephv.split('.')[0:2] return [int(major), int(minor)] def cephdf(): - cephdf = check_output(["/usr/bin/ceph", "-f", "json", "df"]).replace('-inf', '0') + cephdf = check_output(["/usr/bin/ceph", "-f", "json", "df"]).decode("utf-8").replace('-inf', '0') s = json.loads(cephdf) try: @@ -50,7 +50,7 @@ def cephdf(): def osdperf(): global major - osdperf = check_output(["/usr/bin/ceph", "-f", "json", "osd", "perf"]).replace('-inf', '0') + osdperf = check_output(["/usr/bin/ceph", "-f", "json", "osd", "perf"]).decode("utf-8").replace('-inf', '0') if major > 13: for o in json.loads(osdperf)['osdstats']['osd_perf_infos']: @@ -61,33 +61,33 @@ def osdperf(): def poolstats(): global major - poolstats = check_output(["/usr/bin/ceph", "-f", "json", "osd", "pool", "stats"]).replace('-inf', '0') + poolstats = check_output(["/usr/bin/ceph", "-f", "json", "osd", "pool", "stats"]).decode("utf-8").replace('-inf', '0') for p in json.loads(poolstats): - try: + try: r = p['client_io_rate']['read_bytes_sec'] except: r = 0 - try: + try: w = p['client_io_rate']['write_bytes_sec'] except: w = 0 - try: + try: if major > 11: o = p['client_io_rate']['read_op_per_sec'] + p['client_io_rate']['write_op_per_sec'] else: o = p['client_io_rate']['op_per_sec'] except: o = 0 - + print("%s:%i:%i:%i" % (p['pool_name'], o, w, r)) major, minor = cephversion() -print "<<>>" -print "" +print ("<<>>") +print ("") poolstats() -print "" +print ("") osdperf() -print "" +print ("") cephdf() From e022601f3259da4c9b4db01233d9d07d34b57360 Mon Sep 17 00:00:00 2001 From: FingerlessGloves Date: Wed, 1 Jul 2020 18:27:21 +0100 Subject: [PATCH 135/332] Update proxmox versions detection on Distro (#316) Update proxmox versions detection on Distro Before change "Debian GNU/Linux 10" After change "Debian 10.3/PVE 6.1-8" --- snmp/distro | 1 + 1 file changed, 1 insertion(+) diff --git a/snmp/distro b/snmp/distro index 75fa74d2a..6771e8fd5 100755 --- a/snmp/distro +++ b/snmp/distro @@ -62,6 +62,7 @@ elif [ "${OS}" = "Linux" ] ; then fi if [ -f /usr/bin/pveversion ]; then DIST="${DIST}/PVE `/usr/bin/pveversion | cut -d '/' -f 2`" + IGNORE_OS_RELEASE=1 fi elif [ -f /etc/gentoo-release ] ; then From a6f943fc834de9130ab7fbc200ad2267f0047eab Mon Sep 17 00:00:00 2001 From: SourceDoctor Date: Sat, 4 Jul 2020 13:13:32 +0200 Subject: [PATCH 136/332] FreeRadius optional Configfile (#317) --- snmp/freeradius.sh | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/snmp/freeradius.sh b/snmp/freeradius.sh index 088acf3c1..11d343585 100644 --- a/snmp/freeradius.sh +++ 
From e022601f3259da4c9b4db01233d9d07d34b57360 Mon Sep 17 00:00:00 2001
From: FingerlessGloves
Date: Wed, 1 Jul 2020 18:27:21 +0100
Subject: [PATCH 135/332] Update proxmox versions detection on Distro (#316)

Update proxmox versions detection on Distro
Before change "Debian GNU/Linux 10"
After change "Debian 10.3/PVE 6.1-8"
---
 snmp/distro | 1 +
 1 file changed, 1 insertion(+)

diff --git a/snmp/distro b/snmp/distro
index 75fa74d2a..6771e8fd5 100755
--- a/snmp/distro
+++ b/snmp/distro
@@ -62,6 +62,7 @@ elif [ "${OS}" = "Linux" ] ; then
 	fi
 	if [ -f /usr/bin/pveversion ]; then
 		DIST="${DIST}/PVE `/usr/bin/pveversion | cut -d '/' -f 2`"
+		IGNORE_OS_RELEASE=1
 	fi
 
 elif [ -f /etc/gentoo-release ] ; then

From a6f943fc834de9130ab7fbc200ad2267f0047eab Mon Sep 17 00:00:00 2001
From: SourceDoctor
Date: Sat, 4 Jul 2020 13:13:32 +0200
Subject: [PATCH 136/332] FreeRadius optional Configfile (#317)

---
 snmp/freeradius.sh | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/snmp/freeradius.sh b/snmp/freeradius.sh
index 088acf3c1..11d343585 100644
--- a/snmp/freeradius.sh
+++ b/snmp/freeradius.sh
@@ -1,5 +1,7 @@
 #!/usr/bin/env bash
 
+CONFIGFILE=/etc/snmp/freeradius.conf
+
 # Set 0 for SNMP extend; set to 1 for Check_MK agent
 AGENT=0
 
@@ -8,6 +10,10 @@ RADIUS_SERVER='localhost'
 RADIUS_PORT='18121'
 RADIUS_KEY='adminsecret'
 
+if [ -f $CONFIGFILE ]; then
+	. $CONFIGFILE
+fi
+
 # Default radclient access request, shouldn't need to be changed
 RADIUS_STATUS_CMD='Message-Authenticator = 0x00, FreeRADIUS-Statistics-Type = 31, Response-Packet-Type = Access-Accept'

From 90c00c78d07630a505bb0e747c9f9ebc2f02dfa4 Mon Sep 17 00:00:00 2001
From: SourceDoctor
Date: Sat, 4 Jul 2020 17:44:40 +0200
Subject: [PATCH 137/332] Configurable env binary path (#318)

---
 snmp/ntp-server.sh | 26 ++++++++++++++++++--------
 1 file changed, 18 insertions(+), 8 deletions(-)

diff --git a/snmp/ntp-server.sh b/snmp/ntp-server.sh
index bbf5c737a..5871d0377 100755
--- a/snmp/ntp-server.sh
+++ b/snmp/ntp-server.sh
@@ -10,14 +10,24 @@
 # If you are unsure, which to set, run this script and make sure that
 # the JSON output variables match that in "ntpq -c rv".
 #
-BIN_NTPD='/usr/bin/env ntpd'
-BIN_NTPQ='/usr/bin/env ntpq'
-BIN_NTPDC='/usr/bin/env ntpdc'
-BIN_GREP='/usr/bin/env grep'
-BIN_TR='/usr/bin/env tr'
-BIN_CUT='/usr/bin/env cut'
-BIN_SED="/usr/bin/env sed"
-BIN_AWK='/usr/bin/env awk'
+
+CONFIGFILE=/etc/snmp/ntp-server.conf
+
+BIN_ENV='/usr/bin/env'
+
+if [ -f $CONFIGFILE ] ; then
+	. $CONFIGFILE
+fi
+
+BIN_NTPD="$BIN_ENV ntpd"
+BIN_NTPQ="$BIN_ENV ntpq"
+BIN_NTPDC="$BIN_ENV ntpdc"
+BIN_GREP="$BIN_ENV grep"
+BIN_TR="$BIN_ENV tr"
+BIN_CUT="$BIN_ENV cut"
+BIN_SED="$BIN_ENV sed"
+BIN_AWK="$BIN_ENV awk"
+
 NTPQV="p11"
 ################################################################
 # Don't change anything unless you know what are you doing     #
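Note that the freeradius.sh hunk originally read "if [ -f $CONFIGFILE ]; do ... done", which is a bash syntax error; it is corrected to then/fi above. With the BIN_ENV indirection introduced in the ntp-server patch, a host whose tools live outside the default PATH only needs a small override file (the override value below is hypothetical):

    # /etc/snmp/ntp-server.conf
    BIN_ENV='/usr/local/bin/env'

The same sourced-configfile pattern applies to freeradius.sh, e.g. a minimal /etc/snmp/freeradius.conf overriding the defaults shown in that script:

    RADIUS_SERVER='127.0.0.1'
    RADIUS_PORT='18121'
    RADIUS_KEY='adminsecret'
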
Set target, which is name returned for interface +hostname=`/bin/uname -n` +if [ $1 ]; then + interfaces=$1 + target=$1 +else + interfaces=`cat $scriptdir/wlInterfaces.txt | cut -f 1 -d","` + target=wlan +fi + +# Count associated devices +count=0 +for interface in $interfaces +do + new=`/usr/sbin/iw dev $interface station dump | /bin/grep Station | /usr/bin/cut -f 2 -s -d" " | /usr/bin/wc -l` + count=$(( $count + $new )) +done + +# Return snmp result +/bin/echo $count diff --git a/snmp/Openwrt/wlFrequency.sh b/snmp/Openwrt/wlFrequency.sh new file mode 100755 index 000000000..119fb54af --- /dev/null +++ b/snmp/Openwrt/wlFrequency.sh @@ -0,0 +1,19 @@ +#!/bin/sh + +# wlFrequency.sh +# Returns wlFrequency, in MHz (not channel number) +# Arguments: targed interface + +# Check number of arguments +if [ $# -ne 1 ]; then + /bin/echo "Usage: wlFrequency.sh interface" + /bin/echo "Missing targeted interface, exiting." + exit 1 +fi + +# Get hostname, extract frequency +hostname=`/bin/uname -n` +frequency=`/usr/sbin/iw dev $1 info | /bin/grep channel | /usr/bin/cut -f 2- -s -d" " | /usr/bin/cut -f 2- -s -d"(" | /usr/bin/cut -f 1 -s -d" "` + +# Return snmp result +/bin/echo $frequency diff --git a/snmp/Openwrt/wlInterfaces.txt b/snmp/Openwrt/wlInterfaces.txt new file mode 100755 index 000000000..bfe882e0e --- /dev/null +++ b/snmp/Openwrt/wlInterfaces.txt @@ -0,0 +1,2 @@ +wlan0,wl-2.4G +wlan1,wl-5.0G diff --git a/snmp/Openwrt/wlNoiseFloor.sh b/snmp/Openwrt/wlNoiseFloor.sh new file mode 100755 index 000000000..ab404364d --- /dev/null +++ b/snmp/Openwrt/wlNoiseFloor.sh @@ -0,0 +1,20 @@ +#!/bin/sh + +# wlNoiseFloor.sh +# Returns wlNoiseFloor, in dBm +# Arguments: targed interface + +# Check number of arguments +if [ $# -ne 1 ]; then + /bin/echo "Usage: wlNoiseFloor.sh interface" + /bin/echo "Missing targeted interface, exiting." + exit 1 +fi + +# Get hostname, extract noise floor. Note, all associated stations have the same value, so just grab the first one +# Use tail, not head (i.e. last line, not first), as head exits immediately, breaks the pipe to cut! +hostname=`/bin/uname -n` +noise=`/usr/bin/iwinfo $1 assoclist | /usr/bin/cut -s -d "/" -f 2 | /usr/bin/cut -s -d "(" -f 1 | /usr/bin/cut -s -d " " -f 2 | /usr/bin/tail -1` + +# Return snmp result +/bin/echo $noise diff --git a/snmp/Openwrt/wlRate.sh b/snmp/Openwrt/wlRate.sh new file mode 100755 index 000000000..76ab0c881 --- /dev/null +++ b/snmp/Openwrt/wlRate.sh @@ -0,0 +1,32 @@ +#!/bin/sh + +# wlRate.sh +# Returns wlRate, bit rate in Mbit/s +# Arguments: +# $1: targeted interface +# $2: direction (tx or rx) +# $3: desired result (sum, avg, min, max) + +# Check number of arguments +if [ $# -ne 3 ]; then + /bin/echo "Usage: wlRate.sh interface direction result" + /bin/echo "Incorrect script usage, exiting." + exit 1 +fi + +# Get hostname, calculate result. Sum just for debug, and have to return integer +# => If not integer (e.g. 2.67e+07), LibreNMS will drop the exponent (result, 2.67 bits/sec!) 
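+# Example invocation (a sketch; the interface name is an assumption for your
+# device, see wlInterfaces.txt): ./wlRate.sh wlan0 tx avg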
+hostname=`/bin/uname -n` +ratelist=`/usr/sbin/iw dev $1 station dump | /bin/grep "$2 bitrate" | /usr/bin/cut -f 2 -s -d" "` +if [ "$3" == "sum" ]; then + result=`/bin/echo "$ratelist" | /usr/bin/awk -F ':' '{sum += $2} END {printf "%d\n", 1000000*sum}'` +elif [ "$3" == "avg" ]; then + result=`/bin/echo "$ratelist" | /usr/bin/awk -F ':' '{sum += $2} END {printf "%d\n", 1000000*sum/NR}'` +elif [ "$3" == "min" ]; then + result=`/bin/echo "$ratelist" | /usr/bin/awk -F ':' 'NR == 1 || $2 < min {min = $2} END {printf "%d\n", 1000000*min}'` +elif [ "$3" == "max" ]; then + result=`/bin/echo "$ratelist" | /usr/bin/awk -F ':' 'NR == 1 || $2 > max {max = $2} END {printf "%d\n", 1000000*max}'` +fi + +# Return snmp result +echo $result diff --git a/snmp/Openwrt/wlSNR.sh b/snmp/Openwrt/wlSNR.sh new file mode 100755 index 000000000..337d55979 --- /dev/null +++ b/snmp/Openwrt/wlSNR.sh @@ -0,0 +1,30 @@ +#!/bin/sh + +# wlSNR.sh +# Returns wlSNR, Signal-to-Noise ratio in dB +# Arguments: +# $1: targeted interface +# $2: desired result (sum, avg, min, max) + +# Check number of arguments +if [ $# -ne 2 ]; then + /bin/echo "Usage: wlSNR.sh interface result" + /bin/echo "Incorrect script usage, exiting." + exit 1 +fi + +# Get hostname, calculate result. Sum just for debug, and return integer (safest / easiest) +hostname=`/bin/uname -n` +snrlist=`/usr/bin/iwinfo $1 assoclist | /usr/bin/cut -s -d "/" -f 2 | /usr/bin/cut -s -d "(" -f 2 | /usr/bin/cut -s -d " " -f 2 | /usr/bin/cut -s -d ")" -f 1` +if [ "$2" == "sum" ]; then + result=`/bin/echo "$snrlist" | /usr/bin/awk -F ':' '{sum += $1} END {printf "%d\n", sum}'` +elif [ "$2" == "avg" ]; then + result=`/bin/echo "$snrlist" | /usr/bin/awk -F ':' '{sum += $1} END {printf "%d\n", sum/NR}'` +elif [ "$2" == "min" ]; then + result=`/bin/echo "$snrlist" | /usr/bin/awk -F ':' 'NR == 1 || $1 < min {min = $1} END {printf "%d\n", min}'` +elif [ "$2" == "max" ]; then + result=`/bin/echo "$snrlist" | /usr/bin/awk -F ':' 'NR == 1 || $1 > max {max = $1} END {printf "%d\n", max}'` +fi + +# Return snmp result +echo $result From 99e9a83ac0a7ff37b1dddea85c35b63a9a9c084d Mon Sep 17 00:00:00 2001 From: SourceDoctor Date: Sat, 11 Jul 2020 17:12:01 +0200 Subject: [PATCH 139/332] pi-hol script update (#319) --- snmp/pi-hole | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/snmp/pi-hole b/snmp/pi-hole index 99309a198..67e35d613 100644 --- a/snmp/pi-hole +++ b/snmp/pi-hole @@ -2,11 +2,17 @@ set -euo pipefail IFS=$'\n\t' +CONFIGFILE='/etc/snmp/pi-hole.conf' + API_AUTH_KEY="" API_URL="localhost/admin/api.php" URL_READ_ONLY="?summaryRaw" URL_QUERY_TYPE="?getQueryTypes&auth=" +if [ -f $CONFIGFILE ]; then + . 
$CONFIGFILE +fi + #/ Description: BASH script to get Pi-hole stats #/ Examples: ./pi-hole-stats.sh From 8f731c83d0710f0192417da57d3b669cdcc1d2fc Mon Sep 17 00:00:00 2001 From: SourceDoctor Date: Tue, 14 Jul 2020 01:10:37 +0200 Subject: [PATCH 140/332] Fix parsing Error on MDADM if increasing disc count (#320) --- snmp/mdadm | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/mdadm b/snmp/mdadm index 5265fe69f..f4ccf6c0d 100755 --- a/snmp/mdadm +++ b/snmp/mdadm @@ -36,7 +36,7 @@ if [ -d /dev/md ] ; then fi RAID_DEV_LIST=$($LS $RAID/slaves/) RAID_LEVEL=$($CAT $RAID/md/level) - RAID_DISC_COUNT=$($CAT $RAID/md/raid_disks) + RAID_DISC_COUNT=$($CAT $RAID/md/raid_disks| cut -d' ' -f1) RAID_STATE=$($CAT $RAID/md/array_state) RAID_ACTION=$($CAT $RAID/md/sync_action) RAID_DEGRADED=$($CAT $RAID/md/degraded) From aecfa970e74c321c38dc2f50bf071223c1c43fe9 Mon Sep 17 00:00:00 2001 From: Slashdoom <5092581+slashdoom@users.noreply.github.com> Date: Sat, 18 Jul 2020 11:59:17 +1200 Subject: [PATCH 141/332] Update check_mrpe (#321) Include exit code when proper -a flag is specified. --- agent-local/check_mrpe | 32 +++++++++++++++++++++++++++----- 1 file changed, 27 insertions(+), 5 deletions(-) diff --git a/agent-local/check_mrpe b/agent-local/check_mrpe index e80c62d70..6cd3a044c 100644 --- a/agent-local/check_mrpe +++ b/agent-local/check_mrpe @@ -1,6 +1,6 @@ #!/usr/bin/env bash -VERSION="0.1 (beta)" +VERSION="0.2 (beta)" IFS=$'\n\t' GLOBIGNORE="*" @@ -10,12 +10,12 @@ BIN_SED="$(command -v sed)" pval="6556" -EXITCODE=0 +EXITCODE=3 while getopts "Vha:H:p:" opt; do case $opt in V) printf "check_mrpe v$VERSION\n" - exit $EXITCODE + exit 0 ;; h) hflag=1 ;; @@ -29,7 +29,7 @@ while getopts "Vha:H:p:" opt; do pval="$OPTARG" ;; \?) hflag=1 - EXITCODE=1 + EXITCODE=0 ;; esac done @@ -50,11 +50,33 @@ fi if [ "$aflag" ]; then SED_CMD="s/\((.*) $aval [0-9] \)\(.*\)/\2/p" + SED_CMD_STATUS="s/(.*) $aval \([0-9]\) \(.*\)/\1/p" else SED_CMD="s/(.*) \(.*\) [0-9] \(.*\)/\1 \2/p" fi for i in `$BIN_NC --recv-only $Hval $pval 2>&1 | $BIN_SED '/^<<>>/,/^<< Date: Sat, 18 Jul 2020 19:37:45 +0300 Subject: [PATCH 142/332] Use sh in ups-nut.sh shebang (#315) since the `ups-nut.sh` script is not using any bash specific syntax. This change removes unneeded dependency on bash. --- snmp/ups-nut.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/ups-nut.sh b/snmp/ups-nut.sh index 18f431469..7e3d8a15a 100755 --- a/snmp/ups-nut.sh +++ b/snmp/ups-nut.sh @@ -1,4 +1,4 @@ -#!/usr/bin/env bash +#!/bin/sh ################################################################ # Instructions: # # 1. copy this script to /etc/snmp/ and make it executable: # From 506405d63a495eb68cb2dec33c8765e5a0986f11 Mon Sep 17 00:00:00 2001 From: Martin Date: Sat, 18 Jul 2020 18:40:16 +0200 Subject: [PATCH 143/332] Fix missing compatibility to Pi-hole V5 API (#322) To minimize the impact of future API changes in terms of data order or additional data being appended, required elements are selected directly with the jq tool in the expected order. 
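For illustration, the new selection can be exercised by hand. A sketch, assuming the script's default unauthenticated summary endpoint on localhost:

    curl -s "localhost/admin/api.php?summaryRaw" | jq '.domains_being_blocked, .dns_queries_today, .ads_blocked_today, .ads_percentage_today, .unique_domains, .queries_forwarded, .queries_cached'

jq prints each selected value on its own line in the order given, so fields added or reordered by a future API version no longer shift the output.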
--- snmp/pi-hole | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/snmp/pi-hole b/snmp/pi-hole index 67e35d613..fc5f52d77 100644 --- a/snmp/pi-hole +++ b/snmp/pi-hole @@ -71,10 +71,10 @@ debug() { exportdata() { # domains_being_blocked / dns_query_total / ads_blocked_today / ads_percentage_today # unique_domains / queries_forwarded / queries_cached - GET_STATS=$(curl -s $API_URL$URL_READ_ONLY | jq '.[]') + GET_STATS=$(curl -s $API_URL$URL_READ_ONLY | jq '.domains_being_blocked, .dns_queries_today, .ads_blocked_today, .ads_percentage_today, .unique_domains, .queries_forwarded, .queries_cached') echo $GET_STATS | tr " " "\n" # A / AAAA / PTR / SRV - GET_QUERY_TYPE=$(curl -s $API_URL$URL_QUERY_TYPE$API_AUTH_KEY | jq '.[][]') + GET_QUERY_TYPE=$(curl -s $API_URL$URL_QUERY_TYPE$API_AUTH_KEY | jq '.[]["A (IPv4)", "AAAA (IPv6)", "PTR", "SRV"]') echo $GET_QUERY_TYPE | tr " " "\n" } From 712e31a9c87c547dfa4dab7cc0ca3ce5648da03d Mon Sep 17 00:00:00 2001 From: Yoan Tanguy Date: Mon, 20 Jul 2020 22:18:22 +0200 Subject: [PATCH 144/332] Fix bash check from directory to block type (#295) * Fix bash check from directory to block type https://github.com/librenms/librenms-agent/issues/278 * Update mdadm fix array existance check Co-authored-by: SourceDoctor --- snmp/mdadm | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/snmp/mdadm b/snmp/mdadm index f4ccf6c0d..ecbc8e510 100755 --- a/snmp/mdadm +++ b/snmp/mdadm @@ -14,10 +14,11 @@ VERSION=1 ERROR_CODE=0 ERROR_STRING="" -OUTPUT_DATA='['\ +OUTPUT_DATA='[' -if [ -d /dev/md ] ; then - for ARRAY_BLOCKDEVICE in $(ls -1 /dev/md/*) ; do +# use 'ls' command to check if md blocks exist +if $LS /dev/md?* 1> /dev/null 2>&1 ; then + for ARRAY_BLOCKDEVICE in $($LS -1 /dev/md?*) ; do RAID="/sys/block/"$($BASENAME $($REALPATH $ARRAY_BLOCKDEVICE)) # ignore arrays with no slaves @@ -29,7 +30,7 @@ if [ -d /dev/md ] ; then continue fi - if [[ $($BASENAME $ARRAY_BLOCKDEVICE) = [[:digit:]] ]]; then + if [[ $($BASENAME $ARRAY_BLOCKDEVICE) = [[:digit:]] ]] ; then RAID_NAME=$($BASENAME $RAID) else RAID_NAME=$($BASENAME $ARRAY_BLOCKDEVICE) @@ -101,11 +102,13 @@ if [ -d /dev/md ] ; then '","sync_completed":"'$RAID_SYNC_COMPLETED\ '"},' - OUTPUT_DATA=$OUTPUT_DATA$ARRAY_DATA + OUTPUT_DATA=$OUTPUT_DATA$ARRAY_DATA done -fi -OUTPUT_DATA=${OUTPUT_DATA: : -1}']' + OUTPUT_DATA=${OUTPUT_DATA: : -1}']' +else + OUTPUT_DATA=${OUTPUT_DATA}']' +fi OUTPUT='{"data":'$OUTPUT_DATA\ ',"error":"'$ERROR_CODE\ From 399d2630f4ab9299b75ec220773f6b907a1fc205 Mon Sep 17 00:00:00 2001 From: Hans Erasmus Date: Tue, 18 Aug 2020 20:37:44 +0200 Subject: [PATCH 145/332] Rewrite Apache SNMP Agent to Python3 (#326) * Updated to work with python3 Thanks to @murrant this file is now python3 compatible. * Formatted for PEP8 specs * Updated except according to request * Updated according to request As requested [here](https://github.com/librenms/librenms/pull/12009) by SourceDoctor. 
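A minimal smoke test for the rewrite. A sketch, assuming the script is installed at /etc/snmp/apache-stats.py, mod_status answers at http://localhost/server-status?auto as the script expects, and the cache directory matches the script's default (all paths are assumptions for your install):

    mkdir -p /var/cache/librenms
    python3 /etc/snmp/apache-stats.py

and an snmpd.conf extend entry along these lines:

    extend apache /etc/snmp/apache-stats.py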
--- snmp/apache-stats.py | 112 +++++++++++++++++++++---------------------- 1 file changed, 54 insertions(+), 58 deletions(-) diff --git a/snmp/apache-stats.py b/snmp/apache-stats.py index f098a8c55..1421c20e3 100755 --- a/snmp/apache-stats.py +++ b/snmp/apache-stats.py @@ -1,10 +1,10 @@ -#!/usr/bin/python -# Copyright (C) 2009 Glen Pitt-Pladdy +#!/usr/bin/python3 +# Copyright(C) 2009 Glen Pitt-Pladdy # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. +# of the License, or(at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -17,73 +17,69 @@ # # # - -CACHETIME = 30 -CACHEFILE = '/var/cache/librenms/apache-snmp' - -# check for cache file newer CACHETIME seconds ago import os import time -if os.path.isfile ( CACHEFILE ) \ - and ( time.time() - os.stat ( CACHEFILE )[8] ) < CACHETIME: - # use cached data - f = open ( CACHEFILE, 'r' ) - data = f.read() - f.close() +import urllib.request + +cachetime = 30 +cachefile = '/var/cache/librenms/apache-snmp' + +# Check for a cache file newer than cachetime seconds ago + +if os.path.isfile(cachefile) and (time.time() - os.stat(cachefile)[8]) < cachetime: + # Use cached data + f = open(cachefile, 'r') + data = f.read() + f.close() else: - # grab the status URL (fresh data) - # need debian package python-urlgrabber - from urlgrabber import urlread - data = urlread ( 'http://localhost/server-status?auto', - user_agent = 'SNMP Apache Stats' ) - # write file - f = open ( CACHEFILE+'.TMP.'+`os.getpid()`, 'w' ) - f.write ( data ) - f.close() - os.rename ( CACHEFILE+'.TMP.'+`os.getpid()`, CACHEFILE ) + # Grab the status URL (fresh data), needs package urllib3 + data = urllib.request.urlopen("http://localhost/server-status?auto").read().decode('UTF-8') + # Write file + f = open(cachefile+'.TMP.'+str(os.getpid()), 'w') + f.write(data) + f.close() + os.rename(cachefile+'.TMP.'+str(os.getpid()), cachefile) # dice up the data -scoreboardkey = [ '_', 'S', 'R', 'W', 'K', 'D', 'C', 'L', 'G', 'I', '.' 
] +scoreboardkey = ['_', 'S', 'R', 'W', 'K', 'D', 'C', 'L', 'G', 'I', '.'] params = {} for line in data.splitlines(): - fields = line.split( ': ' ) - if len(fields) <= 1: - continue # "localhost" as first line cause out of index error - elif fields[0] == 'Scoreboard': - # count up the scoreboard into states - states = {} - for state in scoreboardkey: - states[state] = 0 - for state in fields[1]: - states[state] += 1 - elif fields[0] == 'Total kBytes': - # turn into base (byte) value - params[fields[0]] = int(fields[1])*1024 - elif len(fields) > 1: - # just store everything else - params[fields[0]] = fields[1] + fields = line.split(': ') + if len(fields) <= 1: + continue # "localhost" as first line causes out of index error + elif fields[0] == 'Scoreboard': + # count up the scoreboard into states + states = {} + for state in scoreboardkey: + states[state] = 0 + for state in fields[1]: + states[state] += 1 + elif fields[0] == 'Total kBytes': + # turn into base(byte) value + params[fields[0]] = int(fields[1])*1024 + elif len(fields) > 1: + # just store everything else + params[fields[0]] = fields[1] -# output the data in order (this is because some platforms don't have them all) +# output the data in order(this is because some platforms don't have them all) dataorder = [ - 'Total Accesses', - 'Total kBytes', - 'CPULoad', - 'Uptime', - 'ReqPerSec', - 'BytesPerSec', - 'BytesPerReq', - 'BusyWorkers', - 'IdleWorkers' + 'Total Accesses', + 'Total kBytes', + 'CPULoad', + 'Uptime', + 'ReqPerSec', + 'BytesPerSec', + 'BytesPerReq', + 'BusyWorkers', + 'IdleWorkers' ] for param in dataorder: - try: - print params[param] -# print param - except: # not all Apache's have all stats - print 'U' + try: + print(params[param]) + except KeyError: # not all Apache's have all stats + print('U') # print the scoreboard for state in scoreboardkey: - print states[state] -# print state + print(states[state]) From 241595592fe7261ca2c530d62804230c55e65cdc Mon Sep 17 00:00:00 2001 From: arrmo Date: Fri, 4 Sep 2020 16:29:03 -0500 Subject: [PATCH 146/332] Update distro to support BeagleBoard (#328) --- snmp/distro | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/snmp/distro b/snmp/distro index 6771e8fd5..3f0d087f6 100755 --- a/snmp/distro +++ b/snmp/distro @@ -64,6 +64,10 @@ elif [ "${OS}" = "Linux" ] ; then DIST="${DIST}/PVE `/usr/bin/pveversion | cut -d '/' -f 2`" IGNORE_OS_RELEASE=1 fi + if [ -f /etc/dogtag ]; then + DIST=`cat /etc/dogtag` + IGNORE_OS_RELEASE=1 + fi elif [ -f /etc/gentoo-release ] ; then DIST="Gentoo" From 5118c51aab77218a6d2d114d2ebd02dff01dbb18 Mon Sep 17 00:00:00 2001 From: priiduonu Date: Sat, 5 Sep 2020 02:00:52 +0300 Subject: [PATCH 147/332] get correct ARM frequency reading in raspberry.sh (#325) Fixes #324 --- snmp/raspberry.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/snmp/raspberry.sh b/snmp/raspberry.sh index df4ffcea4..41f2902a0 100755 --- a/snmp/raspberry.sh +++ b/snmp/raspberry.sh @@ -24,8 +24,8 @@ $picmd $getVoltsCore | $pised 's|[^0-9.]||g' $picmd $getVoltsRamC | $pised 's|[^0-9.]||g' $picmd $getVoltsRamI | $pised 's|[^0-9.]||g' $picmd $getVoltsRamP | $pised 's|[^0-9.]||g' -$picmd $getFreqArm | $pised 's/frequency(45)=//g' -$picmd $getFreqCore | $pised 's/frequency(1)=//g' +$picmd $getFreqArm | $pised 's/frequency([0-9]*)=//g' +$picmd $getFreqCore | $pised 's/frequency([0-9]*)=//g' $picmd $getStatusH264 | $pised 's/H264=//g' $picmd $getStatusMPG2 | $pised 's/MPG2=//g' $picmd $getStatusWVC1 | $pised 's/WVC1=//g' From 173a76129031f26de8cd77dd371b716984db3576 
Mon Sep 17 00:00:00 2001 From: Avinash Kumar Date: Fri, 18 Sep 2020 20:18:51 +0530 Subject: [PATCH 148/332] Opensips, Icecast and Voipmon snmp scipts (#331) * Icecast script added * Opensips script added * Voipmon script added * Open files added to icecast stats Co-authored-by: avinash kumar --- snmp/icecast-stats.sh | 14 ++++++++++++++ snmp/opensips-stats.sh | 17 +++++++++++++++++ snmp/voipmon-stats.sh | 13 +++++++++++++ 3 files changed, 44 insertions(+) create mode 100644 snmp/icecast-stats.sh create mode 100644 snmp/opensips-stats.sh create mode 100644 snmp/voipmon-stats.sh diff --git a/snmp/icecast-stats.sh b/snmp/icecast-stats.sh new file mode 100644 index 000000000..c93c6bca0 --- /dev/null +++ b/snmp/icecast-stats.sh @@ -0,0 +1,14 @@ +#!/bin/bash +# Author: Sharad Kumar + +used_memory=$(ps -U icecast -o rsz | awk 'FNR==2{print}') +cpu_load=$(ps -U icecast -o %cpu | awk 'FNR==2{print}') + +pid=$(pidof icecast) +total_files=$(ls -l /proc/${pid}/fd | wc -l) + +echo "Used Memory="$used_memory +echo "CPU Load="$cpu_load +echo "Open files="$total_files + +exit diff --git a/snmp/opensips-stats.sh b/snmp/opensips-stats.sh new file mode 100644 index 000000000..e8fe2b249 --- /dev/null +++ b/snmp/opensips-stats.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# Author: Sharad Kumar + +total_memory=$(opensipsctl fifo get_statistics total_size | awk '{print "Total Memory=" $2}') +used_memory=$(opensipsctl fifo get_statistics real_used_size | awk '{print "Used Memory=" $2}') +free_memory=$(opensipsctl fifo get_statistics free_size | awk '{print "Free Memory=" $2}') +load_average=$(ps -C opensips -o %cpu | awk '{sum += $1} END {print "Load Average=" sum}') +total_files=$(lsof -c opensips | wc -l) + + +echo $total_memory +echo $used_memory +echo $free_memory +echo $load_average +echo "Open files="$total_files + +exit diff --git a/snmp/voipmon-stats.sh b/snmp/voipmon-stats.sh new file mode 100644 index 000000000..671a04af9 --- /dev/null +++ b/snmp/voipmon-stats.sh @@ -0,0 +1,13 @@ +#!/bin/bash +# Author: Sharad Kumar + +used_memory=$(ps -C voipmonitor -o rsz | awk 'FNR==2 {print}') +cpu_load=$(ps -C voipmonitor -o %cpu | awk 'FNR==2 {print}') + +pid=$(pidof voipmonitor) +total_files=$(ls -l /proc/${pid}/fd | wc -l) + +echo "Used Memory="$used_memory +echo "CPU Load="$cpu_load +echo "Open files="$total_files +exit From 2455752eeb905055fdc5a802d8290e1c3ebf648d Mon Sep 17 00:00:00 2001 From: Jason Cheng <30381035+jasoncheng7115@users.noreply.github.com> Date: Mon, 21 Sep 2020 07:17:35 +0800 Subject: [PATCH 149/332] Add Proxmox MG identification (#329) Added distro support to identify Proxmox MG (PMG) version. --- snmp/distro | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/snmp/distro b/snmp/distro index 3f0d087f6..ee29dc678 100755 --- a/snmp/distro +++ b/snmp/distro @@ -64,6 +64,11 @@ elif [ "${OS}" = "Linux" ] ; then DIST="${DIST}/PVE `/usr/bin/pveversion | cut -d '/' -f 2`" IGNORE_OS_RELEASE=1 fi + if [ -f /usr/bin/pmgversion ]; then + # pmgversion requires root permissions to run, please add NOPASSWD setting to visudo. 
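+        # For example, a visudo entry of this shape (the run-as user is an
+        # assumption, often snmp or Debian-snmp depending on distribution):
+        #   snmp ALL=(ALL) NOPASSWD: /usr/bin/pmgversion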
+ DIST="${DIST}/PMG `sudo /usr/bin/pmgversion | cut -d '/' -f 2`" + IGNORE_OS_RELEASE=1 + fi if [ -f /etc/dogtag ]; then DIST=`cat /etc/dogtag` IGNORE_OS_RELEASE=1 From 045a506d1c8e9cfa41964888f1acaaec4be06108 Mon Sep 17 00:00:00 2001 From: Clark Chen <9372896+clarkchentw@users.noreply.github.com> Date: Wed, 23 Sep 2020 19:11:08 -0500 Subject: [PATCH 150/332] Add support for Alpine (apk) (#332) * Add support for Alpine (apk) * Minor fix Add back #!/usr/bin/env bash --- snmp/osupdate | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/snmp/osupdate b/snmp/osupdate index 8a391fa2b..6e6f8f533 100755 --- a/snmp/osupdate +++ b/snmp/osupdate @@ -26,6 +26,8 @@ BIN_PACMAN='/usr/bin/env pacman' CMD_PACMAN='-Sup' BIN_PKG='/usr/sbin/pkg' CMD_PKG=' audit -q -F' +BIN_APK='/sbin/apk' +CMD_APK=' version' ################################################################ # Don't change anything unless you know what are you doing # @@ -78,6 +80,14 @@ elif command -v pkg &>/dev/null ; then else echo "0"; fi +elif command -v apk &>/dev/null ; then + # Alpine + UPDATES=`$BIN_APK $CMD_APK | $BIN_WC $CMD_WC` + if [ $UPDATES -ge 2 ]; then + echo $(($UPDATES-1)); + else + echo "0"; + fi else echo "0"; fi From 144dff530bee2f70e8b2735c1503455e1983040a Mon Sep 17 00:00:00 2001 From: Laurent Cheylus Date: Sat, 10 Oct 2020 12:49:14 +0200 Subject: [PATCH 151/332] Fix distro for FreeBSD/FreeNAS (#335) Check if /etc/version file present for FreeBSD OS Without fix on FreeBSD: ./distro cat: /etc/version: No such file or directory FreeBSD 12.1-RELEASE-p5 amd64 GENERIC --- snmp/distro | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/snmp/distro b/snmp/distro index ee29dc678..f08606e7e 100755 --- a/snmp/distro +++ b/snmp/distro @@ -139,9 +139,11 @@ elif [ "${OS}" = "Darwin" ] ; then fi elif [ "${OS}" = "FreeBSD" ] ; then - DIST=$(cat /etc/version | cut -d'-' -f 1) - if [ "${DIST}" = "FreeNAS" ]; then - OSSTR=`cat /etc/version | cut -d' ' -f 1` + if [ -f /etc/version ] ; then + DIST=$(cat /etc/version | cut -d'-' -f 1) + if [ "${DIST}" = "FreeNAS" ]; then + OSSTR=`cat /etc/version | cut -d' ' -f 1` + fi else OSSTR=`/usr/bin/uname -mior` fi From 53584838b856df0603b4ad3c8a903959a6e70080 Mon Sep 17 00:00:00 2001 From: Roman Kuzmitskii Date: Sat, 10 Oct 2020 16:51:24 +0600 Subject: [PATCH 152/332] stop using bash for distro script. bash is too big for embedded (#334) --- snmp/distro | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/snmp/distro b/snmp/distro index f08606e7e..9e8768cd5 100755 --- a/snmp/distro +++ b/snmp/distro @@ -1,4 +1,4 @@ -#!/usr/bin/env bash +#!/usr/bin/env sh # Detects which OS and if it is Linux then it will detect which Linux Distribution. OS=`uname -s` @@ -19,7 +19,7 @@ elif [ "${OS}" = "Linux" ] ; then if [ -f /etc/fedora-release ]; then DIST=$(cat /etc/fedora-release | awk '{print $1}') REV=`cat /etc/fedora-release | sed s/.*release\ // | sed s/\ .*//` - + elif [ -f /etc/redhat-release ] ; then DIST=$(cat /etc/redhat-release | awk '{print $1}') if [ "${DIST}" = "CentOS" ]; then @@ -113,7 +113,7 @@ elif [ "${OS}" = "Linux" ] ; then # try standardized os version methods if [ -f /etc/os-release -a "${IGNORE_OS_RELEASE}" != 1 ] ; then - source /etc/os-release + . 
/etc/os-release STD_DIST="$NAME" STD_REV="$VERSION_ID" elif [ -f /etc/lsb-release -a "${IGNORE_LSB}" != 1 ] ; then From 9be1b41708c90f57d281abbc1e0cd314e4553c8f Mon Sep 17 00:00:00 2001 From: Jellyfrog Date: Thu, 22 Oct 2020 02:40:18 +0200 Subject: [PATCH 153/332] Delete snmpd.conf.example Out of date, let's use the one in the main repo instead --- snmp/snmpd.conf.example | 13 ------------- 1 file changed, 13 deletions(-) delete mode 100644 snmp/snmpd.conf.example diff --git a/snmp/snmpd.conf.example b/snmp/snmpd.conf.example deleted file mode 100644 index a10ffcd64..000000000 --- a/snmp/snmpd.conf.example +++ /dev/null @@ -1,13 +0,0 @@ -# Change RANDOMSTRINGGOESHERE to your preferred SNMP community string -com2sec readonly default RANDOMSTRINGGOESHERE - -group MyROGroup v2c readonly -view all included .1 80 -access MyROGroup "" any noauth exact all none none - -syslocation Rack, Room, Building, City, Country [GPSX,Y] -syscontact Your Name - -#Distro Detection -extend .1.3.6.1.4.1.2021.7890.1 distro /usr/bin/distro - From c77f320b9709eb4628d96163ab2ae7e8211d75c9 Mon Sep 17 00:00:00 2001 From: Kevin Zink Date: Wed, 16 Dec 2020 01:05:49 +0100 Subject: [PATCH 154/332] Bugfix (#340) Fix syntax errors --- snmp/freeradius.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/snmp/freeradius.sh b/snmp/freeradius.sh index 11d343585..dac7e9980 100644 --- a/snmp/freeradius.sh +++ b/snmp/freeradius.sh @@ -10,9 +10,9 @@ RADIUS_SERVER='localhost' RADIUS_PORT='18121' RADIUS_KEY='adminsecret' -if [ -f $CONFIGFILE ]; do +if [ -f $CONFIGFILE ]; then . $CONFIGFILE -done +fi # Default radclient access request, shouldn't need to be changed RADIUS_STATUS_CMD='Message-Authenticator = 0x00, FreeRADIUS-Statistics-Type = 31, Response-Packet-Type = Access-Accept' From 8e2013cc2d3382b385e1f09339ba00f886d389f2 Mon Sep 17 00:00:00 2001 From: mkninc Date: Sat, 2 Jan 2021 02:59:44 +0100 Subject: [PATCH 155/332] Add TrueNAS 12 compatibility (#342) * Configurable paths for zpool/sysctl * Allow non int values * Ignore empty lines from sysctl output --- snmp/zfs-freebsd.py | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/snmp/zfs-freebsd.py b/snmp/zfs-freebsd.py index 2227598df..d32e959a1 100644 --- a/snmp/zfs-freebsd.py +++ b/snmp/zfs-freebsd.py @@ -5,6 +5,9 @@ import json import subprocess +SYSCTL = '/sbin/sysctl' +ZPOOL = '/usr/local/sbin/zpool' + def percent(numerator, denominator, default=0): try: return numerator / denominator * 100 @@ -12,15 +15,19 @@ def percent(numerator, denominator, default=0): return default def main(args): - p = subprocess.run(['/sbin/sysctl', '-q', 'kstat.zfs', 'vfs.zfs'], stdout=subprocess.PIPE, universal_newlines=True) - + p = subprocess.run([SYSCTL, '-q', 'kstat.zfs', 'vfs.zfs'], stdout=subprocess.PIPE, universal_newlines=True) + if p.returncode != 0: return p.returncode def chomp(line): bits = [b.strip() for b in line.split(':')] - return bits[0], int(bits[1]) - stats = dict(chomp(l) for l in p.stdout.splitlines()) + try: + return bits[0], int(bits[1]) + except ValueError: + return bits[0], bits[1] + + stats = dict(chomp(l) for l in p.stdout.splitlines() if l) if 'kstat.zfs.misc.arcstats.recycle_miss' not in stats: stats['kstat.zfs.misc.arcstats.recycle_miss'] = 0 @@ -92,7 +99,7 @@ def chomp(line): output['pre_meta_misses_per'] = percent(output['pre_meta_misses'], output['arc_misses']) # pools - p = subprocess.run(['/sbin/zpool', 'list', '-pH'], stdout=subprocess.PIPE, universal_newlines=True) + p = subprocess.run([ZPOOL, 'list', 
'-pH'], stdout=subprocess.PIPE, universal_newlines=True) if p.returncode != 0: return p.returncode output['pools'] = [] From 8f80309e76daa2b0d625b86d7e6ffb6bb2751bb1 Mon Sep 17 00:00:00 2001 From: SharadKumar97 Date: Fri, 1 Jan 2021 20:00:49 -0600 Subject: [PATCH 156/332] Adding opensips3.X + version support (#338) --- snmp/opensip3-stats.sh | 19 +++++++++++++++++++ snmp/opensips-stats.sh | 1 + 2 files changed, 20 insertions(+) create mode 100644 snmp/opensip3-stats.sh diff --git a/snmp/opensip3-stats.sh b/snmp/opensip3-stats.sh new file mode 100644 index 000000000..fa85e023b --- /dev/null +++ b/snmp/opensip3-stats.sh @@ -0,0 +1,19 @@ +#!/bin/bash +# Author: Sharad Kumar +# This script is for OpenSIPS 3.X + version + +total_memory=$(opensips-cli -x mi get_statistics total_size | awk '/shmem:total_size/ { gsub(/[",]/,""); print "Total Memory=" $2}') +used_memory=$(opensips-cli -x mi get_statistics real_used_size | awk '/shmem:real_used_size/ { gsub(/[",]/,""); print "Used Memory=" $2}') +free_memory=$(opensips-cli -x mi get_statistics free_size | awk '/shmem:free_size/ { gsub(/[",]/,""); print "Free Memory=" $2}') +load_average=$(ps -C opensips -o %cpu | awk '{sum += $1} END {print "Load Average=" sum}') +total_files=$(lsof -c opensips | wc -l) + + +echo $total_memory +echo $used_memory +echo $free_memory +echo $load_average +echo "Open files="$total_files + +exit + diff --git a/snmp/opensips-stats.sh b/snmp/opensips-stats.sh index e8fe2b249..7127ec5ea 100644 --- a/snmp/opensips-stats.sh +++ b/snmp/opensips-stats.sh @@ -1,5 +1,6 @@ #!/bin/bash # Author: Sharad Kumar +# This script is for OpenSIPS 2.X + version total_memory=$(opensipsctl fifo get_statistics total_size | awk '{print "Total Memory=" $2}') used_memory=$(opensipsctl fifo get_statistics real_used_size | awk '{print "Used Memory=" $2}') From c2bf5f88886fcc9c0bc1abca510b19386139237b Mon Sep 17 00:00:00 2001 From: FingerlessGloves Date: Sat, 2 Jan 2021 02:03:51 +0000 Subject: [PATCH 157/332] Show version down to . release for any Debian (#337) Show version down to . release for any Debian based distro's Proxmox and Debian 10.6 tested. --- snmp/distro | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/snmp/distro b/snmp/distro index 9e8768cd5..69cd452a9 100755 --- a/snmp/distro +++ b/snmp/distro @@ -54,6 +54,7 @@ elif [ "${OS}" = "Linux" ] ; then elif [ -f /etc/debian_version ] ; then DIST="Debian `cat /etc/debian_version`" REV="" + IGNORE_OS_RELEASE=1 if [ -f /usr/bin/lsb_release ] ; then ID=`lsb_release -i | awk -F ':' '{print $2}' | sed 's/ //g'` fi @@ -62,16 +63,13 @@ elif [ "${OS}" = "Linux" ] ; then fi if [ -f /usr/bin/pveversion ]; then DIST="${DIST}/PVE `/usr/bin/pveversion | cut -d '/' -f 2`" - IGNORE_OS_RELEASE=1 fi if [ -f /usr/bin/pmgversion ]; then # pmgversion requires root permissions to run, please add NOPASSWD setting to visudo. DIST="${DIST}/PMG `sudo /usr/bin/pmgversion | cut -d '/' -f 2`" - IGNORE_OS_RELEASE=1 fi if [ -f /etc/dogtag ]; then DIST=`cat /etc/dogtag` - IGNORE_OS_RELEASE=1 fi elif [ -f /etc/gentoo-release ] ; then From 4c306563941949a8cdcb59cc81fda5442b5a8c7c Mon Sep 17 00:00:00 2001 From: Mike Dixson Date: Sat, 2 Jan 2021 02:07:23 +0000 Subject: [PATCH 158/332] Update check_mpre (#336) --recv-only argument doesn't exist on some well used version of netcat. This method timesout after 1 second of idle time. Only possible downside is that if it takes more than 1 second to initiate the connection it may timeout too. 
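For illustration of the replacement, a sketch assuming a check_mk agent on the script's default port 6556 (the hostname is a placeholder):

    # -w 1 makes nc give up after one second of idle time instead of
    # relying on --recv-only, which some netcat builds do not support
    nc -w 1 agent.example.com 6556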
--- agent-local/check_mrpe | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/agent-local/check_mrpe b/agent-local/check_mrpe index 6cd3a044c..1b8401f99 100644 --- a/agent-local/check_mrpe +++ b/agent-local/check_mrpe @@ -55,7 +55,7 @@ else SED_CMD="s/(.*) \(.*\) [0-9] \(.*\)/\1 \2/p" fi -for i in `$BIN_NC --recv-only $Hval $pval 2>&1 | $BIN_SED '/^<<>>/,/^<<&1 | $BIN_SED '/^<<>>/,/^<<&1 | $BIN_SED '/^<<>>/,/^<< Date: Mon, 18 Jan 2021 17:54:03 +0000 Subject: [PATCH 159/332] Fix STDERR output corrupting json output in mdadm app (#344) When checking if arrays have slaves, the mdadm script, does an ls/$LS of the device to see if it exists. This $LS throws an error to STDERR if it does not match. This output is caught by snmp and corrupts the json output --- snmp/mdadm | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/mdadm b/snmp/mdadm index ecbc8e510..b25629266 100755 --- a/snmp/mdadm +++ b/snmp/mdadm @@ -22,7 +22,7 @@ if $LS /dev/md?* 1> /dev/null 2>&1 ; then RAID="/sys/block/"$($BASENAME $($REALPATH $ARRAY_BLOCKDEVICE)) # ignore arrays with no slaves - if [ -z "$($LS -1 $RAID/slaves)" ] ; then + if [ -z "$($LS -1 $RAID/slaves 2> /dev/null)" ] ; then continue fi # ignore "non existing" arrays From 7a757ba70b6adb0a291f739b03c858d1e74901e0 Mon Sep 17 00:00:00 2001 From: Russell Morris Date: Fri, 29 Jan 2021 07:31:05 -0600 Subject: [PATCH 160/332] Add snmp script for BeableBoard Temperature Sensors (#330) --- snmp/beagleboard.sh | 2 ++ 1 file changed, 2 insertions(+) create mode 100755 snmp/beagleboard.sh diff --git a/snmp/beagleboard.sh b/snmp/beagleboard.sh new file mode 100755 index 000000000..a73376d95 --- /dev/null +++ b/snmp/beagleboard.sh @@ -0,0 +1,2 @@ +#!/bin/sh +cat /sys/devices/virtual/thermal/thermal_zone*/temp From ff8eba473e76d1bda98344595dc79ee2a3d6d5df Mon Sep 17 00:00:00 2001 From: Kanok Chantrasmi Date: Sat, 6 Mar 2021 07:01:57 +0700 Subject: [PATCH 161/332] gpsd python error (#352) gpsd script occasionally results in python error. 
Cause: the expected info from the GPS unit on each update can run to more than 10 lines, so Python did not find the expected wording and raised an error.
Correction: increase the number of lines read from gpspipe from 10 to 20.
---
 snmp/gpsd | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/snmp/gpsd b/snmp/gpsd
index 48f1be4ad..70fe924c9 100755
--- a/snmp/gpsd
+++ b/snmp/gpsd
@@ -26,7 +26,7 @@ TMPFILE=$(mktemp)
 trap "rm -f $TMPFILE" 0 2 3 15

 # Write GPSPIPE Data to Temp File
-$BIN_GPIPE -w -n 10 > $TMPFILE
+$BIN_GPIPE -w -n 20 > $TMPFILE

 # Parse Temp file for GPSD Data
 VERSION=`cat $TMPFILE | $BIN_GREP -m 1 "VERSION" | $BIN_PYTHON -c 'import sys,json;print json.load(sys.stdin)["rev"]'`
@@ -42,4 +42,4 @@ SATSUSED=`cat $TMPFILE | $BIN_GREP -m 1 "SKY" | $BIN_PYTHON -c 'import sys,json;
 # Output info for SNMP Extend
 echo '{"data":{"mode":"'$GPSDMODE'", "hdop":"'$HDOP'", "vdop":"'$VDOP'", "latitude":"'$LAT'", "longitude":"'$LONG'", "altitude":"'$ALT'", "satellites":"'$SATS'", "satellites_used":"'$SATSUSED'"}, "error":"0", "errorString":"", "version":"'$VERSION'"}'

-rm $TMPFILE
\ No newline at end of file
+rm $TMPFILE
From f505b1308363759cc54b3bc5828660f1e340c8ab Mon Sep 17 00:00:00 2001
From: Chewie
Date: Sat, 6 Mar 2021 20:38:46 +0000
Subject: [PATCH 162/332] add DHCP stats to PiHole (#351)

* add DHCP stats to PiHole

* Update pi-hole

Removed need for `calc` thanks to @jellyfrog 's suggestion
---
 snmp/pi-hole | 33 +++++++++++++++++++++++++++++++++
 1 file changed, 33 insertions(+)

diff --git a/snmp/pi-hole b/snmp/pi-hole
index fc5f52d77..f5cc5c9ab 100644
--- a/snmp/pi-hole
+++ b/snmp/pi-hole
@@ -8,11 +8,17 @@ API_AUTH_KEY=""
 API_URL="localhost/admin/api.php"
 URL_READ_ONLY="?summaryRaw"
 URL_QUERY_TYPE="?getQueryTypes&auth="
+PICONFIGFILE='/etc/pihole/setupVars.conf'
+DHCPLEASEFILE='/etc/pihole/dhcp.leases'

 if [ -f $CONFIGFILE ]; then
     . $CONFIGFILE
 fi

+# read in pi-hole variables for DHCP range
+if [ -f $PICONFIGFILE ]; then
+    . $PICONFIGFILE
+fi

 #/ Description: BASH script to get Pi-hole stats
 #/ Examples: ./pi-hole-stats.sh
@@ -66,6 +72,16 @@ debug() {
     else
         echo '[ok] URL_QUERY_TYPE not set'
     fi
+    if [ -f $PICONFIGFILE ]; then
+        echo '[ok] Pi-Hole config file exists, DHCP stats will be captured if scope active'
+    else
+        echo '[error] Pi-Hole config file does not exist, DHCP stats will not be captured if used'
+    fi
+    if [ -f $DHCPLEASEFILE ]; then
+        echo '[ok] DHCP lease file exists, DHCP stats will be captured if scope active'
+    else
+        echo '[error] DHCP lease file does not exist, DHCP stats will not be captured if used'
+    fi
 }

 exportdata() {
@@ -76,6 +92,23 @@ exportdata() {
     # A / AAAA / PTR / SRV
     GET_QUERY_TYPE=$(curl -s $API_URL$URL_QUERY_TYPE$API_AUTH_KEY | jq '.[]["A (IPv4)", "AAAA (IPv6)", "PTR", "SRV"]')
     echo $GET_QUERY_TYPE | tr " " "\n"
+
+    # Find number of DHCP address in scope and current lease count
+    # case-insensitive compare, just in case :)
+    if [ "${DHCP_ACTIVE,,}" = "true" ]; then
+        # Max IP addresses in scope
+        # Convert IPs to decimal and subtract
+        IFS="." read -r -a array <<< $DHCP_START
+        DHCPSTARTDECIMAL=$(( (${array[0]}*256**3) + (${array[1]}*256**2) + (${array[2]}*256) + ${array[3]} ))
+        IFS="."
read -r -a array <<< $DHCP_END + DHCPENDDECIMAL=$(( (${array[0]}*256**3) + (${array[1]}*256**2) + (${array[2]}*256) + ${array[3]} )) + expr $DHCPENDDECIMAL - $DHCPSTARTDECIMAL + # Current lease count + cat $DHCPLEASEFILE | wc -l + else + echo 0 + echo 0 + fi } if [ -z $* ]; then From ca16f20185ae58b01d75395686b71b79918963ec Mon Sep 17 00:00:00 2001 From: 0xbad0c0d3 <0xbad0c0d3@gmail.com> Date: Sun, 7 Mar 2021 21:26:47 +0200 Subject: [PATCH 163/332] docker stats script (#307) * docker stats script * Update docker-stats.sh Co-authored-by: Jellyfrog --- snmp/docker-stats.sh | 38 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) create mode 100644 snmp/docker-stats.sh diff --git a/snmp/docker-stats.sh b/snmp/docker-stats.sh new file mode 100644 index 000000000..7ac7473f2 --- /dev/null +++ b/snmp/docker-stats.sh @@ -0,0 +1,38 @@ +#!/usr/bin/env bash + +VERSION=1 + +function dockerStatsFormat() { + cat <&1) +ERROR=$? +if [ $ERROR -ne 0 ];then + ERROR_STRING=${STATS} + unset STATS +fi +jq -nMc \ + --slurpfile stats <(echo "${STATS:-}") \ + --arg version "${VERSION:-1}" \ + --arg error "${ERROR:-0}" \ + --arg errorString "${ERROR_STRING:-}" \ + '{"version": $version, "data": $stats, "error": $error, "errorString": $errorString }' + +# vim: tabstop=2:shiftwidth=2:expandtab: From 5aa62834cbca260cb754ab3a0623acce733aeca5 Mon Sep 17 00:00:00 2001 From: yrebrac Date: Mon, 8 Mar 2021 06:27:44 +1100 Subject: [PATCH 164/332] Add powermon app script (#348) * added snmp/powermon-snmp.py * powermon script v1.3 * powermon script v1.3a * powermon-snmp.py v1.4 --- snmp/powermon-snmp.py | 362 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 362 insertions(+) create mode 100755 snmp/powermon-snmp.py diff --git a/snmp/powermon-snmp.py b/snmp/powermon-snmp.py new file mode 100755 index 000000000..2b3ed74b6 --- /dev/null +++ b/snmp/powermon-snmp.py @@ -0,0 +1,362 @@ +#!/usr/bin/python3 +# +# Copyright(C) 2021 Ben Carbery yrebrac@upaya.net.au +# +# LICENSE - GPLv3 +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# version 3. See https://www.gnu.org/licenses/gpl-3.0.txt +# +# DESCRIPTION +# +# The script attempts to determine the current power consumption of the host via +# one or more methods. The scripts should make it easier to add your own methods +# if no included one is suitable for your host machine. +# +# The script should be called by the snmpd daemon on the host machine. This is +# achieved via the 'extend' functionality in snmpd. For example, in +# /etc/snmp/snmpd.conf: +# extend powermon /usr/local/bin/powermon-snmp.py +# +# CUSTOMISING RESULTS +# +# The results can be accessed via the nsExtend MIBs from another host, e.g. +# snmpwalk -v 2c -c \ +# +# +# The results are returned in a JSON format suitable for graphing in LibreNMS. +# A LibreNMS 'application' is available for this purpose. +# +# The application expects to see a single top-level reading in the results in +# terms of Watts. This can be derived from a reading from one of the sub- +# components, currently the ACPI 'meter' or 'psus'. But you must tell the script +# which is the top-level or final reading you want to use in the results. This +# allows you to sum results from dual PSUs or apply your own power factor for +# example. To achieve this see the definition of 'data["reading"]' at the end +# of the script, and modify as required. Two examples are provided. 
+# +# If you want to track your electricity cost you should also update the cost +# per kWh value below. When you cost changes you can update the value. The +# supply rate will be returned in the results +# +# COMPATIBILITY +# +# - Linux, not tested on other OS +# - Tested on python 3.6, 3.8 +# +# INSTALLATION +# +# - Sensors method: pip install PySensors +# - hpasmcli method: install hp-health package for your distribution +# - Copy this script somewhere, e.g. /usr/local/bin +# - Uncomment costPerkWh and change the value +# - Test then customise top-level reading +# - Add the 'extend' config to snmpd.conf +# - https://docs.librenms.org/Extensions/Applications/#powermon +# +# CHANGELOG +# +# 20210130 - v1.0 - initial, implemented PySensors method +# 20210131 - v1.1 - implemented hpasmcli method +# 20210204 - v1.2 - added top-level reading, librenms option +# 20210205 - v1.3 - added cents per kWh +# 20210205 - v1.4 - improvement to UI + +version = 1.4 + +### Libraries + +import os +import sys +import getopt +import json +import re +import shutil +import subprocess + +### Option defaults + +method = "" # must be one of methods array +verbose = False +warnings = False +librenms = True # Return results in a JSON format suitable for Librenms + # Set to false to return JSON data only +pretty = False # Pretty printing + +### Globals + +error = 0 +errorString = "" +data = {} +result = {} +usage = "USAGE: " + os.path.basename(__file__) + " [-h|--help] |" \ + + " [-m|--method ] [-N|--no-librenms] [-p|--pretty]" \ + + " [-v|--verbose] [-w|--warnings] | -l|--list-methods | -h|--help" +methods = ["sensors", "hpasmcli"] +#costPerkWh = 0.15 # <<<< UNCOMMENT + +### General functions + +def errorMsg(message): + sys.stderr.write("ERROR: " + message + "\n") + +def usageError(message="Invalid argument"): + errorMsg(message) + sys.stderr.write(usage + "\n") + sys.exit(1) + +def warningMsg(message): + if verbose or warnings: + sys.stderr.write("WARN: " + message + "\n") + +def verboseMsg(message): + if verbose: + sys.stderr.write("INFO: " + message + "\n") + +def listMethods(): + global verbose + verbose = True + verboseMsg("Available methods are: " + str(methods).strip('[]')) + +### Data functions + +def getData(method): + if method == "sensors": + data = getSensorData() + + elif method == "hpasmcli": + data = getHPASMData() + else: + usageError("You must specify a method.") + + return data + +def getSensorData(): + global error, errorString + error = 2 + errorString = "No power sensor found" + + try: + import sensors + sensors.init() + + except ModuleNotFoundError as e: + errorMsg(str(e)) + verboseMsg("Try 'pip install PySensors'") + sys.exit(1) + + except FileNotFoundError as e: + errorMsg("Module 'sensors' appears to be missing a dependancy: " + str(e)) + verboseMsg("Try 'dnf install lm_sensors'") + sys.exit(1) + + except: + e = sys.exc_info() + errorMsg("Module sensors is installed but failed to initialise: " + str(e)) + sys.exit(1) + + sdata = {} + sdata["meter"] = {} + sdata["psu"] = {} + + re_meter = "^power_meter" + + power_chips = [] + try: + for chip in sensors.iter_detected_chips(): + chip_name = str(chip) + verboseMsg("Found chip: " + chip_name) + + if re.search(re_meter, chip_name): + verboseMsg("Found power meter: " + chip_name) + error = 0 + errorString = "" + + junk, meter_id = chip_name.split('acpi-', 1) + sdata["meter"][meter_id] = {} + + for feature in chip: + feature_label = str(feature.label) + verboseMsg("Found feature: " + feature_label) + + if re.search("^power", feature_label): + 
sdata["meter"][meter_id]["reading"] = feature.get_value() + + if feature.get_value() == 0: + # warning as downstream may try to divide by 0 + warningMsg("Sensors returned a zero value") + + else: + # store anything else in case label is something unexpected + sdata[chip_name][feature_label] = feature.get_value() + + except: + es = sys.exc_info() + error = 1 + errorString = "Unable to get data: General exception: " + str(es) + + finally: + sensors.cleanup() + return sdata + +def getHPASMData(): + global error, errorString + + exe = shutil.which('hpasmcli') + #if not os.access(candidate, os.W_OK): + cmd = [exe, '-s', 'show powermeter; show powersupply'] + warningMsg("hpasmcli only runs as root") + + try: + output = subprocess.run(cmd, capture_output=True, check=True, text=True, timeout=2) + + except subprocess.CalledProcessError as e: + errorMsg(str(e) + ": " + str(e.stdout).strip('\n')) + sys.exit(1) + + rawdata = str(output.stdout).replace('\t', ' ').replace('\n ', '\n').split('\n') + + hdata = {} + hdata["meter"] = {} + hdata["psu"] = {} + + re_meter = "^Power Meter #([0-9]+)" + re_meter_reading = "^Power Reading :" + re_psu = "^Power supply #[0-9]+" + re_psu_present = "^Present :" + re_psu_redundant = "^Redundant:" + re_psu_condition = "^Condition:" + re_psu_hotplug = "^Hotplug :" + re_psu_reading = "^Power :" + + for line in rawdata: + if re.match(re_meter, line): + verboseMsg("found power meter: " + line) + junk, meter_id = line.split('#', 1) + hdata["meter"][meter_id] = {} + + elif re.match(re_meter_reading, line): + verboseMsg("found power meter reading: " + line) + junk, meter_reading = line.split(':', 1) + hdata["meter"][meter_id]["reading"] = meter_reading.strip() + + elif re.match(re_psu, line): + verboseMsg("found power supply: " + line) + junk, psu_id = line.split('#', 1) + hdata["psu"][psu_id] = {} + + elif re.match(re_psu_present, line): + verboseMsg("found power supply present: " + line) + junk, psu_present = line.split(':', 1) + hdata["psu"][psu_id]["present"] = psu_present.strip() + + elif re.match(re_psu_redundant, line): + verboseMsg("found power supply redundant: " + line) + junk, psu_redundant = line.split(':', 1) + hdata["psu"][psu_id]["redundant"] = psu_redundant.strip() + + elif re.match(re_psu_condition, line): + verboseMsg("found power supply condition: " + line) + junk, psu_condition = line.split(':', 1) + hdata["psu"][psu_id]["condition"] = psu_condition.strip() + + elif re.match(re_psu_hotplug, line): + verboseMsg("found power supply hotplug: " + line) + junk, psu_hotplug = line.split(':', 1) + hdata["psu"][psu_id]["hotplug"] = psu_hotplug.strip() + + elif re.match(re_psu_reading, line): + verboseMsg("found power supply reading: " + line) + junk, psu_reading = line.split(':', 1) + hdata["psu"][psu_id]["reading"] = psu_reading.replace('Watts', '').strip() + + return hdata + +# Argument Parsing +try: + opts, args = getopt.gnu_getopt( + sys.argv[1:], 'm:hlNpvw', ['method', 'help', 'list-methods', 'no-librenms', 'pretty', 'verbose', 'warnings'] + ) + if len(args) != 0: + usageError("Unknown argument") + +except getopt.GetoptError as e: + usageError(str(e)) + +for opt, val in opts: + if opt in ["-h", "--help"]: + print(usage) + sys.exit(0) + + elif opt in ["-l", "--list-methods"]: + listMethods() + sys.exit(0) + + elif opt in ["-m", "--method"]: + if val not in methods: + usageError("Invalid method: '" + val + "'") + else: + method = val + + elif opt in ["-N", "--no-librenms"]: + librenms = False + + elif opt in ["-p", "--pretty"]: + pretty = True + + elif opt in 
["-v", "--verbose"]: + verbose = True + + elif opt in ["-w", "--warnings"]: + warnings = True + + else: + continue + +# Electricity Cost +try: + costPerkWh + +except NameError: + errorMsg("cost per kWh is undefined (uncomment in script)") + sys.exit(1) + +# Get data +data = getData(method) +data["supply"] = {} +data["supply"]["rate"] = costPerkWh + +# Top-level reading +# CUSTOMISE THIS FOR YOUR HOST +# i.e. by running with -p -n -m and see what you get and then updating where +# in the JSON data the top-level reading is sourced from +try: + # Example 1 - take reading from ACPI meter id 1 + data["reading"] = data["meter"]["1"]["reading"] + + # Example 2 - sum the two power supplies and apply a power factor + #pf = 0.95 + #data["reading"] = str( float(data["psu"]["1"]["reading"]) \ + # + float(data["psu"]["2"]["reading"]) / pf ) + +except: + data["reading"] = 0.0 + +# Build result +if librenms: + result['version']=version + result['error']=error + result['errorString']=errorString + result['data']=data + +else: + result=data + +# Print result +if pretty: + print(json.dumps(result, indent=2)) + +else: + print(json.dumps(result)) + From 61064dc9febc6fc0b8c80ff5ff2041aba247356a Mon Sep 17 00:00:00 2001 From: Jellyfrog Date: Thu, 18 Mar 2021 12:24:30 +0100 Subject: [PATCH 165/332] Cleanup some code (#355) * Format with isort * Format with Black * Fix CRLF * Format with shellcheck * Fix some warning * Fix PHP style * Dont modifiy check_mk files * Fixes --- agent-local/ceph | 109 ++++++++---- agent-local/check_mrpe | 6 +- agent-local/freeswitch | 8 +- agent-local/hddtemp | 14 +- agent-local/nginx | 32 ++-- agent-local/nginx-python3.py | 8 +- agent-local/powerdns | 41 +++-- agent-local/powerdns-recursor | 11 +- agent-local/rocks.sh | 8 +- agent-local/rrdcached | 28 +-- agent-local/unbound.sh | 2 +- mk_enplug | 4 +- snmp/Openwrt/wlClients.sh | 10 +- snmp/Openwrt/wlFrequency.sh | 6 +- snmp/Openwrt/wlNoiseFloor.sh | 6 +- snmp/Openwrt/wlRate.sh | 14 +- snmp/Openwrt/wlSNR.sh | 14 +- snmp/apache-stats.py | 44 ++--- snmp/apache-stats.sh | 22 +-- snmp/backupninja.py | 44 +++-- snmp/certificate.py | 61 ++++--- snmp/chip.sh | 54 +++--- snmp/dhcp.py | 149 ++++++++-------- snmp/exim-stats.sh | 14 +- snmp/freeradius.sh | 84 ++++----- snmp/gpsd | 90 +++++----- snmp/icecast-stats.sh | 8 +- snmp/mailcow-dockerized-postfix | 42 +++-- snmp/mailscanner.php | 88 +++++----- snmp/mdadm | 38 ++-- snmp/mysql-stats | 288 +++++++++++++++++++----------- snmp/nginx | 8 +- snmp/nginx-python2 | 37 ++-- snmp/ntp-client | 18 +- snmp/ntp-server.sh | 96 +++++----- snmp/nvidia | 6 +- snmp/opensip3-stats.sh | 10 +- snmp/opensips-stats.sh | 10 +- snmp/osupdate | 32 ++-- snmp/phpfpmsp | 26 +-- snmp/pi-hole | 20 +-- snmp/postfix-queues | 2 +- snmp/powerdns-dnsdist | 124 ++++++------- snmp/powerdns-recursor | 9 +- snmp/powerdns.py | 20 ++- snmp/powermon-snmp.py | 118 ++++++++----- snmp/puppet_agent.py | 58 ++++--- snmp/pureftpd.py | 63 ++++--- snmp/raspberry.sh | 48 ++--- snmp/redis.py | 36 ++-- snmp/sdfsinfo | 8 +- snmp/seafile.py | 132 +++++++------- snmp/shoutcast.php | 188 ++++++++++---------- snmp/ups-apcups.sh | 6 +- snmp/ups-nut.sh | 8 +- snmp/voipmon-stats.sh | 8 +- snmp/zfs-freebsd.py | 297 ++++++++++++++++++------------- snmp/zfs-linux | 299 ++++++++++++++++++-------------- 58 files changed, 1682 insertions(+), 1352 deletions(-) diff --git a/agent-local/ceph b/agent-local/ceph index 1301f79ec..1493fa155 100755 --- a/agent-local/ceph +++ b/agent-local/ceph @@ -14,80 +14,117 @@ # # See http://www.gnu.org/licenses/gpl.txt for the 
full license -from subprocess import check_output import json +from subprocess import check_output + def cephversion(): - cephv = check_output(["/usr/bin/ceph", "version"]).decode("utf-8").replace('ceph version ', '') - major, minor = cephv.split('.')[0:2] + cephv = ( + check_output(["/usr/bin/ceph", "version"]) + .decode("utf-8") + .replace("ceph version ", "") + ) + major, minor = cephv.split(".")[0:2] return [int(major), int(minor)] + def cephdf(): - cephdf = check_output(["/usr/bin/ceph", "-f", "json", "df"]).decode("utf-8").replace('-inf', '0') + cephdf = ( + check_output(["/usr/bin/ceph", "-f", "json", "df"]) + .decode("utf-8") + .replace("-inf", "0") + ) s = json.loads(cephdf) try: - ts = s['stats']['total_bytes'] - except: - ts = s['stats']['total_space'] + ts = s["stats"]["total_bytes"] + except KeyError: + ts = s["stats"]["total_space"] try: - tu = s['stats']['total_used_bytes'] - except: - tu = s['stats']['total_used'] + tu = s["stats"]["total_used_bytes"] + except KeyError: + tu = s["stats"]["total_used"] try: - ta = s['stats']['total_avail_bytes'] - except: - ta = s['stats']['total_avail'] + ta = s["stats"]["total_avail_bytes"] + except KeyError: + ta = s["stats"]["total_avail"] print("c:%i:%i:%i" % (ts, tu, ta)) - for p in s['pools']: - b = p['stats']['bytes_used'] - a = p['stats']['max_avail'] - o = p['stats']['objects'] - print("%s:%i:%i:%i" % (p['name'], a, b, o)) + for p in s["pools"]: + b = p["stats"]["bytes_used"] + a = p["stats"]["max_avail"] + o = p["stats"]["objects"] + print("%s:%i:%i:%i" % (p["name"], a, b, o)) def osdperf(): global major - osdperf = check_output(["/usr/bin/ceph", "-f", "json", "osd", "perf"]).decode("utf-8").replace('-inf', '0') + osdperf = ( + check_output(["/usr/bin/ceph", "-f", "json", "osd", "perf"]) + .decode("utf-8") + .replace("-inf", "0") + ) if major > 13: - for o in json.loads(osdperf)['osdstats']['osd_perf_infos']: - print("osd.%s:%i:%i" % (o['id'], o['perf_stats']['apply_latency_ms'], o['perf_stats']['commit_latency_ms'])) + for o in json.loads(osdperf)["osdstats"]["osd_perf_infos"]: + print( + "osd.%s:%i:%i" + % ( + o["id"], + o["perf_stats"]["apply_latency_ms"], + o["perf_stats"]["commit_latency_ms"], + ) + ) else: - for o in json.loads(osdperf)['osd_perf_infos']: - print("osd.%s:%i:%i" % (o['id'], o['perf_stats']['apply_latency_ms'], o['perf_stats']['commit_latency_ms'])) + for o in json.loads(osdperf)["osd_perf_infos"]: + print( + "osd.%s:%i:%i" + % ( + o["id"], + o["perf_stats"]["apply_latency_ms"], + o["perf_stats"]["commit_latency_ms"], + ) + ) + def poolstats(): global major - poolstats = check_output(["/usr/bin/ceph", "-f", "json", "osd", "pool", "stats"]).decode("utf-8").replace('-inf', '0') + poolstats = ( + check_output(["/usr/bin/ceph", "-f", "json", "osd", "pool", "stats"]) + .decode("utf-8") + .replace("-inf", "0") + ) for p in json.loads(poolstats): try: - r = p['client_io_rate']['read_bytes_sec'] - except: + r = p["client_io_rate"]["read_bytes_sec"] + except KeyError: r = 0 try: - w = p['client_io_rate']['write_bytes_sec'] - except: + w = p["client_io_rate"]["write_bytes_sec"] + except KeyError: w = 0 try: if major > 11: - o = p['client_io_rate']['read_op_per_sec'] + p['client_io_rate']['write_op_per_sec'] + o = ( + p["client_io_rate"]["read_op_per_sec"] + + p["client_io_rate"]["write_op_per_sec"] + ) else: - o = p['client_io_rate']['op_per_sec'] - except: + o = p["client_io_rate"]["op_per_sec"] + except KeyError: o = 0 - print("%s:%i:%i:%i" % (p['pool_name'], o, w, r)) + print("%s:%i:%i:%i" % (p["pool_name"], o, w, 
r)) + major, minor = cephversion() -print ("<<>>") -print ("") +print("<<>>") +print("") poolstats() -print ("") +print("") osdperf() -print ("") +print("") cephdf() diff --git a/agent-local/check_mrpe b/agent-local/check_mrpe index 1b8401f99..d21b6d9a1 100644 --- a/agent-local/check_mrpe +++ b/agent-local/check_mrpe @@ -55,11 +55,11 @@ else SED_CMD="s/(.*) \(.*\) [0-9] \(.*\)/\1 \2/p" fi -for i in `$BIN_NC -w 1 $Hval $pval 2>&1 | $BIN_SED '/^<<>>/,/^<<&1 | $BIN_SED '/^<<>>/,/^<</dev/null 2>&1; then - disks=`lsblk -dnp|cut -d' ' -f1 | tr '\n' ' '` + disks=$(lsblk -dnp|cut -d' ' -f1 | tr '\n' ' ') else - disks=`find /dev -name '[sh]d[a-z]' -or -name '[sh]d[a-z][a-z]' | tr '\n' ' '` + disks=$(find /dev -name '[sh]d[a-z]' -or -name '[sh]d[a-z][a-z]' | tr '\n' ' ') fi -hddtemp=`which hddtemp 2>/dev/null` +hddtemp=$(which hddtemp 2>/dev/null) if [ "${hddtemp}" != "" ]; then if [ -x "${hddtemp}" ]; then if type parallel > /dev/null 2>&1; then # When available, use GNU parallel for a significant performance boost. hddtemp runs serially(!) - output=`parallel ${hddtemp} -w -q ::: ${disks} 2>/dev/null` + output=$(parallel "${hddtemp}" -w -q ::: "${disks}" 2>/dev/null) else - output=`${hddtemp} -w -q ${disks} 2>/dev/null` + output=$(${hddtemp} -w -q "${disks}" 2>/dev/null) fi - content=`echo "$output" | awk '{ if ($0 !~ /not available/) { print $0 } }' | awk -F": " 'BEGIN{ ORS="" }{ print "|"$1"|"$2"|"$3"|";} ' | sed 's/[° ]C|/|C|/g' | sed 's/[° ]F|/|F|/g' | tr -cd '\12\14\40-\176'` + content=$(echo "$output" | awk '{ if ($0 !~ /not available/) { print $0 } }' | awk -F": " 'BEGIN{ ORS="" }{ print "|"$1"|"$2"|"$3"|";} ' | sed 's/[° ]C|/|C|/g' | sed 's/[° ]F|/|F|/g' | tr -cd '\12\14\40-\176') if [ "${content}" != "" ]; then echo '<<>>' - echo ${content} + echo "${content}" echo else echo "no hddtemp compatible disks found" >&2 diff --git a/agent-local/nginx b/agent-local/nginx index d6319f1b2..c1d5fd18d 100755 --- a/agent-local/nginx +++ b/agent-local/nginx @@ -1,9 +1,9 @@ #!/usr/bin/env python -import urllib2 import re +import urllib2 -data = urllib2.urlopen('http://127.0.0.1/nginx-status').read() +data = urllib2.urlopen("http://127.0.0.1/nginx-status").read() params = {} @@ -11,28 +11,24 @@ for line in data.split("\n"): smallstat = re.match(r"\s?Reading:\s(.*)\sWriting:\s(.*)\sWaiting:\s(.*)$", line) req = re.match(r"\s+(\d+)\s+(\d+)\s+(\d+)", line) if smallstat: - params["Reading"] = smallstat.group(1) - params["Writing"] = smallstat.group(2) - params["Waiting"] = smallstat.group(3) + params["Reading"] = smallstat.group(1) + params["Writing"] = smallstat.group(2) + params["Waiting"] = smallstat.group(3) elif req: - params["Requests"] = req.group(3) + params["Requests"] = req.group(3) else: - pass + pass -dataorder = [ - "Active", - "Reading", - "Writing", - "Waiting", - "Requests" - ] +dataorder = ["Active", "Reading", "Writing", "Waiting", "Requests"] -print "<<>>\n"; +print "<<>>\n" for param in dataorder: if param == "Active": - Active = int(params["Reading"]) + int(params["Writing"]) + int(params["Waiting"]) - print Active + Active = ( + int(params["Reading"]) + int(params["Writing"]) + int(params["Waiting"]) + ) + print Active else: - print params[param] + print params[param] diff --git a/agent-local/nginx-python3.py b/agent-local/nginx-python3.py index 2464f89d1..fd710ba8c 100755 --- a/agent-local/nginx-python3.py +++ b/agent-local/nginx-python3.py @@ -1,8 +1,8 @@ #!/usr/bin/env python3 -from urllib.request import urlopen import re +from urllib.request import urlopen -data = 
+data = urlopen("http://127.0.0.1/nginx-status").read()
 
 params = {}
 
@@ -24,7 +24,9 @@ for param in dataorder:
     if param == "Active":
-        Active = int(params["Reading"]) + int(params["Writing"]) + int(params["Waiting"])
+        Active = (
+            int(params["Reading"]) + int(params["Writing"]) + int(params["Waiting"])
+        )
         print(Active)
     else:
         print(params[param])
diff --git a/agent-local/powerdns b/agent-local/powerdns
index d4fc6bb30..749633006 100755
--- a/agent-local/powerdns
+++ b/agent-local/powerdns
@@ -1,25 +1,40 @@
 #!/usr/bin/env python3
-from subprocess import Popen, PIPE
+from subprocess import PIPE, Popen
 
-kvars = [
-    'corrupt-packets', 'deferred-cache-inserts', 'deferred-cache-lookup',
-    'latency', 'packetcache-hit', 'packetcache-miss', 'packetcache-size',
-    'qsize-q', 'query-cache-hit', 'query-cache-miss', 'recursing-answers',
-    'recursing-questions', 'servfail-packets', 'tcp-answers', 'tcp-queries',
-    'timedout-packets', 'udp-answers', 'udp-queries', 'udp4-answers',
-    'udp4-queries', 'udp6-answers', 'udp6-queries'
+kvars = [
+    "corrupt-packets",
+    "deferred-cache-inserts",
+    "deferred-cache-lookup",
+    "latency",
+    "packetcache-hit",
+    "packetcache-miss",
+    "packetcache-size",
+    "qsize-q",
+    "query-cache-hit",
+    "query-cache-miss",
+    "recursing-answers",
+    "recursing-questions",
+    "servfail-packets",
+    "tcp-answers",
+    "tcp-queries",
+    "timedout-packets",
+    "udp-answers",
+    "udp-queries",
+    "udp4-answers",
+    "udp4-queries",
+    "udp6-answers",
+    "udp6-queries",
 ]
 
 rvars = {}
 
-cmd = ['pdns_control', 'show', '*']
+cmd = ["pdns_control", "show", "*"]
 
-for l in Popen(cmd, stdout=PIPE).communicate()[0].decode().rstrip().split(','):
-    v = l.split('=')
+for l in Popen(cmd, stdout=PIPE).communicate()[0].decode().rstrip().split(","):
+    v = l.split("=")
     if len(v) > 1:
-       rvars[v[0]] = v[1]
+        rvars[v[0]] = v[1]
 
 print("<<>>")
 for k in kvars:
     print(rvars[k])
-
diff --git a/agent-local/powerdns-recursor b/agent-local/powerdns-recursor
index 6949c7f44..ed94d542c 100755
--- a/agent-local/powerdns-recursor
+++ b/agent-local/powerdns-recursor
@@ -1,13 +1,14 @@
 #!/usr/bin/env python3
-import json, subprocess
-from subprocess import Popen, PIPE
+import json
+import subprocess
+from subprocess import PIPE, Popen
 
-input = Popen(['rec_control', 'get-all'], stdout=PIPE).communicate()[0]
+input = Popen(["rec_control", "get-all"], stdout=PIPE).communicate()[0]
 
 data = []
 for line in input.splitlines():
     item = line.split()
-    data.append({'name': item[0].decode(), 'value': int(item[1].decode())})
+    data.append({"name": item[0].decode(), "value": int(item[1].decode())})
 
-print('<<>>')
+print("<<>>")
 print(json.dumps(data))
diff --git a/agent-local/rocks.sh b/agent-local/rocks.sh
index f3547adaa..9ff1ec35e 100755
--- a/agent-local/rocks.sh
+++ b/agent-local/rocks.sh
@@ -15,8 +15,8 @@ PENDING_JOBS=$($QSTAT -u "*" -s p | wc -l)
 SUSPEND_JOBS=$($QSTAT -u "*" -s s | wc -l)
 ZOMBIE_JOBS=$($QSTAT -u "*" -s z | wc -l)
 
-echo $RUNNING_JOBS;
-echo $PENDING_JOBS;
-echo $SUSPEND_JOBS;
-echo $ZOMBIE_JOBS;
+echo "$RUNNING_JOBS";
+echo "$PENDING_JOBS";
+echo "$SUSPEND_JOBS";
+echo "$ZOMBIE_JOBS";
diff --git a/agent-local/rrdcached b/agent-local/rrdcached
index 0fca240fa..33ec66fd7 100755
--- a/agent-local/rrdcached
+++ b/agent-local/rrdcached
@@ -1,13 +1,13 @@
 #!/usr/bin/env python
+import os
 import socket
 import sys
-import os
 
 # Unix socket
-server_address = '/var/run/rrdcached.sock'
+server_address = "/var/run/rrdcached.sock"
 
 # TCP socket
-#server_address = 'localhost:42217'
+# server_address = 'localhost:42217'
 
 sock = None
 
 try:
@@ -15,31 +15,31 @@ try:
         sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
     else:
         sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-        if ':' in server_address:
-            split = server_address.rsplit(':', 1)
-            server_address = (split[0],int(split[1]))
+        if ":" in server_address:
+            split = server_address.rsplit(":", 1)
+            server_address = (split[0], int(split[1]))
         else:
             server_address = (server_address, 42217)
     sock.connect(server_address)
 except socket.error as e:
-    sys.stderr.write(str(e) + ': ' + str(server_address) + '\n')
+    sys.stderr.write(str(e) + ": " + str(server_address) + "\n")
     sys.exit(1)
 
-buffer = ''
+buffer = ""
 max = -1
 
 try:
     sock.settimeout(5)
-    sock.sendall('STATS\n'.encode())
-    while max == -1 or len(buffer.split('\n')) < max:
+    sock.sendall("STATS\n".encode())
+    while max == -1 or len(buffer.split("\n")) < max:
         buffer += sock.recv(1024).decode()
         if max == -1:
             # the first line contains the number of following lines
-            max = int(buffer.split(' ')[0]) + 1
+            max = int(buffer.split(" ")[0]) + 1
 except socket.error as e:
-    sys.stderr.write(str(e) + '\n')
+    sys.stderr.write(str(e) + "\n")
     sys.exit(1)
 
 sock.close()
 
-print('<<>>')
-print(buffer.rstrip('\n'))
+print("<<>>")
+print(buffer.rstrip("\n"))
diff --git a/agent-local/unbound.sh b/agent-local/unbound.sh
index d9b378892..9383701e6 100755
--- a/agent-local/unbound.sh
+++ b/agent-local/unbound.sh
@@ -1,5 +1,5 @@
 #!/bin/bash
-unboundctl=`which unbound-control`
+unboundctl=$(which unbound-control)
 if [ "$?" != "0" ]; then
     #Unbound control executable doesn't exist
     exit
diff --git a/mk_enplug b/mk_enplug
index 5abecb72f..7ecbd73ec 100755
--- a/mk_enplug
+++ b/mk_enplug
@@ -56,7 +56,7 @@ if [ ! -z "$s" ]; then
         exit 1
     fi
 
-    if [ `script_enabled $s` != "yes" ]; then
-        enable_script $s
+    if [ `script_enabled "$s"` != "yes" ]; then
+        enable_script "$s"
     fi
 fi
diff --git a/snmp/Openwrt/wlClients.sh b/snmp/Openwrt/wlClients.sh
index cf6195f62..5becad170 100755
--- a/snmp/Openwrt/wlClients.sh
+++ b/snmp/Openwrt/wlClients.sh
@@ -12,15 +12,15 @@ if [ $# -gt 1 ]; then
 fi
 
 # Get path to this script
-scriptdir=$(dirname $(readlink -f -- $0))
+scriptdir=$(dirname $(readlink -f -- "$0"))
 
 # Get hostname, interface list. Set target, which is name returned for interface
-hostname=`/bin/uname -n`
-if [ $1 ]; then
+hostname=$(/bin/uname -n)
+if [ "$1" ]; then
     interfaces=$1
     target=$1
 else
-    interfaces=`cat $scriptdir/wlInterfaces.txt | cut -f 1 -d","`
+    interfaces=$(cat "$scriptdir"/wlInterfaces.txt | cut -f 1 -d",")
     target=wlan
 fi
 
@@ -28,7 +28,7 @@ fi
 count=0
 for interface in $interfaces
 do
-    new=`/usr/sbin/iw dev $interface station dump | /bin/grep Station | /usr/bin/cut -f 2 -s -d" " | /usr/bin/wc -l`
+    new=$(/usr/sbin/iw dev "$interface" station dump | /bin/grep Station | /usr/bin/cut -f 2 -s -d" " | /usr/bin/wc -l)
     count=$(( $count + $new ))
 done
 
diff --git a/snmp/Openwrt/wlFrequency.sh b/snmp/Openwrt/wlFrequency.sh
index 119fb54af..83e68b1d1 100755
--- a/snmp/Openwrt/wlFrequency.sh
+++ b/snmp/Openwrt/wlFrequency.sh
@@ -12,8 +12,8 @@ if [ $# -ne 1 ]; then
 fi
 
 # Get hostname, extract frequency
-hostname=`/bin/uname -n`
-frequency=`/usr/sbin/iw dev $1 info | /bin/grep channel | /usr/bin/cut -f 2- -s -d" " | /usr/bin/cut -f 2- -s -d"(" | /usr/bin/cut -f 1 -s -d" "`
+hostname=$(/bin/uname -n)
+frequency=$(/usr/sbin/iw dev "$1" info | /bin/grep channel | /usr/bin/cut -f 2- -s -d" " | /usr/bin/cut -f 2- -s -d"(" | /usr/bin/cut -f 1 -s -d" ")
 
 # Return snmp result
-/bin/echo $frequency
+/bin/echo "$frequency"
diff --git a/snmp/Openwrt/wlNoiseFloor.sh b/snmp/Openwrt/wlNoiseFloor.sh
index ab404364d..47d4b4ec2 100755
--- a/snmp/Openwrt/wlNoiseFloor.sh
+++ b/snmp/Openwrt/wlNoiseFloor.sh
@@ -13,8 +13,8 @@ fi
 
 # Get hostname, extract noise floor. Note, all associated stations have the same value, so just grab the first one
 # Use tail, not head (i.e. last line, not first), as head exits immediately, breaks the pipe to cut!
-hostname=`/bin/uname -n`
-noise=`/usr/bin/iwinfo $1 assoclist | /usr/bin/cut -s -d "/" -f 2 | /usr/bin/cut -s -d "(" -f 1 | /usr/bin/cut -s -d " " -f 2 | /usr/bin/tail -1`
+hostname=$(/bin/uname -n)
+noise=$(/usr/bin/iwinfo "$1" assoclist | /usr/bin/cut -s -d "/" -f 2 | /usr/bin/cut -s -d "(" -f 1 | /usr/bin/cut -s -d " " -f 2 | /usr/bin/tail -1)
 
 # Return snmp result
-/bin/echo $noise
+/bin/echo "$noise"
diff --git a/snmp/Openwrt/wlRate.sh b/snmp/Openwrt/wlRate.sh
index 76ab0c881..08b68b1bd 100755
--- a/snmp/Openwrt/wlRate.sh
+++ b/snmp/Openwrt/wlRate.sh
@@ -16,17 +16,17 @@ fi
 
 # Get hostname, calculate result. Sum just for debug, and have to return integer
 # => If not integer (e.g. 2.67e+07), LibreNMS will drop the exponent (result, 2.67 bits/sec!)
-hostname=`/bin/uname -n`
-ratelist=`/usr/sbin/iw dev $1 station dump | /bin/grep "$2 bitrate" | /usr/bin/cut -f 2 -s -d" "`
+hostname=$(/bin/uname -n)
+ratelist=$(/usr/sbin/iw dev "$1" station dump | /bin/grep "$2 bitrate" | /usr/bin/cut -f 2 -s -d" ")
 
 if [ "$3" == "sum" ]; then
-    result=`/bin/echo "$ratelist" | /usr/bin/awk -F ':' '{sum += $2} END {printf "%d\n", 1000000*sum}'`
+    result=$(/bin/echo "$ratelist" | /usr/bin/awk -F ':' '{sum += $2} END {printf "%d\n", 1000000*sum}')
 elif [ "$3" == "avg" ]; then
-    result=`/bin/echo "$ratelist" | /usr/bin/awk -F ':' '{sum += $2} END {printf "%d\n", 1000000*sum/NR}'`
+    result=$(/bin/echo "$ratelist" | /usr/bin/awk -F ':' '{sum += $2} END {printf "%d\n", 1000000*sum/NR}')
 elif [ "$3" == "min" ]; then
-    result=`/bin/echo "$ratelist" | /usr/bin/awk -F ':' 'NR == 1 || $2 < min {min = $2} END {printf "%d\n", 1000000*min}'`
+    result=$(/bin/echo "$ratelist" | /usr/bin/awk -F ':' 'NR == 1 || $2 < min {min = $2} END {printf "%d\n", 1000000*min}')
 elif [ "$3" == "max" ]; then
-    result=`/bin/echo "$ratelist" | /usr/bin/awk -F ':' 'NR == 1 || $2 > max {max = $2} END {printf "%d\n", 1000000*max}'`
+    result=$(/bin/echo "$ratelist" | /usr/bin/awk -F ':' 'NR == 1 || $2 > max {max = $2} END {printf "%d\n", 1000000*max}')
 fi
 
 # Return snmp result
-echo $result
+echo "$result"
diff --git a/snmp/Openwrt/wlSNR.sh b/snmp/Openwrt/wlSNR.sh
index 337d55979..d19283d82 100755
--- a/snmp/Openwrt/wlSNR.sh
+++ b/snmp/Openwrt/wlSNR.sh
@@ -14,17 +14,17 @@ if [ $# -ne 2 ]; then
 fi
 
 # Get hostname, calculate result. Sum just for debug, and return integer (safest / easiest)
-hostname=`/bin/uname -n`
-snrlist=`/usr/bin/iwinfo $1 assoclist | /usr/bin/cut -s -d "/" -f 2 | /usr/bin/cut -s -d "(" -f 2 | /usr/bin/cut -s -d " " -f 2 | /usr/bin/cut -s -d ")" -f 1`
+hostname=$(/bin/uname -n)
+snrlist=$(/usr/bin/iwinfo "$1" assoclist | /usr/bin/cut -s -d "/" -f 2 | /usr/bin/cut -s -d "(" -f 2 | /usr/bin/cut -s -d " " -f 2 | /usr/bin/cut -s -d ")" -f 1)
 
 if [ "$2" == "sum" ]; then
-    result=`/bin/echo "$snrlist" | /usr/bin/awk -F ':' '{sum += $1} END {printf "%d\n", sum}'`
+    result=$(/bin/echo "$snrlist" | /usr/bin/awk -F ':' '{sum += $1} END {printf "%d\n", sum}')
 elif [ "$2" == "avg" ]; then
-    result=`/bin/echo "$snrlist" | /usr/bin/awk -F ':' '{sum += $1} END {printf "%d\n", sum/NR}'`
+    result=$(/bin/echo "$snrlist" | /usr/bin/awk -F ':' '{sum += $1} END {printf "%d\n", sum/NR}')
 elif [ "$2" == "min" ]; then
-    result=`/bin/echo "$snrlist" | /usr/bin/awk -F ':' 'NR == 1 || $1 < min {min = $1} END {printf "%d\n", min}'`
+    result=$(/bin/echo "$snrlist" | /usr/bin/awk -F ':' 'NR == 1 || $1 < min {min = $1} END {printf "%d\n", min}')
 elif [ "$2" == "max" ]; then
-    result=`/bin/echo "$snrlist" | /usr/bin/awk -F ':' 'NR == 1 || $1 > max {max = $1} END {printf "%d\n", max}'`
+    result=$(/bin/echo "$snrlist" | /usr/bin/awk -F ':' 'NR == 1 || $1 > max {max = $1} END {printf "%d\n", max}')
 fi
 
 # Return snmp result
-echo $result
+echo "$result"
diff --git a/snmp/apache-stats.py b/snmp/apache-stats.py
index 1421c20e3..d55ae8d52 100755
--- a/snmp/apache-stats.py
+++ b/snmp/apache-stats.py
@@ -22,63 +22,67 @@ import urllib.request
 
 cachetime = 30
-cachefile = '/var/cache/librenms/apache-snmp'
+cachefile = "/var/cache/librenms/apache-snmp"
 
 # Check for a cache file newer than cachetime seconds ago
 if os.path.isfile(cachefile) and (time.time() - os.stat(cachefile)[8]) < cachetime:
     # Use cached data
-    f = open(cachefile, 'r')
+    f = open(cachefile, "r")
     data = f.read()
     f.close()
 else:
     # Grab the status URL (fresh data), needs package urllib3
-    data = urllib.request.urlopen("http://localhost/server-status?auto").read().decode('UTF-8')
+    data = (
+        urllib.request.urlopen("http://localhost/server-status?auto")
+        .read()
+        .decode("UTF-8")
+    )
     # Write file
-    f = open(cachefile+'.TMP.'+str(os.getpid()), 'w')
+    f = open(cachefile + ".TMP." + str(os.getpid()), "w")
     f.write(data)
     f.close()
-    os.rename(cachefile+'.TMP.'+str(os.getpid()), cachefile)
+    os.rename(cachefile + ".TMP." + str(os.getpid()), cachefile)
 
 # dice up the data
-scoreboardkey = ['_', 'S', 'R', 'W', 'K', 'D', 'C', 'L', 'G', 'I', '.']
+scoreboardkey = ["_", "S", "R", "W", "K", "D", "C", "L", "G", "I", "."]
 params = {}
 for line in data.splitlines():
-    fields = line.split(': ')
+    fields = line.split(": ")
     if len(fields) <= 1:
         continue  # "localhost" as first line causes out of index error
-    elif fields[0] == 'Scoreboard':
+    elif fields[0] == "Scoreboard":
         # count up the scoreboard into states
         states = {}
         for state in scoreboardkey:
             states[state] = 0
         for state in fields[1]:
             states[state] += 1
-    elif fields[0] == 'Total kBytes':
+    elif fields[0] == "Total kBytes":
         # turn into base(byte) value
-        params[fields[0]] = int(fields[1])*1024
+        params[fields[0]] = int(fields[1]) * 1024
     elif len(fields) > 1:
         # just store everything else
         params[fields[0]] = fields[1]
 
 # output the data in order(this is because some platforms don't have them all)
 dataorder = [
-    'Total Accesses',
-    'Total kBytes',
-    'CPULoad',
-    'Uptime',
-    'ReqPerSec',
-    'BytesPerSec',
-    'BytesPerReq',
-    'BusyWorkers',
-    'IdleWorkers'
+    "Total Accesses",
+    "Total kBytes",
+    "CPULoad",
+    "Uptime",
+    "ReqPerSec",
+    "BytesPerSec",
+    "BytesPerReq",
+    "BusyWorkers",
+    "IdleWorkers",
 ]
 
 for param in dataorder:
     try:
         print(params[param])
     except KeyError:
         # not all Apache's have all stats
-        print('U')
+        print("U")
 
 # print the scoreboard
 for state in scoreboardkey:
diff --git a/snmp/apache-stats.sh b/snmp/apache-stats.sh
index 275d81586..9b677fd3a 100755
--- a/snmp/apache-stats.sh
+++ b/snmp/apache-stats.sh
@@ -195,17 +195,17 @@ for (( c=0; c<${#Scoreboard}; c++ )); do
 done
 
 # scoreboard output order must be this ...
-echo ${Scoreboard_}
-echo ${ScoreboardS}
-echo ${ScoreboardR}
-echo ${ScoreboardW}
-echo ${ScoreboardK}
-echo ${ScoreboardD}
-echo ${ScoreboardC}
-echo ${ScoreboardL}
-echo ${ScoreboardG}
-echo ${ScoreboardI}
-echo ${ScoreboardDot}
+echo "${Scoreboard_}"
+echo "${ScoreboardS}"
+echo "${ScoreboardR}"
+echo "${ScoreboardW}"
+echo "${ScoreboardK}"
+echo "${ScoreboardD}"
+echo "${ScoreboardC}"
+echo "${ScoreboardL}"
+echo "${ScoreboardG}"
+echo "${ScoreboardI}"
+echo "${ScoreboardDot}"
 
 # clean up
 if [ -f ${Tmp_File} ]; then
diff --git a/snmp/backupninja.py b/snmp/backupninja.py
index ce9408d67..80cf55f7f 100644
--- a/snmp/backupninja.py
+++ b/snmp/backupninja.py
@@ -1,39 +1,45 @@
 #!/usr/bin/env python3
 import io
-import re
-import os
 import json
+import os
+import re
 
 version = 1
 error = 0
-error_string = ''
+error_string = ""
 
-logfile = '/var/log/backupninja.log'
+logfile = "/var/log/backupninja.log"
 
 backupninja_datas = {
-    'last_actions': 0,
-    'last_fatal': 0,
-    'last_error': 0,
-    'last_warning': 0}
+    "last_actions": 0,
+    "last_fatal": 0,
+    "last_error": 0,
+    "last_warning": 0,
+}
 
 if not os.path.isfile(logfile):
-    error_string = 'file unavailable'
+    error_string = "file unavailable"
     error = 1
     break
 
-with io.open(logfile,'r') as f:
+with io.open(logfile, "r") as f:
     for line in reversed(list(f)):
-        match = re.search('^(.*) [a-zA-Z]*: FINISHED: ([0-9]+) actions run. ([0-9]+) fatal. ([0-9]+) error. ([0-9]+) warning.$', line)
+        match = re.search(
+            "^(.*) [a-zA-Z]*: FINISHED: ([0-9]+) actions run. ([0-9]+) fatal. ([0-9]+) error. ([0-9]+) warning.$",
+            line,
+        )
         if match:
-            backupninja_datas['last_actions'] = int(match.group(2))
-            backupninja_datas['last_fatal'] = int(match.group(3))
-            backupninja_datas['last_error'] = int(match.group(4))
-            backupninja_datas['last_warning'] = int(match.group(5))
+            backupninja_datas["last_actions"] = int(match.group(2))
+            backupninja_datas["last_fatal"] = int(match.group(3))
+            backupninja_datas["last_error"] = int(match.group(4))
+            backupninja_datas["last_warning"] = int(match.group(5))
             break
 
-output = {'version': version,
-          'error': error,
-          'errorString': error_string,
-          'data': backupninja_datas}
+output = {
+    "version": version,
+    "error": error,
+    "errorString": error_string,
+    "data": backupninja_datas,
+}
 
 print(json.dumps(output))
diff --git a/snmp/certificate.py b/snmp/certificate.py
index d97f66f12..c141afcd1 100755
--- a/snmp/certificate.py
+++ b/snmp/certificate.py
@@ -1,12 +1,11 @@
 #!/usr/bin/env python3
-import socket
-import ssl
 import datetime
 import json
+import socket
+import ssl
 
-
-CONFIGFILE='/etc/snmp/certificate.json'
+CONFIGFILE = "/etc/snmp/certificate.json"
 # {"domains": [
 #     {"fqdn": "www.mydomain.com"},
 #     {"fqdn": "www2.mydomain.com"}
@@ -34,55 +33,61 @@ def get_certificate_data(domain, port=443):
     # Manage expired certificates
     except ssl.SSLCertVerificationError as e:
         # Arbitrary start date
-        ssl_info['notBefore'] = "Jan 1 00:00:00 2020 GMT"
+        ssl_info["notBefore"] = "Jan 1 00:00:00 2020 GMT"
         # End date is now (we don't have the real one but the certificate is expired)
         one_minute_further = datetime.datetime.now() + datetime.timedelta(minutes=1)
-        ssl_info['notAfter'] = one_minute_further.strftime('%b %d %H:%M:%S %Y GMT')
+        ssl_info["notAfter"] = one_minute_further.strftime("%b %d %H:%M:%S %Y GMT")
 
     return ssl_info, error_msg
 
 
 output = {}
-output['error'] = 0
-output['errorString'] = ""
-output['version'] = 1
+output["error"] = 0
+output["errorString"] = ""
+output["version"] = 1
 
-with open(CONFIGFILE, 'r') as json_file:
+with open(CONFIGFILE, "r") as json_file:
     try:
         configfile = json.load(json_file)
     except json.decoder.JSONDecodeError as e:
-        output['error'] = 1
-        output['errorString'] = "Configfile Error: '%s'" % e
+        output["error"] = 1
+        output["errorString"] = "Configfile Error: '%s'" % e
 
-if not output['error']:
+if not output["error"]:
     output_data_list = []
-    for domain in configfile['domains']:
+    for domain in configfile["domains"]:
         output_data = {}
-        if 'port' not in domain.keys():
-            domain['port'] = 443
-        certificate_data, error_msg = get_certificate_data(domain['fqdn'], domain['port'])
+        if "port" not in domain.keys():
+            domain["port"] = 443
+        certificate_data, error_msg = get_certificate_data(
+            domain["fqdn"], domain["port"]
+        )
 
-        output_data['cert_name'] = domain['fqdn']
+        output_data["cert_name"] = domain["fqdn"]
 
         if not error_msg:
-            ssl_date_format = r'%b %d %H:%M:%S %Y %Z'
-            validity_end = datetime.datetime.strptime(certificate_data['notAfter'], ssl_date_format)
-            validity_start = datetime.datetime.strptime(certificate_data['notBefore'], ssl_date_format)
+            ssl_date_format = r"%b %d %H:%M:%S %Y %Z"
+            validity_end = datetime.datetime.strptime(
+                certificate_data["notAfter"], ssl_date_format
+            )
+            validity_start = datetime.datetime.strptime(
+                certificate_data["notBefore"], ssl_date_format
+            )
 
             cert_age = datetime.datetime.now() - validity_start
             cert_still_valid = validity_end - datetime.datetime.now()
 
-            output_data['age'] = cert_age.days
-            output_data['remaining_days'] = cert_still_valid.days
+            output_data["age"] = cert_age.days
+            output_data["remaining_days"] = cert_still_valid.days
         else:
-            output_data['age'] = None
-            output_data['remaining_days'] = None
-            output['error'] = 1
-            output['errorString'] = "%s: %s" % (domain['fqdn'], error_msg)
+            output_data["age"] = None
+            output_data["remaining_days"] = None
+            output["error"] = 1
+            output["errorString"] = "%s: %s" % (domain["fqdn"], error_msg)
 
         output_data_list.append(output_data)
 
-    output['data'] = output_data_list
+    output["data"] = output_data_list
 
 print(json.dumps(output))
diff --git a/snmp/chip.sh b/snmp/chip.sh
index 07012d906..4dc2fac05 100644
--- a/snmp/chip.sh
+++ b/snmp/chip.sh
@@ -18,13 +18,13 @@ BAT_D=0
 
 if [ $STATUS_ACIN == 1 ]; then
     # ACIN voltage
-    REG=`i2cget -y -f 0 0x34 0x56 w|awk '{print "0x"substr($0,5,2)substr($0,4,1)}'`
-    REG=`printf "%d" "$REG"`
-    ACIN=`echo "$REG*0.0017"|bc`
+    REG=$(i2cget -y -f 0 0x34 0x56 w|awk '{print "0x"substr($0,5,2)substr($0,4,1)}')
+    REG=$(printf "%d" "$REG")
+    ACIN=$(echo "$REG*0.0017"|bc)
     # ACIN Current
-    REG=`i2cget -y -f 0 0x34 0x58 w|awk '{print "0x"substr($0,5,2)substr($0,4,1)}'`
-    REG=`printf "%d" "$REG"`
-    ACIN_C=`echo "$REG*0.000625"|bc`
+    REG=$(i2cget -y -f 0 0x34 0x58 w|awk '{print "0x"substr($0,5,2)substr($0,4,1)}')
+    REG=$(printf "%d" "$REG")
+    ACIN_C=$(echo "$REG*0.000625"|bc)
 else
     ACIN=0
     ACIN_C=0
@@ -32,14 +32,14 @@ fi
 
 if [ $STATUS_VBUS == 1 ]; then
     # VBUS voltage
-    REG=`i2cget -y -f 0 0x34 0x5A w|awk '{print "0x"substr($0,5,2)substr($0,4,1)}'`
-    REG=`printf "%d" "$REG"`
-    VBUS=`echo "$REG*0.0017"|bc`
+    REG=$(i2cget -y -f 0 0x34 0x5A w|awk '{print "0x"substr($0,5,2)substr($0,4,1)}')
+    REG=$(printf "%d" "$REG")
+    VBUS=$(echo "$REG*0.0017"|bc)
 
     # VBUS Current
-    REG=`i2cget -y -f 0 0x34 0x5C w|awk '{print "0x"substr($0,5,2)substr($0,4,1)}'`
-    REG=`printf "%d" "$REG"`
-    VBUS_C=`echo "$REG*0.000375"|bc`
+    REG=$(i2cget -y -f 0 0x34 0x5C w|awk '{print "0x"substr($0,5,2)substr($0,4,1)}')
+    REG=$(printf "%d" "$REG")
+    VBUS_C=$(echo "$REG*0.000375"|bc)
 else
     VBUS=0
     VBUS_C=0
@@ -47,24 +47,24 @@ fi
 
 if [ $STATUS_BATCON == 1 ]; then
     # Battery Voltage
-    REG=`i2cget -y -f 0 0x34 0x78 w|awk '{print "0x"substr($0,5,2)substr($0,4,1)}'`
-    REG=`printf "%d" "$REG"`
-    VBAT=`echo "$REG*0.0011"|bc`
+    REG=$(i2cget -y -f 0 0x34 0x78 w|awk '{print "0x"substr($0,5,2)substr($0,4,1)}')
+    REG=$(printf "%d" "$REG")
+    VBAT=$(echo "$REG*0.0011"|bc)
 
     if [ $STATUS_CHG_DIR == 1 ]; then
         # Battery Charging Current
-        REG=`i2cget -y -f 0 0x34 0x7A w|awk '{print "0x"substr($0,5,2)substr($0,4,1)}'`
-        REG_C=`printf "%d" "$REG"`
-        BAT_C=`echo "scale=2;$REG_C*0.001"|bc`
+        REG=$(i2cget -y -f 0 0x34 0x7A w|awk '{print "0x"substr($0,5,2)substr($0,4,1)}')
+        REG_C=$(printf "%d" "$REG")
+        BAT_C=$(echo "scale=2;$REG_C*0.001"|bc)
     else
         # Battery Discharge Current
-        REG=`i2cget -y -f 0 0x34 0x7C w|awk '{print "0x"substr($0,5,2)substr($0,4,1)}'`
-        REG_D=`printf "%d" "$REG"`
-        BAT_D=`echo "scale=2;$REG_D*0.001"|bc`
+        REG=$(i2cget -y -f 0 0x34 0x7C w|awk '{print "0x"substr($0,5,2)substr($0,4,1)}')
+        REG_D=$(printf "%d" "$REG")
+        BAT_D=$(echo "scale=2;$REG_D*0.001"|bc)
     fi
     # Battery %
-    REG=`i2cget -y -f 0 0x34 0xB9`
-    BAT_PERCENT=`printf "%d" "$REG"`
+    REG=$(i2cget -y -f 0 0x34 0xB9)
+    BAT_PERCENT=$(printf "%d" "$REG")
 else
     VBAT=0
     BATT_CUR=0
@@ -72,11 +72,11 @@ else
 fi
 
 # Temperature
-REG=`i2cget -y -f 0 0x34 0x5E w|awk '{print "0x"substr($0,5,2)substr($0,4,1)}'`
-REG=`printf "%d" "$REG"`
-THERM=`echo "($REG*0.1)-144.7"|bc`
+REG=$(i2cget -y -f 0 0x34 0x5E w|awk '{print "0x"substr($0,5,2)substr($0,4,1)}')
+REG=$(printf "%d" "$REG")
+THERM=$(echo "($REG*0.1)-144.7"|bc)
 
-echo $THERM
+echo "$THERM"
 echo $ACIN
 echo $ACIN_C
 echo $VBUS
diff --git a/snmp/dhcp.py b/snmp/dhcp.py
index 12937370e..532665dd8 100755
--- a/snmp/dhcp.py
+++ b/snmp/dhcp.py
@@ -1,10 +1,10 @@
 #!/usr/bin/env python3
 
-import subprocess
 import json
+import subprocess
 from os.path import isfile
 
-CONFIGFILE = '/etc/snmp/dhcp.json'
+CONFIGFILE = "/etc/snmp/dhcp.json"
 
 # Configfile is needed /etc/snmp/dhcp.json
 #
@@ -13,10 +13,10 @@
 #
 
 error = 0
-error_string = ''
+error_string = ""
 version = 2
 
-with open(CONFIGFILE, 'r') as json_file:
+with open(CONFIGFILE, "r") as json_file:
     try:
         configfile = json.load(json_file)
     except json.decoder.JSONDecodeError as e:
@@ -25,75 +25,76 @@
 
 if not error:
-    leases = {'total': 0,
-              'active': 0,
-              'expired': 0,
-              'released': 0,
-              'abandoned': 0,
-              'reset': 0,
-              'bootp': 0,
-              'backup': 0,
-              'free': 0,
-              }
+    leases = {
+        "total": 0,
+        "active": 0,
+        "expired": 0,
+        "released": 0,
+        "abandoned": 0,
+        "reset": 0,
+        "bootp": 0,
+        "backup": 0,
+        "free": 0,
+    }
-    if not isfile(configfile['leasefile']):
+    if not isfile(configfile["leasefile"]):
         error = 1
-        error_string = 'Lease File not found'
+        error_string = "Lease File not found"
     else:
-        with open(configfile['leasefile']) as fp:
+        with open(configfile["leasefile"]) as fp:
             line = fp.readline()
             while line:
                 line = fp.readline()
-                if 'rewind' not in line:
-                    if line.startswith('lease'):
-                        leases['total'] += 1
-                    elif 'binding state active' in line:
-                        leases['active'] += 1
-                    elif 'binding state expired' in line:
-                        leases['expired'] += 1
-                    elif 'binding state released' in line:
-                        leases['released'] += 1
-                    elif 'binding state abandoned' in line:
-                        leases['abandoned'] += 1
-                    elif 'binding state reset' in line:
-                        leases['reset'] += 1
-                    elif 'binding state bootp' in line:
-                        leases['bootp'] += 1
-                    elif 'binding state backup' in line:
-                        leases['backup'] += 1
-                    elif 'binding state free' in line:
-                        leases['free'] += 1
+                if "rewind" not in line:
+                    if line.startswith("lease"):
+                        leases["total"] += 1
+                    elif "binding state active" in line:
+                        leases["active"] += 1
+                    elif "binding state expired" in line:
+                        leases["expired"] += 1
+                    elif "binding state released" in line:
+                        leases["released"] += 1
+                    elif "binding state abandoned" in line:
+                        leases["abandoned"] += 1
+                    elif "binding state reset" in line:
+                        leases["reset"] += 1
+                    elif "binding state bootp" in line:
+                        leases["bootp"] += 1
+                    elif "binding state backup" in line:
+                        leases["backup"] += 1
+                    elif "binding state free" in line:
+                        leases["free"] += 1
 
 shell_cmd = "dhcpd-pools -s i -A"
-pool_data = subprocess.Popen(shell_cmd, shell=True, stdout=subprocess.PIPE).stdout.read().split(b'\n')
+pool_data = (
+    subprocess.Popen(shell_cmd, shell=True, stdout=subprocess.PIPE)
+    .stdout.read()
+    .split(b"\n")
+)
 
-data = {'leases': leases,
-        'pools': [],
-        'networks': [],
-        'all_networks': []
-        }
+data = {"leases": leases, "pools": [], "networks": [], "all_networks": []}
 
 category = None
 jump_line = 0
 
 for p in pool_data:
-    line = p.decode('utf-8')
+    line = p.decode("utf-8")
     if jump_line:
         jump_line -= 1
         continue
 
-    if line.startswith('Ranges:'):
-        category = 'pools'
+    if line.startswith("Ranges:"):
+        category = "pools"
         jump_line = 1
         continue
 
-    if line.startswith('Shared networks:'):
-        category = 'networks'
+    if line.startswith("Shared networks:"):
+        category = "networks"
         jump_line = 1
         continue
 
-    if line.startswith('Sum of all ranges:'):
-        category = 'all_networks'
+    if line.startswith("Sum of all ranges:"):
line.startswith("Sum of all ranges:"): + category = "all_networks" jump_line = 1 continue @@ -102,34 +103,38 @@ p = line.split() - if category == 'pools': - data[category].append({'first_ip': p[1], - 'last_ip':p[3], - 'max': p[4], - 'cur': p[5], - 'percent': p[6], - }) + if category == "pools": + data[category].append( + { + "first_ip": p[1], + "last_ip": p[3], + "max": p[4], + "cur": p[5], + "percent": p[6], + } + ) continue - if category == 'networks': - data[category].append({'network': p[0], - 'max': p[1], - 'cur': p[2], - 'percent': p[3], - }) + if category == "networks": + data[category].append( + { + "network": p[0], + "max": p[1], + "cur": p[2], + "percent": p[3], + } + ) continue - if category == 'all_networks': - data[category] ={'max': p[2], - 'cur': p[3], - 'percent': p[4], - } + if category == "all_networks": + data[category] = { + "max": p[2], + "cur": p[3], + "percent": p[4], + } continue -output = {'version': version, - 'error': error, - 'errorString': error_string, - 'data': data} +output = {"version": version, "error": error, "errorString": error_string, "data": data} -print (json.dumps(output)) +print(json.dumps(output)) diff --git a/snmp/exim-stats.sh b/snmp/exim-stats.sh index 4b430d549..729205f62 100644 --- a/snmp/exim-stats.sh +++ b/snmp/exim-stats.sh @@ -19,17 +19,17 @@ # ------------------------------------------------------------- # # restart snmpd and activate the app for desired host # ################################################################# -BIN_EXIM=`which exim` -BIN_GREP=`which grep` -BIN_WC=`which wc` +BIN_EXIM=$(which exim) +BIN_GREP=$(which grep) +BIN_WC=$(which wc) CFG_EXIM_1='-bp' CFG_EXIM_2='-bpc' CFG_GREP='frozen' CFG_WC='-l' ################################################################# -FROZEN=`$BIN_EXIM $CFG_EXIM_1 | $BIN_GREP $CFG_GREP | $BIN_WC $CFG_WC` -echo $FROZEN +FROZEN=$($BIN_EXIM $CFG_EXIM_1 | $BIN_GREP $CFG_GREP | $BIN_WC $CFG_WC) +echo "$FROZEN" -QUEUE=`$BIN_EXIM $CFG_EXIM_2` -echo $QUEUE \ No newline at end of file +QUEUE=$($BIN_EXIM $CFG_EXIM_2) +echo "$QUEUE" diff --git a/snmp/freeradius.sh b/snmp/freeradius.sh index dac7e9980..8a9423b38 100644 --- a/snmp/freeradius.sh +++ b/snmp/freeradius.sh @@ -25,46 +25,46 @@ if [ $AGENT == 1 ]; then echo "<<>>" fi -RESULT=`echo "$RADIUS_STATUS_CMD" | $BIN_RADCLIENT -x $RADIUS_SERVER:$RADIUS_PORT status $RADIUS_KEY` +RESULT=$(echo "$RADIUS_STATUS_CMD" | $BIN_RADCLIENT -x $RADIUS_SERVER:$RADIUS_PORT status $RADIUS_KEY) -echo $RESULT | grep -o 'FreeRADIUS-Total-Access-Requests = [[:digit:]]*' -echo $RESULT | grep -o 'FreeRADIUS-Total-Access-Accepts = [[:digit:]]*' -echo $RESULT | grep -o 'FreeRADIUS-Total-Access-Rejects = [[:digit:]]*' -echo $RESULT | grep -o 'FreeRADIUS-Total-Access-Challenges = [[:digit:]]*' -echo $RESULT | grep -o 'FreeRADIUS-Total-Auth-Responses = [[:digit:]]*' -echo $RESULT | grep -o 'FreeRADIUS-Total-Auth-Duplicate-Requests = [[:digit:]]*' -echo $RESULT | grep -o 'FreeRADIUS-Total-Auth-Malformed-Requests = [[:digit:]]*' -echo $RESULT | grep -o 'FreeRADIUS-Total-Auth-Invalid-Requests = [[:digit:]]*' -echo $RESULT | grep -o 'FreeRADIUS-Total-Auth-Dropped-Requests = [[:digit:]]*' -echo $RESULT | grep -o 'FreeRADIUS-Total-Auth-Unknown-Types = [[:digit:]]*' -echo $RESULT | grep -o 'FreeRADIUS-Total-Accounting-Requests = [[:digit:]]*' -echo $RESULT | grep -o 'FreeRADIUS-Total-Accounting-Responses = [[:digit:]]*' -echo $RESULT | grep -o 'FreeRADIUS-Total-Acct-Duplicate-Requests = [[:digit:]]*' -echo $RESULT | grep -o 'FreeRADIUS-Total-Acct-Malformed-Requests = 
[[:digit:]]*' -echo $RESULT | grep -o 'FreeRADIUS-Total-Acct-Invalid-Requests = [[:digit:]]*' -echo $RESULT | grep -o 'FreeRADIUS-Total-Acct-Dropped-Requests = [[:digit:]]*' -echo $RESULT | grep -o 'FreeRADIUS-Total-Acct-Unknown-Types = [[:digit:]]*' -echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Access-Requests = [[:digit:]]*' -echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Access-Accepts = [[:digit:]]*' -echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Access-Rejects = [[:digit:]]*' -echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Access-Challenges = [[:digit:]]*' -echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Auth-Responses = [[:digit:]]*' -echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Auth-Duplicate-Requests = [[:digit:]]*' -echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Auth-Malformed-Requests = [[:digit:]]*' -echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Auth-Invalid-Requests = [[:digit:]]*' -echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Auth-Dropped-Requests = [[:digit:]]*' -echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Auth-Unknown-Types = [[:digit:]]*' -echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Accounting-Requests = [[:digit:]]*' -echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Accounting-Responses = [[:digit:]]*' -echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Acct-Duplicate-Requests = [[:digit:]]*' -echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Acct-Malformed-Requests = [[:digit:]]*' -echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Acct-Invalid-Requests = [[:digit:]]*' -echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Acct-Dropped-Requests = [[:digit:]]*' -echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Acct-Unknown-Types = [[:digit:]]*' -echo $RESULT | grep -o 'FreeRADIUS-Queue-Len-Internal = [[:digit:]]*' -echo $RESULT | grep -o 'FreeRADIUS-Queue-Len-Proxy = [[:digit:]]*' -echo $RESULT | grep -o 'FreeRADIUS-Queue-Len-Auth = [[:digit:]]*' -echo $RESULT | grep -o 'FreeRADIUS-Queue-Len-Acct = [[:digit:]]*' -echo $RESULT | grep -o 'FreeRADIUS-Queue-Len-Detail = [[:digit:]]*' -echo $RESULT | grep -o 'FreeRADIUS-Queue-PPS-In = [[:digit:]]*' -echo $RESULT | grep -o 'FreeRADIUS-Queue-PPS-Out = [[:digit:]]*' +echo "$RESULT" | grep -o 'FreeRADIUS-Total-Access-Requests = [[:digit:]]*' +echo "$RESULT" | grep -o 'FreeRADIUS-Total-Access-Accepts = [[:digit:]]*' +echo "$RESULT" | grep -o 'FreeRADIUS-Total-Access-Rejects = [[:digit:]]*' +echo "$RESULT" | grep -o 'FreeRADIUS-Total-Access-Challenges = [[:digit:]]*' +echo "$RESULT" | grep -o 'FreeRADIUS-Total-Auth-Responses = [[:digit:]]*' +echo "$RESULT" | grep -o 'FreeRADIUS-Total-Auth-Duplicate-Requests = [[:digit:]]*' +echo "$RESULT" | grep -o 'FreeRADIUS-Total-Auth-Malformed-Requests = [[:digit:]]*' +echo "$RESULT" | grep -o 'FreeRADIUS-Total-Auth-Invalid-Requests = [[:digit:]]*' +echo "$RESULT" | grep -o 'FreeRADIUS-Total-Auth-Dropped-Requests = [[:digit:]]*' +echo "$RESULT" | grep -o 'FreeRADIUS-Total-Auth-Unknown-Types = [[:digit:]]*' +echo "$RESULT" | grep -o 'FreeRADIUS-Total-Accounting-Requests = [[:digit:]]*' +echo "$RESULT" | grep -o 'FreeRADIUS-Total-Accounting-Responses = [[:digit:]]*' +echo "$RESULT" | grep -o 'FreeRADIUS-Total-Acct-Duplicate-Requests = [[:digit:]]*' +echo "$RESULT" | grep -o 'FreeRADIUS-Total-Acct-Malformed-Requests = [[:digit:]]*' +echo "$RESULT" | grep -o 'FreeRADIUS-Total-Acct-Invalid-Requests = [[:digit:]]*' +echo "$RESULT" | grep -o 'FreeRADIUS-Total-Acct-Dropped-Requests = [[:digit:]]*' +echo "$RESULT" | grep -o 'FreeRADIUS-Total-Acct-Unknown-Types = [[:digit:]]*' +echo "$RESULT" | grep -o 
+echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Access-Accepts = [[:digit:]]*'
+echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Access-Rejects = [[:digit:]]*'
+echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Access-Challenges = [[:digit:]]*'
+echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Auth-Responses = [[:digit:]]*'
+echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Auth-Duplicate-Requests = [[:digit:]]*'
+echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Auth-Malformed-Requests = [[:digit:]]*'
+echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Auth-Invalid-Requests = [[:digit:]]*'
+echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Auth-Dropped-Requests = [[:digit:]]*'
+echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Auth-Unknown-Types = [[:digit:]]*'
+echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Accounting-Requests = [[:digit:]]*'
+echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Accounting-Responses = [[:digit:]]*'
+echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Acct-Duplicate-Requests = [[:digit:]]*'
+echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Acct-Malformed-Requests = [[:digit:]]*'
+echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Acct-Invalid-Requests = [[:digit:]]*'
+echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Acct-Dropped-Requests = [[:digit:]]*'
+echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Acct-Unknown-Types = [[:digit:]]*'
+echo "$RESULT" | grep -o 'FreeRADIUS-Queue-Len-Internal = [[:digit:]]*'
+echo "$RESULT" | grep -o 'FreeRADIUS-Queue-Len-Proxy = [[:digit:]]*'
+echo "$RESULT" | grep -o 'FreeRADIUS-Queue-Len-Auth = [[:digit:]]*'
+echo "$RESULT" | grep -o 'FreeRADIUS-Queue-Len-Acct = [[:digit:]]*'
+echo "$RESULT" | grep -o 'FreeRADIUS-Queue-Len-Detail = [[:digit:]]*'
+echo "$RESULT" | grep -o 'FreeRADIUS-Queue-PPS-In = [[:digit:]]*'
+echo "$RESULT" | grep -o 'FreeRADIUS-Queue-PPS-Out = [[:digit:]]*'
diff --git a/snmp/gpsd b/snmp/gpsd
index 70fe924c9..eed38c4bc 100755
--- a/snmp/gpsd
+++ b/snmp/gpsd
@@ -1,45 +1,45 @@
-#!/usr/bin/env bash
-#
-# Copyright (c) 2019 Mike Centola 
-#
-# Please make sure the paths below are correct.
-# Alternatively you can put them in $0.conf, meaning if you've named
-# this script gpsd.sh then it must go in gpsd.sh.conf .
-#
-#
-################################################################
-# Don't change anything unless you know what are you doing     #
-################################################################
-
-BIN_GPIPE='/usr/bin/env gpspipe'
-BIN_GREP='/usr/bin/env grep'
-BIN_PYTHON='/usr/bin/env python'
-
-# Check for config file
-CONFIG=$0".conf"
-if [ -f $CONFIG ]; then
-    . $CONFIG
-fi
-
-# Create Temp File
-TMPFILE=$(mktemp)
-trap "rm -f $TMPFILE" 0 2 3 15
-
-# Write GPSPIPE Data to Temp File
-$BIN_GPIPE -w -n 20 > $TMPFILE
-
-# Parse Temp file for GPSD Data
-VERSION=`cat $TMPFILE | $BIN_GREP -m 1 "VERSION" | $BIN_PYTHON -c 'import sys,json;print json.load(sys.stdin)["rev"]'`
-GPSDMODE=`cat $TMPFILE | $BIN_GREP -m 1 "mode" | $BIN_PYTHON -c 'import sys,json;print json.load(sys.stdin)["mode"]'`
-HDOP=`cat $TMPFILE | $BIN_GREP -m 1 "hdop" | $BIN_PYTHON -c 'import sys,json;print json.load(sys.stdin)["hdop"]'`
-VDOP=`cat $TMPFILE | $BIN_GREP -m 1 "vdop" | $BIN_PYTHON -c 'import sys,json;print json.load(sys.stdin)["vdop"]'`
-LAT=`cat $TMPFILE | $BIN_GREP -m 1 "lat" | $BIN_PYTHON -c 'import sys,json;print json.load(sys.stdin)["lat"]'`
-LONG=`cat $TMPFILE | $BIN_GREP -m 1 "lon" | $BIN_PYTHON -c 'import sys,json;print json.load(sys.stdin)["lon"]'`
-ALT=`cat $TMPFILE | $BIN_GREP -m 1 "alt" | $BIN_PYTHON -c 'import sys,json;print json.load(sys.stdin)["alt"]'`
-SATS=`cat $TMPFILE | $BIN_GREP -m 1 "SKY" | $BIN_PYTHON -c 'import sys,json;print len(json.load(sys.stdin)["satellites"])'`
-SATSUSED=`cat $TMPFILE | $BIN_GREP -m 1 "SKY" | $BIN_PYTHON -c 'import sys,json;print len([sat for sat in json.load(sys.stdin)["satellites"] if sat["used"]])'`
-
-# Output info for SNMP Extend
-echo '{"data":{"mode":"'$GPSDMODE'", "hdop":"'$HDOP'", "vdop":"'$VDOP'", "latitude":"'$LAT'", "longitude":"'$LONG'", "altitude":"'$ALT'", "satellites":"'$SATS'", "satellites_used":"'$SATSUSED'"}, "error":"0", "errorString":"", "version":"'$VERSION'"}'
-
-rm $TMPFILE
+#!/usr/bin/env bash
+#
+# Copyright (c) 2019 Mike Centola 
+#
+# Please make sure the paths below are correct.
+# Alternatively you can put them in $0.conf, meaning if you've named
+# this script gpsd.sh then it must go in gpsd.sh.conf .
+#
+#
+################################################################
+# Don't change anything unless you know what are you doing     #
+################################################################
+
+BIN_GPIPE='/usr/bin/env gpspipe'
+BIN_GREP='/usr/bin/env grep'
+BIN_PYTHON='/usr/bin/env python'
+
+# Check for config file
+CONFIG=$0".conf"
+if [ -f "$CONFIG" ]; then
+    . "$CONFIG"
+fi
+
+# Create Temp File
+TMPFILE=$(mktemp)
+trap "rm -f $TMPFILE" 0 2 3 15
+
+# Write GPSPIPE Data to Temp File
+$BIN_GPIPE -w -n 20 > "$TMPFILE"
+
+# Parse Temp file for GPSD Data
+VERSION=$(cat "$TMPFILE" | $BIN_GREP -m 1 "VERSION" | $BIN_PYTHON -c 'import sys,json;print json.load(sys.stdin)["rev"]')
+GPSDMODE=$(cat "$TMPFILE" | $BIN_GREP -m 1 "mode" | $BIN_PYTHON -c 'import sys,json;print json.load(sys.stdin)["mode"]')
+HDOP=$(cat "$TMPFILE" | $BIN_GREP -m 1 "hdop" | $BIN_PYTHON -c 'import sys,json;print json.load(sys.stdin)["hdop"]')
+VDOP=$(cat "$TMPFILE" | $BIN_GREP -m 1 "vdop" | $BIN_PYTHON -c 'import sys,json;print json.load(sys.stdin)["vdop"]')
+LAT=$(cat "$TMPFILE" | $BIN_GREP -m 1 "lat" | $BIN_PYTHON -c 'import sys,json;print json.load(sys.stdin)["lat"]')
+LONG=$(cat "$TMPFILE" | $BIN_GREP -m 1 "lon" | $BIN_PYTHON -c 'import sys,json;print json.load(sys.stdin)["lon"]')
+ALT=$(cat "$TMPFILE" | $BIN_GREP -m 1 "alt" | $BIN_PYTHON -c 'import sys,json;print json.load(sys.stdin)["alt"]')
+SATS=$(cat "$TMPFILE" | $BIN_GREP -m 1 "SKY" | $BIN_PYTHON -c 'import sys,json;print len(json.load(sys.stdin)["satellites"])')
+SATSUSED=$(cat "$TMPFILE" | $BIN_GREP -m 1 "SKY" | $BIN_PYTHON -c 'import sys,json;print len([sat for sat in json.load(sys.stdin)["satellites"] if sat["used"]])')
+
+# Output info for SNMP Extend
+echo '{"data":{"mode":"'"$GPSDMODE"'", "hdop":"'"$HDOP"'", "vdop":"'"$VDOP"'", "latitude":"'"$LAT"'", "longitude":"'"$LONG"'", "altitude":"'"$ALT"'", "satellites":"'"$SATS"'", "satellites_used":"'"$SATSUSED"'"}, "error":"0", "errorString":"", "version":"'"$VERSION"'"}'
+
+rm "$TMPFILE"
diff --git a/snmp/icecast-stats.sh b/snmp/icecast-stats.sh
index c93c6bca0..541c174c8 100644
--- a/snmp/icecast-stats.sh
+++ b/snmp/icecast-stats.sh
@@ -5,10 +5,10 @@
 used_memory=$(ps -U icecast -o rsz | awk 'FNR==2{print}')
 cpu_load=$(ps -U icecast -o %cpu | awk 'FNR==2{print}')
 pid=$(pidof icecast)
-total_files=$(ls -l /proc/${pid}/fd | wc -l)
+total_files=$(ls -l /proc/"${pid}"/fd | wc -l)
 
-echo "Used Memory="$used_memory
-echo "CPU Load="$cpu_load
-echo "Open files="$total_files
+echo "Used Memory=""$used_memory"
+echo "CPU Load=""$cpu_load"
+echo "Open files=""$total_files"
 
 exit
diff --git a/snmp/mailcow-dockerized-postfix b/snmp/mailcow-dockerized-postfix
index 354f14fbd..8fd536481 100644
--- a/snmp/mailcow-dockerized-postfix
+++ b/snmp/mailcow-dockerized-postfix
@@ -21,9 +21,9 @@
 # requirements: mailcow-dockerized and pflogsumm
 #
 
-import subprocess
-import re
 import json
+import re
+import subprocess
 
 # LibreNMS poller interval
 librenms_poller_interval = 300
@@ -34,37 +34,46 @@ def libre_to_mcd_postfix(libre_seconds):
 
 
 def cli_get_docker_container():
-    return subprocess.check_output("docker ps -qf name=postfix-mailcow", shell=True).decode('utf8').strip()
+    return (
+        subprocess.check_output("docker ps -qf name=postfix-mailcow", shell=True)
+        .decode("utf8")
+        .strip()
+    )
 
 
 def cli_command():
-    cli_part = "docker logs --since " + libre_to_mcd_postfix(librenms_poller_interval) \
-        + "m " + cli_get_docker_container() + "| pflogsumm --smtpd-stats"
+    cli_part = (
+        "docker logs --since "
+        + libre_to_mcd_postfix(librenms_poller_interval)
+        + "m "
+        + cli_get_docker_container()
+        + "| pflogsumm --smtpd-stats"
+    )
     return cli_part
 
 
 def get_output():
-    return subprocess.check_output(cli_command(), shell=True).decode('utf8')
+    return subprocess.check_output(cli_command(), shell=True).decode("utf8")
 
 
 def output_cleaning(input):
-    output = re.split("\n", input)
     return list(filter(None, output))
 
 
 def entry_generator(input):
-    entry = re.sub(' +', ':', input.strip().lstrip())
-    return entry.split(':')
+    entry = re.sub(" +", ":", input.strip().lstrip())
+    return entry.split(":")
 
 
 # limit our needed output
-mcd_postfix_data = get_output().split('messages')
-data = mcd_postfix_data[1].split('smtpd')
+mcd_postfix_data = get_output().split("messages")
+data = mcd_postfix_data[1].split("smtpd")
 
 # postfix stats only
 mcd_postfix_info = data[0]
 
 # smtpd stats only
-mcd_smtpd_info = data[1].split('Per-Hour Traffic Summary')[0]
+mcd_smtpd_info = data[1].split("Per-Hour Traffic Summary")[0]
 
 # postfix stats export
 mcd_postfix = output_cleaning(mcd_postfix_info)
@@ -74,17 +83,16 @@
 points_label = []
 
 for entry in mcd_postfix:
     data_labels = entry_generator(entry)
-    if data_labels[0].find('k') == -1:
+    if data_labels[0].find("k") == -1:
         points_data.append(data_labels[0])
     else:
-        data_point = data_labels[0].replace('k', '', 1)
+        data_point = data_labels[0].replace("k", "", 1)
         data_point = int(data_point) * 1024
         points_data.append(data_point)
-    points_label.append(re.sub('[^a-zA-Z]+', '', data_labels[1]))
+    points_label.append(re.sub("[^a-zA-Z]+", "", data_labels[1]))
 
 entries = dict(zip(points_label, points_data))
 
 export = {"data": entries, "error": "0", "errorString": "", "version": "1"}
-data = re.sub(' ', '', json.dumps(export))
+data = re.sub(" ", "", json.dumps(export))
 print(data)
-
diff --git a/snmp/mailscanner.php b/snmp/mailscanner.php
index b80fdb9ff..8a9f3e274 100755
--- a/snmp/mailscanner.php
+++ b/snmp/mailscanner.php
@@ -17,60 +17,60 @@
 ///
 ///////////////////////////////////////////////////////////////////////////////////////
-
 // START SETTINGS ///
-    $mailstats = "/opt/librenms/scripts/watchmaillog/watchmaillog_counters";
+    $mailstats = '/opt/librenms/scripts/watchmaillog/watchmaillog_counters';
 
 // END SETTINGS ///
-
 ///
 // DO NOT EDIT BENETH THIS LINE
 ///
 ///////////////////////////////////////////////////////////////////////////////////////
-
-    function doSNMPv2($vars) {
-        $stats = array();
-        if (file_exists($vars)) {
-            $data = file($vars);
-            foreach ($data as $item=>$value) {
-                if (!empty($value)) {
-                    $temp = explode(':', trim($value));
-                    if (isset($temp[1])) {
-                        $stats[$temp[0]] = $temp[1];
-                    }
-                }
-            }
-        }
-        $var = array();
-        $var['mess_recv'] = (isset($stats['mess_recv']) ? $stats['mess_recv'] : "U");
-        $var['mess_rejected'] = (isset($stats['mess_rejected']) ? $stats['mess_rejected'] : "U");
-        $var['mess_relay'] = (isset($stats['mess_relay']) ? $stats['mess_relay'] : "U");
-        $var['mess_sent'] = (isset($stats['mess_sent']) ? $stats['mess_sent'] : "U");
-        $var['mess_waiting'] = (isset($stats['mess_waiting']) ? $stats['mess_waiting'] : "U");
-        $var['spam'] = (isset($stats['spam']) ? $stats['spam'] : "U");
-        $var['virus'] = (isset($stats['virus']) ? $stats['virus'] : "U");
-        foreach ($var as $item=>$count) {
-            echo $count."\n";
-        }
-    }
-
-    function clearStats($mailstats) {
-        if (file_exists($mailstats)) {
-            $fp = fopen($mailstats, 'w');
-            fwrite($fp, "mess_recv:0\n");
-            fwrite($fp, "mess_rejected:0\n");
-            fwrite($fp, "mess_relay:0\n");
-            fwrite($fp, "mess_sent:0\n");
-            fwrite($fp, "mess_waiting:0\n");
-            fwrite($fp, "spam:0\n");
-            fwrite($fp, "virus:0\n");
-            fclose($fp);
-        }
-    }
+    function doSNMPv2($vars)
+    {
+        $stats = [];
+        if (file_exists($vars)) {
+            $data = file($vars);
+            foreach ($data as $item=>$value) {
+                if (!empty($value)) {
+                    $temp = explode(':', trim($value));
+                    if (isset($temp[1])) {
+                        $stats[$temp[0]] = $temp[1];
+                    }
+                }
+            }
+        }
+        $var = [];
+        $var['mess_recv'] = (isset($stats['mess_recv']) ? $stats['mess_recv'] : 'U');
+        $var['mess_rejected'] = (isset($stats['mess_rejected']) ? $stats['mess_rejected'] : 'U');
+        $var['mess_relay'] = (isset($stats['mess_relay']) ? $stats['mess_relay'] : 'U');
+        $var['mess_sent'] = (isset($stats['mess_sent']) ? $stats['mess_sent'] : 'U');
+        $var['mess_waiting'] = (isset($stats['mess_waiting']) ? $stats['mess_waiting'] : 'U');
+        $var['spam'] = (isset($stats['spam']) ? $stats['spam'] : 'U');
+        $var['virus'] = (isset($stats['virus']) ? $stats['virus'] : 'U');
+        foreach ($var as $item=>$count) {
+            echo $count."\n";
+        }
+    }
+
+    function clearStats($mailstats)
+    {
+        if (file_exists($mailstats)) {
+            $fp = fopen($mailstats, 'w');
+            fwrite($fp, "mess_recv:0\n");
+            fwrite($fp, "mess_rejected:0\n");
+            fwrite($fp, "mess_relay:0\n");
+            fwrite($fp, "mess_sent:0\n");
+            fwrite($fp, "mess_waiting:0\n");
+            fwrite($fp, "spam:0\n");
+            fwrite($fp, "virus:0\n");
+            fclose($fp);
+        }
+    }
 
-    doSNMPv2($mailstats);
-    //clearStats($mailstats);
+    doSNMPv2($mailstats);
+    //clearStats($mailstats);
 ?>
diff --git a/snmp/mdadm b/snmp/mdadm
index b25629266..8565f8d69 100755
--- a/snmp/mdadm
+++ b/snmp/mdadm
@@ -19,10 +19,10 @@ OUTPUT_DATA='['
 # use 'ls' command to check if md blocks exist
 if $LS /dev/md?* 1> /dev/null 2>&1 ; then
     for ARRAY_BLOCKDEVICE in $($LS -1 /dev/md?*) ; do
-        RAID="/sys/block/"$($BASENAME $($REALPATH $ARRAY_BLOCKDEVICE))
+        RAID="/sys/block/"$($BASENAME $($REALPATH "$ARRAY_BLOCKDEVICE"))
 
         # ignore arrays with no slaves
-        if [ -z "$($LS -1 $RAID/slaves 2> /dev/null)" ] ; then
+        if [ -z "$($LS -1 "$RAID"/slaves 2> /dev/null)" ] ; then
             continue
         fi
         # ignore "non existing" arrays
@@ -30,27 +30,27 @@ if $LS /dev/md?* 1> /dev/null 2>&1 ; then
             continue
         fi
 
-        if [[ $($BASENAME $ARRAY_BLOCKDEVICE) = [[:digit:]] ]] ; then
-            RAID_NAME=$($BASENAME $RAID)
+        if [[ $($BASENAME "$ARRAY_BLOCKDEVICE") = [[:digit:]] ]] ; then
+            RAID_NAME=$($BASENAME "$RAID")
         else
-            RAID_NAME=$($BASENAME $ARRAY_BLOCKDEVICE)
+            RAID_NAME=$($BASENAME "$ARRAY_BLOCKDEVICE")
         fi
 
-        RAID_DEV_LIST=$($LS $RAID/slaves/)
-        RAID_LEVEL=$($CAT $RAID/md/level)
-        RAID_DISC_COUNT=$($CAT $RAID/md/raid_disks| cut -d' ' -f1)
-        RAID_STATE=$($CAT $RAID/md/array_state)
-        RAID_ACTION=$($CAT $RAID/md/sync_action)
-        RAID_DEGRADED=$($CAT $RAID/md/degraded)
+        RAID_DEV_LIST=$($LS "$RAID"/slaves/)
+        RAID_LEVEL=$($CAT "$RAID"/md/level)
+        RAID_DISC_COUNT=$($CAT "$RAID"/md/raid_disks| cut -d' ' -f1)
+        RAID_STATE=$($CAT "$RAID"/md/array_state)
+        RAID_ACTION=$($CAT "$RAID"/md/sync_action)
+        RAID_DEGRADED=$($CAT "$RAID"/md/degraded)
 
         if [ "$RAID_SYNC_SPEED" = "none" ] ; then
             RAID_SYNC_SPEED=0
         else
-            let "RAID_SYNC_SPEED=$($CAT $RAID/md/sync_speed)*1024"
+            let "RAID_SYNC_SPEED=$($CAT "$RAID"/md/sync_speed)*1024"
         fi
 
-        if [ "$($CAT $RAID/md/sync_completed)" != "none" ] ; then
-            let "RAID_SYNC_COMPLETED=100*$($CAT $RAID/md/sync_completed)"
"RAID_SYNC_COMPLETED=100*$($CAT $RAID/md/sync_completed)" - elif [ $RAID_DEGRADED -eq 1 ] ; then + if [ "$($CAT "$RAID"/md/sync_completed)" != "none" ] ; then + let "RAID_SYNC_COMPLETED=100*$($CAT "$RAID"/md/sync_completed)" + elif [ "$RAID_DEGRADED" -eq 1 ] ; then RAID_SYNC_COMPLETED=0 else RAID_SYNC_COMPLETED=100 @@ -58,7 +58,7 @@ if $LS /dev/md?* 1> /dev/null 2>&1 ; then # divide with 2 to size like in /proc/mdstat # and multiply with 1024 to get size in bytes - let "RAID_SIZE=$($CAT $RAID/size)*1024/2" + let "RAID_SIZE=$($CAT "$RAID"/size)*1024/2" RAID_DEVICE_LIST='[' ALL_DEVICE_COUNT=0 @@ -73,7 +73,7 @@ if $LS /dev/md?* 1> /dev/null 2>&1 ; then RAID_MISSING_DEVICES='[' for D in $RAID_DEV_LIST ; do - if [ -L $RAID/slaves/$D ] && [ -f $RAID/slaves/$D ] ; then + if [ -L "$RAID"/slaves/"$D" ] && [ -f "$RAID"/slaves/"$D" ] ; then RAID_MISSING_DEVICES=$RAID_MISSING_DEVICES'"'$D'",' fi done @@ -83,7 +83,7 @@ if $LS /dev/md?* 1> /dev/null 2>&1 ; then RAID_MISSING_DEVICES=$RAID_MISSING_DEVICES']' let "RAID_HOTSPARE_COUNT=ALL_DEVICE_COUNT-RAID_DISC_COUNT" - if [ $RAID_HOTSPARE_COUNT -lt 0 ] ; then + if [ "$RAID_HOTSPARE_COUNT" -lt 0 ] ; then RAID_HOTSPARE_COUNT=0 fi @@ -115,5 +115,5 @@ OUTPUT='{"data":'$OUTPUT_DATA\ '","errorString":"'$ERROR_STRING\ '","version":"'$VERSION'"}' -echo $OUTPUT +echo "$OUTPUT" diff --git a/snmp/mysql-stats b/snmp/mysql-stats index d191bdbbc..c8f32cca2 100755 --- a/snmp/mysql-stats +++ b/snmp/mysql-stats @@ -1,52 +1,117 @@ #!/usr/bin/env python2 -import warnings import re -warnings.filterwarnings(action="ignore", message='the sets module is deprecated') -import sets -import MySQLdb +import warnings + +warnings.filterwarnings(action="ignore", message="the sets module is deprecated") import base64 -conn = MySQLdb.connect(host='', - user='', - passwd='', - db='') -cursor = conn.cursor () +import MySQLdb +import sets + +conn = MySQLdb.connect(host="", user="", passwd="", db="") +cursor = conn.cursor() -cursor.execute ("SHOW GLOBAL STATUS") + +cursor.execute("SHOW GLOBAL STATUS") rows = cursor.fetchall() datavariables = { - 'Command Counters': ['Com_delete','Com_insert','Com_insert_select','Com_load','Com_replace','Com_replace_select', 'Com_select', 'Com_update', 'Com_update_multi'], - 'Connections': ['max_connections', 'Max_used_connections', 'Aborted_clients', 'Aborted_connects','Threads_connected','Connections'], - 'Files and Tables': ['table_open_cache','Open_files','Open_tables','Opened_tables'], - 'InnoDB Buffer Pool': ['ib_bpool_size','ib_bpool_dbpages', 'ib_bpool_free','ib_bpool_modpages'], - 'InnoDB Buffer Pool Activity': ['ib_bpool_read','ib_bpool_created', 'ib_bpool_written'], - 'InnoDB Insert Buffer': ['ib_ibuf_inserts','ib_ibuf_merged_rec', 'ib_ibuf_merges'], - 'InnoDB IO': ['ib_io_read','ib_io_write','ib_io_log', 'ib_io_fsync'], - 'InnoDB IO Pending': ['ib_iop_log','ib_iop_sync', 'ib_iop_flush_log', 'ib_iop_flush_bpool', 'ib_iop_ibuf_aio','ib_iop_aioread','ib_iop_aiowrite'], - 'InnoDB Log': ['innodb_log_buffer_size','ib_log_flush','ib_log_written'], - 'InnoDB Row Operations': ['Innodb_rows_deleted','Innodb_rows_inserted','Innodb_rows_read','Innodb_rows_updated'], - 'InnoDB Semaphores': ['ib_spin_rounds','ib_spin_waits','ib_os_waits'], - 'InnoDB Transactions': ['ib_tnx'], - 'MyISAM Indexes': ['Key_read_requests','Key_reads','Key_write_requests','Key_writes'], - 'Network Traffic': ['Bytes_received','Bytes_sent'], - 'Query Cache': ['Qcache_queries_in_cache','Qcache_hits','Qcache_inserts','Qcache_not_cached','Qcache_lowmem_prunes'], - 'Query Cache Memory': 
['query_cache_size','Qcache_free_memory'], - 'Select Types': ['Select_full_join','Select_full_range_join','Select_range','Select_range_check','Select_scan'], - 'Slow Queries': ['Slow_queries'], - 'Sorts': ['Sort_rows','Sort_range','Sort_merge_passes','Sort_scan'], - 'Table Locks': ['Table_locks_immediate','Table_locks_waited'], - 'Temporary Objects': ['Created_tmp_disk_tables','Created_tmp_tables','Created_tmp_files'] - } + "Command Counters": [ + "Com_delete", + "Com_insert", + "Com_insert_select", + "Com_load", + "Com_replace", + "Com_replace_select", + "Com_select", + "Com_update", + "Com_update_multi", + ], + "Connections": [ + "max_connections", + "Max_used_connections", + "Aborted_clients", + "Aborted_connects", + "Threads_connected", + "Connections", + ], + "Files and Tables": [ + "table_open_cache", + "Open_files", + "Open_tables", + "Opened_tables", + ], + "InnoDB Buffer Pool": [ + "ib_bpool_size", + "ib_bpool_dbpages", + "ib_bpool_free", + "ib_bpool_modpages", + ], + "InnoDB Buffer Pool Activity": [ + "ib_bpool_read", + "ib_bpool_created", + "ib_bpool_written", + ], + "InnoDB Insert Buffer": ["ib_ibuf_inserts", "ib_ibuf_merged_rec", "ib_ibuf_merges"], + "InnoDB IO": ["ib_io_read", "ib_io_write", "ib_io_log", "ib_io_fsync"], + "InnoDB IO Pending": [ + "ib_iop_log", + "ib_iop_sync", + "ib_iop_flush_log", + "ib_iop_flush_bpool", + "ib_iop_ibuf_aio", + "ib_iop_aioread", + "ib_iop_aiowrite", + ], + "InnoDB Log": ["innodb_log_buffer_size", "ib_log_flush", "ib_log_written"], + "InnoDB Row Operations": [ + "Innodb_rows_deleted", + "Innodb_rows_inserted", + "Innodb_rows_read", + "Innodb_rows_updated", + ], + "InnoDB Semaphores": ["ib_spin_rounds", "ib_spin_waits", "ib_os_waits"], + "InnoDB Transactions": ["ib_tnx"], + "MyISAM Indexes": [ + "Key_read_requests", + "Key_reads", + "Key_write_requests", + "Key_writes", + ], + "Network Traffic": ["Bytes_received", "Bytes_sent"], + "Query Cache": [ + "Qcache_queries_in_cache", + "Qcache_hits", + "Qcache_inserts", + "Qcache_not_cached", + "Qcache_lowmem_prunes", + ], + "Query Cache Memory": ["query_cache_size", "Qcache_free_memory"], + "Select Types": [ + "Select_full_join", + "Select_full_range_join", + "Select_range", + "Select_range_check", + "Select_scan", + ], + "Slow Queries": ["Slow_queries"], + "Sorts": ["Sort_rows", "Sort_range", "Sort_merge_passes", "Sort_scan"], + "Table Locks": ["Table_locks_immediate", "Table_locks_waited"], + "Temporary Objects": [ + "Created_tmp_disk_tables", + "Created_tmp_tables", + "Created_tmp_files", + ], +} data = {} for row in rows: data[row[0]] = row[1] cursor = "" -cursor = conn.cursor () -cursor.execute ("SHOW VARIABLES") +cursor = conn.cursor() +cursor.execute("SHOW VARIABLES") rows = cursor.fetchall() for row in rows: @@ -59,73 +124,98 @@ rows = cursor.fetchall() for row in rows: for line in row[2].split("\n"): - ib_bpool_size = re.match(r"Buffer\spool\ssize\s+(\d+)", line) - ib_bpool_free = re.match(r"Free\sbuffers\s+(\d+)", line) - ib_bpool_dbpages = re.match(r"Database\spages\s+(\d+)", line) - ib_bpool_modpages = re.match(r"Modified\sdb\spages\s+(\d+)", line) - ib_b_reg = re.match(r"Pages\sread\s(\d+),\screated\s(\d+),\swritten (\d+)", line) - ib_insert_buffer = re.match(r"(\d+)\sinserts,\s(\d+)\smerged\srecs,\s(\d+)", line) - ib_io = re.match(r"(\d+)\sOS\sfile\sreads,\s(\d+)\sOS\sfile\swrites,\s(\d+)\sOS\sfsyncs", line) - ib_io_log = re.match(r"(\d+)\slog\si\/o's\sdone.*", line) - ib_io_p1 = re.match(r"Pending\snormal\saio\sreads:\s(\d+),\saio\swrites:\s(\d+),", line) - ib_io_p2 = 
re.match(r"\s?ibuf\saio\sreads:\s(\d+),\slog\si\/o's:\s(\d+),\ssync\si\/o's:\s(\d+)", line) - ib_io_p3 = re.match(r"\s?Pending\sflushes\s\(fsync\)\slog:\s(\d+);\sbuffer\spool:\s(\d+)\s?", line) - ib_log_p1 = re.match(r"\s?Log\ssequence\snumber\s([[a-fA-F\d]+)(?: (\d+))?", line) - ib_log_p2 = re.match(r"\s?Log\sflushed\sup\sto\s+([[a-fA-F\d]+)(?: (\d+))?", line) - ib_semaphore = re.match(r"\s?Mutex\sspin\swaits\s(\d+),\srounds\s(\d+),\sOS waits\s(\d+)", line) - ib_tnx = re.match(r"\s?Trx\sid\scounter\s([[a-fA-F\d]+)(?: (\d+))?", line) + ib_bpool_size = re.match(r"Buffer\spool\ssize\s+(\d+)", line) + ib_bpool_free = re.match(r"Free\sbuffers\s+(\d+)", line) + ib_bpool_dbpages = re.match(r"Database\spages\s+(\d+)", line) + ib_bpool_modpages = re.match(r"Modified\sdb\spages\s+(\d+)", line) + ib_b_reg = re.match( + r"Pages\sread\s(\d+),\screated\s(\d+),\swritten (\d+)", line + ) + ib_insert_buffer = re.match( + r"(\d+)\sinserts,\s(\d+)\smerged\srecs,\s(\d+)", line + ) + ib_io = re.match( + r"(\d+)\sOS\sfile\sreads,\s(\d+)\sOS\sfile\swrites,\s(\d+)\sOS\sfsyncs", + line, + ) + ib_io_log = re.match(r"(\d+)\slog\si\/o's\sdone.*", line) + ib_io_p1 = re.match( + r"Pending\snormal\saio\sreads:\s(\d+),\saio\swrites:\s(\d+),", line + ) + ib_io_p2 = re.match( + r"\s?ibuf\saio\sreads:\s(\d+),\slog\si\/o's:\s(\d+),\ssync\si\/o's:\s(\d+)", + line, + ) + ib_io_p3 = re.match( + r"\s?Pending\sflushes\s\(fsync\)\slog:\s(\d+);\sbuffer\spool:\s(\d+)\s?", + line, + ) + ib_log_p1 = re.match( + r"\s?Log\ssequence\snumber\s([[a-fA-F\d]+)(?: (\d+))?", line + ) + ib_log_p2 = re.match( + r"\s?Log\sflushed\sup\sto\s+([[a-fA-F\d]+)(?: (\d+))?", line + ) + ib_semaphore = re.match( + r"\s?Mutex\sspin\swaits\s(\d+),\srounds\s(\d+),\sOS waits\s(\d+)", line + ) + ib_tnx = re.match(r"\s?Trx\sid\scounter\s([[a-fA-F\d]+)(?: (\d+))?", line) - if ib_bpool_size: - data['ib_bpool_size'] = ib_bpool_size.group(1) - elif ib_bpool_free: - data['ib_bpool_free'] = ib_bpool_free.group(1) - elif ib_bpool_dbpages: - data['ib_bpool_dbpages'] = ib_bpool_dbpages.group(1) - elif ib_bpool_modpages: - data['ib_bpool_modpages'] = ib_bpool_modpages.group(1) - elif ib_insert_buffer: - data['ib_ibuf_inserts'] = ib_insert_buffer.group(1) - data['ib_ibuf_merged_rec'] = ib_insert_buffer.group(2) - data['ib_ibuf_merges'] = ib_insert_buffer.group(3) - elif ib_io: - data['ib_io_read'] = ib_io.group(1) - data['ib_io_write'] = ib_io.group(2) - data['ib_io_fsync'] = ib_io.group(3) - elif ib_io_log: - data['ib_io_log'] = ib_io_log.group(1) - elif ib_io_p1: - data['ib_iop_aioread'] = ib_io_p1.group(1) - data['ib_iop_aiowrite'] = ib_io_p1.group(2) - elif ib_io_p2: - data['ib_iop_ibuf_aio'] = ib_io_p2.group(1) - data['ib_iop_log'] = ib_io_p2.group(2) - data['ib_iop_sync'] = ib_io_p2.group(3) - elif ib_io_p3: - data['ib_iop_flush_log'] = ib_io_p3.group(1) - data['ib_iop_flush_bpool'] = ib_io_p3.group(2) - elif ib_log_p1: - data['ib_log_written'] = ib_log_p1.group(1) - if ib_log_p1.group(2): - data['ib_log_written'] = int(data['ib_log_written']) + int(ib_log_p1.group(2)) - elif ib_log_p2: - data['ib_log_flush'] = ib_log_p2.group(1) - if ib_log_p2.group(2): - data['ib_log_flush'] = int(data['ib_log_flush']) + int(ib_log_p2.group(2)) - elif ib_semaphore: - data['ib_spin_waits'] = ib_semaphore.group(1) - data['ib_spin_rounds'] = ib_semaphore.group(2) - data['ib_os_waits'] = ib_semaphore.group(3) - elif ib_tnx: - data['ib_tnx'] = ib_tnx.group(1) - if ib_tnx.group(2): - data['ib_tnx'] = int(data['ib_tnx']) + int(ib_tnx.group(2)) - elif ib_b_reg: - data['ib_bpool_read'] = 
ib_b_reg.group(1) - data['ib_bpool_created'] = ib_b_reg.group(2) - data['ib_bpool_written'] = ib_b_reg.group(3) + if ib_bpool_size: + data["ib_bpool_size"] = ib_bpool_size.group(1) + elif ib_bpool_free: + data["ib_bpool_free"] = ib_bpool_free.group(1) + elif ib_bpool_dbpages: + data["ib_bpool_dbpages"] = ib_bpool_dbpages.group(1) + elif ib_bpool_modpages: + data["ib_bpool_modpages"] = ib_bpool_modpages.group(1) + elif ib_insert_buffer: + data["ib_ibuf_inserts"] = ib_insert_buffer.group(1) + data["ib_ibuf_merged_rec"] = ib_insert_buffer.group(2) + data["ib_ibuf_merges"] = ib_insert_buffer.group(3) + elif ib_io: + data["ib_io_read"] = ib_io.group(1) + data["ib_io_write"] = ib_io.group(2) + data["ib_io_fsync"] = ib_io.group(3) + elif ib_io_log: + data["ib_io_log"] = ib_io_log.group(1) + elif ib_io_p1: + data["ib_iop_aioread"] = ib_io_p1.group(1) + data["ib_iop_aiowrite"] = ib_io_p1.group(2) + elif ib_io_p2: + data["ib_iop_ibuf_aio"] = ib_io_p2.group(1) + data["ib_iop_log"] = ib_io_p2.group(2) + data["ib_iop_sync"] = ib_io_p2.group(3) + elif ib_io_p3: + data["ib_iop_flush_log"] = ib_io_p3.group(1) + data["ib_iop_flush_bpool"] = ib_io_p3.group(2) + elif ib_log_p1: + data["ib_log_written"] = ib_log_p1.group(1) + if ib_log_p1.group(2): + data["ib_log_written"] = int(data["ib_log_written"]) + int( + ib_log_p1.group(2) + ) + elif ib_log_p2: + data["ib_log_flush"] = ib_log_p2.group(1) + if ib_log_p2.group(2): + data["ib_log_flush"] = int(data["ib_log_flush"]) + int( + ib_log_p2.group(2) + ) + elif ib_semaphore: + data["ib_spin_waits"] = ib_semaphore.group(1) + data["ib_spin_rounds"] = ib_semaphore.group(2) + data["ib_os_waits"] = ib_semaphore.group(3) + elif ib_tnx: + data["ib_tnx"] = ib_tnx.group(1) + if ib_tnx.group(2): + data["ib_tnx"] = int(data["ib_tnx"]) + int(ib_tnx.group(2)) + elif ib_b_reg: + data["ib_bpool_read"] = ib_b_reg.group(1) + data["ib_bpool_created"] = ib_b_reg.group(2) + data["ib_bpool_written"] = ib_b_reg.group(3) for category in datavariables: for variable in datavariables[category]: - if variable in data: - print data[variable] + if variable in data: + print data[variable] diff --git a/snmp/nginx b/snmp/nginx index e2a64118d..201da897c 100755 --- a/snmp/nginx +++ b/snmp/nginx @@ -1,8 +1,8 @@ #!/usr/bin/env python3 -from urllib.request import urlopen import re +from urllib.request import urlopen -data = urlopen('http://localhost/nginx-status').read() +data = urlopen("http://localhost/nginx-status").read() params = {} @@ -22,7 +22,9 @@ dataorder = ["Active", "Reading", "Writing", "Waiting", "Requests"] for param in dataorder: if param == "Active": - Active = int(params["Reading"]) + int(params["Writing"]) + int(params["Waiting"]) + Active = ( + int(params["Reading"]) + int(params["Writing"]) + int(params["Waiting"]) + ) print(Active) else: print(params[param]) diff --git a/snmp/nginx-python2 b/snmp/nginx-python2 index 06efab6e6..fd0c574b5 100755 --- a/snmp/nginx-python2 +++ b/snmp/nginx-python2 @@ -1,28 +1,31 @@ #!/usr/bin/env python2 -import urllib2 import re -data = urllib2.urlopen('http://localhost/nginx-status').read() +import urllib2 + +data = urllib2.urlopen("http://localhost/nginx-status").read() params = {} for line in data.split("\n"): - smallstat = re.match(r"\s?Reading:\s(.*)\sWriting:\s(.*)\sWaiting:\s(.*)$", line) - req = re.match(r"\s+(\d+)\s+(\d+)\s+(\d+)", line) - if smallstat: - params["Reading"] = smallstat.group(1) - params["Writing"] = smallstat.group(2) - params["Waiting"] = smallstat.group(3) - elif req: - params["Requests"] = req.group(3) - else: - pass + 
smallstat = re.match(r"\s?Reading:\s(.*)\sWriting:\s(.*)\sWaiting:\s(.*)$", line) + req = re.match(r"\s+(\d+)\s+(\d+)\s+(\d+)", line) + if smallstat: + params["Reading"] = smallstat.group(1) + params["Writing"] = smallstat.group(2) + params["Waiting"] = smallstat.group(3) + elif req: + params["Requests"] = req.group(3) + else: + pass dataorder = ["Active", "Reading", "Writing", "Waiting", "Requests"] for param in dataorder: - if param == "Active": - Active = int(params["Reading"]) + int(params["Writing"]) + int(params["Waiting"]) - print Active - else: - print params[param] + if param == "Active": + Active = ( + int(params["Reading"]) + int(params["Writing"]) + int(params["Waiting"]) + ) + print Active + else: + print params[param] diff --git a/snmp/ntp-client b/snmp/ntp-client index 925155abe..0df9ee07b 100755 --- a/snmp/ntp-client +++ b/snmp/ntp-client @@ -19,17 +19,17 @@ BIN_AWK='/usr/bin/env awk' BIN_HEAD='/usr/bin/env head' CONFIG=$0".conf" -if [ -f $CONFIG ]; then - . $CONFIG +if [ -f "$CONFIG" ]; then + . "$CONFIG" fi -NTP_OFFSET=`$BIN_NTPQ -c rv | $BIN_GREP "offset" | $BIN_AWK -Foffset= '{print $2}' | $BIN_AWK -F, '{print $1}'` -NTP_FREQUENCY=`$BIN_NTPQ -c rv | $BIN_GREP "frequency" | $BIN_AWK -Ffrequency= '{print $2}' | $BIN_AWK -F, '{print $1}'` -NTP_SYS_JITTER=`$BIN_NTPQ -c rv | $BIN_GREP "sys_jitter" | $BIN_AWK -Fsys_jitter= '{print $2}' | $BIN_AWK -F, '{print $1}'` -NTP_CLK_JITTER=`$BIN_NTPQ -c rv | $BIN_GREP "clk_jitter" | $BIN_AWK -Fclk_jitter= '{print $2}' | $BIN_AWK -F, '{print $1}'` -NTP_WANDER=`$BIN_NTPQ -c rv | $BIN_GREP "clk_wander" | $BIN_AWK -Fclk_wander= '{print $2}' | $BIN_AWK -F, '{print $1}'` -NTP_VERSION=`$BIN_NTPQ -c rv | $BIN_GREP "version" | $BIN_AWK -F'ntpd ' '{print $2}' | $BIN_AWK -F. '{print $1}'` +NTP_OFFSET=$($BIN_NTPQ -c rv | $BIN_GREP "offset" | $BIN_AWK -Foffset= '{print $2}' | $BIN_AWK -F, '{print $1}') +NTP_FREQUENCY=$($BIN_NTPQ -c rv | $BIN_GREP "frequency" | $BIN_AWK -Ffrequency= '{print $2}' | $BIN_AWK -F, '{print $1}') +NTP_SYS_JITTER=$($BIN_NTPQ -c rv | $BIN_GREP "sys_jitter" | $BIN_AWK -Fsys_jitter= '{print $2}' | $BIN_AWK -F, '{print $1}') +NTP_CLK_JITTER=$($BIN_NTPQ -c rv | $BIN_GREP "clk_jitter" | $BIN_AWK -Fclk_jitter= '{print $2}' | $BIN_AWK -F, '{print $1}') +NTP_WANDER=$($BIN_NTPQ -c rv | $BIN_GREP "clk_wander" | $BIN_AWK -Fclk_wander= '{print $2}' | $BIN_AWK -F, '{print $1}') +NTP_VERSION=$($BIN_NTPQ -c rv | $BIN_GREP "version" | $BIN_AWK -F'ntpd ' '{print $2}' | $BIN_AWK -F. '{print $1}') -echo '{"data":{"offset":"'$NTP_OFFSET'","frequency":"'$NTP_FREQUENCY'","sys_jitter":"'$NTP_SYS_JITTER'","clk_jitter":"'$NTP_CLK_JITTER'","clk_wander":"'$NTP_WANDER'"},"version":"'$NTP_VERSION'","error":"0","errorString":""}' +echo '{"data":{"offset":"'"$NTP_OFFSET"'","frequency":"'"$NTP_FREQUENCY"'","sys_jitter":"'"$NTP_SYS_JITTER"'","clk_jitter":"'"$NTP_CLK_JITTER"'","clk_wander":"'"$NTP_WANDER"'"},"version":"'"$NTP_VERSION"'","error":"0","errorString":""}' exit 0 diff --git a/snmp/ntp-server.sh b/snmp/ntp-server.sh index 5871d0377..6fa2f6908 100755 --- a/snmp/ntp-server.sh +++ b/snmp/ntp-server.sh @@ -33,67 +33,67 @@ NTPQV="p11" # Don't change anything unless you know what are you doing # ################################################################ CONFIG=$0".conf" -if [ -f $CONFIG ]; then - . $CONFIG +if [ -f "$CONFIG" ]; then + . 
"$CONFIG" fi VERSION=1 -STRATUM=`$BIN_NTPQ -c rv | $BIN_GREP -Eow "stratum=[0-9]+" | $BIN_CUT -d "=" -f 2` +STRATUM=$($BIN_NTPQ -c rv | $BIN_GREP -Eow "stratum=[0-9]+" | $BIN_CUT -d "=" -f 2) # parse the ntpq info that requires version specific info -NTPQ_RAW=`$BIN_NTPQ -c rv | $BIN_GREP jitter | $BIN_SED 's/[[:alpha:]=,_]/ /g'` +NTPQ_RAW=$($BIN_NTPQ -c rv | $BIN_GREP jitter | $BIN_SED 's/[[:alpha:]=,_]/ /g') if [ $NTPQV = "p11" ]; then - OFFSET=`echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $3}'` - FREQUENCY=`echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $4}'` - SYS_JITTER=`echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $5}'` - CLK_JITTER=`echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $6}'` - CLK_WANDER=`echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $7}'` + OFFSET=$(echo "$NTPQ_RAW" | $BIN_AWK -F ' ' '{print $3}') + FREQUENCY=$(echo "$NTPQ_RAW" | $BIN_AWK -F ' ' '{print $4}') + SYS_JITTER=$(echo "$NTPQ_RAW" | $BIN_AWK -F ' ' '{print $5}') + CLK_JITTER=$(echo "$NTPQ_RAW" | $BIN_AWK -F ' ' '{print $6}') + CLK_WANDER=$(echo "$NTPQ_RAW" | $BIN_AWK -F ' ' '{print $7}') fi if [ $NTPQV = "p1" ]; then - OFFSET=`echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $2}'` - FREQUENCY=`echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $3}'` - SYS_JITTER=`echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $4}'` - CLK_JITTER=`echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $5}'` - CLK_WANDER=`echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $6}'` + OFFSET=$(echo "$NTPQ_RAW" | $BIN_AWK -F ' ' '{print $2}') + FREQUENCY=$(echo "$NTPQ_RAW" | $BIN_AWK -F ' ' '{print $3}') + SYS_JITTER=$(echo "$NTPQ_RAW" | $BIN_AWK -F ' ' '{print $4}') + CLK_JITTER=$(echo "$NTPQ_RAW" | $BIN_AWK -F ' ' '{print $5}') + CLK_WANDER=$(echo "$NTPQ_RAW" | $BIN_AWK -F ' ' '{print $6}') fi -VER=`$BIN_NTPD --version` +VER=$($BIN_NTPD --version) if [ "$VER" = '4.2.6p5' ]; then - USECMD=`echo $BIN_NTPDC -c iostats` + USECMD=$(echo "$BIN_NTPDC" -c iostats) else - USECMD=`echo $BIN_NTPQ -c iostats localhost` + USECMD=$(echo "$BIN_NTPQ" -c iostats localhost) fi -CMD2=`$USECMD | $BIN_TR -d ' ' | $BIN_CUT -d : -f 2 | $BIN_TR '\n' ' '` +CMD2=$($USECMD | $BIN_TR -d ' ' | $BIN_CUT -d : -f 2 | $BIN_TR '\n' ' ') -TIMESINCERESET=`echo $CMD2 | $BIN_AWK -F ' ' '{print $1}'` -RECEIVEDBUFFERS=`echo $CMD2 | $BIN_AWK -F ' ' '{print $2}'` -FREERECEIVEBUFFERS=`echo $CMD2 | $BIN_AWK -F ' ' '{print $3}'` -USEDRECEIVEBUFFERS=`echo $CMD2 | $BIN_AWK -F ' ' '{print $4}'` -LOWWATERREFILLS=`echo $CMD2 | $BIN_AWK -F ' ' '{print $5}'` -DROPPEDPACKETS=`echo $CMD2 | $BIN_AWK -F ' ' '{print $6}'` -IGNOREDPACKETS=`echo $CMD2 | $BIN_AWK -F ' ' '{print $7}'` -RECEIVEDPACKETS=`echo $CMD2 | $BIN_AWK -F ' ' '{print $8}'` -PACKETSSENT=`echo $CMD2 | $BIN_AWK -F ' ' '{print $9}'` -PACKETSENDFAILURES=`echo $CMD2 | $BIN_AWK -F ' ' '{print $10}'` -INPUTWAKEUPS=`echo $CMD2 | $BIN_AWK -F ' ' '{print $11}'` -USEFULINPUTWAKEUPS=`echo $CMD2 | $BIN_AWK -F ' ' '{print $12}'` +TIMESINCERESET=$(echo "$CMD2" | $BIN_AWK -F ' ' '{print $1}') +RECEIVEDBUFFERS=$(echo "$CMD2" | $BIN_AWK -F ' ' '{print $2}') +FREERECEIVEBUFFERS=$(echo "$CMD2" | $BIN_AWK -F ' ' '{print $3}') +USEDRECEIVEBUFFERS=$(echo "$CMD2" | $BIN_AWK -F ' ' '{print $4}') +LOWWATERREFILLS=$(echo "$CMD2" | $BIN_AWK -F ' ' '{print $5}') +DROPPEDPACKETS=$(echo "$CMD2" | $BIN_AWK -F ' ' '{print $6}') +IGNOREDPACKETS=$(echo "$CMD2" | $BIN_AWK -F ' ' '{print $7}') +RECEIVEDPACKETS=$(echo "$CMD2" | $BIN_AWK -F ' ' '{print $8}') +PACKETSSENT=$(echo "$CMD2" | $BIN_AWK -F ' ' '{print $9}') +PACKETSENDFAILURES=$(echo "$CMD2" | $BIN_AWK -F ' ' '{print $10}') +INPUTWAKEUPS=$(echo "$CMD2" | $BIN_AWK -F ' ' 
'{print $11}')
+USEFULINPUTWAKEUPS=$(echo "$CMD2" | $BIN_AWK -F ' ' '{print $12}')
 
-echo '{"data":{"offset":"'$OFFSET\
-'","frequency":"'$FREQUENCY\
-'","sys_jitter":"'$SYS_JITTER\
-'","clk_jitter":"'$CLK_JITTER\
-'","clk_wander":"'$CLK_WANDER\
-'","stratum":"'$STRATUM\
-'","time_since_reset":"'$TIMESINCERESET\
-'","receive_buffers":"'$RECEIVEDBUFFERS\
-'","free_receive_buffers":"'$FREERECEIVEBUFFERS\
-'","used_receive_buffers":"'$USEDRECEIVEBUFFERS\
-'","low_water_refills":"'$LOWWATERREFILLS\
-'","dropped_packets":"'$DROPPEDPACKETS\
-'","ignored_packets":"'$IGNOREDPACKETS\
-'","received_packets":"'$RECEIVEDPACKETS\
-'","packets_sent":"'$PACKETSSENT\
-'","packet_send_failures":"'$PACKETSENDFAILURES\
-'","input_wakeups":"'$PACKETSENDFAILURES\
-'","useful_input_wakeups":"'$USEFULINPUTWAKEUPS\
+echo '{"data":{"offset":"'"$OFFSET"\
+'","frequency":"'"$FREQUENCY"\
+'","sys_jitter":"'"$SYS_JITTER"\
+'","clk_jitter":"'"$CLK_JITTER"\
+'","clk_wander":"'"$CLK_WANDER"\
+'","stratum":"'"$STRATUM"\
+'","time_since_reset":"'"$TIMESINCERESET"\
+'","receive_buffers":"'"$RECEIVEDBUFFERS"\
+'","free_receive_buffers":"'"$FREERECEIVEBUFFERS"\
+'","used_receive_buffers":"'"$USEDRECEIVEBUFFERS"\
+'","low_water_refills":"'"$LOWWATERREFILLS"\
+'","dropped_packets":"'"$DROPPEDPACKETS"\
+'","ignored_packets":"'"$IGNOREDPACKETS"\
+'","received_packets":"'"$RECEIVEDPACKETS"\
+'","packets_sent":"'"$PACKETSSENT"\
+'","packet_send_failures":"'"$PACKETSENDFAILURES"\
+'","input_wakeups":"'"$INPUTWAKEUPS"\
+'","useful_input_wakeups":"'"$USEFULINPUTWAKEUPS"\
 '"},"error":"0","errorString":"","version":"'$VERSION'"}'
diff --git a/snmp/nvidia b/snmp/nvidia
index d9d73a755..8bb900f35 100644
--- a/snmp/nvidia
+++ b/snmp/nvidia
@@ -17,10 +17,10 @@ sed='/usr/bin/env sed'
 # 0 1 43 3 2 0 0 2700 862 0 0 462 4 - - 0 26 3
 
 $nvidiasmi dmon -c 1 -s pucvmet | $grep -v ^# | $sed 's/^ *//' | $sed 's/ */,/g' | $sed 's/-/0/g'
-lines=`$nvidiasmi dmon -c 1 -s pucvmet | $grep -v ^# | $sed 's/^ *//' | $sed 's/ */,/g' | $sed 's/-/0/g' | wc -l`
+lines=$($nvidiasmi dmon -c 1 -s pucvmet | $grep -v ^# | $sed 's/^ *//' | $sed 's/ */,/g' | $sed 's/-/0/g' | wc -l)
 
 # if we are less than 5 then all GPUs were printed
-if [ $lines -lt 5 ]; then
+if [ "$lines" -lt 5 ]; then
     exit 0;
 fi
 
@@ -35,5 +35,5 @@ do
         loop=0
     fi
 
-    gpu=`expr $gpu + 1`
+    gpu=$(expr $gpu + 1)
 done
diff --git a/snmp/opensip3-stats.sh b/snmp/opensip3-stats.sh
index fa85e023b..a3302c6bd 100644
--- a/snmp/opensip3-stats.sh
+++ b/snmp/opensip3-stats.sh
@@ -9,11 +9,11 @@ load_average=$(ps -C opensips -o %cpu | awk '{sum += $1} END {print "Load Averag
 
 total_files=$(lsof -c opensips | wc -l)
 
-echo $total_memory
-echo $used_memory
-echo $free_memory
-echo $load_average
-echo "Open files="$total_files
+echo "$total_memory"
+echo "$used_memory"
+echo "$free_memory"
+echo "$load_average"
+echo "Open files=$total_files"
 
 exit
diff --git a/snmp/opensips-stats.sh b/snmp/opensips-stats.sh
index 7127ec5ea..b8f5260a2 100644
--- a/snmp/opensips-stats.sh
+++ b/snmp/opensips-stats.sh
@@ -9,10 +9,10 @@ load_average=$(ps -C opensips -o %cpu | awk '{sum += $1} END {print "Load Averag
 
 total_files=$(lsof -c opensips | wc -l)
 
-echo $total_memory
-echo $used_memory
-echo $free_memory
-echo $load_average
-echo "Open files="$total_files
+echo "$total_memory"
+echo "$used_memory"
+echo "$free_memory"
+echo "$load_average"
+echo "Open files=$total_files"
 
 exit
diff --git a/snmp/osupdate b/snmp/osupdate
index 6e6f8f533..1f4f94852 100755
--- a/snmp/osupdate
+++ b/snmp/osupdate
@@ -34,56 +34,56 @@ CMD_APK=' version'
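+# NOTE: several of the CMD_* variables above hold more than one word
+# (e.g. CMD_APK=' version'), so they must stay unquoted below so the shell can word-split them.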
################################################################
 if command -v zypper &>/dev/null ; then
     # OpenSUSE
-    UPDATES=`$BIN_ZYPPER $CMD_ZYPPER | $BIN_WC $CMD_WC`
-    if [ $UPDATES -ge 2 ]; then
+    UPDATES=$($BIN_ZYPPER $CMD_ZYPPER | $BIN_WC $CMD_WC)
+    if [ "$UPDATES" -ge 2 ]; then
         echo $(($UPDATES-2));
     else
         echo "0";
     fi
 elif command -v dnf &>/dev/null ; then
     # Fedora
-    UPDATES=`$BIN_DNF $CMD_DNF | $BIN_WC $CMD_WC`
-    if [ $UPDATES -ge 1 ]; then
+    UPDATES=$($BIN_DNF $CMD_DNF | $BIN_WC $CMD_WC)
+    if [ "$UPDATES" -ge 1 ]; then
         echo $(($UPDATES-1));
     else
         echo "0";
     fi
 elif command -v pacman &>/dev/null ; then
     # Arch
-    UPDATES=`$BIN_PACMAN $CMD_PACMAN | $BIN_WC $CMD_WC`
-    if [ $UPDATES -ge 1 ]; then
+    UPDATES=$($BIN_PACMAN $CMD_PACMAN | $BIN_WC $CMD_WC)
+    if [ "$UPDATES" -ge 1 ]; then
         echo $(($UPDATES-1));
     else
         echo "0";
     fi
 elif command -v yum &>/dev/null ; then
     # CentOS / Redhat
-    UPDATES=`$BIN_YUM $CMD_YUM | $BIN_WC $CMD_WC`
-    if [ $UPDATES -ge 1 ]; then
+    UPDATES=$($BIN_YUM $CMD_YUM | $BIN_WC $CMD_WC)
+    if [ "$UPDATES" -ge 1 ]; then
         echo $(($UPDATES-1));
     else
         echo "0";
     fi
 elif command -v apt-get &>/dev/null ; then
     # Debian / Devuan / Ubuntu
-    UPDATES=`$BIN_APT $CMD_APT | $BIN_GREP $CMD_GREP 'Inst'`
-    if [ $UPDATES -ge 1 ]; then
-        echo $UPDATES;
+    UPDATES=$($BIN_APT $CMD_APT | $BIN_GREP $CMD_GREP 'Inst')
+    if [ "$UPDATES" -ge 1 ]; then
+        echo "$UPDATES";
     else
         echo "0";
     fi
 elif command -v pkg &>/dev/null ; then
     # FreeBSD
-    UPDATES=`$BIN_PKG $CMD_PKG | $BIN_WC $CMD_WC`
-    if [ $UPDATES -ge 1 ]; then
-        echo $UPDATES;
+    UPDATES=$($BIN_PKG $CMD_PKG | $BIN_WC $CMD_WC)
+    if [ "$UPDATES" -ge 1 ]; then
+        echo "$UPDATES";
     else
         echo "0";
     fi
 elif command -v apk &>/dev/null ; then
     # Alpine
-    UPDATES=`$BIN_APK $CMD_APK | $BIN_WC $CMD_WC`
-    if [ $UPDATES -ge 2 ]; then
+    UPDATES=$($BIN_APK $CMD_APK | $BIN_WC $CMD_WC)
+    if [ "$UPDATES" -ge 2 ]; then
         echo $(($UPDATES-1));
     else
         echo "0";
diff --git a/snmp/phpfpmsp b/snmp/phpfpmsp
index 3eb0e0c50..a4d7a4339 100644
--- a/snmp/phpfpmsp
+++ b/snmp/phpfpmsp
@@ -78,7 +78,7 @@ phpfpm_slow_requests=0
 #
 	local opts="${1}" url="${2}"
 
-	phpfpm_response=($(curl -Ss ${opts} "${url}"))
+	phpfpm_response=($(curl -Ss "${opts}" "${url}"))
 	[ $? 
-ne 0 -o "${#phpfpm_response[@]}" -eq 0 ] && exit 1 if [[ "${phpfpm_response[0]}" != "pool:" \ @@ -131,16 +131,16 @@ phpfpm_slow_requests=0 exit 1 fi -echo $phpfpm_pool -echo $phpfpm_start_time -echo $phpfpm_start_since -echo $phpfpm_accepted_conn -echo $phpfpm_listen_queue -echo $phpfpm_max_listen_queue -echo $phpfpm_listen_queue_len -echo $phpfpm_idle_processes -echo $phpfpm_active_processes -echo $phpfpm_total_processes -echo $phpfpm_max_active_processes -echo $phpfpm_max_children_reached +echo "$phpfpm_pool" +echo "$phpfpm_start_time" +echo "$phpfpm_start_since" +echo "$phpfpm_accepted_conn" +echo "$phpfpm_listen_queue" +echo "$phpfpm_max_listen_queue" +echo "$phpfpm_listen_queue_len" +echo "$phpfpm_idle_processes" +echo "$phpfpm_active_processes" +echo "$phpfpm_total_processes" +echo "$phpfpm_max_active_processes" +echo "$phpfpm_max_children_reached" echo $phpfpm_slow_requests diff --git a/snmp/pi-hole b/snmp/pi-hole index f5cc5c9ab..342ef105b 100644 --- a/snmp/pi-hole +++ b/snmp/pi-hole @@ -55,19 +55,19 @@ debug() { echo '[ok] API_URL is set' fi - if [ -z $API_AUTH_KEY ]; then + if [ -z "$API_AUTH_KEY" ]; then echo '[warning] API_AUTH_KEY is not set, some values will not be available' else echo '[ok] API_AUTH_KEY is set' fi - if [ -z ${URL_READ_ONLY} ]; then + if [ -z "${URL_READ_ONLY}" ]; then echo '[error] URL_READ_ONLY is not set' else echo '[ok] URL_READ_ONLY is set' fi - if [ -z ${URL_QUERY_TYPE} ]; then + if [ -z "${URL_QUERY_TYPE}" ]; then echo '[error] URL_QUERY_TYPE is not set' else echo '[ok] URL_QUERY_TYPE not set' @@ -87,20 +87,20 @@ debug() { exportdata() { # domains_being_blocked / dns_query_total / ads_blocked_today / ads_percentage_today # unique_domains / queries_forwarded / queries_cached - GET_STATS=$(curl -s $API_URL$URL_READ_ONLY | jq '.domains_being_blocked, .dns_queries_today, .ads_blocked_today, .ads_percentage_today, .unique_domains, .queries_forwarded, .queries_cached') - echo $GET_STATS | tr " " "\n" + GET_STATS=$(curl -s $API_URL"$URL_READ_ONLY" | jq '.domains_being_blocked, .dns_queries_today, .ads_blocked_today, .ads_percentage_today, .unique_domains, .queries_forwarded, .queries_cached') + echo "$GET_STATS" | tr " " "\n" # A / AAAA / PTR / SRV - GET_QUERY_TYPE=$(curl -s $API_URL$URL_QUERY_TYPE$API_AUTH_KEY | jq '.[]["A (IPv4)", "AAAA (IPv6)", "PTR", "SRV"]') - echo $GET_QUERY_TYPE | tr " " "\n" + GET_QUERY_TYPE=$(curl -s $API_URL"$URL_QUERY_TYPE""$API_AUTH_KEY" | jq '.[]["A (IPv4)", "AAAA (IPv6)", "PTR", "SRV"]') + echo "$GET_QUERY_TYPE" | tr " " "\n" # Find number of DHCP address in scope and current lease count # case-insensitive compare, just in case :) if [ "${DHCP_ACTIVE,,}" = "true" ]; then # Max IP addresses in scope # Convert IPs to decimal and subtract - IFS="." read -r -a array <<< $DHCP_START + IFS="." read -r -a array <<< "$DHCP_START" DHCPSTARTDECIMAL=$(( (${array[0]}*256**3) + (${array[1]}*256**2) + (${array[2]}*256) + ${array[3]} )) - IFS="." read -r -a array <<< $DHCP_END + IFS="." 
read -r -a array <<< "$DHCP_END" DHCPENDDECIMAL=$(( (${array[0]}*256**3) + (${array[1]}*256**2) + (${array[2]}*256) + ${array[3]} )) expr $DHCPENDDECIMAL - $DHCPSTARTDECIMAL # Current lease count @@ -111,7 +111,7 @@ exportdata() { fi } -if [ -z $* ]; then +if [ -z "$*" ]; then exportdata fi expr "$*" : ".*--help" > /dev/null && usage diff --git a/snmp/postfix-queues b/snmp/postfix-queues index dc1951cc1..1d3491e61 100755 --- a/snmp/postfix-queues +++ b/snmp/postfix-queues @@ -8,6 +8,6 @@ QUEUES="incoming active deferred hold" for i in $QUEUES; do - COUNT=`qshape $i | grep TOTAL | awk '{print $2}'` + COUNT=$(qshape "$i" | grep TOTAL | awk '{print $2}') printf "$COUNT\n" done diff --git a/snmp/powerdns-dnsdist b/snmp/powerdns-dnsdist index 0572fb5cd..48e5d1ce4 100644 --- a/snmp/powerdns-dnsdist +++ b/snmp/powerdns-dnsdist @@ -6,7 +6,7 @@ API_AUTH_USER="admin" API_AUTH_PASS="" API_URL="" API_STATS="jsonstat?command=stats" -TMP_FILE=`/usr/bin/mktemp` +TMP_FILE=$(/usr/bin/mktemp) #/ Description: BASH script to get PowerDNS dnsdist stats #/ Examples: ./powerdns-dnsdist @@ -65,100 +65,100 @@ debug() { exportdata() { # get current data - curl -s -u$API_AUTH_USER:$API_AUTH_PASS $API_URL$API_STATS | jq '.' > $TMP_FILE + curl -s -u$API_AUTH_USER:"$API_AUTH_PASS" "$API_URL""$API_STATS" | jq '.' > "$TMP_FILE" # generate export values - JSON_VALUES=$(cat $TMP_FILE) + JSON_VALUES=$(cat "$TMP_FILE") - STAT_CACHE_HIT=$(echo $JSON_VALUES | jq '."cache-hits"') - echo $STAT_CACHE_HIT + STAT_CACHE_HIT=$(echo "$JSON_VALUES" | jq '."cache-hits"') + echo "$STAT_CACHE_HIT" - STAT_CACHE_MISS=$(echo $JSON_VALUES | jq '."cache-misses"') - echo $STAT_CACHE_MISS + STAT_CACHE_MISS=$(echo "$JSON_VALUES" | jq '."cache-misses"') + echo "$STAT_CACHE_MISS" - STAT_DOWNSTREAM_ERR=$(echo $JSON_VALUES | jq '."downstream-send-errors"') - echo $STAT_DOWNSTREAM_ERR + STAT_DOWNSTREAM_ERR=$(echo "$JSON_VALUES" | jq '."downstream-send-errors"') + echo "$STAT_DOWNSTREAM_ERR" - STAT_DOWNSTREAM_TIMEOUT=$(echo $JSON_VALUES | jq '."downstream-timeouts"') - echo $STAT_DOWNSTREAM_TIMEOUT + STAT_DOWNSTREAM_TIMEOUT=$(echo "$JSON_VALUES" | jq '."downstream-timeouts"') + echo "$STAT_DOWNSTREAM_TIMEOUT" - STAT_DYNAMIC_BLOCK_SIZE=$(echo $JSON_VALUES | jq '."dyn-block-nmg-size"') - echo $STAT_DYNAMIC_BLOCK_SIZE + STAT_DYNAMIC_BLOCK_SIZE=$(echo "$JSON_VALUES" | jq '."dyn-block-nmg-size"') + echo "$STAT_DYNAMIC_BLOCK_SIZE" - STAT_DYNAMIC_BLOCK=$(echo $JSON_VALUES | jq '."dyn-blocked"') - echo $STAT_DYNAMIC_BLOCK + STAT_DYNAMIC_BLOCK=$(echo "$JSON_VALUES" | jq '."dyn-blocked"') + echo "$STAT_DYNAMIC_BLOCK" - STAT_QUERIES_COUNT=$(echo $JSON_VALUES | jq '.queries') - echo $STAT_QUERIES_COUNT + STAT_QUERIES_COUNT=$(echo "$JSON_VALUES" | jq '.queries') + echo "$STAT_QUERIES_COUNT" - STAT_QUERIES_RECURSIVE=$(echo $JSON_VALUES | jq '.rdqueries') - echo $STAT_QUERIES_RECURSIVE + STAT_QUERIES_RECURSIVE=$(echo "$JSON_VALUES" | jq '.rdqueries') + echo "$STAT_QUERIES_RECURSIVE" - STAT_QUERIES_EMPTY=$(echo $JSON_VALUES | jq '."empty-queries"') - echo $STAT_QUERIES_EMPTY + STAT_QUERIES_EMPTY=$(echo "$JSON_VALUES" | jq '."empty-queries"') + echo "$STAT_QUERIES_EMPTY" - STAT_QUERIES_DROP_NO_POLICY=$(echo $JSON_VALUES | jq '."no-policy"') - echo $STAT_QUERIES_DROP_NO_POLICY + STAT_QUERIES_DROP_NO_POLICY=$(echo "$JSON_VALUES" | jq '."no-policy"') + echo "$STAT_QUERIES_DROP_NO_POLICY" - STAT_QUERIES_DROP_NC=$(echo $JSON_VALUES | jq '."noncompliant-queries"') - echo $STAT_QUERIES_DROP_NC + STAT_QUERIES_DROP_NC=$(echo "$JSON_VALUES" | jq '."noncompliant-queries"') + echo 
"$STAT_QUERIES_DROP_NC" - STAT_QUERIES_DROP_NC_ANSWER=$(echo $JSON_VALUES | jq '."noncompliant-responses"') - echo $STAT_QUERIES_DROP_NC_ANSWER + STAT_QUERIES_DROP_NC_ANSWER=$(echo "$JSON_VALUES" | jq '."noncompliant-responses"') + echo "$STAT_QUERIES_DROP_NC_ANSWER" - STAT_QUERIES_SELF_ANSWER=$(echo $JSON_VALUES | jq '."self-answered"') - echo $STAT_QUERIES_SELF_ANSWER + STAT_QUERIES_SELF_ANSWER=$(echo "$JSON_VALUES" | jq '."self-answered"') + echo "$STAT_QUERIES_SELF_ANSWER" - STAT_QUERIES_SERVFAIL=$(echo $JSON_VALUES | jq '."servfail-responses"') - echo $STAT_QUERIES_SERVFAIL + STAT_QUERIES_SERVFAIL=$(echo "$JSON_VALUES" | jq '."servfail-responses"') + echo "$STAT_QUERIES_SERVFAIL" - STAT_QUERIES_FAILURE=$(echo $JSON_VALUES | jq '."trunc-failures"') - echo $STAT_QUERIES_FAILURE + STAT_QUERIES_FAILURE=$(echo "$JSON_VALUES" | jq '."trunc-failures"') + echo "$STAT_QUERIES_FAILURE" - STAT_QUERIES_ACL_DROPS=$(echo $JSON_VALUES | jq '."acl-drops"') - echo $STAT_QUERIES_ACL_DROPS + STAT_QUERIES_ACL_DROPS=$(echo "$JSON_VALUES" | jq '."acl-drops"') + echo "$STAT_QUERIES_ACL_DROPS" - STAT_RULE_DROP=$(echo $JSON_VALUES | jq '."rule-drop"') - echo $STAT_RULE_DROP + STAT_RULE_DROP=$(echo "$JSON_VALUES" | jq '."rule-drop"') + echo "$STAT_RULE_DROP" - STAT_RULE_NXDOMAIN=$(echo $JSON_VALUES | jq '."rule-nxdomain"') - echo $STAT_RULE_NXDOMAIN + STAT_RULE_NXDOMAIN=$(echo "$JSON_VALUES" | jq '."rule-nxdomain"') + echo "$STAT_RULE_NXDOMAIN" - STAT_RULE_REFUSED=$(echo $JSON_VALUES | jq '."rule-refused"') - echo $STAT_RULE_REFUSED + STAT_RULE_REFUSED=$(echo "$JSON_VALUES" | jq '."rule-refused"') + echo "$STAT_RULE_REFUSED" - STAT_LATENCY_AVG_100=$(echo $JSON_VALUES | jq '."latency-avg100"') - echo $STAT_LATENCY_AVG_100 + STAT_LATENCY_AVG_100=$(echo "$JSON_VALUES" | jq '."latency-avg100"') + echo "$STAT_LATENCY_AVG_100" - STAT_LATENCY_AVG_1000=$(echo $JSON_VALUES | jq '."latency-avg1000"') - echo $STAT_LATENCY_AVG_1000 + STAT_LATENCY_AVG_1000=$(echo "$JSON_VALUES" | jq '."latency-avg1000"') + echo "$STAT_LATENCY_AVG_1000" - STAT_LATENCY_AVG_10000=$(echo $JSON_VALUES | jq '."latency-avg10000"') - echo $STAT_LATENCY_AVG_10000 + STAT_LATENCY_AVG_10000=$(echo "$JSON_VALUES" | jq '."latency-avg10000"') + echo "$STAT_LATENCY_AVG_10000" - STAT_LATENCY_AVG_1000000=$(echo $JSON_VALUES | jq '."latency-avg1000000"') - echo $STAT_LATENCY_AVG_1000000 + STAT_LATENCY_AVG_1000000=$(echo "$JSON_VALUES" | jq '."latency-avg1000000"') + echo "$STAT_LATENCY_AVG_1000000" - STAT_LATENCY_SLOW=$(echo $JSON_VALUES | jq '."latency-slow"') - echo $STAT_LATENCY_SLOW + STAT_LATENCY_SLOW=$(echo "$JSON_VALUES" | jq '."latency-slow"') + echo "$STAT_LATENCY_SLOW" - STAT_LATENCY_0_1=$(echo $JSON_VALUES | jq '."latency0-1"') - echo $STAT_LATENCY_0_1 + STAT_LATENCY_0_1=$(echo "$JSON_VALUES" | jq '."latency0-1"') + echo "$STAT_LATENCY_0_1" - STAT_LATENCY_1_10=$(echo $JSON_VALUES | jq '."latency1-10"') - echo $STAT_LATENCY_1_10 + STAT_LATENCY_1_10=$(echo "$JSON_VALUES" | jq '."latency1-10"') + echo "$STAT_LATENCY_1_10" - STAT_LATENCY_10_50=$(echo $JSON_VALUES | jq '."latency10-50"') - echo $STAT_LATENCY_10_50 + STAT_LATENCY_10_50=$(echo "$JSON_VALUES" | jq '."latency10-50"') + echo "$STAT_LATENCY_10_50" - STAT_LATENCY_50_100=$(echo $JSON_VALUES | jq '."latency50-100"') - echo $STAT_LATENCY_50_100 + STAT_LATENCY_50_100=$(echo "$JSON_VALUES" | jq '."latency50-100"') + echo "$STAT_LATENCY_50_100" - STAT_LATENCY_100_1000=$(echo $JSON_VALUES | jq '."latency100-1000"') - echo $STAT_LATENCY_100_1000 + STAT_LATENCY_100_1000=$(echo "$JSON_VALUES" | jq 
'."latency100-1000"') + echo "$STAT_LATENCY_100_1000" } -if [ -z $* ]; then +if [ -z "$*" ]; then exportdata fi expr "$*" : ".*--help" > /dev/null && usage diff --git a/snmp/powerdns-recursor b/snmp/powerdns-recursor index d673738bf..64c764c0a 100755 --- a/snmp/powerdns-recursor +++ b/snmp/powerdns-recursor @@ -1,12 +1,13 @@ #!/usr/bin/python -import json, subprocess -from subprocess import Popen, PIPE +import json +import subprocess +from subprocess import PIPE, Popen -input = Popen(['rec_control', 'get-all'], stdout=PIPE).communicate()[0] +input = Popen(["rec_control", "get-all"], stdout=PIPE).communicate()[0] data = [] for line in input.splitlines(): item = line.split() - data.append({'name': item[0].decode(), 'value': int(item[1].decode())}) + data.append({"name": item[0].decode(), "value": int(item[1].decode())}) print(json.dumps(data)) diff --git a/snmp/powerdns.py b/snmp/powerdns.py index 75cc1fae8..088273da7 100755 --- a/snmp/powerdns.py +++ b/snmp/powerdns.py @@ -3,24 +3,26 @@ import json import subprocess -pdnscontrol = '/usr/bin/pdns_control' +pdnscontrol = "/usr/bin/pdns_control" -process = subprocess.Popen([pdnscontrol, 'show', '*'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) +process = subprocess.Popen( + [pdnscontrol, "show", "*"], stdout=subprocess.PIPE, stderr=subprocess.PIPE +) input = process.communicate() stdout = input[0].decode() stderr = input[1].decode() data = {} -for var in stdout.split(','): - if '=' in var: - key, value = var.split('=') +for var in stdout.split(","): + if "=" in var: + key, value = var.split("=") data[key] = value output = { - 'version': 1, - 'error': process.returncode, - 'errorString': stderr, - 'data': data + "version": 1, + "error": process.returncode, + "errorString": stderr, + "data": data, } print(json.dumps(output)) diff --git a/snmp/powermon-snmp.py b/snmp/powermon-snmp.py index 2b3ed74b6..e280fe710 100755 --- a/snmp/powermon-snmp.py +++ b/snmp/powermon-snmp.py @@ -67,22 +67,22 @@ ### Libraries -import os -import sys import getopt import json +import os import re import shutil import subprocess +import sys ### Option defaults -method = "" # must be one of methods array +method = "" # must be one of methods array verbose = False warnings = False -librenms = True # Return results in a JSON format suitable for Librenms - # Set to false to return JSON data only -pretty = False # Pretty printing +librenms = True # Return results in a JSON format suitable for Librenms +# Set to false to return JSON data only +pretty = False # Pretty printing ### Globals @@ -90,40 +90,51 @@ errorString = "" data = {} result = {} -usage = "USAGE: " + os.path.basename(__file__) + " [-h|--help] |" \ - + " [-m|--method ] [-N|--no-librenms] [-p|--pretty]" \ - + " [-v|--verbose] [-w|--warnings] | -l|--list-methods | -h|--help" +usage = ( + "USAGE: " + + os.path.basename(__file__) + + " [-h|--help] |" + + " [-m|--method ] [-N|--no-librenms] [-p|--pretty]" + + " [-v|--verbose] [-w|--warnings] | -l|--list-methods | -h|--help" +) methods = ["sensors", "hpasmcli"] -#costPerkWh = 0.15 # <<<< UNCOMMENT +# costPerkWh = 0.15 # <<<< UNCOMMENT ### General functions + def errorMsg(message): sys.stderr.write("ERROR: " + message + "\n") + def usageError(message="Invalid argument"): errorMsg(message) sys.stderr.write(usage + "\n") sys.exit(1) + def warningMsg(message): if verbose or warnings: sys.stderr.write("WARN: " + message + "\n") + def verboseMsg(message): if verbose: sys.stderr.write("INFO: " + message + "\n") + def listMethods(): global verbose verbose = True - 
verboseMsg("Available methods are: " + str(methods).strip('[]')) + verboseMsg("Available methods are: " + str(methods).strip("[]")) + ### Data functions + def getData(method): if method == "sensors": - data = getSensorData() + data = getSensorData() elif method == "hpasmcli": data = getHPASMData() @@ -132,6 +143,7 @@ def getData(method): return data + def getSensorData(): global error, errorString error = 2 @@ -139,6 +151,7 @@ def getSensorData(): try: import sensors + sensors.init() except ModuleNotFoundError as e: @@ -173,7 +186,7 @@ def getSensorData(): error = 0 errorString = "" - junk, meter_id = chip_name.split('acpi-', 1) + junk, meter_id = chip_name.split("acpi-", 1) sdata["meter"][meter_id] = {} for feature in chip: @@ -192,91 +205,105 @@ def getSensorData(): sdata[chip_name][feature_label] = feature.get_value() except: - es = sys.exc_info() - error = 1 - errorString = "Unable to get data: General exception: " + str(es) + es = sys.exc_info() + error = 1 + errorString = "Unable to get data: General exception: " + str(es) finally: sensors.cleanup() return sdata + def getHPASMData(): global error, errorString - exe = shutil.which('hpasmcli') - #if not os.access(candidate, os.W_OK): - cmd = [exe, '-s', 'show powermeter; show powersupply'] + exe = shutil.which("hpasmcli") + # if not os.access(candidate, os.W_OK): + cmd = [exe, "-s", "show powermeter; show powersupply"] warningMsg("hpasmcli only runs as root") try: - output = subprocess.run(cmd, capture_output=True, check=True, text=True, timeout=2) + output = subprocess.run( + cmd, capture_output=True, check=True, text=True, timeout=2 + ) except subprocess.CalledProcessError as e: - errorMsg(str(e) + ": " + str(e.stdout).strip('\n')) + errorMsg(str(e) + ": " + str(e.stdout).strip("\n")) sys.exit(1) - rawdata = str(output.stdout).replace('\t', ' ').replace('\n ', '\n').split('\n') + rawdata = str(output.stdout).replace("\t", " ").replace("\n ", "\n").split("\n") hdata = {} hdata["meter"] = {} hdata["psu"] = {} - re_meter = "^Power Meter #([0-9]+)" - re_meter_reading = "^Power Reading :" - re_psu = "^Power supply #[0-9]+" - re_psu_present = "^Present :" + re_meter = "^Power Meter #([0-9]+)" + re_meter_reading = "^Power Reading :" + re_psu = "^Power supply #[0-9]+" + re_psu_present = "^Present :" re_psu_redundant = "^Redundant:" re_psu_condition = "^Condition:" - re_psu_hotplug = "^Hotplug :" - re_psu_reading = "^Power :" + re_psu_hotplug = "^Hotplug :" + re_psu_reading = "^Power :" for line in rawdata: if re.match(re_meter, line): verboseMsg("found power meter: " + line) - junk, meter_id = line.split('#', 1) + junk, meter_id = line.split("#", 1) hdata["meter"][meter_id] = {} elif re.match(re_meter_reading, line): verboseMsg("found power meter reading: " + line) - junk, meter_reading = line.split(':', 1) + junk, meter_reading = line.split(":", 1) hdata["meter"][meter_id]["reading"] = meter_reading.strip() elif re.match(re_psu, line): verboseMsg("found power supply: " + line) - junk, psu_id = line.split('#', 1) + junk, psu_id = line.split("#", 1) hdata["psu"][psu_id] = {} elif re.match(re_psu_present, line): verboseMsg("found power supply present: " + line) - junk, psu_present = line.split(':', 1) + junk, psu_present = line.split(":", 1) hdata["psu"][psu_id]["present"] = psu_present.strip() elif re.match(re_psu_redundant, line): verboseMsg("found power supply redundant: " + line) - junk, psu_redundant = line.split(':', 1) + junk, psu_redundant = line.split(":", 1) hdata["psu"][psu_id]["redundant"] = psu_redundant.strip() elif 
re.match(re_psu_condition, line): verboseMsg("found power supply condition: " + line) - junk, psu_condition = line.split(':', 1) + junk, psu_condition = line.split(":", 1) hdata["psu"][psu_id]["condition"] = psu_condition.strip() elif re.match(re_psu_hotplug, line): verboseMsg("found power supply hotplug: " + line) - junk, psu_hotplug = line.split(':', 1) + junk, psu_hotplug = line.split(":", 1) hdata["psu"][psu_id]["hotplug"] = psu_hotplug.strip() elif re.match(re_psu_reading, line): verboseMsg("found power supply reading: " + line) - junk, psu_reading = line.split(':', 1) - hdata["psu"][psu_id]["reading"] = psu_reading.replace('Watts', '').strip() + junk, psu_reading = line.split(":", 1) + hdata["psu"][psu_id]["reading"] = psu_reading.replace("Watts", "").strip() return hdata + # Argument Parsing try: opts, args = getopt.gnu_getopt( - sys.argv[1:], 'm:hlNpvw', ['method', 'help', 'list-methods', 'no-librenms', 'pretty', 'verbose', 'warnings'] + sys.argv[1:], + "m:hlNpvw", + [ + "method", + "help", + "list-methods", + "no-librenms", + "pretty", + "verbose", + "warnings", + ], ) if len(args) != 0: usageError("Unknown argument") @@ -336,8 +363,8 @@ def getHPASMData(): data["reading"] = data["meter"]["1"]["reading"] # Example 2 - sum the two power supplies and apply a power factor - #pf = 0.95 - #data["reading"] = str( float(data["psu"]["1"]["reading"]) \ + # pf = 0.95 + # data["reading"] = str( float(data["psu"]["1"]["reading"]) \ # + float(data["psu"]["2"]["reading"]) / pf ) except: @@ -345,13 +372,13 @@ def getHPASMData(): # Build result if librenms: - result['version']=version - result['error']=error - result['errorString']=errorString - result['data']=data + result["version"] = version + result["error"] = error + result["errorString"] = errorString + result["data"] = data else: - result=data + result = data # Print result if pretty: @@ -359,4 +386,3 @@ def getHPASMData(): else: print(json.dumps(result)) - diff --git a/snmp/puppet_agent.py b/snmp/puppet_agent.py index 9d0f343cb..9cb64f17b 100755 --- a/snmp/puppet_agent.py +++ b/snmp/puppet_agent.py @@ -1,17 +1,17 @@ #!/usr/bin/env python3 import json -import yaml from os.path import isfile from time import time +import yaml output = {} -output['error'] = 0 -output['errorString'] = "" -output['version'] = 1 +output["error"] = 0 +output["errorString"] = "" +output["version"] = 1 -CONFIGFILE = '/etc/snmp/puppet.json' +CONFIGFILE = "/etc/snmp/puppet.json" # optional config file # { # "agent": { @@ -20,13 +20,15 @@ # } -summary_files = ['/var/cache/puppet/state/last_run_summary.yaml', - '/opt/puppetlabs/puppet/cache/state/last_run_summary.yaml'] +summary_files = [ + "/var/cache/puppet/state/last_run_summary.yaml", + "/opt/puppetlabs/puppet/cache/state/last_run_summary.yaml", +] def parse_yaml_file(filename): try: - yaml_data = yaml.load(open(filename, 'r')) + yaml_data = yaml.load(open(filename, "r")) msg = None except yaml.scanner.ScannerError as e: yaml_data = [] @@ -42,7 +44,7 @@ def time_processing(data): new_data = {} for k in data.keys(): - if k == 'last_run': + if k == "last_run": # generate difference to last run (seconds) new_data[k] = round(time() - data[k]) continue @@ -53,36 +55,36 @@ def time_processing(data): def processing(data): new_data = {} - for k in ['changes', 'events', 'resources', 'version']: + for k in ["changes", "events", "resources", "version"]: new_data[k] = data[k] - new_data['time'] = time_processing(data['time']) + new_data["time"] = time_processing(data["time"]) return new_data # extend last_run_summary_file 
list with optional custom file if isfile(CONFIGFILE): - with open(CONFIGFILE, 'r') as json_file: + with open(CONFIGFILE, "r") as json_file: try: configfile = json.load(json_file) except json.decoder.JSONDecodeError as e: - output['error'] = 1 - output['errorString'] = "Configfile Error: '%s'" % e + output["error"] = 1 + output["errorString"] = "Configfile Error: '%s'" % e else: configfile = None -if not output['error'] and configfile: +if not output["error"] and configfile: try: - if 'agent' in configfile.keys(): - custom_summary_file = configfile['agent']['summary_file'] + if "agent" in configfile.keys(): + custom_summary_file = configfile["agent"]["summary_file"] summary_files.insert(0, custom_summary_file) - except KeyError: - output['error'] = 1 - output['errorString'] = "Configfile Error: '%s'" % e + except KeyError as e: + output["error"] = 1 + output["errorString"] = "Configfile Error: '%s'" % e # search existing summary file from list -if not output['error']: +if not output["error"]: summary_file = None for sum_file in summary_files: if isfile(sum_file): @@ -90,17 +92,17 @@ def processing(data): break if not summary_file: - output['error'] = 1 - output['errorString'] = "no puppet agent run summary file found" + output["error"] = 1 + output["errorString"] = "no puppet agent run summary file found" # open summary file -if not output['error']: +if not output["error"]: msg, data = parse_yaml_file(summary_file) if msg: - output['error'] = 1 - output['errorString'] = msg + output["error"] = 1 + output["errorString"] = msg -output['data'] = processing(data) +output["data"] = processing(data) -print (json.dumps(output)) +print(json.dumps(output)) diff --git a/snmp/pureftpd.py b/snmp/pureftpd.py index a2c75672e..f75ec4c7a 100755 --- a/snmp/pureftpd.py +++ b/snmp/pureftpd.py @@ -1,48 +1,61 @@ #!/usr/bin/env python3 -import os import json +import os -CONFIGFILE = '/etc/snmp/pureftpd.json' +CONFIGFILE = "/etc/snmp/pureftpd.json" -pureftpwho_cmd = '/usr/sbin/pure-ftpwho' -pureftpwho_args = '-v -s -n' +pureftpwho_cmd = "/usr/sbin/pure-ftpwho" +pureftpwho_args = "-v -s -n" output_data = {} -output_data['version'] = 1 -output_data['errorString'] = "" -output_data['error'] = 0 +output_data["version"] = 1 +output_data["errorString"] = "" +output_data["error"] = 0 if os.path.isfile(CONFIGFILE): - with open(CONFIGFILE, 'r') as json_file: + with open(CONFIGFILE, "r") as json_file: try: configfile = json.load(json_file) except json.decoder.JSONDecodeError as e: - output_data['error'] = 1 - output_data['errorString'] = "Configfile Error: '%s'" % e + output_data["error"] = 1 + output_data["errorString"] = "Configfile Error: '%s'" % e else: configfile = None -if not output_data['error'] and configfile: +if not output_data["error"] and configfile: try: - if 'pureftpwho_cmd' in configfile.keys(): - pureftpwho_cmd = configfile['pureftpwho_cmd'] - except KeyError: - output_data['error'] = 1 - output_data['errorString'] = "Configfile Error: '%s'" % e + if "pureftpwho_cmd" in configfile.keys(): + pureftpwho_cmd = configfile["pureftpwho_cmd"] + except KeyError as e: + output_data["error"] = 1 + output_data["errorString"] = "Configfile Error: '%s'" % e -output = os.popen(pureftpwho_cmd + ' ' + pureftpwho_args).read() +output = os.popen(pureftpwho_cmd + " " + pureftpwho_args).read() data = {} -for line in output.split('\n'): +for line in output.split("\n"): if not len(line): continue - pid, acct, time, state, file, peer, local, port, transfered, total, percent, bandwidth = line.split('|') + ( + pid, + acct, + time, 
+ state, + file, + peer, + local, + port, + transfered, + total, + percent, + bandwidth, + ) = line.split("|") if "IDLE" in state: state = "IDLE" @@ -54,13 +67,11 @@ if acct not in data.keys(): data[acct] = {} if state not in data[acct]: - data[acct][state] = {'bitrate': 0, - 'connections': 0 - } + data[acct][state] = {"bitrate": 0, "connections": 0} bandwidth_bit = int(bandwidth) * 1024 * 8 - data[acct][state]['bitrate'] += bandwidth_bit - data[acct][state]['connections'] += 1 + data[acct][state]["bitrate"] += bandwidth_bit + data[acct][state]["connections"] += 1 -output_data['data'] = data +output_data["data"] = data -print (json.dumps(output_data)) +print(json.dumps(output_data)) diff --git a/snmp/raspberry.sh b/snmp/raspberry.sh index 41f2902a0..404e81528 100755 --- a/snmp/raspberry.sh +++ b/snmp/raspberry.sh @@ -20,27 +20,27 @@ getStatusMJPG='codec_enabled MJPG' getStatusWMV9='codec_enabled WMV9' $picmd $getTemp | $pised 's|[^0-9.]||g' -$picmd $getVoltsCore | $pised 's|[^0-9.]||g' -$picmd $getVoltsRamC | $pised 's|[^0-9.]||g' -$picmd $getVoltsRamI | $pised 's|[^0-9.]||g' -$picmd $getVoltsRamP | $pised 's|[^0-9.]||g' -$picmd $getFreqArm | $pised 's/frequency([0-9]*)=//g' -$picmd $getFreqCore | $pised 's/frequency([0-9]*)=//g' -$picmd $getStatusH264 | $pised 's/H264=//g' -$picmd $getStatusMPG2 | $pised 's/MPG2=//g' -$picmd $getStatusWVC1 | $pised 's/WVC1=//g' -$picmd $getStatusMPG4 | $pised 's/MPG4=//g' -$picmd $getStatusMJPG | $pised 's/MJPG=//g' -$picmd $getStatusWMV9 | $pised 's/WMV9=//g' -$picmd $getStatusH264 | $pised 's/enabled/2/g' -$picmd $getStatusMPG2 | $pised 's/enabled/2/g' -$picmd $getStatusWVC1 | $pised 's/enabled/2/g' -$picmd $getStatusMPG4 | $pised 's/enabled/2/g' -$picmd $getStatusMJPG | $pised 's/enabled/2/g' -$picmd $getStatusWMV9 | $pised 's/enabled/2/g' -$picmd $getStatusH264 | $pised 's/disabled/1/g' -$picmd $getStatusMPG2 | $pised 's/disabled/1/g' -$picmd $getStatusWVC1 | $pised 's/disabled/1/g' -$picmd $getStatusMPG4 | $pised 's/disabled/1/g' -$picmd $getStatusMJPG | $pised 's/disabled/1/g' -$picmd $getStatusWMV9 | $pised 's/disabled/1/g' +$picmd "$getVoltsCore" | $pised 's|[^0-9.]||g' +$picmd "$getVoltsRamC" | $pised 's|[^0-9.]||g' +$picmd "$getVoltsRamI" | $pised 's|[^0-9.]||g' +$picmd "$getVoltsRamP" | $pised 's|[^0-9.]||g' +$picmd "$getFreqArm" | $pised 's/frequency([0-9]*)=//g' +$picmd "$getFreqCore" | $pised 's/frequency([0-9]*)=//g' +$picmd "$getStatusH264" | $pised 's/H264=//g' +$picmd "$getStatusMPG2" | $pised 's/MPG2=//g' +$picmd "$getStatusWVC1" | $pised 's/WVC1=//g' +$picmd "$getStatusMPG4" | $pised 's/MPG4=//g' +$picmd "$getStatusMJPG" | $pised 's/MJPG=//g' +$picmd "$getStatusWMV9" | $pised 's/WMV9=//g' +$picmd "$getStatusH264" | $pised 's/enabled/2/g' +$picmd "$getStatusMPG2" | $pised 's/enabled/2/g' +$picmd "$getStatusWVC1" | $pised 's/enabled/2/g' +$picmd "$getStatusMPG4" | $pised 's/enabled/2/g' +$picmd "$getStatusMJPG" | $pised 's/enabled/2/g' +$picmd "$getStatusWMV9" | $pised 's/enabled/2/g' +$picmd "$getStatusH264" | $pised 's/disabled/1/g' +$picmd "$getStatusMPG2" | $pised 's/disabled/1/g' +$picmd "$getStatusWVC1" | $pised 's/disabled/1/g' +$picmd "$getStatusMPG4" | $pised 's/disabled/1/g' +$picmd "$getStatusMJPG" | $pised 's/disabled/1/g' +$picmd "$getStatusWMV9" | $pised 's/disabled/1/g' diff --git a/snmp/redis.py b/snmp/redis.py index 097dda78c..cd861e1f1 100755 --- a/snmp/redis.py +++ b/snmp/redis.py @@ -1,10 +1,14 @@ #!/usr/bin/env python3 -import subprocess import json +import subprocess shell_cmd = "redis-cli info" -all_data = 
subprocess.Popen(shell_cmd, shell=True, stdout=subprocess.PIPE).stdout.read().split(b'\n')
+all_data = (
+    subprocess.Popen(shell_cmd, shell=True, stdout=subprocess.PIPE)
+    .stdout.read()
+    .split(b"\n")
+)
 
 version = 1
 error = 0
@@ -13,24 +17,24 @@
 
 # stdout list to json
 try:
-    category = ''
+    category = ""
     for d in all_data:
-        d = d.replace(b'\r', b'')
+        d = d.replace(b"\r", b"")
 
-        if d in [b'']:
+        if d in [b""]:
             continue
 
-        if d.startswith(b'#'):
-            category = d.replace(b'# ', b'').decode("utf-8")
+        if d.startswith(b"#"):
+            category = d.replace(b"# ", b"").decode("utf-8")
             redis_data[category] = {}
             continue
 
         if not len(category):
             error = 2
-            error_string = 'category not defined'
+            error_string = "category not defined"
             break
 
-        k, v = d.split(b':')
+        k, v = d.split(b":")
         k = k.decode("utf-8")
         v = v.decode("utf-8")
 
@@ -38,11 +42,13 @@
 
 except:
     error = 1
-    error_string = 'data extracting error'
+    error_string = "data extracting error"
 
-output = {'version': version,
-          'error': error,
-          'errorString': error_string,
-          'data': redis_data}
+output = {
+    "version": version,
+    "error": error,
+    "errorString": error_string,
+    "data": redis_data,
+}
 
-print (json.dumps(output))
+print(json.dumps(output))
diff --git a/snmp/sdfsinfo b/snmp/sdfsinfo
index 56901ec97..6c83d241d 100644
--- a/snmp/sdfsinfo
+++ b/snmp/sdfsinfo
@@ -18,9 +18,9 @@
 #
 ##################################################################
 
-SDFSCLI_BIN=`which sdfscli`
+SDFSCLI_BIN=$(which sdfscli)
 SDFSCLI_CMD=' --volume-info'
-GREP_BIN=`which grep`
+GREP_BIN=$(which grep)
 GREP_CMD=' -o -E '
-DATAPOINTS=`$SDFSCLI_BIN $SDFSCLI_CMD | $GREP_BIN $GREP_CMD "(([0-9]+)\.?([0-9]+)?)"`
-echo $DATAPOINTS
+DATAPOINTS=$($SDFSCLI_BIN $SDFSCLI_CMD | $GREP_BIN $GREP_CMD "(([0-9]+)\.?([0-9]+)?)")
+echo "$DATAPOINTS"
diff --git a/snmp/seafile.py b/snmp/seafile.py
index c34cf6e6e..10834dcaa 100755
--- a/snmp/seafile.py
+++ b/snmp/seafile.py
@@ -14,9 +14,10 @@
 # Clients -> plattform (count)
 # Clients -> version (count)
 
-import requests
 import json
 
+import requests
+
 # Configfile content example:
 # {"url": "https://seafile.mydomain.org",
 #  "username": "some_admin_login@mail.address",
@@ -25,65 +26,65 @@
 #  "hide_monitoring_account": true
 # }
 
-CONFIGFILE='/etc/snmp/seafile.json'
+CONFIGFILE = "/etc/snmp/seafile.json"
 
 error = 0
-error_string = ''
+error_string = ""
 version = 1
 
 
 def get_data(url_path, data=None, token=None):
-    complete_url = "%s/%s" % (url, url_path)
-    headers = {'Accept': 'application/json'}
-    if token:
-        headers['Authorization'] = "Token %s" % token
+    complete_url = "%s/%s" % (url, url_path)
+    headers = {"Accept": "application/json"}
+    if token:
+        headers["Authorization"] = "Token %s" % token
+    try:
+        if token:
+            r = requests.get(complete_url, data=data, headers=headers)
+        else:
+            r = requests.post(complete_url, data=data, headers=headers)
         try:
-            if token:
-                r = requests.get(complete_url, data=data, headers=headers)
-            else:
-                r = requests.post(complete_url, data=data, headers=headers)
-            try:
-                return r.json()
-            except json.decoder.JSONDecodeError:
-                return 'no valid json returned - url correct?'
-        except requests.exceptions.RequestException as err:
-            return str(err)
+            return r.json()
+        except json.decoder.JSONDecodeError:
+            return "no valid json returned - url correct?"
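+        # network-level failures fall through to the handler below and are
+        # returned as plain strings so the caller can report them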
+ except requests.exceptions.RequestException as err: + return str(err) def get_devices(): # get all devices - url_path = 'api/v2.1/admin/devices/' + url_path = "api/v2.1/admin/devices/" return get_data(url_path, token=token) def get_groups(): # get all groups - url_path = 'api/v2.1/admin/groups/' + url_path = "api/v2.1/admin/groups/" return get_data(url_path, token=token) def get_sysinfo(): # get all groups - url_path = 'api/v2.1/admin/sysinfo/' + url_path = "api/v2.1/admin/sysinfo/" return get_data(url_path, token=token) def get_account_information(): # get all accounts withs details account_list = [] - for account in get_data('api2/accounts/', token=token): + for account in get_data("api2/accounts/", token=token): # get account details - url_path = 'api2/accounts/%s/' % account['email'] + url_path = "api2/accounts/%s/" % account["email"] account_data = get_data(url_path, token=token) # get libraries by owner - url_path = 'api/v2.1/admin/libraries/?owner=%s' % account['email'] - account_data['repos'] = get_data(url_path, token=token)['repos'] + url_path = "api/v2.1/admin/libraries/?owner=%s" % account["email"] + account_data["repos"] = get_data(url_path, token=token)["repos"] # get deleted libraries by owner - url_path = 'api/v2.1/admin/trash-libraries/?owner=%s' % account['email'] - account_data['trash_repos'] = get_data(url_path, token=token)['repos'] + url_path = "api/v2.1/admin/trash-libraries/?owner=%s" % account["email"] + account_data["trash_repos"] = get_data(url_path, token=token)["repos"] account_list.append(account_data) return account_list @@ -96,55 +97,53 @@ def resort_devices(device_list): for device in device_list: # don't list information assigned to monitor account if hide_monitoring_account: - if device['user'] == configfile['username']: + if device["user"] == configfile["username"]: continue - if device['platform'] not in platform.keys(): - platform[device['platform']] = 1 + if device["platform"] not in platform.keys(): + platform[device["platform"]] = 1 else: - platform[device['platform']] += 1 + platform[device["platform"]] += 1 - if device['client_version'] not in client_version.keys(): - client_version[device['client_version']] = 1 + if device["client_version"] not in client_version.keys(): + client_version[device["client_version"]] = 1 else: - client_version[device['client_version']] += 1 + client_version[device["client_version"]] += 1 - data['platform'] = [] + data["platform"] = [] for k, v in platform.items(): - data['platform'].append({'os_name': k, - 'clients':v}) - data['client_version'] = [] + data["platform"].append({"os_name": k, "clients": v}) + data["client_version"] = [] for k, v in client_version.items(): - data['client_version'].append({'client_version': k, - 'clients':v}) + data["client_version"].append({"client_version": k, "clients": v}) return data def resort_groups(group_list): - data = {'count': len(group_list)} + data = {"count": len(group_list)} return data def resort_accounts(account_list): - if account_identifier in ['name', 'email']: + if account_identifier in ["name", "email"]: identifier = account_identifier else: - identifier = 'name' + identifier = "name" - accepted_key_list = ['is_active', 'usage'] + accepted_key_list = ["is_active", "usage"] data = [] for user_account in account_list: # don't list information assigned to monitor account if hide_monitoring_account: - if user_account['email'] == configfile['username']: + if user_account["email"] == configfile["username"]: continue new_account = {} - new_account['owner'] = 
user_account[identifier] - new_account['repos'] = len(user_account['repos']) - new_account['trash_repos'] = len(user_account['trash_repos']) + new_account["owner"] = user_account[identifier] + new_account["repos"] = len(user_account["repos"]) + new_account["trash_repos"] = len(user_account["trash_repos"]) for k in user_account.keys(): if k not in accepted_key_list: @@ -152,11 +151,11 @@ def resort_accounts(account_list): new_account[k] = user_account[k] data.append(new_account) - return sorted(data, key=lambda k: k['owner'].lower()) + return sorted(data, key=lambda k: k["owner"].lower()) # ------------------------ MAIN -------------------------------------------------------- -with open(CONFIGFILE, 'r') as json_file: +with open(CONFIGFILE, "r") as json_file: try: configfile = json.load(json_file) except json.decoder.JSONDecodeError as e: @@ -164,24 +163,24 @@ def resort_accounts(account_list): error_string = "Configfile Error: '%s'" % e if not error: - url = configfile['url'] - username = configfile['username'] - password = configfile['password'] + url = configfile["url"] + username = configfile["username"] + password = configfile["password"] try: - account_identifier = configfile['account_identifier'] + account_identifier = configfile["account_identifier"] except KeyError: account_identifier = None try: - hide_monitoring_account = configfile['hide_monitoring_account'] + hide_monitoring_account = configfile["hide_monitoring_account"] except KeyError: hide_monitoring_account = False # get token - login_data = {'username': username, 'password': password} - ret = get_data('api2/auth-token/', data=login_data) + login_data = {"username": username, "password": password} + ret = get_data("api2/auth-token/", data=login_data) if type(ret) != str: - if 'token' in ret.keys(): - token = ret['token'] + if "token" in ret.keys(): + token = ret["token"] else: error = 1 try: @@ -194,18 +193,13 @@ def resort_accounts(account_list): data = {} if not error: - ret= get_account_information() + ret = get_account_information() if not error: - data['accounts'] = resort_accounts(ret) - data['devices'] = resort_devices(get_devices()['devices']) - data['groups'] = resort_groups(get_groups()['groups']) - data['sysinfo'] = get_sysinfo() + data["accounts"] = resort_accounts(ret) + data["devices"] = resort_devices(get_devices()["devices"]) + data["groups"] = resort_groups(get_groups()["groups"]) + data["sysinfo"] = get_sysinfo() -output = {'error': error, - 'errorString': error_string, - 'version': version, - 'data': data - } +output = {"error": error, "errorString": error_string, "version": version, "data": data} print(json.dumps(output)) - diff --git a/snmp/shoutcast.php b/snmp/shoutcast.php index 4c588355a..637d3abe4 100755 --- a/snmp/shoutcast.php +++ b/snmp/shoutcast.php @@ -18,105 +18,113 @@ /// /////////////////////////////////////////////////////////////////////////////////////// - // START SETTINGS /// - $config = "/opt/librenms/scripts/shoutcast.conf"; - $cache = "/opt/librenms/scripts/shoutcast.cache"; + $config = '/opt/librenms/scripts/shoutcast.conf'; + $cache = '/opt/librenms/scripts/shoutcast.cache'; // END SETTINGS /// - /// // DO NOT EDIT BENETH THIS LINE /// /////////////////////////////////////////////////////////////////////////////////////// - /* Do NOT run this script through a web browser */ - if (!isset($_SERVER["argv"][0]) || isset($_SERVER['REQUEST_METHOD']) || isset($_SERVER['REMOTE_ADDR'])) { - die('This script is only meant to run at the command line.'); - } - - $cmd = 
(isset($_SERVER['argv'][1]) ? $_SERVER['argv'][1] : ""); - - function get_data($host, $port) { - $fp = @fsockopen($host, $port, $errno, $errstr, 5); - if(!$fp) { $connect = 0; } - if (!isset($connect)) { - fputs($fp, "GET /7.html HTTP/1.0\r\n" - . "User-Agent: All In One - SHOUTcast Stats Parser" - . " (Mozilla Compatible)\r\n\r\n"); - while (!feof($fp)) { - $rawdata = fgets($fp, 1024); - } - fclose($fp); - } - preg_match('/body>(.*)<\/body/', $rawdata, $matches); - $res = explode(',', $matches[1], 7); - $res[7] = $host; - $res[8] = $port; - return $res; - } - - function get_list($config) { - if (file_exists($config)) { - $servers = file($config); - $data = array(); - foreach ($servers as $item=>$server) { - list($host, $port) = explode(":", $server, 2); - array_push($data, get_data(trim($host), trim($port))); - } - return $data; - } - } - - function doSNMPv2($vars) { - $res = array(); - foreach ($vars as $items=>$server) { - $var = array(); - $var['bitrate'] = (isset($server['5']) ? (($server['5'] / 8) * 1000) : "0"); - //$var['bitrate'] = (isset($server['5']) ? ($server['5'] * 1024) : "0"); - $var['traf_in'] = (isset($server['1']) ? ($var['bitrate'] * $server['1']) : "0"); - $var['traf_out'] = (isset($server['0']) ? ($var['bitrate'] * $server['0']) : "0"); - $var['current'] = (isset($server['0']) ? $server['0'] : "0"); - $var['status'] = (isset($server['1']) ? $server['1'] : "0"); - $var['peak'] = (isset($server['2']) ? $server['2'] : "0"); - $var['max'] = (isset($server['3']) ? $server['3'] : "0"); - $var['unique'] = (isset($server['4']) ? $server['4'] : "0"); - $host = (isset($server['7']) ? $server['7'] : "unknown"); - $port = (isset($server['8']) ? $server['8'] : "unknown"); - $tmp = $host.":".$port; - foreach ($var as $item=>$value) { - $tmp .= ";".$value; - } - array_push($res, $tmp); - } - return $res; - } - - function makeCacheFile($data, $cache) { - $fp = fopen($cache, 'w'); - foreach ($data as $item=>$value) { - fwrite($fp, $value."\n"); - } - fclose($fp); - } - - function readCacheFile($cache) { - if (file_exists($cache)) { - $data = file($cache); - foreach ($data as $item=>$value) { - echo trim($value)."\n"; - } - } - } - - if ($cmd == "makeCache") { - $servers = get_list($config); - $data = doSNMPv2($servers); - makeCacheFile($data, $cache); - } else { - readCacheFile($cache); - } + /* Do NOT run this script through a web browser */ + if (!isset($_SERVER['argv'][0]) || isset($_SERVER['REQUEST_METHOD']) || isset($_SERVER['REMOTE_ADDR'])) { + exit('This script is only meant to run at the command line.'); + } + + $cmd = (isset($_SERVER['argv'][1]) ? $_SERVER['argv'][1] : ''); + + function get_data($host, $port) + { + $fp = @fsockopen($host, $port, $errno, $errstr, 5); + if (!$fp) { + $connect = 0; + } + if (!isset($connect)) { + fputs($fp, "GET /7.html HTTP/1.0\r\n" + .'User-Agent: All In One - SHOUTcast Stats Parser' + ." 
(Mozilla Compatible)\r\n\r\n"); + while (!feof($fp)) { + $rawdata = fgets($fp, 1024); + } + fclose($fp); + } + preg_match('/body>(.*)<\/body/', $rawdata, $matches); + $res = explode(',', $matches[1], 7); + $res[7] = $host; + $res[8] = $port; + + return $res; + } + + function get_list($config) + { + if (file_exists($config)) { + $servers = file($config); + $data = []; + foreach ($servers as $item=>$server) { + list($host, $port) = explode(':', $server, 2); + array_push($data, get_data(trim($host), trim($port))); + } + + return $data; + } + } + + function doSNMPv2($vars) + { + $res = []; + foreach ($vars as $items=>$server) { + $var = []; + $var['bitrate'] = (isset($server['5']) ? (($server['5'] / 8) * 1000) : '0'); + //$var['bitrate'] = (isset($server['5']) ? ($server['5'] * 1024) : "0"); + $var['traf_in'] = (isset($server['1']) ? ($var['bitrate'] * $server['1']) : '0'); + $var['traf_out'] = (isset($server['0']) ? ($var['bitrate'] * $server['0']) : '0'); + $var['current'] = (isset($server['0']) ? $server['0'] : '0'); + $var['status'] = (isset($server['1']) ? $server['1'] : '0'); + $var['peak'] = (isset($server['2']) ? $server['2'] : '0'); + $var['max'] = (isset($server['3']) ? $server['3'] : '0'); + $var['unique'] = (isset($server['4']) ? $server['4'] : '0'); + $host = (isset($server['7']) ? $server['7'] : 'unknown'); + $port = (isset($server['8']) ? $server['8'] : 'unknown'); + $tmp = $host.':'.$port; + foreach ($var as $item=>$value) { + $tmp .= ';'.$value; + } + array_push($res, $tmp); + } + + return $res; + } + + function makeCacheFile($data, $cache) + { + $fp = fopen($cache, 'w'); + foreach ($data as $item=> $value) { + fwrite($fp, $value."\n"); + } + fclose($fp); + } + + function readCacheFile($cache) + { + if (file_exists($cache)) { + $data = file($cache); + foreach ($data as $item=>$value) { + echo trim($value)."\n"; + } + } + } + + if ($cmd == 'makeCache') { + $servers = get_list($config); + $data = doSNMPv2($servers); + makeCacheFile($data, $cache); + } else { + readCacheFile($cache); + } ?> diff --git a/snmp/ups-apcups.sh b/snmp/ups-apcups.sh index 0e41a14e1..64b55c30f 100755 --- a/snmp/ups-apcups.sh +++ b/snmp/ups-apcups.sh @@ -17,13 +17,13 @@ BIN_GREP='/usr/bin/grep' ################################################################ # Don't change anything unless you know what are you doing # ################################################################ -TMP=`$BIN_APCS 2>/dev/null` +TMP=$($BIN_APCS 2>/dev/null) for value in "LINEV:[0-9]+" "LOADPCT:[0-9.]+" "BCHARGE:[0-9.]+" "TIMELEFT:[0-9.]+" "^BATTV:[0-9.]+" "NOMINV:[0-9]+" "NOMBATTV:[0-9.]+" do - OUT=`echo "$TMP" | $BIN_TR -d ' ' | $BIN_GREP -Eo $value | $BIN_CUT -d ":" -f 2` + OUT=$(echo "$TMP" | $BIN_TR -d ' ' | $BIN_GREP -Eo "$value" | $BIN_CUT -d ":" -f 2) if [ -n "$OUT" ]; then - echo $OUT + echo "$OUT" else echo "Unknown" fi diff --git a/snmp/ups-nut.sh b/snmp/ups-nut.sh index 7e3d8a15a..7fa5a0ba3 100755 --- a/snmp/ups-nut.sh +++ b/snmp/ups-nut.sh @@ -16,9 +16,9 @@ TMP=$(upsc $UPS_NAME 2>/dev/null) for value in "battery\.charge: [0-9.]+" "battery\.(runtime\.)?low: [0-9]+" "battery\.runtime: [0-9]+" "battery\.voltage: [0-9.]+" "battery\.voltage\.nominal: [0-9]+" "input\.voltage\.nominal: [0-9.]+" "input\.voltage: [0-9.]+" "ups\.load: [0-9.]+" do - OUT=$(echo $TMP | grep -Eo "$value" | awk '{print $2}' | LANG=C sort | head -n 1) + OUT=$(echo "$TMP" | grep -Eo "$value" | awk '{print $2}' | LANG=C sort | head -n 1) if [ -n "$OUT" ]; then - echo $OUT + echo "$OUT" else echo "Unknown" fi @@ -26,11 +26,11 @@ done for value in 
"ups\.status:[A-Z ]{0,}OL" "ups\.status:[A-Z ]{0,}OB" "ups\.status:[A-Z ]{0,}LB" "ups\.status:[A-Z ]{0,}HB" "ups\.status:[A-Z ]{0,}RB" "ups\.status:[A-Z ]{0,}CHRG" "ups\.status:[A-Z ]{0,}DISCHRG" "ups\.status:[A-Z ]{0,}BYPASS" "ups\.status:[A-Z ]{0,}CAL" "ups\.status:[A-Z ]{0,}OFF" "ups\.status:[A-Z ]{0,}OVER" "ups\.status:[A-Z ]{0,}TRIM" "ups\.status:[A-Z ]{0,}BOOST" "ups\.status:[A-Z ]{0,}FSD" do - UNKNOWN=$(echo $TMP | grep -Eo "ups\.status:") + UNKNOWN=$(echo "$TMP" | grep -Eo "ups\.status:") if [ -z "$UNKNOWN" ]; then echo "Unknown" else - OUT=$(echo $TMP | grep -Eo "$value") + OUT=$(echo "$TMP" | grep -Eo "$value") if [ -n "$OUT" ]; then echo "1" else diff --git a/snmp/voipmon-stats.sh b/snmp/voipmon-stats.sh index 671a04af9..1dcab4d9f 100644 --- a/snmp/voipmon-stats.sh +++ b/snmp/voipmon-stats.sh @@ -5,9 +5,9 @@ used_memory=$(ps -C voipmonitor -o rsz | awk 'FNR==2 {print}') cpu_load=$(ps -C voipmonitor -o %cpu | awk 'FNR==2 {print}') pid=$(pidof voipmonitor) -total_files=$(ls -l /proc/${pid}/fd | wc -l) +total_files=$(ls -l /proc/"${pid}"/fd | wc -l) -echo "Used Memory="$used_memory -echo "CPU Load="$cpu_load -echo "Open files="$total_files +echo "Used Memory=""$used_memory" +echo "CPU Load=""$cpu_load" +echo "Open files=""$total_files" exit diff --git a/snmp/zfs-freebsd.py b/snmp/zfs-freebsd.py index d32e959a1..4ebd6d683 100644 --- a/snmp/zfs-freebsd.py +++ b/snmp/zfs-freebsd.py @@ -5,124 +5,187 @@ import json import subprocess -SYSCTL = '/sbin/sysctl' -ZPOOL = '/usr/local/sbin/zpool' +SYSCTL = "/sbin/sysctl" +ZPOOL = "/usr/local/sbin/zpool" + def percent(numerator, denominator, default=0): - try: - return numerator / denominator * 100 - except ZeroDivisionError: - return default + try: + return numerator / denominator * 100 + except ZeroDivisionError: + return default + def main(args): - p = subprocess.run([SYSCTL, '-q', 'kstat.zfs', 'vfs.zfs'], stdout=subprocess.PIPE, universal_newlines=True) - - if p.returncode != 0: - return p.returncode - - def chomp(line): - bits = [b.strip() for b in line.split(':')] - try: - return bits[0], int(bits[1]) - except ValueError: - return bits[0], bits[1] - - stats = dict(chomp(l) for l in p.stdout.splitlines() if l) - if 'kstat.zfs.misc.arcstats.recycle_miss' not in stats: - stats['kstat.zfs.misc.arcstats.recycle_miss'] = 0 - - output = dict() - - # ARC misc - output['deleted'] = stats['kstat.zfs.misc.arcstats.deleted'] - output['evict_skip'] = stats['kstat.zfs.misc.arcstats.evict_skip'] - output['mutex_skip'] = stats['kstat.zfs.misc.arcstats.mutex_miss'] - output['recycle_miss'] = stats['kstat.zfs.misc.arcstats.recycle_miss'] - - # ARC size - output['target_size_per'] = stats['kstat.zfs.misc.arcstats.c'] / stats['kstat.zfs.misc.arcstats.c_max'] * 100 - output['arc_size_per'] = stats['kstat.zfs.misc.arcstats.size'] / stats['kstat.zfs.misc.arcstats.c_max'] * 100 - output['target_size_arat'] = stats['kstat.zfs.misc.arcstats.c'] / stats['kstat.zfs.misc.arcstats.c_max'] - output['min_size_per'] = stats['kstat.zfs.misc.arcstats.c_min'] / stats['kstat.zfs.misc.arcstats.c_max'] * 100 - - output['arc_size'] = stats['kstat.zfs.misc.arcstats.size'] - output['target_size_max'] = stats['kstat.zfs.misc.arcstats.c_max'] - output['target_size_min'] = stats['kstat.zfs.misc.arcstats.c_min'] - output['target_size'] = stats['kstat.zfs.misc.arcstats.c'] - - # ARC size breakdown - output['mfu_size'] = stats['kstat.zfs.misc.arcstats.size'] - stats['kstat.zfs.misc.arcstats.p'] - output['p'] = stats['kstat.zfs.misc.arcstats.p'] - output['rec_used_per'] = 
stats['kstat.zfs.misc.arcstats.p'] / stats['kstat.zfs.misc.arcstats.size'] * 100 - output['freq_used_per'] = output['mfu_size'] / stats['kstat.zfs.misc.arcstats.size'] * 100 - - # ARC misc efficiency stats - output['arc_hits'] = stats['kstat.zfs.misc.arcstats.hits'] - output['arc_misses'] = stats['kstat.zfs.misc.arcstats.misses'] - output['demand_data_hits'] = stats['kstat.zfs.misc.arcstats.demand_data_hits'] - output['demand_data_misses'] = stats['kstat.zfs.misc.arcstats.demand_data_misses'] - output['demand_meta_hits'] = stats['kstat.zfs.misc.arcstats.demand_metadata_hits'] - output['demand_meta_misses'] = stats['kstat.zfs.misc.arcstats.demand_metadata_misses'] - output['mfu_ghost_hits'] = stats['kstat.zfs.misc.arcstats.mfu_ghost_hits'] - output['mfu_hits'] = stats['kstat.zfs.misc.arcstats.mfu_hits'] - output['mru_ghost_hits'] = stats['kstat.zfs.misc.arcstats.mru_ghost_hits'] - output['mru_hits'] = stats['kstat.zfs.misc.arcstats.mru_hits'] - output['pre_data_hits'] = stats['kstat.zfs.misc.arcstats.prefetch_data_hits'] - output['pre_data_misses'] = stats['kstat.zfs.misc.arcstats.prefetch_data_misses'] - output['pre_meta_hits'] = stats['kstat.zfs.misc.arcstats.prefetch_metadata_hits'] - output['pre_meta_misses'] = stats['kstat.zfs.misc.arcstats.prefetch_metadata_misses'] - - output['anon_hits'] = output['arc_hits'] - (output['mfu_hits'] + output['mru_hits'] + output['mfu_ghost_hits'] + output['mru_ghost_hits']) - output['arc_accesses_total'] = output['arc_hits'] + output['arc_misses'] - output['demand_data_total'] = output['demand_data_hits'] + output['demand_data_misses'] - output['pre_data_total'] = output['pre_data_hits'] + output['pre_data_misses'] - output['real_hits'] = output['mfu_hits'] + output['mru_hits'] - - # ARC efficiency percents - output['cache_hits_per'] = percent(output['arc_hits'], output['arc_accesses_total']) - output['cache_miss_per'] = percent(output['arc_misses'], output['arc_accesses_total']) - output['actual_hit_per'] = percent(output['real_hits'], output['arc_accesses_total']) - output['data_demand_per'] = percent(output['demand_data_hits'], output['demand_data_total']) - output['data_pre_per'] = percent(output['pre_data_hits'], output['pre_data_total']) - output['anon_hits_per'] = percent(output['anon_hits'], output['arc_hits']) - output['mru_per'] = percent(output['mru_hits'], output['arc_hits']) - output['mfu_per'] = percent(output['mfu_hits'], output['arc_hits']) - output['mru_ghost_per'] = percent(output['mru_ghost_hits'], output['arc_hits']) - output['mfu_ghost_per'] = percent(output['mfu_ghost_hits'], output['arc_hits']) - output['demand_hits_per'] = percent(output['demand_data_hits'], output['arc_hits']) - output['pre_hits_per'] = percent(output['pre_data_hits'], output['arc_hits']) - output['meta_hits_per'] = percent(output['demand_meta_hits'], output['arc_hits']) - output['pre_meta_hits_per'] = percent(output['pre_meta_hits'], output['arc_hits']) - output['demand_misses_per'] = percent(output['demand_data_misses'], output['arc_misses']) - output['pre_misses_per'] = percent(output['pre_data_misses'], output['arc_misses']) - output['meta_misses_per'] = percent(output['demand_meta_misses'], output['arc_misses']) - output['pre_meta_misses_per'] = percent(output['pre_meta_misses'], output['arc_misses']) - - # pools - p = subprocess.run([ZPOOL, 'list', '-pH'], stdout=subprocess.PIPE, universal_newlines=True) - if p.returncode != 0: - return p.returncode - output['pools'] = [] - fields = ['name', 'size', 'alloc', 'free', 'ckpoint', 'expandsz', 'frag', 'cap', 
'dedup'] - for l in p.stdout.splitlines(): - p = dict(zip(fields, l.split('\t'))) - if p['ckpoint'] == '-': - p['ckpoint'] = 0 - if p['expandsz'] == '-': - p['expandsz'] = 0 - p['frag'] = p['frag'].rstrip('%') - if p['frag'] == '-': - p['frag'] = 0 - p['cap'] = p['cap'].rstrip('%') - if p['cap'] == '-': - p['cap'] = 0 - p['dedup'] = p['dedup'].rstrip('x') - output['pools'].append(p) - - print(json.dumps(output)) - - return 0 - -if __name__ == '__main__': - import sys - sys.exit(main(sys.argv[1:])) + p = subprocess.run( + [SYSCTL, "-q", "kstat.zfs", "vfs.zfs"], + stdout=subprocess.PIPE, + universal_newlines=True, + ) + + if p.returncode != 0: + return p.returncode + + def chomp(line): + bits = [b.strip() for b in line.split(":")] + try: + return bits[0], int(bits[1]) + except ValueError: + return bits[0], bits[1] + + stats = dict(chomp(line) for line in p.stdout.splitlines() if line) + if "kstat.zfs.misc.arcstats.recycle_miss" not in stats: + stats["kstat.zfs.misc.arcstats.recycle_miss"] = 0 + + output = dict() + + # ARC misc + output["deleted"] = stats["kstat.zfs.misc.arcstats.deleted"] + output["evict_skip"] = stats["kstat.zfs.misc.arcstats.evict_skip"] + output["mutex_skip"] = stats["kstat.zfs.misc.arcstats.mutex_miss"] + output["recycle_miss"] = stats["kstat.zfs.misc.arcstats.recycle_miss"] + + # ARC size + output["target_size_per"] = ( + stats["kstat.zfs.misc.arcstats.c"] + / stats["kstat.zfs.misc.arcstats.c_max"] + * 100 + ) + output["arc_size_per"] = ( + stats["kstat.zfs.misc.arcstats.size"] + / stats["kstat.zfs.misc.arcstats.c_max"] + * 100 + ) + output["target_size_arat"] = ( + stats["kstat.zfs.misc.arcstats.c"] / stats["kstat.zfs.misc.arcstats.c_max"] + ) + output["min_size_per"] = ( + stats["kstat.zfs.misc.arcstats.c_min"] + / stats["kstat.zfs.misc.arcstats.c_max"] + * 100 + ) + + output["arc_size"] = stats["kstat.zfs.misc.arcstats.size"] + output["target_size_max"] = stats["kstat.zfs.misc.arcstats.c_max"] + output["target_size_min"] = stats["kstat.zfs.misc.arcstats.c_min"] + output["target_size"] = stats["kstat.zfs.misc.arcstats.c"] + + # ARC size breakdown + output["mfu_size"] = ( + stats["kstat.zfs.misc.arcstats.size"] - stats["kstat.zfs.misc.arcstats.p"] + ) + output["p"] = stats["kstat.zfs.misc.arcstats.p"] + output["rec_used_per"] = ( + stats["kstat.zfs.misc.arcstats.p"] / stats["kstat.zfs.misc.arcstats.size"] * 100 + ) + output["freq_used_per"] = ( + output["mfu_size"] / stats["kstat.zfs.misc.arcstats.size"] * 100 + ) + + # ARC misc efficiency stats + output["arc_hits"] = stats["kstat.zfs.misc.arcstats.hits"] + output["arc_misses"] = stats["kstat.zfs.misc.arcstats.misses"] + output["demand_data_hits"] = stats["kstat.zfs.misc.arcstats.demand_data_hits"] + output["demand_data_misses"] = stats["kstat.zfs.misc.arcstats.demand_data_misses"] + output["demand_meta_hits"] = stats["kstat.zfs.misc.arcstats.demand_metadata_hits"] + output["demand_meta_misses"] = stats[ + "kstat.zfs.misc.arcstats.demand_metadata_misses" + ] + output["mfu_ghost_hits"] = stats["kstat.zfs.misc.arcstats.mfu_ghost_hits"] + output["mfu_hits"] = stats["kstat.zfs.misc.arcstats.mfu_hits"] + output["mru_ghost_hits"] = stats["kstat.zfs.misc.arcstats.mru_ghost_hits"] + output["mru_hits"] = stats["kstat.zfs.misc.arcstats.mru_hits"] + output["pre_data_hits"] = stats["kstat.zfs.misc.arcstats.prefetch_data_hits"] + output["pre_data_misses"] = stats["kstat.zfs.misc.arcstats.prefetch_data_misses"] + output["pre_meta_hits"] = stats["kstat.zfs.misc.arcstats.prefetch_metadata_hits"] + output["pre_meta_misses"] = stats[ + 
"kstat.zfs.misc.arcstats.prefetch_metadata_misses" + ] + + output["anon_hits"] = output["arc_hits"] - ( + output["mfu_hits"] + + output["mru_hits"] + + output["mfu_ghost_hits"] + + output["mru_ghost_hits"] + ) + output["arc_accesses_total"] = output["arc_hits"] + output["arc_misses"] + output["demand_data_total"] = ( + output["demand_data_hits"] + output["demand_data_misses"] + ) + output["pre_data_total"] = output["pre_data_hits"] + output["pre_data_misses"] + output["real_hits"] = output["mfu_hits"] + output["mru_hits"] + + # ARC efficiency percents + output["cache_hits_per"] = percent(output["arc_hits"], output["arc_accesses_total"]) + output["cache_miss_per"] = percent( + output["arc_misses"], output["arc_accesses_total"] + ) + output["actual_hit_per"] = percent( + output["real_hits"], output["arc_accesses_total"] + ) + output["data_demand_per"] = percent( + output["demand_data_hits"], output["demand_data_total"] + ) + output["data_pre_per"] = percent(output["pre_data_hits"], output["pre_data_total"]) + output["anon_hits_per"] = percent(output["anon_hits"], output["arc_hits"]) + output["mru_per"] = percent(output["mru_hits"], output["arc_hits"]) + output["mfu_per"] = percent(output["mfu_hits"], output["arc_hits"]) + output["mru_ghost_per"] = percent(output["mru_ghost_hits"], output["arc_hits"]) + output["mfu_ghost_per"] = percent(output["mfu_ghost_hits"], output["arc_hits"]) + output["demand_hits_per"] = percent(output["demand_data_hits"], output["arc_hits"]) + output["pre_hits_per"] = percent(output["pre_data_hits"], output["arc_hits"]) + output["meta_hits_per"] = percent(output["demand_meta_hits"], output["arc_hits"]) + output["pre_meta_hits_per"] = percent(output["pre_meta_hits"], output["arc_hits"]) + output["demand_misses_per"] = percent( + output["demand_data_misses"], output["arc_misses"] + ) + output["pre_misses_per"] = percent(output["pre_data_misses"], output["arc_misses"]) + output["meta_misses_per"] = percent( + output["demand_meta_misses"], output["arc_misses"] + ) + output["pre_meta_misses_per"] = percent( + output["pre_meta_misses"], output["arc_misses"] + ) + + # pools + p = subprocess.run( + [ZPOOL, "list", "-pH"], stdout=subprocess.PIPE, universal_newlines=True + ) + if p.returncode != 0: + return p.returncode + output["pools"] = [] + fields = [ + "name", + "size", + "alloc", + "free", + "ckpoint", + "expandsz", + "frag", + "cap", + "dedup", + ] + for l in p.stdout.splitlines(): + p = dict(zip(fields, l.split("\t"))) + if p["ckpoint"] == "-": + p["ckpoint"] = 0 + if p["expandsz"] == "-": + p["expandsz"] = 0 + p["frag"] = p["frag"].rstrip("%") + if p["frag"] == "-": + p["frag"] = 0 + p["cap"] = p["cap"].rstrip("%") + if p["cap"] == "-": + p["cap"] = 0 + p["dedup"] = p["dedup"].rstrip("x") + output["pools"].append(p) + + print(json.dumps(output)) + + return 0 + + +if __name__ == "__main__": + import sys + + sys.exit(main(sys.argv[1:])) diff --git a/snmp/zfs-linux b/snmp/zfs-linux index 638705c51..e1fb67126 100755 --- a/snmp/zfs-linux +++ b/snmp/zfs-linux @@ -2,64 +2,70 @@ import json import subprocess + def proc_err(cmd, proc): # output process error and first line of error code return "{}{}".format( subprocess.CalledProcessError(proc.returncode, cmd, proc.stderr), - " ({})".format(proc.stderr.splitlines()[0]) if proc.stderr.splitlines() else "" + " ({})".format(proc.stderr.splitlines()[0]) if proc.stderr.splitlines() else "", ) + def main(args): - LINUX = '/proc/spl/kstat/zfs/arcstats' - BSD1 = 'sysctl' - BSD2 = 'kstat.zfs.misc.arcstats' - ILLUMOS = 'kstat -n arcstats' 
+ LINUX = "/proc/spl/kstat/zfs/arcstats" + BSD1 = "sysctl" + BSD2 = "kstat.zfs.misc.arcstats" + ILLUMOS = "kstat -n arcstats" COLUMN = 1 SPLIT = None res = {} try: - LINES = open(LINUX, 'r').readlines() + LINES = open(LINUX, "r").readlines() COLUMN = 2 except IOError as e1: try: - proc = subprocess.run([BSD1, BSD2], stdout=subprocess.PIPE, universal_newlines=True) + proc = subprocess.run( + [BSD1, BSD2], stdout=subprocess.PIPE, universal_newlines=True + ) LINES = proc.stdout.splitlines() - LINES = [x[len(BSD2)+1:] for x in LINES] - SPLIT = ':' + LINES = [x[len(BSD2) + 1 :] for x in LINES] + SPLIT = ":" except FileNotFoundError as e2: try: - proc = subprocess.run(ILLUMOS.split(), stdout=subprocess.PIPE, universal_newlines=True) + proc = subprocess.run( + ILLUMOS.split(), stdout=subprocess.PIPE, universal_newlines=True + ) LINES = proc.stdout.splitlines() except FileNotFoundError as e3: - print('Linux :', e1) - print('BSD :', e2) - print('Illumos:', e3) + print("Linux :", e1) + print("BSD :", e2) + print("Illumos:", e3) return 1 - + LINES = [x.strip() for x in LINES] - + STATS = {} for line in LINES[2:]: splitline = line.split(SPLIT) try: - STATS[splitline[0]] = int(splitline[COLUMN]) + STATS[splitline[0]] = int(splitline[COLUMN]) # Skip non int value like Illumos crtime, empty line at the end except: continue - + # ARC misc - DELETED = STATS['deleted'] - EVICT_SKIP = STATS['evict_skip'] - MUTEX_SKIP = STATS['mutex_miss'] - RECYCLE_MISS = STATS['recycle_miss'] if 'recycle_miss' in STATS else 0 + DELETED = STATS["deleted"] + EVICT_SKIP = STATS["evict_skip"] + MUTEX_SKIP = STATS["mutex_miss"] + RECYCLE_MISS = STATS["recycle_miss"] if "recycle_miss" in STATS else 0 # ARC size - ARC_SIZE = STATS['size'] - TARGET_SIZE_MAX = STATS['c_max'] - TARGET_SIZE_MIN = STATS['c_min'] - TARGET_SIZE = STATS['c'] + ARC_SIZE = STATS["size"] + TARGET_SIZE_MAX = STATS["c_max"] + TARGET_SIZE_MIN = STATS["c_min"] + TARGET_SIZE = STATS["c"] TARGET_SIZE_PERCENT = TARGET_SIZE / TARGET_SIZE_MAX * 100 ARC_SIZE_PERCENT = ARC_SIZE / TARGET_SIZE_MAX * 100 @@ -70,7 +76,7 @@ def main(args): MFU_SIZE = 0 RECENTLY_USED_PERCENT = 0 FREQUENTLY_USED_PERCENT = 0 - P = STATS['p'] + P = STATS["p"] if ARC_SIZE >= TARGET_SIZE: MFU_SIZE = ARC_SIZE - P @@ -81,22 +87,21 @@ def main(args): RECENTLY_USED_PERCENT = P / TARGET_SIZE * 100 FREQUENTLY_USED_PERCENT = MFU_SIZE / TARGET_SIZE * 100 - # ARC misc. 
efficient stats - ARC_HITS = STATS['hits'] - ARC_MISSES = STATS['misses'] - DEMAND_DATA_HITS = STATS['demand_data_hits'] - DEMAND_DATA_MISSES = STATS['demand_data_misses'] - DEMAND_METADATA_HITS = STATS['demand_metadata_hits'] - DEMAND_METADATA_MISSES = STATS['demand_metadata_misses'] - MFU_GHOST_HITS = STATS['mfu_ghost_hits'] - MFU_HITS = STATS['mfu_hits'] - MRU_GHOST_HITS = STATS['mru_ghost_hits'] - MRU_HITS = STATS['mru_hits'] - PREFETCH_DATA_HITS = STATS['prefetch_data_hits'] - PREFETCH_DATA_MISSES = STATS['prefetch_data_misses'] - PREFETCH_METADATA_HITS = STATS['prefetch_metadata_hits'] - PREFETCH_METADATA_MISSES = STATS['prefetch_metadata_misses'] + ARC_HITS = STATS["hits"] + ARC_MISSES = STATS["misses"] + DEMAND_DATA_HITS = STATS["demand_data_hits"] + DEMAND_DATA_MISSES = STATS["demand_data_misses"] + DEMAND_METADATA_HITS = STATS["demand_metadata_hits"] + DEMAND_METADATA_MISSES = STATS["demand_metadata_misses"] + MFU_GHOST_HITS = STATS["mfu_ghost_hits"] + MFU_HITS = STATS["mfu_hits"] + MRU_GHOST_HITS = STATS["mru_ghost_hits"] + MRU_HITS = STATS["mru_hits"] + PREFETCH_DATA_HITS = STATS["prefetch_data_hits"] + PREFETCH_DATA_MISSES = STATS["prefetch_data_misses"] + PREFETCH_METADATA_HITS = STATS["prefetch_metadata_hits"] + PREFETCH_METADATA_MISSES = STATS["prefetch_metadata_misses"] ANON_HITS = ARC_HITS - (MFU_HITS + MRU_HITS + MFU_GHOST_HITS + MRU_GHOST_HITS) ARC_ACCESSES_TOTAL = ARC_HITS + ARC_MISSES @@ -108,9 +113,15 @@ def main(args): CACHE_HIT_PERCENT = ARC_HITS / ARC_ACCESSES_TOTAL * 100 CACHE_MISS_PERCENT = ARC_MISSES / ARC_ACCESSES_TOTAL * 100 ACTUAL_HIT_PERCENT = REAL_HITS / ARC_ACCESSES_TOTAL * 100 - DATA_DEMAND_PERCENT = DEMAND_DATA_HITS / DEMAND_DATA_TOTAL * 100 if DEMAND_DATA_TOTAL != 0 else 0 + DATA_DEMAND_PERCENT = ( + DEMAND_DATA_HITS / DEMAND_DATA_TOTAL * 100 if DEMAND_DATA_TOTAL != 0 else 0 + ) - DATA_PREFETCH_PERCENT = PREFETCH_DATA_HITS / PREFETCH_DATA_TOTAL * 100 if PREFETCH_DATA_TOTAL != 0 else 0 + DATA_PREFETCH_PERCENT = ( + PREFETCH_DATA_HITS / PREFETCH_DATA_TOTAL * 100 + if PREFETCH_DATA_TOTAL != 0 + else 0 + ) ANON_HITS_PERCENT = ANON_HITS / ARC_HITS * 100 if ANON_HITS != 0 else 0 @@ -121,125 +132,157 @@ def main(args): DEMAND_HITS_PERCENT = DEMAND_DATA_HITS / ARC_HITS * 100 if ARC_HITS != 0 else 0 PREFETCH_HITS_PERCENT = PREFETCH_DATA_HITS / ARC_HITS * 100 if ARC_HITS != 0 else 0 - METADATA_HITS_PERCENT = DEMAND_METADATA_HITS / ARC_HITS * 100 if ARC_HITS != 0 else 0 - PREFETCH_METADATA_HITS_PERCENT = DEMAND_METADATA_HITS / ARC_HITS * 100 if ARC_HITS != 0 else 0 + METADATA_HITS_PERCENT = ( + DEMAND_METADATA_HITS / ARC_HITS * 100 if ARC_HITS != 0 else 0 + ) + PREFETCH_METADATA_HITS_PERCENT = ( + DEMAND_METADATA_HITS / ARC_HITS * 100 if ARC_HITS != 0 else 0 + ) - DEMAND_MISSES_PERCENT = DEMAND_DATA_MISSES / ARC_MISSES * 100 if ARC_MISSES != 0 else 0 - PREFETCH_MISSES_PERCENT = PREFETCH_DATA_MISSES / ARC_MISSES * 100 if ARC_MISSES != 0 else 0 - METADATA_MISSES_PERCENT = DEMAND_METADATA_MISSES / ARC_MISSES * 100 if ARC_MISSES != 0 else 0 - PREFETCH_METADATA_MISSES_PERCENT = PREFETCH_METADATA_MISSES / ARC_MISSES * 100 if ARC_MISSES != 0 else 0 + DEMAND_MISSES_PERCENT = ( + DEMAND_DATA_MISSES / ARC_MISSES * 100 if ARC_MISSES != 0 else 0 + ) + PREFETCH_MISSES_PERCENT = ( + PREFETCH_DATA_MISSES / ARC_MISSES * 100 if ARC_MISSES != 0 else 0 + ) + METADATA_MISSES_PERCENT = ( + DEMAND_METADATA_MISSES / ARC_MISSES * 100 if ARC_MISSES != 0 else 0 + ) + PREFETCH_METADATA_MISSES_PERCENT = ( + PREFETCH_METADATA_MISSES / ARC_MISSES * 100 if ARC_MISSES != 0 else 0 + ) # pools 
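    # `zpool list -pH` emits one tab-separated row per pool: -H drops the
    # header and -p requests exact byte counts. Older ZoL releases reject
    # -p, so exact_size records whether a second
    # `zpool get -pH size,alloc,free` pass is still needed per pool.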
exact_size = True - zpool_cmd = ['/sbin/zpool'] - zpool_cmd_list = zpool_cmd + ['list', '-p', '-H'] - std = {'stdout': subprocess.PIPE, 'stderr': subprocess.PIPE, 'universal_newlines': True} + zpool_cmd = ["/sbin/zpool"] + zpool_cmd_list = zpool_cmd + ["list", "-p", "-H"] + std = { + "stdout": subprocess.PIPE, + "stderr": subprocess.PIPE, + "universal_newlines": True, + } ## account for variations between ZoL zfs versions proc = subprocess.run(zpool_cmd_list, **std) - if (proc.returncode == 2): + if proc.returncode == 2: # -p option is not present in older versions # edit snmpd.conf zfs extend section to the following: # extend zfs /usr/bin/sudo /etc/snmp/zfs-linux # make sure to edit your sudo users (usually visudo) and add at the bottom: # snmp ALL=(ALL) NOPASSWD: /etc/snmp/zfs-linux - del zpool_cmd_list[zpool_cmd_list.index('-p')] # try removing -p to fix the issue + del zpool_cmd_list[ + zpool_cmd_list.index("-p") + ] # try removing -p to fix the issue proc = subprocess.run(zpool_cmd_list, **std) exact_size = False - if (proc.returncode != 0): + if proc.returncode != 0: return proc_err(zpool_cmd_list, proc) pools = [] - FIELDS = ['name', 'size', 'alloc', 'free', 'ckpoint', 'expandsz', 'frag', 'cap', 'dedup', 'health', 'altroot'] - if len(proc.stdout.splitlines()[0].split('\t')) == 10: - FIELDS.remove('ckpoint') - + FIELDS = [ + "name", + "size", + "alloc", + "free", + "ckpoint", + "expandsz", + "frag", + "cap", + "dedup", + "health", + "altroot", + ] + if len(proc.stdout.splitlines()[0].split("\t")) == 10: + FIELDS.remove("ckpoint") + for line in proc.stdout.splitlines(): - info = dict(zip(FIELDS, line.split('\t'))) + info = dict(zip(FIELDS, line.split("\t"))) - info['expandsz'] = 0 if info['expandsz'] == '-' else info['expandsz'] - info['frag'] = info['frag'].rstrip('%') - info['frag'] = 0 if info['frag'] == '-' else info['frag'] - info['dedup'] = info['dedup'].rstrip('x') - info['cap'] = info['cap'].rstrip('%') - if 'ckpoint' in info: - info['ckpoint'] = 0 if info['ckpoint'] == '-' else info['ckpoint'] + info["expandsz"] = 0 if info["expandsz"] == "-" else info["expandsz"] + info["frag"] = info["frag"].rstrip("%") + info["frag"] = 0 if info["frag"] == "-" else info["frag"] + info["dedup"] = info["dedup"].rstrip("x") + info["cap"] = info["cap"].rstrip("%") + if "ckpoint" in info: + info["ckpoint"] = 0 if info["ckpoint"] == "-" else info["ckpoint"] # zfs-06.5.11 fix if not exact_size: - zpool_cmd_get = zpool_cmd + ['get', '-pH', 'size,alloc,free', info['name']] + zpool_cmd_get = zpool_cmd + ["get", "-pH", "size,alloc,free", info["name"]] proc2 = subprocess.run(zpool_cmd_get, **std) - if (proc2.returncode != 0): + if proc2.returncode != 0: return proc_err(zpool_cmd_get, proc2) - info2 = dict([tuple(s.split('\t')[1:3]) for s in proc2.stdout.splitlines()]) - info['size'] = info2['size'] - info['alloc'] = info2['allocated'] - info['free'] = info2['free'] + info2 = dict([tuple(s.split("\t")[1:3]) for s in proc2.stdout.splitlines()]) + info["size"] = info2["size"] + info["alloc"] = info2["allocated"] + info["free"] = info2["free"] pools.append(info) res = { - 'deleted': DELETED, # ARC misc - 'evict_skip': EVICT_SKIP, - 'mutex_skip': MUTEX_SKIP, - 'recycle_miss': RECYCLE_MISS, - 'arc_size': ARC_SIZE, # ARC size - 'target_size_max': TARGET_SIZE_MAX, - 'target_size_min': TARGET_SIZE_MIN, - 'target_size': TARGET_SIZE, - 'target_size_per': TARGET_SIZE_PERCENT, - 'arc_size_per': ARC_SIZE_PERCENT, - 'target_size_arat': TARGET_SIZE_ADAPTIVE_RATIO, - 'min_size_per': MIN_SIZE_PERCENT, - 'mfu_size': 
MFU_SIZE, # ARC size breakdown - 'p': P, - 'rec_used_per': RECENTLY_USED_PERCENT, - 'freq_used_per': FREQUENTLY_USED_PERCENT, - 'arc_hits': ARC_HITS, # ARC efficiency - 'arc_misses': ARC_MISSES, - 'demand_data_hits': DEMAND_DATA_HITS, - 'demand_data_misses': DEMAND_DATA_MISSES, - 'demand_meta_hits': DEMAND_METADATA_HITS, - 'demand_meta_misses': DEMAND_METADATA_MISSES, - 'mfu_ghost_hits': MFU_GHOST_HITS, - 'mfu_hits': MFU_HITS, - 'mru_ghost_hits': MRU_GHOST_HITS, - 'mru_hits': MRU_HITS, - 'pre_data_hits': PREFETCH_DATA_HITS, - 'pre_data_misses': PREFETCH_DATA_MISSES, - 'pre_meta_hits': PREFETCH_METADATA_HITS, - 'pre_meta_misses': PREFETCH_METADATA_HITS, - 'anon_hits': ANON_HITS, - 'arc_accesses_total': ARC_ACCESSES_TOTAL, - 'demand_data_total': DEMAND_DATA_TOTAL, - 'pre_data_total': PREFETCH_DATA_TOTAL, - 'real_hits': REAL_HITS, - 'cache_hits_per': CACHE_HIT_PERCENT, # ARC efficiency percentages - 'cache_miss_per': CACHE_MISS_PERCENT, - 'actual_hit_per': ACTUAL_HIT_PERCENT, - 'data_demand_per': DATA_DEMAND_PERCENT, - 'data_pre_per': DATA_PREFETCH_PERCENT, - 'anon_hits_per': ANON_HITS_PERCENT, - 'mru_per': MRU_PERCENT, - 'mfu_per': MFU_PERCENT, - 'mru_ghost_per': MRU_GHOST_PERCENT, - 'mfu_ghost_per': MFU_GHOST_PERCENT, - 'demand_hits_per': DEMAND_HITS_PERCENT, - 'pre_hits_per': PREFETCH_HITS_PERCENT, - 'meta_hits_per': METADATA_HITS_PERCENT, - 'pre_meta_hits_per': PREFETCH_METADATA_HITS_PERCENT, - 'demand_misses_per': DEMAND_MISSES_PERCENT, - 'pre_misses_per': PREFETCH_MISSES_PERCENT, - 'meta_misses_per': METADATA_MISSES_PERCENT, - 'pre_meta_misses_per': PREFETCH_METADATA_MISSES_PERCENT, - 'pools': pools + "deleted": DELETED, # ARC misc + "evict_skip": EVICT_SKIP, + "mutex_skip": MUTEX_SKIP, + "recycle_miss": RECYCLE_MISS, + "arc_size": ARC_SIZE, # ARC size + "target_size_max": TARGET_SIZE_MAX, + "target_size_min": TARGET_SIZE_MIN, + "target_size": TARGET_SIZE, + "target_size_per": TARGET_SIZE_PERCENT, + "arc_size_per": ARC_SIZE_PERCENT, + "target_size_arat": TARGET_SIZE_ADAPTIVE_RATIO, + "min_size_per": MIN_SIZE_PERCENT, + "mfu_size": MFU_SIZE, # ARC size breakdown + "p": P, + "rec_used_per": RECENTLY_USED_PERCENT, + "freq_used_per": FREQUENTLY_USED_PERCENT, + "arc_hits": ARC_HITS, # ARC efficiency + "arc_misses": ARC_MISSES, + "demand_data_hits": DEMAND_DATA_HITS, + "demand_data_misses": DEMAND_DATA_MISSES, + "demand_meta_hits": DEMAND_METADATA_HITS, + "demand_meta_misses": DEMAND_METADATA_MISSES, + "mfu_ghost_hits": MFU_GHOST_HITS, + "mfu_hits": MFU_HITS, + "mru_ghost_hits": MRU_GHOST_HITS, + "mru_hits": MRU_HITS, + "pre_data_hits": PREFETCH_DATA_HITS, + "pre_data_misses": PREFETCH_DATA_MISSES, + "pre_meta_hits": PREFETCH_METADATA_HITS, + "pre_meta_misses": PREFETCH_METADATA_HITS, + "anon_hits": ANON_HITS, + "arc_accesses_total": ARC_ACCESSES_TOTAL, + "demand_data_total": DEMAND_DATA_TOTAL, + "pre_data_total": PREFETCH_DATA_TOTAL, + "real_hits": REAL_HITS, + "cache_hits_per": CACHE_HIT_PERCENT, # ARC efficiency percentages + "cache_miss_per": CACHE_MISS_PERCENT, + "actual_hit_per": ACTUAL_HIT_PERCENT, + "data_demand_per": DATA_DEMAND_PERCENT, + "data_pre_per": DATA_PREFETCH_PERCENT, + "anon_hits_per": ANON_HITS_PERCENT, + "mru_per": MRU_PERCENT, + "mfu_per": MFU_PERCENT, + "mru_ghost_per": MRU_GHOST_PERCENT, + "mfu_ghost_per": MFU_GHOST_PERCENT, + "demand_hits_per": DEMAND_HITS_PERCENT, + "pre_hits_per": PREFETCH_HITS_PERCENT, + "meta_hits_per": METADATA_HITS_PERCENT, + "pre_meta_hits_per": PREFETCH_METADATA_HITS_PERCENT, + "demand_misses_per": DEMAND_MISSES_PERCENT, + "pre_misses_per": 
PREFETCH_MISSES_PERCENT, + "meta_misses_per": METADATA_MISSES_PERCENT, + "pre_meta_misses_per": PREFETCH_METADATA_MISSES_PERCENT, + "pools": pools, } print(json.dumps(res)) return 0 -if __name__ == '__main__': + +if __name__ == "__main__": import sys + sys.exit(main(sys.argv[1:])) From 25fb357809a38138677972783b3956f2f9c4cdb0 Mon Sep 17 00:00:00 2001 From: Jellyfrog Date: Thu, 18 Mar 2021 20:07:25 +0100 Subject: [PATCH 166/332] Lint more files (#356) --- agent-local/bind | 6 +-- agent-local/check_mrpe | 7 ++- agent-local/dmi | 2 +- agent-local/nfsstats | 2 +- agent-local/rocks.sh | 1 + snmp/Openwrt/wlClients.sh | 9 ++-- snmp/Openwrt/wlFrequency.sh | 5 +-- snmp/Openwrt/wlNoiseFloor.sh | 5 +-- snmp/Openwrt/wlRate.sh | 13 +++--- snmp/Openwrt/wlSNR.sh | 13 +++--- snmp/apache-stats.sh | 3 +- snmp/backupninja.py | 26 +++++------ snmp/chip.sh | 6 +-- snmp/distro | 68 ++++++++++++++--------------- snmp/freeradius.sh | 83 ++++++++++++++++++------------------ snmp/gpsd | 3 +- snmp/icecast-stats.sh | 2 +- snmp/mdadm | 5 ++- snmp/ntp-client | 3 +- snmp/ntp-server.sh | 10 +++-- snmp/phpfpmsp | 12 +++--- snmp/pi-hole | 4 +- snmp/powermon-snmp.py | 4 +- snmp/shoutcast.php | 1 + snmp/voipmon-stats.sh | 2 +- 25 files changed, 148 insertions(+), 147 deletions(-) diff --git a/agent-local/bind b/agent-local/bind index ed294e3e2..f30597c0d 100755 --- a/agent-local/bind +++ b/agent-local/bind @@ -4,18 +4,18 @@ # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. -# +# # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. -# +# # You should have received a copy of the GNU General Public License # along with this program. If not, see . stats="/etc/bind/named.stats" echo "<<>>" -> $stats +true > $stats rndc stats && cat $stats diff --git a/agent-local/check_mrpe b/agent-local/check_mrpe index d21b6d9a1..ab6e809ab 100644 --- a/agent-local/check_mrpe +++ b/agent-local/check_mrpe @@ -14,7 +14,7 @@ EXITCODE=3 while getopts "Vha:H:p:" opt; do case $opt in - V) printf "check_mrpe v$VERSION\n" + V) printf 'check_mrpe v%s\n' "$VERSION" exit 0 ;; h) hflag=1 @@ -25,8 +25,7 @@ while getopts "Vha:H:p:" opt; do H) Hflag=1 Hval="$OPTARG" ;; - p) pflag=1 - pval="$OPTARG" + p) pval="$OPTARG" ;; \?) 
hflag=1 EXITCODE=0 @@ -80,4 +79,4 @@ for i in $($BIN_NC -w 1 "$Hval" "$pval" 2>&1 | $BIN_SED '/^<<>>/,/^<<>>' # requires dmidecode for FIELD in bios-vendor bios-version bios-release-date system-manufacturer system-product-name system-version system-serial-number system-uuid baseboard-manufacturer baseboard-product-name baseboard-version baseboard-serial-number baseboard-asset-tag chassis-manufacturer chassis-type chassis-version chassis-serial-number chassis-asset-tag processor-family processor-manufacturer processor-version processor-frequency do - echo $FIELD=$(dmidecode -s $FIELD | grep -v '^#') + echo $FIELD="$(dmidecode -s $FIELD | grep -v '^#')" done diff --git a/agent-local/nfsstats b/agent-local/nfsstats index 404e2fd06..a43afe786 100755 --- a/agent-local/nfsstats +++ b/agent-local/nfsstats @@ -21,7 +21,7 @@ LOG_NEW='/var/cache/librenms/nfsstats_new' $BIN_NFSSTAT -$CFG_NFSVER -n -l | $BIN_TR -s " " | $BIN_CUT -d ' ' -f 5 | $BIN_GREP -v '^$' > $LOG_NEW 2>&1 $BIN_PASTE $LOG_NEW $LOG_OLD | while read a b ; do - echo $(($a - $b)) + echo $(($a - $b)) done $BIN_RM $LOG_OLD 2>&1 diff --git a/agent-local/rocks.sh b/agent-local/rocks.sh index 9ff1ec35e..7636a0274 100755 --- a/agent-local/rocks.sh +++ b/agent-local/rocks.sh @@ -7,6 +7,7 @@ # @author SvennD # required +# shellcheck disable=SC1091 source /etc/profile.d/sge-binaries.sh; QSTAT="/opt/gridengine/bin/linux-x64/qstat" diff --git a/snmp/Openwrt/wlClients.sh b/snmp/Openwrt/wlClients.sh index 5becad170..f454e592f 100755 --- a/snmp/Openwrt/wlClients.sh +++ b/snmp/Openwrt/wlClients.sh @@ -1,5 +1,5 @@ #!/bin/sh - + # wlClients.sh # Counts connected (associated) Wi-Fi devices # Arguments: targed interface. Assumes all interfaces if no argument @@ -12,16 +12,13 @@ if [ $# -gt 1 ]; then fi # Get path to this script -scriptdir=$(dirname $(readlink -f -- "$0")) +scriptdir=$(dirname "$(readlink -f -- "$0")") -# Get hostname, interface list. Set target, which is name returned for interface -hostname=$(/bin/uname -n) +# Get interface list. Set target, which is name returned for interface if [ "$1" ]; then interfaces=$1 - target=$1 else interfaces=$(cat "$scriptdir"/wlInterfaces.txt | cut -f 1 -d",") - target=wlan fi # Count associated devices diff --git a/snmp/Openwrt/wlFrequency.sh b/snmp/Openwrt/wlFrequency.sh index 83e68b1d1..658459ab5 100755 --- a/snmp/Openwrt/wlFrequency.sh +++ b/snmp/Openwrt/wlFrequency.sh @@ -1,5 +1,5 @@ #!/bin/sh - + # wlFrequency.sh # Returns wlFrequency, in MHz (not channel number) # Arguments: targed interface @@ -11,8 +11,7 @@ if [ $# -ne 1 ]; then exit 1 fi -# Get hostname, extract frequency -hostname=$(/bin/uname -n) +# Extract frequency frequency=$(/usr/sbin/iw dev "$1" info | /bin/grep channel | /usr/bin/cut -f 2- -s -d" " | /usr/bin/cut -f 2- -s -d"(" | /usr/bin/cut -f 1 -s -d" ") # Return snmp result diff --git a/snmp/Openwrt/wlNoiseFloor.sh b/snmp/Openwrt/wlNoiseFloor.sh index 47d4b4ec2..a3880cf34 100755 --- a/snmp/Openwrt/wlNoiseFloor.sh +++ b/snmp/Openwrt/wlNoiseFloor.sh @@ -1,5 +1,5 @@ #!/bin/sh - + # wlNoiseFloor.sh # Returns wlNoiseFloor, in dBm # Arguments: targed interface @@ -11,9 +11,8 @@ if [ $# -ne 1 ]; then exit 1 fi -# Get hostname, extract noise floor. Note, all associated stations have the same value, so just grab the first one +# Extract noise floor. Note, all associated stations have the same value, so just grab the first one # Use tail, not head (i.e. last line, not first), as head exits immediately, breaks the pipe to cut! 
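# For context, `iwinfo <iface> assoclist` lines look roughly like
#   AA:BB:CC:DD:EE:FF  -61 dBm / -95 dBm (SNR 34)  120 ms ago
# so the cut chain below isolates the figure between "/" and "(", i.e. the
# noise floor in dBm.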
-hostname=$(/bin/uname -n) noise=$(/usr/bin/iwinfo "$1" assoclist | /usr/bin/cut -s -d "/" -f 2 | /usr/bin/cut -s -d "(" -f 1 | /usr/bin/cut -s -d " " -f 2 | /usr/bin/tail -1) # Return snmp result diff --git a/snmp/Openwrt/wlRate.sh b/snmp/Openwrt/wlRate.sh index 08b68b1bd..6b9072435 100755 --- a/snmp/Openwrt/wlRate.sh +++ b/snmp/Openwrt/wlRate.sh @@ -1,5 +1,5 @@ #!/bin/sh - + # wlRate.sh # Returns wlRate, bit rate in Mbit/s # Arguments: @@ -14,17 +14,16 @@ if [ $# -ne 3 ]; then exit 1 fi -# Get hostname, calculate result. Sum just for debug, and have to return integer +# Calculate result. Sum just for debug, and have to return integer # => If not integer (e.g. 2.67e+07), LibreNMS will drop the exponent (result, 2.67 bits/sec!) -hostname=$(/bin/uname -n) ratelist=$(/usr/sbin/iw dev "$1" station dump | /bin/grep "$2 bitrate" | /usr/bin/cut -f 2 -s -d" ") -if [ "$3" == "sum" ]; then +if [ "$3" = "sum" ]; then result=$(/bin/echo "$ratelist" | /usr/bin/awk -F ':' '{sum += $2} END {printf "%d\n", 1000000*sum}') -elif [ "$3" == "avg" ]; then +elif [ "$3" = "avg" ]; then result=$(/bin/echo "$ratelist" | /usr/bin/awk -F ':' '{sum += $2} END {printf "%d\n", 1000000*sum/NR}') -elif [ "$3" == "min" ]; then +elif [ "$3" = "min" ]; then result=$(/bin/echo "$ratelist" | /usr/bin/awk -F ':' 'NR == 1 || $2 < min {min = $2} END {printf "%d\n", 1000000*min}') -elif [ "$3" == "max" ]; then +elif [ "$3" = "max" ]; then result=$(/bin/echo "$ratelist" | /usr/bin/awk -F ':' 'NR == 1 || $2 > max {max = $2} END {printf "%d\n", 1000000*max}') fi diff --git a/snmp/Openwrt/wlSNR.sh b/snmp/Openwrt/wlSNR.sh index d19283d82..2378c1aac 100755 --- a/snmp/Openwrt/wlSNR.sh +++ b/snmp/Openwrt/wlSNR.sh @@ -1,5 +1,5 @@ #!/bin/sh - + # wlSNR.sh # Returns wlSNR, Signal-to-Noise ratio in dB # Arguments: @@ -13,16 +13,15 @@ if [ $# -ne 2 ]; then exit 1 fi -# Get hostname, calculate result. Sum just for debug, and return integer (safest / easiest) -hostname=$(/bin/uname -n) +# Calculate result. Sum just for debug, and return integer (safest / easiest) snrlist=$(/usr/bin/iwinfo "$1" assoclist | /usr/bin/cut -s -d "/" -f 2 | /usr/bin/cut -s -d "(" -f 2 | /usr/bin/cut -s -d " " -f 2 | /usr/bin/cut -s -d ")" -f 1) -if [ "$2" == "sum" ]; then +if [ "$2" = "sum" ]; then result=$(/bin/echo "$snrlist" | /usr/bin/awk -F ':' '{sum += $1} END {printf "%d\n", sum}') -elif [ "$2" == "avg" ]; then +elif [ "$2" = "avg" ]; then result=$(/bin/echo "$snrlist" | /usr/bin/awk -F ':' '{sum += $1} END {printf "%d\n", sum/NR}') -elif [ "$2" == "min" ]; then +elif [ "$2" = "min" ]; then result=$(/bin/echo "$snrlist" | /usr/bin/awk -F ':' 'NR == 1 || $1 < min {min = $1} END {printf "%d\n", min}') -elif [ "$2" == "max" ]; then +elif [ "$2" = "max" ]; then result=$(/bin/echo "$snrlist" | /usr/bin/awk -F ':' 'NR == 1 || $1 > max {max = $1} END {printf "%d\n", max}') fi diff --git a/snmp/apache-stats.sh b/snmp/apache-stats.sh index 9b677fd3a..be14a6917 100755 --- a/snmp/apache-stats.sh +++ b/snmp/apache-stats.sh @@ -12,7 +12,7 @@ PATH=/sbin:/bin:/usr/sbin:/usr/bin function debugecho() { if [ ${#Debug} -gt 0 ]; then - echo debug: $@ + echo debug: "$@" fi } @@ -23,6 +23,7 @@ function debugecho() { Tmp_File=/tmp/apache_status # Debug=on; use environment, i.e. 
Debug=on apache-stats.sh +# shellcheck disable=SC2153 if [ "${DEBUG}" != "" ]; then Debug=${DEBUG} else diff --git a/snmp/backupninja.py b/snmp/backupninja.py index 80cf55f7f..7ae1b46a9 100644 --- a/snmp/backupninja.py +++ b/snmp/backupninja.py @@ -20,20 +20,20 @@ if not os.path.isfile(logfile): error_string = "file unavailable" error = 1 - break +else: + with io.open(logfile, "r") as f: + for line in reversed(list(f)): + match = re.search( + "^(.*) [a-zA-Z]*: FINISHED: ([0-9]+) actions run. ([0-9]+) fatal. ([0-9]+) error. ([0-9]+) warning.$", + line, + ) + if match: + backupninja_datas["last_actions"] = int(match.group(2)) + backupninja_datas["last_fatal"] = int(match.group(3)) + backupninja_datas["last_error"] = int(match.group(4)) + backupninja_datas["last_warning"] = int(match.group(5)) + break -with io.open(logfile, "r") as f: - for line in reversed(list(f)): - match = re.search( - "^(.*) [a-zA-Z]*: FINISHED: ([0-9]+) actions run. ([0-9]+) fatal. ([0-9]+) error. ([0-9]+) warning.$", - line, - ) - if match: - backupninja_datas["last_actions"] = int(match.group(2)) - backupninja_datas["last_fatal"] = int(match.group(3)) - backupninja_datas["last_error"] = int(match.group(4)) - backupninja_datas["last_warning"] = int(match.group(5)) - break output = { "version": version, diff --git a/snmp/chip.sh b/snmp/chip.sh index 4dc2fac05..ff2cebbb4 100644 --- a/snmp/chip.sh +++ b/snmp/chip.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Based on https://github.com/Photonicsguy/CHIP +# Based on https://github.com/Photonicsguy/CHIP # Enable ADC registers i2cset -y -f 0 0x34 0x82 0xff @@ -67,7 +67,7 @@ if [ $STATUS_BATCON == 1 ]; then BAT_PERCENT=$(printf "%d" "$REG") else VBAT=0 - BATT_CUR=0 + #BATT_CUR=0 BAT_PERCENT=0 fi @@ -82,6 +82,6 @@ echo $ACIN_C echo $VBUS echo $VBUS_C echo $VBAT -echo $(echo "$BAT_C-$BAT_D"|bc) +echo "$(echo "$BAT_C-$BAT_D"|bc)" echo $BAT_PERCENT echo $STATUS_CHARGING diff --git a/snmp/distro b/snmp/distro index 69cd452a9..f481bbee6 100755 --- a/snmp/distro +++ b/snmp/distro @@ -1,24 +1,24 @@ #!/usr/bin/env sh # Detects which OS and if it is Linux then it will detect which Linux Distribution. 
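# The rewrites below are the usual shellcheck SC2006 fix: legacy backticks
# become $(...), which nests cleanly and quotes predictably, e.g.
#   REV=`uname -r`   ->   REV=$(uname -r)
# Variables that were assigned but never read (MACH, KERNEL, PSEUDONAME) are
# commented out instead of deleted.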
-OS=`uname -s` -REV=`uname -r` -MACH=`uname -m` +OS=$(uname -s) +REV=$(uname -r) +#MACH=$(uname -m) if [ "${OS}" = "SunOS" ] ; then OS=Solaris - ARCH=`uname -p` - OSSTR="${OS} ${REV}(${ARCH} `uname -v`)" + ARCH=$(uname -p) + OSSTR="${OS} ${REV}(${ARCH} $(uname -v))" elif [ "${OS}" = "AIX" ] ; then - OSSTR="${OS} `oslevel` (`oslevel -r`)" + OSSTR="${OS} $(oslevel) ($(oslevel -r))" elif [ "${OS}" = "Linux" ] ; then - KERNEL=`uname -r` + #KERNEL=$(uname -r) if [ -f /etc/fedora-release ]; then DIST=$(cat /etc/fedora-release | awk '{print $1}') - REV=`cat /etc/fedora-release | sed s/.*release\ // | sed s/\ .*//` + REV=$(cat /etc/fedora-release | sed s/.*release\ // | sed s/\ .*//) elif [ -f /etc/redhat-release ] ; then DIST=$(cat /etc/redhat-release | awk '{print $1}') @@ -29,8 +29,8 @@ elif [ "${OS}" = "Linux" ] ; then DIST="CloudLinux" elif [ "${DIST}" = "Mandriva" ]; then DIST="Mandriva" - PSEUDONAME=`cat /etc/mandriva-release | sed s/.*\(// | sed s/\)//` - REV=`cat /etc/mandriva-release | sed s/.*release\ // | sed s/\ .*//` + #PSEUDONAME=$(cat /etc/mandriva-release | sed s/.*\(// | sed s/\)//) + REV=$(cat /etc/mandriva-release | sed s/.*release\ // | sed s/\ .*//) elif [ -f /etc/oracle-release ]; then DIST="Oracle" elif [ -f /etc/rockstor-release ]; then @@ -39,39 +39,39 @@ elif [ "${OS}" = "Linux" ] ; then DIST="RedHat" fi - PSEUDONAME=`cat /etc/redhat-release | sed s/.*\(// | sed s/\)//` - REV=`cat /etc/redhat-release | sed s/.*release\ // | sed s/\ .*//` + #PSEUDONAME=$(cat /etc/redhat-release | sed s/.*\(// | sed s/\)//) + REV=$(cat /etc/redhat-release | sed s/.*release\ // | sed s/\ .*//) elif [ -f /etc/mandrake-release ] ; then DIST='Mandrake' - PSEUDONAME=`cat /etc/mandrake-release | sed s/.*\(// | sed s/\)//` - REV=`cat /etc/mandrake-release | sed s/.*release\ // | sed s/\ .*//` + #PSEUDONAME=$(cat /etc/mandrake-release | sed s/.*\(// | sed s/\)//) + REV=$(cat /etc/mandrake-release | sed s/.*release\ // | sed s/\ .*//) elif [ -f /etc/devuan_version ] ; then - DIST="Devuan `cat /etc/devuan_version`" + DIST="Devuan $(cat /etc/devuan_version)" REV="" elif [ -f /etc/debian_version ] ; then - DIST="Debian `cat /etc/debian_version`" + DIST="Debian $(cat /etc/debian_version)" REV="" IGNORE_OS_RELEASE=1 if [ -f /usr/bin/lsb_release ] ; then - ID=`lsb_release -i | awk -F ':' '{print $2}' | sed 's/ //g'` + ID=$(lsb_release -i | awk -F ':' '{print $2}' | sed 's/ //g') fi if [ "${ID}" = "Raspbian" ] ; then - DIST="Raspbian `cat /etc/debian_version`" + DIST="Raspbian $(cat /etc/debian_version)" fi if [ -f /usr/bin/pveversion ]; then - DIST="${DIST}/PVE `/usr/bin/pveversion | cut -d '/' -f 2`" + DIST="${DIST}/PVE $(/usr/bin/pveversion | cut -d '/' -f 2)" fi if [ -f /usr/bin/pmgversion ]; then # pmgversion requires root permissions to run, please add NOPASSWD setting to visudo. - DIST="${DIST}/PMG `sudo /usr/bin/pmgversion | cut -d '/' -f 2`" + DIST="${DIST}/PMG $(sudo /usr/bin/pmgversion | cut -d '/' -f 2)" fi if [ -f /etc/dogtag ]; then - DIST=`cat /etc/dogtag` + DIST=$(cat /etc/dogtag) fi - + elif [ -f /etc/gentoo-release ] ; then DIST="Gentoo" REV=$(tr -d '[[:alpha:]]' /dev/null 2>&1 + REV=$(nvram show | grep buildno= | egrep -o '[0-9].[0-9].[0-9]') > /dev/null 2>&1 fi fi # try standardized os version methods - if [ -f /etc/os-release -a "${IGNORE_OS_RELEASE}" != 1 ] ; then + if [ -f /etc/os-release ] && [ "${IGNORE_OS_RELEASE}" != 1 ] ; then . 
/etc/os-release STD_DIST="$NAME" STD_REV="$VERSION_ID" - elif [ -f /etc/lsb-release -a "${IGNORE_LSB}" != 1 ] ; then + elif [ -f /etc/lsb-release ] && [ "${IGNORE_LSB}" != 1 ] ; then STD_DIST=$(lsb_release -si) STD_REV=$(lsb_release -sr) fi @@ -133,18 +133,18 @@ elif [ "${OS}" = "Linux" ] ; then elif [ "${OS}" = "Darwin" ] ; then if [ -f /usr/bin/sw_vers ] ; then - OSSTR=`/usr/bin/sw_vers|grep -v Build|sed 's/^.*:.//'| tr "\n" ' '` + OSSTR=$(/usr/bin/sw_vers|grep -v Build|sed 's/^.*:.//'| tr "\n" ' ') fi elif [ "${OS}" = "FreeBSD" ] ; then if [ -f /etc/version ] ; then DIST=$(cat /etc/version | cut -d'-' -f 1) if [ "${DIST}" = "FreeNAS" ]; then - OSSTR=`cat /etc/version | cut -d' ' -f 1` + OSSTR=$(cat /etc/version | cut -d' ' -f 1) fi else - OSSTR=`/usr/bin/uname -mior` + OSSTR=$(/usr/bin/uname -mior) fi fi -echo ${OSSTR} +echo "${OSSTR}" diff --git a/snmp/freeradius.sh b/snmp/freeradius.sh index 8a9423b38..6a0a29fb6 100644 --- a/snmp/freeradius.sh +++ b/snmp/freeradius.sh @@ -11,6 +11,7 @@ RADIUS_PORT='18121' RADIUS_KEY='adminsecret' if [ -f $CONFIGFILE ]; then + # shellcheck disable=SC1090 . $CONFIGFILE fi @@ -27,44 +28,44 @@ fi RESULT=$(echo "$RADIUS_STATUS_CMD" | $BIN_RADCLIENT -x $RADIUS_SERVER:$RADIUS_PORT status $RADIUS_KEY) -echo "$RESULT" | grep -o 'FreeRADIUS-Total-Access-Requests = [[:digit:]]*' -echo "$RESULT" | grep -o 'FreeRADIUS-Total-Access-Accepts = [[:digit:]]*' -echo "$RESULT" | grep -o 'FreeRADIUS-Total-Access-Rejects = [[:digit:]]*' -echo "$RESULT" | grep -o 'FreeRADIUS-Total-Access-Challenges = [[:digit:]]*' -echo "$RESULT" | grep -o 'FreeRADIUS-Total-Auth-Responses = [[:digit:]]*' -echo "$RESULT" | grep -o 'FreeRADIUS-Total-Auth-Duplicate-Requests = [[:digit:]]*' -echo "$RESULT" | grep -o 'FreeRADIUS-Total-Auth-Malformed-Requests = [[:digit:]]*' -echo "$RESULT" | grep -o 'FreeRADIUS-Total-Auth-Invalid-Requests = [[:digit:]]*' -echo "$RESULT" | grep -o 'FreeRADIUS-Total-Auth-Dropped-Requests = [[:digit:]]*' -echo "$RESULT" | grep -o 'FreeRADIUS-Total-Auth-Unknown-Types = [[:digit:]]*' -echo "$RESULT" | grep -o 'FreeRADIUS-Total-Accounting-Requests = [[:digit:]]*' -echo "$RESULT" | grep -o 'FreeRADIUS-Total-Accounting-Responses = [[:digit:]]*' -echo "$RESULT" | grep -o 'FreeRADIUS-Total-Acct-Duplicate-Requests = [[:digit:]]*' -echo "$RESULT" | grep -o 'FreeRADIUS-Total-Acct-Malformed-Requests = [[:digit:]]*' -echo "$RESULT" | grep -o 'FreeRADIUS-Total-Acct-Invalid-Requests = [[:digit:]]*' -echo "$RESULT" | grep -o 'FreeRADIUS-Total-Acct-Dropped-Requests = [[:digit:]]*' -echo "$RESULT" | grep -o 'FreeRADIUS-Total-Acct-Unknown-Types = [[:digit:]]*' -echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Access-Requests = [[:digit:]]*' -echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Access-Accepts = [[:digit:]]*' -echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Access-Rejects = [[:digit:]]*' -echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Access-Challenges = [[:digit:]]*' -echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Auth-Responses = [[:digit:]]*' -echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Auth-Duplicate-Requests = [[:digit:]]*' -echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Auth-Malformed-Requests = [[:digit:]]*' -echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Auth-Invalid-Requests = [[:digit:]]*' -echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Auth-Dropped-Requests = [[:digit:]]*' -echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Auth-Unknown-Types = [[:digit:]]*' -echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Accounting-Requests = [[:digit:]]*' 
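# Every counter is pulled the same way: grep -o keeps just the matching
# "FreeRADIUS-<attribute> = <digits>" fragment of the radclient status reply,
# yielding one value line per attribute for the SNMP extend.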
-echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Accounting-Responses = [[:digit:]]*' -echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Acct-Duplicate-Requests = [[:digit:]]*' -echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Acct-Malformed-Requests = [[:digit:]]*' -echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Acct-Invalid-Requests = [[:digit:]]*' -echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Acct-Dropped-Requests = [[:digit:]]*' -echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Acct-Unknown-Types = [[:digit:]]*' -echo "$RESULT" | grep -o 'FreeRADIUS-Queue-Len-Internal = [[:digit:]]*' -echo "$RESULT" | grep -o 'FreeRADIUS-Queue-Len-Proxy = [[:digit:]]*' -echo "$RESULT" | grep -o 'FreeRADIUS-Queue-Len-Auth = [[:digit:]]*' -echo "$RESULT" | grep -o 'FreeRADIUS-Queue-Len-Acct = [[:digit:]]*' -echo "$RESULT" | grep -o 'FreeRADIUS-Queue-Len-Detail = [[:digit:]]*' -echo "$RESULT" | grep -o 'FreeRADIUS-Queue-PPS-In = [[:digit:]]*' -echo "$RESULT" | grep -o 'FreeRADIUS-Queue-PPS-Out = [[:digit:]]*' +echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Access-Requests = [[:digit:]]*' +echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Access-Accepts = [[:digit:]]*' +echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Access-Rejects = [[:digit:]]*' +echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Access-Challenges = [[:digit:]]*' +echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Auth-Responses = [[:digit:]]*' +echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Auth-Duplicate-Requests = [[:digit:]]*' +echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Auth-Malformed-Requests = [[:digit:]]*' +echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Auth-Invalid-Requests = [[:digit:]]*' +echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Auth-Dropped-Requests = [[:digit:]]*' +echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Auth-Unknown-Types = [[:digit:]]*' +echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Accounting-Requests = [[:digit:]]*' +echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Accounting-Responses = [[:digit:]]*' +echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Acct-Duplicate-Requests = [[:digit:]]*' +echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Acct-Malformed-Requests = [[:digit:]]*' +echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Acct-Invalid-Requests = [[:digit:]]*' +echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Acct-Dropped-Requests = [[:digit:]]*' +echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Acct-Unknown-Types = [[:digit:]]*' +echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Proxy-Access-Requests = [[:digit:]]*' +echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Proxy-Access-Accepts = [[:digit:]]*' +echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Proxy-Access-Rejects = [[:digit:]]*' +echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Proxy-Access-Challenges = [[:digit:]]*' +echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Proxy-Auth-Responses = [[:digit:]]*' +echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Proxy-Auth-Duplicate-Requests = [[:digit:]]*' +echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Proxy-Auth-Malformed-Requests = [[:digit:]]*' +echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Proxy-Auth-Invalid-Requests = [[:digit:]]*' +echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Proxy-Auth-Dropped-Requests = [[:digit:]]*' +echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Proxy-Auth-Unknown-Types = [[:digit:]]*' +echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Proxy-Accounting-Requests = [[:digit:]]*' +echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Proxy-Accounting-Responses = [[:digit:]]*' +echo 
"$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Proxy-Acct-Duplicate-Requests = [[:digit:]]*' +echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Proxy-Acct-Malformed-Requests = [[:digit:]]*' +echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Proxy-Acct-Invalid-Requests = [[:digit:]]*' +echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Proxy-Acct-Dropped-Requests = [[:digit:]]*' +echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Proxy-Acct-Unknown-Types = [[:digit:]]*' +echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Queue-Len-Internal = [[:digit:]]*' +echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Queue-Len-Proxy = [[:digit:]]*' +echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Queue-Len-Auth = [[:digit:]]*' +echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Queue-Len-Acct = [[:digit:]]*' +echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Queue-Len-Detail = [[:digit:]]*' +echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Queue-PPS-In = [[:digit:]]*' +echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Queue-PPS-Out = [[:digit:]]*' diff --git a/snmp/gpsd b/snmp/gpsd index eed38c4bc..913f43d95 100755 --- a/snmp/gpsd +++ b/snmp/gpsd @@ -18,12 +18,13 @@ BIN_PYTHON='/usr/bin/env python' # Check for config file CONFIG=$0".conf" if [ -f "$CONFIG" ]; then + # shellcheck disable=SC1090 . "$CONFIG" fi # Create Temp File TMPFILE=$(mktemp) -trap "rm -f $TMPFILE" 0 2 3 15 +trap 'rm -f $TMPFILE' 0 2 3 15 # Write GPSPIPE Data to Temp File $BIN_GPIPE -w -n 20 > "$TMPFILE" diff --git a/snmp/icecast-stats.sh b/snmp/icecast-stats.sh index 541c174c8..e373f6f97 100644 --- a/snmp/icecast-stats.sh +++ b/snmp/icecast-stats.sh @@ -5,7 +5,7 @@ used_memory=$(ps -U icecast -o rsz | awk 'FNR==2{print}') cpu_load=$(ps -U icecast -o %cpu | awk 'FNR==2{print}') pid=$(pidof icecast) -total_files=$(ls -l /proc/"${pid}"/fd | wc -l) +total_files=$(find /proc/"${pid}"/fd | wc -l) echo "Used Memory=""$used_memory" echo "CPU Load=""$cpu_load" diff --git a/snmp/mdadm b/snmp/mdadm index 8565f8d69..5e820c808 100755 --- a/snmp/mdadm +++ b/snmp/mdadm @@ -7,7 +7,8 @@ REALPATH=/usr/bin/realpath CONFIGFILE=/etc/snmp/mdadm.conf if [ -f $CONFIGFILE ] ; then - . $CONFIGFILE + # shellcheck disable=SC1090 + . $CONFIGFILE fi VERSION=1 @@ -19,7 +20,7 @@ OUTPUT_DATA='[' # use 'ls' command to check if md blocks exist if $LS /dev/md?* 1> /dev/null 2>&1 ; then for ARRAY_BLOCKDEVICE in $($LS -1 /dev/md?*) ; do - RAID="/sys/block/"$($BASENAME $($REALPATH "$ARRAY_BLOCKDEVICE")) + RAID="/sys/block/"$($BASENAME "$($REALPATH "$ARRAY_BLOCKDEVICE")") # ignore arrays with no slaves if [ -z "$($LS -1 "$RAID"/slaves 2> /dev/null)" ] ; then diff --git a/snmp/ntp-client b/snmp/ntp-client index 0df9ee07b..eccb5e50c 100755 --- a/snmp/ntp-client +++ b/snmp/ntp-client @@ -13,13 +13,12 @@ # Don't change anything unless you know what are you doing # ################################################################ BIN_NTPQ='/usr/bin/env ntpq' -BIN_NTPD='/usr/bin/env ntpd' BIN_GREP='/usr/bin/env grep' BIN_AWK='/usr/bin/env awk' -BIN_HEAD='/usr/bin/env head' CONFIG=$0".conf" if [ -f "$CONFIG" ]; then + # shellcheck disable=SC1090 . "$CONFIG" fi diff --git a/snmp/ntp-server.sh b/snmp/ntp-server.sh index 6fa2f6908..ba1af1593 100755 --- a/snmp/ntp-server.sh +++ b/snmp/ntp-server.sh @@ -3,7 +3,7 @@ # Alternatively you can put them in $0.conf, meaning if you've named # this script ntp-client.sh then it must go in ntp-client.sh.conf . 
# -# NTPQV output version of "ntpq -c rv" +# NTPQV output version of "ntpq -c rv" # p1 DD-WRT and some other outdated linux distros # p11 FreeBSD 11 and any linux distro that is up to date # @@ -16,7 +16,8 @@ CONFIGFILE=/etc/snmp/ntp-server.conf BIN_ENV='/usr/bin/env' if [ -f $CONFIGFILE ] ; then - . $CONFIGFILE + # shellcheck disable=SC1090 + . $CONFIGFILE fi BIN_NTPD="$BIN_ENV ntpd" @@ -34,7 +35,8 @@ NTPQV="p11" ################################################################ CONFIG=$0".conf" if [ -f "$CONFIG" ]; then - . "$CONFIG" + # shellcheck disable=SC1090 + . "$CONFIG" fi VERSION=1 @@ -75,7 +77,7 @@ IGNOREDPACKETS=$(echo "$CMD2" | $BIN_AWK -F ' ' '{print $7}') RECEIVEDPACKETS=$(echo "$CMD2" | $BIN_AWK -F ' ' '{print $8}') PACKETSSENT=$(echo "$CMD2" | $BIN_AWK -F ' ' '{print $9}') PACKETSENDFAILURES=$(echo "$CMD2" | $BIN_AWK -F ' ' '{print $10}') -INPUTWAKEUPS=$(echo "$CMD2" | $BIN_AWK -F ' ' '{print $11}') +#INPUTWAKEUPS=$(echo "$CMD2" | $BIN_AWK -F ' ' '{print $11}') USEFULINPUTWAKEUPS=$(echo "$CMD2" | $BIN_AWK -F ' ' '{print $12}') echo '{"data":{"offset":"'"$OFFSET"\ diff --git a/snmp/phpfpmsp b/snmp/phpfpmsp index a4d7a4339..6fbf22c57 100644 --- a/snmp/phpfpmsp +++ b/snmp/phpfpmsp @@ -52,13 +52,13 @@ opts="" # Contributed by @safeie with PR #276 # Modified to work as a SNMP extend by Zane C. Bowers-Hadley -declare -A phpfpm_urls=() -declare -A phpfpm_curl_opts=() +#declare -A phpfpm_urls=() +#declare -A phpfpm_curl_opts=() # _update_every is a special variable - it holds the number of seconds # between the calls of the _update() function -phpfpm_update_every= -phpfpm_priority=60000 +#phpfpm_update_every= +#phpfpm_priority=60000 declare -a phpfpm_response=() phpfpm_pool="" @@ -78,8 +78,8 @@ phpfpm_slow_requests=0 # local opts="${1}" url="${2}" - phpfpm_response=($(curl -Ss "${opts}" "${url}")) - [ $? -ne 0 -o "${#phpfpm_response[@]}" -eq 0 ] && exit 1 + phpfpm_response=("$(curl -Ss "${opts}" "${url}")") + [ $? -ne 0 ] || [ "${#phpfpm_response[@]}" -eq 0 ] && exit 1 if [[ "${phpfpm_response[0]}" != "pool:" \ || "${phpfpm_response[2]}" != "process" \ diff --git a/snmp/pi-hole b/snmp/pi-hole index 342ef105b..2a7682f9f 100644 --- a/snmp/pi-hole +++ b/snmp/pi-hole @@ -12,11 +12,13 @@ PICONFIGFILE='/etc/pihole/setupVars.conf' DHCPLEASEFILE='/etc/pihole/dhcp.leases' if [ -f $CONFIGFILE ]; then + # shellcheck disable=SC1090 . $CONFIGFILE fi # read in pi-hole variables for DHCP range if [ -f $PICONFIGFILE ]; then + # shellcheck disable=SC1090 . 
$PICONFIGFILE fi @@ -74,7 +76,7 @@ debug() { fi if [ -f $PICONFIGFILE ]; then echo '[ok] Pi-Hole config file exists, DHCP stats will be captured if scope active' - else + else echo '[error] Pi-Hole config file does not exist, DHCP stats will not be captured if used' fi if [ -f $DHCPLEASEFILE ]; then diff --git a/snmp/powermon-snmp.py b/snmp/powermon-snmp.py index e280fe710..d9f179c1b 100755 --- a/snmp/powermon-snmp.py +++ b/snmp/powermon-snmp.py @@ -98,7 +98,7 @@ + " [-v|--verbose] [-w|--warnings] | -l|--list-methods | -h|--help" ) methods = ["sensors", "hpasmcli"] -# costPerkWh = 0.15 # <<<< UNCOMMENT +# costPerkWh = 0.15 # <<<< CHANGE ### General functions @@ -352,7 +352,7 @@ def getHPASMData(): # Get data data = getData(method) data["supply"] = {} -data["supply"]["rate"] = costPerkWh +data["supply"]["rate"] = costPerkWh # pylint: disable=E0602 # Top-level reading # CUSTOMISE THIS FOR YOUR HOST diff --git a/snmp/shoutcast.php b/snmp/shoutcast.php index 637d3abe4..05ee52d47 100755 --- a/snmp/shoutcast.php +++ b/snmp/shoutcast.php @@ -39,6 +39,7 @@ function get_data($host, $port) { + $rawdata = null; $fp = @fsockopen($host, $port, $errno, $errstr, 5); if (!$fp) { $connect = 0; diff --git a/snmp/voipmon-stats.sh b/snmp/voipmon-stats.sh index 1dcab4d9f..66f943941 100644 --- a/snmp/voipmon-stats.sh +++ b/snmp/voipmon-stats.sh @@ -5,7 +5,7 @@ used_memory=$(ps -C voipmonitor -o rsz | awk 'FNR==2 {print}') cpu_load=$(ps -C voipmonitor -o %cpu | awk 'FNR==2 {print}') pid=$(pidof voipmonitor) -total_files=$(ls -l /proc/"${pid}"/fd | wc -l) +total_files=$(find /proc/"${pid}"/fd | wc -l) echo "Used Memory=""$used_memory" echo "CPU Load=""$cpu_load" From 8dec782b8b205044b401a78967292d0d6abb3329 Mon Sep 17 00:00:00 2001 From: Serphentas Date: Thu, 18 Mar 2021 20:11:19 +0100 Subject: [PATCH 167/332] Add chrony support (#345) * initial chronyc json sampler * make use of standard data format * fix last_rx json key --- snmp/chrony | 111 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 111 insertions(+) create mode 100644 snmp/chrony diff --git a/snmp/chrony b/snmp/chrony new file mode 100644 index 000000000..08cca7139 --- /dev/null +++ b/snmp/chrony @@ -0,0 +1,111 @@ +#!/usr/bin/env python3 +import json +import shlex +import subprocess + +VERSION = 1 + +def proc_err(cmd, proc): + # output process error and first line of error code + return "{}{}".format( + subprocess.CalledProcessError(proc.returncode, cmd, proc.stderr), + " ({})".format(proc.stderr.splitlines()[0]) if proc.stderr.splitlines() else "" + ) + +def print_data(data, error, error_msg): + print(json.dumps({ + 'data': data, + 'error': error, + 'errorString': error_msg, + 'version': VERSION + })) + +def main(args): + CSV_HEADERS = { + 'tracking': [ + 'reference_name', + 'reference_type', + 'stratum', + 'reference_time', + 'system_time', + 'last_offset', + 'rms_offset', + 'frequency', + 'residual_frequency', + 'skew', + 'root_delay', + 'root_dispersion', + 'update_interval', + 'leap_status', + ], + 'sources': [ + 'source_mode', + 'source_state', + 'source_name', + 'stratum', + 'polling_rate', + 'reachability', + 'last_rx', + 'adjusted_offset', + 'measured_offset', + 'estimated_error' + ], + 'sourcestats': [ + 'source_name', + 'number_samplepoints', + 'number_runs', + 'span', + 'frequency', + 'frequency_skew', + 'offset', + 'stddev', + ] + } + DATA = { + 'tracking': {}, + 'sources': [] + } + ERROR = False + ERROR_MSG = '' + + # get and set tracking data + rc, tracking = subprocess.getstatusoutput('chronyc -c tracking') + if rc 
!= 0: + print_data(DATA, rc, tracking) + return 1 + tracking = tracking.split(',') + for i in range(0, len(CSV_HEADERS['tracking'])): + DATA['tracking'][CSV_HEADERS['tracking'][i]] = tracking[i] + + # get sources + sourcestats data + rc, sources = subprocess.getstatusoutput('chronyc -c sources') + if rc != 0: + print_data(DATA, rc, sources) + return 1 + sources = sources.split('\n') + rc, sourcestats = subprocess.getstatusoutput('chronyc -c sourcestats') + if rc != 0: + print_data(DATA, rc, sourcestats) + return 1 + sourcestats = sourcestats.split('\n') + + # mix sources and sourcestats + for i in range(0, len(sources)): + source = sources[i].split(',') + stats = sourcestats[i].split(',') + data = {} + + for j in range(0, len(CSV_HEADERS['sources'])): + data[CSV_HEADERS['sources'][j]] = source[j] + for j in range(0, len(CSV_HEADERS['sourcestats'])): + data[CSV_HEADERS['sourcestats'][j]] = stats[j] + + DATA['sources'].append(data) + + print_data(DATA, ERROR, ERROR_MSG) + + return 0 + +if __name__ == '__main__': + import sys + sys.exit(main(sys.argv[1:])) From 87b7cc516c5e5fa21a6226c6e7bc4fd910924bde Mon Sep 17 00:00:00 2001 From: Jellyfrog Date: Thu, 18 Mar 2021 20:12:42 +0100 Subject: [PATCH 168/332] Add linter (#357) --- .github/workflows/linter.yml | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) create mode 100644 .github/workflows/linter.yml diff --git a/.github/workflows/linter.yml b/.github/workflows/linter.yml new file mode 100644 index 000000000..5c2015aef --- /dev/null +++ b/.github/workflows/linter.yml @@ -0,0 +1,34 @@ +name: Lint Code Base + +on: + push: + pull_request: + branches: [master] + +jobs: + build: + name: Lint Code Base + runs-on: ubuntu-latest + + steps: + - name: Checkout Code + uses: actions/checkout@v2 + with: + # Full git history is needed to get a proper list of changed files within `super-linter` + fetch-depth: 0 + + - name: Lint Code Base + uses: github/super-linter@v3.15.3 + env: + FILTER_REGEX_INCLUDE: .*(agent-local|snmp)/.* + SUPPRESS_POSSUM: true + + VALIDATE_BASH_EXEC: false + VALIDATE_PYTHON_FLAKE8: false + VALIDATE_PHP_PHPCS: false + VALIDATE_PHP_PSALM: false + + SHELLCHECK_OPTS: --severity=warning + + DEFAULT_BRANCH: master + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} From c382982481e5c18a9df367183ac9ede1bec9f463 Mon Sep 17 00:00:00 2001 From: Jellyfrog Date: Fri, 19 Mar 2021 00:27:48 +0100 Subject: [PATCH 169/332] Update linter.yml --- .github/workflows/linter.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/linter.yml b/.github/workflows/linter.yml index 5c2015aef..0a776871b 100644 --- a/.github/workflows/linter.yml +++ b/.github/workflows/linter.yml @@ -20,7 +20,7 @@ jobs: - name: Lint Code Base uses: github/super-linter@v3.15.3 env: - FILTER_REGEX_INCLUDE: .*(agent-local|snmp)/.* + FILTER_REGEX_EXCLUDE: check_mk_agent.* SUPPRESS_POSSUM: true VALIDATE_BASH_EXEC: false From 2bd9693c5e30b83101b9d5e84b09387f95700d64 Mon Sep 17 00:00:00 2001 From: Jellyfrog Date: Mon, 22 Mar 2021 12:59:04 +0100 Subject: [PATCH 170/332] Fix CI --- .github/workflows/linter.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/linter.yml b/.github/workflows/linter.yml index 0a776871b..1e65ec1a1 100644 --- a/.github/workflows/linter.yml +++ b/.github/workflows/linter.yml @@ -20,7 +20,8 @@ jobs: - name: Lint Code Base uses: github/super-linter@v3.15.3 env: - FILTER_REGEX_EXCLUDE: check_mk_agent.* + FILTER_REGEX_INCLUDE: .*(agent-local|snmp)/.* + VALIDATE_ALL_CODEBASE: false 
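          # Illustrative note: VALIDATE_ALL_CODEBASE set to false makes
          # super-linter check only the files changed by the triggering
          # push or pull request rather than re-linting the whole tree.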
SUPPRESS_POSSUM: true VALIDATE_BASH_EXEC: false From 2e8c6b07d6bc516a970db09c6de17ea5a3c8d40d Mon Sep 17 00:00:00 2001 From: priiduonu Date: Mon, 22 Mar 2021 15:09:20 +0200 Subject: [PATCH 171/332] Update ups-apcups.sh (#361) Filter out `LINEV` value as some APC models also return `MAXLINEV` and `MINLINEV` values in `apcaccess` output, therefore ruining the final output. --- snmp/ups-apcups.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/snmp/ups-apcups.sh b/snmp/ups-apcups.sh index 64b55c30f..378f2d6f3 100755 --- a/snmp/ups-apcups.sh +++ b/snmp/ups-apcups.sh @@ -19,7 +19,7 @@ BIN_GREP='/usr/bin/grep' ################################################################ TMP=$($BIN_APCS 2>/dev/null) -for value in "LINEV:[0-9]+" "LOADPCT:[0-9.]+" "BCHARGE:[0-9.]+" "TIMELEFT:[0-9.]+" "^BATTV:[0-9.]+" "NOMINV:[0-9]+" "NOMBATTV:[0-9.]+" +for value in "^LINEV:[0-9]+" "LOADPCT:[0-9.]+" "BCHARGE:[0-9.]+" "TIMELEFT:[0-9.]+" "^BATTV:[0-9.]+" "NOMINV:[0-9]+" "NOMBATTV:[0-9.]+" do OUT=$(echo "$TMP" | $BIN_TR -d ' ' | $BIN_GREP -Eo "$value" | $BIN_CUT -d ":" -f 2) if [ -n "$OUT" ]; then @@ -27,4 +27,4 @@ do else echo "Unknown" fi -done \ No newline at end of file +done From 67d73c7b9919a9dcacac5f772cf68673122d3b2d Mon Sep 17 00:00:00 2001 From: Karl Shea Date: Mon, 22 Mar 2021 08:10:13 -0500 Subject: [PATCH 172/332] Allow configuring the number of lines read (#358) --- snmp/gpsd | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/snmp/gpsd b/snmp/gpsd index 913f43d95..8844bc0a4 100755 --- a/snmp/gpsd +++ b/snmp/gpsd @@ -14,6 +14,7 @@ BIN_GPIPE='/usr/bin/env gpspipe' BIN_GREP='/usr/bin/env grep' BIN_PYTHON='/usr/bin/env python' +LINES=20 # Check for config file CONFIG=$0".conf" @@ -27,7 +28,7 @@ TMPFILE=$(mktemp) trap 'rm -f $TMPFILE' 0 2 3 15 # Write GPSPIPE Data to Temp File -$BIN_GPIPE -w -n 20 > "$TMPFILE" +$BIN_GPIPE -w -n $LINES > "$TMPFILE" # Parse Temp file for GPSD Data VERSION=$(cat "$TMPFILE" | $BIN_GREP -m 1 "VERSION" | $BIN_PYTHON -c 'import sys,json;print json.load(sys.stdin)["rev"]') From 4e09377661022f66045c488cdfa704bd02b73394 Mon Sep 17 00:00:00 2001 From: Wheel Date: Thu, 25 Mar 2021 21:14:43 -0400 Subject: [PATCH 173/332] Minor script reverts (osupdate, phpfpm) (#362) * Revert changes made in #355 that broke the script. * Revert partial changes made in #356 By just removing the quotes in line 81 i could make the script work again * osupdate yum revert Realized i had a centos to test * Fix curl error `curl: (3) URL using bad/illegal format or missing URL` Not sure how to properly fix it but moving the hardcode flags to the variable so its not empty did fix it. The curl error caused to push down all values 1 line which made them mismatch in librenms. 
* Update osupdate * Update phpfpmsp Co-authored-by: Jellyfrog --- snmp/osupdate | 18 ++++++++++++------ snmp/phpfpmsp | 6 +++--- 2 files changed, 15 insertions(+), 9 deletions(-) diff --git a/snmp/osupdate b/snmp/osupdate index 1f4f94852..11a6d9a9b 100755 --- a/snmp/osupdate +++ b/snmp/osupdate @@ -34,7 +34,8 @@ CMD_APK=' version' ################################################################ if command -v zypper &>/dev/null ; then # OpenSUSE - UPDATES=$($BIN_ZYPPER "$CMD_ZYPPER" | $BIN_WC $CMD_WC) + # shellcheck disable=SC2086 + UPDATES=$($BIN_ZYPPER $CMD_ZYPPER | $BIN_WC $CMD_WC) if [ "$UPDATES" -ge 2 ]; then echo $(($UPDATES-2)); else @@ -42,7 +43,8 @@ if command -v zypper &>/dev/null ; then fi elif command -v dnf &>/dev/null ; then # Fedora - UPDATES=$($BIN_DNF "$CMD_DNF" | $BIN_WC $CMD_WC) + # shellcheck disable=SC2086 + UPDATES=$($BIN_DNF $CMD_DNF | $BIN_WC $CMD_WC) if [ "$UPDATES" -ge 1 ]; then echo $(($UPDATES-1)); else @@ -58,7 +60,8 @@ elif command -v pacman &>/dev/null ; then fi elif command -v yum &>/dev/null ; then # CentOS / Redhat - UPDATES=$($BIN_YUM "$CMD_YUM" | $BIN_WC $CMD_WC) + # shellcheck disable=SC2086 + UPDATES=$($BIN_YUM $CMD_YUM | $BIN_WC $CMD_WC) if [ "$UPDATES" -ge 1 ]; then echo $(($UPDATES-1)); else @@ -66,7 +69,8 @@ elif command -v yum &>/dev/null ; then fi elif command -v apt-get &>/dev/null ; then # Debian / Devuan / Ubuntu - UPDATES=$($BIN_APT "$CMD_APT" | $BIN_GREP $CMD_GREP 'Inst') + # shellcheck disable=SC2086 + UPDATES=$($BIN_APT $CMD_APT | $BIN_GREP $CMD_GREP 'Inst') if [ "$UPDATES" -ge 1 ]; then echo "$UPDATES"; else @@ -74,7 +78,8 @@ elif command -v apt-get &>/dev/null ; then fi elif command -v pkg &>/dev/null ; then # FreeBSD - UPDATES=$($BIN_PKG "$CMD_PKG" | $BIN_WC $CMD_WC) + # shellcheck disable=SC2086 + UPDATES=$($BIN_PKG $CMD_PKG | $BIN_WC $CMD_WC) if [ "$UPDATES" -ge 1 ]; then echo "$UPDATES"; else @@ -82,7 +87,8 @@ elif command -v pkg &>/dev/null ; then fi elif command -v apk &>/dev/null ; then # Alpine - UPDATES=$($BIN_APK "$CMD_APK" | $BIN_WC $CMD_WC) + # shellcheck disable=SC2086 + UPDATES=$($BIN_APK $CMD_APK | $BIN_WC $CMD_WC) if [ "$UPDATES" -ge 2 ]; then echo $(($UPDATES-1)); else diff --git a/snmp/phpfpmsp b/snmp/phpfpmsp index 6fbf22c57..481dc5d2d 100644 --- a/snmp/phpfpmsp +++ b/snmp/phpfpmsp @@ -42,7 +42,7 @@ # the URL to fetch, change as needed url="http://localhost/status?full" -opts="" +opts="-Ss" # netdata # real-time performance and health monitoring, done right! @@ -77,8 +77,8 @@ phpfpm_slow_requests=0 # local opts="${1}" url="${2}" - - phpfpm_response=("$(curl -Ss "${opts}" "${url}")") + # shellcheck disable=SC2207 + phpfpm_response=($(curl "${opts}" "${url}")) [ $? 
-ne 0 ] || [ "${#phpfpm_response[@]}" -eq 0 ] && exit 1 if [[ "${phpfpm_response[0]}" != "pool:" \ From 19da9501db559467a069d65d700db116f1bca0c5 Mon Sep 17 00:00:00 2001 From: Jellyfrog Date: Fri, 26 Mar 2021 11:02:01 +0100 Subject: [PATCH 174/332] Fix regressions after linting (#363) --- snmp/distro | 2 +- snmp/ntp-server.sh | 65 ++++++++++++++++++++++++++++++---------------- snmp/sdfsinfo | 3 ++- 3 files changed, 46 insertions(+), 24 deletions(-) diff --git a/snmp/distro b/snmp/distro index f481bbee6..56ae5e940 100755 --- a/snmp/distro +++ b/snmp/distro @@ -56,7 +56,7 @@ elif [ "${OS}" = "Linux" ] ; then REV="" IGNORE_OS_RELEASE=1 if [ -f /usr/bin/lsb_release ] ; then - ID=$(lsb_release -i | awk -F ':' '{print $2}' | sed 's/ //g') + ID=$(lsb_release -i | awk -F ':' '{print $2}' | sed 's/\s//g') fi if [ "${ID}" = "Raspbian" ] ; then DIST="Raspbian $(cat /etc/debian_version)" diff --git a/snmp/ntp-server.sh b/snmp/ntp-server.sh index ba1af1593..30c722041 100755 --- a/snmp/ntp-server.sh +++ b/snmp/ntp-server.sh @@ -45,18 +45,28 @@ STRATUM=$($BIN_NTPQ -c rv | $BIN_GREP -Eow "stratum=[0-9]+" | $BIN_CUT -d "=" -f # parse the ntpq info that requires version specific info NTPQ_RAW=$($BIN_NTPQ -c rv | $BIN_GREP jitter | $BIN_SED 's/[[:alpha:]=,_]/ /g') if [ $NTPQV = "p11" ]; then - OFFSET=$(echo "$NTPQ_RAW" | $BIN_AWK -F ' ' '{print $3}') - FREQUENCY=$(echo "$NTPQ_RAW" | $BIN_AWK -F ' ' '{print $4}') - SYS_JITTER=$(echo "$NTPQ_RAW" | $BIN_AWK -F ' ' '{print $5}') - CLK_JITTER=$(echo "$NTPQ_RAW" | $BIN_AWK -F ' ' '{print $6}') - CLK_WANDER=$(echo "$NTPQ_RAW" | $BIN_AWK -F ' ' '{print $7}') + # shellcheck disable=SC2086 + OFFSET=$(echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $3}') + # shellcheck disable=SC2086 + FREQUENCY=$(echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $4}') + # shellcheck disable=SC2086 + SYS_JITTER=$(echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $5}') + # shellcheck disable=SC2086 + CLK_JITTER=$(echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $6}') + # shellcheck disable=SC2086 + CLK_WANDER=$(echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $7}') fi if [ $NTPQV = "p1" ]; then - OFFSET=$(echo "$NTPQ_RAW" | $BIN_AWK -F ' ' '{print $2}') - FREQUENCY=$(echo "$NTPQ_RAW" | $BIN_AWK -F ' ' '{print $3}') - SYS_JITTER=$(echo "$NTPQ_RAW" | $BIN_AWK -F ' ' '{print $4}') - CLK_JITTER=$(echo "$NTPQ_RAW" | $BIN_AWK -F ' ' '{print $5}') - CLK_WANDER=$(echo "$NTPQ_RAW" | $BIN_AWK -F ' ' '{print $6}') + # shellcheck disable=SC2086 + OFFSET=$(echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $2}') + # shellcheck disable=SC2086 + FREQUENCY=$(echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $3}') + # shellcheck disable=SC2086 + SYS_JITTER=$(echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $4}') + # shellcheck disable=SC2086 + CLK_JITTER=$(echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $5}') + # shellcheck disable=SC2086 + CLK_WANDER=$(echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $6}') fi VER=$($BIN_NTPD --version) @@ -67,18 +77,29 @@ else fi CMD2=$($USECMD | $BIN_TR -d ' ' | $BIN_CUT -d : -f 2 | $BIN_TR '\n' ' ') -TIMESINCERESET=$(echo "$CMD2" | $BIN_AWK -F ' ' '{print $1}') -RECEIVEDBUFFERS=$(echo "$CMD2" | $BIN_AWK -F ' ' '{print $2}') -FREERECEIVEBUFFERS=$(echo "$CMD2" | $BIN_AWK -F ' ' '{print $3}') -USEDRECEIVEBUFFERS=$(echo "$CMD2" | $BIN_AWK -F ' ' '{print $4}') -LOWWATERREFILLS=$(echo "$CMD2" | $BIN_AWK -F ' ' '{print $5}') -DROPPEDPACKETS=$(echo "$CMD2" | $BIN_AWK -F ' ' '{print $6}') -IGNOREDPACKETS=$(echo "$CMD2" | $BIN_AWK -F ' ' '{print $7}') -RECEIVEDPACKETS=$(echo "$CMD2" | $BIN_AWK -F ' ' '{print $8}') -PACKETSSENT=$(echo "$CMD2" | $BIN_AWK -F ' 
' '{print $9}') -PACKETSENDFAILURES=$(echo "$CMD2" | $BIN_AWK -F ' ' '{print $10}') -#INPUTWAKEUPS=$(echo "$CMD2" | $BIN_AWK -F ' ' '{print $11}') -USEFULINPUTWAKEUPS=$(echo "$CMD2" | $BIN_AWK -F ' ' '{print $12}') +# shellcheck disable=SC2086 +TIMESINCERESET=$(echo $CMD2 | $BIN_AWK -F ' ' '{print $1}') +# shellcheck disable=SC2086 +RECEIVEDBUFFERS=$(echo $CMD2 | $BIN_AWK -F ' ' '{print $2}') +# shellcheck disable=SC2086 +FREERECEIVEBUFFERS=$(echo $CMD2 | $BIN_AWK -F ' ' '{print $3}') +# shellcheck disable=SC2086 +USEDRECEIVEBUFFERS=$(echo $CMD2 | $BIN_AWK -F ' ' '{print $4}') +# shellcheck disable=SC2086 +LOWWATERREFILLS=$(echo $CMD2 | $BIN_AWK -F ' ' '{print $5}') +# shellcheck disable=SC2086 +DROPPEDPACKETS=$(echo $CMD2 | $BIN_AWK -F ' ' '{print $6}') +# shellcheck disable=SC2086 +IGNOREDPACKETS=$(echo $CMD2 | $BIN_AWK -F ' ' '{print $7}') +# shellcheck disable=SC2086 +RECEIVEDPACKETS=$(echo $CMD2 | $BIN_AWK -F ' ' '{print $8}') +# shellcheck disable=SC2086 +PACKETSSENT=$(echo $CMD2 | $BIN_AWK -F ' ' '{print $9}') +# shellcheck disable=SC2086 +PACKETSENDFAILURES=$(echo $CMD2 | $BIN_AWK -F ' ' '{print $10}') +#INPUTWAKEUPS=$(echo $CMD2 | $BIN_AWK -F ' ' '{print $11}') +# shellcheck disable=SC2086 +USEFULINPUTWAKEUPS=$(echo $CMD2 | $BIN_AWK -F ' ' '{print $12}') echo '{"data":{"offset":"'"$OFFSET"\ '","frequency":"'"$FREQUENCY"\ diff --git a/snmp/sdfsinfo b/snmp/sdfsinfo index 6c83d241d..f65a4a631 100644 --- a/snmp/sdfsinfo +++ b/snmp/sdfsinfo @@ -22,5 +22,6 @@ SDFSCLI_BIN=$(which sdfscli) SDFSCLI_CMD=' --volume-info' GREP_BIN=$(which grep) GREP_CMD=' -o -E ' -DATAPOINTS=$($SDFSCLI_BIN "$SDFSCLI_CMD" | $GREP_BIN "$GREP_CMD" "(([0-9]+)\.?([0-9]+)?)") +# shellcheck disable=SC2086 +DATAPOINTS=$($SDFSCLI_BIN $SDFSCLI_CMD | $GREP_BIN $GREP_CMD "(([0-9]+)\.?([0-9]+)?)") echo "$DATAPOINTS" From b0983980b6d34215d917fc96f9fffd6ff91c557a Mon Sep 17 00:00:00 2001 From: Denny Friebe Date: Tue, 20 Apr 2021 23:32:22 +0200 Subject: [PATCH 175/332] Raspberry Pi: Add SNMP extend to monitor IO pins or sensor modules connected to the GPIO header (#364) * Raspberry Pi: Add SNMP extend to monitor IO pins or sensor modules connected to the GPIO header * Raspberry Pi: Add missing sensor types --- snmp/rpigpiomonitor.ini | 92 +++++++++++++ snmp/rpigpiomonitor.php | 290 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 382 insertions(+) create mode 100644 snmp/rpigpiomonitor.ini create mode 100755 snmp/rpigpiomonitor.php diff --git a/snmp/rpigpiomonitor.ini b/snmp/rpigpiomonitor.ini new file mode 100644 index 000000000..777ee5415 --- /dev/null +++ b/snmp/rpigpiomonitor.ini @@ -0,0 +1,92 @@ +; Note +; +; If a configured sensor does not appear in librenms, this may be due to a faulty configuration. +; After the configuration has been changed, it can be checked for possible errors using "rpigpiomonitor.php -validate". +; +; Any change to your configuration requires a manual rediscover of your device. Otherwise, your changes will only be visible once librenms performs an automatic rediscover. + +; Sensor section +; +; Multiple use of the identical name can lead to overwriting of the state configuration of the first sensor with the same name when using states. +; This should therefore only be used once per configuration if possible. When using multiple Raspberry's, this can be used once per device because the serial number of the Raspberry is added to each sensor. +[sensorexample1] + +; Sensor type +; +; This defines the sensor type. +; When reading a normal IO contact, the use of "state" is recommended. 
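; (Illustrative, assumed wiring) With a pull-up resistor, a closed door
; contact reads 0 and an open one reads 1, which the state sensor defined
; below maps to "Closed"/"Open".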
+; The following types are possible: +; airflow, ber, charge, chromatic_dispersion, cooling, count, current, dbm, delay, eer, fanspeed +; frequency, humidity, load, loss, power, power_consumed, power_factor, pressure, quality_factor +; runtime, signal, snr, state, temperature, tv_signal, voltage, waterflow, percent +type = state + +; Sensor description +; +; This defines the sensor description which can be seen later in the respective device overview or in the graph. +description = Cabinet door + +; Sensor limits +; +; This defines the limits from when the respective alarm should be triggered. +; Only numeric values are valid! +; If a certain alarm or even all of them should not be used, these values can be omitted or commented out. +;lowlimit = 0 +;lowwarnlimit = 5 +;warnlimit = 30 +;highlimit = 35 + +; Sensor states +; +; This defines the sensor states to be used. If no sensor state is desired it can be omitted or commented out. +; A state sensor is configured in the following format: +; states.state_description.state_variable = state_value +; state_description should be replaced with the desired sensor state description which is displayed in librenms. +; state_variable should be replaced with the variable to be configured. The 2 variables "value" and "generic" must be configured for each state. +; The variable "value" specifies the value when the respective state is to be displayed. +; The variable "generic" can be used to define the background color of the respective state. 0 = green, 1 = orange, 2 = red. +; In the following example, the "Cabinet door" sensor will display "Open" in red for value 1 and "Closed" in green for value 0: +states.Open.value = 1 +states.Open.generic = 2 +states.Closed.value = 0 +states.Closed.generic = 0 + +; GPIO readout of an IO contact +; +; This defines the respective GPIO PIN which is to be read out. +; With a pullup or pulldown circuit the state of a door contact can be determined. +; It is important that the pin numbering scheme of WiringPi is used! +io_gpio_pin = 21 + +; GPIO readout by external program +; +; This defines the external program to be used to read out the respective GPIO values. +; Hereby it is possible to read sensor modules like temperature sensors, air pressure sensors, humidity sensors etc.. +; The program to be used must output a pure numerical value. If this is not the case, the value can be extracted from the output of the program using a helper script, for example. +;external_gpio_reader = /etc/snmp/tempreader.sh + + + +; +; A few more example configurations +; + +; Example configuration which reads an IO contact (pin 25) to determine whether the light of a technical cabinet is switched on or off. +[sensorexample2] +type = state +description = Cabinet lighting +states.Switched on.value = 1 +states.Switched on.generic = 2 +states.Switched off.value = 0 +states.Switched off.generic = 0 +io_gpio_pin = 25 + +; Example configuration which reads a temperature sensor +;[sensorexample3] +;type = temperature +;description = Cabinet temperature +;lowlimit = 0 +;lowwarnlimit = 5 +;warnlimit = 35 +;highlimit = 40 +;external_gpio_reader = /etc/snmp/tempreader.sh diff --git a/snmp/rpigpiomonitor.php b/snmp/rpigpiomonitor.php new file mode 100755 index 000000000..b6614e182 --- /dev/null +++ b/snmp/rpigpiomonitor.php @@ -0,0 +1,290 @@ +#!/usr/bin/env php +. 
+ * + * @link https://librenms.org + * @copyright 2021 Denny Friebe + * @author Denny Friebe + */ + +function parseConfigFile($file, $process_sections = false, $scanner_mode = INI_SCANNER_NORMAL) { + $explode_str = '.'; + $escape_char = "'"; + + // load ini file the normal way + $data = parse_ini_file($file, $process_sections, $scanner_mode); + + if (!$process_sections) { + $data = array($data); + } + + foreach ($data as $section_key => $section) { + // loop inside the section + foreach ($section as $key => $value) { + if (strpos($key, $explode_str)) { + if (substr($key, 0, 1) !== $escape_char) { + // key has a dot. Explode on it, then parse each subkeys + // and set value at the right place thanks to references + $sub_keys = explode($explode_str, $key); + $subs =& $data[$section_key]; + foreach ($sub_keys as $sub_key) { + if (!isset($subs[$sub_key])) { + $subs[$sub_key] = []; + } + $subs =& $subs[$sub_key]; + } + // set the value at the right place + $subs = $value; + // unset the dotted key, we don't need it anymore + unset($data[$section_key][$key]); + } + // we have escaped the key, so we keep dots as they are + else { + $new_key = trim($key, $escape_char); + $data[$section_key][$new_key] = $value; + unset($data[$section_key][$key]); + } + } + } + } + if (!$process_sections) { + $data = $data[0]; + } + return $data; +} + +function validate_sensor_type($type) { + switch ($type) { + case "airflow": + case "ber": + case "charge": + case "chromatic_dispersion": + case "cooling": + case "count": + case "current": + case "dbm": + case "delay": + case "eer": + case "fanspeed": + case "frequency": + case "humidity": + case "load": + case "loss": + case "power": + case "power_consumed": + case "power_factor": + case "pressure": + case "quality_factor": + case "runtime": + case "signal": + case "snr": + case "state": + case "temperature": + case "tv_signal": + case "voltage": + case "waterflow": + case "percent": + return true; + default: + return false; + } +} + +function validate_sensor_states($states) { + if (is_array($states)) { + foreach($states as $state_index => $state) { + if (!isset($state["value"]) || !isset($state["generic"])) { + continue; + } + + if (!is_numeric($state["value"]) || !is_numeric($state["generic"])) { + return false; + } + } + return true; + } + return false; +} + +function validate_sensor_limit($limit) { + if (isset($limit) && is_numeric($limit)) { + return true; + } + return false; +} + +function get_rpi_serial() { + if (file_exists("/proc/device-tree/serial-number")) { + $rpi_serial = file_get_contents("/proc/device-tree/serial-number"); + //During the readout of serial-number additional characters are passed. (at this point I am not sure why) + //To prevent these characters from being output and messing up the whole snmp string we only cut out the needed characters. + $rpi_serial = substr($rpi_serial, 0, 16); + return $rpi_serial; + } + return; +} + +function get_sensor_current_value($sensor_data) { + if (isset($sensor_data["io_gpio_pin"])) { + $sensor_current_value = exec("gpio read " .$sensor_data["io_gpio_pin"]. " 2>&1", $tt, $retcode); + } else { + $sensor_current_value = exec($sensor_data["external_gpio_reader"]. " 2>&1", $tt, $retcode); + } + + if (is_numeric($sensor_current_value)) { + return $sensor_current_value; + } + + return; +} + +function validate_config($config, $rpi_serial) { + if(!$rpi_serial) { + echo "The serial number of your raspberry pi could not be read. 
Please check if you are using a DT enabled kernel and the file /proc/device-tree/serial-number is present. \n"; + echo "The serial number is required for creating a state sensor so that no sensor with the same name from another RPI overwrites it. \n"; + } + + foreach($config as $sensor_name => $sensor_data) { + $valid = false; + $gpio_reader_valid = true; + + if (!isset($sensor_data["type"]) || validate_sensor_type($sensor_data["type"]) == false) { + echo "No valid type is configured for sensor ".$sensor_name."! \n"; + } + + if (isset($sensor_data["states"]) && validate_sensor_states($sensor_data["states"]) == false) { + echo "No valid states is configured for sensor ".$sensor_name."! \n"; + } + + if (!$sensor_data["description"]) { + echo "No valid description is configured for sensor ".$sensor_name."! \n"; + } + + if (isset($sensor_data["lowlimit"]) && validate_sensor_limit($sensor_data["lowlimit"]) == false) { + echo "No valid lowlimit is configured for sensor ".$sensor_name."! \n"; + } + + if (isset($sensor_data["lowwarnlimit"]) && validate_sensor_limit($sensor_data["lowwarnlimit"]) == false) { + echo "No valid lowwarnlimit is configured for sensor ".$sensor_name."! \n"; + } + + if (isset($sensor_data["warnlimit"]) && validate_sensor_limit($sensor_data["warnlimit"]) == false) { + echo "No valid warnlimit is configured for sensor ".$sensor_name."! \n"; + } + + if (isset($sensor_data["highlimit"]) && validate_sensor_limit($sensor_data["highlimit"]) == false) { + echo "No valid highlimit is configured for sensor ".$sensor_name."! \n"; + } + + if (!isset($sensor_data["io_gpio_pin"]) && !isset($sensor_data["external_gpio_reader"])) { + echo "No IO GPIO pin or external GPIO readout program is configured for sensor ".$sensor_name."! \n"; + $gpio_reader_valid = false; + } + + if (isset($sensor_data["external_gpio_reader"]) && !file_exists($sensor_data["external_gpio_reader"])) { + echo "The external GPIO program for sensor ".$sensor_name." could not be found! Please check if the specified path is correct and the file exists. \n"; + $gpio_reader_valid = false; + } + + if ($gpio_reader_valid) { + $sensor_current_value = get_sensor_current_value($sensor_data); + if (isset($sensor_current_value)) { + echo "Current sensor value for ".$sensor_name.": " . $sensor_current_value . "\n"; + $valid = true; + } else { + echo "The current sensor value for ".$sensor_name." does not seem to be numeric! \n"; + if (isset($sensor_data["io_gpio_pin"])) { + echo "Please check if wiringpi is installed on this device! \n"; + } else { + echo "Please check if the external GPIO program outputs pure numeric values and if the required access rights are available to execute this program. \n"; + } + } + } + + if ($valid) { + echo "The sensor ".$sensor_name." are configured correctly. \n\n"; + } else { + echo "Please check your configuration for sensor ".$sensor_name.". 
\n\n"; + } + } +} + +function read_sensors($config, $rpi_serial) { + if ($rpi_serial) { + foreach($config as $sensor_name => $sensor_data) { + if ((!isset($sensor_data["type"]) || validate_sensor_type($sensor_data["type"]) == false) + || (isset($sensor_data["states"]) && validate_sensor_states($sensor_data["states"]) == false) + || !$sensor_data["description"] + || (isset($sensor_data["lowlimit"]) && validate_sensor_limit($sensor_data["lowlimit"]) == false) + || (isset($sensor_data["lowwarnlimit"]) && validate_sensor_limit($sensor_data["lowwarnlimit"]) == false) + || (isset($sensor_data["warnlimit"]) && validate_sensor_limit($sensor_data["warnlimit"]) == false) + || (isset($sensor_data["highlimit"]) && validate_sensor_limit($sensor_data["highlimit"]) == false) + || (!isset($sensor_data["io_gpio_pin"]) && !isset($sensor_data["external_gpio_reader"])) + || (isset($sensor_data["external_gpio_reader"]) && !file_exists($sensor_data["external_gpio_reader"]))) { + continue; //The configuration of this sensor is not correct. Skip this one. + } + + $sensor_current_value = get_sensor_current_value($sensor_data); + if (!isset($sensor_current_value)) { + continue; //The value read from the sensor does not correspond to a numerical value. Skip this one. + } + + //If limit is not configured, we initialize the respective key to prevent "Undefined index" notes. + if (!isset($sensor_data["lowlimit"])) { + $sensor_data["lowlimit"] = null; + } + + if (!isset($sensor_data["lowwarnlimit"])) { + $sensor_data["lowwarnlimit"] = null; + } + + if (!isset($sensor_data["warnlimit"])) { + $sensor_data["warnlimit"] = null; + } + + if (!isset($sensor_data["highlimit"])) { + $sensor_data["highlimit"] = null; + } + + echo $sensor_name."_".$rpi_serial.",".$sensor_data["type"].",".$sensor_data["description"].",".$sensor_data["lowlimit"].",".$sensor_data["lowwarnlimit"].",".$sensor_data["warnlimit"].",".$sensor_data["highlimit"]. ";"; + + if(isset($sensor_data["states"])) { + foreach($sensor_data["states"] as $state_descr => $state) { + echo $state["value"].",".$state["generic"].",".$state_descr.";"; + } + } + + echo "\n" . $sensor_current_value . "\n"; + } + } +} + +$config = parseConfigFile('rpigpiomonitor.ini', true); +$rpi_serial = get_rpi_serial(); + +for ($i=0; $i < $argc; $i++) { + if ($argv[$i] == "-validate") { + validate_config($config, $rpi_serial); + return; + } +} + +read_sensors($config, $rpi_serial); +?> + From ef381d780d04597a53dc787fea447ca4baabb156 Mon Sep 17 00:00:00 2001 From: Tim Pozar Date: Thu, 27 May 2021 12:36:59 -0700 Subject: [PATCH 176/332] Error checking with two line stats (#365) * Error checking with two line stats * Fixed some spacing problems --- snmp/zfs-freebsd | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/snmp/zfs-freebsd b/snmp/zfs-freebsd index e4d27cf80..c1654f47a 100644 --- a/snmp/zfs-freebsd +++ b/snmp/zfs-freebsd @@ -74,9 +74,12 @@ my @sysctls_pull = `/sbin/sysctl -q @to_pull`; foreach my $stat (@sysctls_pull) { chomp( $stat ); my ( $var, $val ) = split(/:/, $stat, 2); - - $val =~ s/^ //; - $sysctls->{$var}=$val; + # If $val is empty, skip it. Likely a var with a newline before + # the data so it is trying to "split" the data. 
+ if( length $val ) { + $val =~ s/^ //; + $sysctls->{$var}=$val; + } } # does not seem to exist for me, but some of these don't seem to be created till needed From 183dec16552fd0a932e1d6a797cbf2a8e9400e88 Mon Sep 17 00:00:00 2001 From: adamus1red Date: Tue, 27 Jul 2021 00:01:03 +0100 Subject: [PATCH 177/332] Change UPS to pull from arg[1] for UPS name (#371) Will still fallback to APCUPS --- snmp/ups-nut.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/ups-nut.sh b/snmp/ups-nut.sh index 7fa5a0ba3..b5ba04fe4 100755 --- a/snmp/ups-nut.sh +++ b/snmp/ups-nut.sh @@ -9,7 +9,7 @@ # 4. restart snmpd on the host # # 5. activate the app for the desired host in LibreNMS # ################################################################ -UPS_NAME='APCUPS' +UPS_NAME="${1:-APCUPS}" PATH=$PATH:/usr/bin:/bin TMP=$(upsc $UPS_NAME 2>/dev/null) From 5444de823a28e95b531fc5ddbfe8dfed21ff98bc Mon Sep 17 00:00:00 2001 From: Jellyfrog Date: Tue, 27 Jul 2021 10:26:09 +0200 Subject: [PATCH 178/332] Bump Super-linter (#372) * Bump Super-linter * Update linter.yml * Update linter.yml * Update linter.yml --- .github/workflows/linter.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/linter.yml b/.github/workflows/linter.yml index 1e65ec1a1..797b4f20f 100644 --- a/.github/workflows/linter.yml +++ b/.github/workflows/linter.yml @@ -18,7 +18,7 @@ jobs: fetch-depth: 0 - name: Lint Code Base - uses: github/super-linter@v3.15.3 + uses: github/super-linter@v4 env: FILTER_REGEX_INCLUDE: .*(agent-local|snmp)/.* VALIDATE_ALL_CODEBASE: false @@ -26,6 +26,7 @@ jobs: VALIDATE_BASH_EXEC: false VALIDATE_PYTHON_FLAKE8: false + VALIDATE_PYTHON_MYPY: false VALIDATE_PHP_PHPCS: false VALIDATE_PHP_PSALM: false From 15d030f96579cc737de2954c58e91a46b68c0407 Mon Sep 17 00:00:00 2001 From: Pim van Pelt Date: Wed, 15 Sep 2021 17:50:57 +0200 Subject: [PATCH 179/332] Tag VPP enabled machines to allow for a custom icon in LibreNMS device view (#374) --- snmp/distro | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/snmp/distro b/snmp/distro index 56ae5e940..d29d3e805 100755 --- a/snmp/distro +++ b/snmp/distro @@ -147,4 +147,8 @@ elif [ "${OS}" = "FreeBSD" ] ; then fi fi +if [ -f /etc/vpp/startup.conf ]; then + OSSTR="VPP ${OSSTR}" +fi + echo "${OSSTR}" From c82348bb5359644963bd957732e6fc5746fdf98b Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Wed, 20 Oct 2021 14:35:52 -0500 Subject: [PATCH 180/332] fix for ntp-server.sh from #376 --- snmp/ntp-server.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/snmp/ntp-server.sh b/snmp/ntp-server.sh index 30c722041..fba25a211 100755 --- a/snmp/ntp-server.sh +++ b/snmp/ntp-server.sh @@ -69,11 +69,11 @@ if [ $NTPQV = "p1" ]; then CLK_WANDER=$(echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $6}') fi -VER=$($BIN_NTPD --version) -if [ "$VER" = '4.2.6p5' ]; then - USECMD=$(echo "$BIN_NTPDC" -c iostats) +VER=$($BIN_NTPD --version 2>&1 | head -n 1) +if [[ "$VER" == *"4.2.6p5"* ]]; then + USECMD=$(echo "$BIN_NTPDC" -c iostats 127.0.0.1) else - USECMD=$(echo "$BIN_NTPQ" -c iostats localhost) + USECMD=$(echo "$BIN_NTPQ" -c iostats 127.0.0.1) fi CMD2=$($USECMD | $BIN_TR -d ' ' | $BIN_CUT -d : -f 2 | $BIN_TR '\n' ' ') From b99db0b3848b8bba8df6929980794e2f9558ebd6 Mon Sep 17 00:00:00 2001 From: "Zane C. 
Bowers-Hadley" Date: Wed, 20 Oct 2021 14:44:07 -0500 Subject: [PATCH 181/332] [[ -> [ tested on FreeBSD and works --- snmp/ntp-server.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/ntp-server.sh b/snmp/ntp-server.sh index fba25a211..b3b78fbd6 100755 --- a/snmp/ntp-server.sh +++ b/snmp/ntp-server.sh @@ -70,7 +70,7 @@ if [ $NTPQV = "p1" ]; then fi VER=$($BIN_NTPD --version 2>&1 | head -n 1) -if [[ "$VER" == *"4.2.6p5"* ]]; then +if [ "$VER" == *"4.2.6p5"* ]; then USECMD=$(echo "$BIN_NTPDC" -c iostats 127.0.0.1) else USECMD=$(echo "$BIN_NTPQ" -c iostats 127.0.0.1) From d0278e8560d82413d913c5e4698c705438732163 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Wed, 20 Oct 2021 14:49:26 -0500 Subject: [PATCH 182/332] now happy on both linux and freebsd for ntpd --version output --- snmp/ntp-server.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/snmp/ntp-server.sh b/snmp/ntp-server.sh index b3b78fbd6..b59f6c714 100755 --- a/snmp/ntp-server.sh +++ b/snmp/ntp-server.sh @@ -69,8 +69,8 @@ if [ $NTPQV = "p1" ]; then CLK_WANDER=$(echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $6}') fi -VER=$($BIN_NTPD --version 2>&1 | head -n 1) -if [ "$VER" == *"4.2.6p5"* ]; then +VER=$($BIN_NTPD --version 2>&1 | cut -d\ -f 2 | head -n 1) +if [ "$VER" == "4.2.6p5" ]; then USECMD=$(echo "$BIN_NTPDC" -c iostats 127.0.0.1) else USECMD=$(echo "$BIN_NTPQ" -c iostats 127.0.0.1) From d8edd140ce91a4b158b4dbcd7348759141c81b73 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Wed, 20 Oct 2021 14:53:40 -0500 Subject: [PATCH 183/332] pet the linter and hope everything is POSIX happy now.... works on freebsd and linux though --- snmp/ntp-server.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/ntp-server.sh b/snmp/ntp-server.sh index b59f6c714..4fb02e8c5 100755 --- a/snmp/ntp-server.sh +++ b/snmp/ntp-server.sh @@ -70,7 +70,7 @@ if [ $NTPQV = "p1" ]; then fi VER=$($BIN_NTPD --version 2>&1 | cut -d\ -f 2 | head -n 1) -if [ "$VER" == "4.2.6p5" ]; then +if [ "$VER" = "4.2.6p5" ]; then USECMD=$(echo "$BIN_NTPDC" -c iostats 127.0.0.1) else USECMD=$(echo "$BIN_NTPQ" -c iostats 127.0.0.1) From f0cb0eb1e3eac47e0c2fbf98ebbbd42380958189 Mon Sep 17 00:00:00 2001 From: SourceDoctor Date: Sun, 24 Oct 2021 21:01:40 +0200 Subject: [PATCH 184/332] update yaml loader (#381) --- snmp/puppet_agent.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/puppet_agent.py b/snmp/puppet_agent.py index 9cb64f17b..cc9b36343 100755 --- a/snmp/puppet_agent.py +++ b/snmp/puppet_agent.py @@ -28,7 +28,7 @@ def parse_yaml_file(filename): try: - yaml_data = yaml.load(open(filename, "r")) + yaml_data = yaml.load(open(filename, "r"), Loader=yaml.FullLoader) msg = None except yaml.scanner.ScannerError as e: yaml_data = [] From 7e323c345de1240375bd73a8661e529511e548e2 Mon Sep 17 00:00:00 2001 From: David Simpson <31688862+ds-04@users.noreply.github.com> Date: Sun, 24 Oct 2021 20:02:34 +0100 Subject: [PATCH 185/332] Add almalinux to distro (#378) --- snmp/distro | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/snmp/distro b/snmp/distro index d29d3e805..a8e9eb0d5 100755 --- a/snmp/distro +++ b/snmp/distro @@ -42,6 +42,11 @@ elif [ "${OS}" = "Linux" ] ; then #PSEUDONAME=$(cat /etc/redhat-release | sed s/.*\(// | sed s/\)//) REV=$(cat /etc/redhat-release | sed s/.*release\ // | sed s/\ .*//) + elif [ -f /etc/almalinux-release ] ; then + DIST='AlmaLinux' + #PSEUDONAME=$(cat /etc/almalinux-release | sed s/.*\(// | sed s/\)//) + REV=$(cat /etc/almalinux-release 
| sed s/.*release\ // | sed s/\ .*//) + elif [ -f /etc/mandrake-release ] ; then DIST='Mandrake' #PSEUDONAME=$(cat /etc/mandrake-release | sed s/.*\(// | sed s/\)//) From e20c1cf269584070e255b342229304ef385e9513 Mon Sep 17 00:00:00 2001 From: Peca Nesovanovic <59750439+Npeca75@users.noreply.github.com> Date: Mon, 1 Nov 2021 20:27:02 +0100 Subject: [PATCH 186/332] Mikrotik vlans discovery script (#382) --- snmp/Routeros/LMNS_vlans.scr | 58 ++++++++++++++++++++++++++++++++++++ 1 file changed, 58 insertions(+) create mode 100644 snmp/Routeros/LMNS_vlans.scr diff --git a/snmp/Routeros/LMNS_vlans.scr b/snmp/Routeros/LMNS_vlans.scr new file mode 100644 index 000000000..3ac920ed5 --- /dev/null +++ b/snmp/Routeros/LMNS_vlans.scr @@ -0,0 +1,58 @@ +### +### LibreNMS "glue" script for routeros vlans discovery +### https://github.com/librenms/librenms/pull/13427 +### + +:global vlanst [:toarray ""] +:global vlansu [:toarray ""] + +:foreach i in [/interface bridge vlan find] do={ + :local intf [/interface bridge vlan get $i bridge] + :local vlid [/interface bridge vlan get $i vlan-ids] + + :foreach t in [/interface bridge vlan get $i tagged] do={ + :set $vlanst ($vlanst, "$vlid,$t") + } + + :foreach u in [/interface bridge vlan get $i current-untagged] do={ + :set $vlansu ($vlansu, "$vlid,$u") + } + + :foreach u in [/interface bridge port find where bridge=$intf and pvid=$vlid] do={ + :local iu [/interface bridge port get $u interface] + :local fl 0 + :foreach tmp in $vlansu do={ + :local ar [:toarray $tmp] + :if ((($ar->0) = $vlid) && (($ar->1) = $iu)) do={ + :set fl 1 + } + } + :if ( $fl != 1 ) do={ + :set $vlansu ($vlansu, "$vlid,$iu") + } + } +} + +:foreach vl in [/interface vlan find ] do={ + :local intf [/interface vlan get $vl interface] + :local vlid [/interface vlan get $vl vlan-id] + :local fl 0 + + :foreach tmp in $vlanst do={ + :local ar [:toarray $tmp] + :if ((($ar->0) = $vlid) && (($ar->1) = $intf)) do={ + :set fl 1 + } + } + :if ( $fl != 1 ) do={ + :set $vlanst ($vlanst, "$vlid,$intf") + } +} + +:foreach tmp in $vlanst do={ + :put "T,$tmp" +} + +:foreach tmp in $vlansu do={ + :put "U,$tmp" +} From d3a8aaef510a9fcf99a5ae6abdfe9a2ccc06d4e9 Mon Sep 17 00:00:00 2001 From: Deltawings <77517677+Deltawings@users.noreply.github.com> Date: Wed, 10 Nov 2021 22:25:51 +0100 Subject: [PATCH 187/332] Constants definition logic modification Correction avoiding script exit when configuration file doesn't exist because MySQL connection constants can also be set at the beginning of this script. --- agent-local/mysql | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/agent-local/mysql b/agent-local/mysql index 0b9419fd0..c56e4e7e1 100755 --- a/agent-local/mysql +++ b/agent-local/mysql @@ -27,9 +27,9 @@ if (!array_key_exists('SCRIPT_FILENAME', $_SERVER) # ============================================================================ # CONFIGURATION # ============================================================================ -# Define MySQL connection constants in config.php. Instead of defining -# parameters here, you can define them in another file named the same as this -# file, with a .cnf extension. +# Define MySQL connection constants. Instead of defining parameters here, +# you can also define them in another file named the same as this +# file with a .cnf extension. # ============================================================================ $mysql_user = ''; @@ -77,9 +77,6 @@ echo("<<>>\n"); if (file_exists(__FILE__ . '.cnf' ) ) { require(__FILE__ . 
'.cnf'); debug('Found configuration file ' . __FILE__ . '.cnf'); -} else { - echo("No ".__FILE__ . ".cnf found!\n"); - exit(); } # Make this a happy little script even when there are errors. From 032baf37660761518f8f2514dfc5c11e8e711624 Mon Sep 17 00:00:00 2001 From: Alex R Date: Thu, 16 Dec 2021 18:24:29 +0100 Subject: [PATCH 188/332] - added equivalent wear level for nvme ssd (#383) * - added equivalent wear level for nvme ssd - remove touched cache file to avoid no data if config is guessed - take only 1st raw response to avoid taking strings instead of int (eg. adacom devices like supermicro sata dom moule) * fix identation --- snmp/smart | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/snmp/smart b/snmp/smart index 5793b90db..ef1304b0d 100755 --- a/snmp/smart +++ b/snmp/smart @@ -12,11 +12,11 @@ # and/or other materials provided with the distribution. # #THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -#ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +#ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED #WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. #IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, -#INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, -#BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +#INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +#BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, #DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF #LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR #OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF @@ -112,6 +112,7 @@ if ( defined( $opts{g} ) ){ $cache='#Could not touch '.$cache. "You will need to manually set it\n". "cache=?\n"; }else{ + system('rm -f '.$cache.'>/dev/null'); $cache='cache='.$cache."\n"; } @@ -175,7 +176,7 @@ if ( defined( $opts{g} ) ){ } - print "useSN=0\n".'smartctl='.$smartctl."\n". + print "useSN=1\n".'smartctl='.$smartctl."\n". $cache. 
$drive_lines; @@ -283,7 +284,7 @@ foreach my $line ( @disks ){ '233'=>'null', '9'=>'null', ); - + my @outputA; if($output =~ /NVMe Log/) @@ -293,6 +294,7 @@ foreach my $line ( @disks ){ 'Temperature' => 194, 'Power Cycles' => 12, 'Power On Hours' => 9, + 'Percentage Used' => 231, ); foreach(split(/\n/, $output )) { @@ -302,7 +304,11 @@ foreach my $line ( @disks ){ $val =~ s/^\s+|\s+$|\D+//g; if(exists($mappings{$key})) { - $IDs{$mappings{$key}} = $val; + if ($mappings{$key} == 231) { + $IDs{$mappings{$key}} = 100-$val; + } else { + $IDs{$mappings{$key}} = $val; + } } } } @@ -344,7 +350,8 @@ foreach my $line ( @disks ){ ( $id == 231 ) || ( $id == 233 ) ) { - $IDs{$id}=$raw; + my @rawA=split( /\ /, $raw ); + $IDs{$id}=$rawA[0]; } # 9, power on hours @@ -426,7 +433,7 @@ foreach my $line ( @disks ){ # get the drive serial number, if needed my $disk_id=$name; if ( $useSN ){ - while (`$smartctl -i $disk` =~ /Serial Number:(.*)/g) { + while (`$smartctl -i $disk` =~ /(?i)Serial Number:(.*)/g) { $disk_id = $1; $disk_id =~ s/^\s+|\s+$//g; } From f304b1a2c02fef3b4b3c710bdd1fe0322610acfd Mon Sep 17 00:00:00 2001 From: Dave King Date: Thu, 16 Dec 2021 10:27:16 -0700 Subject: [PATCH 189/332] improve FreeRADIUS stats efficiency (#389) * agent setting needs numeric comparison * reduce cpu and system calls by using sed instead of grep --- snmp/freeradius.sh | 100 +++++++++++++++++++++++++-------------------- 1 file changed, 56 insertions(+), 44 deletions(-) mode change 100644 => 100755 snmp/freeradius.sh diff --git a/snmp/freeradius.sh b/snmp/freeradius.sh old mode 100644 new mode 100755 index 6a0a29fb6..560c75f06 --- a/snmp/freeradius.sh +++ b/snmp/freeradius.sh @@ -18,54 +18,66 @@ fi # Default radclient access request, shouldn't need to be changed RADIUS_STATUS_CMD='Message-Authenticator = 0x00, FreeRADIUS-Statistics-Type = 31, Response-Packet-Type = Access-Accept' -# Pathes for grep and radclient executables, should work if within PATH -BIN_GREP="$(command -v grep)" +# Paths for sed and radclient executables, should work if within PATH +BIN_SED="$(command -v sed)" BIN_RADCLIENT="$(command -v radclient)" -if [ $AGENT == 1 ]; then +if [ $AGENT -eq 1 ]; then echo "<<>>" fi RESULT=$(echo "$RADIUS_STATUS_CMD" | $BIN_RADCLIENT -x $RADIUS_SERVER:$RADIUS_PORT status $RADIUS_KEY) -echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Access-Requests = [[:digit:]]*' -echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Access-Accepts = [[:digit:]]*' -echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Access-Rejects = [[:digit:]]*' -echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Access-Challenges = [[:digit:]]*' -echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Auth-Responses = [[:digit:]]*' -echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Auth-Duplicate-Requests = [[:digit:]]*' -echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Auth-Malformed-Requests = [[:digit:]]*' -echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Auth-Invalid-Requests = [[:digit:]]*' -echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Auth-Dropped-Requests = [[:digit:]]*' -echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Auth-Unknown-Types = [[:digit:]]*' -echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Accounting-Requests = [[:digit:]]*' -echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Accounting-Responses = [[:digit:]]*' -echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Acct-Duplicate-Requests = [[:digit:]]*' -echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Acct-Malformed-Requests = [[:digit:]]*' -echo "$RESULT" | $BIN_GREP -o 
'FreeRADIUS-Total-Acct-Invalid-Requests = [[:digit:]]*' -echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Acct-Dropped-Requests = [[:digit:]]*' -echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Acct-Unknown-Types = [[:digit:]]*' -echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Proxy-Access-Requests = [[:digit:]]*' -echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Proxy-Access-Accepts = [[:digit:]]*' -echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Proxy-Access-Rejects = [[:digit:]]*' -echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Proxy-Access-Challenges = [[:digit:]]*' -echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Proxy-Auth-Responses = [[:digit:]]*' -echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Proxy-Auth-Duplicate-Requests = [[:digit:]]*' -echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Proxy-Auth-Malformed-Requests = [[:digit:]]*' -echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Proxy-Auth-Invalid-Requests = [[:digit:]]*' -echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Proxy-Auth-Dropped-Requests = [[:digit:]]*' -echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Proxy-Auth-Unknown-Types = [[:digit:]]*' -echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Proxy-Accounting-Requests = [[:digit:]]*' -echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Proxy-Accounting-Responses = [[:digit:]]*' -echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Proxy-Acct-Duplicate-Requests = [[:digit:]]*' -echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Proxy-Acct-Malformed-Requests = [[:digit:]]*' -echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Proxy-Acct-Invalid-Requests = [[:digit:]]*' -echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Proxy-Acct-Dropped-Requests = [[:digit:]]*' -echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Proxy-Acct-Unknown-Types = [[:digit:]]*' -echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Queue-Len-Internal = [[:digit:]]*' -echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Queue-Len-Proxy = [[:digit:]]*' -echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Queue-Len-Auth = [[:digit:]]*' -echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Queue-Len-Acct = [[:digit:]]*' -echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Queue-Len-Detail = [[:digit:]]*' -echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Queue-PPS-In = [[:digit:]]*' -echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Queue-PPS-Out = [[:digit:]]*' +if [[ $RESULT != *"Received Access-Accept"* ]] ; then + # A valid result must contain the match string. 
Otherwise, verify: + # - the FreeRADIUS 'status' virtual server is enabled and running + # - the server, port or key match the 'status' server settings + echo "invalid result from radclient status request, check server settings" + exit 1 +fi + +# Return only those AV pairs expected by the FreeRADIUS app, one per line +# Drop any leading or trailing whitespace +# They may be returned in any order +echo "$RESULT" | $BIN_SED -n \ + -e 's/\s*\(FreeRADIUS-Total-Access-Requests = [0-9]*\)/\1/p' \ + -e 's/\s*\(FreeRADIUS-Total-Access-Accepts = [0-9]*\)/\1/p' \ + -e 's/\s*\(FreeRADIUS-Total-Access-Rejects = [0-9]*\)/\1/p' \ + -e 's/\s*\(FreeRADIUS-Total-Access-Challenges = [0-9]*\)/\1/p' \ + -e 's/\s*\(FreeRADIUS-Total-Auth-Responses = [0-9]*\)/\1/p' \ + -e 's/\s*\(FreeRADIUS-Total-Auth-Duplicate-Requests = [0-9]*\)/\1/p' \ + -e 's/\s*\(FreeRADIUS-Total-Auth-Malformed-Requests = [0-9]*\)/\1/p' \ + -e 's/\s*\(FreeRADIUS-Total-Auth-Invalid-Requests = [0-9]*\)/\1/p' \ + -e 's/\s*\(FreeRADIUS-Total-Auth-Dropped-Requests = [0-9]*\)/\1/p' \ + -e 's/\s*\(FreeRADIUS-Total-Auth-Unknown-Types = [0-9]*\)/\1/p' \ + -e 's/\s*\(FreeRADIUS-Total-Accounting-Requests = [0-9]*\)/\1/p' \ + -e 's/\s*\(FreeRADIUS-Total-Accounting-Responses = [0-9]*\)/\1/p' \ + -e 's/\s*\(FreeRADIUS-Total-Acct-Duplicate-Requests = [0-9]*\)/\1/p' \ + -e 's/\s*\(FreeRADIUS-Total-Acct-Malformed-Requests = [0-9]*\)/\1/p' \ + -e 's/\s*\(FreeRADIUS-Total-Acct-Invalid-Requests = [0-9]*\)/\1/p' \ + -e 's/\s*\(FreeRADIUS-Total-Acct-Dropped-Requests = [0-9]*\)/\1/p' \ + -e 's/\s*\(FreeRADIUS-Total-Acct-Unknown-Types = [0-9]*\)/\1/p' \ + -e 's/\s*\(FreeRADIUS-Total-Proxy-Access-Requests = [0-9]*\)/\1/p' \ + -e 's/\s*\(FreeRADIUS-Total-Proxy-Access-Accepts = [0-9]*\)/\1/p' \ + -e 's/\s*\(FreeRADIUS-Total-Proxy-Access-Rejects = [0-9]*\)/\1/p' \ + -e 's/\s*\(FreeRADIUS-Total-Proxy-Access-Challenges = [0-9]*\)/\1/p' \ + -e 's/\s*\(FreeRADIUS-Total-Proxy-Auth-Responses = [0-9]*\)/\1/p' \ + -e 's/\s*\(FreeRADIUS-Total-Proxy-Auth-Duplicate-Requests = [0-9]*\)/\1/p' \ + -e 's/\s*\(FreeRADIUS-Total-Proxy-Auth-Malformed-Requests = [0-9]*\)/\1/p' \ + -e 's/\s*\(FreeRADIUS-Total-Proxy-Auth-Invalid-Requests = [0-9]*\)/\1/p' \ + -e 's/\s*\(FreeRADIUS-Total-Proxy-Auth-Dropped-Requests = [0-9]*\)/\1/p' \ + -e 's/\s*\(FreeRADIUS-Total-Proxy-Auth-Unknown-Types = [0-9]*\)/\1/p' \ + -e 's/\s*\(FreeRADIUS-Total-Proxy-Accounting-Requests = [0-9]*\)/\1/p' \ + -e 's/\s*\(FreeRADIUS-Total-Proxy-Accounting-Responses = [0-9]*\)/\1/p' \ + -e 's/\s*\(FreeRADIUS-Total-Proxy-Acct-Duplicate-Requests = [0-9]*\)/\1/p' \ + -e 's/\s*\(FreeRADIUS-Total-Proxy-Acct-Malformed-Requests = [0-9]*\)/\1/p' \ + -e 's/\s*\(FreeRADIUS-Total-Proxy-Acct-Invalid-Requests = [0-9]*\)/\1/p' \ + -e 's/\s*\(FreeRADIUS-Total-Proxy-Acct-Dropped-Requests = [0-9]*\)/\1/p' \ + -e 's/\s*\(FreeRADIUS-Total-Proxy-Acct-Unknown-Types = [0-9]*\)/\1/p' \ + -e 's/\s*\(FreeRADIUS-Queue-Len-Internal = [0-9]*\)/\1/p' \ + -e 's/\s*\(FreeRADIUS-Queue-Len-Proxy = [0-9]*\)/\1/p' \ + -e 's/\s*\(FreeRADIUS-Queue-Len-Auth = [0-9]*\)/\1/p' \ + -e 's/\s*\(FreeRADIUS-Queue-Len-Acct = [0-9]*\)/\1/p' \ + -e 's/\s*\(FreeRADIUS-Queue-Len-Detail = [0-9]*\)/\1/p' \ + -e 's/\s*\(FreeRADIUS-Queue-PPS-In = [0-9]*\)/\1/p' \ + -e 's/\s*\(FreeRADIUS-Queue-PPS-Out = [0-9]*\)/\1/p' From 38628e3528961fe3bf6170f5d68a72cebd0b999d Mon Sep 17 00:00:00 2001 From: Plamen Vasilev Date: Thu, 16 Dec 2021 19:27:36 +0200 Subject: [PATCH 190/332] fix occasionally random ordering for fail2ban (#388) If $j->canonical(1); need another changes, please fix 
that. I get this from: https://github.com/librenms/librenms-agent/pull/240 --- snmp/fail2ban | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/fail2ban b/snmp/fail2ban index 85640021b..42f29ed63 100644 --- a/snmp/fail2ban +++ b/snmp/fail2ban @@ -157,7 +157,7 @@ sub stats{ } my $j=JSON->new; - + $j->canonical(1); if ( $_[0] ){ $j->pretty(1); return $j->encode( \%toReturn ); From e0f47d499bd5551243ee6339f2c11901b43a3a0b Mon Sep 17 00:00:00 2001 From: Henne Van Och Date: Thu, 13 Jan 2022 18:18:57 +0100 Subject: [PATCH 191/332] Add supervisord script (#392) --- snmp/supervisord.py | 82 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 82 insertions(+) create mode 100644 snmp/supervisord.py diff --git a/snmp/supervisord.py b/snmp/supervisord.py new file mode 100644 index 000000000..4cc8f70fd --- /dev/null +++ b/snmp/supervisord.py @@ -0,0 +1,82 @@ +#!/usr/bin/env python + +import json +import sys + +from supervisor import xmlrpc + +if sys.version_info.major < 3: + from xmlrpclib import ServerProxy +else: + from xmlrpc.client import ServerProxy + +unix_socket_path = "/var/run/supervisor/supervisor.sock" + +error = 0 +error_string = 0 +processes = [] + +total = { + "STOPPED": 0, + "STARTING": 0, + "RUNNING": 0, + "BACKOFF": 0, + "STOPPING": 0, + "EXITED": 0, + "FATAL": 0, + "UNKNOWN": 0, +} + +try: + server = ServerProxy( + "http://127.0.0.1", + transport=xmlrpc.SupervisorTransport(None, None, "unix://" + unix_socket_path), + ) + + state = server.supervisor.getState()["statename"] + + if state != "RUNNING": + error = 1 + error_string = "Not running" + + for process in server.supervisor.getAllProcessInfo(): + if process["statename"] == "RUNNING": + uptime = process["now"] - process["start"] + else: + uptime = process["stop"] - process["start"] + + uptime = 0 if uptime < 0 else uptime + + processes.append( + { + "name": process["name"], + "group": process["group"], + "statename": process["statename"], + "state": process["state"], + "error": process["spawnerr"] if process["spawnerr"] else None, + "start": process["start"], + "stop": process["stop"], + "now": process["now"], + "uptime": uptime, + } + ) + + total[process["statename"]] += 1 + +except Exception as e: + error = 1 + error_string = repr(e) + +print( + json.dumps( + { + "version": 1, + "error": error, + "errorString": error_string, + "data": { + "total": total, + "processes": processes, + }, + } + ) +) From 92f9acbb9fcb4216aed259f615536accf04a7ad6 Mon Sep 17 00:00:00 2001 From: Thermi Date: Sun, 30 Jan 2022 03:19:52 +0100 Subject: [PATCH 192/332] osupdates: on Arch, try to use checkupdates (#391) --- snmp/osupdate | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/snmp/osupdate b/snmp/osupdate index 11a6d9a9b..4a9b568cc 100755 --- a/snmp/osupdate +++ b/snmp/osupdate @@ -24,6 +24,7 @@ BIN_APT='/usr/bin/env apt-get' CMD_APT='-qq -s upgrade' BIN_PACMAN='/usr/bin/env pacman' CMD_PACMAN='-Sup' +BIN_CHECKUPDATES='/usr/bin/env checkupdates' BIN_PKG='/usr/sbin/pkg' CMD_PKG=' audit -q -F' BIN_APK='/sbin/apk' @@ -52,7 +53,17 @@ elif command -v dnf &>/dev/null ; then fi elif command -v pacman &>/dev/null ; then # Arch - UPDATES=$($BIN_PACMAN $CMD_PACMAN | $BIN_WC $CMD_WC) + # calling pacman -Sup does not refresh the package list from the mirrors, + # thus it is not useful to find out if there are updates. Keep the pacman call + # to accomodate users that do not have it. checkupdates is in pacman-contrib. 
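    # Illustrative sample (assumed, not from this commit): checkupdates
    # prints one "name oldver -> newver" line per pending upgrade, e.g.
    #   linux 6.1.1-1 -> 6.1.2-1
    # so its line count is the number of pending updates.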
+	# also enables snmpd to collect this information if it's not run as root
+	if command -v checkupdates &>/dev/null ; then
+		# shellcheck disable=SC2086
+		UPDATES=$($BIN_CHECKUPDATES | $BIN_WC $CMD_WC)
+	else
+		# shellcheck disable=SC2086
+		UPDATES=$($BIN_PACMAN $CMD_PACMAN | $BIN_WC $CMD_WC)
+	fi
 if [ "$UPDATES" -ge 1 ]; then
 echo $(($UPDATES-1));
 else

From c1d5aa0835ec3a35210d0b0a4c41efb9f26d8f6b Mon Sep 17 00:00:00 2001
From: Hans Erasmus
Date: Fri, 25 Feb 2022 19:22:09 +0200
Subject: [PATCH 193/332] Add rocky linux identification (#397)

---
 snmp/distro | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/snmp/distro b/snmp/distro
index a8e9eb0d5..da06f943a 100755
--- a/snmp/distro
+++ b/snmp/distro
@@ -35,6 +35,8 @@ elif [ "${OS}" = "Linux" ] ; then
 DIST="Oracle"
 elif [ -f /etc/rockstor-release ]; then
 DIST="Rockstor"
+ elif [ -f /etc/rocky-release ]; then
+ DIST="Rocky"
 else
 DIST="RedHat"
 fi

From f20cd12155d1e9d84feea479a194649e5822e104 Mon Sep 17 00:00:00 2001
From: Barny Ritchley
Date: Thu, 10 Mar 2022 22:56:23 +0000
Subject: [PATCH 194/332] Update opensip3-stats.sh (#396)

Update to use curl for management information. Reduces load for frequent
polling.

---
 snmp/opensip3-stats.sh | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/snmp/opensip3-stats.sh b/snmp/opensip3-stats.sh
index a3302c6bd..c0d93aeee 100644
--- a/snmp/opensip3-stats.sh
+++ b/snmp/opensip3-stats.sh
@@ -2,9 +2,10 @@
 # Author: Sharad Kumar
 # This script is for OpenSIPS 3.X + version

-total_memory=$(opensips-cli -x mi get_statistics total_size | awk '/shmem:total_size/ { gsub(/[",]/,""); print "Total Memory=" $2}')
-used_memory=$(opensips-cli -x mi get_statistics real_used_size | awk '/shmem:real_used_size/ { gsub(/[",]/,""); print "Used Memory=" $2}')
-free_memory=$(opensips-cli -x mi get_statistics free_size | awk '/shmem:free_size/ { gsub(/[",]/,""); print "Free Memory=" $2}')
+statistics=$(curl -s --header "Content-Type: application/json" -X POST -i http://127.0.0.1:8888/json -d '{"jsonrpc":"2.0","id":1,"method":"get_statistics", "params":[["all"]]}')
+total_memory=$(echo "$statistics" | grep -Po '"shmem:total_size":(\d+)' |awk -F':' '{print $3}')
+used_memory=$(echo "$statistics" | grep -Po '"shmem:used_size":(\d+)' |awk -F':' '{print $3}')
+free_memory=$(echo "$statistics" | grep -Po '"shmem:free_size":(\d+)' |awk -F':' '{print $3}')
 load_average=$(ps -C opensips -o %cpu | awk '{sum += $1} END {print "Load Average=" sum}')
 total_files=$(lsof -c opensips | wc -l)

From 1914bcb1eac47f0f48aaf3789e376e0d8d17fd14 Mon Sep 17 00:00:00 2001
From: "Zane C. Bowers-Hadley"
Date: Sat, 23 Apr 2022 22:59:20 -0500
Subject: [PATCH 195/332] localhost -> 127.0.0.1 to work around a bug where it
 complains about the directory not existing
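(With the host set to localhost, the MySQL client libraries attempt the
local unix socket rather than TCP, so a missing socket directory makes the
connection fail; 127.0.0.1 forces a TCP connection.)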
Bowers-Hadley" Date: Sat, 23 Apr 2022 23:03:53 -0500 Subject: [PATCH 196/332] fix a off by one for nfsstat on new releases --- snmp/fbsdnfsclient | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/snmp/fbsdnfsclient b/snmp/fbsdnfsclient index 7e3d57722..ab5c12694 100644 --- a/snmp/fbsdnfsclient +++ b/snmp/fbsdnfsclient @@ -95,7 +95,7 @@ while( defined( $nfsstatOutputA[$int] ) ){ $nfsstatOutputA[$int]=~s/^ +//; $nfsstatOutputA[$int]=~s/ +/ /g; - if ( $int == 3 ){ + if ( $int == 2 ){ ( $data{Getattr}, $data{Setattr}, @@ -109,7 +109,7 @@ while( defined( $nfsstatOutputA[$int] ) ){ } - if ( $int == 5 ){ + if ( $int == 4 ){ ( $data{Rename}, $data{Link}, @@ -123,7 +123,7 @@ while( defined( $nfsstatOutputA[$int] ) ){ } - if ( $int == 7 ){ + if ( $int == 6 ){ ( $data{Mknod}, $data{Fsstat}, @@ -134,7 +134,7 @@ while( defined( $nfsstatOutputA[$int] ) ){ } - if ( $int == 10 ){ + if ( $int == 9 ){ ( $data{TimedOut}, $data{Invalid}, @@ -145,7 +145,7 @@ while( defined( $nfsstatOutputA[$int] ) ){ } - if ( $int == 13 ){ + if ( $int == 12 ){ ( $data{AttrHits}, $data{AttrMisses}, @@ -159,7 +159,7 @@ while( defined( $nfsstatOutputA[$int] ) ){ } - if ( $int == 15 ){ + if ( $int == 14 ){ ( $data{BioRLHits}, $data{BioRLMisses}, From ed00d088c5a035af6c03b9a35a807e9b1c336afb Mon Sep 17 00:00:00 2001 From: "Shao Yu-Lung (Allen)" Date: Fri, 20 May 2022 06:22:32 +0800 Subject: [PATCH 197/332] fix(dpkg): No such file or directory (#400) --- agent-local/dpkg | 2 ++ 1 file changed, 2 insertions(+) diff --git a/agent-local/dpkg b/agent-local/dpkg index 70917ba72..e89e2a02c 100755 --- a/agent-local/dpkg +++ b/agent-local/dpkg @@ -6,6 +6,8 @@ if [ -x /usr/bin/dpkg-query ]; then DATE=$(date +%s) FILE=/var/cache/librenms/agent-local-dpkg + [ -d /var/cache/librenms ] || mkdir -p /var/cache/librenms + if [ ! -e $FILE ]; then dpkg-query -W --showformat='${Status} ${Package} ${Version} ${Architecture} ${Installed-Size}\n'|grep " installed "|cut -d\ -f4- > $FILE fi From 9c2a35ab362acc0dbd6757075a196f5a0d172c4e Mon Sep 17 00:00:00 2001 From: adamus1red Date: Thu, 19 May 2022 23:24:12 +0100 Subject: [PATCH 198/332] Update GPSD extension to use python3 (#404) Fix the print statements so it works with python3. 
Update the BIN_PYTHON to use `python3` --- snmp/gpsd | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/snmp/gpsd b/snmp/gpsd index 8844bc0a4..4d7f3e40a 100755 --- a/snmp/gpsd +++ b/snmp/gpsd @@ -13,7 +13,7 @@ BIN_GPIPE='/usr/bin/env gpspipe' BIN_GREP='/usr/bin/env grep' -BIN_PYTHON='/usr/bin/env python' +BIN_PYTHON='/usr/bin/env python3' LINES=20 # Check for config file @@ -31,15 +31,15 @@ trap 'rm -f $TMPFILE' 0 2 3 15 $BIN_GPIPE -w -n $LINES > "$TMPFILE" # Parse Temp file for GPSD Data -VERSION=$(cat "$TMPFILE" | $BIN_GREP -m 1 "VERSION" | $BIN_PYTHON -c 'import sys,json;print json.load(sys.stdin)["rev"]') -GPSDMODE=$(cat "$TMPFILE" | $BIN_GREP -m 1 "mode" | $BIN_PYTHON -c 'import sys,json;print json.load(sys.stdin)["mode"]') -HDOP=$(cat "$TMPFILE" | $BIN_GREP -m 1 "hdop" | $BIN_PYTHON -c 'import sys,json;print json.load(sys.stdin)["hdop"]') -VDOP=$(cat "$TMPFILE" | $BIN_GREP -m 1 "vdop" | $BIN_PYTHON -c 'import sys,json;print json.load(sys.stdin)["vdop"]') -LAT=$(cat "$TMPFILE" | $BIN_GREP -m 1 "lat" | $BIN_PYTHON -c 'import sys,json;print json.load(sys.stdin)["lat"]') -LONG=$(cat "$TMPFILE" | $BIN_GREP -m 1 "lon" | $BIN_PYTHON -c 'import sys,json;print json.load(sys.stdin)["lon"]') -ALT=$(cat "$TMPFILE" | $BIN_GREP -m 1 "alt" | $BIN_PYTHON -c 'import sys,json;print json.load(sys.stdin)["alt"]') -SATS=$(cat "$TMPFILE" | $BIN_GREP -m 1 "SKY" | $BIN_PYTHON -c 'import sys,json;print len(json.load(sys.stdin)["satellites"])') -SATSUSED=$(cat "$TMPFILE" | $BIN_GREP -m 1 "SKY" | $BIN_PYTHON -c 'import sys,json;print len([sat for sat in json.load(sys.stdin)["satellites"] if sat["used"]])') +VERSION=$(cat "$TMPFILE" | $BIN_GREP -m 1 "VERSION" | $BIN_PYTHON -c 'import sys,json;print(json.load(sys.stdin)["rev"])') +GPSDMODE=$(cat "$TMPFILE" | $BIN_GREP -m 1 "mode" | $BIN_PYTHON -c 'import sys,json;print(json.load(sys.stdin)["mode"])') +HDOP=$(cat "$TMPFILE" | $BIN_GREP -m 1 "hdop" | $BIN_PYTHON -c 'import sys,json;print(json.load(sys.stdin)["hdop"])') +VDOP=$(cat "$TMPFILE" | $BIN_GREP -m 1 "vdop" | $BIN_PYTHON -c 'import sys,json;print(json.load(sys.stdin)["vdop"])') +LAT=$(cat "$TMPFILE" | $BIN_GREP -m 1 "lat" | $BIN_PYTHON -c 'import sys,json;print(json.load(sys.stdin)["lat"])') +LONG=$(cat "$TMPFILE" | $BIN_GREP -m 1 "lon" | $BIN_PYTHON -c 'import sys,json;print(json.load(sys.stdin)["lon"])') +ALT=$(cat "$TMPFILE" | $BIN_GREP -m 1 "alt" | $BIN_PYTHON -c 'import sys,json;print(json.load(sys.stdin)["alt"])') +SATS=$(cat "$TMPFILE" | $BIN_GREP -m 1 "SKY" | $BIN_PYTHON -c 'import sys,json;print(len(json.load(sys.stdin)["satellites"]))') +SATSUSED=$(cat "$TMPFILE" | $BIN_GREP -m 1 "SKY" | $BIN_PYTHON -c 'import sys,json;print(len([sat for sat in json.load(sys.stdin)["satellites"] if sat["used"]]))') # Output info for SNMP Extend echo '{"data":{"mode":"'"$GPSDMODE"'", "hdop":"'"$HDOP"'", "vdop":"'"$VDOP"'", "latitude":"'"$LAT"'", "longitude":"'"$LONG"'", "altitude":"'"$ALT"'", "satellites":"'"$SATS"'", "satellites_used":"'"$SATSUSED"'"}, "error":"0", "errorString":"", "version":"'"$VERSION"'"}' From a8c0bd7eb4018db1110a8b2a64f3c229cb7d61cf Mon Sep 17 00:00:00 2001 From: Sebastian Heiden Date: Wed, 1 Jun 2022 00:26:08 +0200 Subject: [PATCH 199/332] Provide dhcpd-pools leasefile location (#407) --- snmp/dhcp.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/dhcp.py b/snmp/dhcp.py index 532665dd8..a43b38760 100755 --- a/snmp/dhcp.py +++ b/snmp/dhcp.py @@ -65,7 +65,7 @@ elif "binding state free" in line: leases["free"] += 1 -shell_cmd = 
"dhcpd-pools -s i -A" +shell_cmd = "dhcpd-pools -s i -A -l" + configfile["leasefile"] pool_data = ( subprocess.Popen(shell_cmd, shell=True, stdout=subprocess.PIPE) .stdout.read() From 4e2399e0a55b04c1d00d9eda2d064c7efc80d92f Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Sat, 18 Jun 2022 04:52:37 -0500 Subject: [PATCH 200/332] Elastic/Opensearch SNMP extend (#408) * mostly done, just need to document stuff at the top * finish docs at the top * remove a redundant line * correct spelling of evictions * remove a unused line --- snmp/opensearch | 274 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 274 insertions(+) create mode 100755 snmp/opensearch diff --git a/snmp/opensearch b/snmp/opensearch new file mode 100755 index 000000000..8515136ec --- /dev/null +++ b/snmp/opensearch @@ -0,0 +1,274 @@ +#!/usr/bin/env perl + +#Copyright (c) 2022, Zane C. Bowers-Hadley +#All rights reserved. +# +#Redistribution and use in source and binary forms, with or without modification, +#are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +#ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +#WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +#IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, +#INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +#BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +#LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +#OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +#THE POSSIBILITY OF SUCH DAMAGE. + +=for comment + +Add this to snmpd.conf as below and restart snmpd. + + extend opensearch /etc/snmp/extends/opensearch + +Supported command line options are as below. + + -h The host to connect to. + Default: 127.0.0.1 + -p The port to use. + Default: 9200 + -P Pretty print. + +The last is only really relevant to the usage with SNMP. + +=cut + +use warnings; +use strict; +use Getopt::Std; +use JSON; +use LWP::UserAgent (); + +$Getopt::Std::STANDARD_HELP_VERSION = 1; + +sub main::VERSION_MESSAGE { + print "Elastic/Opensearch SNMP extend 0.0.0\n"; +} + +sub main::HELP_MESSAGE { + print "\n" + . "-h The host to connect to.\n" + . " Default: 127.0.0.1\n" + . "-p The port to use.\n" + . " Default: 9200\n" + . "-P Pretty print.\n"; +} + +my $host = '127.0.0.1'; +my $port = 9200; + +#gets the options +my %opts; +getopts( 'h:p:P', \%opts ); +if ( defined( $opts{h} ) ) { + $host = $opts{h}; +} +if ( defined( $opts{p} ) ) { + $port = $opts{p}; +} + +# +my $to_return = { + error => 0, + errorString => '', + version => 1, + date => {}, +}; + +my $stats_url = 'http://' . $host . ':' . $port . '/_stats'; +my $health_url = 'http://' . $host . ':' . $port . 
'/_cluster/health'; + +my $json = JSON->new->allow_nonref->canonical(1); +if ( $opts{P} ) { + $json->pretty(); +} + +my $ua = LWP::UserAgent->new( timeout => 10 ); + +my $stats_response = $ua->get($stats_url); +my $stats_json; +if ( $stats_response->is_success ) { + eval { $stats_json = decode_json( $stats_response->decoded_content ); }; + if ($@) { + $to_return->{errorString} = 'Failed to decode the JSON from "' . $stats_url . '"... ' . $@; + $to_return->{error} = 2; + print $json->encode($to_return); + if ( !$opts{P} ) { + print "\n"; + } + exit; + } +} +else { + $to_return->{errorString} = 'Failed to get "' . $stats_url . '"... ' . $stats_response->status_line; + $to_return->{error} = 1; + print $json->encode($to_return); + if ( !$opts{P} ) { + print "\n"; + } + exit; +} + +my $health_response = $ua->get($health_url); +my $health_json; +if ( $health_response->is_success ) { + eval { $health_json = decode_json( $health_response->decoded_content ); }; + if ($@) { + $to_return->{errorString} = 'Failed to decode the JSON from "' . $health_url . '"... ' . $@; + $to_return->{error} = 2; + print $json->encode($to_return); + if ( !$opts{P} ) { + print "\n"; + } + exit; + } +} +else { + $to_return->{errorString} = 'Failed to get "' . $health_url . '"... ' . $health_response->status_line; + $to_return->{error} = 1; + print $json->encode($to_return); + if ( !$opts{P} ) { + print "\n"; + } + exit; +} + +# +# process the health json +# +# +$to_return->{data}{cluster_name} = $health_json->{cluster_name}; +$to_return->{data}{c_nodes} = $health_json->{number_of_nodes}; +$to_return->{data}{c_data_nodes} = $health_json->{number_of_data_nodes}; +$to_return->{data}{c_act_pri_shards} = $health_json->{active_primary_shards}; +$to_return->{data}{c_act_shards} = $health_json->{active_shards}; +$to_return->{data}{c_rel_shards} = $health_json->{relocating_shards}; +$to_return->{data}{c_init_shards} = $health_json->{initializing_shards}; +$to_return->{data}{c_delayed_shards} = $health_json->{delayed_unassigned_shards}; +$to_return->{data}{c_pending_tasks} = $health_json->{number_of_pending_tasks}; +$to_return->{data}{c_in_fl_fetch} = $health_json->{number_of_in_flight_fetch}; +$to_return->{data}{c_task_max_in_time} = $health_json->{task_max_waiting_in_queue_millis}; +$to_return->{data}{c_act_shards_perc} = $health_json->{active_shards_percent_as_number}; + +# status color to int, nagious style +# green / ok = 0 +# yellow / warning = 1 +# red / critical = 2 +# unknown = 3 +if ( $health_json->{status} =~ /[Gg][Rr][Ee][Ee][Nn]/ ) { + $to_return->{data}{status} = 0; +} +elsif ( $health_json->{status} =~ /[Yy][Ee][Ll][Ll][Oo][Ww]/ ) { + $to_return->{data}{status} = 1; +} +elsif ( $health_json->{status} =~ /[Rr][Ee][Dd]/ ) { + $to_return->{data}{status} = 2; +} +else { + $to_return->{data}{status} = 3; +} + +# +# process the stats json, sucking stuff in from under _all.total +# +$to_return->{data}{ttl_ops} = $stats_json->{_all}{total}{translog}{operations}; +$to_return->{data}{ttl_size} = $stats_json->{_all}{total}{translog}{size_in_bytes}; +$to_return->{data}{ttl_uncom_ops} = $stats_json->{_all}{total}{translog}{uncommitted_operations}; +$to_return->{data}{ttl_uncom_size} = $stats_json->{_all}{total}{translog}{uncommitted_size_in_bytes}; +$to_return->{data}{ttl_last_mod_age} = $stats_json->{_all}{total}{translog}{earliest_last_modified_age}; + +$to_return->{data}{ti_total} = $stats_json->{_all}{total}{indexing}{index_total}; +$to_return->{data}{ti_time} = $stats_json->{_all}{total}{indexing}{index_time_in_millis}; 
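+# (the *_time_in_millis values are cumulative millisecond counters since node start)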
+$to_return->{data}{ti_failed} = $stats_json->{_all}{total}{indexing}{index_failed}; +$to_return->{data}{ti_del_total} = $stats_json->{_all}{total}{indexing}{delete_total}; +$to_return->{data}{ti_del_time} = $stats_json->{_all}{total}{indexing}{delete_time_in_millis}; +$to_return->{data}{ti_noop_up_total} = $stats_json->{_all}{total}{indexing}{noop_update_total}; +$to_return->{data}{ti_throttled_time} = $stats_json->{_all}{total}{indexing}{throttle_time_in_millis}; + +if ( $stats_json->{_all}{total}{indexing}{is_throttled} eq 'true' ) { + $to_return->{data}{ti_throttled} = 1; +} +else { + $to_return->{data}{ti_throttled} = 0; +} + +$to_return->{data}{ts_q_total} = $stats_json->{_all}{total}{search}{query_total}; +$to_return->{data}{ts_q_time} = $stats_json->{_all}{total}{search}{query_time_in_millis}; +$to_return->{data}{ts_f_total} = $stats_json->{_all}{total}{search}{fetch_total}; +$to_return->{data}{ts_f_time} = $stats_json->{_all}{total}{search}{fetch_time_in_millis}; +$to_return->{data}{ts_sc_total} = $stats_json->{_all}{total}{search}{scroll_total}; +$to_return->{data}{ts_sc_time} = $stats_json->{_all}{total}{search}{scroll_time_in_millis}; +$to_return->{data}{ts_su_total} = $stats_json->{_all}{total}{search}{suggest_total}; +$to_return->{data}{ts_su_time} = $stats_json->{_all}{total}{search}{suggest_time_in_millis}; + +$to_return->{data}{tr_total} = $stats_json->{_all}{total}{refresh}{total}; +$to_return->{data}{tr_time} = $stats_json->{_all}{total}{refresh}{total_time_in_millis}; +$to_return->{data}{tr_ext_total} = $stats_json->{_all}{total}{refresh}{external_total}; +$to_return->{data}{tr_ext_time} = $stats_json->{_all}{total}{refresh}{external_total_time_in_millis}; + +$to_return->{data}{tf_total} = $stats_json->{_all}{total}{flush}{total}; +$to_return->{data}{tf_periodic} = $stats_json->{_all}{total}{flush}{periodic}; +$to_return->{data}{tf_time} = $stats_json->{_all}{total}{flush}{total_time_in_millis}; + +$to_return->{data}{tqc_size} = $stats_json->{_all}{total}{query_cache}{memory_size_in_bytes}; +$to_return->{data}{tqc_total} = $stats_json->{_all}{total}{query_cache}{total_count}; +$to_return->{data}{tqc_hit} = $stats_json->{_all}{total}{query_cache}{hit_count}; +$to_return->{data}{tqc_miss} = $stats_json->{_all}{total}{query_cache}{miss_count}; +$to_return->{data}{tqc_miss} = $stats_json->{_all}{total}{query_cache}{miss_count}; +$to_return->{data}{tqc_cache_size} = $stats_json->{_all}{total}{query_cache}{cache_size}; +$to_return->{data}{tqc_cache_count} = $stats_json->{_all}{total}{query_cache}{cache_count}; +$to_return->{data}{tqc_evictions} = $stats_json->{_all}{total}{query_cache}{evictions}; + +$to_return->{data}{tg_total} = $stats_json->{_all}{total}{get}{total}; +$to_return->{data}{tg_time} = $stats_json->{_all}{total}{get}{time_in_millis}; +$to_return->{data}{tg_exists_total} = $stats_json->{_all}{total}{get}{exists_total}; +$to_return->{data}{tg_exists_time} = $stats_json->{_all}{total}{get}{exists_time_in_millis}; +$to_return->{data}{tg_missing_total} = $stats_json->{_all}{total}{get}{missing_total}; +$to_return->{data}{tg_missing_time} = $stats_json->{_all}{total}{get}{missing_time_in_millis}; + +$to_return->{data}{tm_total} = $stats_json->{_all}{total}{merges}{total}; +$to_return->{data}{tm_time} = $stats_json->{_all}{total}{merges}{total_time_in_millis}; +$to_return->{data}{tm_docs} = $stats_json->{_all}{total}{merges}{total_docs}; +$to_return->{data}{tm_size} = $stats_json->{_all}{total}{merges}{total_size_in_bytes}; +$to_return->{data}{tm_throttled_time} = 
$stats_json->{_all}{total}{merges}{total_throttled_time_in_millis}; +$to_return->{data}{tm_throttled_size} = $stats_json->{_all}{total}{merges}{total_auto_throttle_in_bytes}; + +$to_return->{data}{tw_total} = $stats_json->{_all}{total}{warmer}{total}; +$to_return->{data}{tw_time} = $stats_json->{_all}{total}{warmer}{total_time_in_millis}; + +$to_return->{data}{tfd_size} = $stats_json->{_all}{total}{fielddata}{memory_size_in_bytes}; +$to_return->{data}{tfd_evictions} = $stats_json->{_all}{total}{fielddata}{evictions}; + +$to_return->{data}{tseg_count} = $stats_json->{_all}{total}{segments}{count}; +$to_return->{data}{tseg_size} = $stats_json->{_all}{total}{segments}{memory_in_bytes}; +$to_return->{data}{tseg_terms_size} = $stats_json->{_all}{total}{segments}{terms_memory_in_bytes}; +$to_return->{data}{tseg_fields_size} = $stats_json->{_all}{total}{segments}{stored_fields_memory_in_bytes}; +$to_return->{data}{tseg_tvector_size} = $stats_json->{_all}{total}{segments}{term_vectors_memory_in_bytes}; +$to_return->{data}{tseg_norms_size} = $stats_json->{_all}{total}{segments}{norms_memory_in_bytes}; +$to_return->{data}{tseg_points_size} = $stats_json->{_all}{total}{segments}{points_memory_in_bytes}; +$to_return->{data}{tseg_docval_size} = $stats_json->{_all}{total}{segments}{doc_values_memory_in_bytes}; +$to_return->{data}{tseg_indwrt_size} = $stats_json->{_all}{total}{segments}{index_writer_memory_in_bytes}; +$to_return->{data}{tseg_vermap_size} = $stats_json->{_all}{total}{segments}{version_map_memory_in_bytes}; +$to_return->{data}{tseg_fbs_size} = $stats_json->{_all}{total}{segments}{fixed_bit_set_memory_in_bytes}; + +$to_return->{data}{trc_size} = $stats_json->{_all}{total}{request_cache}{memory_size_in_bytes}; +$to_return->{data}{trc_evictions} = $stats_json->{_all}{total}{request_cache}{evictions}; +$to_return->{data}{trc_hits} = $stats_json->{_all}{total}{request_cache}{hit_count}; +$to_return->{data}{trc_misses} = $stats_json->{_all}{total}{request_cache}{miss_count}; + +$to_return->{data}{tst_size} = $stats_json->{_all}{total}{store}{size_in_bytes}; +$to_return->{data}{tst_res_size} = $stats_json->{_all}{total}{store}{reserved_in_bytes}; + +print $json->encode($to_return); +if ( !$opts{P} ) { + print "\n"; +} +exit 0; From 7cbf74e9ebfa2401c10294645d82fc13b88d2fc3 Mon Sep 17 00:00:00 2001 From: Trae Santiago <249409+Trae32566@users.noreply.github.com> Date: Sat, 18 Jun 2022 04:58:33 -0500 Subject: [PATCH 201/332] Replaced mdadm script with a newer, more flexible version (#401) * Replaced mdadm script with a newer, more flexible version * added basic error handling, and conditionals for missing array * added fallback json squashing code if jq is missing * fixed comments and shellcheck * spacing --- snmp/mdadm | 202 ++++++++++++++++++++++------------------------------- 1 file changed, 84 insertions(+), 118 deletions(-) diff --git a/snmp/mdadm b/snmp/mdadm index 5e820c808..57628f698 100755 --- a/snmp/mdadm +++ b/snmp/mdadm @@ -1,120 +1,86 @@ -#!/bin/bash - -CAT=/bin/cat -LS=/bin/ls -BASENAME=/usr/bin/basename -REALPATH=/usr/bin/realpath - -CONFIGFILE=/etc/snmp/mdadm.conf -if [ -f $CONFIGFILE ] ; then - # shellcheck disable=SC1090 - . 
$CONFIGFILE -fi - -VERSION=1 -ERROR_CODE=0 -ERROR_STRING="" - -OUTPUT_DATA='[' - -# use 'ls' command to check if md blocks exist -if $LS /dev/md?* 1> /dev/null 2>&1 ; then - for ARRAY_BLOCKDEVICE in $($LS -1 /dev/md?*) ; do - RAID="/sys/block/"$($BASENAME "$($REALPATH "$ARRAY_BLOCKDEVICE")") - - # ignore arrays with no slaves - if [ -z "$($LS -1 "$RAID"/slaves 2> /dev/null)" ] ; then - continue +#!/usr/bin/env bash +# MDADM SNMP extension for LibreNMS +# Version +extendVer='2.0.0' +# Initial portion of json +mdadmSNMPOutput='{ "data": [' + +# Outputs a list of devices +list_devices() { + for device in "${1}/slaves/"*; do + if [ "${2,,}" == 'count' ]; then + ((devCount++)) + elif [ "${2,,}" != 'missing' ] || [ ! -e "${device}" ]; then + printf '%b\t "%s"' "${multiDisk}" "$(basename "${device}")" + multiDisk=',\n' fi - # ignore "non existing" arrays - if [ ! -f "$RAID/md/degraded" ] ; then - continue - fi - - if [[ $($BASENAME "$ARRAY_BLOCKDEVICE") = [[:digit:]] ]] ; then - RAID_NAME=$($BASENAME "$RAID") - else - RAID_NAME=$($BASENAME "$ARRAY_BLOCKDEVICE") - fi - RAID_DEV_LIST=$($LS "$RAID"/slaves/) - RAID_LEVEL=$($CAT "$RAID"/md/level) - RAID_DISC_COUNT=$($CAT "$RAID"/md/raid_disks| cut -d' ' -f1) - RAID_STATE=$($CAT "$RAID"/md/array_state) - RAID_ACTION=$($CAT "$RAID"/md/sync_action) - RAID_DEGRADED=$($CAT "$RAID"/md/degraded) - - if [ "$RAID_SYNC_SPEED" = "none" ] ; then - RAID_SYNC_SPEED=0 - else - let "RAID_SYNC_SPEED=$($CAT "$RAID"/md/sync_speed)*1024" - fi - - if [ "$($CAT "$RAID"/md/sync_completed)" != "none" ] ; then - let "RAID_SYNC_COMPLETED=100*$($CAT "$RAID"/md/sync_completed)" - elif [ "$RAID_DEGRADED" -eq 1 ] ; then - RAID_SYNC_COMPLETED=0 - else - RAID_SYNC_COMPLETED=100 - fi - - # divide with 2 to size like in /proc/mdstat - # and multiply with 1024 to get size in bytes - let "RAID_SIZE=$($CAT "$RAID"/size)*1024/2" - - RAID_DEVICE_LIST='[' - ALL_DEVICE_COUNT=0 - for D in $RAID_DEV_LIST ; do - RAID_DEVICE_LIST=$RAID_DEVICE_LIST'"'$D'",' - let "ALL_DEVICE_COUNT+=1" - done - if [ ${#RAID_DEVICE_LIST} -gt 3 ] ; then - RAID_DEVICE_LIST=${RAID_DEVICE_LIST: : -1} - fi - RAID_DEVICE_LIST=$RAID_DEVICE_LIST']' - - RAID_MISSING_DEVICES='[' - for D in $RAID_DEV_LIST ; do - if [ -L "$RAID"/slaves/"$D" ] && [ -f "$RAID"/slaves/"$D" ] ; then - RAID_MISSING_DEVICES=$RAID_MISSING_DEVICES'"'$D'",' - fi - done - if [ ${#RAID_MISSING_DEVICES} -gt 3 ] ; then - RAID_MISSING_DEVICES=${RAID_MISSING_DEVICES: : -1} - fi - RAID_MISSING_DEVICES=$RAID_MISSING_DEVICES']' - - let "RAID_HOTSPARE_COUNT=ALL_DEVICE_COUNT-RAID_DISC_COUNT" - if [ "$RAID_HOTSPARE_COUNT" -lt 0 ] ; then - RAID_HOTSPARE_COUNT=0 - fi - - ARRAY_DATA='{'\ -'"name":"'$RAID_NAME\ -'","level":"'$RAID_LEVEL\ -'","size":"'$RAID_SIZE\ -'","disc_count":"'$RAID_DISC_COUNT\ -'","hotspare_count":"'$RAID_HOTSPARE_COUNT\ -'","device_list":'$RAID_DEVICE_LIST\ -',"missing_device_list":'$RAID_MISSING_DEVICES\ -',"state":"'$RAID_STATE\ -'","action":"'$RAID_ACTION\ -'","degraded":"'$RAID_DEGRADED\ -'","sync_speed":"'$RAID_SYNC_SPEED\ -'","sync_completed":"'$RAID_SYNC_COMPLETED\ -'"},' - - OUTPUT_DATA=$OUTPUT_DATA$ARRAY_DATA done - - OUTPUT_DATA=${OUTPUT_DATA: : -1}']' -else - OUTPUT_DATA=${OUTPUT_DATA}']' -fi - -OUTPUT='{"data":'$OUTPUT_DATA\ -',"error":"'$ERROR_CODE\ -'","errorString":"'$ERROR_STRING\ -'","version":"'$VERSION'"}' - -echo "$OUTPUT" - + [ "${devCount}" ] && echo "${devCount}" +} + +# Outputs either 0, 100, or the value of the file referenced +maybe_get() { + if [ -f "${1}" ] && [ "$(cat "${1}")" != 'none' ]; then + cat "${1}" + else + 
echo 0
+	fi
+}
+
+main() {
+	if ! which 'jq' > /dev/null 2>&1; then
+		errorCode=1
+		# The underscore here is a hack since we have to strip spaces without jq
+		errorString='jq_missing!'
+	elif stat "/dev/md"[[:digit:]]* > /dev/null 2>&1; then
+		for mdadmArray in "/dev/md"[[:digit:]]*; do
+			# Ignore partitions
+			[[ "${mdadmArray}" =~ '/dev/md'[[:digit:]]+'p' ]] && continue
+
+			mdadmName="$(basename "$(realpath "${mdadmArray}")")"
+			mdadmSysDev="/sys/block/${mdadmName}"
+
+			read -r -d '' mdadmOutput < /dev/null || sed 's/\s//g' <<< "${mdadmSNMPOutput//$'\n'/}${metadataOutput//$'\n'/}"
+}
+
+main "${@}"

From bafb379e14dd5a80894367461e15ebd53c837605 Mon Sep 17 00:00:00 2001
From: Thermi
Date: Sat, 18 Jun 2022 11:59:19 +0200
Subject: [PATCH 202/332] osupdates: unpriv implementation alternative (#395)

---
 snmp/unpriv/osupdates/Readme.md | 9 ++
 .../librenms-osupdates-generate.service | 8 ++
 .../librenms-osupdates-generate.timer | 11 ++
 .../osupdates/osupdates-unpriv-gather.sh | 11 ++
 .../osupdates/osupdates-unpriv-generate.sh | 115 ++++++++++++++++++
 5 files changed, 154 insertions(+)
 create mode 100644 snmp/unpriv/osupdates/Readme.md
 create mode 100644 snmp/unpriv/osupdates/librenms-osupdates-generate.service
 create mode 100644 snmp/unpriv/osupdates/librenms-osupdates-generate.timer
 create mode 100644 snmp/unpriv/osupdates/osupdates-unpriv-gather.sh
 create mode 100644 snmp/unpriv/osupdates/osupdates-unpriv-generate.sh

diff --git a/snmp/unpriv/osupdates/Readme.md b/snmp/unpriv/osupdates/Readme.md
new file mode 100644
index 000000000..d778a5d04
--- /dev/null
+++ b/snmp/unpriv/osupdates/Readme.md
@@ -0,0 +1,9 @@
+# osupdates
+
+## Installation
+
+1. Copy shell scripts into /usr/local/bin/
+2. Make them executable
+3. Copy timer and service unit into /etc/systemd/system/
+4. Activate timer (`systemctl enable --now librenms-osupdates-generate.timer`)
+5. Set `extend osupdate /usr/local/bin/osupdates-unpriv-gather.sh` in `/etc/snmp/snmpd.conf`

diff --git a/snmp/unpriv/osupdates/librenms-osupdates-generate.service b/snmp/unpriv/osupdates/librenms-osupdates-generate.service
new file mode 100644
index 000000000..238e2e586
--- /dev/null
+++ b/snmp/unpriv/osupdates/librenms-osupdates-generate.service
@@ -0,0 +1,8 @@
+# librenms-osupdates-generate.service
+
+[Unit]
+Description=generate osupdates information
+
+[Service]
+ExecStart=/usr/local/bin/osupdates-unpriv-generate.sh
+

diff --git a/snmp/unpriv/osupdates/librenms-osupdates-generate.timer b/snmp/unpriv/osupdates/librenms-osupdates-generate.timer
new file mode 100644
index 000000000..e40fb7e37
--- /dev/null
+++ b/snmp/unpriv/osupdates/librenms-osupdates-generate.timer
@@ -0,0 +1,11 @@
+# librenms-osupdates-generate.timer
+
+[Unit]
+Description=generates osupdates information hourly
+
+[Timer]
+OnCalendar=hourly
+Persistent=true
+
+[Install]
+WantedBy=timers.target

diff --git a/snmp/unpriv/osupdates/osupdates-unpriv-gather.sh b/snmp/unpriv/osupdates/osupdates-unpriv-gather.sh
new file mode 100644
index 000000000..a337c5981
--- /dev/null
+++ b/snmp/unpriv/osupdates/osupdates-unpriv-gather.sh
@@ -0,0 +1,11 @@
+#!/bin/bash
+
+SNMP_PERSISTENT_DIR="$(net-snmp-config --persistent-directory)"
+UNPRIV_SHARED_FILE="$SNMP_PERSISTENT_DIR/osupdates/stats.txt"
+
+if [ -f "$UNPRIV_SHARED_FILE" ]; then
+	cat "$UNPRIV_SHARED_FILE"
+else
+	echo "0"
+	logger -p daemon.error -t "osupdates-unpriv" Reading osupdate data from file "$UNPRIV_SHARED_FILE" failed!
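+	# (the systemd timer runs osupdates-unpriv-generate.sh hourly to populate this file)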
+fi

diff --git a/snmp/unpriv/osupdates/osupdates-unpriv-generate.sh b/snmp/unpriv/osupdates/osupdates-unpriv-generate.sh
new file mode 100644
index 000000000..08a6bca44
--- /dev/null
+++ b/snmp/unpriv/osupdates/osupdates-unpriv-generate.sh
@@ -0,0 +1,115 @@
+#!/usr/bin/env bash
+################################################################
+# copy this script to /etc/snmp/ and make it executable: #
+# chmod +x /etc/snmp/osupdate #
+# ------------------------------------------------------------ #
+# edit your snmpd.conf and include: #
+# extend osupdate /etc/snmp/osupdate #
+#--------------------------------------------------------------#
+# restart snmpd and activate the app for desired host #
+#--------------------------------------------------------------#
+# please make sure you have the path/binaries below #
+################################################################
+BIN_WC='/usr/bin/env wc'
+BIN_GREP='/usr/bin/env grep'
+CMD_GREP='-c'
+CMD_WC='-l'
+BIN_ZYPPER='/usr/bin/env zypper'
+CMD_ZYPPER='-q lu'
+BIN_YUM='/usr/bin/env yum'
+CMD_YUM='-q check-update'
+BIN_DNF='/usr/bin/env dnf'
+CMD_DNF='-q check-update'
+BIN_APT='/usr/bin/env apt-get'
+CMD_APT='-qq -s upgrade'
+BIN_PACMAN='/usr/bin/env pacman'
+CMD_PACMAN='-Sup'
+BIN_CHECKUPDATES='/usr/bin/env checkupdates'
+BIN_PKG='/usr/sbin/pkg'
+CMD_PKG=' audit -q -F'
+BIN_APK='/sbin/apk'
+CMD_APK=' version'
+SNMP_PERSISTENT_DIR="$(net-snmp-config --persistent-directory)"
+UNPRIV_SHARED_FILE="$SNMP_PERSISTENT_DIR/osupdates/stats.txt"
+
+mkdir -p "$(dirname "$UNPRIV_SHARED_FILE" )"
+exec > "$UNPRIV_SHARED_FILE"
+
+################################################################
+# Don't change anything unless you know what you are doing #
+################################################################
+if command -v zypper &>/dev/null ; then
+	# OpenSUSE
+	# shellcheck disable=SC2086
+	UPDATES=$($BIN_ZYPPER $CMD_ZYPPER | $BIN_WC $CMD_WC)
+	if [ "$UPDATES" -ge 2 ]; then
+		echo $(($UPDATES-2));
+	else
+		echo "0";
+	fi
+elif command -v dnf &>/dev/null ; then
+	# Fedora
+	# shellcheck disable=SC2086
+	UPDATES=$($BIN_DNF $CMD_DNF | $BIN_WC $CMD_WC)
+	if [ "$UPDATES" -ge 1 ]; then
+		echo $(($UPDATES-1));
+	else
+		echo "0";
+	fi
+elif command -v pacman &>/dev/null ; then
+	# Arch
+	# calling pacman -Sup does not refresh the package list from the mirrors,
+	# thus it is not useful to find out if there are updates. Keep the pacman call
+	# to accommodate users that do not have it. checkupdates is in pacman-contrib.
+ # also enables snmpd to collect this information if it's not run as root + if command -v checkupdates &>/dev/null ; then + # shellcheck disable=SC2086 + UPDATES=$($BIN_CHECKUPDATES | $BIN_WC $CMD_WC) + else + # shellcheck disable=SC2086 + UPDATES=$($BIN_PACMAN $CMD_PACMAN | $BIN_WC $CMD_WC) + fi + if [ "$UPDATES" -ge 1 ]; then + echo $(($UPDATES-1)); + else + echo "0"; + fi +elif command -v yum &>/dev/null ; then + # CentOS / Redhat + # shellcheck disable=SC2086 + UPDATES=$($BIN_YUM $CMD_YUM | $BIN_WC $CMD_WC) + if [ "$UPDATES" -ge 1 ]; then + echo $(($UPDATES-1)); + else + echo "0"; + fi +elif command -v apt-get &>/dev/null ; then + # Debian / Devuan / Ubuntu + # shellcheck disable=SC2086 + UPDATES=$($BIN_APT $CMD_APT | $BIN_GREP $CMD_GREP 'Inst') + if [ "$UPDATES" -ge 1 ]; then + echo "$UPDATES"; + else + echo "0"; + fi +elif command -v pkg &>/dev/null ; then + # FreeBSD + # shellcheck disable=SC2086 + UPDATES=$($BIN_PKG $CMD_PKG | $BIN_WC $CMD_WC) + if [ "$UPDATES" -ge 1 ]; then + echo "$UPDATES"; + else + echo "0"; + fi +elif command -v apk &>/dev/null ; then + # Alpine + # shellcheck disable=SC2086 + UPDATES=$($BIN_APK $CMD_APK | $BIN_WC $CMD_WC) + if [ "$UPDATES" -ge 2 ]; then + echo $(($UPDATES-1)); + else + echo "0"; + fi +else + echo "0"; +fi From 027e8480693ddfb98e5093163c68bbc84371c654 Mon Sep 17 00:00:00 2001 From: SourceDoctor Date: Sun, 19 Jun 2022 13:38:41 +0200 Subject: [PATCH 203/332] mdadm sync_complete fix (#409) --- snmp/mdadm | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/snmp/mdadm b/snmp/mdadm index 57628f698..28f351382 100755 --- a/snmp/mdadm +++ b/snmp/mdadm @@ -40,7 +40,16 @@ main() { mdadmName="$(basename "$(realpath "${mdadmArray}")")" mdadmSysDev="/sys/block/${mdadmName}" + degraded=$(maybe_get "${mdadmSysDev}/md/degraded") + syncSpeed=$(($(maybe_get "${mdadmSysDev}/md/sync_speed") * 1024)) + + syncCompleted=$(maybe_get "${mdadmSysDev}/md/sync_completed") + if [ $syncCompleted -eq 0 ] && [ $degraded -eq 0 ] && [ $syncSpeed -eq 0 ]; then + syncCompleted="100" + fi + read -r -d '' mdadmOutput < Date: Sun, 10 Jul 2022 11:51:11 +0200 Subject: [PATCH 204/332] Add support for PhotonOS's tdnf. (#411) Example output: root [ /home/ives ]# tdnf -q check-update Linux-PAM.x86_64 1.4.0-5.ph4 photon-updates cloud-init.noarch 22.2.2-1.ph4 photon-updates curl.x86_64 7.83.1-2.ph4 photon-updates curl-libs.x86_64 7.83.1-2.ph4 photon-updates openssl.x86_64 3.0.3-3.ph4 photon-updates So a simple line count should suffice. Output of the script: root [ /home/ives ]# /bin/docker-osupdate 5 --- snmp/osupdate | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/snmp/osupdate b/snmp/osupdate index 4a9b568cc..87e16873f 100755 --- a/snmp/osupdate +++ b/snmp/osupdate @@ -20,6 +20,8 @@ BIN_YUM='/usr/bin/env yum' CMD_YUM='-q check-update' BIN_DNF='/usr/bin/env dnf' CMD_DNF='-q check-update' +BIN_TDNF='/usr/bin/env tdnf' +CMD_TDNF='-q check-update' BIN_APT='/usr/bin/env apt-get' CMD_APT='-qq -s upgrade' BIN_PACMAN='/usr/bin/env pacman' @@ -51,6 +53,15 @@ elif command -v dnf &>/dev/null ; then else echo "0"; fi +elif command -v tdnf &>/dev/null ; then + # PhotonOS + # shellcheck disable=SC2086 + UPDATES=$($BIN_TDNF $CMD_TDNF | $BIN_WC $CMD_WC) + if [ "$UPDATES" -ge 1 ]; then + echo "$UPDATES"; + else + echo "0"; + fi elif command -v pacman &>/dev/null ; then # Arch # calling pacman -Sup does not refresh the package list from the mirrors, From 77c45f82043c3c58ab89923ea7fe37bfcd3bb64c Mon Sep 17 00:00:00 2001 From: "Zane C. 
Bowers-Hadley" Date: Thu, 14 Jul 2022 14:59:03 -0500 Subject: [PATCH 205/332] add extend for polling/monitoring CAPEv2 (#412) --- snmp/cape | 448 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 448 insertions(+) create mode 100755 snmp/cape diff --git a/snmp/cape b/snmp/cape new file mode 100755 index 000000000..d3748ad64 --- /dev/null +++ b/snmp/cape @@ -0,0 +1,448 @@ +#!/usr/bin/env perl + +#Copyright (c) 2022, Zane C. Bowers-Hadley +#All rights reserved. +# +#Redistribution and use in source and binary forms, with or without modification, +#are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +#ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +#WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +#IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, +#INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +#BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +#LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +#OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +#THE POSSIBILITY OF SUCH DAMAGE. + +=for comment + +Add this to snmpd.conf as below and restart snmpd. + + extend cape /etc/snmp/extends/cape + +Supported command line options are as below. + + -c Config INI file. + Default: /usr/local/etc/cape_extend.ini + +The defeault setttings are... + + # DBI connection DSN + dsn=dbi:Pg:dbname=cape + + # DB user + user=cape + + # DB PW + pass= + + # CAPEv2 cuckoo log file + clog=/opt/CAPEv2/log/cuckoo.log + + # CAPEv2 process log file + plog=/opt/CAPEv2/log/process.log + + # 0/1 for if it is okay for the process log to not exist + # this enables it to work with cuckoo as well as CAPEv2 + mplogok=1 + + # list of ignores + ignores=/usr/local/etc/cape_extend.ignores + + # send errors along for inclusion in the event log + sendErrors=1 + + # send criticals along for inclusion in the event log + sendCriticals=1 + + # send warnings along for inclusion in the event log + sendWarnings= 1 + + # don't use analysis_started_on. analysis_finished_on. processing_started_on, + # processing_finished_on, signatures_started_on, signatures_finished_on, + # reporting_started_on, or reporting_finished_on with the SQL statement + # + # This is specifically for supporting ancient cuckoo instances. + cuckoosql=0 + +The ignores file will only be used if it exists. The format is as below. + + + +This the ignore level will be lower cased. The seperator bween the level and +the regexp pattern is /[\ \t]+/. So if you want to ignore the two warnings +generated when VM traffic is dropped, you would use the two lines such as below. 
+ + WARNING PCAP file does not exist at path + WARNING Unable to Run Suricata: Pcap file + +=cut + +use strict; +use warnings; +use Getopt::Long; +use File::ReadBackwards; +use JSON; +use Config::Tiny; +use DBI; +use Time::Piece; +use File::Slurp; + +sub version { + print "cape v. 0.0.1\n"; +} + +sub help { + &version; + + print ' + +-c Config INI file. + Default: /usr/local/etc/cape_extend.ini +'; +} + +# get the commandline options +my $help = 0; +my $version = 0; +my $ini_file = '/usr/local/etc/cape_extend.ini'; +Getopt::Long::Configure('no_ignore_case'); +Getopt::Long::Configure('bundling'); +GetOptions( + 'version' => \$version, + 'v' => \$version, + 'help' => \$help, + 'h' => \$help, + 'i=s' => \$ini_file, +); + +# print version or help if requested +if ($help) { + &help; + exit 42; +} +if ($version) { + &version; + exit 42; +} + +# time +my $current_time = time; +my $target_time = $current_time - 300; + +my $return_json = { + data => { + error => 0, + errors => [], + info => 0, + debug => 0, + warning => 0, + warnigns => [], + critical => 0, + criticals => [], + banned => 0, + pending => 0, + running => 0, + completed => 0, + distributed => 0, + reported => 0, + recovered => 0, + failed_analysis => 0, + failed_processing => 0, + failed_reporting => 0, + packages => {}, + dropped_files => 0, + running_processes => 0, + api_calls => 0, + domains => 0, + signatures_total => 0, + signatures_alert => 0, + files_written => 0, + registry_keys_modified => 0, + crash_issues => 0, + anti_issues => 0, + timedout => 0, + pkg_stats => {}, + total_tasks => 0, + }, + error => 0, + errorString => '', + version => 1, +}; + +# used for checking if the level value is somethingw understand +my $level_check = { info => 1, debug => 1, error => 1, warning => 1, critical => 1 }; + +# read the config and put together the defaults +my $defaults = { + dsn => 'dbi:Pg:dbname=cape', + user => 'cape', + pass => '', + clog => '/opt/CAPEv2/log/cuckoo.log', + plog => '/opt/CAPEv2/log/process.log', + mplogok => 1, + ignores => '/usr/local/etc/cape_extend.ignores', + sendErrors => 1, + sendCriticals => 1, + sendWarnings => 1, + cuckoosql => 0, +}; +my $config = Config::Tiny->read( $ini_file, 'utf8' ); +if ( !defined($config) ) { + $config = $defaults; +} +else { + $config = $config->{_}; + + # reel in the defaults + foreach my $default_key ( keys( %{$defaults} ) ) { + if ( !defined( $config->{$default_key} ) ) { + $config->{$default_key} = $defaults->{$default_key}; + } + } +} + +# read in the ignore file +my $ignores = { info => [], debug => [], error => [], warning => [], critical => [] }; +if ( -f $config->{ignores} ) { + my $ignore_raw = read_file( $config->{ignores} ); + my @ignore_split = grep( !/^[\ \t]*$/, grep( !/^[\ \t]*\#/, split( /\n/, $ignore_raw ) ) ); + foreach my $to_ignore (@ignore_split) { + my ( $ignore_level, $pattern ) = split( /[\ \t]+/, $to_ignore, 2 ); + if ( defined($ignore_level) and defined($pattern) ) { + $ignore_level = lc($ignore_level); + push( @{ $ignores->{$ignore_level} }, $pattern ); + } + } +} + +# put together the list of logs to read +my @logs; +if ( !-f $config->{clog} ) { + $return_json->{error} = '"' . $defaults->{clog} . '" does not exist'; +} +else { + push( @logs, $config->{clog} ); +} +if ( !-f $config->{plog} && !$config->{mplogok} ) { + $return_json->{error} = '"' . $defaults->{clog} . 
'" does not exist'; +} +else { + push( @logs, $config->{plog} ); +} + +my $process_loop = 0; +my $process_logs = 1; +while ( $process_logs && defined( $logs[$process_loop] ) ) { + my $log = $logs[$process_loop]; + + my $bw; + eval { $bw = File::ReadBackwards->new($log); }; + + my $continue = 1; + my $current_entry = ''; + while ( defined($bw) && defined( my $log_line = $bw->readline ) && $continue ) { + $current_entry = $log_line . $current_entry; + if ( $current_entry + =~ /^20[0-9][0-9]\-[01][0-9]\-[0-3][0-9]\ [0-2][0-9]\:[0-5][0-9]\:[0-5][0-9]\,[0-9]+\ \[[a-z-A-Z\.0-9\_\-]+\]\ [a-zA-Z]+\:/ + ) + { + # parse it and blank it for when we get to the next one. + my ( $date, $time, $lib, $level, $entry ) = split( /[\ \t]+/, $current_entry, 5 ); + $current_entry = ''; + + # chomp off the seconds place after the , + $time =~ s/\,.*//; + my $t = Time::Piece->strptime( $date . 'T' . $time, '%Y-%m-%dT%H:%M:%S' ); + + if ( $t->epoch <= $target_time ) { + $continue = 0; + } + else { + $level = lc($level); + $level =~ s/\://; + if ( defined( $level_check->{$level} ) ) { + my $add_it = 1; + my $ignore_int = 0; + foreach ( @{ $ignores->{$level} } ) { + my $test = $_; + if ( $entry =~ /$test/ ) { + $add_it = 0; + } + $ignore_int++; + } + if ($add_it) { + $return_json->{data}->{$level}++; + if ( $level eq 'error' and $config->{sendErrors} ) { + push( @{ $return_json->{data}->{errors} }, $entry ); + } + elsif ( $level eq 'warning' and $config->{sendWarnings} ) { + push( @{ $return_json->{data}->{warnings} }, $entry ); + } + elsif ( $level eq 'criticals' and $config->{sendCriticals} ) { + push( @{ $return_json->{data}->{criticals} }, $entry ); + } + } + } + } + } + } + + $process_loop++; +} + +my $query; +if ( $config->{dsn} =~ /^[Dd][Bb][Ii]:[Mm]ysql/ ) { + $query + = "select status,package from tasks where ( added_on > FROM_UNIXTIME('" + . $target_time + . "')) or " + . "( started_on > FROM_UNIXTIME('" + . $target_time + . "')) or " + . "( completed_on > FROM_UNIXTIME('" + . $target_time . "')); "; +} +else { + $query + = "select status,package,dropped_files,running_processes,api_calls,domains,signatures_total,signatures_alert,files_written,registry_keys_modified,crash_issues,anti_issues,timedout from tasks where ( added_on > CURRENT_TIMESTAMP - interval '5 minutes' ) or " + . "( started_on > CURRENT_TIMESTAMP - interval '5 minutes' ) or " + . "( completed_on > CURRENT_TIMESTAMP - interval '5 minutes' )"; + if ( !$config->{cuckoosql} ) { + $query + = $query + . " or ( analysis_started_on > CURRENT_TIMESTAMP - interval '5 minutes' ) or " + . "(analysis_finished_on > CURRENT_TIMESTAMP - interval '5 minutes' ) or " + . "( processing_started_on > CURRENT_TIMESTAMP - interval '5 minutes' ) or " + . "( processing_finished_on > CURRENT_TIMESTAMP - interval '5 minutes' ) or " + . "( signatures_started_on > CURRENT_TIMESTAMP - interval '5 minutes' ) or " + . "( signatures_finished_on > CURRENT_TIMESTAMP - interval '5 minutes' ) or " + . "( reporting_started_on > CURRENT_TIMESTAMP - interval '5 minutes' ) or " + . "( reporting_finished_on > CURRENT_TIMESTAMP - interval '5 minutes' );"; + } + else { + $query = $query . 
';'; + } +} + +eval { + my $dbh = DBI->connect( $config->{dsn}, $config->{user}, $config->{pass} ) || die($DBI::errstr); + my $sth = $dbh->prepare($query); + $sth->execute; + my $task_status; + my $task_package; + my $dropped_files; + my $running_processes; + my $api_calls; + my $domains; + my $signatures_total; + my $signatures_alert; + my $files_written; + my $registry_keys_modified; + my $crash_issues; + my $anti_issues; + my $timedout; + # + # MySQL is basically for old Cuckoo support. + # CAPEv2 does not really play nice with it because of column issues + # + if ( $config->{dsn} =~ /^[Dd][Bb][Ii]:[Mm]ysql/ ) { + $sth->bind_columns( undef, \$task_status, \$task_package ); + while ( $sth->fetch ) { + if ( defined( $return_json->{data}->{$task_status} ) ) { + $return_json->{data}->{$task_status}++; + $return_json->{data}->{total_tasks}++; + } + } + } + else { + $sth->bind_columns( + undef, \$task_status, \$task_package, \$dropped_files, + \$running_processes, \$api_calls, \$domains, \$signatures_total, + \$signatures_alert, \$files_written, \$registry_keys_modified, \$crash_issues, + \$anti_issues, \$timedout + ); + while ( $sth->fetch ) { + if ( defined( $return_json->{data}->{$task_status} ) ) { + $return_json->{data}->{$task_status}++; + $return_json->{data}->{total_tasks}++; + } + + # skip blank entries + if ( $task_package ne '' ) { + if ( defined( $return_json->{data}->{packages}->{$task_package} ) ) { + $return_json->{data}->{packages}->{$task_package}++; + } + else { + $return_json->{data}->{packages}->{$task_package} = 1; + } + } + + # if dropped files is defined and not blank, the rest will + # if this is blank then runstatistics is conf/reporting.conf + if ( defined($dropped_files) ) { + $return_json->{data}->{dropped_files} += $dropped_files; + $return_json->{data}->{running_processes} += $running_processes; + $return_json->{data}->{api_calls} += $api_calls; + $return_json->{data}->{domains} += $domains; + $return_json->{data}->{signatures_total} += $signatures_total; + $return_json->{data}->{signatures_alert} += $signatures_alert; + $return_json->{data}->{files_written} += $files_written; + $return_json->{data}->{registry_keys_modified} += $registry_keys_modified; + $return_json->{data}->{crash_issues} += $crash_issues; + $return_json->{data}->{anti_issues} += $anti_issues; + + # put per package stats together + if ( $task_package ne '' ) { + if ( !defined( $return_json->{data}->{pkg_stats}->{$task_package} ) ) { + $return_json->{data}->{pkg_stats}->{$task_package} = { + dropped_files => $dropped_files, + running_processes => $running_processes, + api_calls => $api_calls, + domains => $domains, + signatures_total => $signatures_total, + signatures_alert => $signatures_alert, + files_written => $files_written, + registry_keys_modified => $registry_keys_modified, + crash_issues => $crash_issues, + anti_issues => $anti_issues + }; + } + else { + $return_json->{data}->{pkg_stats}->{$task_package}->{dropped_files} += $dropped_files; + $return_json->{data}->{pkg_stats}->{$task_package}->{running_processes} += $running_processes; + $return_json->{data}->{pkg_stats}->{$task_package}->{api_calls} += $api_calls; + $return_json->{data}->{pkg_stats}->{$task_package}->{domains} += $domains; + $return_json->{data}->{pkg_stats}->{$task_package}->{signatures_total} += $signatures_total; + $return_json->{data}->{pkg_stats}->{$task_package}->{signatures_alert} += $signatures_alert; + $return_json->{data}->{pkg_stats}->{$task_package}->{files_written} += $files_written; + 
$return_json->{data}->{pkg_stats}->{$task_package}->{registry_keys_modified} + += $registry_keys_modified; + $return_json->{data}->{pkg_stats}->{$task_package}->{crash_issues} += $crash_issues; + $return_json->{data}->{pkg_stats}->{$task_package}->{anti_issues} += $anti_issues; + } + } + } + + # timedout value is not a perl boolean + if ( $timedout =~ /^[Ff]/ ) { + $return_json->{data}->{timedout}++; + } + } + } +}; +if ($@) { + $return_json->{error} = 2; + $return_json->{errorString} = $return_json->{errorString} . ' SQL error: ' . $@; +} + +print encode_json($return_json) . "\n"; From 30ed00f771c167a03ec4b801f2e5c426da3bf37e Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Fri, 15 Jul 2022 12:10:20 -0500 Subject: [PATCH 206/332] typo correction for CAPE monitor... warnigns -> warnings #413 --- snmp/cape | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/cape b/snmp/cape index d3748ad64..2f859e552 100755 --- a/snmp/cape +++ b/snmp/cape @@ -146,7 +146,7 @@ my $return_json = { info => 0, debug => 0, warning => 0, - warnigns => [], + warnings => [], critical => 0, criticals => [], banned => 0, From cacdc7315789454ed0ec4d69927162aadba2475d Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Mon, 18 Jul 2022 18:20:31 -0500 Subject: [PATCH 207/332] add per timeslot stats for CAPE (#414) --- snmp/cape | 176 ++++++++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 164 insertions(+), 12 deletions(-) diff --git a/snmp/cape b/snmp/cape index 2f859e552..cde895569 100755 --- a/snmp/cape +++ b/snmp/cape @@ -96,6 +96,7 @@ use Config::Tiny; use DBI; use Time::Piece; use File::Slurp; +use Statistics::Lite qw(:all); sub version { print "cape v. 0.0.1\n"; @@ -179,6 +180,27 @@ my $return_json = { version => 1, }; +my @stats_for = ( + 'dropped_files', 'running_processes', 'api_calls', 'domains', + 'signatures_total', 'signatures_alert', 'files_written', 'registry_keys_modified', + 'crash_issues', 'anti_issues', +); + +my $ag_stats = { + dropped_files => [], + running_processes => [], + api_calls => [], + domains => [], + signatures_total => [], + signatures_alert => [], + files_written => [], + registry_keys_modified => [], + crash_issues => [], + anti_issues => [], +}; + +my $pkg_stats = {}; + # used for checking if the level value is somethingw understand my $level_check = { info => 1, debug => 1, error => 1, warning => 1, critical => 1 }; @@ -387,19 +409,57 @@ eval { } } - # if dropped files is defined and not blank, the rest will - # if this is blank then runstatistics is conf/reporting.conf - if ( defined($dropped_files) ) { - $return_json->{data}->{dropped_files} += $dropped_files; - $return_json->{data}->{running_processes} += $running_processes; - $return_json->{data}->{api_calls} += $api_calls; - $return_json->{data}->{domains} += $domains; - $return_json->{data}->{signatures_total} += $signatures_total; - $return_json->{data}->{signatures_alert} += $signatures_alert; - $return_json->{data}->{files_written} += $files_written; + if ( defined($running_processes) ) { + $return_json->{data}->{running_processes} += $running_processes; + push( @{ $ag_stats->{running_processes} }, $running_processes ); + } + else { + + } + + if ( defined($api_calls) ) { + $return_json->{data}->{api_calls} += $api_calls; + push( @{ $ag_stats->{api_calls} }, $api_calls ); + } + + if ( defined($domains) ) { + $return_json->{data}->{domains} += $domains; + push( @{ $ag_stats->{domains} }, $domains ); + } + + if ( defined($signatures_alert) ) { + 
$return_json->{data}->{signatures_alert} += $signatures_alert; + push( @{ $ag_stats->{signatures_alert} }, $signatures_alert ); + } + + if ( defined($signatures_total) ) { + $return_json->{data}->{signatures_total} += $signatures_total; + push( @{ $ag_stats->{signatures_total} }, $signatures_total ); + } + + if ( defined($files_written) ) { + $return_json->{data}->{files_written} += $files_written; + push( @{ $ag_stats->{files_written} }, $files_written ); + } + + if ( defined($registry_keys_modified) ) { $return_json->{data}->{registry_keys_modified} += $registry_keys_modified; - $return_json->{data}->{crash_issues} += $crash_issues; - $return_json->{data}->{anti_issues} += $anti_issues; + push( @{ $ag_stats->{registry_keys_modified} }, $registry_keys_modified ); + } + + if ( defined($crash_issues) ) { + $return_json->{data}->{crash_issues} += $crash_issues; + push( @{ $ag_stats->{crash_issues} }, $crash_issues ); + } + + if ( defined($anti_issues) ) { + $return_json->{data}->{anti_issues} += $anti_issues; + push( @{ $ag_stats->{anti_issues} }, $anti_issues ); + } + + if ( defined($dropped_files) ) { + $return_json->{data}->{dropped_files} += $dropped_files; + push( @{ $ag_stats->{dropped_files} }, $dropped_files ); # put per package stats together if ( $task_package ne '' ) { @@ -416,6 +476,18 @@ eval { crash_issues => $crash_issues, anti_issues => $anti_issues }; + $pkg_stats->{$task_package} = { + dropped_files => [$dropped_files], + running_processes => [$running_processes], + api_calls => [$api_calls], + domains => [$domains], + signatures_total => [$signatures_total], + signatures_alert => [$signatures_alert], + files_written => [$files_written], + registry_keys_modified => [$registry_keys_modified], + crash_issues => [$crash_issues], + anti_issues => [$anti_issues] + }; } else { $return_json->{data}->{pkg_stats}->{$task_package}->{dropped_files} += $dropped_files; @@ -429,6 +501,17 @@ eval { += $registry_keys_modified; $return_json->{data}->{pkg_stats}->{$task_package}->{crash_issues} += $crash_issues; $return_json->{data}->{pkg_stats}->{$task_package}->{anti_issues} += $anti_issues; + + push( @{ $pkg_stats->{$task_package}->{dropped_files} }, $dropped_files ); + push( @{ $pkg_stats->{$task_package}->{running_processes} }, $running_processes ); + push( @{ $pkg_stats->{$task_package}->{api_calls} }, $api_calls ); + push( @{ $pkg_stats->{$task_package}->{domains} }, $domains ); + push( @{ $pkg_stats->{$task_package}->{signatures_total} }, $signatures_total ); + push( @{ $pkg_stats->{$task_package}->{signatures_alert} }, $signatures_alert ); + push( @{ $pkg_stats->{$task_package}->{files_written} }, $files_written ); + push( @{ $pkg_stats->{$task_package}->{registry_keys_modified} }, $registry_keys_modified ); + push( @{ $pkg_stats->{$task_package}->{crash_issues} }, $crash_issues ); + push( @{ $pkg_stats->{$task_package}->{anti_issues} }, $anti_issues ); } } } @@ -445,4 +528,73 @@ if ($@) { $return_json->{errorString} = $return_json->{errorString} . ' SQL error: ' . $@; } +# compute the aggregate stats +foreach my $current_entry (@stats_for) { + if ( $#{ $ag_stats->{$current_entry} } > 0 ) { + $return_json->{data}{ 'min.' . $current_entry } = min( @{ $ag_stats->{$current_entry} } ); + $return_json->{data}{ 'max.' . $current_entry } = max( @{ $ag_stats->{$current_entry} } ); + $return_json->{data}{ 'range.' . $current_entry } = range( @{ $ag_stats->{$current_entry} } ); + $return_json->{data}{ 'mean.' . 
$current_entry } = mean( @{ $ag_stats->{$current_entry} } ); + $return_json->{data}{ 'median.' . $current_entry } = median( @{ $ag_stats->{$current_entry} } ); + $return_json->{data}{ 'mode.' . $current_entry } = mode( @{ $ag_stats->{$current_entry} } ); + $return_json->{data}{ 'v.' . $current_entry } = variance( @{ $ag_stats->{$current_entry} } ); + $return_json->{data}{ 'sd.' . $current_entry } = stddev( @{ $ag_stats->{$current_entry} } ); + $return_json->{data}{ 'vp.' . $current_entry } = variancep( @{ $ag_stats->{$current_entry} } ); + $return_json->{data}{ 'sdp.' . $current_entry } = stddevp( @{ $ag_stats->{$current_entry} } ); + } + else { + $return_json->{data}{ 'min.' . $current_entry } = 0; + $return_json->{data}{ 'max.' . $current_entry } = 0; + $return_json->{data}{ 'range.' . $current_entry } = 0; + $return_json->{data}{ 'mean.' . $current_entry } = 0; + $return_json->{data}{ 'median.' . $current_entry } = 0; + $return_json->{data}{ 'mode.' . $current_entry } = 0; + $return_json->{data}{ 'v.' . $current_entry } = 0; + $return_json->{data}{ 'sd.' . $current_entry } = 0; + $return_json->{data}{ 'vp.' . $current_entry } = 0; + $return_json->{data}{ 'sdp.' . $current_entry } = 0; + } + +} + +# compute the stats for each package +foreach my $current_pkg ( keys( %{$pkg_stats} ) ) { + foreach my $current_entry (@stats_for) { + if ( $#{ $pkg_stats->{$current_pkg}{$current_entry} } > 0 ) { + $return_json->{data}{pkg_stats}{$current_pkg}{ 'min.' . $current_entry } + = min( @{ $pkg_stats->{$current_pkg}{$current_entry} } ); + $return_json->{data}{pkg_stats}{$current_pkg}{ 'max.' . $current_entry } + = max( @{ $pkg_stats->{$current_pkg}{$current_entry} } ); + $return_json->{data}{pkg_stats}{$current_pkg}{ 'range.' . $current_entry } + = range( @{ $pkg_stats->{$current_pkg}{$current_entry} } ); + $return_json->{data}{pkg_stats}{$current_pkg}{ 'mean.' . $current_entry } + = mean( @{ $pkg_stats->{$current_pkg}{$current_entry} } ); + $return_json->{data}{pkg_stats}{$current_pkg}{ 'median.' . $current_entry } + = median( @{ $pkg_stats->{$current_pkg}{$current_entry} } ); + $return_json->{data}{pkg_stats}{$current_pkg}{ 'mode.' . $current_entry } + = mode( @{ $pkg_stats->{$current_pkg}{$current_entry} } ); + $return_json->{data}{pkg_stats}{$current_pkg}{ 'v.' . $current_entry } + = variance( @{ $pkg_stats->{$current_pkg}{$current_entry} } ); + $return_json->{data}{pkg_stats}{$current_pkg}{ 'sd.' . $current_entry } + = stddev( @{ $pkg_stats->{$current_pkg}{$current_entry} } ); + $return_json->{data}{pkg_stats}{$current_pkg}{ 'vp.' . $current_entry } + = variancep( @{ $pkg_stats->{$current_pkg}{$current_entry} } ); + $return_json->{data}{pkg_stats}{$current_pkg}{ 'sdp.' . $current_entry } + = stddevp( @{ $pkg_stats->{$current_pkg}{$current_entry} } ); + } + else { + $return_json->{data}{pkg_stats}{$current_pkg}{ 'min.' . $current_entry } = 0; + $return_json->{data}{pkg_stats}{$current_pkg}{ 'max.' . $current_entry } = 0; + $return_json->{data}{pkg_stats}{$current_pkg}{ 'range.' . $current_entry } = 0; + $return_json->{data}{pkg_stats}{$current_pkg}{ 'mean.' . $current_entry } = 0; + $return_json->{data}{pkg_stats}{$current_pkg}{ 'median.' . $current_entry } = 0; + $return_json->{data}{pkg_stats}{$current_pkg}{ 'mode.' . $current_entry } = 0; + $return_json->{data}{pkg_stats}{$current_pkg}{ 'v.' . $current_entry } = 0; + $return_json->{data}{pkg_stats}{$current_pkg}{ 'sd.' . $current_entry } = 0; + $return_json->{data}{pkg_stats}{$current_pkg}{ 'vp.' . 
$current_entry } = 0; + $return_json->{data}{pkg_stats}{$current_pkg}{ 'sdp.' . $current_entry } = 0; + } + } +} + print encode_json($return_json) . "\n"; From dbf2b7c7e827fdf56d2758ccc2fd38375622daeb Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Wed, 20 Jul 2022 12:42:17 -0500 Subject: [PATCH 208/332] cape: typo fix so it will send criticals if requested (#416) --- snmp/cape | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/cape b/snmp/cape index cde895569..e0c2c795a 100755 --- a/snmp/cape +++ b/snmp/cape @@ -310,7 +310,7 @@ while ( $process_logs && defined( $logs[$process_loop] ) ) { elsif ( $level eq 'warning' and $config->{sendWarnings} ) { push( @{ $return_json->{data}->{warnings} }, $entry ); } - elsif ( $level eq 'criticals' and $config->{sendCriticals} ) { + elsif ( $level eq 'critical' and $config->{sendCriticals} ) { push( @{ $return_json->{data}->{criticals} }, $entry ); } } From ea69fd1a3ad90db2e8250fe317acdd8886dfb45b Mon Sep 17 00:00:00 2001 From: Tim de Boer Date: Thu, 28 Jul 2022 15:55:01 +0200 Subject: [PATCH 209/332] Rename LMNS_vlans.scr to LNMS_vlans.scr (#410) --- snmp/Routeros/{LMNS_vlans.scr => LNMS_vlans.scr} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename snmp/Routeros/{LMNS_vlans.scr => LNMS_vlans.scr} (100%) diff --git a/snmp/Routeros/LMNS_vlans.scr b/snmp/Routeros/LNMS_vlans.scr similarity index 100% rename from snmp/Routeros/LMNS_vlans.scr rename to snmp/Routeros/LNMS_vlans.scr From 8449171226f6a155a586507b935957f4d2c42ce6 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Sun, 31 Jul 2022 19:57:40 -0500 Subject: [PATCH 210/332] add lnms_return_optimizer (#417) --- utils/lnms_return_optimizer | 77 +++++++++++++++++++++++++++++++++++++ 1 file changed, 77 insertions(+) create mode 100755 utils/lnms_return_optimizer diff --git a/utils/lnms_return_optimizer b/utils/lnms_return_optimizer new file mode 100755 index 000000000..884d01cf9 --- /dev/null +++ b/utils/lnms_return_optimizer @@ -0,0 +1,77 @@ +#!/usr/bin/env perl + +use MIME::Base64; +use Gzip::Faster; +use Getopt::Long; +use warnings; +use strict; + +sub version{ + print "lnms_return_optimizer v. 0.0.1\n"; +} + + + +my $version; +my $help; +my $extract; +my $new_line; +GetOptions( + 'e' => \$extract, + 'n' => \$new_line, + 'h' => \$help, + 'help' => \$help, + 'v' => \$version, + 'version' => \$version, + ); + +if ($version) { + version; + exit; +} + +if ($help) { + version; + + print ' +foo | lnms_return_otimizer + +-e Operate in extract mode instead. +-n Include newlines with the base64. + +-h Print help. +--help Print help. +-v Print version info. +--version Print version info. +'; + + exit; +} + +my $data = ''; +foreach my $line () { + $data = $data . $line; +} + +if ($extract) { + if ($data =~ /^[A-Za-z0-9\/\+\n]+\=*\n*$/ ) { + print gunzip(decode_base64($data)); + }else { + print $data; + } +}else { + # gzip and print encode in base64 + # base64 is needed as snmp does not like + my $compressed = encode_base64(gzip($data)); + if (!$new_line) { + $compressed =~ s/\n//g; + $compressed = $compressed . "\n"; + } + + # check which is smaller and prints it + if (length($compressed) > length($data)) { + print $data; + }else { + print $compressed; + } +} From 41db608ef92ad318a1bed185926e32cd2e266903 Mon Sep 17 00:00:00 2001 From: "Zane C. 
Bowers-Hadley" Date: Mon, 8 Aug 2022 09:15:57 -0500 Subject: [PATCH 211/332] lnms_return_optimizer => librenms_return_optimizer (#419) --- utils/{lnms_return_optimizer => librenms_return_optimizer} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename utils/{lnms_return_optimizer => librenms_return_optimizer} (100%) diff --git a/utils/lnms_return_optimizer b/utils/librenms_return_optimizer similarity index 100% rename from utils/lnms_return_optimizer rename to utils/librenms_return_optimizer From a1227bad67974ebaba177658f7ae227f26613cf8 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Thu, 11 Aug 2022 12:01:16 -0500 Subject: [PATCH 212/332] update date librenms_return_optimizer to take input via pipe or post -- and running the command #420 --- utils/librenms_return_optimizer | 44 ++++++++++++++++++++++++++++----- 1 file changed, 38 insertions(+), 6 deletions(-) diff --git a/utils/librenms_return_optimizer b/utils/librenms_return_optimizer index 884d01cf9..f910930a0 100755 --- a/utils/librenms_return_optimizer +++ b/utils/librenms_return_optimizer @@ -1,17 +1,39 @@ #!/usr/bin/env perl +#Copyright (c) 2022, Zane C. Bowers-Hadley +#All rights reserved. +# +#Redistribution and use in source and binary forms, with or without modification, +#are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +#ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +#WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +#IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, +#INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +#BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +#LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +#OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +#THE POSSIBILITY OF SUCH DAMAGE. + use MIME::Base64; use Gzip::Faster; use Getopt::Long; use warnings; use strict; +use IPC::Cmd qw[ run ]; sub version{ - print "lnms_return_optimizer v. 0.0.1\n"; + print "librenms_return_optimizer v. 0.0.2\n"; } - - my $version; my $help; my $extract; @@ -34,7 +56,9 @@ if ($help) { version; print ' -foo | lnms_return_otimizer +foo | librenms_return_otimizer +librenms_return_otimizer -- /path/to/some/extend -some -args + -e Operate in extract mode instead. -n Include newlines with the base64. @@ -49,8 +73,16 @@ foo | lnms_return_otimizer } my $data = ''; -foreach my $line () { - $data = $data . $line; +if ( ! $extract ) { + if (defined($ARGV[0])) { + my( $success, $error_message, $full_buf, $stdout_buf, $stderr_buf ) = + run( command => \@ARGV, verbose => 0 ); + $data=join '', @$full_buf; + }else { + foreach my $line () { + $data = $data . $line; + } + } } if ($extract) { From 8b574237e8d571e0873087bfd33ac207cf091c00 Mon Sep 17 00:00:00 2001 From: "Zane C. 
Bowers-Hadley" Date: Mon, 15 Aug 2022 12:46:19 -0500 Subject: [PATCH 213/332] add unassigned shards for opensearch #421 --- snmp/opensearch | 1 + 1 file changed, 1 insertion(+) diff --git a/snmp/opensearch b/snmp/opensearch index 8515136ec..2b133141f 100755 --- a/snmp/opensearch +++ b/snmp/opensearch @@ -153,6 +153,7 @@ $to_return->{data}{c_act_shards} = $health_json->{active_shards}; $to_return->{data}{c_rel_shards} = $health_json->{relocating_shards}; $to_return->{data}{c_init_shards} = $health_json->{initializing_shards}; $to_return->{data}{c_delayed_shards} = $health_json->{delayed_unassigned_shards}; +$to_return->{data}{c_unass_shards} = $health_json->{unassigned_shards}; $to_return->{data}{c_pending_tasks} = $health_json->{number_of_pending_tasks}; $to_return->{data}{c_in_fl_fetch} = $health_json->{number_of_in_flight_fetch}; $to_return->{data}{c_task_max_in_time} = $health_json->{task_max_waiting_in_queue_millis}; From a78596030c7a2d620a71d1d91d986bfd4f15d364 Mon Sep 17 00:00:00 2001 From: oernii Date: Sat, 10 Sep 2022 17:22:52 +0000 Subject: [PATCH 214/332] mdadm sync check - completed percent (#415) Co-authored-by: oernii --- snmp/mdadm | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/snmp/mdadm b/snmp/mdadm index 28f351382..a9a1bfefe 100755 --- a/snmp/mdadm +++ b/snmp/mdadm @@ -20,7 +20,9 @@ list_devices() { # Outputs either 0, 100, or the value of the file referenced maybe_get() { - if [ -f "${1}" ] && [ "$(cat "${1}")" != 'none' ]; then + if [[ $(cat "${1}") =~ " / " ]]; then + echo "100 * $(cat ${1})" | bc + elif [ -f "${1}" ] && [ "$(cat "${1}")" != 'none' ]; then cat "${1}" else echo 0 From 1eeeab9b6b0f3135516dcaec148f3e0a77a0b576 Mon Sep 17 00:00:00 2001 From: bnerickson Date: Mon, 19 Sep 2022 18:11:42 -0700 Subject: [PATCH 215/332] Add pwrstatd script (#423) --- snmp/pwrstatd.py | 153 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 153 insertions(+) create mode 100755 snmp/pwrstatd.py diff --git a/snmp/pwrstatd.py b/snmp/pwrstatd.py new file mode 100755 index 000000000..919c01e4e --- /dev/null +++ b/snmp/pwrstatd.py @@ -0,0 +1,153 @@ +#!/usr/bin/env python +# +# Name: Pwrstatd Script +# Author: bnerickson w/SourceDoctor's certificate.py script forming +# the base of the vast majority of this one. +# Version: 1.0 +# Description: This is a simple script to parse "pwrstat -status" output for ingestion into +# LibreNMS via the pwrstatd application. Pwrstatd is a service/application +# provided by CyberPower for their personal PSUs. The software is available +# here: https://www.cyberpowersystems.com/product/software/power-panel-personal/powerpanel-for-linux/ +# Installation: +# 1. Copy this script to /etc/snmp/ and make it executable: +# chmod +x /etc/snmp/pwrstatd.py +# 2. Edit your snmpd.conf and include: +# extend pwrstatd /etc/snmp/pwrstatd.py +# 3. (Optional) Create a /etc/snmp/pwrstatd.json file and specify the path to the pwrstat +# executable as json [the default path is /sbin/pwrstat]: +# ``` +# { +# "pwrstat_cmd": "/sbin/pwrstat" +# } +# ``` +# 4. Restart snmpd and activate the app for desired host. +# TODO: +# 1. If CyberPower ends up building support to collect data from multiple PSUs on a +# single computer, then this script will be updated to support that. 
+ +import json +import re +import subprocess + +CONFIG_FILE = "/etc/snmp/pwrstatd.json" +KEY_TO_VARIABLE_MAP = { + "Firmware Number": "sn", + "Rating Voltage": "vrating", + "Rating Power": "wrating", + "Utility Voltage": "vutility", + "Output Voltage": "voutput", + "Battery Capacity": "pcapacity", + "Remaining Runtime": "mruntime", + "Load": "wload", +} +PWRSTAT_ARGS = "-status" +PWRSTAT_CMD = "/sbin/pwrstat" +REGEX_PATTERN = r"([\w\s]+)\.\.+ (.*)" + + +def value_sanitizer(key, value): + """ + value_sanitizer(): Parses the given value to extract the exact numerical (or string) value. + + Inputs: + key: The key portion of the output after regex parsing (clean). + value: The entire value portion of the output after regex parsing (dirty). + Outputs: + str, int, or None depending on what key is given. + """ + if key == "Firmware Number": + return str(value) + elif key in ( + "Rating Voltage", + "Rating Power", + "Utility Voltage", + "Output Voltage", + "Battery Capacity", + "Remaining Runtime", + "Load", + ): + return int(value.split(" ")[0]) + else: + return None + + +def main(): + """ + main(): main function performs pwrstat command execution and output parsing. + + Inputs: + None + Outputs: + None + """ + pwrstat_cmd = PWRSTAT_CMD + output_data = {"errorString": "", "error": 0, "version": 1, "data": []} + psu_data = { + "mruntime": None, + "pcapacity": None, + "pload": None, + "sn": None, + "voutput": None, + "vrating": None, + "vutility": None, + "wload": None, + "wrating": None, + } + + # Load configuration file if it exists + try: + with open(CONFIG_FILE, "r") as json_file: + config_file = json.load(json_file) + if "pwrstat_cmd" in config_file.keys(): + pwrstat_cmd = config_file["pwrstat_cmd"] + except FileNotFoundError: + pass + except (KeyError, PermissionError, OSError, json.decoder.JSONDecodeError) as err: + output_data["error"] = 1 + output_data["errorString"] = "Config file Error: '%s'" % err + + try: + # Execute pwrstat command + pwrstat_process = subprocess.Popen( + [pwrstat_cmd, PWRSTAT_ARGS], + stdin=None, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) + poutput, perror = pwrstat_process.communicate() + + if perror: + raise OSError(perror.decode("utf-8")) + + # Parse pwrstat command output and collect data. 
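+        # Only lines matching REGEX_PATTERN ("Key..... value") are parsed;
+        # blanks, headers, and separators are skipped below.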
+ for line in poutput.decode("utf-8").split("\n"): + regex_search = re.search(REGEX_PATTERN, line.strip()) + if not regex_search: + continue + + try: + key = regex_search.groups()[0] + value = regex_search.groups()[1] + if key in KEY_TO_VARIABLE_MAP.keys(): + psu_data[KEY_TO_VARIABLE_MAP[key]] = value_sanitizer(key, value) + except IndexError as err: + output_data["error"] = 1 + output_data["errorString"] = "Command Output Parsing Error: '%s'" % err + continue + + # Manually calculate percentage load on PSU + if psu_data["wrating"]: + # int to float hacks in-place for python2 backwards compatibility + psu_data["pload"] = int( + float(psu_data["wload"]) / float(psu_data["wrating"]) * 100 + ) + except (subprocess.CalledProcessError, OSError) as err: + output_data["error"] = 1 + output_data["errorString"] = "Command Execution Error: '%s'" % err + + output_data["data"].append(psu_data) + print(json.dumps(output_data)) + + +if __name__ == "__main__": + main() From 232f54860215da2f3829fd01f5dd8f186c696bca Mon Sep 17 00:00:00 2001 From: bnerickson Date: Fri, 14 Oct 2022 11:42:28 -0700 Subject: [PATCH 216/332] Add systemd script (#426) * Add systemd script * Fixing a couple python black styling errors --- snmp/systemd.py | 191 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 191 insertions(+) create mode 100755 snmp/systemd.py diff --git a/snmp/systemd.py b/snmp/systemd.py new file mode 100755 index 000000000..1e6b47d51 --- /dev/null +++ b/snmp/systemd.py @@ -0,0 +1,191 @@ +#!/usr/bin/env python +# +# Name: Systemd Script +# Author: bnerickson w/SourceDoctor's certificate.py script forming +# the base of the vast majority of this one. +# Version: 1.0 +# Description: This is a simple script to parse "systemctl" output for ingestion into +# LibreNMS via the systemd application. +# Installation: +# 1. Copy this script to /etc/snmp/ and make it executable: +# chmod +x /etc/snmp/systemd.py +# 2. Edit your snmpd.conf and include: +# extend systemdd /etc/snmp/systemd.py +# 3. (Optional) Create a /etc/snmp/systemd.json file and specify: +# a.) "systemctl_cmd" - String path to the systemctl binary ["/usr/bin/systemctl"] +# b.) "include_inactive_units" - True/False string to include inactive units in +# results ["False"] +# ``` +# { +# "systemctl_cmd": "/bin/systemctl", +# "include_inactive_units": "True" +# } +# ``` +# 4. Restart snmpd and activate the app for desired host. + +import json +import subprocess +import sys + +CONFIG_FILE = "/etc/snmp/systemd.json" +SYSTEMCTL_ARGS = ["list-units", "--full", "--plain", "--no-legend", "--no-page"] +SYSTEMCTL_CMD = "/usr/bin/systemctl" +# The unit "sub" type is the only unit state that has three layers of +# depth. "load" and "active" are two layers deep. +SYSTEMCTL_TERNARY_STATES = ["sub"] + + +def error_handler(error_name, err): + """ + error_handler(): Common error handler for config/output parsing and + command execution. + Inputs: + error_name: String describing the error handled. + err: The error message in its entirety. + Outputs: + None + """ + output_data = { + "errorString": "%s: '%s'" % (error_name, err), + "error": 1, + "version": 1, + "data": [], + } + print(json.dumps(output_data)) + sys.exit(1) + + +def config_file_parser(): + """ + config_file_parser(): Parses the config file (if it exists) and extracts the + necessary parameters. + + Inputs: + None + Outputs: + systemctl_cmd: The full systemctl command to execute. 
+ """ + systemctl_cmd = [SYSTEMCTL_CMD] + systemctl_args = SYSTEMCTL_ARGS + + # Load configuration file if it exists + try: + with open(CONFIG_FILE, "r") as json_file: + config_file = json.load(json_file) + systemctl_cmd = [config_file["systemctl_cmd"]] + if config_file["include_inactive_units"].lower().strip() == "true": + systemctl_args.append("--all") + except FileNotFoundError: + pass + except (KeyError, PermissionError, OSError, json.decoder.JSONDecodeError) as err: + error_handler("Config File Error", err) + + # Create and return full systemctl command. + systemctl_cmd.extend(systemctl_args) + return systemctl_cmd + + +def command_executor(systemctl_cmd): + """ + command_executor(): Execute the systemctl command and return the output. + + Inputs: + systemctl_cmd: The full systemctl command to execute. + Outputs: + poutput: The stdout of the executed command (empty byte-string if error). + """ + try: + # Execute systemctl command + poutput = subprocess.check_output( + systemctl_cmd, + stdin=None, + stderr=subprocess.PIPE, + ) + except (subprocess.CalledProcessError, OSError) as err: + error_handler("Command Execution Error", err) + return poutput + + +def unit_parser(line, systemctl_data): + """ + unit_parser(): Parses a unit's line for load, active, and sub status. Each + of those values is incremented in the global systemctl_data + variable as-well-as the totals for each category. + + Inputs: + line: The unit's status line from the systemctl stdout. + Outputs: + None + """ + line_parsed = line.strip().split() + + try: + # Reverse the to grab the sub type + # (ignoring periods in the service name). + parsed_results = { + "load": line_parsed[1], + "active": line_parsed[2], + "sub": {line_parsed[0][::-1].split(".")[0][::-1]: line_parsed[3]}, + } + except (IndexError) as err: + error_handler("Command Output Parsing Error", err) + + for state_type, state_value in parsed_results.items(): + if state_type not in systemctl_data: + systemctl_data[state_type] = {} + if state_type not in SYSTEMCTL_TERNARY_STATES: + systemctl_data[state_type][state_value] = ( + 1 + if state_value not in systemctl_data[state_type] + else (systemctl_data[state_type][state_value] + 1) + ) + systemctl_data[state_type]["total"] = ( + 1 + if "total" not in systemctl_data[state_type] + else (systemctl_data[state_type]["total"] + 1) + ) + else: + for sub_state_type, sub_state_value in state_value.items(): + if sub_state_type not in systemctl_data[state_type]: + systemctl_data[state_type][sub_state_type] = {} + systemctl_data[state_type][sub_state_type][sub_state_value] = ( + 1 + if sub_state_value not in systemctl_data[state_type][sub_state_type] + else ( + systemctl_data[state_type][sub_state_type][sub_state_value] + 1 + ) + ) + systemctl_data[state_type][sub_state_type]["total"] = ( + 1 + if "total" not in systemctl_data[state_type][sub_state_type] + else (systemctl_data[state_type][sub_state_type]["total"] + 1) + ) + return systemctl_data + + +def main(): + """ + main(): main function that delegates config file parsing, command execution, + and unit stdout parsing. Then it prints out the expected json output + for the systemd application. + + Inputs: + None + Outputs: + None + """ + output_data = {"errorString": "", "error": 0, "version": 1, "data": {}} + + # Parse configuration file. + systemctl_cmd = config_file_parser() + + # Execute systemctl command and parse output. 
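+    # Each unit line is expected in "UNIT LOAD ACTIVE SUB DESCRIPTION" order,
+    # e.g. (illustrative): "sshd.service loaded active running OpenSSH server daemon".
+    # unit_parser() tallies the LOAD, ACTIVE, and SUB columns into counters.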
+ for line in command_executor(systemctl_cmd).decode("utf-8").split("\n"): + if not line: + continue + output_data["data"] = unit_parser(line, output_data["data"]) + print(json.dumps(output_data)) + + +if __name__ == "__main__": + main() From f9fea2e06c90188731e2ab680ceb71c1594c9d85 Mon Sep 17 00:00:00 2001 From: Trae Santiago <249409+Trae32566@users.noreply.github.com> Date: Fri, 14 Oct 2022 13:42:47 -0500 Subject: [PATCH 217/332] fixed conditional not checking for file before `cat`ing, and removed reliance on bc (#425) --- snmp/mdadm | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/snmp/mdadm b/snmp/mdadm index a9a1bfefe..b0c9b3c5f 100755 --- a/snmp/mdadm +++ b/snmp/mdadm @@ -20,8 +20,8 @@ list_devices() { # Outputs either 0, 100, or the value of the file referenced maybe_get() { - if [[ $(cat "${1}") =~ " / " ]]; then - echo "100 * $(cat ${1})" | bc + if [ -f "${1}" ] && [[ $(cat "${1}") =~ " / " ]]; then + echo $((100 * $(cat "${1}"))) elif [ -f "${1}" ] && [ "$(cat "${1}")" != 'none' ]; then cat "${1}" else From e6fa86dc82f0479cf239b9871c62ff20925189d6 Mon Sep 17 00:00:00 2001 From: Tony Murray Date: Sun, 16 Oct 2022 11:15:25 -0500 Subject: [PATCH 218/332] Fix memcached security vulnerability (#428) Add snmp extend script --- agent-local/memcached | 3 +-- snmp/memcached | 22 ++++++++++++++++++++++ 2 files changed, 23 insertions(+), 2 deletions(-) create mode 100755 snmp/memcached diff --git a/agent-local/memcached b/agent-local/memcached index b4ed626f8..6ba751b69 100755 --- a/agent-local/memcached +++ b/agent-local/memcached @@ -7,7 +7,6 @@ $stats = $m->getStats(); if(is_array($stats)) { echo("<<>>\n"); - echo(serialize($m->getStats())); + echo(json_encode($m->getStats())); echo("\n"); } -?> diff --git a/snmp/memcached b/snmp/memcached new file mode 100755 index 000000000..f0d7844ee --- /dev/null +++ b/snmp/memcached @@ -0,0 +1,22 @@ +#!/usr/bin/php + false, + 'error' => 99, + 'errorString' => 'php-memcached extension is not available, it must be installed and enabled.', + 'version' => '1.1' + )); + exit; +} + +$m = new Memcached(); +$m->addServer('localhost', 11211); + +echo json_encode(array( + 'data' => $m->getStats(), + 'error' => $m->getLastErrorCode(), + 'errorString' => $m->getLastErrorMessage(), + 'version' => '1.1', +)); From 10bae306f306921a188d64c14988d3b876f0b76d Mon Sep 17 00:00:00 2001 From: Oskar Szafraniec Date: Wed, 26 Oct 2022 15:13:20 +0200 Subject: [PATCH 219/332] run_query PHP 8.1 hot fix (#430) As of PHP 8.1.0, the default setting is MYSQLI_REPORT_ERROR | MYSQLI_REPORT_STRICT. Previously, it was MYSQLI_REPORT_OFF. --- snmp/mysql | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/snmp/mysql b/snmp/mysql index 89e3c9059..530637352 100755 --- a/snmp/mysql +++ b/snmp/mysql @@ -1310,6 +1310,7 @@ function to_int ( $str ) { function run_query($sql, $conn) { global $debug; debug($sql); + mysqli_report(MYSQLI_REPORT_OFF); $result = @mysqli_query($conn, $sql); if ( $debug && strpos($sql, 'SHOW SLAVE STATUS ') === false ) { $error = @mysqli_error($conn); @@ -1319,13 +1320,15 @@ function run_query($sql, $conn) { } } $array = array(); - $count = @mysqli_num_rows($result); - if ( $count > 10000 ) { - debug('Abnormal number of rows returned: ' . $count); - } - else { - while ( $row = @mysqli_fetch_array($result) ) { - $array[] = $row; + if ( $result ) { + $count = @mysqli_num_rows($result); + if ( $count > 10000 ) { + debug('Abnormal number of rows returned: ' . 
$count); + } + else { + while ( $row = @mysqli_fetch_array($result) ) { + $array[] = $row; + } } } debug(array($sql, $array)); From 0f0a31fa417ff3ed9137893d9c61c520bf9ad539 Mon Sep 17 00:00:00 2001 From: bnerickson Date: Tue, 1 Nov 2022 14:50:18 -0700 Subject: [PATCH 220/332] Cleaning up pwrstatd code (#431) --- snmp/pwrstatd.py | 180 +++++++++++++++++++++++++++++------------------ 1 file changed, 113 insertions(+), 67 deletions(-) diff --git a/snmp/pwrstatd.py b/snmp/pwrstatd.py index 919c01e4e..dc5d332a4 100755 --- a/snmp/pwrstatd.py +++ b/snmp/pwrstatd.py @@ -7,7 +7,8 @@ # Description: This is a simple script to parse "pwrstat -status" output for ingestion into # LibreNMS via the pwrstatd application. Pwrstatd is a service/application # provided by CyberPower for their personal PSUs. The software is available -# here: https://www.cyberpowersystems.com/product/software/power-panel-personal/powerpanel-for-linux/ +# here: +# https://www.cyberpowersystems.com/product/software/power-panel-personal/powerpanel-for-linux/ # Installation: # 1. Copy this script to /etc/snmp/ and make it executable: # chmod +x /etc/snmp/pwrstatd.py @@ -28,6 +29,7 @@ import json import re import subprocess +import sys CONFIG_FILE = "/etc/snmp/pwrstatd.json" KEY_TO_VARIABLE_MAP = { @@ -40,11 +42,79 @@ "Remaining Runtime": "mruntime", "Load": "wload", } -PWRSTAT_ARGS = "-status" +PWRSTAT_ARGS = ["-status"] PWRSTAT_CMD = "/sbin/pwrstat" REGEX_PATTERN = r"([\w\s]+)\.\.+ (.*)" +def error_handler(error_name, err): + """ + error_handler(): Common error handler for config/output parsing and + command execution. + Inputs: + error_name: String describing the error handled. + err: The error message in its entirety. + Outputs: + None + """ + output_data = { + "errorString": "%s: '%s'" % (error_name, err), + "error": 1, + "version": 1, + "data": [], + } + print(json.dumps(output_data)) + sys.exit(1) + + +def config_file_parser(): + """ + config_file_parser(): Parses the config file (if it exists) and extracts the + necessary parameters. + + Inputs: + None + Outputs: + pwrstat_cmd: The full pwrstat command to execute. + """ + pwrstat_cmd = [PWRSTAT_CMD] + + # Load configuration file if it exists + try: + with open(CONFIG_FILE, "r") as json_file: + config_file = json.load(json_file) + pwrstat_cmd = [config_file["pwrstat_cmd"]] + except FileNotFoundError: + pass + except (KeyError, PermissionError, OSError, json.decoder.JSONDecodeError) as err: + error_handler("Config File Error", err) + + # Create and return full pwrstat command. + pwrstat_cmd.extend(PWRSTAT_ARGS) + return pwrstat_cmd + + +def command_executor(pwrstat_cmd): + """ + command_executor(): Execute the pwrstat command and return the output. + + Inputs: + pwrstat_cmd: The full pwrstat command to execute. + Outputs: + poutput: The stdout of the executed command. + """ + try: + # Execute pwrstat command + poutput = subprocess.check_output( + pwrstat_cmd, + stdin=None, + stderr=subprocess.PIPE, + ) + except (subprocess.CalledProcessError, OSError) as err: + error_handler("Command Execution Error", err) + return poutput + + def value_sanitizer(key, value): """ value_sanitizer(): Parses the given value to extract the exact numerical (or string) value. 
@@ -57,7 +127,7 @@ def value_sanitizer(key, value): """ if key == "Firmware Number": return str(value) - elif key in ( + if key in ( "Rating Voltage", "Rating Power", "Utility Voltage", @@ -67,8 +137,42 @@ def value_sanitizer(key, value): "Load", ): return int(value.split(" ")[0]) - else: - return None + return None + + +def output_parser(pwrstat_output): + """ + output_parser(): Parses the pwrstat command output and returns a dictionary + of PSU metrics. + + Inputs: + pwrstat_output: The pwrstat command stdout + Outputs: + psu_data: A dictionary of PSU metrics. + """ + psu_data = {} + + for line in pwrstat_output.decode("utf-8").split("\n"): + regex_search = re.search(REGEX_PATTERN, line.strip()) + + if not regex_search: + continue + + try: + key = regex_search.groups()[0] + value = regex_search.groups()[1] + if key in KEY_TO_VARIABLE_MAP: + psu_data[KEY_TO_VARIABLE_MAP[key]] = value_sanitizer(key, value) + except IndexError as err: + error_handler("Command Output Parsing Error", err) + + # Manually calculate percentage load on PSU + if "wrating" in psu_data and "wload" in psu_data and psu_data["wrating"]: + # int to float hacks in-place for python2 backwards compatibility + psu_data["pload"] = int( + float(psu_data["wload"]) / float(psu_data["wrating"]) * 100 + ) + return psu_data def main(): @@ -80,72 +184,14 @@ def main(): Outputs: None """ - pwrstat_cmd = PWRSTAT_CMD output_data = {"errorString": "", "error": 0, "version": 1, "data": []} - psu_data = { - "mruntime": None, - "pcapacity": None, - "pload": None, - "sn": None, - "voutput": None, - "vrating": None, - "vutility": None, - "wload": None, - "wrating": None, - } - # Load configuration file if it exists - try: - with open(CONFIG_FILE, "r") as json_file: - config_file = json.load(json_file) - if "pwrstat_cmd" in config_file.keys(): - pwrstat_cmd = config_file["pwrstat_cmd"] - except FileNotFoundError: - pass - except (KeyError, PermissionError, OSError, json.decoder.JSONDecodeError) as err: - output_data["error"] = 1 - output_data["errorString"] = "Config file Error: '%s'" % err + # Parse configuration file. + pwrstat_cmd = config_file_parser() - try: - # Execute pwrstat command - pwrstat_process = subprocess.Popen( - [pwrstat_cmd, PWRSTAT_ARGS], - stdin=None, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - ) - poutput, perror = pwrstat_process.communicate() - - if perror: - raise OSError(perror.decode("utf-8")) - - # Parse pwrstat command output and collect data. - for line in poutput.decode("utf-8").split("\n"): - regex_search = re.search(REGEX_PATTERN, line.strip()) - if not regex_search: - continue - - try: - key = regex_search.groups()[0] - value = regex_search.groups()[1] - if key in KEY_TO_VARIABLE_MAP.keys(): - psu_data[KEY_TO_VARIABLE_MAP[key]] = value_sanitizer(key, value) - except IndexError as err: - output_data["error"] = 1 - output_data["errorString"] = "Command Output Parsing Error: '%s'" % err - continue - - # Manually calculate percentage load on PSU - if psu_data["wrating"]: - # int to float hacks in-place for python2 backwards compatibility - psu_data["pload"] = int( - float(psu_data["wload"]) / float(psu_data["wrating"]) * 100 - ) - except (subprocess.CalledProcessError, OSError) as err: - output_data["error"] = 1 - output_data["errorString"] = "Command Execution Error: '%s'" % err + # Execute pwrstat command and parse output. 
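+    # A single PSU is assumed for now (see the TODO in the header comment), so
+    # exactly one metrics dict is appended to the data list.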
+ output_data["data"].append(output_parser(command_executor(pwrstat_cmd))) - output_data["data"].append(psu_data) print(json.dumps(output_data)) From 539a10db4f92d31cb5d486d74792240cea343d64 Mon Sep 17 00:00:00 2001 From: Jethro Date: Sat, 5 Nov 2022 11:22:19 +0100 Subject: [PATCH 221/332] Add default puppet7 location of the summary file (#434) --- snmp/puppet_agent.py | 1 + 1 file changed, 1 insertion(+) diff --git a/snmp/puppet_agent.py b/snmp/puppet_agent.py index cc9b36343..8afa87d1e 100755 --- a/snmp/puppet_agent.py +++ b/snmp/puppet_agent.py @@ -23,6 +23,7 @@ summary_files = [ "/var/cache/puppet/state/last_run_summary.yaml", "/opt/puppetlabs/puppet/cache/state/last_run_summary.yaml", + "/opt/puppetlabs/puppet/public/last_run_summary.yaml", ] From 61a0a43f8d88a5402c01148a07b54b8890f3d92a Mon Sep 17 00:00:00 2001 From: bnerickson Date: Sun, 13 Nov 2022 02:04:02 -0800 Subject: [PATCH 222/332] Add wireguard script (#432) * Add wireguard script * Fixing a minor lint isort issue * Removing str sanitization that was never supposed to be part of the original commit * Fixed one final lint issue * Fixing comments to use a VALID json format. String enforement for the friendly name variable. --- snmp/wireguard.py | 247 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 247 insertions(+) create mode 100755 snmp/wireguard.py diff --git a/snmp/wireguard.py b/snmp/wireguard.py new file mode 100755 index 000000000..66c90ec93 --- /dev/null +++ b/snmp/wireguard.py @@ -0,0 +1,247 @@ +#!/usr/bin/env python +# +# Name: Wireguard Script +# Author: bnerickson w/SourceDoctor's certificate.py script forming the +# base of the vast majority of this one. +# Version: 1.0 +# Description: This is a simple script to parse "wg show all" output for ingestion into LibreNMS +# via the wireguard application. We collect traffic, a friendly identifier (arbitrary +# name), and last handshake time for all clients on all wireguard interfaces. +# Installation: +# 1. Copy this script to /etc/snmp/ and make it executable: +# chmod +x /etc/snmp/wireguard.py +# 2. Edit your snmpd.conf and include: +# extend wireguard /etc/snmp/wireguard.py +# 3. Create a /etc/snmp/wireguard.json file and specify: +# a.) (optional) "wg_cmd" - String path to the wg binary ["/usr/bin/wg"] +# b.) "public_key_to_arbitrary_name" - A dictionary to convert between the publickey +# assigned to the client (specified in the wireguard +# interface conf file) to an arbitrary, friendly +# name. The friendly names MUST be unique within +# each interface. Also note that the interface name +# and friendly names are used in the RRD filename, +# so using special characters is highly discouraged. +# ``` +# { +# "wg_cmd": "/bin/wg", +# "public_key_to_arbitrary_name": { +# "wg0": { +# "z1iSIymFEFi/PS8rR19AFBle7O4tWowMWuFzHO7oRlE=": "client1", +# "XqWJRE21Fw1ke47mH1yPg/lyWqCCfjkIXiS6JobuhTI=": "server.domain.com" +# } +# } +# } +# ``` +# 4. Restart snmpd and activate the app for desired host. +# TODO: +# 1. If Wireguard ever implements a friendly identifier, then scrape that instead of providing +# arbitrary names manually in the json conf file. + +import json +import subprocess +import sys +from datetime import datetime +from itertools import chain + +CONFIG_FILE = "/etc/snmp/wireguard.json" +WG_ARGS = ["show", "all", "dump"] +WG_CMD = "/usr/bin/wg" + + +def error_handler(error_name, err): + """ + error_handler(): Common error handler for config/output parsing and command execution. We set + the data to none and print out the json. 
+ Inputs: + error_name: String describing the error handled. + err: The error message in its entirety. + Outputs: + None + """ + output_data = { + "errorString": "%s: '%s'" % (error_name, err), + "error": 1, + "version": 1, + "data": {}, + } + print(json.dumps(output_data)) + sys.exit(1) + + +def config_file_parser(): + """ + config_file_parser(): Parse the config file and extract the necessary parameters. + + Inputs: + None + Outputs: + wg_cmd: The full wg command to execute. + interface_clients_dict: Dictionary mapping of interface names to public_key->client names. + """ + # Load configuration file if it exists + try: + with open(CONFIG_FILE, "r") as json_file: + config_file = json.load(json_file) + interface_clients_dict = config_file["public_key_to_arbitrary_name"] + wg_cmd = [config_file["wg_cmd"]] if "wg_cmd" in config_file else [WG_CMD] + except ( + FileNotFoundError, + KeyError, + PermissionError, + OSError, + json.decoder.JSONDecodeError, + ) as err: + error_handler("Config File Error", err) + + # Create and return full wg command. + wg_cmd.extend(WG_ARGS) + return wg_cmd, interface_clients_dict + + +def config_file_validator(interface_clients_dict): + """ + config_file_validator(): Verifies the uniqueness of the arbitrary names in the interface to + public_key->client names dictionary. + + Inputs: + interface_clients_dict: Dictionary mapping of interface names to public_key->client names. + Outputs: + None + """ + # Search for valid, unique arbitrary names + for interface, public_key_to_arbitrary_name in interface_clients_dict.items(): + rev_dict = {} + for public_key, arbitrary_name in public_key_to_arbitrary_name.items(): + rev_dict.setdefault(str(arbitrary_name), set()).add(public_key) + + # Verify the arbitrary names set in the wireguard.json file are unique. + result = set( + chain.from_iterable( + arbitrary_name + for public_key, arbitrary_name in rev_dict.items() + if len(arbitrary_name) > 1 + ) + ) + if not result: + continue + + err = ( + "%s interface has non-unique arbitrary names configured for public keys %s" + % (interface, str(result)) + ) + error_handler("Config File Error", err) + + +def command_executor(wg_cmd): + """ + command_executor(): Execute the wg command and return the output. + + Inputs: + wg_cmd: The full wg command to execute. + Outputs: + poutput: The stdout of the executed command (empty byte-string if error). + """ + try: + # Execute wg command + poutput = subprocess.check_output( + wg_cmd, + stdin=None, + stderr=subprocess.PIPE, + ) + except (subprocess.CalledProcessError, OSError) as err: + error_handler("Command Execution Error", err) + return poutput + + +def output_parser(line, interface_clients_dict): + """ + output_parser(): Parses a line from the wg command for the client's public key, traffic inbound + and outbound, wireguard interface, and last handshake timestamp. + + Inputs: + line: The wireguard client status line from the wg command stdout. + interface_clients_dict: Dictionary mapping of interface to public_key->client names. 
+ Outputs: + wireguard_data: A dictionary of a peer's server interface, public key, bytes sent and + received, and minutes since last handshake + """ + line_parsed = line.strip().split() + + try: + interface = str(line_parsed[0]) + public_key = str(line_parsed[1]) + timestamp = int(line_parsed[5]) + bytes_rcvd = int(line_parsed[6]) + bytes_sent = int(line_parsed[7]) + except (IndexError, ValueError) as err: + error_handler("Command Output Parsing Error", err) + + # Return an empty dictionary if the interface is not in the dictionary. + if interface not in interface_clients_dict: + return {} + + # Return an empty dictionary if there is no public key to arbitrary name mapping. + if public_key not in interface_clients_dict[interface]: + return {} + + # Perform in-place replacement of publickeys with arbitrary names. + friendly_name = str(interface_clients_dict[interface][public_key]) + + # Calculate minutes since last handshake here + last_handshake_timestamp = datetime.fromtimestamp(timestamp) if timestamp else 0 + minutes_since_last_handshake = ( + int((datetime.now() - last_handshake_timestamp).total_seconds() / 60) + if last_handshake_timestamp + else None + ) + + wireguard_data = { + interface: { + friendly_name: { + "minutes_since_last_handshake": minutes_since_last_handshake, + "bytes_rcvd": bytes_rcvd, + "bytes_sent": bytes_sent, + } + } + } + + return wireguard_data + + +def main(): + """ + main(): main function that delegates config file parsing, command execution, and unit stdout + parsing. Then it prints out the expected json output for the wireguard application. + + Inputs: + None + Outputs: + None + """ + output_data = {"errorString": "", "error": 0, "version": 1, "data": {}} + + # Parse configuration file. + wg_cmd, interface_clients_dict = config_file_parser() + + # Verify contents of the config file are valid. + config_file_validator(interface_clients_dict) + + # Execute wg command and parse output. We skip the first line ("[1:]") since that's the + # wireguard server's public key declaration. + for line in command_executor(wg_cmd).decode("utf-8").split("\n")[1:]: + if not line: + continue + # Parse each line and import the resultant dictionary into output_data. We update the + # interface key with new clients as they are found and instantiate new interface keys as + # they are found. + for intf, intf_data in output_parser(line, interface_clients_dict).items(): + if intf not in output_data["data"]: + output_data["data"][intf] = {} + for client, client_data in intf_data.items(): + output_data["data"][intf][client] = client_data + + print(json.dumps(output_data)) + + +if __name__ == "__main__": + main() From c2b00adcddee371ecd9d4e9e259bebac98a07931 Mon Sep 17 00:00:00 2001 From: bnerickson Date: Sun, 13 Nov 2022 02:05:29 -0800 Subject: [PATCH 223/332] Adding optional configuration file support to postgres snmp script (#437) --- snmp/postgres | 40 +++++++++++++++++++++++++++++++++++++--- 1 file changed, 37 insertions(+), 3 deletions(-) diff --git a/snmp/postgres b/snmp/postgres index c03b2ccb4..764484000 100644 --- a/snmp/postgres +++ b/snmp/postgres @@ -22,19 +22,53 @@ #OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF #THE POSSIBILITY OF SUCH DAMAGE. -#set the user here to use -#be sure to set up the user in .pgpass for the user snmpd is running as +# Location of optional config file. +CONFIG_FILE="/etc/snmp/postgres.config" + +# Default DBuser is pgsql. Be sure to set up the user in .pgpass for the user snmpd +# is running as. 
You can either update the variable below, or add "DBuser=<user>"
+# to the /etc/snmp/postgres.config file without quotes and replacing <user>.
 DBuser=pgsql
 
 # You may want to disable totalling for the postgres DB as that can make the total graphs artificially noisy.
 # 1 = don't total stats for the DB postgres
 # 0 = include postgres in the totals
+# To set this to 0, you can either update the variable below, or add "ignorePG=0" to
+# the /etc/snmp/postgres.config file (without quotes).
 ignorePG=1;
 
+# Hostname to connect to. By default this is blank and check_postgres.pl will connect
+# to the Unix socket. You can either update the variable below, or add "DBhost=<host>"
+# to the /etc/snmp/postgres.config file without quotes and replacing <host>.
+DBhost=""
+
+# Load configuration from config file if the file exists.
+if [ -f "$CONFIG_FILE" ]; then
+    saved_IFS=$IFS
+    IFS="="
+
+    while read -r key value; do
+        if [ "$key" = "DBuser" ]; then
+            DBuser=$value
+        elif [ "$key" = "ignorePG" ]; then
+            ignorePG=$value
+        elif [ "$key" = "DBhost" ]; then
+            DBhost=$value
+        fi
+    done < $CONFIG_FILE
+
+    IFS=$saved_IFS
+fi
+
 #make sure the paths are right for your system
 cpg='/usr/bin/env check_postgres.pl'
 
-$cpg -u $DBuser --action dbstats | awk -F ' ' '
+cpg_command="$cpg -u $DBuser --action dbstats"
+if [ "$DBhost" != "" ]; then
+    cpg_command="$cpg_command -H $DBhost"
+fi
+
+$cpg_command | awk -F ' ' '
 BEGIN{
 backends=0;
 
From c09263d04e522185d5b99fe3477dda4a4be4bc97 Mon Sep 17 00:00:00 2001
From: bnerickson 
Date: Sun, 13 Nov 2022 02:06:10 -0800
Subject: [PATCH 224/332] Fixing agent-local hddtemp script (#436)

---
 agent-local/hddtemp | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/agent-local/hddtemp b/agent-local/hddtemp
index 2cee3939c..75442c725 100755
--- a/agent-local/hddtemp
+++ b/agent-local/hddtemp
@@ -28,9 +28,9 @@ if [ "${hddtemp}" != "" ]; then
     if [ -x "${hddtemp}" ]; then
         if type parallel > /dev/null 2>&1; then # When available, use GNU parallel for a significant performance boost. hddtemp runs serially(!)
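+            # ${disks} is intentionally unquoted below so the space-separated list
+            # word-splits and each disk reaches hddtemp (or parallel) as its own
+            # argument; quoting it passed all disks as a single device path.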
- output=$(parallel "${hddtemp}" -w -q ::: "${disks}" 2>/dev/null) + output=$(parallel "${hddtemp}" -w -q ::: ${disks} 2>/dev/null) else - output=$(${hddtemp} -w -q "${disks}" 2>/dev/null) + output=$(${hddtemp} -w -q ${disks} 2>/dev/null) fi content=$(echo "$output" | awk '{ if ($0 !~ /not available/) { print $0 } }' | awk -F": " 'BEGIN{ ORS="" }{ print "|"$1"|"$2"|"$3"|";} ' | sed 's/[° ]C|/|C|/g' | sed 's/[° ]F|/|F|/g' | tr -cd '\12\14\40-\176') if [ "${content}" != "" ]; then From 289354109993795254af51f2471634c9031e0e32 Mon Sep 17 00:00:00 2001 From: SourceDoctor Date: Fri, 18 Nov 2022 21:16:38 +0100 Subject: [PATCH 225/332] Alarm Flag on UPS-Nut Application (#438) --- snmp/ups-nut.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/snmp/ups-nut.sh b/snmp/ups-nut.sh index b5ba04fe4..e8dd3a824 100755 --- a/snmp/ups-nut.sh +++ b/snmp/ups-nut.sh @@ -24,7 +24,7 @@ do fi done -for value in "ups\.status:[A-Z ]{0,}OL" "ups\.status:[A-Z ]{0,}OB" "ups\.status:[A-Z ]{0,}LB" "ups\.status:[A-Z ]{0,}HB" "ups\.status:[A-Z ]{0,}RB" "ups\.status:[A-Z ]{0,}CHRG" "ups\.status:[A-Z ]{0,}DISCHRG" "ups\.status:[A-Z ]{0,}BYPASS" "ups\.status:[A-Z ]{0,}CAL" "ups\.status:[A-Z ]{0,}OFF" "ups\.status:[A-Z ]{0,}OVER" "ups\.status:[A-Z ]{0,}TRIM" "ups\.status:[A-Z ]{0,}BOOST" "ups\.status:[A-Z ]{0,}FSD" +for value in "ups\.status:[A-Z ]{0,}OL" "ups\.status:[A-Z ]{0,}OB" "ups\.status:[A-Z ]{0,}LB" "ups\.status:[A-Z ]{0,}HB" "ups\.status:[A-Z ]{0,}RB" "ups\.status:[A-Z ]{0,}CHRG" "ups\.status:[A-Z ]{0,}DISCHRG" "ups\.status:[A-Z ]{0,}BYPASS" "ups\.status:[A-Z ]{0,}CAL" "ups\.status:[A-Z ]{0,}OFF" "ups\.status:[A-Z ]{0,}OVER" "ups\.status:[A-Z ]{0,}TRIM" "ups\.status:[A-Z ]{0,}BOOST" "ups\.status:[A-Z ]{0,}FSD" "ups\.alarm:[A-Z ]" do UNKNOWN=$(echo "$TMP" | grep -Eo "ups\.status:") if [ -z "$UNKNOWN" ]; then @@ -38,3 +38,4 @@ do fi fi done + From dea0b7176e8de1cb5c3df1449e3f2924bb5f9fdc Mon Sep 17 00:00:00 2001 From: 00gh <36605979+00gh@users.noreply.github.com> Date: Sun, 20 Nov 2022 03:59:09 +0100 Subject: [PATCH 226/332] snmp/Openwrt/wl*: Added stderr redirects in wl* scripts for iw/iwlist. (#440) See Issue mentioned in librenms/librenms: #14428 OpenWRT example scripts give bad output if wireless interface is down. Redirect the iw/iwlist command stderr output to /dev/null. 
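
A minimal sketch of the pattern, assuming a hypothetical wlan0 interface:

    /usr/sbin/iw dev wlan0 station dump 2>/dev/null | /bin/grep Station

Any error text iw prints for a down or missing interface now goes to
/dev/null instead of being captured into the SNMP result.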
Co-authored-by: 00gh <00gh> --- snmp/Openwrt/wlClients.sh | 2 +- snmp/Openwrt/wlFrequency.sh | 2 +- snmp/Openwrt/wlNoiseFloor.sh | 2 +- snmp/Openwrt/wlRate.sh | 3 ++- snmp/Openwrt/wlSNR.sh | 2 +- 5 files changed, 6 insertions(+), 5 deletions(-) diff --git a/snmp/Openwrt/wlClients.sh b/snmp/Openwrt/wlClients.sh index f454e592f..72e3694cb 100755 --- a/snmp/Openwrt/wlClients.sh +++ b/snmp/Openwrt/wlClients.sh @@ -25,7 +25,7 @@ fi count=0 for interface in $interfaces do - new=$(/usr/sbin/iw dev "$interface" station dump | /bin/grep Station | /usr/bin/cut -f 2 -s -d" " | /usr/bin/wc -l) + new=$(/usr/sbin/iw dev "$interface" station dump 2>/dev/null | /bin/grep Station | /usr/bin/cut -f 2 -s -d" " | /usr/bin/wc -l) count=$(( $count + $new )) done diff --git a/snmp/Openwrt/wlFrequency.sh b/snmp/Openwrt/wlFrequency.sh index 658459ab5..4552cc4b7 100755 --- a/snmp/Openwrt/wlFrequency.sh +++ b/snmp/Openwrt/wlFrequency.sh @@ -12,7 +12,7 @@ if [ $# -ne 1 ]; then fi # Extract frequency -frequency=$(/usr/sbin/iw dev "$1" info | /bin/grep channel | /usr/bin/cut -f 2- -s -d" " | /usr/bin/cut -f 2- -s -d"(" | /usr/bin/cut -f 1 -s -d" ") +frequency=$(/usr/sbin/iw dev "$1" info 2>/dev/null | /bin/grep channel | /usr/bin/cut -f 2- -s -d" " | /usr/bin/cut -f 2- -s -d"(" | /usr/bin/cut -f 1 -s -d" ") # Return snmp result /bin/echo "$frequency" diff --git a/snmp/Openwrt/wlNoiseFloor.sh b/snmp/Openwrt/wlNoiseFloor.sh index a3880cf34..9cebb323d 100755 --- a/snmp/Openwrt/wlNoiseFloor.sh +++ b/snmp/Openwrt/wlNoiseFloor.sh @@ -13,7 +13,7 @@ fi # Extract noise floor. Note, all associated stations have the same value, so just grab the first one # Use tail, not head (i.e. last line, not first), as head exits immediately, breaks the pipe to cut! -noise=$(/usr/bin/iwinfo "$1" assoclist | /usr/bin/cut -s -d "/" -f 2 | /usr/bin/cut -s -d "(" -f 1 | /usr/bin/cut -s -d " " -f 2 | /usr/bin/tail -1) +noise=$(/usr/bin/iwinfo "$1" assoclist 2>/dev/null | /usr/bin/cut -s -d "/" -f 2 | /usr/bin/cut -s -d "(" -f 1 | /usr/bin/cut -s -d " " -f 2 | /usr/bin/tail -1) # Return snmp result /bin/echo "$noise" diff --git a/snmp/Openwrt/wlRate.sh b/snmp/Openwrt/wlRate.sh index 6b9072435..d53068560 100755 --- a/snmp/Openwrt/wlRate.sh +++ b/snmp/Openwrt/wlRate.sh @@ -16,7 +16,8 @@ fi # Calculate result. Sum just for debug, and have to return integer # => If not integer (e.g. 2.67e+07), LibreNMS will drop the exponent (result, 2.67 bits/sec!) -ratelist=$(/usr/sbin/iw dev "$1" station dump | /bin/grep "$2 bitrate" | /usr/bin/cut -f 2 -s -d" ") +ratelist=$(/usr/sbin/iw dev "$1" station dump 2>/dev/null | /bin/grep "$2 bitrate" | /usr/bin/cut -f 2 -s -d" ") +result=0 if [ "$3" = "sum" ]; then result=$(/bin/echo "$ratelist" | /usr/bin/awk -F ':' '{sum += $2} END {printf "%d\n", 1000000*sum}') elif [ "$3" = "avg" ]; then diff --git a/snmp/Openwrt/wlSNR.sh b/snmp/Openwrt/wlSNR.sh index 2378c1aac..006cae071 100755 --- a/snmp/Openwrt/wlSNR.sh +++ b/snmp/Openwrt/wlSNR.sh @@ -14,7 +14,7 @@ if [ $# -ne 2 ]; then fi # Calculate result. 
Sum just for debug, and return integer (safest / easiest) -snrlist=$(/usr/bin/iwinfo "$1" assoclist | /usr/bin/cut -s -d "/" -f 2 | /usr/bin/cut -s -d "(" -f 2 | /usr/bin/cut -s -d " " -f 2 | /usr/bin/cut -s -d ")" -f 1) +snrlist=$(/usr/bin/iwinfo "$1" assoclist 2>/dev/null | /usr/bin/cut -s -d "/" -f 2 | /usr/bin/cut -s -d "(" -f 2 | /usr/bin/cut -s -d " " -f 2 | /usr/bin/cut -s -d ")" -f 1) if [ "$2" = "sum" ]; then result=$(/bin/echo "$snrlist" | /usr/bin/awk -F ':' '{sum += $1} END {printf "%d\n", sum}') elif [ "$2" = "avg" ]; then From f0d1b10e5794b22eccc66620dd32390aa58acb3a Mon Sep 17 00:00:00 2001 From: "Shao Yu-Lung (Allen)" Date: Fri, 25 Nov 2022 22:05:34 +0800 Subject: [PATCH 227/332] typo: Fix systemd.py Installation step (#441) --- snmp/systemd.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/systemd.py b/snmp/systemd.py index 1e6b47d51..2dd248d44 100755 --- a/snmp/systemd.py +++ b/snmp/systemd.py @@ -10,7 +10,7 @@ # 1. Copy this script to /etc/snmp/ and make it executable: # chmod +x /etc/snmp/systemd.py # 2. Edit your snmpd.conf and include: -# extend systemdd /etc/snmp/systemd.py +# extend systemd /etc/snmp/systemd.py # 3. (Optional) Create a /etc/snmp/systemd.json file and specify: # a.) "systemctl_cmd" - String path to the systemctl binary ["/usr/bin/systemctl"] # b.) "include_inactive_units" - True/False string to include inactive units in From 3d9d1e18a259295d0a846a7eba3150a8d7a06875 Mon Sep 17 00:00:00 2001 From: bnerickson Date: Sat, 10 Dec 2022 05:14:41 -0800 Subject: [PATCH 228/332] Cleaning up certificate.py code and adding cert_location support for self-signed certificates (#447) --- snmp/certificate.py | 111 +++++++++++++++++++++++++------------------- 1 file changed, 63 insertions(+), 48 deletions(-) diff --git a/snmp/certificate.py b/snmp/certificate.py index c141afcd1..a1fa87d87 100755 --- a/snmp/certificate.py +++ b/snmp/certificate.py @@ -13,8 +13,7 @@ # } -def get_certificate_data(domain, port=443): - +def get_certificate_data(domain, cert_location, port=443): context = ssl.create_default_context() conn = context.wrap_socket( socket.socket(socket.AF_INET), @@ -25,13 +24,22 @@ def get_certificate_data(domain, port=443): error_msg = None ssl_info = {} + + # Load certificate for self-signed certificates if provided + if cert_location: + try: + context.load_verify_locations(cert_location) + except (FileNotFoundError, ssl.SSLError, PermissionError) as err: + error_msg = err + return ssl_info, error_msg + try: conn.connect((domain, port)) ssl_info = conn.getpeercert() - except ConnectionRefusedError as e: - error_msg = e + except ConnectionRefusedError as err: + error_msg = err # Manage expired certificates - except ssl.SSLCertVerificationError as e: + except ssl.SSLCertVerificationError: # Arbitrary start date ssl_info["notBefore"] = "Jan 1 00:00:00 2020 GMT" # End date is now (we don't have the real one but the certificate is expired) @@ -41,53 +49,60 @@ def get_certificate_data(domain, port=443): return ssl_info, error_msg -output = {} -output["error"] = 0 -output["errorString"] = "" -output["version"] = 1 +def main(): + output = {} + output["error"] = 0 + output["errorString"] = "" + output["version"] = 1 -with open(CONFIGFILE, "r") as json_file: - try: - configfile = json.load(json_file) - except json.decoder.JSONDecodeError as e: - output["error"] = 1 - output["errorString"] = "Configfile Error: '%s'" % e - -if not output["error"]: - output_data_list = [] - for domain in configfile["domains"]: - output_data = {} - - if "port" not in 
domain.keys(): - domain["port"] = 443 - certificate_data, error_msg = get_certificate_data( - domain["fqdn"], domain["port"] - ) - - output_data["cert_name"] = domain["fqdn"] - - if not error_msg: - ssl_date_format = r"%b %d %H:%M:%S %Y %Z" - validity_end = datetime.datetime.strptime( - certificate_data["notAfter"], ssl_date_format - ) - validity_start = datetime.datetime.strptime( - certificate_data["notBefore"], ssl_date_format + with open(CONFIGFILE, "r") as json_file: + try: + configfile = json.load(json_file) + except json.decoder.JSONDecodeError as err: + output["error"] = 1 + output["errorString"] = "Configfile Error: '%s'" % err + + if not output["error"]: + output_data_list = [] + for domain in configfile["domains"]: + output_data = {} + + if "port" not in domain.keys(): + domain["port"] = 443 + if "cert_location" not in domain.keys(): + domain["cert_location"] = None + certificate_data, error_msg = get_certificate_data( + domain["fqdn"], domain["cert_location"], domain["port"] ) - cert_age = datetime.datetime.now() - validity_start - cert_still_valid = validity_end - datetime.datetime.now() - output_data["age"] = cert_age.days - output_data["remaining_days"] = cert_still_valid.days + output_data["cert_name"] = domain["fqdn"] - else: - output_data["age"] = None - output_data["remaining_days"] = None - output["error"] = 1 - output["errorString"] = "%s: %s" % (domain["fqdn"], error_msg) + if not error_msg: + ssl_date_format = r"%b %d %H:%M:%S %Y %Z" + validity_end = datetime.datetime.strptime( + certificate_data["notAfter"], ssl_date_format + ) + validity_start = datetime.datetime.strptime( + certificate_data["notBefore"], ssl_date_format + ) + cert_age = datetime.datetime.now() - validity_start + cert_still_valid = validity_end - datetime.datetime.now() + + output_data["age"] = cert_age.days + output_data["remaining_days"] = cert_still_valid.days + + else: + output_data["age"] = None + output_data["remaining_days"] = None + output["error"] = 1 + output["errorString"] = "%s: %s" % (domain["fqdn"], error_msg) + + output_data_list.append(output_data) + + output["data"] = output_data_list - output_data_list.append(output_data) + print(json.dumps(output)) - output["data"] = output_data_list -print(json.dumps(output)) +if __name__ == "__main__": + main() From af450216aac6be654737bdbf681a11ada959cccd Mon Sep 17 00:00:00 2001 From: "Shao Yu-Lung (Allen)" Date: Sun, 11 Dec 2022 22:34:11 +0800 Subject: [PATCH 229/332] fix: remove unnecessary line breaks (#443) --- agent-local/nginx-python3.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/agent-local/nginx-python3.py b/agent-local/nginx-python3.py index fd710ba8c..6ebc11482 100755 --- a/agent-local/nginx-python3.py +++ b/agent-local/nginx-python3.py @@ -20,7 +20,7 @@ dataorder = ["Active", "Reading", "Writing", "Waiting", "Requests"] -print("<<>>\n") +print("<<>>") for param in dataorder: if param == "Active": From 7969e00126b9ad61352f5a941d10ff29055bf72b Mon Sep 17 00:00:00 2001 From: Dan Kerse Date: Tue, 13 Dec 2022 09:32:19 +1300 Subject: [PATCH 230/332] Update fail2ban so it doesn't panic when the return value is zero (#446) --- snmp/fail2ban | 60 +++++++++++++++++++++++++-------------------------- 1 file changed, 29 insertions(+), 31 deletions(-) diff --git a/snmp/fail2ban b/snmp/fail2ban index 42f29ed63..b6429e61d 100644 --- a/snmp/fail2ban +++ b/snmp/fail2ban @@ -122,39 +122,37 @@ sub stats{ elsif ($? & 127) { $toReturn{errorString}= sprintf "fail2ban-client died with signal %d, %s coredump\n", ($? & 127), ($? 
& 128) ? 'with' : 'without'; - } - else { - $toReturn{error}=$? >> 8; - $toReturn{errorString}="fail2ban-client exited with ".$toReturn{error}; - } + } if ( $toReturn{error} == 0 ){ - - my @jailsOutputA=split(/\n/, $jailsOutput); - my ( $jailsS )=grep( /Jail\ list/, @jailsOutputA ); - $jailsS=~s/.*\://; - $jailsS=~s/\s//g; - my @jails=split(/\,/, $jailsS); - - #process jails - my $int=0; - while(defined($jails[$int])){ - - #get the total for this jail - my $jailStatusOutput=`$f2bc status $jails[$int]`; - my @jailStatusOutputA=split(/\n/, $jailStatusOutput); - my ( $jailTotal )=grep(/Currently\ banned\:/, @jailStatusOutputA); - $jailTotal=~s/.*\://; - $jailTotal=~s/\s//g; - - #tally the total and add this jail to the list - $toReturn{data}{total} = $toReturn{data}{total} + $jailTotal; - $toReturn{data}{jails}{ $jails[$int] } = $jailTotal; - - $int++; - } - - } + + my @jailsOutputA=split(/\n/, $jailsOutput); + my ( $jailsS )=grep( /Jail\ list/, @jailsOutputA ); + $jailsS=~s/.*\://; + $jailsS=~s/\s//g; + my @jails=split(/\,/, $jailsS); + + #process jails + my $int=0; + while(defined($jails[$int])){ + + #get the total for this jail + my $jailStatusOutput=`$f2bc status $jails[$int]`; + my @jailStatusOutputA=split(/\n/, $jailStatusOutput); + my ( $jailTotal )=grep(/Currently\ banned\:/, @jailStatusOutputA); + $jailTotal=~s/.*\://; + $jailTotal=~s/\s//g; + + #tally the total and add this jail to the list + $toReturn{data}{total} = $toReturn{data}{total} + $jailTotal; + $toReturn{data}{jails}{ $jails[$int] } = $jailTotal; + + $int++; + } + } else { + $toReturn{error}=$? >> 8; + $toReturn{errorString}="fail2ban-client exited with ".$toReturn{error}; + } my $j=JSON->new; $j->canonical(1); From 1ab38e77b90f7918e3c372739f2983be96325f88 Mon Sep 17 00:00:00 2001 From: Martin <10722552+efelon@users.noreply.github.com> Date: Wed, 4 Jan 2023 00:00:33 +0100 Subject: [PATCH 231/332] Latest pi-hole API needs auth parameter for summary (#451) fix https://github.com/librenms/librenms-agent/issues/366 --- snmp/pi-hole | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/snmp/pi-hole b/snmp/pi-hole index 2a7682f9f..f0d226e01 100644 --- a/snmp/pi-hole +++ b/snmp/pi-hole @@ -6,20 +6,20 @@ CONFIGFILE='/etc/snmp/pi-hole.conf' API_AUTH_KEY="" API_URL="localhost/admin/api.php" -URL_READ_ONLY="?summaryRaw" +URL_READ_ONLY="?summaryRaw&auth=" URL_QUERY_TYPE="?getQueryTypes&auth=" PICONFIGFILE='/etc/pihole/setupVars.conf' DHCPLEASEFILE='/etc/pihole/dhcp.leases' if [ -f $CONFIGFILE ]; then - # shellcheck disable=SC1090 - . $CONFIGFILE + # shellcheck disable=SC1090 + . $CONFIGFILE fi # read in pi-hole variables for DHCP range if [ -f $PICONFIGFILE ]; then - # shellcheck disable=SC1090 - . $PICONFIGFILE + # shellcheck disable=SC1090 + . 
$PICONFIGFILE fi #/ Description: BASH script to get Pi-hole stats @@ -89,24 +89,24 @@ debug() { exportdata() { # domains_being_blocked / dns_query_total / ads_blocked_today / ads_percentage_today # unique_domains / queries_forwarded / queries_cached - GET_STATS=$(curl -s $API_URL"$URL_READ_ONLY" | jq '.domains_being_blocked, .dns_queries_today, .ads_blocked_today, .ads_percentage_today, .unique_domains, .queries_forwarded, .queries_cached') + GET_STATS=$(curl -s "${API_URL}${URL_READ_ONLY}${API_AUTH_KEY}" | jq '.domains_being_blocked, .dns_queries_today, .ads_blocked_today, .ads_percentage_today, .unique_domains, .queries_forwarded, .queries_cached') echo "$GET_STATS" | tr " " "\n" # A / AAAA / PTR / SRV - GET_QUERY_TYPE=$(curl -s $API_URL"$URL_QUERY_TYPE""$API_AUTH_KEY" | jq '.[]["A (IPv4)", "AAAA (IPv6)", "PTR", "SRV"]') + GET_QUERY_TYPE=$(curl -s "${API_URL}${URL_QUERY_TYPE}${API_AUTH_KEY}" | jq '.[]["A (IPv4)", "AAAA (IPv6)", "PTR", "SRV"]') echo "$GET_QUERY_TYPE" | tr " " "\n" # Find number of DHCP address in scope and current lease count # case-insensitive compare, just in case :) - if [ "${DHCP_ACTIVE,,}" = "true" ]; then + if [ -n "${DHCP_ACTIVE+x}" ] && [ "${DHCP_ACTIVE,,}" = "true" ]; then # Max IP addresses in scope # Convert IPs to decimal and subtract IFS="." read -r -a array <<< "$DHCP_START" - DHCPSTARTDECIMAL=$(( (${array[0]}*256**3) + (${array[1]}*256**2) + (${array[2]}*256) + ${array[3]} )) + DHCPSTARTDECIMAL=$(( (array[0]*256**3) + (array[1]*256**2) + (array[2]*256) + array[3] )) IFS="." read -r -a array <<< "$DHCP_END" - DHCPENDDECIMAL=$(( (${array[0]}*256**3) + (${array[1]}*256**2) + (${array[2]}*256) + ${array[3]} )) - expr $DHCPENDDECIMAL - $DHCPSTARTDECIMAL + DHCPENDDECIMAL=$(( (array[0]*256**3) + (array[1]*256**2) + (array[2]*256) + array[3] )) + echo $(( DHCPENDDECIMAL - DHCPSTARTDECIMAL )) # Current lease count - cat $DHCPLEASEFILE | wc -l + wc -l < ${DHCPLEASEFILE} else echo 0 echo 0 @@ -114,7 +114,7 @@ exportdata() { } if [ -z "$*" ]; then - exportdata + exportdata fi expr "$*" : ".*--help" > /dev/null && usage expr "$*" : ".*--debug" > /dev/null && debug From 8f7608aeffd8ba27b29996fafc673377f45ee2a5 Mon Sep 17 00:00:00 2001 From: bnerickson Date: Tue, 3 Jan 2023 15:06:16 -0800 Subject: [PATCH 232/332] =?UTF-8?q?Updating=20wireguard=20script=20to=20di?= =?UTF-8?q?scover=20interfaces=20separately=20and=20execu=E2=80=A6=20(#452?= =?UTF-8?q?)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- snmp/wireguard.py | 82 +++++++++++++++++++++++++---------------------- 1 file changed, 44 insertions(+), 38 deletions(-) diff --git a/snmp/wireguard.py b/snmp/wireguard.py index 66c90ec93..d5fb515a8 100755 --- a/snmp/wireguard.py +++ b/snmp/wireguard.py @@ -14,10 +14,14 @@ # extend wireguard /etc/snmp/wireguard.py # 3. Create a /etc/snmp/wireguard.json file and specify: # a.) (optional) "wg_cmd" - String path to the wg binary ["/usr/bin/wg"] -# b.) "public_key_to_arbitrary_name" - A dictionary to convert between the publickey -# assigned to the client (specified in the wireguard -# interface conf file) to an arbitrary, friendly -# name. The friendly names MUST be unique within +# b.) "public_key_to_arbitrary_name" - Two nested dictionaries where the key for the outer +# dictionary is the interface name, and the value for +# the outer dictionary is the inner dictionary. 
The +# inner dictionary is composed of key values +# corresponding to the clients' public keys +# (specified in the wireguard interface config file) +# and values corresponding to arbitrary friendly +# names. The friendly names MUST be unique within # each interface. Also note that the interface name # and friendly names are used in the RRD filename, # so using special characters is highly discouraged. @@ -44,8 +48,8 @@ from itertools import chain CONFIG_FILE = "/etc/snmp/wireguard.json" -WG_ARGS = ["show", "all", "dump"] WG_CMD = "/usr/bin/wg" +WG_ARGS_SHOW_INTFS = ["show", "interfaces"] def error_handler(error_name, err): @@ -75,7 +79,7 @@ def config_file_parser(): Inputs: None Outputs: - wg_cmd: The full wg command to execute. + wg_cmd: The final wg binary to execute. interface_clients_dict: Dictionary mapping of interface names to public_key->client names. """ # Load configuration file if it exists @@ -93,8 +97,6 @@ def config_file_parser(): ) as err: error_handler("Config File Error", err) - # Create and return full wg command. - wg_cmd.extend(WG_ARGS) return wg_cmd, interface_clients_dict @@ -132,19 +134,19 @@ def config_file_validator(interface_clients_dict): error_handler("Config File Error", err) -def command_executor(wg_cmd): +def command_executor(wg_cmd_full): """ command_executor(): Execute the wg command and return the output. Inputs: - wg_cmd: The full wg command to execute. + wg_cmd_full: The full wg command to execute. Outputs: poutput: The stdout of the executed command (empty byte-string if error). """ try: # Execute wg command poutput = subprocess.check_output( - wg_cmd, + wg_cmd_full, stdin=None, stderr=subprocess.PIPE, ) @@ -153,7 +155,7 @@ def command_executor(wg_cmd): return poutput -def output_parser(line, interface_clients_dict): +def output_parser(line, interface_clients_dict, interface): """ output_parser(): Parses a line from the wg command for the client's public key, traffic inbound and outbound, wireguard interface, and last handshake timestamp. @@ -161,18 +163,18 @@ def output_parser(line, interface_clients_dict): Inputs: line: The wireguard client status line from the wg command stdout. interface_clients_dict: Dictionary mapping of interface to public_key->client names. + interface: The wireguard interface we are parsing. Outputs: - wireguard_data: A dictionary of a peer's server interface, public key, bytes sent and - received, and minutes since last handshake + wireguard_data: A dictionary of a peer's public key, bytes sent and received, and minutes + since last handshake. """ line_parsed = line.strip().split() try: - interface = str(line_parsed[0]) - public_key = str(line_parsed[1]) - timestamp = int(line_parsed[5]) - bytes_rcvd = int(line_parsed[6]) - bytes_sent = int(line_parsed[7]) + public_key = str(line_parsed[0]) + timestamp = int(line_parsed[4]) + bytes_rcvd = int(line_parsed[5]) + bytes_sent = int(line_parsed[6]) except (IndexError, ValueError) as err: error_handler("Command Output Parsing Error", err) @@ -196,12 +198,10 @@ def output_parser(line, interface_clients_dict): ) wireguard_data = { - interface: { - friendly_name: { - "minutes_since_last_handshake": minutes_since_last_handshake, - "bytes_rcvd": bytes_rcvd, - "bytes_sent": bytes_sent, - } + friendly_name: { + "minutes_since_last_handshake": minutes_since_last_handshake, + "bytes_rcvd": bytes_rcvd, + "bytes_sent": bytes_sent, } } @@ -226,19 +226,25 @@ def main(): # Verify contents of the config file are valid. 
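     # (interface_clients_dict follows the nested layout described in the
     # header comment, e.g. {"wg0": {"peer public key": "friendly name"}},
     # where "wg0" is only a hypothetical interface name.)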
     config_file_validator(interface_clients_dict)
 
-    # Execute wg command and parse output. We skip the first line ("[1:]") since that's the
-    # wireguard server's public key declaration.
-    for line in command_executor(wg_cmd).decode("utf-8").split("\n")[1:]:
-        if not line:
-            continue
-        # Parse each line and import the resultant dictionary into output_data. We update the
-        # interface key with new clients as they are found and instantiate new interface keys as
-        # they are found.
-        for intf, intf_data in output_parser(line, interface_clients_dict).items():
-            if intf not in output_data["data"]:
-                output_data["data"][intf] = {}
-            for client, client_data in intf_data.items():
-                output_data["data"][intf][client] = client_data
+    # Get list of interfaces
+    wg_cmd_show_intfs = wg_cmd + WG_ARGS_SHOW_INTFS
+    wg_intfs = command_executor(wg_cmd_show_intfs).decode("utf-8").strip().split(" ")
+
+    # Execute wg command on each discovered interface and parse output. We skip the first line
+    # ("[1:]") since that's the wireguard server's public key declaration.
+    for interface in wg_intfs:
+        wg_cmd_dump = wg_cmd + ["show"] + [interface] + ["dump"]
+        output_data["data"][interface] = {}
+        for line in command_executor(wg_cmd_dump).decode("utf-8").split("\n")[1:]:
+            if not line:
+                continue
+            # Parse each line and import the resultant dictionary into output_data. We update the
+            # interface key with new clients as they are found and instantiate new interface keys as
+            # they are found.
+            for friendly_name, client_data in output_parser(
+                line, interface_clients_dict, interface
+            ).items():
+                output_data["data"][interface][friendly_name] = client_data
 
     print(json.dumps(output_data))
 

From 519d61e681369a789ea8c96fd2aeadb35e107088 Mon Sep 17 00:00:00 2001
From: "Zane C. Bowers-Hadley"
Date: Fri, 20 Jan 2023 16:25:17 -0600
Subject: [PATCH 233/332] CAPE/Cuckoo extend update (#422)

* now properly counts pending
* lots more work on it
* more work
* malscore, severity, weight, and confidence now work with packages
* misc minor cleanups
* fix for the processing log which includes the task id as well
---
 snmp/cape | 371 +++++++++++++++++++++++++++++++++++++++---------------
 1 file changed, 273 insertions(+), 98 deletions(-)

diff --git a/snmp/cape b/snmp/cape
index e0c2c795a..c85418014 100755
--- a/snmp/cape
+++ b/snmp/cape
@@ -1,6 +1,6 @@
 #!/usr/bin/env perl
 
-#Copyright (c) 2022, Zane C. Bowers-Hadley
+#Copyright (c) 2023, Zane C. Bowers-Hadley
 #All rights reserved.
 #
 #Redistribution and use in source and binary forms, with or without modification,
@@ -34,39 +34,46 @@ Supported command line options are as below.
 
     -c   Config INI file.
          Default: /usr/local/etc/cape_extend.ini
 
+Depends can be installed via...
+
+    apt-get install libfile-readbackwards-perl libjson-perl libconfig-tiny-perl libdbi-perl libfile-slurp-perl libstatistics-lite-perl
+
 The default settings are...
# DBI connection DSN dsn=dbi:Pg:dbname=cape - + # DB user user=cape - + # DB PW pass= - + # CAPEv2 cuckoo log file clog=/opt/CAPEv2/log/cuckoo.log - + # CAPEv2 process log file plog=/opt/CAPEv2/log/process.log - + + # storage location + storage=/opt/CAPEv2/storage + # 0/1 for if it is okay for the process log to not exist # this enables it to work with cuckoo as well as CAPEv2 mplogok=1 - + # list of ignores ignores=/usr/local/etc/cape_extend.ignores - + # send errors along for inclusion in the event log sendErrors=1 - + # send criticals along for inclusion in the event log sendCriticals=1 - + # send warnings along for inclusion in the event log sendWarnings= 1 - + # don't use analysis_started_on. analysis_finished_on. processing_started_on, # processing_finished_on, signatures_started_on, signatures_finished_on, # reporting_started_on, or reporting_finished_on with the SQL statement @@ -85,8 +92,17 @@ generated when VM traffic is dropped, you would use the two lines such as below. WARNING PCAP file does not exist at path WARNING Unable to Run Suricata: Pcap file +In 'conf/reporting.conf' for cape, 'litereport' will need enabled. 'keys_to_copy' +should include 'signatures' and 'detections'. + =cut +# # location of the IP cache to use +# ip_cache=/var/cache/cape_extend_ip + +# # subnets not to count for IP accounting +# ip_ignore=/usr/local/etc/cape_ip_ignore + use strict; use warnings; use Getopt::Long; @@ -95,7 +111,7 @@ use JSON; use Config::Tiny; use DBI; use Time::Piece; -use File::Slurp; +use File::Slurp qw(read_file); use Statistics::Lite qw(:all); sub version { @@ -160,7 +176,6 @@ my $return_json = { failed_analysis => 0, failed_processing => 0, failed_reporting => 0, - packages => {}, dropped_files => 0, running_processes => 0, api_calls => 0, @@ -174,16 +189,22 @@ my $return_json = { timedout => 0, pkg_stats => {}, total_tasks => 0, + wrong_pkg => 0, + detections_stats => {}, }, error => 0, errorString => '', version => 1, }; +# holds a list of reported tasks +my $reported = {}; + my @stats_for = ( 'dropped_files', 'running_processes', 'api_calls', 'domains', 'signatures_total', 'signatures_alert', 'files_written', 'registry_keys_modified', - 'crash_issues', 'anti_issues', + 'crash_issues', 'anti_issues', 'malscore', 'severity', + 'confidence', 'weight' ); my $ag_stats = { @@ -197,6 +218,10 @@ my $ag_stats = { registry_keys_modified => [], crash_issues => [], anti_issues => [], + malscore => [], + severity => [], + confidence => [], + weight => [], }; my $pkg_stats = {}; @@ -211,8 +236,11 @@ my $defaults = { pass => '', clog => '/opt/CAPEv2/log/cuckoo.log', plog => '/opt/CAPEv2/log/process.log', + storage => '/opt/CAPEv2/storage', mplogok => 1, ignores => '/usr/local/etc/cape_extend.ignores', + ip_cache => '/var/cache/cape_extend_ip', + ip_ignore => '/usr/local/etc/cape_ip_ignore', sendErrors => 1, sendCriticals => 1, sendWarnings => 1, @@ -247,6 +275,26 @@ if ( -f $config->{ignores} ) { } } +# # process the IP ignore file +# my @ip_ignores; +# if ( -f $config->{ip_ignore} ) { +# my $ip_ignore_raw = read_file( $config->{ip_ignores} ); +# @ip_ignores = grep( !/^[\ \t]*$/, grep( !/^[\ \t]*\#/, split( /\n/, $ip_ignore_raw ) ) ); +# } + +# # process the IP ignore file +# my %ip_cache; +# if ( -f $config->{ip_ignore} ) { +# my $ip_cache_raw = read_file( $config->{ignores} ); +# # IP,count,time +# # Time is unix time. 
+# my @ip_cache_split = grep( !/^[0-9a-fA-F\:\.]+\,[0-9]+\,[0-9]+$/, split( /\n/, $ip_cache_raw ) ); +# foreach my $line (@ip_cache_split) { +# my ( $ip, $ip_count, $ip_time ) = split( /\,/ . $line ); +# $ip_cache{$ip} = { count => $ip_count, time => $ip_time }; +# } +# } + # put together the list of logs to read my @logs; if ( !-f $config->{clog} ) { @@ -262,6 +310,10 @@ else { push( @logs, $config->{plog} ); } +# +# process all the log lines, counting them +# + my $process_loop = 0; my $process_logs = 1; while ( $process_logs && defined( $logs[$process_loop] ) ) { @@ -274,12 +326,29 @@ while ( $process_logs && defined( $logs[$process_loop] ) ) { my $current_entry = ''; while ( defined($bw) && defined( my $log_line = $bw->readline ) && $continue ) { $current_entry = $log_line . $current_entry; - if ( $current_entry - =~ /^20[0-9][0-9]\-[01][0-9]\-[0-3][0-9]\ [0-2][0-9]\:[0-5][0-9]\:[0-5][0-9]\,[0-9]+\ \[[a-z-A-Z\.0-9\_\-]+\]\ [a-zA-Z]+\:/ + if ( + ( + $current_entry + =~ /^20[0-9][0-9]\-[01][0-9]\-[0-3][0-9]\ [0-2][0-9]\:[0-5][0-9]\:[0-5][0-9]\,[0-9]+\ \[[a-z-A-Z\.0-9\_\-]+\]\ [a-zA-Z]+\:/ + ) + || ( $current_entry + =~ /^20[0-9][0-9]\-[01][0-9]\-[0-3][0-9]\ [0-2][0-9]\:[0-5][0-9]\:[0-5][0-9]\,[0-9]+\ \[[a-z-A-Z\.0-9\_\-]+\]\ \[[a-z-A-Z\.0-9\_\-]+\]\ [a-zA-Z]+\:/ + ) ) { + my ( $date, $time, $log_task_id, $lib, $level, $entry ); + # parse it and blank it for when we get to the next one. - my ( $date, $time, $lib, $level, $entry ) = split( /[\ \t]+/, $current_entry, 5 ); + if ( $current_entry + =~ /^20[0-9][0-9]\-[01][0-9]\-[0-3][0-9]\ [0-2][0-9]\:[0-5][0-9]\:[0-5][0-9]\,[0-9]+\ \[[a-z-A-Z\.0-9\_\-]+\]\ [a-zA-Z]+\:/ + ) + { + ( $date, $time, $lib, $level, $entry ) = split( /[\ \t]+/, $current_entry, 5 ); + } + else { + ( $date, $time, $log_task_id, $lib, $level, $entry ) = split( /[\ \t]+/, $current_entry, 6 ); + $entry = $log_task_id . ': ' . $entry; + } $current_entry = ''; # chomp off the seconds place after the , @@ -314,6 +383,9 @@ while ( $process_logs && defined( $logs[$process_loop] ) ) { push( @{ $return_json->{data}->{criticals} }, $entry ); } } + if ( $level eq 'warning' && $entry =~ /submitted\ the\ job\ with\ wrong\ package/ ) { + $return_json->{wrong_pkg}++; + } } } } @@ -322,10 +394,13 @@ while ( $process_logs && defined( $logs[$process_loop] ) ) { $process_loop++; } +# +# put together query for getting the current tasks +# my $query; if ( $config->{dsn} =~ /^[Dd][Bb][Ii]:[Mm]ysql/ ) { - $query - = "select status,package from tasks where ( added_on > FROM_UNIXTIME('" + $query = "select id,status,package from tasks where ( status != 'pending' ) and '. +'( added_on > FROM_UNIXTIME('" . $target_time . "')) or " . "( started_on > FROM_UNIXTIME('" @@ -336,9 +411,11 @@ if ( $config->{dsn} =~ /^[Dd][Bb][Ii]:[Mm]ysql/ ) { } else { $query - = "select status,package,dropped_files,running_processes,api_calls,domains,signatures_total,signatures_alert,files_written,registry_keys_modified,crash_issues,anti_issues,timedout from tasks where ( added_on > CURRENT_TIMESTAMP - interval '5 minutes' ) or " + = "select id,status,package,dropped_files,running_processes,api_calls,domains,signatures_total,signatures_alert,files_written,registry_keys_modified,crash_issues,anti_issues,timedout from tasks where" + . " (status != 'pending') and " + . " ( added_on > CURRENT_TIMESTAMP - interval '5 minutes' ) or " . "( started_on > CURRENT_TIMESTAMP - interval '5 minutes' ) or " - . "( completed_on > CURRENT_TIMESTAMP - interval '5 minutes' )"; + . 
"( completed_on > CURRENT_TIMESTAMP - interval '5 minutes' ) "; if ( !$config->{cuckoosql} ) { $query = $query @@ -358,6 +435,13 @@ else { eval { my $dbh = DBI->connect( $config->{dsn}, $config->{user}, $config->{pass} ) || die($DBI::errstr); + + eval { + my $sth_pending = $dbh->prepare("select * from tasks where status = 'pending'"); + $sth_pending->execute; + $return_json->{data}{pending} = $sth_pending->rows; + }; + my $sth = $dbh->prepare($query); $sth->execute; my $task_status; @@ -373,6 +457,7 @@ eval { my $crash_issues; my $anti_issues; my $timedout; + my $task_id; # # MySQL is basically for old Cuckoo support. # CAPEv2 does not really play nice with it because of column issues @@ -388,10 +473,10 @@ eval { } else { $sth->bind_columns( - undef, \$task_status, \$task_package, \$dropped_files, - \$running_processes, \$api_calls, \$domains, \$signatures_total, - \$signatures_alert, \$files_written, \$registry_keys_modified, \$crash_issues, - \$anti_issues, \$timedout + undef, \$task_id, \$task_status, \$task_package, + \$dropped_files, \$running_processes, \$api_calls, \$domains, + \$signatures_total, \$signatures_alert, \$files_written, \$registry_keys_modified, + \$crash_issues, \$anti_issues, \$timedout ); while ( $sth->fetch ) { if ( defined( $return_json->{data}->{$task_status} ) ) { @@ -399,122 +484,174 @@ eval { $return_json->{data}->{total_tasks}++; } - # skip blank entries - if ( $task_package ne '' ) { - if ( defined( $return_json->{data}->{packages}->{$task_package} ) ) { - $return_json->{data}->{packages}->{$task_package}++; - } - else { - $return_json->{data}->{packages}->{$task_package} = 1; - } + if ( $task_status eq 'reported' ) { + $reported->{$task_id} = { + package => $task_package, + dropped_files => $dropped_files, + running_processes => $running_processes, + domains => $domains, + api_calls => $api_calls, + signatures_total => $signatures_total, + signatures_alert => $signatures_alert, + files_written => $files_written, + registry_keys_modified => $registry_keys_modified, + crash_issue => $crash_issues, + anti_issues => $anti_issues, + timedout => $timedout, + }; } - if ( defined($running_processes) ) { + if ( !defined($task_package) || $task_package eq '' ) { + $task_package = 'generic'; + } + + if ( !defined($running_processes) ) { + $running_processes = 0; + } + if ( $task_status eq 'reported' ) { $return_json->{data}->{running_processes} += $running_processes; push( @{ $ag_stats->{running_processes} }, $running_processes ); } - else { + if ( !defined($api_calls) ) { + $api_calls = 0; } - - if ( defined($api_calls) ) { + if ( $task_status eq 'reported' ) { $return_json->{data}->{api_calls} += $api_calls; push( @{ $ag_stats->{api_calls} }, $api_calls ); } - if ( defined($domains) ) { + if ( !defined($domains) ) { + $domains = 0; + } + if ( $task_status eq 'reported' ) { $return_json->{data}->{domains} += $domains; push( @{ $ag_stats->{domains} }, $domains ); } - if ( defined($signatures_alert) ) { + if ( !defined($signatures_alert) ) { + $signatures_alert = 0; + } + if ( $task_status eq 'reported' ) { $return_json->{data}->{signatures_alert} += $signatures_alert; push( @{ $ag_stats->{signatures_alert} }, $signatures_alert ); } - if ( defined($signatures_total) ) { + if ( !defined($signatures_total) ) { + $signatures_total = 0; + } + if ( $task_status eq 'reported' ) { $return_json->{data}->{signatures_total} += $signatures_total; push( @{ $ag_stats->{signatures_total} }, $signatures_total ); } - if ( defined($files_written) ) { + if ( !defined($files_written) ) { 
+ $files_written = 0; + } + if ( $task_status eq 'reported' ) { $return_json->{data}->{files_written} += $files_written; push( @{ $ag_stats->{files_written} }, $files_written ); } - if ( defined($registry_keys_modified) ) { + if ( !defined($registry_keys_modified) ) { + $registry_keys_modified = 0; + } + if ( $task_status eq 'reported' ) { $return_json->{data}->{registry_keys_modified} += $registry_keys_modified; push( @{ $ag_stats->{registry_keys_modified} }, $registry_keys_modified ); } - if ( defined($crash_issues) ) { + if ( !defined($crash_issues) ) { + $crash_issues = 0; + } + if ( $task_status eq 'reported' ) { $return_json->{data}->{crash_issues} += $crash_issues; push( @{ $ag_stats->{crash_issues} }, $crash_issues ); } - if ( defined($anti_issues) ) { + if ( !defined($anti_issues) ) { + $anti_issues = 0; + } + if ( $task_status eq 'reported' ) { $return_json->{data}->{anti_issues} += $anti_issues; push( @{ $ag_stats->{anti_issues} }, $anti_issues ); } - if ( defined($dropped_files) ) { + if ( !defined($dropped_files) ) { + $dropped_files = 0; + } + if ( $task_status eq 'reported' ) { $return_json->{data}->{dropped_files} += $dropped_files; push( @{ $ag_stats->{dropped_files} }, $dropped_files ); + } - # put per package stats together - if ( $task_package ne '' ) { - if ( !defined( $return_json->{data}->{pkg_stats}->{$task_package} ) ) { - $return_json->{data}->{pkg_stats}->{$task_package} = { - dropped_files => $dropped_files, - running_processes => $running_processes, - api_calls => $api_calls, - domains => $domains, - signatures_total => $signatures_total, - signatures_alert => $signatures_alert, - files_written => $files_written, - registry_keys_modified => $registry_keys_modified, - crash_issues => $crash_issues, - anti_issues => $anti_issues - }; - $pkg_stats->{$task_package} = { - dropped_files => [$dropped_files], - running_processes => [$running_processes], - api_calls => [$api_calls], - domains => [$domains], - signatures_total => [$signatures_total], - signatures_alert => [$signatures_alert], - files_written => [$files_written], - registry_keys_modified => [$registry_keys_modified], - crash_issues => [$crash_issues], - anti_issues => [$anti_issues] - }; - } - else { - $return_json->{data}->{pkg_stats}->{$task_package}->{dropped_files} += $dropped_files; - $return_json->{data}->{pkg_stats}->{$task_package}->{running_processes} += $running_processes; - $return_json->{data}->{pkg_stats}->{$task_package}->{api_calls} += $api_calls; - $return_json->{data}->{pkg_stats}->{$task_package}->{domains} += $domains; - $return_json->{data}->{pkg_stats}->{$task_package}->{signatures_total} += $signatures_total; - $return_json->{data}->{pkg_stats}->{$task_package}->{signatures_alert} += $signatures_alert; - $return_json->{data}->{pkg_stats}->{$task_package}->{files_written} += $files_written; - $return_json->{data}->{pkg_stats}->{$task_package}->{registry_keys_modified} - += $registry_keys_modified; - $return_json->{data}->{pkg_stats}->{$task_package}->{crash_issues} += $crash_issues; - $return_json->{data}->{pkg_stats}->{$task_package}->{anti_issues} += $anti_issues; - - push( @{ $pkg_stats->{$task_package}->{dropped_files} }, $dropped_files ); - push( @{ $pkg_stats->{$task_package}->{running_processes} }, $running_processes ); - push( @{ $pkg_stats->{$task_package}->{api_calls} }, $api_calls ); - push( @{ $pkg_stats->{$task_package}->{domains} }, $domains ); - push( @{ $pkg_stats->{$task_package}->{signatures_total} }, $signatures_total ); - push( @{ 
$pkg_stats->{$task_package}->{signatures_alert} }, $signatures_alert ); - push( @{ $pkg_stats->{$task_package}->{files_written} }, $files_written ); - push( @{ $pkg_stats->{$task_package}->{registry_keys_modified} }, $registry_keys_modified ); - push( @{ $pkg_stats->{$task_package}->{crash_issues} }, $crash_issues ); - push( @{ $pkg_stats->{$task_package}->{anti_issues} }, $anti_issues ); - } - } + # put per package stats together + if ( !defined( $return_json->{data}->{pkg_stats}->{$task_package} ) ) { + $return_json->{data}->{pkg_stats}->{$task_package} = { + dropped_files => $dropped_files, + running_processes => $running_processes, + api_calls => $api_calls, + domains => $domains, + signatures_total => $signatures_total, + signatures_alert => $signatures_alert, + files_written => $files_written, + registry_keys_modified => $registry_keys_modified, + crash_issues => $crash_issues, + anti_issues => $anti_issues, + banned => 0, + pending => 0, + running => 0, + completed => 0, + distributed => 0, + reported => 0, + recovered => 0, + failed_analysis => 0, + failed_processing => 0, + failed_reporting => 0, + tasks => 1, + }; + $pkg_stats->{$task_package} = { + dropped_files => [$dropped_files], + running_processes => [$running_processes], + api_calls => [$api_calls], + domains => [$domains], + signatures_total => [$signatures_total], + signatures_alert => [$signatures_alert], + files_written => [$files_written], + registry_keys_modified => [$registry_keys_modified], + crash_issues => [$crash_issues], + anti_issues => [$anti_issues], + malscore => [], + confidence => [], + severity => [], + }; + } + else { + $return_json->{data}->{pkg_stats}->{$task_package}->{tasks}++; + $return_json->{data}->{pkg_stats}->{$task_package}->{dropped_files} += $dropped_files; + $return_json->{data}->{pkg_stats}->{$task_package}->{running_processes} += $running_processes; + $return_json->{data}->{pkg_stats}->{$task_package}->{api_calls} += $api_calls; + $return_json->{data}->{pkg_stats}->{$task_package}->{domains} += $domains; + $return_json->{data}->{pkg_stats}->{$task_package}->{signatures_total} += $signatures_total; + $return_json->{data}->{pkg_stats}->{$task_package}->{signatures_alert} += $signatures_alert; + $return_json->{data}->{pkg_stats}->{$task_package}->{files_written} += $files_written; + $return_json->{data}->{pkg_stats}->{$task_package}->{registry_keys_modified} + += $registry_keys_modified; + $return_json->{data}->{pkg_stats}->{$task_package}->{crash_issues} += $crash_issues; + $return_json->{data}->{pkg_stats}->{$task_package}->{anti_issues} += $anti_issues; + + push( @{ $pkg_stats->{$task_package}->{dropped_files} }, $dropped_files ); + push( @{ $pkg_stats->{$task_package}->{running_processes} }, $running_processes ); + push( @{ $pkg_stats->{$task_package}->{api_calls} }, $api_calls ); + push( @{ $pkg_stats->{$task_package}->{domains} }, $domains ); + push( @{ $pkg_stats->{$task_package}->{signatures_total} }, $signatures_total ); + push( @{ $pkg_stats->{$task_package}->{signatures_alert} }, $signatures_alert ); + push( @{ $pkg_stats->{$task_package}->{files_written} }, $files_written ); + push( @{ $pkg_stats->{$task_package}->{registry_keys_modified} }, $registry_keys_modified ); + push( @{ $pkg_stats->{$task_package}->{crash_issues} }, $crash_issues ); + push( @{ $pkg_stats->{$task_package}->{anti_issues} }, $anti_issues ); } + $return_json->{data}->{pkg_stats}->{$task_package}->{$task_status}++; # timedout value is not a perl boolean if ( $timedout =~ /^[Ff]/ ) { @@ -528,7 +665,43 @@ if 
($@) {
     $return_json->{errorString} = $return_json->{errorString} . ' SQL error: ' . $@;
 }
 
+#
+# put together the stats for the reported items
+#
+foreach my $task_id ( keys( %{$reported} ) ) {
+    eval {
+        my $report = decode_json( read_file( $config->{storage} . '/analyses/' . $task_id . '/reports/lite.json' ) );
+        my $package = $report->{info}{package};
+        if ( defined( $report->{malscore} ) ) {
+            push( @{ $ag_stats->{malscore} }, $report->{malscore} );
+            push( @{ $pkg_stats->{$package}{malscore} }, $report->{malscore} );
+        }
+
+        my $sig_int = 0;
+        while ( defined( $report->{signatures}[$sig_int] ) ) {
+            if ( defined( $report->{signatures}[$sig_int]{confidence} ) ) {
+                push( @{ $ag_stats->{confidence} }, $report->{signatures}[$sig_int]{confidence} );
+                push( @{ $pkg_stats->{$package}{confidence} }, $report->{signatures}[$sig_int]{confidence} );
+            }
+
+            if ( defined( $report->{signatures}[$sig_int]{severity} ) ) {
+                push( @{ $ag_stats->{severity} }, $report->{signatures}[$sig_int]{severity} );
+                push( @{ $pkg_stats->{$package}{severity} }, $report->{signatures}[$sig_int]{severity} );
+            }
+
+            if ( defined( $report->{signatures}[$sig_int]{weight} ) ) {
+                push( @{ $ag_stats->{weight} }, $report->{signatures}[$sig_int]{weight} );
+                push( @{ $pkg_stats->{$package}{weight} }, $report->{signatures}[$sig_int]{weight} );
+            }
+
+            $sig_int++;
+        }
+    };
+}
+
+#
 # compute the aggregate stats
+#
 foreach my $current_entry (@stats_for) {
     if ( $#{ $ag_stats->{$current_entry} } > 0 ) {
         $return_json->{data}{ 'min.' . $current_entry } = min( @{ $ag_stats->{$current_entry} } );
@@ -557,7 +730,9 @@ foreach my $current_entry (@stats_for) {
 
 }
 
+#
 # compute the stats for each package
+#
 foreach my $current_pkg ( keys( %{$pkg_stats} ) ) {
     foreach my $current_entry (@stats_for) {
         if ( $#{ $pkg_stats->{$current_pkg}{$current_entry} } > 0 ) {

From 08659753173017b045a45a372250ef467b7bcbf6 Mon Sep 17 00:00:00 2001
From: "Zane C. Bowers-Hadley"
Date: Tue, 24 Jan 2023 13:00:25 -0600
Subject: [PATCH 234/332] fix depends for cape extend (#454)

---
 snmp/cape | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
 mode change 100755 => 100644 snmp/cape

diff --git a/snmp/cape b/snmp/cape
old mode 100755
new mode 100644
index c85418014..8c58b3f02
--- a/snmp/cape
+++ b/snmp/cape
@@ -36,7 +36,9 @@ Supported command line options are as below.
 
 Depends can be installed via...
 
-    apt-get install libfile-readbackwards-perl libjson-perl libconfig-tiny-perl libdbi-perl libfile-slurp-perl libstatistics-lite-perl
+
+    apt-get install libfile-readbackwards-perl libjson-perl libconfig-tiny-perl libdbi-perl libfile-slurp-perl libstatistics-lite-perl libdbd-pg-perl
+
 The default settings are...

From a47c1973c7754de28f88616007c3afdb0e6975e1 Mon Sep 17 00:00:00 2001
From: bnerickson
Date: Sat, 28 Jan 2023 05:51:18 -0800
Subject: [PATCH 235/332] Initial commit for linux_iw script (#442)

---
 snmp/linux_iw.py | 409 +++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 409 insertions(+)
 create mode 100755 snmp/linux_iw.py

diff --git a/snmp/linux_iw.py b/snmp/linux_iw.py
new file mode 100755
index 000000000..1b3cb4196
--- /dev/null
+++ b/snmp/linux_iw.py
@@ -0,0 +1,409 @@
+#!/usr/bin/env python
+
+"""
+Name: linux_iw Script
+Author: bnerickson w/SourceDoctor's certificate.py script forming the base
+        of the vast majority of this one.
+Version: 1.0
+Description: This is a simple script to parse iw command output for ingestion into LibreNMS via the
+             linux_iw application.
This script can be used on wireless clients as well as wireless + access points. +Installation: + 1. Copy this script to /etc/snmp/ and make it executable: + chmod +x /etc/snmp/linux_iw.py + 2. Edit your snmpd.conf and include: + extend linux_iw /etc/snmp/linux_iw.py + 3. (optional) Create a /etc/snmp/linux_iw.json file and specify: + a.) (optional) "linux_iw_cap_lifetime" - Specify the number of days a dead client (for + APs) or AP (for clients) should remain on the graphs in LibreNMS before being removed + (data is not removed, however). There are two special values that can also be used: + specifying '0' will never expire any client and specifying '-1' (or any negative + value) will result in NO client wireless metrics being graphed in LibreNMS [global + default: 0] + b.) (optional) "iw_cmd" - String path to the wg binary [default: "/usr/sbin/iw"] + c.) (optional) "mac_addr_to_friendly_name" - A dictionary to convert between the wireless + mac address and a friendly, arbitrary name for wireless clients. This name will be + used on the graph titles in LibreNMS, so it's just for readability and easier human = + parsing of data. + ``` + { + "linux_iw_cap_lifetime": 50, + "iw_cmd": "/bin/iw", + "mac_addr_to_friendly_name": { + "00:53:00:00:00:01": "client_1.domain.tlv", + "00:53:ff:ff:ff:ff": "my_tablet" + } + } + ``` + 4. Restart snmpd and activate the app for desired host. +""" + +import json +import re +import subprocess +import sys + +VALID_MAC_ADDR = ( + r"([0-9a-fA-F][0-9a-fA-F]:" + + r"[0-9a-fA-F][0-9a-fA-F]:" + + r"[0-9a-fA-F][0-9a-fA-F]:" + + r"[0-9a-fA-F][0-9a-fA-F]:" + + r"[0-9a-fA-F][0-9a-fA-F]:" + + r"[0-9a-fA-F][0-9a-fA-F])" +) +CONFIG_FILE = "/etc/snmp/linux_iw.json" +INITIAL_REGEX_MAPPER = { + "interfaces": { + "regex": r"(?m)\s+Interface (.+)$", + }, + "stations": {"regex": r"(?m)^Station " + VALID_MAC_ADDR + r" \(on "}, +} +SUB_REGEX_MAPPER = { + "interface_info": { + "center1": { + "regex": ( + r"^\s+channel \d+ \(\d+ MHz\), width: \d+ MHz,.*center1: " + + r"(\d+) MHz" + ), + "variable_type": "type_int", + }, + "center2": { + "regex": ( + r"^\s+channel \d+ \(\d+ MHz\), width: \d+ MHz,.*center2: " + + r"(\d+) MHz" + ), + "variable_type": "type_int", + }, + "channel": { + "regex": r"^\s+channel \d+ \((\d+) MHz\)", + "variable_type": "type_int", + }, + "ssid": { + "regex": r"^\s+ssid (.+)$", + "variable_type": "type_string", + }, + "txpower": { + "regex": r"^\s+txpower (\d+\.\d+) dBm$", + "variable_type": "type_float", + }, + "type": { + "regex": r"^\s+type (.+)$", + "variable_type": "type_string", + }, + "width": { + "regex": r"^\s+channel \d+ \(\d+ MHz\), width: (\d+) MHz", + "variable_type": "type_int", + }, + }, + "station_get": { + "beacon_interval": { + "regex": r"^\s+beacon interval:\s*(\d+)$", + "variable_type": "type_int", + }, + "connected_time": { + "regex": r"^\s+connected time:\s*(\d+) seconds$", + "variable_type": "type_int", + }, + "dtim_interval": { + "regex": r"^\s+DTIM period:\s*(\d+)$", + "variable_type": "type_int", + }, + "inactive_time": { + "regex": r"^\s+inactive time:\s*(\d+) ms$", + "variable_type": "type_int", + }, + "rx_bitrate": { + "regex": r"^\s+rx bitrate:\s*(\d+\.\d+) MBit\/s.*", + "variable_type": "type_float", + }, + "rx_bytes": { + "regex": r"^\s+rx bytes:\s*(\d+)$", + "variable_type": "type_int", + }, + "rx_drop_misc": { + "regex": r"^\s+rx drop misc:\s*(\d+)$", + "variable_type": "type_int", + }, + "rx_duration": { + "regex": r"^\s+rx duration:\s*(\d+) us$", + "variable_type": "type_int", + }, + "rx_packets": { + "regex": r"^\s+rx 
packets:\s*(\d+)$", + "variable_type": "type_int", + }, + "signal": { + "regex": r"^\s+signal:\s*(-?\d+) \[-?\d+, -?\d+\] dBm$", + "variable_type": "type_int", + }, + "tx_bitrate": { + "regex": r"^\s+tx bitrate:\s*(\d+\.\d+) MBit\/s.*", + "variable_type": "type_float", + }, + "tx_bytes": { + "regex": r"^\s+tx bytes:\s*(\d+)$", + "variable_type": "type_int", + }, + "tx_failed": { + "regex": r"^\s+tx failed:\s*(\d+)$", + "variable_type": "type_int", + }, + "tx_packets": { + "regex": r"^\s+tx packets:\s*(\d+)$", + "variable_type": "type_int", + }, + "tx_retries": { + "regex": r"^\s+tx retries:\s*(\d+)$", + "variable_type": "type_int", + }, + }, + "survey_dump": { + "noise": { + "regex": r"^\s+noise:\s*(-?\d+) dBm$", + "variable_type": "type_int", + }, + "channel_active_time": { + "regex": r"^\s+channel active time:\s*(\d+) ms$", + "variable_type": "type_int", + }, + "channel_busy_time": { + "regex": r"^\s+channel busy time:\s*(\d+) ms$", + "variable_type": "type_int", + }, + "channel_receive_time": { + "regex": r"^\s+channel receive time:\s*(\d+) ms$", + "variable_type": "type_int", + }, + "channel_transmit_time": { + "regex": r"^\s+channel transmit time:\s*(\d+) ms$", + "variable_type": "type_int", + }, + }, +} +IW_CMD = "/usr/sbin/iw" + + +def error_handler(error_name, err): + """ + error_handler(): Common error handler for config/output parsing and command execution. + Inputs: + error_name: String describing the error handled. + err: The error message in its entirety. + Outputs: + None + """ + output_data = { + "errorString": f"{error_name}: '{err}'", + "error": 1, + "version": 1, + "data": {}, + } + print(json.dumps(output_data)) + sys.exit(1) + + +def config_file_parser(): + """ + config_file_parser(): Parses the config file (if it exists) and extracts the + necessary parameters. + + Inputs: + None + Outputs: + iw_cmd: The full iw binary as a string in a list to execute. + mac_addr_to_friendly_name: Dictionary mapping of mac addresses to friendly, arbitrary names. + """ + linux_iw_cap_lifetime = None + iw_cmd = [IW_CMD] + mac_addr_to_friendly_name = {} + + # Load configuration file if it exists + try: + with open(CONFIG_FILE, "r") as json_file: + config_file = json.load(json_file) + if "linux_iw_cap_lifetime" in config_file: + linux_iw_cap_lifetime = config_file["linux_iw_cap_lifetime"] + if "iw_cmd" in config_file: + iw_cmd = [config_file["iw_cmd"]] + if "mac_addr_to_friendly_name" in config_file: + # Convert all mac addresses to lower case. + mac_addr_to_friendly_name = dict( + (k.lower(), v) + for k, v in config_file["mac_addr_to_friendly_name"].items() + ) + except FileNotFoundError: + pass + except (KeyError, PermissionError, OSError, json.decoder.JSONDecodeError) as err: + error_handler("Config File Error", err) + + # Create and return full iw command. + return linux_iw_cap_lifetime, iw_cmd, mac_addr_to_friendly_name + + +def command_executor(iw_cmd, iw_args, command_output_regex): + """ + command_executor(): Execute the iw command and return the output. + + Inputs: + iw_cmd: The full iw binary as a string in a list. + iw_args: Args to pass to the iw command. + command_output_refex: Regex to filter output after command execution. + Outputs: + poutput: The utf-8-encoded stdout of the executed command. 
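+                 When command_output_regex is supplied but matches nothing,
+                 None is returned instead.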
+    """
+    try:
+        # Execute iw command
+        poutput = subprocess.check_output(
+            iw_cmd + iw_args,
+            stdin=None,
+            stderr=subprocess.PIPE,
+        )
+    except (subprocess.CalledProcessError, OSError) as err:
+        error_handler("Command Execution Error", err)
+
+    # Filter stdout with regex if it was passed.
+    if command_output_regex:
+        regex_search = re.search(command_output_regex, poutput.decode("utf-8"))
+        poutput = regex_search.group().encode("utf-8") if regex_search else None
+
+    return poutput
+
+
+def output_parser(iw_output, iw_regex_dict):
+    """
+    output_parser(): Parses the iw command output and returns a dictionary
+                     of iw metrics.
+
+    Inputs:
+        iw_output: The iw command stdout
+        iw_regex_dict: A dictionary of regex and variable type values.
+    Outputs:
+        iw_data: A dictionary of iw metrics.
+    """
+    iw_data = {}
+
+    if not iw_output:
+        return iw_data
+
+    for line in iw_output.decode("utf-8").split("\n"):
+        for metric_type, regex_dict in iw_regex_dict.items():
+            regex_search = re.search(regex_dict["regex"], line)
+
+            if not regex_search:
+                continue
+
+            try:
+                metric_value = regex_search.groups()[0]
+
+                if regex_dict["variable_type"] == "type_int":
+                    iw_data[metric_type] = int(metric_value)
+                if regex_dict["variable_type"] == "type_float":
+                    iw_data[metric_type] = float(metric_value)
+                if regex_dict["variable_type"] == "type_string":
+                    iw_data[metric_type] = str(metric_value)
+            except (IndexError, ValueError) as err:
+                error_handler("Command Output Parsing Error", err)
+
+    return iw_data
+
+
+def main():
+    """
+    main(): main function performs iw command execution and output parsing.
+
+    Inputs:
+        None
+    Outputs:
+        None
+    """
+    # Parse configuration file.
+    linux_iw_cap_lifetime, iw_cmd, mac_addr_to_friendly_name = config_file_parser()
+
+    output_data = {
+        "errorString": "",
+        "error": 0,
+        "version": 1,
+        "data": {
+            "linux_iw_cap_lifetime": int(linux_iw_cap_lifetime)
+            if linux_iw_cap_lifetime
+            else None,
+            "friendly_names": mac_addr_to_friendly_name,
+            "interfaces": {},
+        },
+    }
+
+    # Get list of interfaces
+    interfaces = re.findall(
+        INITIAL_REGEX_MAPPER["interfaces"]["regex"],
+        command_executor(iw_cmd, ["dev"], None).decode("utf-8"),
+    )
+
+    # Get operational mode of each interface.
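+    # (That mode is the "type" field parsed from each interface's
+    # "iw dev <interface> info" output below, e.g. "AP" or "managed".)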
+ + # Get interface commands output + for interface in interfaces: + output_data["data"]["interfaces"][interface] = {} + + # Get interface info + output_data["data"]["interfaces"][interface].update( + output_parser( + command_executor(iw_cmd, ["dev", interface, "info"], None), + SUB_REGEX_MAPPER["interface_info"], + ) + ) + + survey_dump_command_output_regex = ( + r"(?m)Survey data from " + + interface + + r"\s+frequency:\s*\d+ MHz \[in use\]\n(\s+.*\n)+" + ) + # Get survey info + output_data["data"]["interfaces"][interface].update( + output_parser( + command_executor( + iw_cmd, + [interface, "survey", "dump"], + survey_dump_command_output_regex, + ), + SUB_REGEX_MAPPER["survey_dump"], + ) + ) + + # Get list of stations connected to interface + stations = re.findall( + INITIAL_REGEX_MAPPER["stations"]["regex"] + interface + r"\)$", + command_executor( + iw_cmd, ["dev", interface, "station", "dump"], None + ).decode("utf-8"), + ) + + # Get station info + output_data["data"]["interfaces"][interface]["caps"] = {} + for station in stations: + output_data["data"]["interfaces"][interface]["caps"][station] = {} + output_data["data"]["interfaces"][interface]["caps"][station].update( + output_parser( + command_executor( + iw_cmd, ["dev", interface, "station", "get", station], None + ), + SUB_REGEX_MAPPER["station_get"], + ) + ) + + # Calculate SNR + if ( + "noise" not in output_data["data"]["interfaces"][interface] + or "signal" + not in output_data["data"]["interfaces"][interface]["caps"][station] + ): + continue + output_data["data"]["interfaces"][interface]["caps"][station]["snr"] = ( + output_data["data"]["interfaces"][interface]["caps"][station]["signal"] + - output_data["data"]["interfaces"][interface]["noise"] + ) + + print(json.dumps(output_data)) + + +if __name__ == "__main__": + main() From fd78c041d5e0456c671eba13481c1d514209df52 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Thu, 16 Feb 2023 20:14:30 -0600 Subject: [PATCH 236/332] add a extend for privoxy (#458) --- snmp/privoxy | 449 +++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 449 insertions(+) create mode 100755 snmp/privoxy diff --git a/snmp/privoxy b/snmp/privoxy new file mode 100755 index 000000000..b52405910 --- /dev/null +++ b/snmp/privoxy @@ -0,0 +1,449 @@ +#!/usr/bin/env perl + +#Copyright (c) 2023, Zane C. Bowers-Hadley +#All rights reserved. +# +#Redistribution and use in source and binary forms, with or without modification, +#are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +#ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +#WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+#IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, +#INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +#BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +#LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +#OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +#THE POSSIBILITY OF SUCH DAMAGE. + +=for comment + +Add this to snmpd.conf as below and restart snmpd. + + extend privoxy /etc/snmp/extends/privoxy + +Supported command line options are as below. + + -f Logfile. + Default: /var/log/privoxy/logfile + -c gzip+base64 compression + -p Pretty print. + +The last is only really relevant to the usage with SNMP. + +=cut + +use strict; +use warnings; +use Getopt::Std; +use File::ReadBackwards; +use JSON; +use Time::Piece; +use IPC::Run3; + +# get the current time +my $t = localtime; +my $till = $t->epoch; +$till = $till - 300; + +# needed as strptime will always assume UTC, resulting in localtime and it being off +if ( $t->tzoffset =~ /^-/ ) { + my $offset = $t->tzoffset; + $offset =~ s/^\-//; + $till = $till - $offset; +} +else { + my $offset = $t->tzoffset; + $offset =~ s/^\+//; + $till = $till + $offset; +} + +my $logfile = '/var/log/privoxy/logfile'; +my $compress; + +#gets the options +my %opts; +getopts( 'f:cp', \%opts ); +if ( defined( $opts{f} ) ) { + $logfile = $opts{f}; +} +if ( defined( $opts{c} ) ) { + $compress = 1; +} + +my $json = JSON->new->allow_nonref->canonical(1); +if ( $opts{p} ) { + $json->pretty(); +} + +# initiate what will be returned +my $to_return = { + error => 0, + errorString => '', + version => 1, + data => { + client_requests => 0, + client_cons => 0, + out_requests => 0, + crunches => 0, + blocks => 0, + block_percent => 0, + fast_redirs => 0, + con_timeouts => 0, + con_failures => 0, + ska_offers => 0, + nog_conns => 0, + reused_server_cons => 0, + empty_resps => 0, + empty_resps_new => 0, + empty_resps_reuse => 0, + imp_accounted => 0, + req_get => 0, + req_head => 0, + req_post => 0, + req_put => 0, + req_delete => 0, + req_connect => 0, + req_options => 0, + req_trace => 0, + req_patch => 0, + ver_1_0 => 0, + ver_1_1 => 0, + ver_2 => 0, + ver_3 => 0, + max_reqs => 0, + bytes_to_client => 0, + resp_1xx => 0, + resp_2xx => 0, + resp_200 => 0, + resp_2xx_other => 0, + resp_3xx => 0, + resp_301 => 0, + resp_302 => 0, + resp_303 => 0, + resp_3xx_other => 0, + resp_4xx => 0, + resp_403 => 0, + resp_404 => 0, + resp_451 => 0, + resp_4xx_other => 0, + resp_5xx => 0, + resp_500 => 0, + resp_502 => 0, + resp_503 => 0, + resp_504 => 0, + resp_5xx_other => 0, + unique_bdomains => 0, + unique_bdomains_np => 0, + unique_domains => 0, + unique_domains_np => 0, + ubd_np_per => 0, + ubd_per => 0, + }, +}; + +my $bw; +eval { $bw = File::ReadBackwards->new($logfile) + or die "can't read " . $logfile . "... 
$!"; }; +if ($@) { + $to_return->{error} = 1; + $to_return->{errorString} = $@; + $to_return->{data} = {}; + print $json->encode($to_return); + if ( !$opts{p} ) { + print "\n"; + } + exit 0; +} + +my $read_file = 1; + +# holds a list of blocked domains found +my $unique_bdomains = {}; +my $unique_bdomains_np = {}; + +# holds a list of domains found +my $unique_domains = {}; +my $unique_domains_np = {}; + +# read all log lines in reverse +my $lines = ''; +my $log_line = ''; +while ( defined( $log_line = $bw->readline ) + && $read_file ) +{ + my $log_t; + + # get the timestamp on non-CLF style log lines + if ( $log_line =~ /^(?\d\d\d\d\-\d\d\-\d\d\ \d\d\:\d\d\:\d\d)/ ) { + $log_t = Time::Piece->strptime( $+{timestamp}, '%Y-%m-%d %H:%M:%S' ); + } + + # get the timestamp on CLF style log lines + elsif ( $log_line =~ /\[(?\d\d\/[A-Za-z]+\/\d\d\d\d\:\d\d\:\d\d\:\d\d)\]/ ) { + $log_t = Time::Piece->strptime( $+{timestamp}, '%d/%b/%Y:%H:%M:%S' ); + } + + if ( defined($log_t) ) { + + # if we have gone beyond where we want to go to, then stop... + # otherwise add it + if ( $log_t->epoch < $till ) { + $read_file = 0; + } + else { + $lines = $log_line . $lines; + + if ( $log_line =~ /^\d\d\d\d\-\d\d\-\d\d\ \d\d\:\d\d\:\d\d.*Crunch\:\ Blocked\:\ / ) { + my $log_line_tmp = $log_line; + $log_line_tmp =~ s/.*Crunch\:\ Blocked\:\ //; + $unique_bdomains->{$log_line_tmp} = 1; + $log_line =~ s/\:\d+$//; + $unique_bdomains_np->{$log_line_tmp} = 1; + } + if ( $log_line =~ /^\d\d\d\d\-\d\d\-\d\d\ \d\d\:\d\d\:\d\d.*Connect\:\ to\ / ) { + my $log_line_tmp = $log_line; + $log_line_tmp =~ s/.*Connect\:\ to\ //; + + # if it has a space, it is a line displaying the stating of the connect + if ( $log_line_tmp !~ /\ / ) { + $unique_domains->{$log_line_tmp} = 1; + $log_line =~ s/\:\d+$//; + $unique_domains_np->{$log_line_tmp} = 1; + } + } + } + } + + # if we don't have log_t, just add the line and lot the log parser figure out what it is + else { + $lines = $log_line . 
$lines; + } +} + +my $stdout; +my $stderr; +my @cmd = ( 'privoxy-log-parser.pl', '--statistics', '--show-complete-request-distribution' ); +run3( \@cmd, \$lines, \$stdout, \$stderr ); + +my @stdout_split = split( /\n/, $stdout ); + +my $multiline_mode; +foreach my $line (@stdout_split) { + + # needed as some lines have white space on the end that makes parsing annoying + $line =~ s/\ +$//; + + # start processing lines based on the start of the line + if ( $line =~ /^Client\ requests\ total\:/ ) { + $multiline_mode = ''; + $line =~ s/.*\:\ //; + $to_return->{data}{client_requests} = $line; + } + elsif ( $line =~ /^Crunches\:/ ) { + $multiline_mode = ''; + $line =~ s/.*\:\ //; + $line =~ s/\ .*$//; + $to_return->{data}{crunches} = $line; + } + elsif ( $line =~ /^Blocks:/ ) { + $multiline_mode = ''; + $line =~ s/.*\:\ //; + $line =~ s/\ .*$//; + $to_return->{data}{blocks} = $line; + } + elsif ( $line =~ /^Fast\ redirections\:/ ) { + $multiline_mode = ''; + $line =~ s/.*\:\ //; + $line =~ s/\ .*$//; + $to_return->{data}{fast_redirs} = $line; + } + elsif ( $line =~ /^Connection\ timeouts\:/ ) { + $multiline_mode = ''; + $line =~ s/.*\:\ //; + $line =~ s/\ .*$//; + $to_return->{data}{con_timeouts} = $line; + } + elsif ( $line =~ /^Connection\ failures\:/ ) { + $multiline_mode = ''; + $line =~ s/.*\:\ //; + $line =~ s/\ .*$//; + $to_return->{data}{con_failures} = $line; + } + elsif ( $line =~ /^Outgoing\ requests\:/ ) { + $multiline_mode = ''; + $line =~ s/.*\:\ //; + $line =~ s/\ .*$//; + $to_return->{data}{out_requests} = $line; + } + elsif ( $line =~ /^Server keep-alive offers\:/ ) { + $multiline_mode = ''; + $line =~ s/.*\:\ //; + $line =~ s/\ .*$//; + $to_return->{data}{ska_offers} = $line; + } + elsif ( $line =~ /^New\ outgoing\ connections\:/ ) { + $multiline_mode = ''; + $line =~ s/.*\:\ \-//; + $line =~ s/\ .*$//; + $to_return->{data}{nog_conns} = $line; + } + elsif ( $line =~ /^Reused\ server\ connections\:/ ) { + $multiline_mode = ''; + $line =~ s/.*connections\:\ //; + $line =~ s/\ .*$//; + $to_return->{data}{reused_server_cons} = $line; + } + elsif ( $line =~ /^Empty\ responses\:/ ) { + $multiline_mode = ''; + $line =~ s/.*\:\ //; + $line =~ s/\ .*$//; + $to_return->{data}{empty_resps} = $line; + } + elsif ( $line =~ /^Empty\ responses\ on\ new\ connections\:/ ) { + $multiline_mode = ''; + $line =~ s/.*\:\ //; + $line =~ s/\ .*$//; + $to_return->{data}{empty_resps_new} = $line; + } + elsif ( $line =~ /^Empty\ responses\ on\ reused\ connections\:/ ) { + $multiline_mode = ''; + $line =~ s/.*\:\ //; + $line =~ s/\ .*$//; + $to_return->{data}{empty_resps_reuse} = $line; + } + elsif ( $line =~ /^Client\ connections\:/ ) { + $multiline_mode = ''; + $line =~ s/.*\:\ //; + $line =~ s/\ .*$//; + $to_return->{data}{client_cons} = $line; + } + elsif ( $line =~ /^Bytes\ of\ content\ transferred\ to\ the\ client\:/ ) { + $multiline_mode = ''; + $line =~ s/.*\:\ //; + $line =~ s/\ .*$//; + $to_return->{data}{bytes_to_client} = $line; + } + elsif ( $line =~ /^Improperly\ accounted\ requests\:/ ) { + $multiline_mode = ''; + $line =~ s/.*\:\ \~//; + $line =~ s/\ .*$//; + $to_return->{data}{imp_accounted} = $line; + } + + # match various multi line modes starts + elsif ( $line =~ /^Client\ requests\ per\ connection\ distribution\:/ ) { + $multiline_mode = 'requests per con'; + } + elsif ( $line =~ /^Method\ distribution\:/ ) { + $multiline_mode = 'method'; + } + elsif ( $line =~ /^Client HTTP versions:/ ) { + $multiline_mode = 'version'; + } + elsif ( $line + =~ /^HTTP\ status\ codes\ according\ to\ 
\'debug\ 512\' \(status\ codes\ sent\ by\ the\ server\ may\ differ\)\:/
+        )
+    {
+        $multiline_mode = 'response';
+    }
+
+    # if it starts with a space, it is a multiline mode item
+    elsif ( $line =~ /^\ / ) {
+        if ( $multiline_mode eq 'requests per con' ) {
+            $line =~ s/.*\:\ //;
+            if ( $line > $to_return->{data}{max_reqs} ) {
+                $to_return->{data}{max_reqs} = $line;
+            }
+        }
+        elsif ( $multiline_mode eq 'method' ) {
+            $line =~ s/^ +//;
+            my ( $count, $method ) = split( /\ \:\ /, $line );
+            $method = lc($method);
+            if ( defined( $to_return->{data}{ 'req_' . $method } ) ) {
+                $to_return->{data}{ 'req_' . $method } = $count;
+            }
+        }
+        elsif ( $multiline_mode eq 'version' ) {
+            $line =~ s/^ +//;
+            my ( $count, $version ) = split( /\ \:\ /, $line );
+            $version = lc($version);
+            $version =~ s/http\//ver_/;
+            $version =~ s/\./_/g;
+            if ( defined( $to_return->{data}{$version} ) ) {
+                $to_return->{data}{$version} = $count;
+            }
+        }
+        elsif ( $multiline_mode eq 'response' ) {
+            $line =~ s/^ +//;
+            my ( $count, $response ) = split( /\ \:\ /, $line );
+            if ( defined( $to_return->{data}{ 'resp_' . $response } ) ) {
+
+                $to_return->{data}{ 'resp_' . $response } = $count;
+            }
+            elsif ( $response =~ /^2\d\d/ ) {
+                $to_return->{data}{resp_2xx_other} = $to_return->{data}{resp_2xx_other} + $count;
+            }
+            elsif ( $response =~ /^3\d\d/ ) {
+                $to_return->{data}{resp_3xx_other} = $to_return->{data}{resp_3xx_other} + $count;
+            }
+            elsif ( $response =~ /^4\d\d/ ) {
+                $to_return->{data}{resp_4xx_other} = $to_return->{data}{resp_4xx_other} + $count;
+            }
+            elsif ( $response =~ /^5\d\d/ ) {
+                $to_return->{data}{resp_5xx_other} = $to_return->{data}{resp_5xx_other} + $count;
+            }
+
+            if ( $response =~ /^1\d\d/ ) {
+                $to_return->{data}{resp_1xx} = $to_return->{data}{resp_1xx} + $count;
+            }
+            elsif ( $response =~ /^2\d\d/ ) {
+                $to_return->{data}{resp_2xx} = $to_return->{data}{resp_2xx} + $count;
+            }
+            elsif ( $response =~ /^3\d\d/ ) {
+                $to_return->{data}{resp_3xx} = $to_return->{data}{resp_3xx} + $count;
+            }
+            elsif ( $response =~ /^4\d\d/ ) {
+                $to_return->{data}{resp_4xx} = $to_return->{data}{resp_4xx} + $count;
+            }
+            elsif ( $response =~ /^5\d\d/ ) {
+                $to_return->{data}{resp_5xx} = $to_return->{data}{resp_5xx} + $count;
+            }
+        }
+    }
+    else {
+        $multiline_mode = '';
+    }
+}
+
+my @keys_tmp = keys( %{$unique_bdomains} );
+$to_return->{data}{unique_bdomains} = @keys_tmp;
+@keys_tmp = keys( %{$unique_bdomains_np} );
+$to_return->{data}{unique_bdomains_np} = @keys_tmp;
+@keys_tmp = keys( %{$unique_domains} );
+$to_return->{data}{unique_domains} = @keys_tmp;
+@keys_tmp = keys( %{$unique_domains_np} );
+$to_return->{data}{unique_domains_np} = @keys_tmp;
+
+if ( $to_return->{data}{unique_domains} > 0 && $to_return->{data}{unique_bdomains} > 0 ) {
+    $to_return->{data}{ubd_per} = $to_return->{data}{unique_bdomains} / $to_return->{data}{unique_domains};
+    $to_return->{data}{ubd_np_per} = $to_return->{data}{unique_bdomains_np} / $to_return->{data}{unique_domains_np};
+}
+
+# percentage of requests blocked
+if ( $to_return->{data}{blocks} > 0 && $to_return->{data}{client_requests} > 0 ) {
+    $to_return->{data}{block_percent} = $to_return->{data}{blocks} / $to_return->{data}{client_requests};
+}
+
+print $json->encode($to_return);
+if ( !$opts{p} ) {
+    print "\n";
+}
+exit 0;

From bd4c94642882d1c6f116001b5bb71c1a94b235f0 Mon Sep 17 00:00:00 2001
From: Henne Van Och
Date: Wed, 1 Mar 2023 01:09:43 +0100
Subject: [PATCH 237/332] Improve docker stats (#450)

---
 snmp/docker-stats.py | 112 +++++++++++++++++++++++++++++++++++++++++++++++++++++++
snmp/docker-stats.sh | 38 --------------- 2 files changed, 112 insertions(+), 38 deletions(-) create mode 100644 snmp/docker-stats.py delete mode 100644 snmp/docker-stats.sh diff --git a/snmp/docker-stats.py b/snmp/docker-stats.py new file mode 100644 index 000000000..7460c6d4b --- /dev/null +++ b/snmp/docker-stats.py @@ -0,0 +1,112 @@ +#!/usr/bin/env python3 +import datetime +import json +import subprocess + +from dateutil import parser + +VERSION = 2 +ONLY_RUNNING_CONTAINERS = True + + +def run(cmd): + res = subprocess.check_output(cmd, stderr=subprocess.STDOUT) + return res + + +def inspectContainer(container): + raw = run(["docker", "inspect", "-s", container]) + data = json.loads(raw) + + return data + + +def getStats(): + command = [ + "docker", + "stats", + "--no-stream", + "--no-trunc", + "--format", + "{{ json . }}", + ] + if not ONLY_RUNNING_CONTAINERS: + command.append("-a") + raw = run(command) + lines = raw.split(b"\n") + containers = [] + for line in lines[0:-1]: + containers.append(json.loads(line)) + + return containers + + +def dump(): + containers = [] + try: + stats_containers = getStats() + except subprocess.CalledProcessError as e: + print( + json.dumps( + { + "version": VERSION, + "data": containers, + "error": e.returncode, + "errorString": e.output.decode("utf-8"), + } + ) + ) + return + + for container in stats_containers: + try: + inspected_container = inspectContainer(container["Name"]) + except subprocess.CalledProcessError: + continue + + started_at = parser.parse(inspected_container[0]["State"]["StartedAt"]) + + if inspected_container[0]["State"]["Running"]: + finished_at = datetime.datetime.now(started_at.tzinfo) + else: + finished_at = parser.parse(inspected_container[0]["State"]["FinishedAt"]) + + uptime = finished_at - started_at + + containers.append( + { + "container": container["Name"], + "pids": container["PIDs"], + "memory": { + "used": container["MemUsage"].split(" / ")[0], + "limit": container["MemUsage"].split(" / ")[1], + "perc": container["MemPerc"], + }, + "cpu": container["CPUPerc"], + "size": { + "size_rw": inspected_container[0]["SizeRw"], + "size_root_fs": inspected_container[0]["SizeRootFs"], + }, + "state": { + "status": inspected_container[0]["State"]["Status"], + "started_at": inspected_container[0]["State"]["StartedAt"], + "finished_at": inspected_container[0]["State"]["FinishedAt"], + "uptime": round(uptime.total_seconds()), + }, + } + ) + + print( + json.dumps( + { + "version": VERSION, + "data": containers, + "error": "0", + "errorString": "", + } + ) + ) + + +if __name__ == "__main__": + dump() diff --git a/snmp/docker-stats.sh b/snmp/docker-stats.sh deleted file mode 100644 index 7ac7473f2..000000000 --- a/snmp/docker-stats.sh +++ /dev/null @@ -1,38 +0,0 @@ -#!/usr/bin/env bash - -VERSION=1 - -function dockerStatsFormat() { - cat <&1) -ERROR=$? -if [ $ERROR -ne 0 ];then - ERROR_STRING=${STATS} - unset STATS -fi -jq -nMc \ - --slurpfile stats <(echo "${STATS:-}") \ - --arg version "${VERSION:-1}" \ - --arg error "${ERROR:-0}" \ - --arg errorString "${ERROR_STRING:-}" \ - '{"version": $version, "data": $stats, "error": $error, "errorString": $errorString }' - -# vim: tabstop=2:shiftwidth=2:expandtab: From a8031e605b0eec65358ef28a6bf23c437037cd46 Mon Sep 17 00:00:00 2001 From: "Zane C. 
Bowers-Hadley" Date: Sun, 19 Mar 2023 14:58:18 -0600 Subject: [PATCH 238/332] add new ZFS extend, for both Linux and FreeBSD (#460) --- snmp/zfs | 390 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 390 insertions(+) create mode 100755 snmp/zfs diff --git a/snmp/zfs b/snmp/zfs new file mode 100755 index 000000000..8a873c7f1 --- /dev/null +++ b/snmp/zfs @@ -0,0 +1,390 @@ +#!/usr/bin/env perl + +=head1 DESCRIPTION + +This is a SNMP extend for ZFS for use with LibreNMS. + +For more information, see L. + +=head1 SWITCHES + +=head2 -p + +Pretty print the JSON. + +=head1 SNMPD SETUP EXAMPLES + + extend zfs /etc/snmp/zfs + +=cut + +#Copyright (c) 2023, Zane C. Bowers-Hadley +#All rights reserved. +# +#Redistribution and use in source and binary forms, with or without modification, +#are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +#ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +#WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +#IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, +#INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +#BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +#LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +#OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +#THE POSSIBILITY OF SUCH DAMAGE. + +# Many thanks to Ben Rockwood, Jason J. 
Hellenthal, and Martin Matuska +# for zfs-stats and figuring out the math for all the stats + +use strict; +use warnings; +use JSON; +use Getopt::Std; +use File::Slurp; + +$Getopt::Std::STANDARD_HELP_VERSION = 1; + +sub main::VERSION_MESSAGE { + print "FreeBSD ZFS v3 stats extend 0.0.1\n"; +} + +sub main::HELP_MESSAGE { + +} + +#this will be dumped to json at the end +my %tojson; + +#gets the options +my %opts = (); +getopts( 'p', \%opts ); + +#process each pool and shove them into JSON +my $zpool_output = `/sbin/zpool list -pH`; +my @pools = split( /\n/, $zpool_output ); +my $pools_int = 0; +$tojson{online} = 0; +$tojson{degraded} = 0; +$tojson{offline} = 0; +$tojson{faulted} = 0; +$tojson{health} = 1; +$tojson{unavail} = 0; +$tojson{removed} = 0; +$tojson{unknown} = 0; +my @toShoveIntoJSON; + +while ( defined( $pools[$pools_int] ) ) { + my %newPool; + + my $pool = $pools[$pools_int]; + chomp($pool); + $pool =~ s/[\t\ ]+/,/g; + $pool =~ s/\,\-\,/\,0\,/g; + $pool =~ s/\%//g; + $pool =~ s/\,([0-1\.]*)x\,/,$1,/; + + ( + $newPool{name}, $newPool{size}, $newPool{alloc}, $newPool{free}, + $newPool{ckpoint}, $newPool{expandsz}, $newPool{frag}, $newPool{cap}, + $newPool{dedup}, $newPool{health}, $newPool{altroot} + ) = split( /\,/, $pool ); + + if ( $newPool{health} eq 'ONLINE' ) { + $newPool{health} = 0; + $tojson{online}++; + } + elsif ( $newPool{health} eq 'DEGRADED' ) { + $newPool{health} = 1; + $tojson{health} = 0; + $tojson{degraded}++; + } + elsif ( $newPool{health} eq 'OFFLINE' ) { + $newPool{health} = 2; + $tojson{offline}++; + } + elsif ( $newPool{health} eq 'FAULTED' ) { + $newPool{health} = 3; + $tojson{health} = 0; + $tojson{faulted}++; + } + elsif ( $newPool{health} eq 'UNAVAIL' ) { + $newPool{health} = 4; + $tojson{health} = 0; + $tojson{unavail}++; + } + elsif ( $newPool{health} eq 'REMOVED' ) { + $newPool{health} = 5; + $tojson{health} = 0; + $tojson{removed}++; + } + else { + $newPool{health} = 6; + $tojson{health} = 0; + $tojson{unknown}++; + } + + if ( $newPool{expandsz} eq '-' ) { + $newPool{expandsz} = 0; + } + + my $iostat = `zpool iostat -l -q -p -H $newPool{name}`; + chomp($iostat); + $iostat =~ s/\t/,/g; + $iostat =~ s/\,\-\,\-\,/\,0\,0\,/g; + $iostat =~ s/\%//g; + $iostat =~ s/\,([0-1\.]*)x\,/,$1,/; + chomp($iostat); + my $parsed; + ( + $parsed, $parsed, $newPool{operations_r}, $newPool{operations_w}, + $newPool{bandwidth_r}, $newPool{bandwidth_w}, $newPool{total_wait_r}, $newPool{total_wait_w}, + $newPool{disk_wait_r}, $newPool{disk_wait_w}, $newPool{syncq_wait_r}, $newPool{syncq_wait_w}, + $newPool{asyncq_wait_w}, $newPool{scrub_wait}, $newPool{trim_wait}, $newPool{syncq_read_p}, + $newPool{syncq_read_a}, $newPool{syncq_write_p}, $newPool{syncq_write_a}, $newPool{asyncq_read_p}, + $newPool{asyncq_read_a}, $newPool{asyncq_write_p}, $newPool{asyncq_write_a}, $newPool{scrubq_read_p}, + $newPool{scrubq_read_a}, $newPool{trimq_write_p}, $newPool{trimq_write_a}, + ) = split( /\,/, $iostat ); + + my @pool_keys = keys(%newPool); + foreach my $item (@pool_keys) { + if ( $item ne 'altroot' && $newPool{$item} eq '-' ) { + $newPool{$item} = 0; + } + } + + push( @toShoveIntoJSON, \%newPool ); + + $pools_int++; +} +$tojson{pools} = \@toShoveIntoJSON; + +# +# OS specific bits +# +my $stats_stuff = {}; +if ( $^O eq 'freebsd' ) { + my @to_pull = ( 'kstat.zfs', 'vfs.zfs', ); + my @sysctls_pull = `/sbin/sysctl -q @to_pull`; + foreach my $stat (@sysctls_pull) { + chomp($stat); + my ( $var, $val ) = split( /:/, $stat, 2 ); + + # If $val is empty, skip it. 
Likely a var with a newline before + # the data so it is trying to "split" the data. + if ( length $val ) { + $val =~ s/^ //; + $var =~ s/^.*\.arcstats\.//; + $stats_stuff->{$var} = $val; + } + } + +} +elsif ( $^O eq 'linux' ) { + my @arcstats_lines = read_file('/proc/spl/kstat/zfs/arcstats'); + foreach my $line (@arcstats_lines) { + chomp($line); + my ( $stat, $int, $value ) = split( /[\t\ ]+/, $line, 3 ); + $stats_stuff->{$stat} = $value; + } +} + +# does not seem to exist for me, but some of these don't seem to be created till needed +if ( !defined( $stats_stuff->{"recycle_miss"} ) ) { + $stats_stuff->{"recycle_miss"} = 0; +} + +## +## ARC misc +## +$tojson{deleted} = $stats_stuff->{"deleted"}; +$tojson{evict_skip} = $stats_stuff->{"evict_skip"}; +$tojson{mutex_skip} = $stats_stuff->{'mutex_miss'}; +$tojson{recycle_miss} = $stats_stuff->{"recycle_miss"}; + +## +## ARC size +## +my $target_size_percent = $stats_stuff->{"c"} / $stats_stuff->{"c_max"} * 100; +my $arc_size_percent = $stats_stuff->{"size"} / $stats_stuff->{"c_max"} * 100; +my $target_size_adaptive_ratio = $stats_stuff->{"c"} / $stats_stuff->{"c_max"}; +my $min_size_percent = $stats_stuff->{"c_min"} / $stats_stuff->{"c_max"} * 100; + +$tojson{arc_size} = $stats_stuff->{"size"}; +$tojson{target_size_max} = $stats_stuff->{"c_max"}; +$tojson{target_size_min} = $stats_stuff->{"c_min"}; +$tojson{target_size} = $stats_stuff->{"c"}; +$tojson{target_size_per} = $target_size_percent; +$tojson{arc_size_per} = $arc_size_percent; +$tojson{target_size_arat} = $target_size_adaptive_ratio; +$tojson{min_size_per} = $min_size_percent; + +## +## ARC size breakdown +## +my $mfu_size; +my $recently_used_percent; +my $frequently_used_percent; +if ( $stats_stuff->{"size"} >= $stats_stuff->{"c"} ) { + $mfu_size = $stats_stuff->{"size"} - $stats_stuff->{"p"}; + $recently_used_percent = $stats_stuff->{"p"} / $stats_stuff->{"size"} * 100; + $frequently_used_percent = $mfu_size / $stats_stuff->{"size"} * 100; +} +else { + $mfu_size = $stats_stuff->{"c"} - $stats_stuff->{"p"}; + $recently_used_percent = $stats_stuff->{"p"} / $stats_stuff->{"c"} * 100; + $frequently_used_percent = $mfu_size / $stats_stuff->{"c"} * 100; +} + +$tojson{p} = $stats_stuff->{"p"}; + +## +## ARC efficiency +## +my $arc_hits = $stats_stuff->{"hits"}; +my $arc_misses = $stats_stuff->{"misses"}; +my $demand_data_hits = $stats_stuff->{"demand_data_hits"}; +my $demand_data_misses = $stats_stuff->{"demand_data_misses"}; +my $demand_metadata_hits = $stats_stuff->{"demand_metadata_hits"}; +my $demand_metadata_misses = $stats_stuff->{"demand_metadata_misses"}; +my $mfu_ghost_hits = $stats_stuff->{"mfu_ghost_hits"}; +my $mfu_hits = $stats_stuff->{"mfu_hits"}; +my $mru_ghost_hits = $stats_stuff->{"mru_ghost_hits"}; +my $mru_hits = $stats_stuff->{"mru_hits"}; +my $prefetch_data_hits = $stats_stuff->{"prefetch_data_hits"}; +my $prefetch_data_misses = $stats_stuff->{"prefetch_data_misses"}; +my $prefetch_metadata_hits = $stats_stuff->{"prefetch_metadata_hits"}; +my $prefetch_metadata_misses = $stats_stuff->{"prefetch_metadata_misses"}; + +## +## ARC efficiency, common +## + +my $anon_hits = $arc_hits - ( $mfu_hits + $mru_hits + $mfu_ghost_hits + $mru_ghost_hits ); +my $arc_accesses_total = $arc_hits + $arc_misses; +my $demand_data_total = $demand_data_hits + $demand_data_misses; +my $prefetch_data_total = $prefetch_data_hits + $prefetch_data_misses; +my $real_hits = $mfu_hits + $mru_hits; + +my $cache_hit_percent = $arc_hits / $arc_accesses_total * 100; +my $cache_miss_percent = $arc_misses 
/ $arc_accesses_total * 100; +my $actual_hit_percent = $real_hits / $arc_accesses_total * 100; + +my $data_demand_percent = 0; +if ( $demand_data_total != 0 ) { + $data_demand_percent = $demand_data_hits / $demand_data_total * 100; +} + +my $data_prefetch_percent = 0; +if ( $prefetch_data_total != 0 ) { + $data_prefetch_percent = $prefetch_data_hits / $prefetch_data_total * 100; +} + +my $anon_hits_percent; +if ( $anon_hits != 0 ) { + $anon_hits_percent = $anon_hits / $arc_hits * 100; +} +else { + $anon_hits_percent = 0; +} + +my $mru_percent = $mru_hits / $arc_hits * 100; +my $mfu_percent = $mfu_hits / $arc_hits * 100; +my $mru_ghost_percent = $mru_ghost_hits / $arc_hits * 100; +my $mfu_ghost_percent = $mfu_ghost_hits / $arc_hits * 100; + +my $demand_hits_percent = $demand_data_hits / $arc_hits * 100; +my $prefetch_hits_percent = $prefetch_data_hits / $arc_hits * 100; +my $metadata_hits_percent = $demand_metadata_hits / $arc_hits * 100; +my $prefetch_metadata_hits_percent = $prefetch_metadata_hits / $arc_hits * 100; + +my $demand_misses_percent = $demand_data_misses / $arc_misses * 100; +my $prefetch_misses_percent = $prefetch_data_misses / $arc_misses * 100; +my $metadata_misses_percent = $demand_metadata_misses / $arc_misses * 100; +my $prefetch_metadata_misses_percent = $prefetch_metadata_misses / $arc_misses * 100; + +# ARC misc. efficient stats +$tojson{arc_hits} = $arc_hits; +$tojson{arc_misses} = $arc_misses; +$tojson{demand_data_hits} = $demand_data_hits; +$tojson{demand_data_misses} = $demand_data_misses; +$tojson{demand_meta_hits} = $demand_metadata_hits; +$tojson{demand_meta_misses} = $demand_metadata_misses; +$tojson{mfu_ghost_hits} = $mfu_ghost_hits; +$tojson{mfu_hits} = $mfu_hits; +$tojson{mru_ghost_hits} = $mru_ghost_hits; +$tojson{mru_hits} = $mru_hits; +$tojson{pre_data_hits} = $prefetch_data_hits; +$tojson{pre_data_misses} = $prefetch_data_misses; +$tojson{pre_meta_hits} = $prefetch_metadata_hits; +$tojson{pre_meta_misses} = $prefetch_metadata_misses; +$tojson{anon_hits} = $anon_hits; +$tojson{arc_accesses_total} = $arc_accesses_total; +$tojson{demand_data_total} = $demand_data_total; +$tojson{pre_data_total} = $prefetch_data_total; +$tojson{real_hits} = $real_hits; + +# ARC efficient percents +$tojson{cache_hits_per} = $cache_hit_percent; +$tojson{cache_miss_per} = $cache_miss_percent; +$tojson{actual_hit_per} = $actual_hit_percent; +$tojson{data_demand_per} = $data_demand_percent; +$tojson{data_pre_per} = $data_prefetch_percent; +$tojson{anon_hits_per} = $anon_hits_percent; +$tojson{mru_per} = $mru_percent; +$tojson{mfu_per} = $mfu_percent; +$tojson{mru_ghost_per} = $mru_ghost_percent; +$tojson{mfu_ghost_per} = $mfu_ghost_percent; +$tojson{demand_hits_per} = $demand_hits_percent; +$tojson{pre_hits_per} = $prefetch_hits_percent; +$tojson{meta_hits_per} = $metadata_hits_percent; +$tojson{pre_meta_hits_per} = $prefetch_metadata_hits_percent; +$tojson{demand_misses_per} = $demand_misses_percent; +$tojson{pre_misses_per} = $prefetch_misses_percent; +$tojson{meta_misses_per} = $metadata_misses_percent; +$tojson{pre_meta_misses_per} = $prefetch_metadata_misses_percent; + +$tojson{mfu_size} = $mfu_size; +$tojson{rec_used_per} = $recently_used_percent; +$tojson{freq_used_per} = $frequently_used_percent; + +## +## pull in the l2 stats +## +my @l2_keys = grep( /l2\_/, keys( %{$stats_stuff} ) ); +foreach my $item (@l2_keys) { + $tojson{$item} = $stats_stuff->{$item}; +} +$tojson{l2_errors} = $tojson{l2_writes_error} + $tojson{l2_cksum_bad} + $tojson{l2_io_error}; 
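+# A sketch of the derived L2 ARC counters here, assuming the usual arcstats
+# key names are present (keys absent from arcstats would be undef here). For
+# example, l2_writes_error=1, l2_cksum_bad=2 and l2_io_error=0 give
+# l2_errors=3, and l2_hits=90 with l2_misses=10 give l2_access_total=100.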
+$tojson{l2_access_total} = $tojson{l2_hits} + $tojson{l2_misses}; + +## +## print the results +## + +my %head_hash; +$head_hash{'data'} = \%tojson; +$head_hash{'version'} = 3; +$head_hash{'error'} = 0; +$head_hash{'errorString'} = ''; + +my $j = JSON->new; + +if ( $opts{p} ) { + $j->pretty(1); +} + +print $j->encode( \%head_hash ); + +if ( !$opts{p} ) { + print "\n"; +} + +exit 0; From 67a1a0117fe393e8c718aceda3743a417f4a3cb8 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Wed, 26 Apr 2023 09:36:17 -0600 Subject: [PATCH 239/332] opensearch/elasticsearch fix as apparently ._all.total.indexing.is_throttled will sometimes be undef (#464) --- snmp/opensearch | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/snmp/opensearch b/snmp/opensearch index 2b133141f..5b731b2eb 100755 --- a/snmp/opensearch +++ b/snmp/opensearch @@ -1,6 +1,6 @@ #!/usr/bin/env perl -#Copyright (c) 2022, Zane C. Bowers-Hadley +#Copyright (c) 2023, Zane C. Bowers-Hadley #All rights reserved. # #Redistribution and use in source and binary forms, with or without modification, @@ -159,7 +159,7 @@ $to_return->{data}{c_in_fl_fetch} = $health_json->{number_of_in_flight_fetc $to_return->{data}{c_task_max_in_time} = $health_json->{task_max_waiting_in_queue_millis}; $to_return->{data}{c_act_shards_perc} = $health_json->{active_shards_percent_as_number}; -# status color to int, nagious style +# status color to int, nagios style # green / ok = 0 # yellow / warning = 1 # red / critical = 2 @@ -194,7 +194,9 @@ $to_return->{data}{ti_del_time} = $stats_json->{_all}{total}{indexing}{del $to_return->{data}{ti_noop_up_total} = $stats_json->{_all}{total}{indexing}{noop_update_total}; $to_return->{data}{ti_throttled_time} = $stats_json->{_all}{total}{indexing}{throttle_time_in_millis}; -if ( $stats_json->{_all}{total}{indexing}{is_throttled} eq 'true' ) { +if ( defined( $stats_json->{_all}{total}{indexing}{is_throttled} ) + && $stats_json->{_all}{total}{indexing}{is_throttled} eq 'true' ) +{ $to_return->{data}{ti_throttled} = 1; } else { From f30f71342ad80c837b21674cfc8ee7f3266af772 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Fri, 28 Apr 2023 07:47:56 -0600 Subject: [PATCH 240/332] minor doc changes and add gzip+base64 compression to snmp/zfs (#463) --- snmp/zfs | 51 +++++++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 45 insertions(+), 6 deletions(-) diff --git a/snmp/zfs b/snmp/zfs index 8a873c7f1..d80e73e2e 100755 --- a/snmp/zfs +++ b/snmp/zfs @@ -10,12 +10,31 @@ For more information, see Lnew; -if ( $opts{p} ) { +if ( $opts{p} && ! $opts{b} ) { $j->pretty(1); } -print $j->encode( \%head_hash ); +my $return_string = $j->encode( \%head_hash ); -if ( !$opts{p} ) { - print "\n"; +if ( !$opts{p} && ! $opts{b} ) { + print $return_string."\n"; + exit 0; +}elsif (!$opts{b}) { + print $return_string; + exit 0; +} + +my $compressed = encode_base64( gzip($return_string) ); +$compressed =~ s/\n//g; +$compressed = $compressed . 
"\n"; +if ( length($compressed) > length($return_string) ) { + print $return_string."\n"; +} +else { + print $compressed; } exit 0; From af28a4fcca97f4673891e46d733f1136fb9ec7f2 Mon Sep 17 00:00:00 2001 From: "Shao Yu-Lung (Allen)" Date: Fri, 28 Apr 2023 21:50:07 +0800 Subject: [PATCH 241/332] feat: OS Updates support agent (#444) --- snmp/osupdate | 1 + 1 file changed, 1 insertion(+) diff --git a/snmp/osupdate b/snmp/osupdate index 87e16873f..8ce829836 100755 --- a/snmp/osupdate +++ b/snmp/osupdate @@ -35,6 +35,7 @@ CMD_APK=' version' ################################################################ # Don't change anything unless you know what are you doing # ################################################################ +echo '<<>>' if command -v zypper &>/dev/null ; then # OpenSUSE # shellcheck disable=SC2086 From 8f95171953ecedbc5140840fb50ea8b8af5ec733 Mon Sep 17 00:00:00 2001 From: bnerickson Date: Tue, 9 May 2023 08:23:39 -0700 Subject: [PATCH 242/332] Adding linux_config_files snmp script to monitor configuration file updates (#453) --- snmp/linux_config_files.py | 173 +++++++++++++++++++++++++++++++++++++ 1 file changed, 173 insertions(+) create mode 100644 snmp/linux_config_files.py diff --git a/snmp/linux_config_files.py b/snmp/linux_config_files.py new file mode 100644 index 000000000..4544e6ea2 --- /dev/null +++ b/snmp/linux_config_files.py @@ -0,0 +1,173 @@ +#!/usr/bin/env python +# +# Name: linux_config_files Script +# Author: bnerickson w/SourceDoctor's certificate.py script forming +# the base of the vast majority of this one. +# Version: 1.0 +# Description: This is a simple script to parse "pkg_tool_cmd" output for ingestion into +# LibreNMS via the linux_config_files application. Additional distribution +# support may be added. +# Installation: +# 1. Copy this script to /etc/snmp/ and make it executable: +# chmod +x /etc/snmp/linux_config_files.py +# 2. Edit your snmpd.conf and include: +# extend linux_config_files /etc/snmp/linux_config_files.py +# 3. (Optional, if RPM-based) Create a /etc/snmp/linux_config_files.json file and specify: +# a.) "pkg_system" - String designating the distribution name of the system. At +# the moment only "rpm" is supported. +# b.) "pkg_tool_cmd" - String path to the package tool binary ["/sbin/rpmconf"] +# ``` +# { +# "pkg_system": "rpm", +# "pkg_tool_cmd": "/bin/rpmconf", +# } +# ``` +# 4. Restart snmpd and activate the app for desired host. + +import json +import subprocess +import sys + +CONFIG_FILE = "/etc/snmp/linux_config_files.json" +PKG_SYSTEM = "rpm" +PKG_TOOL_ARGS = {"rpm": ["--all", "--test"]} +PKG_TOOL_CMD = {"rpm": "/sbin/rpmconf"} + + +def error_handler(error_name, err): + """ + error_handler(): Common error handler for config/output parsing and + command execution. + Inputs: + error_name: String describing the error handled. + err: The error message in its entirety. + Outputs: + None + """ + output_data = { + "errorString": "%s: '%s'" % (error_name, err), + "error": 1, + "version": 1, + "data": [], + } + print(json.dumps(output_data)) + sys.exit(1) + + +def config_file_parser(): + """ + config_file_parser(): Parses the config file (if it exists) and extracts the + necessary parameters. + + Inputs: + None + Outputs: + pkg_system: The package management used by the system. + pkg_tool_cmd: The full package tool command to execute. 
+ """ + pkg_system = PKG_SYSTEM + pkg_tool_cmd = [PKG_TOOL_CMD[pkg_system]] + + # Load configuration file if it exists + try: + with open(CONFIG_FILE, "r") as json_file: + config_file = json.load(json_file) + if "pkg_system" in config_file: + pkg_system = config_file["pkg_system"].strip().lower() + pkg_tool_cmd = ( + [config_file["pkg_tool_cmd"].strip().lower()] + if "pkg_tool_cmd" in config_file + else [PKG_TOOL_CMD[pkg_system]] + ) + except FileNotFoundError: + pass + except (KeyError, PermissionError, OSError, json.decoder.JSONDecodeError) as err: + error_handler("Config File Error", err) + + # Create and return pkg_system & full pkg_tool_cmd command. + pkg_tool_cmd.extend(PKG_TOOL_ARGS[pkg_system]) + return pkg_system, pkg_tool_cmd + + +def command_executor(pkg_system, pkg_tool_cmd): + """ + command_executor(): Execute the pkg_tool_cmd command and return the output. + + Inputs: + pkg_system: The package management used by the system. + pkg_tool_cmd: The full package tool command to execute. + Outputs: + poutput: The stdout of the executed command (empty byte-string if error). + """ + poutput = None + try: + # Execute pkg_tool_cmd command + poutput = subprocess.check_output( + pkg_tool_cmd, + stdin=None, + stderr=subprocess.PIPE, + ) + except (subprocess.CalledProcessError, OSError) as err: + # Per rpmconf man page, an error code of 5 indicates there are conf file + # to merge, so disregard that error code. + if err.returncode != 5 or pkg_system != "rpm": + error_handler("Command Execution Error", err) + poutput = err.output + return poutput + + +def output_parser(pkg_system, cmd_output): + """ + output_parser(): Parses stdout of executed command and returns updated dictionary + with parsed data. + + Inputs: + pkg_system: The package management used by the system. + cmd_output: stdout of the executed command. + Outputs: + output_data: Dictionary updated with parsed data. + """ + output_data = { + "errorString": "", + "error": 0, + "version": 1, + "data": {"number_of_confs": None}, + } + + if pkg_system == "rpm": + if not cmd_output: + output_data["data"]["number_of_confs"] = 0 + else: + output_data["data"]["number_of_confs"] = len( + cmd_output.decode("utf-8").strip().split("\n") + ) + + return output_data + + +def main(): + """ + main(): main function that delegates config file parsing, command execution, + and unit stdout parsing. Then it prints out the expected json output + for the pkg_tool_cmd application. + + Inputs: + None + Outputs: + None + """ + # Parse configuration file. + pkg_system, pkg_tool_cmd = config_file_parser() + + # Execute pkg_tool_cmd command and parse output. + cmd_output = command_executor(pkg_system, pkg_tool_cmd) + + # Parse command output. + output_data = output_parser(pkg_system, cmd_output) + + # Print json dumps of dictionary. + print(json.dumps(output_data)) + + +if __name__ == "__main__": + main() From 49f38107800e08e35fbbb2aef250b24778ee1b34 Mon Sep 17 00:00:00 2001 From: bnerickson Date: Tue, 23 May 2023 11:25:30 -0700 Subject: [PATCH 243/332] Removing unnecessary backslashes from postgres script. (#468) --- snmp/postgres | 44 ++++++++++++++++++++++---------------------- 1 file changed, 22 insertions(+), 22 deletions(-) diff --git a/snmp/postgres b/snmp/postgres index 764484000..c0154da4f 100644 --- a/snmp/postgres +++ b/snmp/postgres @@ -12,11 +12,11 @@ # and/or other materials provided with the distribution. 
# #THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -#ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +#ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED #WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. #IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, -#INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, -#BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +#INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +#BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, #DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF #LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR #OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF @@ -91,27 +91,27 @@ BEGIN{ db=""; ignorePG='$ignorePG'; toAdd=1; -} +} { - gsub(/dbname\:/, ""); - gsub(/backends\:/, ""); - gsub(/commits\:/, ""); - gsub(/rollbacks\:/, ""); - gsub(/idxscan\:/, ""); - gsub(/idxtupread\:/, ""); - gsub(/idxtupfetch\:/, ""); - gsub(/idxblksread\:/, ""); - gsub(/idxblkshit\:/, ""); - gsub(/seqscan\:/, ""); - gsub(/seqtupread\:/, ""); - gsub(/ret\:/, ""); - gsub(/fetch\:/, ""); - gsub(/ins\:/, ""); - gsub(/upd\:/, ""); - gsub(/del\:/, ""); + gsub(/dbname:/, ""); + gsub(/backends:/, ""); + gsub(/commits:/, ""); + gsub(/rollbacks:/, ""); + gsub(/idxscan:/, ""); + gsub(/idxtupread:/, ""); + gsub(/idxtupfetch:/, ""); + gsub(/idxblksread:/, ""); + gsub(/idxblkshit:/, ""); + gsub(/seqscan:/, ""); + gsub(/seqtupread:/, ""); + gsub(/ret:/, ""); + gsub(/fetch:/, ""); + gsub(/ins:/, ""); + gsub(/upd:/, ""); + gsub(/del:/, ""); #must be processed last or they step on other gsub - gsub(/read\:/, ""); - gsub(/hit\:/, ""); + gsub(/read:/, ""); + gsub(/hit:/, ""); if ( $18 == "postgres" ){ if ( ignorePG == 1 ){ toAdd=0 } From a011a88653145536d065ba5e2bd029e3771231ea Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Sat, 10 Jun 2023 15:55:57 -0500 Subject: [PATCH 244/332] add Linux Softnet Stat extend (#470) --- snmp/linux_softnet_stat | 144 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 144 insertions(+) create mode 100755 snmp/linux_softnet_stat diff --git a/snmp/linux_softnet_stat b/snmp/linux_softnet_stat new file mode 100755 index 000000000..f7987a391 --- /dev/null +++ b/snmp/linux_softnet_stat @@ -0,0 +1,144 @@ +#!/usr/bin/env perl + +=head1 DESCRIPTION + +This is a SNMP extend for monitoring /proc/net/softnet_stat on Linux for use with LibreNMS. + +For more information, see L. + +=head1 SWITCHES + +=head2 -p + +Pretty print the JSON. If used with -b, this switch will be ignored. + +=head2 -b + +Gzip the output and convert to Base64. 
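+
+Assuming the script is installed as /etc/snmp/linux_softnet_stat, output
+gathered with -b can be turned back into JSON with standard tooling, for
+example:
+
+    /etc/snmp/linux_softnet_stat -b | base64 -d | gunzip
+
+Should the compressed form come out longer than the plain JSON, the plain
+JSON is printed instead.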
+ +=cut + +use strict; +use warnings; +use JSON; +use Getopt::Std; +use File::Slurp; +use MIME::Base64; +use Gzip::Faster; + +$Getopt::Std::STANDARD_HELP_VERSION = 1; + +sub main::VERSION_MESSAGE { + print "Linux softnet stats extend 0.0.1\n"; +} + +sub main::HELP_MESSAGE { + +} + +#this will be dumped to json at the end +my %tojson; +$tojson{totals} = { + backlog_length => 0, + cpu_collision => 0, + flow_limit => 0, + packet_dropped => 0, + packets => 0, + received_rps => 0, + time_squeeze => 0, +}; +$tojson{cores} = []; +$tojson{core_count} = 0; +$tojson{budget} = `sysctl net.core.netdev_budget 2> /dev/null`; +$tojson{budget_usecs} = `sysctl net.core.netdev_budget_usecs 2> /dev/null`; +chomp( $tojson{budget} ); +chomp( $tojson{budget_usecs} ); +$tojson{budget} =~ s/.*\=[\ \t]*//; +$tojson{budget_usecs} =~ s/.*\=[\ \t]*//; + +if ( $tojson{budget} !~ /^[0-9]+$/ ) { + $tojson{budget} = 'unknown'; +} +if ( $tojson{budget_usecs} !~ /^[0-9]+$/ ) { + $tojson{budget_usecs} = 'unknown'; +} + +#gets the options +my %opts = (); +getopts( 'pb', \%opts ); + +my $j = JSON->new; + +if ( $opts{p} && !$opts{b} ) { + $j->pretty(1); + $j->canonical(1); +} + +## +## read it all in +## + +my $lines_raw = read_file('/proc/net/softnet_stat'); +my @lines_split = split( /\n/, $lines_raw ); + +# not all linux kernel versions include softnet_backlog_len or index +my @to_total = keys( %{ $tojson{totals} } ); +foreach my $line (@lines_split) { + my %found; + ( + $found{packets}, $found{packet_drop}, $found{time_squeeze}, $found{zero4}, + $found{zero5}, $found{zero6}, $found{zero7}, $found{zero8}, + $found{cpu_collision}, $found{received_rps}, $found{flow_limit}, $found{backlog_length}, + $found{index} + ) = split( /[\ \t]+/, $line ); + + push( + @{ $tojson{cores} }, + { + core => $tojson{core_count}, + } + ); + + foreach my $item (@to_total) { + if ( !defined( $found{$item} ) ) { + $found{$item} = 0; + } else { + $found{$item} = hex( $found{$item} ); + } + $tojson{totals}{$item} += $found{$item}; + $tojson{cores}[ $tojson{core_count} ]{$item} = $found{$item}; + } + + $tojson{core_count}++; +} ## end foreach my $line (@lines_split) + +## +## print the results +## + +my %head_hash; +$head_hash{'data'} = \%tojson; +$head_hash{'version'} = 1; +$head_hash{'error'} = 0; +$head_hash{'errorString'} = ''; + +my $return_string = $j->encode( \%head_hash ); + +if ( !$opts{p} && !$opts{b} ) { + print $return_string. "\n"; + exit 0; +} elsif ( !$opts{b} ) { + print $return_string; + exit 0; +} + +my $compressed = encode_base64( gzip($return_string) ); +$compressed =~ s/\n//g; +$compressed = $compressed . "\n"; +if ( length($compressed) > length($return_string) ) { + print $return_string. "\n"; +} else { + print $compressed; +} + +exit 0; From 8efec5e2dcfa7cd833c2ddac1776ffcf7334dd9c Mon Sep 17 00:00:00 2001 From: Wheel Date: Mon, 12 Jun 2023 21:06:57 -0400 Subject: [PATCH 245/332] Update osupdate (#466) #444 broke osupdate script --- snmp/osupdate | 1 - 1 file changed, 1 deletion(-) diff --git a/snmp/osupdate b/snmp/osupdate index 8ce829836..87e16873f 100755 --- a/snmp/osupdate +++ b/snmp/osupdate @@ -35,7 +35,6 @@ CMD_APK=' version' ################################################################ # Don't change anything unless you know what are you doing # ################################################################ -echo '<<>>' if command -v zypper &>/dev/null ; then # OpenSUSE # shellcheck disable=SC2086 From 330237af974a0e86a2a1331ce29283ec2e5dce2b Mon Sep 17 00:00:00 2001 From: "Zane C. 
Bowers-Hadley" Date: Mon, 12 Jun 2023 20:16:24 -0500 Subject: [PATCH 246/332] add a line to uncomment if using as a agent (#471) --- snmp/osupdate | 3 +++ 1 file changed, 3 insertions(+) diff --git a/snmp/osupdate b/snmp/osupdate index 87e16873f..9949fba44 100755 --- a/snmp/osupdate +++ b/snmp/osupdate @@ -32,6 +32,9 @@ CMD_PKG=' audit -q -F' BIN_APK='/sbin/apk' CMD_APK=' version' +# If using this as a agent and not a extend, uncomment the line below. +#echo '<<>>' + ################################################################ # Don't change anything unless you know what are you doing # ################################################################ From 55676063475088103854b80a7c06d912662d4251 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A4mes=20M=C3=A9n=C3=A9trey?= Date: Mon, 19 Jun 2023 08:12:15 +0200 Subject: [PATCH 247/332] Smart: fetch the values 177, 231 and 233 as normalized instead of raw (#472) --- snmp/smart | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/snmp/smart b/snmp/smart index ef1304b0d..d3d35bfbd 100755 --- a/snmp/smart +++ b/snmp/smart @@ -326,6 +326,7 @@ foreach my $line ( @disks ){ if ( $line =~ /^[0123456789]+ / ) { my @lineA=split(/\ /, $line, 10); my $raw=$lineA[9]; + my $normalized=$lineA[3]; my $id=$lineA[0]; # Crucial SSD @@ -339,21 +340,27 @@ foreach my $line ( @disks ){ ( $id == 5 ) || ( $id == 10 ) || ( $id == 173 ) || - ( $id == 177 ) || ( $id == 183 ) || ( $id == 184 ) || ( $id == 187 ) || ( $id == 196 ) || ( $id == 197 ) || ( $id == 198 ) || - ( $id == 199 ) || - ( $id == 231 ) || - ( $id == 233 ) + ( $id == 199 ) ) { my @rawA=split( /\ /, $raw ); $IDs{$id}=$rawA[0]; } + # single int normalized values + if ( + ( $id == 177 ) || + ( $id == 231 ) || + ( $id == 233 ) + ) { + $IDs{$id}=int($normalized); + } + # 9, power on hours if ( $id == 9 ) { my @runtime=split(/[\ h]/, $raw); From 6e11473a0bb18a7b90959b348b01396bce8b1036 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Mon, 19 Jun 2023 01:46:41 -0500 Subject: [PATCH 248/332] privoxy fix nog_conns and actually do compression now (#473) * fix nog_conns parsing for privoxy * add compression support --- snmp/privoxy | 128 ++++++++++++++++++++++----------------------------- 1 file changed, 56 insertions(+), 72 deletions(-) diff --git a/snmp/privoxy b/snmp/privoxy index b52405910..26e87cddd 100755 --- a/snmp/privoxy +++ b/snmp/privoxy @@ -47,6 +47,8 @@ use File::ReadBackwards; use JSON; use Time::Piece; use IPC::Run3; +use MIME::Base64; +use Gzip::Faster; # get the current time my $t = localtime; @@ -58,8 +60,7 @@ if ( $t->tzoffset =~ /^-/ ) { my $offset = $t->tzoffset; $offset =~ s/^\-//; $till = $till - $offset; -} -else { +} else { my $offset = $t->tzoffset; $offset =~ s/^\+//; $till = $till + $offset; @@ -161,7 +162,7 @@ if ($@) { print "\n"; } exit 0; -} +} ## end if ($@) my $read_file = 1; @@ -197,8 +198,7 @@ while ( defined( $log_line = $bw->readline ) # otherwise add it if ( $log_t->epoch < $till ) { $read_file = 0; - } - else { + } else { $lines = $log_line . 
$lines; if ( $log_line =~ /^\d\d\d\d\-\d\d\-\d\d\ \d\d\:\d\d\:\d\d.*Crunch\:\ Blocked\:\ / ) { @@ -218,15 +218,15 @@ while ( defined( $log_line = $bw->readline ) $log_line =~ s/\:\d+$//; $unique_domains_np->{$log_line_tmp} = 1; } - } - } - } + } ## end if ( $log_line =~ /^\d\d\d\d\-\d\d\-\d\d\ \d\d\:\d\d\:\d\d.*Connect\:\ to\ /) + } ## end else [ if ( $log_t->epoch < $till ) ] + } ## end if ( defined($log_t) ) # if we don't have log_t, just add the line and lot the log parser figure out what it is else { $lines = $log_line . $lines; } -} +} ## end while ( defined( $log_line = $bw->readline ) ...) my $stdout; my $stderr; @@ -246,92 +246,77 @@ foreach my $line (@stdout_split) { $multiline_mode = ''; $line =~ s/.*\:\ //; $to_return->{data}{client_requests} = $line; - } - elsif ( $line =~ /^Crunches\:/ ) { + } elsif ( $line =~ /^Crunches\:/ ) { $multiline_mode = ''; $line =~ s/.*\:\ //; $line =~ s/\ .*$//; $to_return->{data}{crunches} = $line; - } - elsif ( $line =~ /^Blocks:/ ) { + } elsif ( $line =~ /^Blocks:/ ) { $multiline_mode = ''; $line =~ s/.*\:\ //; $line =~ s/\ .*$//; $to_return->{data}{blocks} = $line; - } - elsif ( $line =~ /^Fast\ redirections\:/ ) { + } elsif ( $line =~ /^Fast\ redirections\:/ ) { $multiline_mode = ''; $line =~ s/.*\:\ //; $line =~ s/\ .*$//; $to_return->{data}{fast_redirs} = $line; - } - elsif ( $line =~ /^Connection\ timeouts\:/ ) { + } elsif ( $line =~ /^Connection\ timeouts\:/ ) { $multiline_mode = ''; $line =~ s/.*\:\ //; $line =~ s/\ .*$//; $to_return->{data}{con_timeouts} = $line; - } - elsif ( $line =~ /^Connection\ failures\:/ ) { + } elsif ( $line =~ /^Connection\ failures\:/ ) { $multiline_mode = ''; $line =~ s/.*\:\ //; $line =~ s/\ .*$//; $to_return->{data}{con_failures} = $line; - } - elsif ( $line =~ /^Outgoing\ requests\:/ ) { + } elsif ( $line =~ /^Outgoing\ requests\:/ ) { $multiline_mode = ''; $line =~ s/.*\:\ //; $line =~ s/\ .*$//; $to_return->{data}{out_requests} = $line; - } - elsif ( $line =~ /^Server keep-alive offers\:/ ) { + } elsif ( $line =~ /^Server keep-alive offers\:/ ) { $multiline_mode = ''; $line =~ s/.*\:\ //; $line =~ s/\ .*$//; $to_return->{data}{ska_offers} = $line; - } - elsif ( $line =~ /^New\ outgoing\ connections\:/ ) { + } elsif ( $line =~ /^New\ outgoing\ connections\:/ ) { $multiline_mode = ''; - $line =~ s/.*\:\ \-//; + $line =~ s/.*\:\ //; $line =~ s/\ .*$//; $to_return->{data}{nog_conns} = $line; - } - elsif ( $line =~ /^Reused\ server\ connections\:/ ) { + } elsif ( $line =~ /^Reused\ server\ connections\:/ ) { $multiline_mode = ''; $line =~ s/.*connections\:\ //; $line =~ s/\ .*$//; $to_return->{data}{reused_server_cons} = $line; - } - elsif ( $line =~ /^Empty\ responses\:/ ) { + } elsif ( $line =~ /^Empty\ responses\:/ ) { $multiline_mode = ''; $line =~ s/.*\:\ //; $line =~ s/\ .*$//; $to_return->{data}{empty_resps} = $line; - } - elsif ( $line =~ /^Empty\ responses\ on\ new\ connections\:/ ) { + } elsif ( $line =~ /^Empty\ responses\ on\ new\ connections\:/ ) { $multiline_mode = ''; $line =~ s/.*\:\ //; $line =~ s/\ .*$//; $to_return->{data}{empty_resps_new} = $line; - } - elsif ( $line =~ /^Empty\ responses\ on\ reused\ connections\:/ ) { + } elsif ( $line =~ /^Empty\ responses\ on\ reused\ connections\:/ ) { $multiline_mode = ''; $line =~ s/.*\:\ //; $line =~ s/\ .*$//; $to_return->{data}{empty_resps_reuse} = $line; - } - elsif ( $line =~ /^Client\ connections\:/ ) { + } elsif ( $line =~ /^Client\ connections\:/ ) { $multiline_mode = ''; $line =~ s/.*\:\ //; $line =~ s/\ .*$//; $to_return->{data}{client_cons} 
= $line; - } - elsif ( $line =~ /^Bytes\ of\ content\ transferred\ to\ the\ client\:/ ) { + } elsif ( $line =~ /^Bytes\ of\ content\ transferred\ to\ the\ client\:/ ) { $multiline_mode = ''; $line =~ s/.*\:\ //; $line =~ s/\ .*$//; $to_return->{data}{bytes_to_client} = $line; - } - elsif ( $line =~ /^Improperly\ accounted\ requests\:/ ) { + } elsif ( $line =~ /^Improperly\ accounted\ requests\:/ ) { $multiline_mode = ''; $line =~ s/.*\:\ \~//; $line =~ s/\ .*$//; @@ -341,14 +326,11 @@ foreach my $line (@stdout_split) { # match various multi line modes starts elsif ( $line =~ /^Client\ requests\ per\ connection\ distribution\:/ ) { $multiline_mode = 'requests per con'; - } - elsif ( $line =~ /^Method\ distribution\:/ ) { + } elsif ( $line =~ /^Method\ distribution\:/ ) { $multiline_mode = 'method'; - } - elsif ( $line =~ /^Client HTTP versions:/ ) { + } elsif ( $line =~ /^Client HTTP versions:/ ) { $multiline_mode = 'version'; - } - elsif ( $line + } elsif ( $line =~ /^HTTP\ status\ codes\ according\ to\ \'debug\ 512\' \(status\ codes\ sent\ by\ the\ server\ may\ differ\)\:/ ) { @@ -362,16 +344,14 @@ foreach my $line (@stdout_split) { if ( $line > $to_return->{data}{max_reqs} ) { $to_return->{data}{max_reqs} = $line; } - } - elsif ( $multiline_mode eq 'method' ) { + } elsif ( $multiline_mode eq 'method' ) { $line =~ s/^ +//; my ( $count, $method ) = split( /\ \:\ /, $line ); $method = lc($method); if ( defined( $to_return->{data}{ 'req_' . $method } ) ) { $to_return->{data}{ 'req_' . $method } = $count; } - } - elsif ( $multiline_mode eq 'version' ) { + } elsif ( $multiline_mode eq 'version' ) { $line =~ s/^ +//; my ( $count, $version ) = split( /\ \:\ /, $line ); $version = lc($version); @@ -380,48 +360,38 @@ foreach my $line (@stdout_split) { if ( defined( $to_return->{data}{$version} ) ) { $to_return->{data}{$version} = $count; } - } - elsif ( $multiline_mode eq 'response' ) { + } elsif ( $multiline_mode eq 'response' ) { $line =~ s/^ +//; my ( $count, $response ) = split( /\ \:\ /, $line ); if ( defined( $to_return->{data}{ 'resp_' . $response } ) ) { $to_return->{data}{ 'resp_' . 
$response } = $count; - } - elsif ( $response =~ /^2\d\d/ ) { + } elsif ( $response =~ /^2\d\d/ ) { $to_return->{data}{resp_2xx_other} = $to_return->{data}{resp_2xx_other} + $count; - } - elsif ( $response =~ /^3\d\d/ ) { + } elsif ( $response =~ /^3\d\d/ ) { $to_return->{data}{resp_3xx_other} = $to_return->{data}{resp_3xx_other} + $count; - } - elsif ( $response =~ /^4\d\d/ ) { + } elsif ( $response =~ /^4\d\d/ ) { $to_return->{data}{resp_4xx_other} = $to_return->{data}{resp_4xx_other} + $count; - } - elsif ( $response =~ /^5\d\d/ ) { + } elsif ( $response =~ /^5\d\d/ ) { $to_return->{data}{resp_3xx_other} = $to_return->{data}{resp_5xx_other} + $count; } if ( $response =~ /^1\d\d/ ) { $to_return->{data}{resp_1xx} = $to_return->{data}{resp_1xx} + $count; - } - elsif ( $response =~ /^2\d\d/ ) { + } elsif ( $response =~ /^2\d\d/ ) { $to_return->{data}{resp_2xx} = $to_return->{data}{resp_2xx} + $count; - } - elsif ( $response =~ /^3\d\d/ ) { + } elsif ( $response =~ /^3\d\d/ ) { $to_return->{data}{resp_3xx} = $to_return->{data}{resp_3xx} + $count; - } - elsif ( $response =~ /^4\d\d/ ) { + } elsif ( $response =~ /^4\d\d/ ) { $to_return->{data}{resp_4xx} = $to_return->{data}{resp_4xx} + $count; - } - elsif ( $response =~ /^5\d\d/ ) { + } elsif ( $response =~ /^5\d\d/ ) { $to_return->{data}{resp_5xx} = $to_return->{data}{resp_5xx} + $count; } - } - } - else { + } ## end elsif ( $multiline_mode eq 'response' ) + } else { $multiline_mode = ''; } -} +} ## end foreach my $line (@stdout_split) my @keys_tmp = keys( %{$unique_bdomains} ); $to_return->{data}{unique_bdomains} = @keys_tmp; @@ -442,6 +412,20 @@ if ( $to_return->{data}{blocks} > 0 && $to_return->{data}{client_requests} > 0 ) $to_return->{data}{block_percent} = $to_return->{data}{blocks} / $to_return->{data}{client_requests}; } +if ($compress) { + my $return_string = encode_json($to_return); + my $compressed = encode_base64( gzip($return_string) ); + $compressed =~ s/\n//g; + $compressed = $compressed . "\n"; + if ( length($compressed) > length($return_string) ) { + print $return_string. "\n"; + } else { + print $compressed; + } + + exit 0; +} ## end if ($compress) + print $json->encode($to_return); if ( !$opts{p} ) { print "\n"; From 3bb46945c43c65cd10c000969efcf9c134aba093 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Mon, 19 Jun 2023 21:00:16 -0500 Subject: [PATCH 249/332] add smart-v1, a new JSON based smart poller (#474) Now also grabs.... general health status FW version selftest log make model disk + serial --- snmp/smart-v1 | 537 ++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 537 insertions(+) create mode 100755 snmp/smart-v1 diff --git a/snmp/smart-v1 b/snmp/smart-v1 new file mode 100755 index 000000000..9a42e175b --- /dev/null +++ b/snmp/smart-v1 @@ -0,0 +1,537 @@ +#!/usr/bin/env perl +#Copyright (c) 2023, Zane C. Bowers-Hadley +#All rights reserved. +# +#Redistribution and use in source and binary forms, with or without modification, +#are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. 
+# +#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +#ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +#WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +#IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, +#INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +#BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +#LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +#OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +#THE POSSIBILITY OF SUCH DAMAGE. + +=for comment + +Add this to snmpd.conf like below. + + extend smart /etc/snmp/smart + +Then add to root's cron tab, if you have more than a few disks. + + */3 * * * * /etc/snmp/smart -u + +You will also need to create the config file, which defaults to the same path as the script, +but with .config appended. So if the script is located at /etc/snmp/smart, the config file +will be /etc/snmp/smart.config. Alternatively you can also specific a config via -c. + +Anything starting with a # is comment. The format for variables is $variable=$value. Empty +lines are ignored. Spaces and tabes at either the start or end of a line are ignored. Any +line with out a matched variable or # are treated as a disk. + + #This is a comment + cache=/var/cache/smart + smartctl=/usr/local/sbin/smartctl + useSN=0 + ada0 + da5 /dev/da5 -d sat + twl0,0 /dev/twl0 -d 3ware,0 + twl0,1 /dev/twl0 -d 3ware,1 + twl0,2 /dev/twl0 -d 3ware,2 + +The variables are as below. + + cache = The path to the cache file to use. Default: /var/cache/smart + smartctl = The path to use for smartctl. Default: /usr/bin/env smartctl + useSN = If set to 1, it will use the disks SN for reporting instead of the device name. + 1 is the default. 0 will use the device name. + +A disk line is can be as simple as just a disk name under /dev/. Such as in the config above +The line "ada0" would resolve to "/dev/ada0" and would be called with no special argument. If +a line has a space in it, everything before the space is treated as the disk name and is what +used for reporting and everything after that is used as the argument to be passed to smartctl. + +If you want to guess at the configuration, call it with -g and it will print out what it thinks +it should be. + +=cut + +## +## You should not need to touch anything below here. +## +use warnings; +use strict; +use Getopt::Std; +use JSON; +use MIME::Base64; +use Gzip::Faster; + +my $cache = '/var/cache/smart'; +my $smartctl = '/usr/bin/env smartctl'; +my @disks; +my $useSN = 1; + +$Getopt::Std::STANDARD_HELP_VERSION = 1; + +sub main::VERSION_MESSAGE { + print "SMART SNMP extend 0.1.0\n"; +} + +sub main::HELP_MESSAGE { + print "\n" + . "-u Update '" + . $cache . "'\n" + . "-g Guess at the config and print it to STDOUT.\n" + . "-c The config file to use.\n" + . "-p Pretty print the JSON.\n" + . 
"-Z GZip+Base64 compress the results.\n"; + +} ## end sub main::HELP_MESSAGE + +#gets the options +my %opts = (); +getopts( 'ugc:pZ', \%opts ); + +# configure JSON for later usage +my $json = JSON->new->allow_nonref->canonical(1); +if ( $opts{p} ) { + $json->pretty; +} + +my $to_return = { + data => { disks => {} }, + version => 1, + error => 0, + errorString => '', +}; + +# guess if asked +if ( defined( $opts{g} ) ) { + + #get what path to use for smartctl + $smartctl = `which smartctl`; + chomp($smartctl); + if ( $? != 0 ) { + warn("'which smartctl' failed with a exit code of $?"); + exit 1; + } + + #try to touch the default cache location and warn if it can't be done + system( 'touch ' . $cache . '>/dev/null' ); + if ( $? != 0 ) { + $cache = '#Could not touch ' . $cache . "You will need to manually set it\n" . "cache=?\n"; + } else { + system( 'rm -f ' . $cache . '>/dev/null' ); + $cache = 'cache=' . $cache . "\n"; + } + + # used for checking if a disk has been found more than once + my %found_disks_names; + my @argumentsA; + + #have smartctl scan and see if it finds anythings not get found + my $scan_output = `$smartctl --scan-open`; + my @scan_outputA = split( /\n/, $scan_output ); + + # remove non-SMART devices sometimes returned + @scan_outputA = grep( !/ses[0-9]/, @scan_outputA ); # not a disk, but may or may not have SMART attributes + @scan_outputA = grep( !/pass[0-9]/, @scan_outputA ); # very likely a duplicate and a disk under another name + @scan_outputA = grep( !/cd[0-9]/, @scan_outputA ); # CD drive + if ( $^O eq 'freebsd' ) { + @scan_outputA = grep( !/sa[0-9]/, @scan_outputA ); # tape drive + @scan_outputA = grep( !/ctl[0-9]/, @scan_outputA ); # CAM target layer + } elsif ( $^O eq 'linux' ) { + @scan_outputA = grep( !/st[0-9]/, @scan_outputA ); # SCSI tape drive + @scan_outputA = grep( !/ht[0-9]/, @scan_outputA ); # ATA tape drive + } + + # make the first pass, figuring out what all we have and trimming comments + foreach my $arguments (@scan_outputA) { + my $name = $arguments; + + $arguments =~ s/ \#.*//; # trim the comment out of the argument + $name =~ s/ .*//; + $name =~ s/\/dev\///; + if ( defined( $found_disks_names{$name} ) ) { + $found_disks_names{$name}++; + } else { + $found_disks_names{$name} = 0; + } + + push( @argumentsA, $arguments ); + + } ## end foreach my $arguments (@scan_outputA) + + # second pass, putting the lines together + my %current_disk; + my $drive_lines = ''; + foreach my $arguments (@argumentsA) { + my $name = $arguments; + $name =~ s/ .*//; + $name =~ s/\/dev\///; + + if ( $found_disks_names{$name} == 0 ) { + # If no other devices, just name it after the base device. + $drive_lines = $drive_lines . $name . " " . $arguments . "\n"; + } else { + # if more than one, start at zero and increment, apennding comma number to the base device name + if ( defined( $current_disk{$name} ) ) { + $current_disk{$name}++; + } else { + $current_disk{$name} = 0; + } + $drive_lines = $drive_lines . $name . "," . $current_disk{$name} . " " . $arguments . "\n"; + } + + } ## end foreach my $arguments (@argumentsA) + + print "useSN=1\n" . 'smartctl=' . $smartctl . "\n" . $cache . $drive_lines; + + exit 0; +} ## end if ( defined( $opts{g} ) ) + +#get which config file to use +my $config = $0 . '.config'; +if ( defined( $opts{c} ) ) { + $config = $opts{c}; +} + +#reads the config file, optionally +my $config_file = ''; +open( my $readfh, "<", $config ) or die "Can't open '" . $config . 
"'"; +read( $readfh, $config_file, 1000000 ); +close($readfh); + +#parse the config file and remove comments and empty lines +my @configA = split( /\n/, $config_file ); +@configA = grep( !/^$/, @configA ); +@configA = grep( !/^\#/, @configA ); +@configA = grep( !/^[\s\t]*$/, @configA ); +my $configA_int = 0; +while ( defined( $configA[$configA_int] ) ) { + my $line = $configA[$configA_int]; + chomp($line); + $line =~ s/^[\t\s]+//; + $line =~ s/[\t\s]+$//; + + my ( $var, $val ) = split( /=/, $line, 2 ); + + my $matched; + if ( $var eq 'cache' ) { + $cache = $val; + $matched = 1; + } + + if ( $var eq 'smartctl' ) { + $smartctl = $val; + $matched = 1; + } + + if ( $var eq 'useSN' ) { + $useSN = $val; + $matched = 1; + } + + if ( !defined($val) ) { + push( @disks, $line ); + } + + $configA_int++; +} ## end while ( defined( $configA[$configA_int] ) ) + +#if set to 1, no cache will be written and it will be printed instead +my $noWrite = 0; + +# if no -u, it means we are being called from snmped +if ( !defined( $opts{u} ) ) { + # if the cache file exists, print it, otherwise assume one is not being used + if ( -f $cache ) { + my $old = ''; + open( my $readfh, "<", $cache ) or die "Can't open '" . $cache . "'"; + read( $readfh, $old, 1000000 ); + close($readfh); + print $old; + exit 0; + } else { + $opts{u} = 1; + $noWrite = 1; + } +} ## end if ( !defined( $opts{u} ) ) + +foreach my $line (@disks) { + my $disk; + my $name; + if ( $line =~ /\ / ) { + ( $name, $disk ) = split( /\ /, $line, 2 ); + } else { + $disk = $line; + $name = $line; + } + my $output; + if ( $disk !~ /\// ) { + $disk = '/dev/' . $disk; + } + $output = `$smartctl -A $disk`; + my %IDs = ( + '5' => 'null', + '10' => 'null', + '173' => 'null', + '177' => 'null', + '183' => 'null', + '184' => 'null', + '187' => 'null', + '188' => 'null', + '190' => 'null', + '194' => 'null', + '196' => 'null', + '197' => 'null', + '198' => 'null', + '199' => 'null', + '231' => 'null', + '233' => 'null', + '9' => 'null', + 'disk' => $disk, + 'serial' => undef, + 'selftest_log' => undef, + 'health_pass' => 0, + ); + $IDs{'disk'} =~ s/^\/dev\///; + + my @outputA; + + if ( $output =~ /NVMe Log/ ) { + # we have an NVMe drive with annoyingly different output + my %mappings = ( + 'Temperature' => 194, + 'Power Cycles' => 12, + 'Power On Hours' => 9, + 'Percentage Used' => 231, + ); + foreach ( split( /\n/, $output ) ) { + if (/:/) { + my ( $key, $val ) = split(/:/); + $val =~ s/^\s+|\s+$|\D+//g; + if ( exists( $mappings{$key} ) ) { + if ( $mappings{$key} == 231 ) { + $IDs{ $mappings{$key} } = 100 - $val; + } else { + $IDs{ $mappings{$key} } = $val; + } + } + } ## end if (/:/) + } ## end foreach ( split( /\n/, $output ) ) + + } else { + @outputA = split( /\n/, $output ); + my $outputAint = 0; + while ( defined( $outputA[$outputAint] ) ) { + my $line = $outputA[$outputAint]; + $line =~ s/^ +//; + $line =~ s/ +/ /g; + + if ( $line =~ /^[0123456789]+ / ) { + my @lineA = split( /\ /, $line, 10 ); + my $raw = $lineA[9]; + my $normalized = $lineA[3]; + my $id = $lineA[0]; + + # Crucial SSD + # 202, Percent_Lifetime_Remain, same as 231, SSD Life Left + if ( $id == 202 ) { + $IDs{231} = $raw; + } + + # single int raw values + if ( ( $id == 5 ) + || ( $id == 10 ) + || ( $id == 173 ) + || ( $id == 183 ) + || ( $id == 184 ) + || ( $id == 187 ) + || ( $id == 196 ) + || ( $id == 197 ) + || ( $id == 198 ) + || ( $id == 199 ) ) + { + my @rawA = split( /\ /, $raw ); + $IDs{$id} = $rawA[0]; + } ## end if ( ( $id == 5 ) || ( $id == 10 ) || ( $id...)) + + # single int 
normalized values + if ( ( $id == 177 ) + || ( $id == 231 ) + || ( $id == 233 ) ) + { + $IDs{$id} = int($normalized); + } + + # 9, power on hours + if ( $id == 9 ) { + my @runtime = split( /[\ h]/, $raw ); + $IDs{$id} = $runtime[0]; + } + + # 188, Command_Timeout + if ( $id == 188 ) { + my $total = 0; + my @rawA = split( /\ /, $raw ); + my $rawAint = 0; + while ( defined( $rawA[$rawAint] ) ) { + $total = $total + $rawA[$rawAint]; + $rawAint++; + } + $IDs{$id} = $total; + } ## end if ( $id == 188 ) + + # 190, airflow temp + # 194, temp + if ( ( $id == 190 ) + || ( $id == 194 ) ) + { + my ($temp) = split( /\ /, $raw ); + $IDs{$id} = $temp; + } + } ## end if ( $line =~ /^[0123456789]+ / ) + + # SAS Wrapping + # Section by Cameron Munroe (munroenet[at]gmail.com) + + # Elements in Grown Defect List. + # Marking as 5 Reallocated_Sector_Ct + + if ( $line =~ "Elements in grown defect list:" ) { + + my @lineA = split( /\ /, $line, 10 ); + my $raw = $lineA[5]; + + # Reallocated Sector Count ID + $IDs{5} = $raw; + + } + + # Current Drive Temperature + # Marking as 194 Temperature_Celsius + + if ( $line =~ "Current Drive Temperature:" ) { + + my @lineA = split( /\ /, $line, 10 ); + my $raw = $lineA[3]; + + # Temperature C ID + $IDs{194} = $raw; + + } + + # End of SAS Wrapper + + $outputAint++; + } ## end while ( defined( $outputA[$outputAint] ) ) + } ## end else [ if ( $output =~ /NVMe Log/ ) ] + + #get the selftest logs + $output = `$smartctl -l selftest $disk`; + @outputA = split( /\n/, $output ); + my @completed = grep( /Completed without error/, @outputA ); + $IDs{'completed'} = scalar @completed; + my @interrupted = grep( /Interrupted/, @outputA ); + $IDs{'interrupted'} = scalar @interrupted; + my @read_failure = grep( /read failure/, @outputA ); + $IDs{'read_failure'} = scalar @read_failure; + my @unknown_failure = grep( /unknown failure/, @outputA ); + $IDs{'unknown_failure'} = scalar @unknown_failure; + my @extended = grep( /Extended/, @outputA ); + $IDs{'extended'} = scalar @extended; + my @short = grep( /Short/, @outputA ); + $IDs{'short'} = scalar @short; + my @conveyance = grep( /Conveyance/, @outputA ); + $IDs{'conveyance'} = scalar @conveyance; + my @selective = grep( /Selective/, @outputA ); + $IDs{'selective'} = scalar @selective; + + # if we have logs, actually grab the log output + if ( $IDs{'completed'} > 0 + || $IDs{'interrupted'} > 0 + || $IDs{'read_failure'} > 0 + || $IDs{'extended'} > 0 + || $IDs{'short'} > 0 + || $IDs{'conveyance'} > 0 + || $IDs{'selective'} > 0 ) + { + my @log_lines; + push( @log_lines, @extended, @short, @conveyance, @selective ); + $IDs{'selftest_log'} = join( "\n", sort(@log_lines) ); + } ## end if ( $IDs{'completed'} > 0 || $IDs{'interrupted'...}) + + # get the drive serial number, if needed + my $disk_id = $name; + $output=`$smartctl -i $disk`; + while ( $output =~ /(?i)Serial Number:(.*)/g ) { + $IDs{'serial'} = $1; + $IDs{'serial'} =~ s/^\s+|\s+$//g; + } + if ($useSN) { + $disk_id = $IDs{'serial'}; + } + + while ( $output =~ /(?i)Model Family:(.*)/g ) { + $IDs{'model_family'} = $1; + $IDs{'model_family'} =~ s/^\s+|\s+$//g; + } + + while ( $output =~ /(?i)Device Model:(.*)/g ) { + $IDs{'device_model'} = $1; + $IDs{'device_model'} =~ s/^\s+|\s+$//g; + } + + while ( $output =~ /(?i)Model Number:(.*)/g ) { + $IDs{'model_number'} = $1; + $IDs{'model_number'} =~ s/^\s+|\s+$//g; + } + + while ( $output =~ /(?i)Firmware Version:(.*)/g ) { + $IDs{'fw_version'} = $1; + $IDs{'fw_version'} =~ s/^\s+|\s+$//g; + } + + $output = `$smartctl -H $disk`; + if ( $output =~ 
/SMART\ overall\-health\ self\-assessment\ test\ result\:\ PASSED/ ) { + $IDs{'health_pass'} = 1; + } + + $to_return->{data}{disks}{$disk_id} = \%IDs; + +} ## end foreach my $line (@disks) + +my $toReturn = $json->encode($to_return); + +if ( !$opts{p} ) { + $toReturn = $toReturn . "\n"; +} + +if ($opts{Z}) { + my $compressed = encode_base64( gzip($toReturn) ); + $compressed =~ s/\n//g; + $compressed = $compressed . "\n"; + if ( length($compressed) < length($toReturn) ) { + $toReturn=$compressed; + } +} + +if ( !$noWrite ) { + open( my $writefh, ">", $cache ) or die "Can't open '" . $cache . "'"; + print $writefh $toReturn; + close($writefh); +} else { + print $toReturn; +} From 442e691b1a6095dcb49d4d1ecbe67e85d21910b5 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Mon, 26 Jun 2023 22:33:44 -0500 Subject: [PATCH 250/332] update smart-v1 some more (#476) * add various HP specific bits for identity info * more HP related cleanup * add initial ccis guessing support * ccis -> cciss * rework cciss support some more * derp, fix qoute type * make useSN configuration with -g * rework self test logs to be more HP friendly * more test cleanup * more test cleanup * finally get the extended test playing nice with HP stuff * don't print needless error messages if cciss_vol_status is not found * cleanup a edge case, add a new edge case, and now find the max temp * add id 232 * make the scan modes selectable and begin reworking cciss forproperly checking all possible devices * rework how the cciss device path is generated * add exit status checking * improve cciss guess * cleanup the cciss checks some more * convert to IO::Compress::Gzip and update docs --- snmp/smart-v1 | 796 ++++++++++++++++++++++++++++++++++---------------- 1 file changed, 543 insertions(+), 253 deletions(-) diff --git a/snmp/smart-v1 b/snmp/smart-v1 index 9a42e175b..d3b9bbdd6 100755 --- a/snmp/smart-v1 +++ b/snmp/smart-v1 @@ -30,11 +30,11 @@ Add this to snmpd.conf like below. Then add to root's cron tab, if you have more than a few disks. - */3 * * * * /etc/snmp/smart -u + */5 * * * * /etc/snmp/extends/smart -u You will also need to create the config file, which defaults to the same path as the script, but with .config appended. So if the script is located at /etc/snmp/smart, the config file -will be /etc/snmp/smart.config. Alternatively you can also specific a config via -c. +will be /etc/snmp/extends/smart.config. Alternatively you can also specific a config via -c. Anything starting with a # is comment. The format for variables is $variable=$value. Empty lines are ignored. Spaces and tabes at either the start or end of a line are ignored. Any @@ -65,6 +65,31 @@ used for reporting and everything after that is used as the argument to be passe If you want to guess at the configuration, call it with -g and it will print out what it thinks it should be. + +Switches: + +-c The config file to use. +-u Update +-p Pretty print the JSON. +-Z GZip+Base64 compress the results. + +-g Guess at the config and print it to STDOUT +-C Enable manual checking for guess and cciss. +-S Set useSN to 0 when using -g +-G Guess modes to use. This is a comma seperated list. + Default :: scan-open,cciss-vol-status + +Guess Modes: + +- scan :: Use "--scan" with smartctl. "scan-open" will take presidence. + +- scan-open :: Call smartctl with "--scan-open". 
+ +- cciss-vol-status :: Freebsd/Linux specific and if it sees /dev/sg0(on Linux) or + /dev/ciss0(on FreebSD) it will attempt to find drives via cciss-vol-status, + and then optionally checking for disks via smrtctl if -C is given. Should be noted + though that -C will not find drives that are currently missing/failed. + =cut ## @@ -75,7 +100,7 @@ use strict; use Getopt::Std; use JSON; use MIME::Base64; -use Gzip::Faster; +use IO::Compress::Gzip qw(gzip $GzipError); my $cache = '/var/cache/smart'; my $smartctl = '/usr/bin/env smartctl'; @@ -85,38 +110,92 @@ my $useSN = 1; $Getopt::Std::STANDARD_HELP_VERSION = 1; sub main::VERSION_MESSAGE { - print "SMART SNMP extend 0.1.0\n"; + print "SMART SNMP extend 0.2.0\n"; } sub main::HELP_MESSAGE { - print "\n" - . "-u Update '" - . $cache . "'\n" - . "-g Guess at the config and print it to STDOUT.\n" - . "-c The config file to use.\n" - . "-p Pretty print the JSON.\n" - . "-Z GZip+Base64 compress the results.\n"; + &VERSION_MESSAGE; + print "\n" . "-u Update '" . $cache . "'\n" . '-g Guess at the config and print it to STDOUT +-c The config file to use. +-p Pretty print the JSON. +-Z GZip+Base64 compress the results. +-C Enable manual checking for guess and cciss. +-S Set useSN to 0 when using -g +-G Guess modes to use. This is a comma seperated list. + Default :: scan-open,cciss-vol-status + +Scan Modes: + +- scan :: Use "--scan" with smartctl. "scan-open" will take presidence. + +- scan-open :: Call smartctl with "--scan-open". + +- cciss-vol-status :: Freebsd/Linux specific and if it sees /dev/sg0(on Linux) or + /dev/ciss0(on FreebSD) it will attempt to find drives via cciss-vol-status, + and then optionally checking for disks via smrtctl if -C is given. Should be noted + though that -C will not find drives that are currently missing/failed. +'; } ## end sub main::HELP_MESSAGE #gets the options my %opts = (); -getopts( 'ugc:pZ', \%opts ); +getopts( 'ugc:pZhvCSG', \%opts ); + +if ( $opts{h} ) { + &HELP_MESSAGE; + exit; +} +if ( $opts{v} ) { + &VERSION_MESSAGE; + exit; +} + +# +# figure out what scan modes to use if -g specified +# +my $scan_modes = { + 'scan-open' => 0, + 'scan' => 0, + 'cciss_vol_status' => 0, +}; +if ( $opts{g} ) { + if ( !defined( $opts{G} ) ) { + $opts{G} = 'scan-open,cciss_vol_status'; + } + $opts{G} =~ s/[\ \t]//g; + my @scan_modes_split = split( /,/, $opts{G} ); + foreach my $mode (@scan_modes_split) { + if ( !defined $scan_modes->{$mode} ) { + die( '"' . $mode . '" is not a recognized scan mode' ); + } + $scan_modes->{$mode} = 1; + } +} ## end if ( $opts{g} ) # configure JSON for later usage -my $json = JSON->new->allow_nonref->canonical(1); -if ( $opts{p} ) { - $json->pretty; +# only need to do this if actually running as in -g is not specified +my $json; +if ( !$opts{g} ) { + + $json = JSON->new->allow_nonref->canonical(1); + if ( $opts{p} ) { + $json->pretty; + } } my $to_return = { - data => { disks => {} }, + data => { disks => {}, exit_nonzero => 0, unhealthy => 0, }, version => 1, error => 0, errorString => '', }; +# +# # guess if asked +# +# if ( defined( $opts{g} ) ) { #get what path to use for smartctl @@ -136,67 +215,192 @@ if ( defined( $opts{g} ) ) { $cache = 'cache=' . $cache . 
"\n"; } - # used for checking if a disk has been found more than once - my %found_disks_names; - my @argumentsA; - - #have smartctl scan and see if it finds anythings not get found - my $scan_output = `$smartctl --scan-open`; - my @scan_outputA = split( /\n/, $scan_output ); - - # remove non-SMART devices sometimes returned - @scan_outputA = grep( !/ses[0-9]/, @scan_outputA ); # not a disk, but may or may not have SMART attributes - @scan_outputA = grep( !/pass[0-9]/, @scan_outputA ); # very likely a duplicate and a disk under another name - @scan_outputA = grep( !/cd[0-9]/, @scan_outputA ); # CD drive - if ( $^O eq 'freebsd' ) { - @scan_outputA = grep( !/sa[0-9]/, @scan_outputA ); # tape drive - @scan_outputA = grep( !/ctl[0-9]/, @scan_outputA ); # CAM target layer - } elsif ( $^O eq 'linux' ) { - @scan_outputA = grep( !/st[0-9]/, @scan_outputA ); # SCSI tape drive - @scan_outputA = grep( !/ht[0-9]/, @scan_outputA ); # ATA tape drive - } + my $drive_lines = ''; - # make the first pass, figuring out what all we have and trimming comments - foreach my $arguments (@scan_outputA) { - my $name = $arguments; + # + # + # scan-open and scan guess mode handling + # + # + if ( $scan_modes->{'scan-open'} || $scan_modes->{'scan'} ) { + # used for checking if a disk has been found more than once + my %found_disks_names; + my @argumentsA; + + # use scan-open if it is set, overriding scan if it is also set + my $mode = 'scan'; + if ( $scan_modes->{'scan-open'} ) { + $mode = 'scan-open'; + } - $arguments =~ s/ \#.*//; # trim the comment out of the argument - $name =~ s/ .*//; - $name =~ s/\/dev\///; - if ( defined( $found_disks_names{$name} ) ) { - $found_disks_names{$name}++; - } else { - $found_disks_names{$name} = 0; + #have smartctl scan and see if it finds anythings not get found + my $scan_output = `$smartctl --$mode`; + my @scan_outputA = split( /\n/, $scan_output ); + + # remove non-SMART devices sometimes returned + @scan_outputA = grep( !/ses[0-9]/, @scan_outputA ); # not a disk, but may or may not have SMART attributes + @scan_outputA = grep( !/pass[0-9]/, @scan_outputA ); # very likely a duplicate and a disk under another name + @scan_outputA = grep( !/cd[0-9]/, @scan_outputA ); # CD drive + if ( $^O eq 'freebsd' ) { + @scan_outputA = grep( !/sa[0-9]/, @scan_outputA ); # tape drive + @scan_outputA = grep( !/ctl[0-9]/, @scan_outputA ); # CAM target layer + } elsif ( $^O eq 'linux' ) { + @scan_outputA = grep( !/st[0-9]/, @scan_outputA ); # SCSI tape drive + @scan_outputA = grep( !/ht[0-9]/, @scan_outputA ); # ATA tape drive } - push( @argumentsA, $arguments ); + # make the first pass, figuring out what all we have and trimming comments + foreach my $arguments (@scan_outputA) { + my $name = $arguments; - } ## end foreach my $arguments (@scan_outputA) + $arguments =~ s/ \#.*//; # trim the comment out of the argument + $name =~ s/ .*//; + $name =~ s/\/dev\///; + if ( defined( $found_disks_names{$name} ) ) { + $found_disks_names{$name}++; + } else { + $found_disks_names{$name} = 0; + } - # second pass, putting the lines together - my %current_disk; - my $drive_lines = ''; - foreach my $arguments (@argumentsA) { - my $name = $arguments; - $name =~ s/ .*//; - $name =~ s/\/dev\///; - - if ( $found_disks_names{$name} == 0 ) { - # If no other devices, just name it after the base device. - $drive_lines = $drive_lines . $name . " " . $arguments . 
"\n"; - } else { - # if more than one, start at zero and increment, apennding comma number to the base device name - if ( defined( $current_disk{$name} ) ) { - $current_disk{$name}++; + push( @argumentsA, $arguments ); + + } ## end foreach my $arguments (@scan_outputA) + + # second pass, putting the lines together + my %current_disk; + foreach my $arguments (@argumentsA) { + my $name = $arguments; + $name =~ s/ .*//; + $name =~ s/\/dev\///; + + if ( $found_disks_names{$name} == 0 ) { + # If no other devices, just name it after the base device. + $drive_lines = $drive_lines . $name . " " . $arguments . "\n"; } else { - $current_disk{$name} = 0; + # if more than one, start at zero and increment, apennding comma number to the base device name + if ( defined( $current_disk{$name} ) ) { + $current_disk{$name}++; + } else { + $current_disk{$name} = 0; + } + $drive_lines = $drive_lines . $name . "," . $current_disk{$name} . " " . $arguments . "\n"; } - $drive_lines = $drive_lines . $name . "," . $current_disk{$name} . " " . $arguments . "\n"; + + } ## end foreach my $arguments (@argumentsA) + } ## end if ( $scan_modes->{'scan-open'} || $scan_modes...) + + # + # + # scan mode handler for cciss_vol_status + # /dev/sg* devices for cciss on Linux + # /dev/ccis* devices for cciss on FreeBSD + # + # + if ( $scan_modes->{'cciss_vol_status'} && ( $^O eq 'linux' || $^O eq 'freebsd' ) ) { + my $cciss; + if ( $^O eq 'freebsd' ) { + $cciss = 'ciss'; + } elsif ( $^O eq 'linux' ) { + $cciss = 'sg'; } - } ## end foreach my $arguments (@argumentsA) + # generate the initial device path that will be checked + my $sg_int = 0; + my $device = '/dev/' . $cciss . $sg_int; + + my $sg_process = 1; + if ( -e $device ) { + my $output = `which cciss_vol_status 2> /dev/null`; + if ( $? != 0 && !$opts{C} ) { + $sg_process = 0; + $drive_lines + = $drive_lines + . "# -C not given, but " + . $device + . " exists and cciss_vol_status is not present\n" + . "# in path or 'ccis_vol_status -V " + . $device + . "' is failing\n"; + } ## end if ( $? != 0 && !$opts{C} ) + } ## end if ( -e $device ) + my $seen_lines = {}; + while ( -e $device && $sg_process ) { + my $output = `cciss_vol_status -V $device 2> /dev/null`; + if ( $? != 0 && $output eq '' && !$opts{C} ) { + # just empty here as we just want to skip it if it fails and there is no C + # warning is above + } elsif ( $? != 0 && $output eq '' && $opts{C} ) { + my $drive_count = 0; + my $continue = 1; + while ($continue) { + my $output = `$smartctl -A $device -d cciss,$drive_count 2> /dev/null`; + if ( $? != 0 ) { + $continue = 0; + } else { + $continue = 0; + my $add_it = 0; + # if we have smart data for this device, process it + while ( $output =~ /(?i)START OF READ SMART DATA SECTION(.*)/g && !$continue ) { + $continue = 1; + my $id; + while ( $output =~ /(?i)Serial Number:(.*)/g ) { + $id = $1; + $id =~ s/^\s+|\s+$//g; + } + if ( defined($id) && !defined( $seen_lines->{$id} ) ) { + $add_it = 1; + $seen_lines->{$id} = 1; + } + } ## end while ( $output =~ /(?i)START OF READ SMART DATA SECTION(.*)/g...) + if ( $continue && $add_it ) { + $drive_lines + = $drive_lines + . $cciss . '0-' + . $drive_count . ' ' + . $device + . ' -d cciss,' + . $drive_count . "\n"; + } + } ## end else [ if ( $? 
!= 0 ) ] + $drive_count++; + } ## end while ($continue) + } else { + my $sg_drive_int = 0; + my $drive_count = 0; + # count the connector lines, this will make sure failed are founded as well + while ( $output =~ /(connector +\d.*box +\d.*bay +\d.*)/g ) { + if ( !defined( $seen_lines->{$1} ) ) { + $seen_lines->{$1} = 1; + $drive_count++; + } + } + my $drive_int = 0; + while ( $drive_int < $drive_count ) { + $drive_lines + = $drive_lines . $cciss . '0-' . $drive_int . ' ' . $device . ' -d cciss,' . $drive_int . "\n"; - print "useSN=1\n" . 'smartctl=' . $smartctl . "\n" . $cache . $drive_lines; + $drive_int++; + } + } ## end else [ if ( $? != 0 && $output eq '' && !$opts{C})] + + $sg_int++; + $device = '/dev/' . $cciss . $sg_int; + } ## end while ( -e $device && $sg_process ) + } ## end if ( $scan_modes->{'cciss_vol_status'} && ...) + + my $useSN = 1; + if ( $opts{S} ) { + $useSN = 0; + } + + print '# scan_modes=' + . $opts{G} + . "\nuseSN=" + . $useSN . "\n" + . 'smartctl=' + . $smartctl . "\n" + . $cache + . $drive_lines; exit 0; } ## end if ( defined( $opts{g} ) ) @@ -213,7 +417,11 @@ open( my $readfh, "<", $config ) or die "Can't open '" . $config . "'"; read( $readfh, $config_file, 1000000 ); close($readfh); -#parse the config file and remove comments and empty lines +# +# +# parse the config file and remove comments and empty lines +# +# my @configA = split( /\n/, $config_file ); @configA = grep( !/^$/, @configA ); @configA = grep( !/^\#/, @configA ); @@ -269,6 +477,11 @@ if ( !defined( $opts{u} ) ) { } } ## end if ( !defined( $opts{u} ) ) +# +# +# Process each disk +# +# foreach my $line (@disks) { my $disk; my $name; @@ -278,12 +491,11 @@ foreach my $line (@disks) { $disk = $line; $name = $line; } - my $output; if ( $disk !~ /\// ) { $disk = '/dev/' . 
$disk; } - $output = `$smartctl -A $disk`; - my %IDs = ( + my $output = `$smartctl -A $disk`; + my %IDs = ( '5' => 'null', '10' => 'null', '173' => 'null', @@ -299,218 +511,294 @@ foreach my $line (@disks) { '198' => 'null', '199' => 'null', '231' => 'null', + '232' => 'null', '233' => 'null', '9' => 'null', 'disk' => $disk, 'serial' => undef, 'selftest_log' => undef, 'health_pass' => 0, + max_temp => 'null', + exit => $?, ); $IDs{'disk'} =~ s/^\/dev\///; - my @outputA; - - if ( $output =~ /NVMe Log/ ) { - # we have an NVMe drive with annoyingly different output - my %mappings = ( - 'Temperature' => 194, - 'Power Cycles' => 12, - 'Power On Hours' => 9, - 'Percentage Used' => 231, - ); - foreach ( split( /\n/, $output ) ) { - if (/:/) { - my ( $key, $val ) = split(/:/); - $val =~ s/^\s+|\s+$|\D+//g; - if ( exists( $mappings{$key} ) ) { - if ( $mappings{$key} == 231 ) { - $IDs{ $mappings{$key} } = 100 - $val; - } else { - $IDs{ $mappings{$key} } = $val; - } - } - } ## end if (/:/) - } ## end foreach ( split( /\n/, $output ) ) - + # if polling exited non-zero above, no reason running the rest of the checks + my $disk_id = $name; + if ( $IDs{exit} != 0 ) { + $to_return->{data}{exit_nonzero}++; } else { - @outputA = split( /\n/, $output ); - my $outputAint = 0; - while ( defined( $outputA[$outputAint] ) ) { - my $line = $outputA[$outputAint]; - $line =~ s/^ +//; - $line =~ s/ +/ /g; - - if ( $line =~ /^[0123456789]+ / ) { - my @lineA = split( /\ /, $line, 10 ); - my $raw = $lineA[9]; - my $normalized = $lineA[3]; - my $id = $lineA[0]; - - # Crucial SSD - # 202, Percent_Lifetime_Remain, same as 231, SSD Life Left - if ( $id == 202 ) { - $IDs{231} = $raw; - } + my @outputA; + + if ( $output =~ /NVMe Log/ ) { + # we have an NVMe drive with annoyingly different output + my %mappings = ( + 'Temperature' => 194, + 'Power Cycles' => 12, + 'Power On Hours' => 9, + 'Percentage Used' => 231, + ); + foreach ( split( /\n/, $output ) ) { + if (/:/) { + my ( $key, $val ) = split(/:/); + $val =~ s/^\s+|\s+$|\D+//g; + if ( exists( $mappings{$key} ) ) { + if ( $mappings{$key} == 231 ) { + $IDs{ $mappings{$key} } = 100 - $val; + } else { + $IDs{ $mappings{$key} } = $val; + } + } + } ## end if (/:/) + } ## end foreach ( split( /\n/, $output ) ) - # single int raw values - if ( ( $id == 5 ) - || ( $id == 10 ) - || ( $id == 173 ) - || ( $id == 183 ) - || ( $id == 184 ) - || ( $id == 187 ) - || ( $id == 196 ) - || ( $id == 197 ) - || ( $id == 198 ) - || ( $id == 199 ) ) - { - my @rawA = split( /\ /, $raw ); - $IDs{$id} = $rawA[0]; - } ## end if ( ( $id == 5 ) || ( $id == 10 ) || ( $id...)) - - # single int normalized values - if ( ( $id == 177 ) - || ( $id == 231 ) - || ( $id == 233 ) ) - { - $IDs{$id} = int($normalized); - } + } else { + @outputA = split( /\n/, $output ); + my $outputAint = 0; + while ( defined( $outputA[$outputAint] ) ) { + my $line = $outputA[$outputAint]; + $line =~ s/^ +//; + $line =~ s/ +/ /g; + + if ( $line =~ /^[0123456789]+ / ) { + my @lineA = split( /\ /, $line, 10 ); + my $raw = $lineA[9]; + my $normalized = $lineA[3]; + my $id = $lineA[0]; + + # Crucial SSD + # 202, Percent_Lifetime_Remain, same as 231, SSD Life Left + if ( $id == 202 + && $line =~ /Percent_Lifetime_Remain/ ) + { + $IDs{231} = $raw; + } - # 9, power on hours - if ( $id == 9 ) { - my @runtime = split( /[\ h]/, $raw ); - $IDs{$id} = $runtime[0]; - } + # single int raw values + if ( ( $id == 5 ) + || ( $id == 10 ) + || ( $id == 173 ) + || ( $id == 183 ) + || ( $id == 184 ) + || ( $id == 187 ) + || ( $id == 196 ) + || ( $id 
== 197 ) + || ( $id == 198 ) + || ( $id == 199 ) ) + { + my @rawA = split( /\ /, $raw ); + $IDs{$id} = $rawA[0]; + } ## end if ( ( $id == 5 ) || ( $id == 10 ) || ( $id...)) + + # single int normalized values + if ( ( $id == 177 ) + || ( $id == 230 ) + || ( $id == 231 ) + || ( $id == 232 ) + || ( $id == 233 ) ) + { + # annoying non-standard disk + # WDC WDS500G2B0A + # 230 Media_Wearout_Indicator 0x0032 100 100 --- Old_age Always - 0x002e000a002e + # 232 Available_Reservd_Space 0x0033 100 100 004 Pre-fail Always - 100 + # 233 NAND_GB_Written_TLC 0x0032 100 100 --- Old_age Always - 9816 + + if ( $id == 230 + && $line =~ /Media_Wearout_Indicator/ ) + { + $IDs{233} = int($normalized); + } elsif ( $id == 232 + && $line =~ /Available_Reservd_Space/ ) + { + $IDs{232} = int($normalized); + } else { + # only set 233 if it has not been set yet + # if it was set already then the above did it and we don't want + # to overwrite it + if ( $id == 233 && $IDs{233} eq "null" ) { + $IDs{$id} = int($normalized); + } elsif ( $id != 233 ) { + $IDs{$id} = int($normalized); + } + } ## end else [ if ( $id == 230 && $line =~ /Media_Wearout_Indicator/)] + } ## end if ( ( $id == 177 ) || ( $id == 230 ) || (...)) + + # 9, power on hours + if ( $id == 9 ) { + my @runtime = split( /[\ h]/, $raw ); + $IDs{$id} = $runtime[0]; + } - # 188, Command_Timeout - if ( $id == 188 ) { - my $total = 0; - my @rawA = split( /\ /, $raw ); - my $rawAint = 0; - while ( defined( $rawA[$rawAint] ) ) { - $total = $total + $rawA[$rawAint]; - $rawAint++; + # 188, Command_Timeout + if ( $id == 188 ) { + my $total = 0; + my @rawA = split( /\ /, $raw ); + my $rawAint = 0; + while ( defined( $rawA[$rawAint] ) ) { + $total = $total + $rawA[$rawAint]; + $rawAint++; + } + $IDs{$id} = $total; + } ## end if ( $id == 188 ) + + # 190, airflow temp + # 194, temp + if ( ( $id == 190 ) + || ( $id == 194 ) ) + { + my ($temp) = split( /\ /, $raw ); + $IDs{$id} = $temp; } - $IDs{$id} = $total; - } ## end if ( $id == 188 ) - - # 190, airflow temp - # 194, temp - if ( ( $id == 190 ) - || ( $id == 194 ) ) - { - my ($temp) = split( /\ /, $raw ); - $IDs{$id} = $temp; - } - } ## end if ( $line =~ /^[0123456789]+ / ) + } ## end if ( $line =~ /^[0123456789]+ / ) - # SAS Wrapping - # Section by Cameron Munroe (munroenet[at]gmail.com) + # SAS Wrapping + # Section by Cameron Munroe (munroenet[at]gmail.com) - # Elements in Grown Defect List. - # Marking as 5 Reallocated_Sector_Ct + # Elements in Grown Defect List. 
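+                    # (for example, on SAS disks smartctl -A reports a line
+                    # such as "Elements in grown defect list: 0")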
+ # Marking as 5 Reallocated_Sector_Ct + if ( $line =~ "Elements in grown defect list:" ) { - if ( $line =~ "Elements in grown defect list:" ) { + my @lineA = split( /\ /, $line, 10 ); + my $raw = $lineA[5]; - my @lineA = split( /\ /, $line, 10 ); - my $raw = $lineA[5]; + # Reallocated Sector Count ID + $IDs{5} = $raw; - # Reallocated Sector Count ID - $IDs{5} = $raw; + } - } + # Current Drive Temperature + # Marking as 194 Temperature_Celsius + if ( $line =~ "Current Drive Temperature:" ) { - # Current Drive Temperature - # Marking as 194 Temperature_Celsius + my @lineA = split( /\ /, $line, 10 ); + my $raw = $lineA[3]; - if ( $line =~ "Current Drive Temperature:" ) { + # Temperature C ID + $IDs{194} = $raw; - my @lineA = split( /\ /, $line, 10 ); - my $raw = $lineA[3]; + } - # Temperature C ID - $IDs{194} = $raw; + # End of SAS Wrapper - } + $outputAint++; + } ## end while ( defined( $outputA[$outputAint] ) ) + } ## end else [ if ( $output =~ /NVMe Log/ ) ] - # End of SAS Wrapper - - $outputAint++; - } ## end while ( defined( $outputA[$outputAint] ) ) - } ## end else [ if ( $output =~ /NVMe Log/ ) ] - - #get the selftest logs - $output = `$smartctl -l selftest $disk`; - @outputA = split( /\n/, $output ); - my @completed = grep( /Completed without error/, @outputA ); - $IDs{'completed'} = scalar @completed; - my @interrupted = grep( /Interrupted/, @outputA ); - $IDs{'interrupted'} = scalar @interrupted; - my @read_failure = grep( /read failure/, @outputA ); - $IDs{'read_failure'} = scalar @read_failure; - my @unknown_failure = grep( /unknown failure/, @outputA ); - $IDs{'unknown_failure'} = scalar @unknown_failure; - my @extended = grep( /Extended/, @outputA ); - $IDs{'extended'} = scalar @extended; - my @short = grep( /Short/, @outputA ); - $IDs{'short'} = scalar @short; - my @conveyance = grep( /Conveyance/, @outputA ); - $IDs{'conveyance'} = scalar @conveyance; - my @selective = grep( /Selective/, @outputA ); - $IDs{'selective'} = scalar @selective; - - # if we have logs, actually grab the log output - if ( $IDs{'completed'} > 0 - || $IDs{'interrupted'} > 0 - || $IDs{'read_failure'} > 0 - || $IDs{'extended'} > 0 - || $IDs{'short'} > 0 - || $IDs{'conveyance'} > 0 - || $IDs{'selective'} > 0 ) - { - my @log_lines; - push( @log_lines, @extended, @short, @conveyance, @selective ); - $IDs{'selftest_log'} = join( "\n", sort(@log_lines) ); - } ## end if ( $IDs{'completed'} > 0 || $IDs{'interrupted'...}) - - # get the drive serial number, if needed - my $disk_id = $name; - $output=`$smartctl -i $disk`; - while ( $output =~ /(?i)Serial Number:(.*)/g ) { - $IDs{'serial'} = $1; - $IDs{'serial'} =~ s/^\s+|\s+$//g; - } - if ($useSN) { - $disk_id = $IDs{'serial'}; - } + #get the selftest logs + $output = `$smartctl -l selftest $disk`; + @outputA = split( /\n/, $output ); + my @completed = grep( /Completed/, @outputA ); + $IDs{'completed'} = scalar @completed; + my @interrupted = grep( /Interrupted/, @outputA ); + $IDs{'interrupted'} = scalar @interrupted; + my @read_failure = grep( /read failure/, @outputA ); + $IDs{'read_failure'} = scalar @read_failure; + my @unknown_failure = grep( /unknown failure/, @outputA ); + $IDs{'unknown_failure'} = scalar @unknown_failure; + my @extended = grep( /\d.*\ ([Ee]xtended|[Ll]ong).*(?![Dd]uration)/, @outputA ); + $IDs{'extended'} = scalar @extended; + my @short = grep( /[Ss]hort/, @outputA ); + $IDs{'short'} = scalar @short; + my @conveyance = grep( /[Cc]onveyance/, @outputA ); + $IDs{'conveyance'} = scalar @conveyance; + my @selective = grep( /[Ss]elective/, 
@outputA ); + $IDs{'selective'} = scalar @selective; + my @offline = grep( /(\d|[Bb]ackground|[Ff]oreground)+\ +[Oo]ffline/, @outputA ); + $IDs{'offline'} = scalar @offline; + + # if we have logs, actually grab the log output + if ( $IDs{'completed'} > 0 + || $IDs{'interrupted'} > 0 + || $IDs{'read_failure'} > 0 + || $IDs{'extended'} > 0 + || $IDs{'short'} > 0 + || $IDs{'conveyance'} > 0 + || $IDs{'selective'} > 0 + || $IDs{'offline'} > 0 ) + { + my @headers = grep( /(Num\ +Test.*LBA| Description .*[Hh]ours)/, @outputA ); + + my @log_lines; + push( @log_lines, @extended, @short, @conveyance, @selective, @offline ); + $IDs{'selftest_log'} = join( "\n", @headers, sort(@log_lines) ); + } ## end if ( $IDs{'completed'} > 0 || $IDs{'interrupted'...}) + + # get the drive serial number, if needed + $disk_id = $name; + $output = `$smartctl -i $disk`; + # generally upper case, HP branded drives seem to report with lower case n + while ( $output =~ /(?i)Serial Number:(.*)/g ) { + $IDs{'serial'} = $1; + $IDs{'serial'} =~ s/^\s+|\s+$//g; + } + if ($useSN) { + $disk_id = $IDs{'serial'}; + } - while ( $output =~ /(?i)Model Family:(.*)/g ) { - $IDs{'model_family'} = $1; - $IDs{'model_family'} =~ s/^\s+|\s+$//g; - } + while ( $output =~ /(?i)Model Family:(.*)/g ) { + $IDs{'model_family'} = $1; + $IDs{'model_family'} =~ s/^\s+|\s+$//g; + } - while ( $output =~ /(?i)Device Model:(.*)/g ) { - $IDs{'device_model'} = $1; - $IDs{'device_model'} =~ s/^\s+|\s+$//g; - } + while ( $output =~ /(?i)Device Model:(.*)/g ) { + $IDs{'device_model'} = $1; + $IDs{'device_model'} =~ s/^\s+|\s+$//g; + } - while ( $output =~ /(?i)Model Number:(.*)/g ) { - $IDs{'model_number'} = $1; - $IDs{'model_number'} =~ s/^\s+|\s+$//g; - } + while ( $output =~ /(?i)Model Number:(.*)/g ) { + $IDs{'model_number'} = $1; + $IDs{'model_number'} =~ s/^\s+|\s+$//g; + } - while ( $output =~ /(?i)Firmware Version:(.*)/g ) { - $IDs{'fw_version'} = $1; - $IDs{'fw_version'} =~ s/^\s+|\s+$//g; - } + while ( $output =~ /(?i)Firmware Version:(.*)/g ) { + $IDs{'fw_version'} = $1; + $IDs{'fw_version'} =~ s/^\s+|\s+$//g; + } - $output = `$smartctl -H $disk`; - if ( $output =~ /SMART\ overall\-health\ self\-assessment\ test\ result\:\ PASSED/ ) { - $IDs{'health_pass'} = 1; - } + # mainly HP drives + while ( $output =~ /(?i)Vendor:(.*)/g ) { + $IDs{'vendor'} = $1; + $IDs{'vendor'} =~ s/^\s+|\s+$//g; + } + + # mainly HP drives + while ( $output =~ /(?i)Product:(.*)/g ) { + $IDs{'product'} = $1; + $IDs{'product'} =~ s/^\s+|\s+$//g; + } - $to_return->{data}{disks}{$disk_id} = \%IDs; + # mainly HP drives + while ( $output =~ /(?i)Revision:(.*)/g ) { + $IDs{'revision'} = $1; + $IDs{'revision'} =~ s/^\s+|\s+$//g; + } + + # figure out what to use for the max temp, if there is one + if ( $IDs{'190'} =~ /^\d+$/ ) { + $IDs{max_temp} = $IDs{'190'}; + } elsif ( $IDs{'194'} =~ /^\d+$/ ) { + $IDs{max_temp} = $IDs{'194'}; + } + if ( $IDs{'194'} =~ /^\d+$/ && defined( $IDs{max_temp} ) && $IDs{'194'} > $IDs{max_temp} ) { + $IDs{max_temp} = $IDs{'194'}; + } + $output = `$smartctl -H $disk`; + if ( $output =~ /SMART\ overall\-health\ self\-assessment\ test\ result\:\ PASSED/ ) { + $IDs{'health_pass'} = 1; + } elsif ( $output =~ /SMART\ Health\ Status\:\ OK/ ) { + $IDs{'health_pass'} = 1; + } + + if ( !$IDs{'health_pass'} ) { + $to_return->{data}{unhealthy}++; + } + } ## end else [ if ( $IDs{exit} != 0 ) ] + + # only bother to save this if useSN is not being used + if ( !$useSN ) { + $to_return->{data}{disks}{$disk_id} = \%IDs; + } } ## end foreach my $line (@disks) my $toReturn 
= $json->encode($to_return); @@ -519,14 +807,16 @@ if ( !$opts{p} ) { $toReturn = $toReturn . "\n"; } -if ($opts{Z}) { - my $compressed = encode_base64( gzip($toReturn) ); +if ( $opts{Z} ) { + my $toReturnCompressed; + gzip \$toReturn => \$toReturnCompressed; + my $compressed = encode_base64($toReturnCompressed); $compressed =~ s/\n//g; $compressed = $compressed . "\n"; if ( length($compressed) < length($toReturn) ) { - $toReturn=$compressed; + $toReturn = $compressed; } -} +} ## end if ( $opts{Z} ) if ( !$noWrite ) { open( my $writefh, ">", $cache ) or die "Can't open '" . $cache . "'"; From 1328a11f48a9ef28123f160e1d6d3786d0ae3bc7 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Wed, 28 Jun 2023 08:37:37 -0500 Subject: [PATCH 251/332] various smart-v1 fixes (#477) * use $sg_int when composing lines for cciss devs * more cleanup for cciss stuff for when it does not recognize a device * use -i with smart for the cciss -C test * add in virt checking * fix regex typos for virt check --- snmp/smart-v1 | 72 ++++++++++++++++++++++++++++++--------------------- 1 file changed, 43 insertions(+), 29 deletions(-) diff --git a/snmp/smart-v1 b/snmp/smart-v1 index d3b9bbdd6..9e58e1d96 100755 --- a/snmp/smart-v1 +++ b/snmp/smart-v1 @@ -269,22 +269,36 @@ if ( defined( $opts{g} ) ) { # second pass, putting the lines together my %current_disk; foreach my $arguments (@argumentsA) { + my $not_virt = 1; + + # check to see if we have a virtual device + my @virt_check = split( /\n/, `smartctl -i $arguments 2> /dev/null` ); + foreach my $virt_check_line (@virt_check) { + if ( $virt_check_line =~ /(?i)Product\:.*LOGICAL VOLUME/ ) { + $not_virt = 0; + } + } + my $name = $arguments; $name =~ s/ .*//; $name =~ s/\/dev\///; - if ( $found_disks_names{$name} == 0 ) { - # If no other devices, just name it after the base device. - $drive_lines = $drive_lines . $name . " " . $arguments . "\n"; - } else { - # if more than one, start at zero and increment, apennding comma number to the base device name - if ( defined( $current_disk{$name} ) ) { - $current_disk{$name}++; + # only add it if not a virtual RAID drive + # HP RAID virtual disks will show up with very basical but totally useless smart data + if ($not_virt) { + if ( $found_disks_names{$name} == 0 ) { + # If no other devices, just name it after the base device. + $drive_lines = $drive_lines . $name . " " . $arguments . "\n"; } else { - $current_disk{$name} = 0; + # if more than one, start at zero and increment, apennding comma number to the base device name + if ( defined( $current_disk{$name} ) ) { + $current_disk{$name}++; + } else { + $current_disk{$name} = 0; + } + $drive_lines = $drive_lines . $name . "," . $current_disk{$name} . " " . $arguments . "\n"; } - $drive_lines = $drive_lines . $name . "," . $current_disk{$name} . " " . $arguments . "\n"; - } + } ## end if ($not_virt) } ## end foreach my $arguments (@argumentsA) } ## end if ( $scan_modes->{'scan-open'} || $scan_modes...) @@ -333,25 +347,20 @@ if ( defined( $opts{g} ) ) { my $drive_count = 0; my $continue = 1; while ($continue) { - my $output = `$smartctl -A $device -d cciss,$drive_count 2> /dev/null`; + my $output = `$smartctl -i $device -d cciss,$drive_count 2> /dev/null`; if ( $? 
!= 0 ) { $continue = 0; } else { - $continue = 0; my $add_it = 0; - # if we have smart data for this device, process it - while ( $output =~ /(?i)START OF READ SMART DATA SECTION(.*)/g && !$continue ) { - $continue = 1; - my $id; - while ( $output =~ /(?i)Serial Number:(.*)/g ) { - $id = $1; - $id =~ s/^\s+|\s+$//g; - } - if ( defined($id) && !defined( $seen_lines->{$id} ) ) { - $add_it = 1; - $seen_lines->{$id} = 1; - } - } ## end while ( $output =~ /(?i)START OF READ SMART DATA SECTION(.*)/g...) + my $id; + while ( $output =~ /(?i)Serial Number:(.*)/g ) { + $id = $1; + $id =~ s/^\s+|\s+$//g; + } + if ( defined($id) && !defined( $seen_lines->{$id} ) ) { + $add_it = 1; + $seen_lines->{$id} = 1; + } if ( $continue && $add_it ) { $drive_lines = $drive_lines @@ -365,8 +374,7 @@ if ( defined( $opts{g} ) ) { $drive_count++; } ## end while ($continue) } else { - my $sg_drive_int = 0; - my $drive_count = 0; + my $drive_count = 0; # count the connector lines, this will make sure failed are founded as well while ( $output =~ /(connector +\d.*box +\d.*bay +\d.*)/g ) { if ( !defined( $seen_lines->{$1} ) ) { @@ -377,10 +385,16 @@ if ( defined( $opts{g} ) ) { my $drive_int = 0; while ( $drive_int < $drive_count ) { $drive_lines - = $drive_lines . $cciss . '0-' . $drive_int . ' ' . $device . ' -d cciss,' . $drive_int . "\n"; + = $drive_lines + . $cciss + . $sg_int . '-' + . $drive_int . ' ' + . $device + . ' -d cciss,' + . $drive_int . "\n"; $drive_int++; - } + } ## end while ( $drive_int < $drive_count ) } ## end else [ if ( $? != 0 && $output eq '' && !$opts{C})] $sg_int++; From 4bd6909f99ce92439594aba87aa48d50f2b98043 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Wed, 28 Jun 2023 11:23:39 -0500 Subject: [PATCH 252/332] add the ability to run tests on all specified devices via -t for smart-v1 (#478) --- snmp/smart-v1 | 56 ++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 55 insertions(+), 1 deletion(-) diff --git a/snmp/smart-v1 b/snmp/smart-v1 index 9e58e1d96..20da1a944 100755 --- a/snmp/smart-v1 +++ b/snmp/smart-v1 @@ -76,6 +76,7 @@ Switches: -g Guess at the config and print it to STDOUT -C Enable manual checking for guess and cciss. -S Set useSN to 0 when using -g +-t Run the specified smart self test on all the devices. -G Guess modes to use. This is a comma seperated list. Default :: scan-open,cciss-vol-status @@ -121,9 +122,11 @@ sub main::HELP_MESSAGE { -Z GZip+Base64 compress the results. -C Enable manual checking for guess and cciss. -S Set useSN to 0 when using -g +-t Run the specified smart self test on all the devices. -G Guess modes to use. This is a comma seperated list. Default :: scan-open,cciss-vol-status + Scan Modes: - scan :: Use "--scan" with smartctl. "scan-open" will take presidence. @@ -140,7 +143,7 @@ Scan Modes: #gets the options my %opts = (); -getopts( 'ugc:pZhvCSG', \%opts ); +getopts( 'ugc:pZhvCSGt:', \%opts ); if ( $opts{h} ) { &HELP_MESSAGE; @@ -472,10 +475,61 @@ while ( defined( $configA[$configA_int] ) ) { $configA_int++; } ## end while ( defined( $configA[$configA_int] ) ) +# +# +# run the specified self test on all disks if asked +# +# +if ( defined( $opts{t} ) ) { + + # make sure we have something that atleast appears sane for the test name + my $valid_tesks = { + 'offline' => 1, + 'short' => 1, + 'long' => 1, + 'conveyance' => 1, + 'afterselect,on' => 1, + }; + if ( !defined( $valid_tesks->{ $opts{t} } ) && $opts{t} !~ /select,(\d+[\-\+]\d+|next|next\+\d+|redo\+\d+)/ ) { + print '"' . $opts{t} . 
"\" does not appear to be a valid test\n"; + exit 1; + } + + print "Running the SMART $opts{t} on all devices in the config...\n\n"; + + foreach my $line (@disks) { + my $disk; + my $name; + if ( $line =~ /\ / ) { + ( $name, $disk ) = split( /\ /, $line, 2 ); + } else { + $disk = $line; + $name = $line; + } + if ( $disk !~ /\// ) { + $disk = '/dev/' . $disk; + } + + print "\n------------------------------------------------------------------\nDoing " + . $smartctl . ' -t ' + . $opts{t} . ' ' + . $disk + . " ...\n\n"; + print `$smartctl -t $opts{t} $disk` . "\n"; + + } ## end foreach my $line (@disks) + + exit 0; +} ## end if ( defined( $opts{t} ) ) + #if set to 1, no cache will be written and it will be printed instead my $noWrite = 0; +# +# # if no -u, it means we are being called from snmped +# +# if ( !defined( $opts{u} ) ) { # if the cache file exists, print it, otherwise assume one is not being used if ( -f $cache ) { From 6af12e74b861004fac4be7561c56a8c0b322d4b3 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Sun, 2 Jul 2023 19:01:13 -0500 Subject: [PATCH 253/332] smart-v1: add the ability to run tests on all configured devs via the extend and fix when useSN=1 (#479) * add the ability to run tests on all specified devices via -t * properly save the results when exit is non-zero and useSN=1 --- snmp/smart-v1 | 2 ++ 1 file changed, 2 insertions(+) diff --git a/snmp/smart-v1 b/snmp/smart-v1 index 20da1a944..0df9abd73 100755 --- a/snmp/smart-v1 +++ b/snmp/smart-v1 @@ -866,6 +866,8 @@ foreach my $line (@disks) { # only bother to save this if useSN is not being used if ( !$useSN ) { $to_return->{data}{disks}{$disk_id} = \%IDs; + } elsif ( $IDs{exit} == 0 ) { + $to_return->{data}{disks}{$disk_id} = \%IDs; } } ## end foreach my $line (@disks) From 4778070b90f31d6c249dbcfb181ae05911ce232c Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Mon, 3 Jul 2023 15:49:18 -0500 Subject: [PATCH 254/332] smart-v1: send useSN value along in the data (#480) * send useSN along in the data * remove a extra , --- snmp/smart-v1 | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/snmp/smart-v1 b/snmp/smart-v1 index 0df9abd73..91c2710f9 100755 --- a/snmp/smart-v1 +++ b/snmp/smart-v1 @@ -187,13 +187,6 @@ if ( !$opts{g} ) { } } -my $to_return = { - data => { disks => {}, exit_nonzero => 0, unhealthy => 0, }, - version => 1, - error => 0, - errorString => '', -}; - # # # guess if asked @@ -550,6 +543,12 @@ if ( !defined( $opts{u} ) ) { # Process each disk # # +my $to_return = { + data => { disks => {}, exit_nonzero => 0, unhealthy => 0, useSN => $useSN }, + version => 1, + error => 0, + errorString => '', +}; foreach my $line (@disks) { my $disk; my $name; From 998e24fabcbdf2969300b47371002f6f92353297 Mon Sep 17 00:00:00 2001 From: "Zane C. 
Bowers-Hadley" Date: Tue, 4 Jul 2023 23:32:30 -0500 Subject: [PATCH 255/332] add the logsize extend (#481) * add initial logsize extend * add long chomp * pretty it and now make save the return file even if -b was not specified * errpr->error * clean up the code a bit more * more cleanup * add some more set bits * de-fuck it * add total size as well as the max and min stats between alls ets * add no_minus_d * no_minus_d fix * add .json * add log * minor logic tweaks * make no_minus_d the default * completely rework it and make it overall more stable * fix set size handling * no longer include no_minus_d and also fix date chomping * add old cache file removal and docs --- snmp/logsize | 489 +++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 489 insertions(+) create mode 100755 snmp/logsize diff --git a/snmp/logsize b/snmp/logsize new file mode 100755 index 000000000..cecb7ea31 --- /dev/null +++ b/snmp/logsize @@ -0,0 +1,489 @@ +#!/usr/bin/env perl + +=head1 NAME + +logsize - LinbreNMS JSON extend for getting log file size monitoring. + +=head1 SYNOPSIS + +logsize [B<-b>] [B<-f> ] + +=head1 SWITCHES + +=head2 -b + +Compress the return via GZip+Base64. + +=head2 -f + +The config file to use. + +=head1 SETUP + +Install the depends. + + # FreeBSD + pkg install p5-File-Find-Rule p5-JSON p5-TOML p5-Time-Piece p5-MIME-Base64 p5-File-Slurp p5-Statistics-Lite + # Debian + apt-get install cpanminus + cpanm File::Find::Rule JSON TOML Time::Piece MIME::Base64 File::Slurp Statistics::Lite + +Create the cache dir, by default "/var/cache/logsize_extend/". + +Either make sure SNMPD can write to the cache dir, by default "/var/cache/logsize_extend/", or +set it up in cron and make sure SNMPD can write to it. + +Then set it up in SNMPD. + + + # if running it via cron + extend logsize /usr/local/etc/snmp/extends/logsize -b + + # if using cron + extend logsize /bin/cat /var/cache/logsize_extend/extend_return + +=head1 CONFIG + +The config format used is TOML. + +Please note that variable part of log_end and log_chomp is dynamically generated at +run time only if those various are undef. log_end and log_chomp if you want to custamize +them are better placed in dir specific sections. + +In general best to leave these defaults alone. + + - .cache_dir :: The cache dir to use. + - Default :: /var/cache/logsize_extend/ + + - .log_end :: Log file ends to look for. $today_name is '%F' and + $today_name_alt1 is '%Y%m%d'. + - Default :: [ '*.log', '*.today', '*.json', '*log', + '*-$today_name', '*-$today_name_alt1' ] + + - .max_age :: How long to keep a file in the cache in days. + - Default :: 30 + + - .log_chomp :: The regexp to use for chomping the the logfiles to get the base + log file name to use for reporting. $today_name is '%F' and + $today_name_alt1 is '%Y%m%d'. + - Default :: ((\-\d\d\d\d\d\d\d\d)*\.log|\.today|\.json|\-$today_name|\-$today_name_alt1)$ + +The log specific sections resize under .set so if we want to create a set named var_log, the hash +would be .set.var_log . + + [sets.var_log] + dir="/var/log/" + +Sets inherit all the configured .log_end and the .log_chomp variables. Each set must have +the value dir defined. + + - .sets.*.dir :: The directory to look under for logs. + - Default :: undef + +So if we want to create a set named foobar that looks under /var/log/foo for files ending in foo or bar, +it would be like below. + + [sets.foobar] + dir="/var/log/foo/" + log_end=["*.foo", "*.bar"] + log_chomp="\.(foo|bar)$" + +Multiple sets may be defined. 
+Multiple sets may be defined. Below creates var_log, suricata, and suricata_flows.
+
+    [sets.var_log]
+    dir="/var/log/"
+    [sets.suricata]
+    dir="/var/log/suricata/"
+    [sets.suricata_flows]
+    dir="/var/log/suricata/flows/current"
+
+=head1 RETURNED DATA
+
+This is in reference to .data in the returned JSON.
+
+ - .failed_sets :: A hash where the keys are the names of the failed sets
+       and values are the error in question.
+ - .max :: Max size of all log files.
+ - .mean :: Mean size of all log files.
+ - .median :: Median size of all log files.
+ - .mode :: Mode size of all log files.
+ - .min :: Min size of all log files.
+ - .sets.*.files :: A hash where the keys are the names of the log files found for the current
+       set and the value is the size of the file.
+ - .sets.*.max :: Max size of log files in the current set.
+ - .sets.*.mean :: Mean size of log files in the current set.
+ - .sets.*.median :: Median size of log files in the current set.
+ - .sets.*.min :: Min size of log files in the current set.
+ - .sets.*.mode :: Mode size of log files in the current set.
+ - .sets.*.size :: Total size of the current set.
+ - .sets.*.unseen :: A list of files seen in the past 7 days but not currently present.
+ - .size :: Total size of all sets.
+
+=cut
+
+use warnings;
+use strict;
+use File::Find::Rule;
+use JSON;
+use Getopt::Std;
+use TOML;
+use Time::Piece;
+use MIME::Base64;
+use IO::Compress::Gzip qw(gzip $GzipError);
+use File::Slurp;
+use Statistics::Lite qw(:all);
+
+$Getopt::Std::STANDARD_HELP_VERSION = 1;
+
+sub main::VERSION_MESSAGE {
+    print "LibreNMS logsize extend 0.0.1\n";
+}
+
+sub main::HELP_MESSAGE {
+    print '
+
+-f   Path to the config file.
+     Default :: /usr/local/etc/logsize.conf
+
+-b   Gzip+Base64 compress the output.
+';
+}
+
+my $return_json = {
+    error       => 0,
+    errorString => '',
+    version     => 1,
+    data        => {
+        sets        => {},
+        failed_sets => {},
+        max         => undef,
+        mean        => undef,
+        median      => undef,
+        mode        => undef,
+        min         => undef,
+        size        => 0,
+    },
+};
+
+# get current time and time stamp of today
+my $t               = localtime;
+my $today_name      = $t->strftime('%F');
+my $today_name_alt1 = $t->strftime('%Y%m%d');
+
+#gets the options
+my %opts = ();
+getopts( 'f:b', \%opts );
+if ( !defined( $opts{f} ) ) {
+    $opts{f} = '/usr/local/etc/logsize.conf';
+}
+
+# if the config does not exist or is not readable, no point in continuing
+if ( !-f $opts{f} ) {
+    $return_json->{error}       = 1;
+    $return_json->{errorString} = $opts{f} . ' is not a file or does not exist';
+    print encode_json($return_json) . "\n";
+    exit 1;
+} elsif ( !-r $opts{f} ) {
+    $return_json->{error}       = 2;
+    $return_json->{errorString} = $opts{f} . ' is not readable';
+    print encode_json($return_json) . "\n";
+    exit 2;
+}
+
+# reads in the config
+my $config;
+my $err;
+eval {
+    my $raw_toml = read_file( $opts{f} );
+    ( $config, $err ) = from_toml($raw_toml);
+};
+if ($@) {
+    $return_json->{error}       = 3;
+    $return_json->{errorString} = $opts{f} . ' errored reading or parsing... ' . $@;
+    print encode_json($return_json) . "\n";
+    exit 3;
+} elsif ( !$config ) {
+    $return_json->{error}       = 4;
+    $return_json->{errorString} = $opts{f} . ' errored parsing... ' . $err;
+    print encode_json($return_json) . "\n";
+    exit 4;
+}
+
+# can't do anything if there are no sets
+if ( !defined( $config->{sets} ) ) {
+    $return_json->{error}       = 5;
+    $return_json->{errorString} = $opts{f} . ' does not contain any defined sets';
+    print encode_json($return_json) .
"\n"; + exit 5; +} + +# set the default cache dir +if ( !defined( $config->{cache_dir} ) ) { + $config->{cache_dir} = '/var/cache/logsize_extend/'; +} + +# make sure we have something we can use for log end +if ( !defined( $config->{log_end} ) ) { + $config->{log_end} = [ '*.log', '*.today', '*.json', '*log', '*-' . $today_name, '*-' . $today_name_alt1 ]; +} else { + if ( ref( $config->{log_end} ) ne 'ARRAY' ) { + $return_json->{error} = 8; + $return_json->{errorString} = 'The cache_dir, "' . $config->{cache_dir} . '", is not a '; + print encode_json($return_json) . "\n"; + exit 8; + } +} + +# set the default log chomp +if ( !defined( $config->{log_chomp} ) ) { + $config->{log_chomp} + = '((\-\d\d\d\d\d\d\d\d)*\.log|\.today|\.json|\-' . $today_name . '|\-' . $today_name_alt1 . ')$'; +} + +# how long to keep a file in the cache +if ( !defined( $config->{max_age} ) ) { + $config->{max_age} = 30; +} + +# if it exists, make sure it is a directory +if ( -e $config->{cache_dir} && !-d $config->{cache_dir} ) { + $return_json->{error} = 6; + $return_json->{errorString} = 'The cache_dir, "' . $config->{cache_dir} . '", is not a '; + print encode_json($return_json) . "\n"; + exit 6; +} elsif ( !-e $config->{cache_dir} ) { + eval { mkdir( $config->{cache_dir} ) or die('failed'); }; + if ($@) { + $return_json->{error} = 7; + $return_json->{errorString} = 'The cache_dir, "' . $config->{cache_dir} . '", could not be created. '; + print encode_json($return_json) . "\n"; + exit 7; + } +} + +## +## load the cache now +## + +# gets time objects for now and a day ago +my $t_minus_1d = localtime; +my $t_minus_2d = localtime; +my $t_minus_3d = localtime; +my $t_minus_4d = localtime; +my $t_minus_5d = localtime; +my $t_minus_6d = localtime; +my $t_minus_7d = localtime; +$t_minus_1d -= 86400; +$t_minus_2d -= ( 86400 * 2 ); +$t_minus_3d -= ( 86400 * 3 ); +$t_minus_4d -= ( 86400 * 4 ); +$t_minus_5d -= ( 86400 * 5 ); +$t_minus_6d -= ( 86400 * 6 ); +$t_minus_7d -= ( 86400 * 7 ); + +my $today_cache_file = $config->{cache_dir} . '/' . $today_name; + +my $today_minus_1d_name = $t_minus_1d->strftime('%F'); +my $today_minus_2d_name = $t_minus_2d->strftime('%F'); +my $today_minus_3d_name = $t_minus_3d->strftime('%F'); +my $today_minus_4d_name = $t_minus_4d->strftime('%F'); +my $today_minus_5d_name = $t_minus_5d->strftime('%F'); +my $today_minus_6d_name = $t_minus_6d->strftime('%F'); +my $today_minus_7d_name = $t_minus_7d->strftime('%F'); + +my $minus_d_hash = { + today_minus_1d_file => $config->{cache_dir} . '/' . $today_minus_1d_name, + today_minus_2d_file => $config->{cache_dir} . '/' . $today_minus_2d_name, + today_minus_3d_file => $config->{cache_dir} . '/' . $today_minus_3d_name, + today_minus_4d_file => $config->{cache_dir} . '/' . $today_minus_4d_name, + today_minus_5d_file => $config->{cache_dir} . '/' . $today_minus_5d_name, + today_minus_6d_file => $config->{cache_dir} . '/' . $today_minus_6d_name, + today_minus_7d_file => $config->{cache_dir} . '/' . $today_minus_7d_name, +}; + +my $today_cache = { sets => {} }; + +my $today_minus_cache = {}; +my @minus_d = ( '1d', '2d', '3d', '4d', '5d', '6d', '7d' ); +foreach my $d (@minus_d) { + eval { $today_minus_cache->{$d} = decode_json( read_file( $minus_d_hash->{ 'today_minus_' . $d . 
'_file' } ) ); }; + if ($@) { + $today_minus_cache->{$d} = { sets => {} }; + } +} + +## +## process each set +## +my @sets = keys( %{ $config->{sets} } ); +my $found_sets = 0; +my @set_sizes; +foreach my $set (@sets) { + + # if any set fails, add it to the list of failed sets + eval { + if ( ref( $config->{sets}{$set} ) ne 'HASH' ) { + die( 'set "' . $set . '" is a ' . ref( $config->{sets}{$set} ) . ' and not a HASH' ); + } + if ( !defined( $config->{sets}{$set}{dir} ) ) { + die( 'set "' . $set . '" has no directory specified' ); + } + + if ( !defined( $config->{sets}{$set}{log_end} ) ) { + $config->{sets}{$set}{log_end} = $config->{log_end}; + } + + if ( !defined( $config->{sets}{$set}{log_chomp} ) ) { + $config->{sets}{$set}{log_chomp} = $config->{log_chomp}; + } + my $chomp = $config->{sets}{$set}{log_chomp}; + + my @files = File::Find::Rule->canonpath()->maxdepth(1)->file()->name( @{ $config->{sets}{$set}{log_end} } ) + ->in( $config->{sets}{$set}{dir} ); + + $return_json->{data}{sets}{$set} = { + files => {}, + max => undef, + mean => undef, + median => undef, + mode => undef, + min => undef, + size => 0, + unseen => [], + }; + + $today_cache->{sets}{$set}{files} = {}; + + # will later be used for regexp for chomping the start of the full path + my $quoted_dir = quotemeta( $config->{sets}{$set}{dir} ); + + my %m_times; + my %seen; + my %log_sizes; # make sure we don't have any twice + foreach my $log (@files) { + my ( $dev, $ino, $mode, $nlink, $uid, $gid, $rdev, $size, $atime, $mtime, $ctime, $blksize, $blocks ) + = stat($log); + + $log =~ s/^$quoted_dir//; + $log =~ s/^\///; + $log =~ s/$chomp//; + + # if we find a log twice, make sure it is the new one + if ( !defined( $m_times{$log} ) || $mtime > $m_times{$log} ) { + $seen{$log} = 1; + $m_times{$log} = $mtime; + $log_sizes{$log} = $size; + $return_json->{data}{sets}{$set}{files}{$log} = $size; + + # save the basic info for currently + $today_cache->{sets}{$set}{files}{$log} = { + dev => $dev, + ino => $ino, + rdev => $rdev, + size => $size, + mode => $mode, + nlink => $nlink, + uid => $uid, + gid => $gid, + atime => $atime, + mtime => $mtime, + ctime => $ctime, + blksize => $blksize, + blocks => $blocks + }; + + } ## end if ( !defined( $m_times{$log} ) || $mtime ...) 
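+            # note: at this point $log has been chomped down to its base name,
+            # so two files like "app-20230704.log" and "app.log" can map to the
+            # same name; the mtime check above keeps whichever file was
+            # modified most recently (the file names here are hypothetical)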
+ } ## end foreach my $log (@files) + + # compute the stats for log sizes + my @size_keys = keys(%log_sizes); + my @sizes; + foreach my $item (@size_keys) { + push( @sizes, $return_json->{data}{sets}{$set}{files}{$item} ); + } + $return_json->{data}{sets}{$set}{max} = max(@sizes); + $return_json->{data}{sets}{$set}{mean} = mean(@sizes); + $return_json->{data}{sets}{$set}{median} = median(@sizes); + $return_json->{data}{sets}{$set}{mode} = mode(@sizes); + $return_json->{data}{sets}{$set}{min} = min(@sizes); + $return_json->{data}{sets}{$set}{size} = sum(@sizes); + + push( @set_sizes, $return_json->{data}{sets}{$set}{size} ); + + # looks for missing files and adds them to unseen + my %unseen; + foreach my $d (@minus_d) { + my @old_logs = keys( %{ $today_minus_cache->{$d}{sets}{$set}{files} } ); + foreach my $item (@old_logs) { + if ( !defined( $return_json->{data}{sets}{$set}{files}{$item} ) && !defined( $unseen{$item} ) ) { + $unseen{$item} = 1; + push( @{ $return_json->{data}{sets}{$set}{unseen} }, $item ); + } + + } + } ## end foreach my $d (@minus_d) + }; + + # if the above died, add it to a list of failed sets + if ($@) { + $return_json->{data}{failed_sets}{$set} = $@; + } + + $found_sets++; +} ## end foreach my $set (@sets) + +# compute the over all stats +$return_json->{data}{max} = max(@set_sizes); +$return_json->{data}{mean} = mean(@set_sizes); +$return_json->{data}{median} = median(@set_sizes); +$return_json->{data}{mode} = mode(@set_sizes); +$return_json->{data}{min} = min(@set_sizes); +$return_json->{data}{size} = sum(@set_sizes); + +# if this is not atleast one, then no sets are defined, even if the hash exists +if ( $found_sets < 1 ) { + $return_json->{error} = 8; + $return_json->{errorString} = $opts{f} . ' lacks defined log sets'; + print encode_json($return_json) . "\n"; + exit 8; +} + +## +## encode the return and print it +## +my $return_string = encode_json($return_json) . "\n"; +eval { write_file( $config->{cache_dir} . "/extend_raw", $return_string ); }; +if ( !$opts{b} ) { + eval { write_file( $config->{cache_dir} . "/extend_return", $return_string ); }; + print $return_string; +} else { + my $toReturnCompressed; + gzip \$return_string => \$toReturnCompressed; + my $compressed = encode_base64($toReturnCompressed); + $compressed =~ s/\n//g; + $compressed = $compressed . "\n"; + if ( length($compressed) > length($return_string) ) { + eval { write_file( $config->{cache_dir} . "/extend_return", $return_string ); }; + print $return_string; + } else { + eval { write_file( $config->{cache_dir} . "/extend_return", $compressed ); }; + print $compressed; + } +} ## end else [ if ( !$opts{b} ) ] + +## +## save the cache +## +eval { write_file( $today_cache_file, encode_json($today_cache) . "\n" ); }; + +## +## remove old cache files +## +my $older_than = $t->epoch - ( $config->{max_age} * 86400 ); +my @old_cache_files + = File::Find::Rule->canonpath()->maxdepth(1)->file()->mtime( '<' . $older_than )->in( $config->{cache_dir} ); + +#use Data::Dumper; print Dumper(@old_cache_files); +foreach my $old_file (@old_cache_files) { + unlink($old_file); +} From 4e78072daf24066ec9d176d299f0a364d205f975 Mon Sep 17 00:00:00 2001 From: "Zane C. 
Bowers-Hadley" Date: Thu, 6 Jul 2023 13:27:56 -0500 Subject: [PATCH 256/332] smart-v1: add handling unknown cciss devs (#482) * add -u option * add a small fix for when smart fails and useSN is false * some more possible cciss error handling * rename -u for with guess to -U to avoid conflict * add U to getopts --- snmp/smart-v1 | 29 +++++++++++++++++++++++------ 1 file changed, 23 insertions(+), 6 deletions(-) diff --git a/snmp/smart-v1 b/snmp/smart-v1 index 91c2710f9..fab2af3bd 100755 --- a/snmp/smart-v1 +++ b/snmp/smart-v1 @@ -77,6 +77,7 @@ Switches: -C Enable manual checking for guess and cciss. -S Set useSN to 0 when using -g -t Run the specified smart self test on all the devices. +-U When calling cciss_vol_status, call it with -u. -G Guess modes to use. This is a comma seperated list. Default :: scan-open,cciss-vol-status @@ -89,7 +90,8 @@ Guess Modes: - cciss-vol-status :: Freebsd/Linux specific and if it sees /dev/sg0(on Linux) or /dev/ciss0(on FreebSD) it will attempt to find drives via cciss-vol-status, and then optionally checking for disks via smrtctl if -C is given. Should be noted - though that -C will not find drives that are currently missing/failed. + though that -C will not find drives that are currently missing/failed. If -U is given, + cciss_vol_status will be called with -u. =cut @@ -111,7 +113,7 @@ my $useSN = 1; $Getopt::Std::STANDARD_HELP_VERSION = 1; sub main::VERSION_MESSAGE { - print "SMART SNMP extend 0.2.0\n"; + print "SMART SNMP extend 0.3.0\n"; } sub main::HELP_MESSAGE { @@ -123,6 +125,7 @@ sub main::HELP_MESSAGE { -C Enable manual checking for guess and cciss. -S Set useSN to 0 when using -g -t Run the specified smart self test on all the devices. +-U When calling cciss_vol_status, call it with -u. -G Guess modes to use. This is a comma seperated list. Default :: scan-open,cciss-vol-status @@ -136,14 +139,15 @@ Scan Modes: - cciss-vol-status :: Freebsd/Linux specific and if it sees /dev/sg0(on Linux) or /dev/ciss0(on FreebSD) it will attempt to find drives via cciss-vol-status, and then optionally checking for disks via smrtctl if -C is given. Should be noted - though that -C will not find drives that are currently missing/failed. + though that -C will not find drives that are currently missing/failed. If -U is given, + cciss_vol_status will be called with -u. '; } ## end sub main::HELP_MESSAGE #gets the options my %opts = (); -getopts( 'ugc:pZhvCSGt:', \%opts ); +getopts( 'ugc:pZhvCSGt:U', \%opts ); if ( $opts{h} ) { &HELP_MESSAGE; @@ -314,6 +318,11 @@ if ( defined( $opts{g} ) ) { $cciss = 'sg'; } + my $uarg = ''; + if ( $opts{U} ) { + $uarg = '-u'; + } + # generate the initial device path that will be checked my $sg_int = 0; my $device = '/dev/' . $cciss . $sg_int; @@ -335,7 +344,7 @@ if ( defined( $opts{g} ) ) { } ## end if ( -e $device ) my $seen_lines = {}; while ( -e $device && $sg_process ) { - my $output = `cciss_vol_status -V $device 2> /dev/null`; + my $output = `cciss_vol_status -V $uarg $device 2> /dev/null`; if ( $? != 0 && $output eq '' && !$opts{C} ) { # just empty here as we just want to skip it if it fails and there is no C # warning is above @@ -561,6 +570,7 @@ foreach my $line (@disks) { if ( $disk !~ /\// ) { $disk = '/dev/' . 
$disk; } + my $output = `$smartctl -A $disk`; my %IDs = ( '5' => 'null', @@ -865,9 +875,16 @@ foreach my $line (@disks) { # only bother to save this if useSN is not being used if ( !$useSN ) { $to_return->{data}{disks}{$disk_id} = \%IDs; - } elsif ( $IDs{exit} == 0 ) { + } elsif ( $IDs{exit} == 0 && defined($disk_id) ) { $to_return->{data}{disks}{$disk_id} = \%IDs; } + + # smartctl will in some cases exit zero when it can't pull data for cciss + # so if we get a zero exit, but no serial then it means something errored + # and the device is likely dead + if ( $IDs{exit} == 0 && !defined( $IDs{serial} ) ) { + $to_return->{data}{unhealthy}++; + } } ## end foreach my $line (@disks) my $toReturn = $json->encode($to_return); From f846574884e556fa3f2c84fb805886ec0b0845c7 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Thu, 6 Jul 2023 16:36:29 -0500 Subject: [PATCH 257/332] smart-v1: fix handling of cciss when it a disk is being replaced (#483) * only add connectors that have not been seen for cciss * rework seen logic to also have a ignore logic as well for cciss * add a comment explaining why it is being ignored * minor version bump --- snmp/smart-v1 | 26 ++++++++++++++++++++------ 1 file changed, 20 insertions(+), 6 deletions(-) diff --git a/snmp/smart-v1 b/snmp/smart-v1 index fab2af3bd..545282b99 100755 --- a/snmp/smart-v1 +++ b/snmp/smart-v1 @@ -113,7 +113,7 @@ my $useSN = 1; $Getopt::Std::STANDARD_HELP_VERSION = 1; sub main::VERSION_MESSAGE { - print "SMART SNMP extend 0.3.0\n"; + print "SMART SNMP extend 0.3.1\n"; } sub main::HELP_MESSAGE { @@ -342,7 +342,8 @@ if ( defined( $opts{g} ) ) { . "' is failing\n"; } ## end if ( $? != 0 && !$opts{C} ) } ## end if ( -e $device ) - my $seen_lines = {}; + my $seen_lines = {}; + my $ignore_lines = {}; while ( -e $device && $sg_process ) { my $output = `cciss_vol_status -V $uarg $device 2> /dev/null`; if ( $? 
!= 0 && $output eq '' && !$opts{C} ) { @@ -381,12 +382,25 @@ if ( defined( $opts{g} ) ) { } else { my $drive_count = 0; # count the connector lines, this will make sure failed are founded as well - while ( $output =~ /(connector +\d.*box +\d.*bay +\d.*)/g ) { - if ( !defined( $seen_lines->{$1} ) ) { - $seen_lines->{$1} = 1; + my $seen_conectors = {}; + while ( $output =~ /(connector +\d+[IA]\ +box +\d+\ +bay +\d+.*)/g ) { + my $cciss_drive_line = $1; + my $connector = $cciss_drive_line; + $connector =~ s/(.*\ bay +\d+).*/$1/; + if ( !defined( $seen_lines->{$cciss_drive_line} ) + && !defined( $seen_conectors->{$connector} ) + && !defined( $ignore_lines->{$cciss_drive_line} ) ) + { + $seen_lines->{$cciss_drive_line} = 1; + $seen_conectors->{$connector} = 1; $drive_count++; + } else { + # going to be a connector we've already seen + # which will happen when it is processing replacement drives + # so save this as a device to ignore + $ignore_lines->{$cciss_drive_line} = 1; } - } + } ## end while ( $output =~ /(connector +\d+[IA]\ +box +\d+\ +bay +\d+.*)/g) my $drive_int = 0; while ( $drive_int < $drive_count ) { $drive_lines From 2c6ae8b659d2e7a4270f3f66bc7bc76fb8c4f49a Mon Sep 17 00:00:00 2001 From: SourceDoctor Date: Wed, 2 Aug 2023 23:21:39 +0200 Subject: [PATCH 258/332] check for puppet v7 summary file (#485) --- snmp/puppet_agent.py | 1 + 1 file changed, 1 insertion(+) diff --git a/snmp/puppet_agent.py b/snmp/puppet_agent.py index 8afa87d1e..a44683451 100755 --- a/snmp/puppet_agent.py +++ b/snmp/puppet_agent.py @@ -21,6 +21,7 @@ summary_files = [ + "/var/cache/puppet/public/last_run_summary.yaml", "/var/cache/puppet/state/last_run_summary.yaml", "/opt/puppetlabs/puppet/cache/state/last_run_summary.yaml", "/opt/puppetlabs/puppet/public/last_run_summary.yaml", From 3af477845fc7ffbf9a7c182363d05a038400a008 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Thu, 21 Sep 2023 23:46:40 -0500 Subject: [PATCH 259/332] add new dhcp extend (#488) --- snmp/dhcp | 319 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 319 insertions(+) create mode 100755 snmp/dhcp diff --git a/snmp/dhcp b/snmp/dhcp new file mode 100755 index 000000000..67ad5dc2f --- /dev/null +++ b/snmp/dhcp @@ -0,0 +1,319 @@ +#!/usr/bin/env perl +#Copyright (c) 2023, Zane C. Bowers-Hadley +#All rights reserved. +# +#Redistribution and use in source and binary forms, with or without modification, +#are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +#ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +#WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+#IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+#INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+#BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+#LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+#OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+#THE POSSIBILITY OF SUCH DAMAGE.
+
+=head1 NAME
+
+dhcp - LibreNMS ISC-DHCPD stats extend
+
+=head1 SYNOPSIS
+
+dhcp [B<-Z>] [B<-d>] [B<-p>] [B<-l> <lease file>]
+
+=head1 FLAGS
+
+=head2 -l
+
+Path to the lease file.
+
+=head2 -Z
+
+Enable GZip+Base64 compression.
+
+=head2 -d
+
+Do not de-dup.
+
+This is done via making sure the combination of UID, CLTT, IP, HW address,
+client hostname, and state are unique.
+
+=head1 Return JSON Data Hash
+
+ - .all_networks.cur :: Current leases for all networks
+ - .all_networks.max :: Max possible leases for all networks
+ - .all_networks.percent :: Percent of total pool usage.
+
+ - .networks.[].cur :: Current leases for the network.
+ - .networks.[].max :: Max possible leases for the network.
+ - .networks.[].network :: Subnet of the network.
+ - .networks.[].percent :: Percent of network usage.
+
+ - .pools.[].cur :: Current leases for the pool.
+ - .pools.[].max :: Max possible leases for the pool.
+ - .pools.[].first_ip :: First IP of the pool.
+ - .pools.[].last_ip :: Last IP of the pool.
+ - .pools.[].percent :: Percent of pool usage.
+
+ - .found_leases.[].client_hostname :: Hostname the client passed during the request.
+ - .found_leases.[].cltt :: The CLTT for the request.
+ - .found_leases.[].ends :: Unix time of when the lease ends.
+ - .found_leases.[].hw_address :: Hardware address for the client that made the request.
+ - .found_leases.[].ip :: IP address of the client that made the request.
+ - .found_leases.[].starts :: Unix time of when the lease starts.
+ - .found_leases.[].state :: State of the lease.
+ - .found_leases.[].uid :: UID passed during the request.
+
+=cut
+
+use strict;
+use warnings;
+use Getopt::Std;
+use JSON -convert_blessed_universally;
+use MIME::Base64;
+use IO::Compress::Gzip qw(gzip $GzipError);
+use Net::ISC::DHCPd::Leases;
+
+my %opts;
+getopts( 'l:Zdp', \%opts );
+
+if ( !defined( $opts{l} ) ) {
+    # if freebsd, set it to the default path as used by the version installed via ports
+    #
+    # additional elsifs should be added as they become known, but default works for most Linux distros
+    if ( $^O eq 'freebsd' ) {
+        $opts{l} = '/var/db/dhcpd/dhcpd.leases';
+    } else {
+        $opts{l} = '/var/lib/dhcpd/dhcpd.leases';
+    }
+} ## end if ( !defined( $opts{l} ) )
+
+$Getopt::Std::STANDARD_HELP_VERSION = 1;
+
+sub main::VERSION_MESSAGE {
+    print "LibreNMS ISC-DHCPD extend 0.0.1\n";
+}
+
+sub main::HELP_MESSAGE {
+    print '
+-l   Path to the lease file.
+-Z   Enable GZip+Base64 compression.
+-d   Do not de-dup.
+';
+
+    exit;
+}
+
+my $to_return = {
+    data => {
+        lease_file   => $opts{l},
+        found_leases => [],
+        leases       => {
+            abandoned => 0,
+            active    => 0,
+            backup    => 0,
+            bootp     => 0,
+            expired   => 0,
+            free      => 0,
+            released  => 0,
+            reset     => 0,
+            total     => 0,
+        },
+        networks     => [],
+        pools        => [],
+        all_networks => {},
+    },
+    version     => 3,
+    error       => 0,
+    errorString => '',
+};
+
+if ( !-f $opts{l} || !-r $opts{l} ) {
+    $to_return->{error}       = 2;
+    $to_return->{errorString} = '"' . $opts{l} .
'" does not exist, is not a file, or is not readable'; + print decode_json($to_return) . "\n"; + exit; +} + +my $found_leases = {}; + +my $leases; +eval { + my $leases_obj = Net::ISC::DHCPd::Leases->new( file => $opts{l} ); + $leases_obj->parse; + $leases = $leases_obj->leases; +}; +if ($@) { + $to_return->{error} = 1; + $to_return->{errorString} = 'Reading leases failed... ' . $@; + print decode_json($to_return) . "\n"; + exit; +} + +use Data::Dumper; + +foreach my $lease ( @{$leases} ) { + if ( !defined( $lease->{uid} ) ) { + $lease->{uid} = ''; + } + if ( !defined( $lease->{vendor_class_identifier} ) ) { + $lease->{vendor_class_identifier} = ''; + } + if ( !defined( $lease->{cltt} ) ) { + $lease->{cltt} = ''; + } + if ( !defined( $lease->{state} ) ) { + $lease->{state} = ''; + } + if ( !defined( $lease->{ip_address} ) ) { + $lease->{ip_address} = ''; + } + if ( !defined( $lease->{hardware_address} ) ) { + $lease->{hardware_address} = ''; + } + if ( !defined( $lease->{client_hostname} ) ) { + $lease->{client_hostname} = ''; + } +} ## end foreach my $lease ( @{$leases} ) + +# dedup or copy lease info as is +if ( !$opts{d} ) { + foreach my $lease ( @{$leases} ) { + $found_leases->{ $lease->{uid} + . $lease->{cltt} + . $lease->{uid} + . $lease->{ip_address} + . $lease->{client_hostname} + . $lease->{state} + . $lease->{hardware_address} } = $lease; + } + foreach my $lease_key ( keys( %{$found_leases} ) ) { + push( + @{ $to_return->{data}{found_leases} }, + { + uid => $found_leases->{$lease_key}{uid}, + cltt => $found_leases->{$lease_key}{cltt}, + state => $found_leases->{$lease_key}{state}, + ip => $found_leases->{$lease_key}{ip_address}, + hw_address => $found_leases->{$lease_key}{hardware_address}, + starts => $found_leases->{$lease_key}{starts}, + ends => $found_leases->{$lease_key}{ends}, + client_hostname => $found_leases->{$lease_key}{client_hostname}, + } + ); + } ## end foreach my $lease_key ( keys( %{$found_leases} ...)) +} else { + foreach my $lease ( @{$leases} ) { + push( + @{ $to_return->{data}{found_leases} }, + { + uid => $lease->{uid}, + cltt => $lease->{cltt}, + state => $lease->{state}, + ip => $lease->{ip_address}, + hw_address => $lease->{hardware_address}, + starts => $lease->{starts}, + ends => $lease->{ends}, + client_hostname => $lease->{client_hostname}, + } + ); + } ## end foreach my $lease ( @{$leases} ) +} ## end else [ if ( !$opts{d} ) ] + +#print Dumper($leases); + +# total the lease info types +foreach my $lease ( @{ $to_return->{data}{found_leases} } ) { + $to_return->{data}{leases}{total}++; + if ( $lease->{state} eq 'free' ) { + $to_return->{data}{leases}{free}++; + } elsif ( $lease->{state} eq 'abandoned' ) { + $to_return->{data}{leases}{abandoned}++; + } elsif ( $lease->{state} eq 'active' ) { + $to_return->{data}{leases}{active}++; + } elsif ( $lease->{state} eq 'backup' ) { + $to_return->{data}{leases}{backup}++; + } elsif ( $lease->{state} eq 'bootp' ) { + $to_return->{data}{leases}{bootp}++; + } elsif ( $lease->{state} eq 'expired' ) { + $to_return->{data}{leases}{expired}++; + } elsif ( $lease->{state} eq 'released' ) { + $to_return->{data}{leases}{released}++; + } elsif ( $lease->{state} eq 'reset' ) { + $to_return->{data}{leases}{reset}++; + } +} ## end foreach my $lease ( @{ $to_return->{data}{found_leases...}}) + +my $cmd_output = `dhcpd-pools -s i -A -l $opts{l} 2> /dev/null`; +my $category = ''; +for my $line ( split( /\n/, $cmd_output ) ) { + $line =~ s/^ +//; + my @line_split = split( /[\ \t]+/, $line ); + if ( $line =~ /^Ranges\:/ ) { + 
$category = 'pools'; + } elsif ( $line =~ /^Shared\ networks\:/ ) { + $category = 'networks'; + } elsif ( $line =~ /^Sum\ of\ all\ ranges\:/ ) { + $category = 'all_networks'; + } elsif ( $category eq 'pools' && defined( $line_split[4] ) && $line_split[4] =~ /^\d+$/ ) { + push( + @{ $to_return->{data}{pools} }, + { + first_ip => $line_split[1], + last_ip => $line_split[3], + max => $line_split[4], + cur => $line_split[5], + percent => $line_split[6], + } + ); + } elsif ( $category eq 'networks' + && defined( $line_split[1] ) + && $line_split[1] =~ /^\d+$/ + && defined( $line_split[2] ) + && $line_split[2] =~ /^\d+$/ ) + { + push( + @{ $to_return->{data}{networks} }, + { + network => $line_split[0], + max => $line_split[1], + cur => $line_split[2], + percent => $line_split[3], + } + ); + } elsif ( $category eq 'all_networks' ) { + $to_return->{data}{all_networks}{max} = $line_split[2]; + $to_return->{data}{all_networks}{cur} = $line_split[3]; + $to_return->{data}{all_networks}{percent} = $line_split[4]; + } +} ## end for my $line ( split( /\n/, $cmd_output ) ) + +my $json = JSON->new->allow_nonref->canonical(1); +if ( $opts{p} ) { + $json->pretty; +} +my $toReturn = $json->encode($to_return) . "\n"; +if ( $opts{Z} ) { + my $toReturnCompressed; + gzip \$toReturn => \$toReturnCompressed; + my $compressed = encode_base64($toReturnCompressed); + $compressed =~ s/\n//g; + $compressed = $compressed . "\n"; + if ( length($compressed) < length($toReturn) ) { + $toReturn = $compressed; + } +} ## end if ( $opts{Z} ) + +print $toReturn; + +exit; + From 00a68dc6ed2f2ccfe15d9c927204710aecf24834 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Fri, 22 Sep 2023 01:51:14 -0500 Subject: [PATCH 260/332] base64 encode possible fields for DHCP that may possibly include binary data (#489) * base64 encode uid and client hostname for dhcp lease info * cleanup vendor_class_identifier as well and note the changes --- snmp/dhcp | 75 ++++++++++++++++++++++++++++++++++++++++++------------- 1 file changed, 57 insertions(+), 18 deletions(-) diff --git a/snmp/dhcp b/snmp/dhcp index 67ad5dc2f..97e4e8e6a 100755 --- a/snmp/dhcp +++ b/snmp/dhcp @@ -72,6 +72,14 @@ client hostname, and state are unique. - .found_leases.[].starts :: Unix time of of when the lease starts. - .found_leases.[].state :: State of the lease. - .found_leases.[].uid :: UID passed during the request. + - .found_leases.[].vendor_class_identifier :: Vendor class identifier passed during the request. + +The following are Base64 encoded as they may include binary that breaks either SNMP or +the PHP JSON decoder. + + - .found_leases.[].vendor_class_identifier + - .found_leases.[].uid :: UID passed during the request. + - .found_leases.[].vendor_class_identifier =cut @@ -100,7 +108,7 @@ if ( !defined( $opts{l} ) ) { $Getopt::Std::STANDARD_HELP_VERSION = 1; sub main::VERSION_MESSAGE { - print "LibreNMS ISC-DHCPD extend 0.0.1\n"; + print "LibreNMS ISC-DHCPD extend 0.0.2\n"; } sub main::HELP_MESSAGE { @@ -197,33 +205,65 @@ if ( !$opts{d} ) { . 
$lease->{hardware_address} } = $lease; } foreach my $lease_key ( keys( %{$found_leases} ) ) { + my $uid = $found_leases->{$lease_key}{uid}; + if ( $uid ne '' ) { + $uid = encode_base64($uid); + chomp($uid); + } + my $client_hostname = $found_leases->{$lease_key}{client_hostname}; + if ( $client_hostname ne '' ) { + $client_hostname = encode_base64($client_hostname); + chomp($client_hostname); + } + my $vendor_class_identifier = $found_leases->{$lease_key}{vendor_class_identifier}; + if ( $vendor_class_identifier ne '' ) { + $vendor_class_identifier = encode_base64($vendor_class_identifier); + chomp($vendor_class_identifier); + } push( @{ $to_return->{data}{found_leases} }, { - uid => $found_leases->{$lease_key}{uid}, - cltt => $found_leases->{$lease_key}{cltt}, - state => $found_leases->{$lease_key}{state}, - ip => $found_leases->{$lease_key}{ip_address}, - hw_address => $found_leases->{$lease_key}{hardware_address}, - starts => $found_leases->{$lease_key}{starts}, - ends => $found_leases->{$lease_key}{ends}, - client_hostname => $found_leases->{$lease_key}{client_hostname}, + uid => $uid, + cltt => $found_leases->{$lease_key}{cltt}, + state => $found_leases->{$lease_key}{state}, + ip => $found_leases->{$lease_key}{ip_address}, + hw_address => $found_leases->{$lease_key}{hardware_address}, + starts => $found_leases->{$lease_key}{starts}, + ends => $found_leases->{$lease_key}{ends}, + client_hostname => $client_hostname, + vendor_class_identifier => $vendor_class_identifier, } ); } ## end foreach my $lease_key ( keys( %{$found_leases} ...)) } else { foreach my $lease ( @{$leases} ) { + my $uid = $lease->{uid}; + if ( $uid ne '' ) { + $uid = encode_base64($uid); + chomp($uid); + } + my $client_hostname = $lease->{client_hostname}; + if ( $client_hostname ne '' ) { + $client_hostname = encode_base64($client_hostname); + chomp($client_hostname); + } + my $vendor_class_identifier = $lease->{vendor_class_identifier}; + if ( $vendor_class_identifier ne '' ) { + $vendor_class_identifier = encode_base64($vendor_class_identifier); + chomp($vendor_class_identifier); + } push( @{ $to_return->{data}{found_leases} }, { - uid => $lease->{uid}, - cltt => $lease->{cltt}, - state => $lease->{state}, - ip => $lease->{ip_address}, - hw_address => $lease->{hardware_address}, - starts => $lease->{starts}, - ends => $lease->{ends}, - client_hostname => $lease->{client_hostname}, + uid => $uid, + cltt => $lease->{cltt}, + state => $lease->{state}, + ip => $lease->{ip_address}, + hw_address => $lease->{hardware_address}, + starts => $lease->{starts}, + ends => $lease->{ends}, + client_hostname => $client_hostname, + vendor_class_identifier => $vendor_class_identifier, } ); } ## end foreach my $lease ( @{$leases} ) @@ -316,4 +356,3 @@ if ( $opts{Z} ) { print $toReturn; exit; - From 22bc2cedf5908aa1fa719a8cec5a9fe6924c2c89 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Sun, 24 Sep 2023 13:49:07 -0500 Subject: [PATCH 261/332] for dhcp dedup, don't use uid twice and don't use cltt (#490) --- snmp/dhcp | 2 -- 1 file changed, 2 deletions(-) diff --git a/snmp/dhcp b/snmp/dhcp index 97e4e8e6a..3a8da3939 100755 --- a/snmp/dhcp +++ b/snmp/dhcp @@ -197,8 +197,6 @@ foreach my $lease ( @{$leases} ) { if ( !$opts{d} ) { foreach my $lease ( @{$leases} ) { $found_leases->{ $lease->{uid} - . $lease->{cltt} - . $lease->{uid} . $lease->{ip_address} . $lease->{client_hostname} . $lease->{state} From 38d57626be3802da3f0027ecba4203cdfa042e90 Mon Sep 17 00:00:00 2001 From: "Zane C. 
Bowers-Hadley" Date: Sun, 1 Oct 2023 12:17:01 -0500 Subject: [PATCH 262/332] dhcp extend nolonger requires dhcpd-pools (#491) * more work on implementing dhcp config parsing * no longer require non-perl depends --- snmp/dhcp | 282 ++++++++++++++++++++++++++++++++++++++++++++---------- 1 file changed, 231 insertions(+), 51 deletions(-) diff --git a/snmp/dhcp b/snmp/dhcp index 3a8da3939..66466c2d8 100755 --- a/snmp/dhcp +++ b/snmp/dhcp @@ -47,6 +47,17 @@ Do not de-dup. This is done via making sure the combination of UID, CLTT, IP, HW address, client hostname, and state are unique. +=head2 -n + +If no shared networks are defined, what to use for generating the network names +for reporting purposes. + + - cidr :: Use the cidr for the defined subnets. + + - cidr+range :: Use the cidr+range for the defined subnets. + +Default is 'cidr'. + =head1 Return JSON Data Hash - .all_networks.cur :: Current leases for all networks @@ -54,15 +65,19 @@ client hostname, and state are unique. - .all_networks.percent :: Percent of total pool usage. - .networks.[].cur :: Current leases for the network. - - .networks.[].max :: Max possible leases for thenetworks - - .networks.[].network :: Subnet of the network. + - .networks.[].max :: Max possible leases for the networks + - .networks.[].network :: Name of the network. + - .networks.[].subnets :: Array of subnets on the network. - .networks.[].percent :: Percent of network usage. + - .networks.[].pools :: Pool ranges used. - .pools.[].cur :: Current leases for the pool. - .pools.[].max :: Max possible leases for pool. - .pools.[].first_ip :: First IP of the pool. - .pools.[].last_ip :: Last IP of the pool. - .pools.[].percent :: Percent of pool usage. + - .pools.[].cidr :: CIDR for this subnet. + - .pools.[].$option :: Additional possible DHCP subnet option. - .found_leases.[].client_hostname :: Hostname the client passed during the request. - .found_leases.[].cltt :: The CLTT for the requist. @@ -90,9 +105,18 @@ use JSON -convert_blessed_universally; use MIME::Base64; use IO::Compress::Gzip qw(gzip $GzipError); use Net::ISC::DHCPd::Leases; +use Net::ISC::DHCPd::Config; my %opts; -getopts( 'l:Zdp', \%opts ); +getopts( 'l:Zdpc:n:', \%opts ); + +if ( !defined( $opts{n} ) ) { + $opts{n} = 'cidr'; +} else { + if ( $opts{n} ne 'cidr' && $opts{n} ne 'cidr+range' ) { + $opts{n} = 'cidr'; + } +} if ( !defined( $opts{l} ) ) { # if freebsd, set it to the default path as used by the version installed via ports @@ -105,6 +129,17 @@ if ( !defined( $opts{l} ) ) { } } ## end if ( !defined( $opts{l} ) ) +if ( !defined( $opts{c} ) ) { + # if freebsd, set it to the default path as used by the version installed via ports + # + # additional elsifs should be added as they become known, but default works for most Linux distros + if ( $^O eq 'freebsd' ) { + $opts{c} = '/usr/local/etc/dhcpd.conf'; + } else { + $opts{c} = '/etc/dhcp/dhcpd.conf'; + } +} ## end if ( !defined( $opts{c} ) ) + $Getopt::Std::STANDARD_HELP_VERSION = 1; sub main::VERSION_MESSAGE { @@ -114,12 +149,13 @@ sub main::VERSION_MESSAGE { sub main::HELP_MESSAGE { print ' -l Path to the lease file. +-c Path to the dhcpd.conf file. -Z Enable GZip+Base64 compression. -d Do not de-dup. 
'; exit; -} +} ## end sub main::HELP_MESSAGE my $to_return = { data => { @@ -138,7 +174,7 @@ my $to_return = { }, networks => [], pools => [], - all_networks => {}, + all_networks => { cur => 0, max => 0, percent => 0, }, }, version => 3, error => 0, @@ -152,8 +188,14 @@ if ( !-f $opts{l} && !-r $opts{l} ) { exit; } +# hash for storing found leases for later deduping my $found_leases = {}; +## +## +## read in the leases +## +## my $leases; eval { @@ -167,8 +209,11 @@ if ($@) { exit; } -use Data::Dumper; - +## +## +## process found leases +## +## foreach my $lease ( @{$leases} ) { if ( !defined( $lease->{uid} ) ) { $lease->{uid} = ''; @@ -193,7 +238,11 @@ foreach my $lease ( @{$leases} ) { } } ## end foreach my $lease ( @{$leases} ) -# dedup or copy lease info as is +## +## +## dedup or copy lease info as is +## +## if ( !$opts{d} ) { foreach my $lease ( @{$leases} ) { $found_leases->{ $lease->{uid} @@ -267,9 +316,11 @@ if ( !$opts{d} ) { } ## end foreach my $lease ( @{$leases} ) } ## end else [ if ( !$opts{d} ) ] -#print Dumper($leases); - -# total the lease info types +## +## +## total the lease info types +## +## foreach my $lease ( @{ $to_return->{data}{found_leases} } ) { $to_return->{data}{leases}{total}++; if ( $lease->{state} eq 'free' ) { @@ -291,50 +342,179 @@ foreach my $lease ( @{ $to_return->{data}{found_leases} } ) { } } ## end foreach my $lease ( @{ $to_return->{data}{found_leases...}}) -my $cmd_output = `dhcpd-pools -s i -A -l $opts{l} 2> /dev/null`; -my $category = ''; -for my $line ( split( /\n/, $cmd_output ) ) { - $line =~ s/^ +//; - my @line_split = split( /[\ \t]+/, $line ); - if ( $line =~ /^Ranges\:/ ) { - $category = 'pools'; - } elsif ( $line =~ /^Shared\ networks\:/ ) { - $category = 'networks'; - } elsif ( $line =~ /^Sum\ of\ all\ ranges\:/ ) { - $category = 'all_networks'; - } elsif ( $category eq 'pools' && defined( $line_split[4] ) && $line_split[4] =~ /^\d+$/ ) { - push( - @{ $to_return->{data}{pools} }, - { - first_ip => $line_split[1], - last_ip => $line_split[3], - max => $line_split[4], - cur => $line_split[5], - percent => $line_split[6], +## +## +## read in the config +## +## +my $config_obj; +eval { + $config_obj = Net::ISC::DHCPd::Config->new( file => $opts{c} ); + $config_obj->parse; +}; +if ($@) { + $to_return->{error} = 3; + $to_return->{errorString} = 'Reading config failed... ' . $@; + print encode_json($to_return) . "\n"; + exit; +} + +## +## +## process found subnets +## +## +my $pools = {}; +my @subnets = $config_obj->subnets; +foreach my $subnet (@subnets) { + my @ranges = $subnet->ranges; + my $subnet_cidr_obj = $subnet->address; + my $subnet_cidr = $subnet_cidr_obj->addr; + foreach my $range (@ranges) { + my $lower = $range->lower; + my $upper = $range->upper; + my $pool_name = $lower->addr . '-' . 
$upper->addr; + my $subnet_addr = $subnet_cidr_obj->addr; + my $subnet_cidr = $subnet_cidr_obj->cidr; + my $max = $upper->bigint - $lower->bigint; + $pools->{$pool_name} = { + first_ip => $lower->addr, + lower => $lower, + last_ip => $upper->addr, + upper => $upper, + subnet => $subnet_addr, + cidr => $subnet_cidr, + max => $max, + cur => 0, + percent => 0, + }; + my @options = $subnet->options; + + foreach my $option (@options) { + $pools->{$pool_name}{ $option->name } = $option->value; + } + } ## end foreach my $range (@ranges) +} ## end foreach my $subnet (@subnets) + +## +## +## process found networks and the subnets contained in them +## +## +my $networks = {}; +my @found_subnets = $config_obj->sharednetworks; +my $undef_network_name_int = 0; +foreach my $network (@found_subnets) { + my $name = $network->name; + if ( !defined($name) || $name eq '' ) { + $name = 'undef' . $undef_network_name_int; + $undef_network_name_int++; + } + if ( !defined( $networks->{$name} ) ) { + $networks->{$name} = []; + } + + @subnets = $network->subnets; + foreach my $subnet (@subnets) { + my @ranges = $subnet->ranges; + my $subnet_cidr_obj = $subnet->address; + my $subnet_cidr = $subnet_cidr_obj->addr; + foreach my $range (@ranges) { + my $lower = $range->lower; + my $upper = $range->upper; + my $pool_name = $lower->addr . '-' . $upper->addr; + my $max = $upper->bigint - $lower->bigint; + $pools->{$pool_name} = { + first_ip => $lower->addr, + lower => $lower, + last_ip => $upper->addr, + upper => $upper, + subnet => $subnet_cidr, + max => $max, + cur => 0, + percent => 0, + }; + my @options = $subnet->options; + foreach my $option (@options) { + $pools->{$pool_name}{ $option->name } = $option->value; } + + push( @{ $networks->{$name} }, $pool_name ); + } ## end foreach my $range (@ranges) + } ## end foreach my $subnet (@subnets) +} ## end foreach my $network (@found_subnets) + +## +## +## puts the pools array together +## +## +foreach my $pool_key ( keys( %{$pools} ) ) { + my $lower = $pools->{$pool_key}{lower}; + delete( $pools->{$pool_key}{lower} ); + my $upper = $pools->{$pool_key}{upper}; + delete( $pools->{$pool_key}{upper} ); + + # check each lease for if it is between the upper and lower IPs + # then increment current if the state is active + foreach my $lease ( @{ $to_return->{data}{found_leases} } ) { + my $lease_ip = NetAddr::IP->new( $lease->{ip} ); + if ( $lower <= $lease_ip && $lease_ip <= $upper ) { + if ( $lease->{state} eq 'active' ) { + $pools->{$pool_key}{cur}++; + } + } + } + $pools->{$pool_key}{percent} = ( $pools->{$pool_key}{cur} / $pools->{$pool_key}{max}->numify() ) * 100; + $pools->{$pool_key}{max} = $pools->{$pool_key}{max}->bstr; + + # add the current and max to all_networks(really all subnets)... 
+ $to_return->{data}{all_networks}{cur} = $to_return->{data}{all_networks}{cur} + $pools->{$pool_key}{cur}; + $to_return->{data}{all_networks}{max} = $to_return->{data}{all_networks}{max} + $pools->{$pool_key}{max}; + + push( @{ $to_return->{data}{pools} }, $pools->{$pool_key} ); +} ## end foreach my $pool_key ( keys( %{$pools} ) ) +$to_return->{data}{all_networks}{percent} + = ( $to_return->{data}{all_networks}{cur} / $to_return->{data}{all_networks}{max} ) * 100; + +## +## +## put the networks section together +## +## +my @network_keys = keys( %{$networks} ); +if ( !defined( $network_keys[0] ) ) { + foreach my $pool_key ( keys( %{$pools} ) ) { + $networks->{ $pools->{$pool_key}{cidr} } = [$pool_key]; + } + @network_keys = keys( %{$networks} ); +} +foreach my $network (@network_keys) { + my $cur = 0; + my $max = 0; + foreach my $pool_name ( @{ $networks->{$network} } ) { + $cur = $cur + $pools->{$pool_name}{cur}; + $max = $max + $pools->{$pool_name}{max}; + } + my $percent = ( $cur / $max ) * 100; + push( + @{ $to_return->{data}{networks} }, + { + cur => $cur, + max => $max, + network => $network, + percent => $percent, + pools => $networks->{$network}, + } + ); +} ## end foreach my $network (@network_keys) + +## +## +## handle printing the output +## +## my $json = JSON->new->allow_nonref->canonical(1); if ( $opts{p} ) { $json->pretty; From 6a7c04e5498271f293d77b5a1a7b0db1694216a5 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Wed, 4 Oct 2023 19:30:54 -0500 Subject: [PATCH 263/332] add -w to dhcp (#492) --- snmp/dhcp | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/snmp/dhcp b/snmp/dhcp index 66466c2d8..461790389 100755 --- a/snmp/dhcp +++ b/snmp/dhcp @@ -58,6 +58,10 @@ for reporting purposes. Default is 'cidr'. +=head2 -w + +Write the output to this file. + =head1 Return JSON Data Hash - .all_networks.cur :: Current leases for all networks @@ -106,9 +110,10 @@ use MIME::Base64; use IO::Compress::Gzip qw(gzip $GzipError); use Net::ISC::DHCPd::Leases; use Net::ISC::DHCPd::Config; +use File::Slurp; my %opts; -getopts( 'l:Zdpc:n:', \%opts ); +getopts( 'l:Zdpc:n:w:', \%opts ); @@ -533,4 +538,8 @@ if ( $opts{Z} ) { print $toReturn; +if ($opts{w}) { + write_file($opts{w}, $toReturn); +} + exit; From eba08461be6d487d261784c0fe4c2e9955955b8a Mon Sep 17 00:00:00 2001 From: Tony Murray Date: Sat, 7 Oct 2023 21:36:37 -0500 Subject: [PATCH 264/332] Add pacman packages support (#493) --- agent-local/pacman | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) create mode 100755 agent-local/pacman diff --git a/agent-local/pacman b/agent-local/pacman new file mode 100644 index 000000000..d0b4f4ae0 --- /dev/null +++ b/agent-local/pacman @@ -0,0 +1,22 @@ +#!/bin/bash +# Cache the file for 30 minutes +# If you want to override this, put the command in cron. +# We cache because it is a 1sec delay, which is painful for the poller +if [ -x /usr/bin/pacman ]; then + DATE=$(date +%s) + FILE=/var/cache/librenms/agent-local-pacman + + [ -d /var/cache/librenms ] || mkdir -p /var/cache/librenms + + if [ ! 
-e $FILE ]; then + pacman -Qi | awk '/^Name/{name=$3} /^Version/{version=$3} /^Architecture/{arch=$3} /^Installed Size/{print name, version, arch, $4$5}' > $FILE + fi + FILEMTIME=$(stat -c %Y $FILE) + FILEAGE=$(($DATE-$FILEMTIME)) + if [ $FILEAGE -gt 1800 ]; then + pacman -Qi | awk '/^Name/{name=$3} /^Version/{version=$3} /^Architecture/{arch=$3} /^Installed Size/{print name, version, arch, $4$5}' > $FILE + fi + echo "<<>>" + cat $FILE +fi + From e34cde03850b49083fa122a36078ff8f14eba016 Mon Sep 17 00:00:00 2001 From: calvin_thefreak <5560381+calvinthefreak@users.noreply.github.com> Date: Wed, 8 Nov 2023 05:16:49 +0100 Subject: [PATCH 265/332] Quick and dirty fix for snmp extend to respond correctly. (#494) - Added "grep -v" to the output command. - Added a note for Users that don't understand why LibreNMS can't get the values when snmp user is not in docker group. --- snmp/mailcow-dockerized-postfix | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/snmp/mailcow-dockerized-postfix b/snmp/mailcow-dockerized-postfix index 8fd536481..473321e6c 100644 --- a/snmp/mailcow-dockerized-postfix +++ b/snmp/mailcow-dockerized-postfix @@ -20,6 +20,9 @@ # please adjust librenms_poller_interval according to your LibreNMS setup - default to 5 minutes # requirements: mailcow-dockerized and pflogsumm # +# Note to users that struggle with the setup: Make sure, that your SNMP Daemon can use the docker command +# So please make sure, that the e.G. Debian-snmp user is added to the docker group! +# import json import re @@ -47,7 +50,7 @@ def cli_command(): + libre_to_mcd_postfix(librenms_poller_interval) + "m " + cli_get_docker_container() - + "| pflogsumm --smtpd-stats" + + "| pflogsumm --smtpd-stats 2>&1 | grep -v 'Use of uninitialized value'" ) return cli_part From 4a1368980ae709b66f6560482685c1bf017000d7 Mon Sep 17 00:00:00 2001 From: Garcia MICHEL Date: Wed, 8 Nov 2023 05:21:53 +0100 Subject: [PATCH 266/332] Add OpenMediaVault detection to distro script (#484) --- snmp/distro | 3 +++ 1 file changed, 3 insertions(+) diff --git a/snmp/distro b/snmp/distro index da06f943a..843eef062 100755 --- a/snmp/distro +++ b/snmp/distro @@ -78,6 +78,9 @@ elif [ "${OS}" = "Linux" ] ; then if [ -f /etc/dogtag ]; then DIST=$(cat /etc/dogtag) fi + if [ -f /usr/sbin/omv-sysinfo ]; then + DIST="${DIST}/OpenMediaVault $(/usr/sbin/omv-sysinfo 00-omv-version | grep Release | cut -d: -f2 | sed 's/\s//g')" + fi elif [ -f /etc/gentoo-release ] ; then DIST="Gentoo" From 7e18c9a212eba75c69c995e124ed234be0836861 Mon Sep 17 00:00:00 2001 From: bnerickson Date: Tue, 7 Nov 2023 20:25:48 -0800 Subject: [PATCH 267/332] Adding Socket Statistics Script (#486) * Adding Socket Statistics Script * Adding the no-header argument to the ss command. Fixing a bug where the unknown netid type was discarded before it was converted from question-marks to unknown * Now that headers are removed from the ss command, moving the datastructure logic out of command execution logic so it executes at least once even if there's no output. * Moved 'always-on' global arguments into a GLOBAL_ARGS constant list. 
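* For reference, the assembled command for a socket type ends up resembling "ss --tcp --all --no-header", with the GLOBAL_ARGS always appended after the per-type arguments.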
--- snmp/ss.py | 370 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 370 insertions(+) create mode 100644 snmp/ss.py diff --git a/snmp/ss.py b/snmp/ss.py new file mode 100644 index 000000000..638a24079 --- /dev/null +++ b/snmp/ss.py @@ -0,0 +1,370 @@ +#!/usr/bin/env python +# +# Name: Socket Statistics Script +# Author: bnerickson w/SourceDoctor's certificate.py script forming +# the base of the vast majority of this one. +# Version: 1.0 +# Description: This is a simple script to parse "ss" output for ingestion into +# LibreNMS via the ss application. +# Installation: +# 1. Copy this script to /etc/snmp/ and make it executable: +# chmod +x /etc/snmp/ss.py +# 2. Edit your snmpd.conf and include: +# extend ss /etc/snmp/ss.py +# 3. (Optional) Create a /etc/snmp/ss.json file and specify: +# a.) "ss_cmd" - String path to the ss binary: ["/sbin/ss"] +# b.) "socket_types" - A comma-delimited list of socket types to include. +# The following socket types are valid: dccp, icmp6, +# mptcp, p_dgr, p_raw, raw, sctp, tcp, ti_dg, ti_rd, +# ti_sq, ti_st, u_dgr, u_seq, u_str, udp, unknown, +# v_dgr, v_dgr, xdp. Please note that the "unknown" +# socket type is represented in ss output with the +# netid "???". Please also note that the p_dgr and +# p_raw socket types are specific to the "link" +# address family; the ti_dg, ti_rd, ti_sq, and ti_st +# socket types are specific to the "tipc" address +# family; the u_dgr, u_seq, and u_str socket types +# are specific to the "unix" address family; and the +# v_dgr and v_str socket types are specific to the +# "vsock" address family. Filtering out the parent +# address families for the aforementioned will also +# filter out their specific socket types. Specifying +# "all" includes all of the socket types. For +# example: to include only tcp, udp, icmp6 sockets, +# you would specify "tcp,udp,icmp6": ["all"] +# c.) "addr_families" - A comma-delimited list of address families to +# include. The following families are valid: +# inet, inet6, link, netlink, tipc, unix, vsock. As +# mentioned above under (b), filtering out the link, +# tipc, unix, or vsock address families will also +# filter out their respective socket types. +# Specifying "all" includes all of the families. +# For example: to include only inet and inet6 +# families, you would specify "inet,inet6": ["all"] +# ``` +# { +# "ss_cmd": "/sbin/ss", +# "socket_types": "all", +# "addr_families": "all" +# } +# ``` +# 4. Restart snmpd and activate the app for desired host. 
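+# +# Shape of the JSON this script prints (values below are illustrative only; +# the actual keys depend on which socket types and address families are enabled): +# {"errorString": "", "error": 0, "version": 1, +#  "data": {"tcp": {"ESTAB": 5, "LISTEN": 12, "TOTAL": 17}, +#           "inet": {"tcp": {"ESTAB": 5, "TOTAL": 5}, "udp": {"UNCONN": 2, "TOTAL": 2}}}} +#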
+ +import json +import subprocess +import sys + +CONFIG_FILE = "/etc/snmp/ss.json" +SOCKET_MAPPINGS = { + "dccp": { + "args": ["--dccp"], + "netids": [], + "addr_family": False, + "socket_type": True, + }, + "inet": { + "args": ["--family", "inet"], + "netids": ["dccp", "mptcp", "raw", "sctp", "tcp", "udp", "unknown"], + "addr_family": True, + "socket_type": False, + }, + "inet6": { + "args": ["--family", "inet6"], + "netids": ["dccp", "icmp6", "mptcp", "raw", "sctp", "tcp", "udp", "unknown"], + "addr_family": True, + "socket_type": False, + }, + "link": { + "args": ["--family", "link"], + "netids": ["p_dgr", "p_raw", "unknown"], + "addr_family": True, + "socket_type": False, + }, + "mptcp": { + "args": ["--mptcp"], + "netids": [], + "addr_family": False, + "socket_type": True, + }, + "netlink": { + "args": ["--family", "netlink"], + "netids": [], + "addr_family": True, + "socket_type": False, + }, + "raw": { + "args": ["--raw"], + "netids": [], + "addr_family": False, + "socket_type": True, + }, + "sctp": { + "args": ["--sctp"], + "netids": [], + "addr_family": False, + "socket_type": True, + }, + "tcp": { + "args": ["--tcp"], + "netids": [], + "addr_family": False, + "socket_type": True, + }, + "tipc": { + "args": ["--family", "tipc"], + "netids": ["ti_dg", "ti_rd", "ti_sq", "ti_st", "unknown"], + "addr_family": True, + "socket_type": False, + }, + "udp": { + "args": ["--udp"], + "netids": [], + "addr_family": False, + "socket_type": True, + }, + "unix": { + "args": ["--family", "unix"], + "netids": ["u_dgr", "u_seq", "u_str"], + "addr_family": True, + "socket_type": False, + }, + "vsock": { + "args": ["--family", "vsock"], + "netids": ["v_dgr", "v_str", "unknown"], + "addr_family": True, + "socket_type": False, + }, + "xdp": { + "args": ["--xdp"], + "netids": [], + "addr_family": False, + "socket_type": True, + }, +} +GLOBAL_ARGS = ["--all", "--no-header"] +ADDR_FAMILY_ALLOW_LIST = [] +SOCKET_ALLOW_LIST = [] + +# Populate the state allow lists. +for gentype_key, gentype_values in SOCKET_MAPPINGS.items(): + if gentype_values["socket_type"]: + SOCKET_ALLOW_LIST.append(gentype_key) + if gentype_values["addr_family"]: + ADDR_FAMILY_ALLOW_LIST.append(gentype_key) + for gentype_netid in gentype_values["netids"]: + SOCKET_ALLOW_LIST.append(gentype_netid) + +SS_CMD = ["/sbin/ss"] + + +def error_handler(error_name, err): + """ + error_handler(): Common error handler for config/output parsing and + command execution. + Inputs: + error_name: String describing the error handled. + err: The error message in its entirety. + Outputs: + None + """ + output_data = { + "errorString": f"{error_name}: '{err}'", + "error": 1, + "version": 1, + "data": [], + } + print(json.dumps(output_data)) + sys.exit(1) + + +def config_file_parser(): + """ + config_file_parser(): Parses the config file (if it exists) and extracts the + necessary parameters. + + Inputs: + None + Outputs: + ss_cmd: The full ss command to execute. + socket_allow_list: A list of the socket types to parse output for. 
+ """ + ss_cmd = SS_CMD.copy() + socket_allow_list = SOCKET_ALLOW_LIST.copy() + addr_family_allow_list = ADDR_FAMILY_ALLOW_LIST.copy() + + # Load configuration file if it exists + try: + with open(CONFIG_FILE, "r", encoding="utf-8") as json_file: + config_file = json.load(json_file) + ss_cmd = [config_file["ss_cmd"]] + socket_allow_list_clean = list( + map(str.lower, config_file["socket_types"].split(",")) + ) + addr_family_allow_list_clean = list( + map(str.lower, config_file["addr_families"].split(",")) + ) + if "all" not in socket_allow_list_clean: + socket_allow_list = socket_allow_list_clean + if "all" not in addr_family_allow_list_clean: + addr_family_allow_list = addr_family_allow_list_clean + except FileNotFoundError: + pass + except (KeyError, PermissionError, OSError, json.decoder.JSONDecodeError) as err: + error_handler("Config File Error", err) + + # Verify the socket types specified by the user are valid. + err = "" + for socket_type in socket_allow_list: + if socket_type in SOCKET_ALLOW_LIST: + continue + if not err: + err = "Invalid socket types specified: " + err += socket_type + " " + if err: + error_handler("Configuration File Error", err.strip()) + + # Verify the address families specified by the user are valid. + err = "" + for addr_family in addr_family_allow_list: + if addr_family in ADDR_FAMILY_ALLOW_LIST: + continue + if not err: + err = "Invalid address families specified: " + err += addr_family + " " + if err: + error_handler("Configuration File Error", err.strip()) + + # Create and return full ss command and allow lists. + return ss_cmd, socket_allow_list, addr_family_allow_list + + +def command_executor(ss_cmd, socket_type): + """ + command_executor(): Execute the ss command and return the output. + + Inputs: + ss_cmd: The full ss command to execute. + socket_type: The type of socket to collect data for. + Outputs: + poutput: The stdout of the executed command (empty byte-string if error). + """ + ss_socket_cmd = ss_cmd.copy() + ss_socket_cmd.extend(SOCKET_MAPPINGS[socket_type]["args"]) + ss_socket_cmd.extend(GLOBAL_ARGS) + + try: + # Execute ss command + poutput = subprocess.check_output( + ss_socket_cmd, + stdin=None, + stderr=subprocess.PIPE, + ) + except (subprocess.CalledProcessError, OSError) as err: + error_handler("Command Execution Error", err) + return poutput + + +def socket_parser(line, gentype, ss_data, socket_allow_list): + """ + socket_parser(): Parses a socket line for its current status. + That status type is added to the global ss_data + variable if it does not exist or incremented if + it does. The totals for the socket type are + incremented as well. + + Inputs: + line: The sockets's status line from the ss stdout. + gentype: The socket or address family to parse data for. + ss_data: All of the socket data as a dictionary. + socket_allow_list: List of sockets to parse data for. + Outputs: + None + """ + line_parsed = line.strip().split() + + netid = None + state = None + + try: + if SOCKET_MAPPINGS[gentype]["netids"]: + netid = line_parsed[0] + state = line_parsed[1] + else: + state = line_parsed[0] + except IndexError as err: + error_handler("Command Output Parsing Error", err) + + if SOCKET_MAPPINGS[gentype]["netids"]: + # Special case to convert the question-marks symbol + # to a safe string. + if netid == "???": + netid = "unknown" + + # Omit filtered sockets from the address families. 
+ if netid not in socket_allow_list: + return ss_data + + ss_data[netid][state] = ( + 1 if state not in ss_data[netid] else (ss_data[netid][state] + 1) + ) + ss_data[netid]["TOTAL"] = ( + 1 if "TOTAL" not in ss_data[netid] else (ss_data[netid]["TOTAL"] + 1) + ) + else: + ss_data[state] = 1 if state not in ss_data else (ss_data[state] + 1) + ss_data["TOTAL"] = 1 if "TOTAL" not in ss_data else (ss_data["TOTAL"] + 1) + + return ss_data + + +def main(): + """ + main(): main function that delegates config file parsing, command execution, + and socket stdout parsing. Then it prints out the expected json output + for the ss application. + + Inputs: + None + Outputs: + None + """ + output_data = {"errorString": "", "error": 0, "version": 1, "data": {}} + + # Parse configuration file. + ss_cmd, socket_allow_list, addr_family_allow_list = config_file_parser() + + # Execute ss command for socket types. + for gentype in list(SOCKET_MAPPINGS.keys()): + # Skip socket types and address families disabled by the user. + if ( + SOCKET_MAPPINGS[gentype]["socket_type"] and gentype not in socket_allow_list + ) or ( + SOCKET_MAPPINGS[gentype]["addr_family"] + and gentype not in addr_family_allow_list + ): + continue + + # Build the initial output_data datastructures. + output_data["data"][gentype] = {} + for netid in SOCKET_MAPPINGS[gentype]["netids"]: + # Skip the netid if the socket is not allowed. + if netid not in socket_allow_list: + continue + output_data["data"][gentype][netid] = {} + + for line in command_executor(ss_cmd, gentype).decode("utf-8").split("\n"): + if not line: + continue + + output_data["data"][gentype] = socket_parser( + line, + gentype, + output_data["data"][gentype], + socket_allow_list, + ) + + print(json.dumps(output_data)) + + +if __name__ == "__main__": + main() From f20430637f947203f2e5575cacd67515cad6400b Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Sun, 19 Nov 2023 04:33:41 -0600 Subject: [PATCH 268/332] borg backup extend (#495) --- snmp/borgbackup | 446 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 446 insertions(+) create mode 100755 snmp/borgbackup diff --git a/snmp/borgbackup b/snmp/borgbackup new file mode 100755 index 000000000..d20c49ce7 --- /dev/null +++ b/snmp/borgbackup @@ -0,0 +1,446 @@ +#!/usr/bin/env perl + +#Copyright (c) 2023, Zane C. Bowers-Hadley +#All rights reserved. +# +#Redistribution and use in source and binary forms, with or without modification, +#are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +#ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +#WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+#IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, +#INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +#BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +#LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +#OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +#THE POSSIBILITY OF SUCH DAMAGE. + +=head1 NAME + +borgbackup - LibreNMS JSON SNMP extend for gathering backup stats for borg + +=head1 VERSION + +0.0.1 + +=head1 SYNOPSIS + +borgbackup [B<-c> <config file>] [B<-o> <output dir>] + +borgbackup [B<--help>|B<-h>] + +borgbackup [B<--version>|B<-v>] + +=head1 DESCRIPTION + +This uses 'borg info $repo --json' to fetch info on the specified borg repos +and write the info out to files. + +The information is then written out to two files under the output directory. + + - extend_return :: This file contains the data for the extend in + gzip+base64 compressed format if applicable. + + - pretty :: Pretty printed and sorted JSON. + +This is done for two reasons. The first is that SNMPD and the users with read perms +for the repos are likely to be different. The second is lock timeout; even with +1 second, the command likely won't complete in a timely manner for larger +repos. + +For SNMPD it is generally going to be set up like this. + + extend borgbackup /bin/cat /var/cache/borgbackup_extend + +Then the extend is set to be run via cron. + + */5 * * * * /etc/snmp/extends/borgbackup + +=head1 FLAGS + +=head2 -c + +The config file to use for the extend. + +Default :: /usr/local/etc/borgbackup_extend.ini + +=head2 -o + +The output directory to write the pretty JSON file and the SNMP extend file to. + +Default :: /var/cache/borgbackup_extend + +=head2 -h|--help + +Print help info. + +=head2 -v|--version + +Print version info. + +=head1 CONFIG + +The config file is an ini file and is handled by L<Config::Tiny>. + + - mode :: single or multi, for if this is a single repo or for + multiple repos. + - Default :: single + + - repo :: Directory for the borg backup repo. + - Default :: undef + + - passphrase :: Passphrase for the borg backup repo. + - Default :: undef + + - passcommand :: Passcommand for the borg backup repo. + - Default :: undef + +For single repos all those variables are in the root section of the config, +so let's say the repo is at '/backup/borg' with a passphrase of '1234abc'. + + repo=/backup/borg + passphrase=1234abc + +For multi, each section outside of the root represents a repo. So if there is +'/backup/borg1' with a passphrase of 'foobar' and '/backup/derp' with a passcommand +of 'pass show backup', it would be like below. + + mode=multi + + [borg1] + repo=/backup/borg1 + passphrase=foobar + + [derp] + repo=/backup/derp + passcommand=pass show backup + +If 'passphrase' and 'passcommand' are both specified, then passcommand is used. + +=head1 JSON RETURN + +The return is a LibreNMS JSON style SNMP extend as defined at +L + +The following key info is relevant to the .data . + + - .mode :: The mode it was run in, either single or multi. + +Totaled info is in the hash .totals. + + - .totals.errored :: Total number of repos that info could not be fetched for. + - Type :: repos + + - .totals.locked :: Total number of locked repos. + - Type :: repos + + - .totals.locked_for :: Longest time any repo has been locked. 
- Type :: seconds + + - .totals.time_since_last_modified :: Largest time - mtime across the repo directories + - Type :: seconds + + - .totals.total_chunks :: Total number of chunks across all repos. + - Type :: chunks + + - .totals.total_csize :: Total compressed size of all archives in all repos. + - Type :: bytes + + - .totals.total_size :: Total uncompressed size of all archives in all repos. + - Type :: bytes + + - .totals.total_unique_chunks :: Total number of unique chunks in all repos. + - Type :: chunks + + - .totals.unique_csize :: Total deduplicated size of all archives in all repos. + - Type :: bytes + + - .totals.unique_size :: Total uncompressed size of all unique chunks in all repos. + - Type :: bytes + +Each repo then has its own hash under .repos . + + - .repos.$repo.error :: If defined, this is the error encountered when + attempting to get repo info. + - Type :: string + + - .repos.$repo.locked_for :: How long the repo has been locked for if + locked. If it is not locked this is undef. + - Type :: seconds + + - .repos.$repo.time_since_last_modified :: time - mtime for the repo directory + - Type :: seconds + + - .repos.$repo.total_chunks :: Total number of chunks for the repo. + - Type :: chunks + + - .repos.$repo.total_csize :: Total compressed size of all archives for the repo. + - Type :: bytes + + - .repos.$repo.total_size :: Total uncompressed size of all archives in the repo. + - Type :: bytes + + - .repos.$repo.total_unique_chunks :: Total number of unique chunks in the repo. + - Type :: chunks + + - .repos.$repo.unique_csize :: Total deduplicated size of all archives in the repo. + - Type :: bytes + + - .repos.$repo.unique_size :: Total uncompressed size of all unique chunks in the repo. + - Type :: bytes + +=cut + +use strict; +use warnings; +use Config::Tiny; +use JSON; +use Getopt::Long; +use File::Slurp; +use File::Path qw(make_path); +use MIME::Base64; +use IO::Compress::Gzip qw(gzip $GzipError); +use String::ShellQuote; +use Pod::Usage; + +our $output_dir = '/var/cache/borgbackup_extend'; +my $config_file = '/usr/local/etc/borgbackup_extend.ini'; +my $version; +my $help; +GetOptions( + 'c=s' => \$config_file, + 'o=s' => \$output_dir, + v => \$version, + version => \$version, + h => \$help, + help => \$help, +); + +if ($version) { + pod2usage( -exitval => 255, -verbose => 99, -sections => qw(VERSION), -output => \*STDOUT, ); +} + +if ($help) { + pod2usage( -exitval => 255, -verbose => 2, -output => \*STDOUT, ); +} + +# save the return +sub finish { + my (%opts) = @_; + + if ( !-e $output_dir ) { + make_path($output_dir) or die( 'could not create the output dir, "' . $output_dir . '",' ); + } elsif ( -e $output_dir && !-d $output_dir ) { + die( '"' . $output_dir . '" exists, but is not a directory' ); + } + + my $j = JSON->new; + + my $return_string = $j->encode( $opts{to_return} ); + + my $compressed_string; + gzip \$return_string => \$compressed_string; + my $compressed = encode_base64($compressed_string); + $compressed =~ s/\n//g; + $compressed = $compressed . "\n"; + if ( length($compressed) > length($return_string) ) { + write_file( $output_dir . '/extend_return', $return_string ); + } else { + write_file( $output_dir . '/extend_return', $compressed ); + } + + $j->pretty(1); + $j->canonical(1); + $return_string = $j->encode( $opts{to_return} ); + + write_file( $output_dir . 
'/pretty', $return_string ); + + print $return_string; + + exit $opts{to_return}->{error}; +} ## end sub finish + +my $to_return = { + data => { + mode => 'single', + totals => { + total_chunks => 0, + total_csize => 0, + total_size => 0, + total_unique_chunks => 0, + unique_csize => 0, + unique_size => 0, + locked => 0, + time_since_last_modified => undef, + errored => 0, + locked_for => undef, + }, + repos => {}, + }, + version => 1, + error => 0, + errorString => '', +}; + +# attempt to read in the config +my $config; +eval { + my $raw_config = read_file($config_file); + ($config) = Config::Tiny->read_string($raw_config); +}; +if ($@) { + $to_return->{error} = 1; + $to_return->{errorString} = 'Failed reading config file "' . $config_file . '"... ' . $@; + finish( to_return => $to_return ); +} + +if ( !defined( $config->{_}{mode} ) ) { + $config->{_}{mode} = 'single'; +} elsif ( $config->{_}{mode} ne 'single' && $config->{_}{mode} ne 'multi' ) { + $to_return->{error} = 2; + $to_return->{errorString} = '"' . $config->{_}{mode} . '" mode is not set to single or multi'; + finish( to_return => $to_return ); +} + +# get a list of repos to use +my @repos; +if ( $config->{_}{mode} eq 'single' ) { + # if single, just create a single repo + push( @repos, 'single' ); + $config->{single} = {}; + + # make sure we have passcommand or passphrase with passphrase being used as the default + if ( !defined( $config->{_}{passcommand} ) && !defined( $config->{_}{passphrase} ) ) { + $to_return->{error} = 3; + $to_return->{errorString} = 'Neither passcommand or passphrase defined'; + finish( to_return => $to_return ); + } elsif ( $config->{_}{passphrase} ) { + $config->{single}{passphrase} = $config->{_}{passphrase}; + } elsif ( $config->{_}{passcommand} ) { + $config->{single}{passcommand} = $config->{_}{passcommand}; + } + + # make sure have a repo specified + if ( !defined( $config->{_}{repo} ) ) { + $to_return->{error} = 4; + $to_return->{errorString} = 'repo is not defined'; + finish( to_return => $to_return ); + } + $config->{single}{repo} = $config->{_}{repo}; + +} else { + # we don't want _ as that is the root of the ini file + @repos = grep( !/^\_$/, keys( %{$config} ) ); +} + +my @totals + = ( 'total_chunks', 'total_csize', 'total_size', 'total_unique_chunks', 'unique_csize', 'unique_size', 'locked' ); +my @stats = ( 'total_chunks', 'total_csize', 'total_size', 'total_unique_chunks', 'unique_csize', 'unique_size' ); + +foreach my $repo (@repos) { + my $process = 1; + + # unset borg pass bits + delete( $ENV{BORG_PASSPHRASE} ); + delete( $ENV{BORG_PASSCOMMAND} ); + + my $repo_info = { + total_chunks => 0, + total_csize => 0, + total_size => 0, + total_unique_chunks => 0, + unique_csize => 0, + unique_size => 0, + locked => 0, + time_since_last_modified => undef, + error => undef, + locked_for => undef, + }; + + if ( !defined( $config->{$repo}{passcommand} ) && !defined( $config->{$repo}{passphrase} ) ) { + $to_return->{error} = 3; + $to_return->{errorString} + = $to_return->{errorString} . "\n" . 'Neither passcommand or passphrase defined for ' . $repo; + $process = 0; + } + + if ( !defined( $config->{$repo}{repo} ) ) { + $to_return->{error} = 4; + $to_return->{errorString} = $to_return->{errorString} . "\n" . 'repo is not defined for ' . 
$repo; + $process = 0; + } + + if ($process) { + my ( $dev, $ino, $mode, $nlink, $uid, $gid, $rdev, $size, $atime, $mtime, $ctime, $blksize, $blocks ) + = stat( $config->{$repo}{repo} ); + + my $time_diff = time - $mtime; + $repo_info->{time_since_last_modified} = $time_diff; + + # if we don't have a largest time diff or if it is larger than then + # the old one save the time diff + if ( !defined( $to_return->{data}{totals}{time_since_last_modified} ) + || $to_return->{data}{totals}{time_since_last_modified} < $time_diff ) + { + $to_return->{data}{totals}{time_since_last_modified} = $time_diff; + } + + if ( defined( $config->{$repo}{passcommand} ) ) { + $ENV{BORG_PASSCOMMAND} = $config->{$repo}{passcommand}; + } else { + $ENV{BORG_PASSPHRASE} = $config->{$repo}{passphrase}; + } + + my $command = 'borg info ' . shell_quote( $config->{$repo}{repo} ) . ' --json 2>&1'; + my $output_raw = `$command`; + + my $info; + eval { $info = decode_json($output_raw); }; + if ($@) { + my $error = $@; + if ( $output_raw =~ /lock.*lock\.exclusive/ ) { + $repo_info->{locked} = 1; + + my $lock_file = $config->{$repo}{repo} . '/lock.exclusive'; + ( $dev, $ino, $mode, $nlink, $uid, $gid, $rdev, $size, $atime, $mtime, $ctime, $blksize, $blocks ) + = stat($lock_file); + $repo_info->{locked_for} = time - $ctime; + } else { + $repo_info->{error} = $error; + } + } else { + if ( defined( $info->{cache} ) && defined( $info->{cache}{stats} ) ) { + for my $stat (@stats) { + $repo_info->{$stat} = $info->{cache}{stats}{$stat}; + } + } + } + + for my $total (@totals) { + $to_return->{data}{totals}{$total} = $to_return->{data}{totals}{$total} + $repo_info->{$total}; + } + + if ( defined( $repo_info->{error} ) ) { + $to_return->{data}{totals}{errored}++; + } + + if ( !defined( $to_return->{data}{totals}{locked_for} ) + || $to_return->{data}{totals}{locked_for} < $repo_info->{locked_for} ) + { + $to_return->{data}{totals}{locked_for} = $repo_info->{locked_for}; + } + } ## end if ($process) + + $to_return->{data}{repos}{$repo} = $repo_info; +} ## end foreach my $repo (@repos) + +finish( to_return => $to_return ); From f6d6ff5b88bd47e738754fe72c4a4e5eb4e78d08 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Sun, 19 Nov 2023 15:31:32 -0600 Subject: [PATCH 269/332] borgbackup mtime fix for repo (#496) --- snmp/borgbackup | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/borgbackup b/snmp/borgbackup index d20c49ce7..c6c88fb11 100755 --- a/snmp/borgbackup +++ b/snmp/borgbackup @@ -381,7 +381,7 @@ foreach my $repo (@repos) { if ($process) { my ( $dev, $ino, $mode, $nlink, $uid, $gid, $rdev, $size, $atime, $mtime, $ctime, $blksize, $blocks ) - = stat( $config->{$repo}{repo} ); + = stat( $config->{$repo}{repo} . '/nonce' ); my $time_diff = time - $mtime; $repo_info->{time_since_last_modified} = $time_diff; From 0c18d34fd1b6eb51d4ef6785967193f149930586 Mon Sep 17 00:00:00 2001 From: Dan Langille Date: Sat, 16 Dec 2023 21:53:04 -0500 Subject: [PATCH 270/332] Update URL for ZFS (#498) Correct the URL --- snmp/zfs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/zfs b/snmp/zfs index d80e73e2e..32800e688 100755 --- a/snmp/zfs +++ b/snmp/zfs @@ -4,7 +4,7 @@ This is a SNMP extend for ZFS for use with LibreNMS. -For more information, see L. +For more information, see L. 
=head1 SWITCHES From 0f7ad4d23e95bb7404345f322c906c937d24e4c3 Mon Sep 17 00:00:00 2001 From: Peter Childs Date: Sun, 17 Dec 2023 13:26:21 +1030 Subject: [PATCH 271/332] ensure returned values do not exceed max of RRD file (#497) --- snmp/mysql | 2 ++ 1 file changed, 2 insertions(+) diff --git a/snmp/mysql b/snmp/mysql index 530637352..d7ac4f09a 100755 --- a/snmp/mysql +++ b/snmp/mysql @@ -158,6 +158,8 @@ if (!isset($called_by_script_server)) { $output[] = $item; } list($short, $val) = explode(":", $item); + # ensure returned values do not exceed max limits in RRD + $val = $val % ( 124999999999 + 1 ); echo(strtolower($short).":".strtolower($val)."\n"); } debug(array("Final result", $output)); From f3c31fbdf88f5137f9ac8f562753b701e176ab1b Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Tue, 19 Dec 2023 21:10:57 -0600 Subject: [PATCH 272/332] fix for issues/501 and do assorted cleanup while there (#502) --- snmp/zfs | 197 ++++++++++++++++++++++++++++++++----------------------- 1 file changed, 116 insertions(+), 81 deletions(-) diff --git a/snmp/zfs b/snmp/zfs index 32800e688..6a14acdad 100755 --- a/snmp/zfs +++ b/snmp/zfs @@ -1,8 +1,14 @@ #!/usr/bin/env perl -=head1 DESCRIPTION +=head1 NAME + +zfs - LibreNMS JSON SNMP extend for gathering backups for ZFS -This is a SNMP extend for ZFS for use with LibreNMS. +=head1 VERSION + +0.1.0 + +=head1 DESCRIPTION For more information, see L. @@ -29,11 +35,11 @@ in the return. The requirements may be installed via CPAN like below for Linux. - apt-get install cpanminus zlib1g-dev + apt-get install cpanminus File::Slurp MIME::Base64 JSON Or on FreeBSD via pkg... - pkg install p5-JSON p5-File-Slurp p5-MIME-Base64 p5-Gzip-Faster + pkg install p5-JSON p5-File-Slurp p5-MIME-Base64 =cut @@ -62,31 +68,59 @@ Or on FreeBSD via pkg... # Many thanks to Ben Rockwood, Jason J. 
Hellenthal, and Martin Matuska # for zfs-stats and figuring out the math for all the stats +# +# Thanks to dlangille for pointing out the issues on 14 and Bobzikwick figuring out the fix in issues/501 use strict; use warnings; use JSON; -use Getopt::Std; +use Getopt::Long; use File::Slurp; use MIME::Base64; -use Gzip::Faster; +use IO::Compress::Gzip qw(gzip $GzipError); +use Pod::Usage; -$Getopt::Std::STANDARD_HELP_VERSION = 1; +#$Getopt::Std::STANDARD_HELP_VERSION = 1; sub main::VERSION_MESSAGE { - print "FreeBSD ZFS v3 stats extend 0.0.1\n"; + pod2usage( -exitval => 255, -verbose => 99, -sections => qw(VERSION), -output => \*STDOUT, ); } sub main::HELP_MESSAGE { - + pod2usage( -exitval => 255, -verbose => 2, -output => \*STDOUT, ); } #this will be dumped to json at the end my %tojson; #gets the options -my %opts = (); -getopts( 'pbs', \%opts ); +my %opts; +my $opts_p; +my $opts_b; +my $opts_s; +my $version; +my $help; +#getopts( 'pbs', \%opts ); +GetOptions( + p => \$opts_p, + b => \$opts_b, + s => \$opts_s, + v => \$version, + version => \$version, + h => \$help, + help => \$help, +); +$opts{p} = $opts_p; +$opts{b} = $opts_b; +$opts{s} = $opts_s; + +if ($version) { + pod2usage( -exitval => 255, -verbose => 99, -sections => qw(VERSION), -output => \*STDOUT, ); +} + +if ($help) { + pod2usage( -exitval => 255, -verbose => 2, -output => \*STDOUT, ); +} #process each pool and shove them into JSON my $zpool_output = `/sbin/zpool list -pH`; @@ -118,39 +152,33 @@ while ( defined( $pools[$pools_int] ) ) { $newPool{dedup}, $newPool{health}, $newPool{altroot} ) = split( /\,/, $pool ); - if ($opts{s}) { + if ( $opts{s} ) { $newPool{status} = `zpool status $newPool{name}`; } if ( $newPool{health} eq 'ONLINE' ) { $newPool{health} = 0; $tojson{online}++; - } - elsif ( $newPool{health} eq 'DEGRADED' ) { + } elsif ( $newPool{health} eq 'DEGRADED' ) { $newPool{health} = 1; $tojson{health} = 0; $tojson{degraded}++; - } - elsif ( $newPool{health} eq 'OFFLINE' ) { + } elsif ( $newPool{health} eq 'OFFLINE' ) { $newPool{health} = 2; $tojson{offline}++; - } - elsif ( $newPool{health} eq 'FAULTED' ) { + } elsif ( $newPool{health} eq 'FAULTED' ) { $newPool{health} = 3; $tojson{health} = 0; $tojson{faulted}++; - } - elsif ( $newPool{health} eq 'UNAVAIL' ) { + } elsif ( $newPool{health} eq 'UNAVAIL' ) { $newPool{health} = 4; $tojson{health} = 0; $tojson{unavail}++; - } - elsif ( $newPool{health} eq 'REMOVED' ) { + } elsif ( $newPool{health} eq 'REMOVED' ) { $newPool{health} = 5; $tojson{health} = 0; $tojson{removed}++; - } - else { + } else { $newPool{health} = 6; $tojson{health} = 0; $tojson{unknown}++; @@ -188,7 +216,7 @@ while ( defined( $pools[$pools_int] ) ) { push( @toShoveIntoJSON, \%newPool ); $pools_int++; -} +} ## end while ( defined( $pools[$pools_int] ) ) $tojson{pools} = \@toShoveIntoJSON; # @@ -209,10 +237,9 @@ if ( $^O eq 'freebsd' ) { $var =~ s/^.*\.arcstats\.//; $stats_stuff->{$var} = $val; } - } + } ## end foreach my $stat (@sysctls_pull) -} -elsif ( $^O eq 'linux' ) { +} elsif ( $^O eq 'linux' ) { my @arcstats_lines = read_file('/proc/spl/kstat/zfs/arcstats'); foreach my $line (@arcstats_lines) { chomp($line); @@ -222,30 +249,30 @@ elsif ( $^O eq 'linux' ) { } # does not seem to exist for me, but some of these don't seem to be created till needed -if ( !defined( $stats_stuff->{"recycle_miss"} ) ) { - $stats_stuff->{"recycle_miss"} = 0; +if ( !defined( $stats_stuff->{recycle_miss} ) ) { + $stats_stuff->{recycle_miss} = 0; } ## ## ARC misc ## -$tojson{deleted} = $stats_stuff->{"deleted"}; 
-$tojson{evict_skip} = $stats_stuff->{"evict_skip"}; -$tojson{mutex_skip} = $stats_stuff->{'mutex_miss'}; -$tojson{recycle_miss} = $stats_stuff->{"recycle_miss"}; +$tojson{deleted} = $stats_stuff->{deleted}; +$tojson{evict_skip} = $stats_stuff->{evict_skip}; +$tojson{mutex_skip} = $stats_stuff->{mutex_miss}; +$tojson{recycle_miss} = $stats_stuff->{recycle_miss}; ## ## ARC size ## -my $target_size_percent = $stats_stuff->{"c"} / $stats_stuff->{"c_max"} * 100; -my $arc_size_percent = $stats_stuff->{"size"} / $stats_stuff->{"c_max"} * 100; -my $target_size_adaptive_ratio = $stats_stuff->{"c"} / $stats_stuff->{"c_max"}; -my $min_size_percent = $stats_stuff->{"c_min"} / $stats_stuff->{"c_max"} * 100; - -$tojson{arc_size} = $stats_stuff->{"size"}; -$tojson{target_size_max} = $stats_stuff->{"c_max"}; -$tojson{target_size_min} = $stats_stuff->{"c_min"}; -$tojson{target_size} = $stats_stuff->{"c"}; +my $target_size_percent = $stats_stuff->{c} / $stats_stuff->{c_max} * 100; +my $arc_size_percent = $stats_stuff->{size} / $stats_stuff->{c_max} * 100; +my $target_size_adaptive_ratio = $stats_stuff->{c} / $stats_stuff->{c_max}; +my $min_size_percent = $stats_stuff->{c_min} / $stats_stuff->{c_max} * 100; + +$tojson{arc_size} = $stats_stuff->{size}; +$tojson{target_size_max} = $stats_stuff->{c_max}; +$tojson{target_size_min} = $stats_stuff->{c_min}; +$tojson{target_size} = $stats_stuff->{c}; $tojson{target_size_per} = $target_size_percent; $tojson{arc_size_per} = $arc_size_percent; $tojson{target_size_arat} = $target_size_adaptive_ratio; @@ -255,39 +282,47 @@ $tojson{min_size_per} = $min_size_percent; ## ARC size breakdown ## my $mfu_size; +if ( defined( $stats_stuff->{mfu_size} ) ) { + $mfu_size = $stats_stuff->{mfu_size}; +} my $recently_used_percent; my $frequently_used_percent; -if ( $stats_stuff->{"size"} >= $stats_stuff->{"c"} ) { - $mfu_size = $stats_stuff->{"size"} - $stats_stuff->{"p"}; - $recently_used_percent = $stats_stuff->{"p"} / $stats_stuff->{"size"} * 100; - $frequently_used_percent = $mfu_size / $stats_stuff->{"size"} * 100; +if ( !defined( $stats_stuff->{p} ) && defined( $stats_stuff->{mfu_size} ) ) { + $stats_stuff->{p} = $stats_stuff->{size} - $stats_stuff->{mfu_size}; } -else { - $mfu_size = $stats_stuff->{"c"} - $stats_stuff->{"p"}; - $recently_used_percent = $stats_stuff->{"p"} / $stats_stuff->{"c"} * 100; - $frequently_used_percent = $mfu_size / $stats_stuff->{"c"} * 100; +if ( $stats_stuff->{size} >= $stats_stuff->{c} ) { + if ( !defined($mfu_size) ) { + $mfu_size = $stats_stuff->{size} - $stats_stuff->{p}; + } + $recently_used_percent = $stats_stuff->{p} / $stats_stuff->{size} * 100; + $frequently_used_percent = $mfu_size / $stats_stuff->{size} * 100; +} else { + if ( !defined($mfu_size) ) { + $mfu_size = $stats_stuff->{c} - $stats_stuff->{p}; + } + $recently_used_percent = $stats_stuff->{p} / $stats_stuff->{c} * 100; + $frequently_used_percent = $mfu_size / $stats_stuff->{c} * 100; } -$tojson{p} = $stats_stuff->{"p"}; +$tojson{p} = $stats_stuff->{p}; ## ## ARC efficiency ## -my $arc_hits = $stats_stuff->{"hits"}; -my $arc_misses = $stats_stuff->{"misses"}; -my $demand_data_hits = $stats_stuff->{"demand_data_hits"}; -my $demand_data_misses = $stats_stuff->{"demand_data_misses"}; -my $demand_metadata_hits = $stats_stuff->{"demand_metadata_hits"}; -my $demand_metadata_misses = $stats_stuff->{"demand_metadata_misses"}; -my $mfu_ghost_hits = $stats_stuff->{"mfu_ghost_hits"}; -my $mfu_hits = $stats_stuff->{"mfu_hits"}; -my $mru_ghost_hits = $stats_stuff->{"mru_ghost_hits"}; -my 
$mru_hits = $stats_stuff->{"mru_hits"}; -my $prefetch_data_hits = $stats_stuff->{"prefetch_data_hits"}; -my $prefetch_data_misses = $stats_stuff->{"prefetch_data_misses"}; -my $prefetch_metadata_hits = $stats_stuff->{"prefetch_metadata_hits"}; -my $prefetch_metadata_misses = $stats_stuff->{"prefetch_metadata_misses"}; - +my $arc_hits = $stats_stuff->{hits}; +my $arc_misses = $stats_stuff->{misses}; +my $demand_data_hits = $stats_stuff->{demand_data_hits}; +my $demand_data_misses = $stats_stuff->{demand_data_misses}; +my $demand_metadata_hits = $stats_stuff->{demand_metadata_hits}; +my $demand_metadata_misses = $stats_stuff->{demand_metadata_misses}; +my $mfu_ghost_hits = $stats_stuff->{mfu_ghost_hits}; +my $mfu_hits = $stats_stuff->{mfu_hits}; +my $mru_ghost_hits = $stats_stuff->{mru_ghost_hits}; +my $mru_hits = $stats_stuff->{mru_hits}; +my $prefetch_data_hits = $stats_stuff->{prefetch_data_hits}; +my $prefetch_data_misses = $stats_stuff->{prefetch_data_misses}; +my $prefetch_metadata_hits = $stats_stuff->{prefetch_metadata_hits}; +my $prefetch_metadata_misses = $stats_stuff->{prefetch_metadata_misses}; ## ## ARC efficiency, common ## @@ -315,8 +350,7 @@ if ( $prefetch_data_total != 0 ) { my $anon_hits_percent; if ( $anon_hits != 0 ) { $anon_hits_percent = $anon_hits / $arc_hits * 100; -} -else { +} else { $anon_hits_percent = 0; } @@ -395,34 +429,35 @@ $tojson{l2_access_total} = $tojson{l2_hits} + $tojson{l2_misses}; ## my %head_hash; -$head_hash{'data'} = \%tojson; -$head_hash{'version'} = 3; -$head_hash{'error'} = 0; -$head_hash{'errorString'} = ''; +$head_hash{data} = \%tojson; +$head_hash{version} = 3; +$head_hash{error} = 0; +$head_hash{errorString} = ''; my $j = JSON->new; -if ( $opts{p} && ! $opts{b} ) { +if ( $opts{p} && !$opts{b} ) { $j->pretty(1); } my $return_string = $j->encode( \%head_hash ); -if ( !$opts{p} && ! $opts{b} ) { - print $return_string."\n"; +if ( !$opts{p} && !$opts{b} ) { + print $return_string. "\n"; exit 0; -}elsif (!$opts{b}) { +} elsif ( !$opts{b} ) { print $return_string; exit 0; } -my $compressed = encode_base64( gzip($return_string) ); +my $compressed_string; +gzip \$return_string => \$compressed_string; +my $compressed = encode_base64($compressed_string); $compressed =~ s/\n//g; $compressed = $compressed . "\n"; if ( length($compressed) > length($return_string) ) { - print $return_string."\n"; -} -else { + print $return_string. "\n"; +} else { print $compressed; } From 040b755964bf1827341978aa9a37df18182d2bf6 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Mon, 8 Jan 2024 16:59:01 -0600 Subject: [PATCH 273/332] add ifAlias (#503) --- snmp/ifAlias | 187 +++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 187 insertions(+) create mode 100755 snmp/ifAlias diff --git a/snmp/ifAlias b/snmp/ifAlias new file mode 100755 index 000000000..e31e09dce --- /dev/null +++ b/snmp/ifAlias @@ -0,0 +1,187 @@ +#!/bin/sh +# (c) 2013-2017, f0o@devilcode.org, olb@nebkha.net +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+DISTRO_BIN="/usr/bin/distro"
+BASE='.1.3.6.1.2.1.31.1.1.1.18'
+GET_TYPE="$1"
+GET_OID="$2"
+
+UNAME="$(/usr/bin/uname)"
+
+if [ "$(echo "${UNAME}" | grep -ci 'bsd$')" -eq 1 ]; then
+    UNAME="BSD"
+fi
+
+# cache ip link output
+if [ "${UNAME}" = 'Linux' ]; then
+    IP_LINK="$(ip link)"
+else
+    IFCONFIG="$(ifconfig)"
+fi
+
+# Get the interface id from the GET_OID script parameter, depending on the get
+# type, -g or -n.
+#
+# snmpd specifies two behaviors: GETNEXT and GET.
+#
+# script -g <oid>
+#
+# <oid>: Should return OID value
+#
+# script -n <oid>
+#
+# <oid>: Should return next OID value
+#
+# Note that interface ids are not necessarily consecutive.
+# We need to find the next interface id (which is not necessarily n+1).
+#
+interface_id()
+{
+    N=
+    L=
+    ID="${GET_OID#"$BASE".}"
+
+    case "$GET_TYPE" in
+        -g)
+            echo "$ID"
+            return 0
+            ;;
+        -n)
+            if [ "$ID" = "$BASE" ]
+            then
+                if [ "${UNAME}" = 'Linux' ]; then
+                    # find the first iface_id
+                    echo "$IP_LINK" | grep -oE "^[0-9]+:" | head -n 1 | cut -d':' -f 1
+                    return 0
+                else
+                    echo "${IFCONFIG}" | head -n 1 | cut -d: -f 1
+                    return 0
+                fi
+            else
+                # find the next iface_id
+                if [ "${UNAME}" = 'Linux' ]; then
+                    for N in $(echo "$IP_LINK" | grep -oE "^[0-9]+:" | cut -d':' -f 1)
+                    do
+                        if [ "$L" = "$ID" ] || [ -z "$ID" ]; then
+                            printf '%s' "$N"
+                            return 0
+                        fi
+                        L="$N"
+                    done
+                else
+                    for N in $(echo "${IFCONFIG}" | grep -E '^[A-Za-z]+' | cut -d: -f1 | cat -n -b | sed 's/^ *//' | sed 's/[\t\ ].*//'); do
+                        if [ "$L" = "$ID" ] || [ -z "$ID" ]; then
+                            printf '%s' "$N"
+                            return 0
+                        fi
+                        L="$N"
+                    done
+                fi
+            fi
+            ;;
+    esac
+    return 1
+}
+
+interface_name()
+{
+    if [ "${UNAME}" = 'Linux' ]; then
+        echo "$IP_LINK" | grep -oE "^$1: [^:@ ]*" | cut -d " " -f 2
+    else
+        echo "${IFCONFIG}" | grep -E '^[A-Za-z]+' | cut -d: -f1 | head -n "$1" | tail -n 1
+    fi
+}
+
+alias_from_interfaces_config_file()
+{
+    CONFIG_FILE=
+
+    if [ -x "$DISTRO_BIN" ]; then
+        if [ "${UNAME}" = 'Linux' ]; then
+            DISTRO_VAR="$($DISTRO_BIN | cut -d " " -f 1)"
+        else
+            DISTRO_VAR="${UNAME}"
+        fi
+
+        case "${DISTRO_VAR}" in
+            Debian)
+                CONFIG_FILE="/etc/network/interfaces"
+                ;;
+            Ubuntu)
+                CONFIG_FILE="/etc/network/interfaces"
+                ;;
+            Gentoo)
+                CONFIG_FILE="/etc/conf.d/net"
+                ;;
+            CentOS|RedHat|SuSE|Mandriva|Mandrake)
+                CONFIG_FILE="/etc/sysconfig/network-scripts/ifcfg-$1"
+                ;;
+            Archlinux)
+                CONFIG_FILE="/etc/conf.d/net-conf-$1"
+                ;;
+            BSD)
+                CONFIG_FILE="/etc/rc.conf"
+                ;;
+        esac
+    fi
+    if [ "$CONFIG_FILE" ]; then
+        # echo squashes possible multi line replies to a single line
+        FOUND_LINES="$(grep -i "^# $1:" $CONFIG_FILE | sed "s/^# $1: //i")"
+        if [ "$(echo "${FOUND_LINES}" | wc -l)" -ge 1 ]; then
+            echo "${FOUND_LINES}"
+            return 0
+        fi
+    fi
+    if [ "${UNAME}" = "Linux" ] && [ -d '/etc/network/interfaces.d' ]; then
+        if [ "$(find /etc/network/interfaces.d/ -type f | wc -l)" -ge 1 ]; then
+            # echo squashes possible multi line replies to a single line
+            TO_ECHO_AND_MAKE_LINT_HAPPY="$(grep -r -i "^# $1:" '/etc/network/interfaces.d/' | sed "s/^# $1: //i")"
+            echo "${TO_ECHO_AND_MAKE_LINT_HAPPY}"
+        fi
+    fi
+}
+
+alias_from_ip_link()
+{
+    case "${UNAME}" in
+        Linux)
+            ip link show "$1" | grep -e "^[[:space:]]*alias[[:space:]]" | sed -e 's/^[[:space:]]*alias //'
+            ;;
+        BSD)
+            if [ "$(ifconfig "$1" | grep 'description:' | head -n 1 | cut -d: -f 2- | wc -l)" -eq 1 ]; then
+                ifconfig "$1" | grep 'description:' | head -n 1 | cut -d: -f 2- | sed 's/^ //'
+            else
+                echo "$1"
+            fi
+            ;;
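+        # other OSes: no known way to look up a description, so fall back
+        # to the interface name itself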
+        *) echo "$1" ;;
+    esac
+}
+
+IFACE_ID="$(interface_id)"
+[ "$IFACE_ID" ] || exit
+
+IFACE="$(interface_name "$IFACE_ID")"
+
+VALUE=
+# we first try to get the alias from the interface config file
+[ "$VALUE" ] || VALUE="$(alias_from_interfaces_config_file "$IFACE")"
+# then from the ip link show $IFACE output
+[ "$VALUE" ] || VALUE="$(alias_from_ip_link "$IFACE")"
+
+echo "${BASE}.${IFACE_ID}"
+echo "string"
+echo "$VALUE"
+exit 0
From fa4d96c9cef28f8c1b15b5a214757d0a0f67f7a6 Mon Sep 17 00:00:00 2001
From: SourceDoctor
Date: Thu, 18 Jan 2024 02:09:24 +0100
Subject: [PATCH 274/332] replace nan with 0 (#505)

* replace nan with 0

replace nan with 0 if the pool has a size of zero

* .
---
 snmp/dhcp.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/snmp/dhcp.py b/snmp/dhcp.py
index a43b38760..b95747722 100755
--- a/snmp/dhcp.py
+++ b/snmp/dhcp.py
@@ -121,7 +121,7 @@
                     "network": p[0],
                     "max": p[1],
                     "cur": p[2],
-                    "percent": p[3],
+                    "percent": 0 if p[3] == "nan" else p[3],
                 }
             )
             continue
From f8a8dc8143b9b758699b2db454a015300b7e9da2 Mon Sep 17 00:00:00 2001
From: tevkar
Date: Thu, 22 Feb 2024 16:27:36 +0100
Subject: [PATCH 275/332] Deliver output for a specific memcached instance
 (#504)

To fix issues around 'no memcache output'. Associated with the relevant branch for librenms.
---
 snmp/memcached | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/snmp/memcached b/snmp/memcached
index f0d7844ee..c8f79d07e 100755
--- a/snmp/memcached
+++ b/snmp/memcached
@@ -11,12 +11,16 @@ if (! class_exists('Memcached')) {
     exit;
 }
 
+$server='localhost';
+$port=11211;
 $m = new Memcached();
-$m->addServer('localhost', 11211);
+$m->addServer($server, $port);
 
 echo json_encode(array(
-    'data' => $m->getStats(),
+    // 'data' => $m->getStats(),
+    'data' => ($m->getStats())["$server:$port"],
     'error' => $m->getLastErrorCode(),
     'errorString' => $m->getLastErrorMessage(),
     'version' => '1.1',
 ));
+
From a729c36e51591db9b2bea1e0440730d98f36a67e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?F=C3=A9lix=20Bouynot?=
Date: Thu, 22 Feb 2024 16:32:24 +0100
Subject: [PATCH 276/332] Fix for systems with more than 4 GPU and recent
 nvidia-smi version (#506)
---
 snmp/nvidia | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/snmp/nvidia b/snmp/nvidia
index 8bb900f35..0495dc78a 100644
--- a/snmp/nvidia
+++ b/snmp/nvidia
@@ -17,14 +17,13 @@ sed='/usr/bin/env sed'
 # 0 1 43 3 2 0 0 2700 862 0 0 462 4 - - 0 26 3
 $nvidiasmi dmon -c 1 -s pucvmet | $grep -v ^# | $sed 's/^ *//' | $sed 's/ */,/g' | $sed 's/-/0/g'
 
-lines=$($nvidiasmi dmon -c 1 -s pucvmet | $grep -v ^# | $sed 's/^ *//' | $sed 's/ */,/g' | $sed 's/-/0/g' | wc -l)
+gpu=$($nvidiasmi dmon -c 1 -s pucvmet | $grep -v ^# | $sed 's/^ *//' | $sed 's/ */,/g' | $sed 's/-/0/g' | wc -l)
 
 # if we are less than 5 then all GPUs were printed
-if [ "$lines" -lt 5 ]; then
+if [ "$gpu" -lt 5 ]; then
 	exit 0;
 fi
 
-gpu=5
 loop=1
 while [ $loop -eq 1 ]
 do
From 7a50deb3989955863205c0a8ef03c5e826ec03f3 Mon Sep 17 00:00:00 2001
From: Peca Nesovanovic <59750439+Npeca75@users.noreply.github.com>
Date: Mon, 26 Feb 2024 17:48:21 +0100
Subject: [PATCH 277/332] [ups-nut] Add temperature readout (#508)

* Add temperature readout

expose Battery Temperature value

* Update ups-nut.sh
---
 snmp/ups-nut.sh | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/snmp/ups-nut.sh b/snmp/ups-nut.sh
index e8dd3a824..b75580a4b 100755
--- a/snmp/ups-nut.sh
+++ b/snmp/ups-nut.sh
@@ -39,3 +39,7 @@
 	fi
 done
 
+UPSTEMP="ups\.temperature: [0-9.]+"
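+# Pull the ups.temperature reading out of the polled UPS data in $TMP; the
+# check below prints "Unknown" when the UPS does not report a temperature.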
+OUT=$(echo "$TMP" | grep -Eo "$UPSTEMP" | awk '{print $2}' | LANG=C sort | head -n 1)
+[ -n "$OUT" ] && echo "$OUT" || echo "Unknown"
+
From 109961ac84cba83826ccc1834e29e58f946ef90b Mon Sep 17 00:00:00 2001
From: Anton Lundin
Date: Tue, 27 Feb 2024 12:52:14 +0100
Subject: [PATCH 278/332] Add a plain bash version of memcached helper

This is for servers which don't have php installed.
---
 snmp/memcached.sh | 49 +++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 49 insertions(+)
 create mode 100755 snmp/memcached.sh

diff --git a/snmp/memcached.sh b/snmp/memcached.sh
new file mode 100755
index 000000000..627a2f730
--- /dev/null
+++ b/snmp/memcached.sh
@@ -0,0 +1,49 @@
+#!/bin/bash
+
+declare -A stats
+
+# talk to the local memcached directly over bash's /dev/tcp pseudo-device
+exec 200<>/dev/tcp/localhost/11211
+echo "stats" >&200
+echo "quit" >&200
+
+while read -r pre var val ; do
+	if [ "$pre" = "END" ] ; then
+		break
+	elif [ "$pre" = "STAT" ] ; then
+		val="${val/$'\r'/}"
+		if [ "$var" = "rusage_system" ] || [ "$var" = "rusage_user" ] ; then
+			val=$(bc -l <<< "scale=0 ; ($val * 1000)/1")
+			var+="_microseconds"
+		fi
+		stats["$var"]=$val
+	fi
+done <&200
+
+exec 200>&-
+
+cat <<EOF
+{
+"data": {
+EOF
+
+for var in "${!stats[@]}" ; do
+	val=${stats[$var]}
+	# integer values are emitted bare, everything else as a JSON string
+	if [ "$val" -eq "$val" ] 2>/dev/null ; then
+		#echo -nE "s:${#var}:\"$var\";i:$val;"
+		echo "\"$var\": $val,"
+	else
+		#echo -nE "s:${#var}:\"$var\";s:${#val}:\"$val\";"
+		echo "\"$var\": \"$val\","
+	fi
+done
+echo '"dummy":"value"'
+
+cat <<EOF
+},
+"error": 0,
+"errorString": "",
+"version": "1.1"
+}
+EOF
From  Mon Sep 17 00:00:00 2001
From: "Zane C. Bowers-Hadley"
Date: Sat, 2 Mar 2024 09:31:40 -0600
Subject: [PATCH 279/332] initial nfs stuff for freebsd done
---
 snmp/nfs | 622 +++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 622 insertions(+)
 create mode 100755 snmp/nfs

diff --git a/snmp/nfs b/snmp/nfs
new file mode 100755
index 000000000..93e88ceca
--- /dev/null
+++ b/snmp/nfs
@@ -0,0 +1,622 @@
+#!/usr/bin/env perl
+
+##
+##
+## General Notes
+##
+##
+#
+# FreeBSD is used as the design basis, given that it produces better stats
+# and they are actually documented.
+
+###
+###
+### Linux Notes
+###
+###
+#
+# What the following map to, or whether there is a FreeBSD equivalent, is not clear.
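+# (The names below are the column labels printed by Linux's nfsstat.)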
+# +# fs_locations +# test_stateid +# fsid_present +# open_conf +# confirm +# null + +use strict; +use warnings; +use Getopt::Std; +use Getopt::Long; +use File::Slurp; +use MIME::Base64; +use IO::Compress::Gzip qw(gzip $GzipError); +use Pod::Usage; +use JSON; + +#the version of returned data +my $VERSION = 1; + +my $pretty; +my $cache_base = '/var/cache/nfs.json'; +my $write; +my $compress = 1; +my $version; +my $help; +GetOptions( + p => \$pretty, + 'b=s' => \$compress, + 'o=s' => \$cache_base, + 'w' => \$write, + v => \$version, + version => \$version, + h => \$help, + help => \$help, +); + +if ($version) { + pod2usage( -exitval => 255, -verbose => 99, -sections => qw(VERSION), -output => \*STDOUT, ); + exit 255; +} + +if ($help) { + pod2usage( -exitval => 255, -verbose => 2, -output => \*STDOUT, ); + exit 255; +} + +#the data to return +my $to_return = { + 'version' => $VERSION, + 'error' => '0', + 'errorString' => '', +}; +my $data = { + is_client => 0, + is_server => 0, + mounts => [], + mounted_by => [], + stats => { + client_rpc_null => 0, + client_rpc_Getattr => 0, + client_rpc_Setattr => 0, + client_rpc_Lookup => 0, + client_rpc_Readlink => 0, + client_rpc_Read => 0, + client_rpc_Write => 0, + client_rpc_Create => 0, + client_rpc_Remove => 0, + client_rpc_Rename => 0, + client_rpc_Link => 0, + client_rpc_Symlink => 0, + client_rpc_Mkdir => 0, + client_rpc_Rmdir => 0, + client_rpc_Readdir => 0, + client_rpc_RdirPlus => 0, + client_rpc_Access => 0, + client_rpc_Mknod => 0, + client_rpc_Fsstat => 0, + client_rpc_FSinfo => 0, + client_rpc_pathConf => 0, + client_rpc_Commit => 0, + client_rpc_SetClId => 0, + client_rpc_SetClIdCf => 0, + client_rpc_Lock => 0, + client_rpc_LockT => 0, + client_rpc_LockU => 0, + client_rpc_Open => 0, + client_rpc_OpenCfr => 0, + client_rpc_OpenDownGr => 0, + client_rpc_Close => 0, + client_rpc_RelLckOwn => 0, + client_rpc_FreeStateID => 0, + client_rpc_PutRootFH => 0, + client_rpc_DelegRet => 0, + client_rpc_GetAcl => 0, + client_rpc_SetAcl => 0, + client_rpc_ExchangeId => 0, + client_rpc_CreateSess => 0, + client_rpc_DestroySess => 0, + client_rpc_DestroyClId => 0, + client_rpc_LayoutGet => 0, + client_rpc_GetDevInfo => 0, + client_rpc_LayoutCommit => 0, + client_rpc_LayoutReturn => 0, + client_rpc_ReclaimCompl => 0, + client_rpc_ReadDataS => 0, + client_rpc_WriteDataS => 0, + client_rpc_CommitDataS => 0, + client_rpc_OpenLayout => 0, + client_rpc_CreateLayout => 0, + client_rpc_BindConnSess => 0, + client_rpc_LookupOpen => 0, + client_rpc_IOAdvise => 0, + client_rpc_Allocate => 0, + client_rpc_Copy => 0, + client_rpc_Seek => 0, + client_rpc_SeekDataS => 0, + client_rpc_GetExtattr => 0, + client_rpc_SetExtattr => 0, + client_rpc_RmExtattr => 0, + client_rpc_ListExtattr => 0, + client_rpc_Deallocate => 0, + client_rpc_LayoutError => 0, + client_OpenOwner => 0, + client_Opens => 0, + client_LockOwner => 0, + client_Locks => 0, + client_Delegs => 0, + client_LocalOwn => 0, + client_LocalOpen => 0, + client_LocalLown => 0, + client_LocalLock => 0, + client_Layouts => 0, + client_rpc_info_TimedOut => 0, + client_rpc_info_Invalid => 0, + client_rpc_info_X_Replies => 0, + client_rpc_info_Retries => 0, + client_rpc_info_Requests => 0, + client_cache_Attr_Hits => 0, + client_cache_Attr_Misses => 0, + client_cache_Lkup_Hits => 0, + client_cache_Lkup_Misses => 0, + client_cache_BioR_Hits => 0, + client_cache_BioR_Misses => 0, + client_cache_BioW_Hits => 0, + client_cache_BioW_Misses => 0, + client_cache_BioRL_Hits => 0, + client_cache_BioRL_Misses => 0, + 
client_cache_BioD_Hits => 0, + client_cache_BioD_Misses => 0, + client_cache_DirE_Hits => 0, + client_cache_DirE_Misses => 0, + server_Getattr => 0, + server_Setattr => 0, + server_Lookup => 0, + server_Readlink => 0, + server_Read => 0, + server_Write => 0, + server_Create => 0, + server_Remove => 0, + server_Rename => 0, + server_Link => 0, + server_Symlink => 0, + server_Mkdir => 0, + server_Rmdir => 0, + server_Readdir => 0, + server_RdirPlus => 0, + server_Access => 0, + server_Mknod => 0, + server_Fsstat => 0, + server_FSinfo => 0, + server_pathConf => 0, + server_Commit => 0, + server_LookupP => 0, + server_SetClId => 0, + server_SetClIdCf => 0, + server_Open => 0, + server_OpenAttr => 0, + server_OpenDwnGr => 0, + server_OpenCfrm => 0, + server_DelePurge => 0, + server_DelRet => 0, + server_GetFH => 0, + server_Lock => 0, + server_LockT => 0, + server_LockU => 0, + server_Close => 0, + server_Verify => 0, + server_NVerify => 0, + server_PutFH => 0, + server_PutPubFH => 0, + server_PutRootFH => 0, + server_Renew => 0, + server_RestoreFH => 0, + server_SaveFH => 0, + server_Secinfo => 0, + server_RelLockOwn => 0, + server_V4Create => 0, + server_BackChannelCt => 0, + server_BindConnToSes => 0, + server_ExchangeID => 0, + server_CreateSess => 0, + server_DestroySess => 0, + server_FreeStateID => 0, + server_GetDirDeleg => 0, + server_GetDevInfo => 0, + server_GetDevList => 0, + server_layoutCommit => 0, + server_LayoutGet => 0, + server_LayoutReturn => 0, + server_GetDirDeleg => 0, + server_GetDevInfo => 0, + server_GetDevList => 0, + server_layoutCommit => 0, + server_LayoutGet => 0, + server_LayoutReturn => 0, + server_SecInfNoName => 0, + server_Sequence => 0, + server_SetSSV => 0, + server_TestStateID => 0, + server_WantDeleg => 0, + server_DestroyClId => 0, + server_ReclaimCompl => 0, + server_Allocate => 0, + server_Copy => 0, + server_CopyNotify => 0, + server_Deallocate => 0, + server_IOAdvise => 0, + server_LayoutError => 0, + server_LayoutStats => 0, + server_OffloadCncl => 0, + server_OffloadStat => 0, + server_ReadPlus => 0, + server_Seek => 0, + server_WriteSame => 0, + server_Clone => 0, + server_GetExtattr => 0, + server_SetExtattr => 0, + server_ListExtattr => 0, + server_RmExtattr => 0, + server_Clients => 0, + server_OpenOwner => 0, + server_Opens => 0, + server_LockOwner => 0, + server_Locks => 0, + server_Delegs => 0, + server_Layouts => 0, + server_cache_Inprog => 0, + 'server_cache_Non-idem' => 0, + server_cache_Misses => 0, + server_cache_CacheSize => 0, + server_cache_TCPPeak => 0, + } +}; + +#### +#### +#### handle getting stats for FreeBSD +#### +#### +if ( $^O eq 'freebsd' ) { + my $output_raw = `nfsstat -E`; + my @output_split = split( /\n/, $output_raw ); + my $previous_line = ''; + foreach my $line (@output_split) { + if ( $previous_line =~ /Getattr\ +Setattr\ +Lookup\ +Readlink\ +Read\ +Write/ ) { + $line =~ s/^ +//; + ( + $data->{stats}{client_rpc_Getattr}, $data->{stats}{client_rpc_Setattr}, + $data->{stats}{client_rpc_Lookup}, $data->{stats}{client_rpc_Readlink}, + $data->{stats}{client_rpc_Read}, $data->{stats}{client_rpc_Write} + ) = split( / +/m, $line ); + } elsif ( $previous_line =~ /Create\ +Remove\ +Rename\ +Link\ +Symlink\ +Mkdir/ ) { + $line =~ s/^ +//; + ( + $data->{stats}{client_rpc_Create}, $data->{stats}{client_rpc_Remove}, + $data->{stats}{client_rpc_Rename}, $data->{stats}{client_rpc_Link}, + $data->{stats}{client_rpc_Symlink}, $data->{stats}{client_rpc_Mkdir} + ) = split( / +/m, $line ); + } elsif ( $previous_line =~ /Rmdir\ +Readdir\ 
+RdirPlus\ +Access\ +Mknod\ +Fsstat/ ) { + $line =~ s/^ +//; + ( + $data->{stats}{client_rpc_Rmdir}, $data->{stats}{client_rpc_Readdir}, + $data->{stats}{client_rpc_RdirPlus}, $data->{stats}{client_rpc_Access}, + $data->{stats}{client_rpc_Mknod}, $data->{stats}{client_rpc_Fsstat} + ) = split( / +/m, $line ); + } elsif ( $previous_line =~ /FSinfo\ +pathConf\ +Commit\ +SetClId\ +SetClIdCf\ +Lock/ ) { + $line =~ s/^ +//; + ( + $data->{stats}{client_rpc_FSinfo}, $data->{stats}{client_rpc_pathConf}, + $data->{stats}{client_rpc_Commit}, $data->{stats}{client_rpc_SetClId}, + $data->{stats}{client_rpc_SetClIdCf}, $data->{stats}{client_rpc_Lock} + ) = split( / +/m, $line ); + } elsif ( $previous_line =~ /LockT\ +LockU\ +Open\ +OpenCfr/ ) { + $line =~ s/^ +//; + ( + $data->{stats}{client_rpc_LockT}, $data->{stats}{client_rpc_LockU}, + $data->{stats}{client_rpc_Open}, $data->{stats}{client_rpc_OpenCfr} + ) = split( / +/m, $line ); + } elsif ( $previous_line =~ /OpenDownGr\ +Close/ ) { + $line =~ s/^ +//; + ( $data->{stats}{client_rpc_OpenDownGr}, $data->{stats}{client_rpc_Close}, ) = split( / +/m, $line ); + } elsif ( $previous_line =~ /RelLckOwn\ +FreeStateID\ +PutRootFH\ +DelegRet\ +GetAcl\ +SetAcl/ ) { + $line =~ s/^ +//; + ( + $data->{stats}{client_rpc_RelLckOwn}, $data->{stats}{client_rpc_FreeStateID}, + $data->{stats}{client_rpc_PutRootFH}, $data->{stats}{client_rpc_DelegRet}, + $data->{stats}{client_rpc_GetAcl}, $data->{stats}{client_rpc_SetAcl} + ) = split( / +/m, $line ); + } elsif ( $previous_line =~ /ExchangeId\ +CreateSess\ +DestroySess\ +DestroyClId\ +LayoutGet\ +GetDevInfo/ ) { + $line =~ s/^ +//; + ( + $data->{stats}{client_rpc_ExchangeId}, $data->{stats}{client_rpc_CreateSess}, + $data->{stats}{client_rpc_DestroySess}, $data->{stats}{client_rpc_DestroyClId}, + $data->{stats}{client_rpc_LayoutGet}, $data->{stats}{client_rpc_GetDevInfo} + ) = split( / +/m, $line ); + } elsif ( + $previous_line =~ /LayoutCommit\ +LayoutReturn\ +ReclaimCompl\ +ReadDataS\ +WriteDataS\ +CommitDataS/ ) + { + $line =~ s/^ +//; + ( + $data->{stats}{client_rpc_LayoutCommit}, $data->{stats}{client_rpc_LayoutReturn}, + $data->{stats}{client_rpc_ReclaimCompl}, $data->{stats}{client_rpc_ReadDataS}, + $data->{stats}{client_rpc_WriteDataS}, $data->{stats}{client_rpc_CommitDataS} + ) = split( / +/m, $line ); + } elsif ( $previous_line =~ /OpenLayout\ +CreateLayout\ +BindConnSess\ +LookupOpen/ ) { + $line =~ s/^ +//; + ( + $data->{stats}{client_rpc_OpenLayout}, $data->{stats}{client_rpc_CreateLayout}, + $data->{stats}{client_rpc_BindConnSess}, $data->{stats}{client_rpc_LookupOpen} + ) = split( / +/m, $line ); + } elsif ( $previous_line =~ /IOAdvise\ +Allocate\ +Copy\ +Seek\ +SeekDataS\ +GetExtattr/ ) { + $line =~ s/^ +//; + ( + $data->{stats}{client_rpc_IOAdvise}, $data->{stats}{client_rpc_Allocate}, + $data->{stats}{client_rpc_Copy}, $data->{stats}{client_rpc_Seek}, + $data->{stats}{client_rpc_SeekDataS}, $data->{stats}{client_rpc_GetExtattr} + ) = split( / +/m, $line ); + } elsif ( $previous_line =~ /SetExtattr\ +RmExtattr\ +ListExtattr\ +Deallocate\ +LayoutError/ ) { + $line =~ s/^ +//; + ( + $data->{stats}{client_rpc_SetExtattr}, $data->{stats}{client_rpc_RmExtattr}, + $data->{stats}{client_rpc_ListExtattr}, $data->{stats}{client_rpc_Deallocate}, + $data->{stats}{client_rpc_LayoutError} + ) = split( / +/m, $line ); + } elsif ( $previous_line =~ /OpenOwner\ +Opens\ +LockOwner\ +Locks\ +Delegs\ +LocalOwn/ ) { + $line =~ s/^ +//; + ( + $data->{stats}{client_OpenOwner}, $data->{stats}{client_Opens}, 
$data->{stats}{client_LockOwner}, + $data->{stats}{client_Locks}, $data->{stats}{client_Delegs}, $data->{stats}{client_LockOwner} + ) = split( / +/m, $line ); + } elsif ( $previous_line =~ /LocalOpen\ +LocalLown\ +LocalLock\ +Layouts/ ) { + $line =~ s/^ +//; + ( + $data->{stats}{client_LocalOpen}, $data->{stats}{client_LocalLown}, + $data->{stats}{client_LocalLock}, $data->{stats}{client_Layouts} + ) = split( / +/m, $line ); + } elsif ( $previous_line =~ /TimedOut\ +Invalid\ +X\ Replies\ +Retries\ +Requests/ ) { + $line =~ s/^ +//; + ( + $data->{stats}{client_rpc_info_TimedOut}, $data->{stats}{client_rpc_info_Invalid}, + $data->{stats}{client_rpc_info_X_Replies}, $data->{stats}{client_rpc_info_Retries}, + $data->{stats}{client_rpc_info_Requests} + ) = split( / +/m, $line ); + } elsif ( $previous_line =~ /Attr\ Hits\ +Attr\ Misses\ +Lkup\ Hits\ +Lkup\ Misses/ ) { + $line =~ s/^ +//; + ( + $data->{stats}{client_cache_Attr_Hits}, $data->{stats}{client_cache_Attr_Misses}, + $data->{stats}{client_cache_Lkup_Hits}, $data->{stats}{client_cache_Lkup_Misses} + ) = split( / +/m, $line ); + } elsif ( $previous_line =~ /BioR\ Hits\ +BioR\ Misses\ +BioW\ Hits\ +BioW\ Misses/ ) { + $line =~ s/^ +//; + ( + $data->{stats}{client_cache_BioR_Hits}, $data->{stats}{client_cache_BioR_Misses}, + $data->{stats}{client_cache_BioW_Hits}, $data->{stats}{client_cache_BioW_Misses} + ) = split( / +/m, $line ); + } elsif ( $previous_line =~ /BioRL\ Hits\ +BioRL\ Misse\ +BioD\ Hits\ +BioD\ Misses/ ) { + $line =~ s/^ +//; + ( + $data->{stats}{client_cache_BioRL_Hits}, $data->{stats}{client_cache_BioRL_Misses}, + $data->{stats}{client_cache_BioD_Hits}, $data->{stats}{client_cache_BioD_Misses} + ) = split( / +/m, $line ); + } elsif ( $previous_line =~ /DirE\ Hits\ +DirE\ Misses/ ) { + $line =~ s/^ +//; + ( $data->{stats}{client_cache_DirE_Hits}, $data->{stats}{client_cache_DirE_Misses}, ) + = split( / +/m, $line ); + } elsif ( $previous_line =~ /Getattr\ +Setattr\ +Lookup\ +Readlink\ +Read\ +Write/ ) { + $line =~ s/^ +//; + ( + $data->{stats}{server_Getattr}, $data->{stats}{server_Setattr}, $data->{stats}{server_Lookup}, + $data->{stats}{server_Readlink}, $data->{stats}{server_Read}, $data->{stats}{server_Write}, + ) = split( / +/m, $line ); + } elsif ( $previous_line =~ /Create\ +Remove\ +Rename\ +Link\ +Symlink\ +Mkdir/ ) { + $line =~ s/^ +//; + ( + $data->{stats}{server_Create}, $data->{stats}{server_Remove}, $data->{stats}{server_Rename}, + $data->{stats}{server_Link}, $data->{stats}{server_Symlink}, $data->{stats}{server_Mkdir}, + ) = split( / +/m, $line ); + } elsif ( $previous_line =~ /Rmdir\ +Readdir\ +RdirPlus\ +Access\ +Mknod\ +Fsstat/ ) { + $line =~ s/^ +//; + ( + $data->{stats}{server_Rmdir}, $data->{stats}{server_Readdir}, $data->{stats}{server_RdirPlus}, + $data->{stats}{server_Access}, $data->{stats}{server_Mknod}, $data->{stats}{server_Fsstat}, + ) = split( / +/m, $line ); + } elsif ( $previous_line =~ /FSinfo\ +pathConf\ +Commit\ +LookupP\ +SetClId\ +SetClIdCf/ ) { + $line =~ s/^ +//; + ( + $data->{stats}{server_FSinfo}, $data->{stats}{server_pathConf}, $data->{stats}{server_Commit}, + $data->{stats}{server_LookupP}, $data->{stats}{server_SetClId}, $data->{stats}{server_SetClIdCf}, + ) = split( / +/m, $line ); + } elsif ( $previous_line =~ /Open\ +OpenAttr\ +OpenDwnGr\ +OpenCfrm\ +DelePurge\ +DelRet/ ) { + $line =~ s/^ +//; + ( + $data->{stats}{server_Open}, $data->{stats}{server_OpenAttr}, $data->{stats}{server_OpenDwnGr}, + $data->{stats}{server_OpenCfrm}, $data->{stats}{server_DelePurge}, 
$data->{stats}{server_DelRet}, + ) = split( / +/m, $line ); + } elsif ( $previous_line =~ /GetFH\ +Lock\ +LockT\ +LockU\ +Close\ +Verify/ ) { + $line =~ s/^ +//; + ( + $data->{stats}{server_GetFH}, $data->{stats}{server_Lock}, $data->{stats}{server_LockT}, + $data->{stats}{server_LockU}, $data->{stats}{server_Close}, $data->{stats}{server_Verify}, + ) = split( / +/m, $line ); + } elsif ( $previous_line =~ /NVerify\ +PutFH\ +PutPubFH\ +PutRootFH\ +Renew\ +RestoreFH/ ) { + $line =~ s/^ +//; + ( + $data->{stats}{server_NVerify}, $data->{stats}{server_PutFH}, $data->{stats}{server_PutPubFH}, + $data->{stats}{server_PutRootFH}, $data->{stats}{server_Renew}, $data->{stats}{server_RestoreFH}, + ) = split( / +/m, $line ); + } elsif ( $previous_line =~ /SaveFH\ +Secinfo\ +RelLockOwn\ +V4Create/ ) { + $line =~ s/^ +//; + ( + $data->{stats}{server_SaveFH}, $data->{stats}{server_Secinfo}, + $data->{stats}{server_RelLockOwn}, $data->{stats}{server_V4Create} + ) = split( / +/m, $line ); + } elsif ( $previous_line + =~ /BackChannelCt\ *BindConnToSes\ +ExchangeID\ +CreateSess\ +DestroySess\ +FreeStateID/ ) + { + $line =~ s/^ +//; + ( + $data->{stats}{server_BackChannelCt}, $data->{stats}{server_BindConnToSes}, + $data->{stats}{server_ExchangeID}, $data->{stats}{server_CreateSess}, + $data->{stats}{server_DestroySess}, $data->{stats}{server_FreeStateID}, + ) = split( / +/m, $line ); + } elsif ( + $previous_line =~ /GetDirDeleg\ +GetDevInfo\ +GetDevList\ +[lL]ayoutCommit\ +LayoutGet\ +LayoutReturn/ ) + { + $line =~ s/^ +//; + ( + $data->{stats}{server_GetDirDeleg}, $data->{stats}{server_GetDevInfo}, + $data->{stats}{server_GetDevList}, $data->{stats}{server_layoutCommit}, + $data->{stats}{server_LayoutGet}, $data->{stats}{server_LayoutReturn}, + ) = split( / +/m, $line ); + } elsif ( $previous_line =~ /SecInfNoName\ +Sequence\ +SetSSV\ +TestStateID\ +WantDeleg\ +DestroyClId/ ) { + $line =~ s/^ +//; + ( + $data->{stats}{server_SecInfNoName}, $data->{stats}{server_Sequence}, + $data->{stats}{server_SetSSV}, $data->{stats}{server_TestStateID}, + $data->{stats}{server_WantDeleg}, $data->{stats}{server_DestroyClId}, + ) = split( / +/m, $line ); + } elsif ( $previous_line =~ /ReclaimCompl/ ) { + $line =~ s/^ +//; + ( $data->{stats}{server_ReclaimCompl} ) = split( / +/m, $line ); + } elsif ( $previous_line =~ /Allocate\ +Copy\ +CopyNotify\ +Deallocate\ +IOAdvise\ +LayoutError/ ) { + $line =~ s/^ +//; + ( + $data->{stats}{server_Allocate}, $data->{stats}{server_Copy}, + $data->{stats}{server_CopyNotify}, $data->{stats}{server_Deallocate}, + $data->{stats}{server_IOAdvise}, $data->{stats}{server_LayoutError}, + ) = split( / +/m, $line ); + } elsif ( $previous_line =~ /LayoutStats\ +OffloadCncl\ +OffloadStat\ +ReadPlus\ +Seek\ +WriteSame/ ) { + $line =~ s/^ +//; + ( + $data->{stats}{server_LayoutStats}, $data->{stats}{server_OffloadCncl}, + $data->{stats}{server_OffloadStat}, $data->{stats}{server_ReadPlus}, + $data->{stats}{server_Seek}, $data->{stats}{server_WriteSame}, + ) = split( / +/m, $line ); + } elsif ( $previous_line =~ /Clone\ +GetExtattr\ +SetExtattr\ +ListExtattr\ +RmExtattr/ ) { + $line =~ s/^ +//; + ( + $data->{stats}{server_Clone}, $data->{stats}{server_GetExtattr}, + $data->{stats}{server_SetExtattr}, $data->{stats}{server_ListExtattr}, + $data->{stats}{server_RmExtattr} + ) = split( / +/m, $line ); + } elsif ( $previous_line =~ /Clients\ +OpenOwner\ +Opens\ +LockOwner\ +Locks\ +Delegs/ ) { + $line =~ s/^ +//; + ( + $data->{stats}{server_Clients}, $data->{stats}{server_OpenOwner}, 
$data->{stats}{server_Opens}, + $data->{stats}{server_LockOwner}, $data->{stats}{server_Locks}, $data->{stats}{server_Delegs}, + ) = split( / +/m, $line ); + } elsif ( $previous_line =~ /^ *Layouts *$/ ) { + $line =~ s/^ +//; + $line =~ s/ +$//; + $data->{stats}{server_Clients} = $line; + } elsif ( $previous_line =~ /Inprog\ +Non\-idem\ +Misses\ +CacheSize\ +TCPPeak/ ) { + $line =~ s/^ +//; + ( + $data->{stats}{server_cache_Inprog}, $data->{stats}{'server_cache_Non-idem'}, + $data->{stats}{server_cache_Misses}, $data->{stats}{server_cache_CacheSize}, + $data->{stats}{server_cache_TCPPeak} + ) = split( / +/m, $line ); + } + $previous_line = $line; + } ## end foreach my $line (@output_split) +} ## end if ( $^O eq 'freebsd' ) + +#### +#### +#### handle getting stats for Linux +#### +#### +if ( $^O eq 'linux' ) { + +} + +#### +#### +#### figure out if is a client and/or server +#### +#### +my @stat_keys = keys( %{ $data->{stats} } ); +foreach my $item (@stat_keys) { + if ($item=~/^client/ && $data->{stats}{$item} > 0) { + $data->{is_client}=1 + }elsif ($item=~/^server/ && $data->{stats}{$item} > 0) { + $data->{is_server}=1 + } +} + +#### +#### +#### if server, call showmount +#### +#### +if ($data->{is_server}) { + my $output_raw = `showmount -a`; + my @output_split = split( /\n/, $output_raw ); + foreach my $line (@output_split) { + if ($line=~/\:\//) { + my ($host, $path)=split(/\:\//, $line); + push(@{$data->{mounted_by}}, {host=>$host, path=>'/'.$path}); + } + } +} + +#### +#### +#### if client, call nfsstat -m +#### +#### +if ($data->{is_client}) { + if ($^O eq 'freebsd') { + my $output_raw = `nfsstat -m`; + my @output_split = split( /\n/, $output_raw ); + my $previous_line=''; + my $host; + my $rpath; + my $lpath; + foreach my $line (@output_split) { + if ($line =~ /\:\/.* on \//) { + $host=$line; + $host=~s/\:\/.*$//; + + $rpath=$line; + $rpath=~s/\ on\ \/.*$//; + $rpath=~s/^.*\:\///; + $rpath='/'.$rpath; + + $lpath=$line; + $lpath=~s/^.*\:\/.*\ on \///; + $lpath='/'.$lpath; + }elsif ($line =~ /\,/ && defined($host) && defined($rpath) && defined($lpath) ) { + my @flags; + my %opts; + my @line_split=split(/\,/, $line); + foreach my $item (@line_split) { + if ($item =~ /\=/) { + my ($var, $val)=split(/\=/, $item); + $opts{$var}=$val; + }else { + push(@flags, $item); + } + } + push(@{$data->{mounted}}, { host=>$host, rpath=>$rpath, lpath=>$lpath,flags=>\@flags, opts=>\%opts }); + } + } + }elsif ($^O eq 'linux') { + my $output_raw = `nfsstat -m`; + my @output_split = split( /\n/, $output_raw ); + } +} + +#add the data has to the return hash +$to_return->{data} = $data; + +#finally render the JSON +my $j = JSON->new; +if ($pretty) { + $j->pretty(1); +} +print $j->encode($to_return); +if ( !$pretty ) { + print "\n"; +} From 9f2d148fef470943cac82d5cc36b41bcd0ef3cd6 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Sat, 2 Mar 2024 09:43:48 -0600 Subject: [PATCH 280/332] remove Getopt::Std --- snmp/nfs | 1 - 1 file changed, 1 deletion(-) diff --git a/snmp/nfs b/snmp/nfs index 93e88ceca..1b0c4ac77 100755 --- a/snmp/nfs +++ b/snmp/nfs @@ -26,7 +26,6 @@ use strict; use warnings; -use Getopt::Std; use Getopt::Long; use File::Slurp; use MIME::Base64; From 824144e9d54486fa3bd8605bee3e5b035bba4d6a Mon Sep 17 00:00:00 2001 From: "Zane C. 
Bowers-Hadley" Date: Sat, 2 Mar 2024 14:55:59 -0600 Subject: [PATCH 281/332] nfsstat -m works for Linux now as well --- snmp/nfs | 121 ++++++++++++++++++++++++++++++++++++------------------- 1 file changed, 80 insertions(+), 41 deletions(-) diff --git a/snmp/nfs b/snmp/nfs index 1b0c4ac77..3a59ead22 100755 --- a/snmp/nfs +++ b/snmp/nfs @@ -36,6 +36,9 @@ use JSON; #the version of returned data my $VERSION = 1; +# ensure sbin is in the path +$ENV{PATH} = $ENV{PATH} . ':/sbin:/usr/sbin'; + my $pretty; my $cache_base = '/var/cache/nfs.json'; my $write; @@ -537,10 +540,10 @@ if ( $^O eq 'linux' ) { #### my @stat_keys = keys( %{ $data->{stats} } ); foreach my $item (@stat_keys) { - if ($item=~/^client/ && $data->{stats}{$item} > 0) { - $data->{is_client}=1 - }elsif ($item=~/^server/ && $data->{stats}{$item} > 0) { - $data->{is_server}=1 + if ( $item =~ /^client/ && $data->{stats}{$item} > 0 ) { + $data->{is_client} = 1; + } elsif ( $item =~ /^server/ && $data->{stats}{$item} > 0 ) { + $data->{is_server} = 1; } } @@ -549,63 +552,99 @@ foreach my $item (@stat_keys) { #### if server, call showmount #### #### -if ($data->{is_server}) { - my $output_raw = `showmount -a`; - my @output_split = split( /\n/, $output_raw ); +if ( $data->{is_server} ) { + my $output_raw = `showmount -a`; + my @output_split = split( /\n/, $output_raw ); foreach my $line (@output_split) { - if ($line=~/\:\//) { - my ($host, $path)=split(/\:\//, $line); - push(@{$data->{mounted_by}}, {host=>$host, path=>'/'.$path}); + if ( $line =~ /\:\// ) { + my ( $host, $path ) = split( /\:\//, $line ); + push( @{ $data->{mounted_by} }, { host => $host, path => '/' . $path } ); } } -} +} ## end if ( $data->{is_server} ) #### #### #### if client, call nfsstat -m #### #### -if ($data->{is_client}) { - if ($^O eq 'freebsd') { - my $output_raw = `nfsstat -m`; - my @output_split = split( /\n/, $output_raw ); - my $previous_line=''; +if ( $data->{is_client} ) { + if ( $^O eq 'freebsd' ) { + my $output_raw = `nfsstat -m`; + my @output_split = split( /\n/, $output_raw ); my $host; my $rpath; my $lpath; foreach my $line (@output_split) { - if ($line =~ /\:\/.* on \//) { - $host=$line; - $host=~s/\:\/.*$//; + if ( $line =~ /\:\/.* on \// ) { + $host = $line; + $host =~ s/\:\/.*$//; - $rpath=$line; - $rpath=~s/\ on\ \/.*$//; - $rpath=~s/^.*\:\///; - $rpath='/'.$rpath; + $rpath = $line; + $rpath =~ s/\ on\ \/.*$//; + $rpath =~ s/^.*\:\///; + $rpath = '/' . $rpath; - $lpath=$line; - $lpath=~s/^.*\:\/.*\ on \///; - $lpath='/'.$lpath; - }elsif ($line =~ /\,/ && defined($host) && defined($rpath) && defined($lpath) ) { + $lpath = $line; + $lpath =~ s/^.*\:\/.*\ on \///; + $lpath = '/' . 
$lpath; + } elsif ( $line =~ /\,/ && defined($host) && defined($rpath) && defined($lpath) ) { my @flags; my %opts; - my @line_split=split(/\,/, $line); + my @line_split = split( /\,/, $line ); foreach my $item (@line_split) { - if ($item =~ /\=/) { - my ($var, $val)=split(/\=/, $item); - $opts{$var}=$val; - }else { - push(@flags, $item); + if ( $item =~ /\=/ ) { + my ( $var, $val ) = split( /\=/, $item ); + $opts{$var} = $val; + } else { + push( @flags, $item ); } } - push(@{$data->{mounted}}, { host=>$host, rpath=>$rpath, lpath=>$lpath,flags=>\@flags, opts=>\%opts }); - } - } - }elsif ($^O eq 'linux') { - my $output_raw = `nfsstat -m`; - my @output_split = split( /\n/, $output_raw ); - } -} + push( + @{ $data->{mounted} }, + { host => $host, rpath => $rpath, lpath => $lpath, flags => \@flags, opts => \%opts } + ); + } ## end elsif ( $line =~ /\,/ && defined($host) && defined...) + } ## end foreach my $line (@output_split) + } elsif ( $^O eq 'linux' ) { + my $output_raw = `nfsstat -m`; + my @output_split = split( /\n/, $output_raw ); + my $host; + my $rpath; + my $lpath; + foreach my $line (@output_split) { + if ( $line =~ /^\/.*\ from\ .*\:\/.*/ ) { + $lpath = $line; + $lpath =~ s/\ from\ .*$//; + + $host = $line; + $host =~ s/.*\ from\ //; + $host =~ s/\:\/.*$//; + + $rpath = $line; + $rpath =~ s/^.*\:\///; + $rpath = '/' . $rpath; + } elsif ( $line =~ /Flags\:[\ \t]+/ && defined($lpath) && defined($host) && defined($rpath) ) { + $line =~ s/^.*Flags\:[\ \t]+//; + my @flags; + my %opts; + my @line_split = split( /\,/, $line ); + foreach my $item (@line_split) { + if ( $item =~ /\=/ ) { + my ( $var, $val ) = split( /\=/, $item ); + $opts{$var} = $val; + } else { + push( @flags, $item ); + } + } + push( + @{ $data->{mounted} }, + { host => $host, rpath => $rpath, lpath => $lpath, flags => \@flags, opts => \%opts } + ); + } ## end elsif ( $line =~ /Flags\:[\ \t]+/ && defined(...)) + } ## end foreach my $line (@output_split) + } ## end elsif ( $^O eq 'linux' ) +} ## end if ( $data->{is_client} ) #add the data has to the return hash $to_return->{data} = $data; From 5861eb8d7e362defc23295150a0e130ef757bafd Mon Sep 17 00:00:00 2001 From: "Zane C. 
Bowers-Hadley" Date: Sun, 3 Mar 2024 12:39:43 -0600 Subject: [PATCH 282/332] now works for Linux and FreeBSD --- snmp/nfs | 202 +++++++++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 198 insertions(+), 4 deletions(-) diff --git a/snmp/nfs b/snmp/nfs index 3a59ead22..3adad5bc9 100755 --- a/snmp/nfs +++ b/snmp/nfs @@ -75,9 +75,23 @@ my $to_return = { my $data = { is_client => 0, is_server => 0, + os => $^O, mounts => [], mounted_by => [], stats => { + client_rpc_clone => 0, + client_rpc_layoutstats => 0, + client_rpc_getdevicelist => 0, + client_rpc_test_stateid => 0, + client_rpc_secinfo_no => 0, + client_rpc_get_lease_time => 0, + client_rpc_sequence => 0, + client_rpc_fsid_present => 0, + client_rpc_secinfo => 0, + client_rpc_fs_locations => 0, + client_rpc_server_caps => 0, + client_rpc_renew => 0, + client_rpc_confirm => 0, client_rpc_null => 0, client_rpc_Getattr => 0, client_rpc_Setattr => 0, @@ -271,6 +285,16 @@ my $data = { server_cache_Misses => 0, server_cache_CacheSize => 0, server_cache_TCPPeak => 0, + server_calls => 0, + server_badcalls => 0, + server_badfmt => 0, + server_badauth => 0, + server_badclnt => 0, + server_null => 0, + server_compound => 0, + 'server_op0-unused' => 0, + 'server_op1-unused' => 0, + 'server_op2-future' => 0, } }; @@ -530,8 +554,178 @@ if ( $^O eq 'freebsd' ) { #### #### if ( $^O eq 'linux' ) { - -} + my $output_raw = `nfsstat | sed 's/[0-9\.]*\%//g'`; + my @output_split = split( /\n/, $output_raw ); + my $previous_line = ''; + foreach my $line (@output_split) { + if ( $previous_line =~ /calls\ +badcalls\ +badfmt\ +badauth\ +badclnt/ ) { + ( + $data->{stats}{server_calls}, $data->{stats}{'server_badcalls'}, $data->{stats}{server_badfmt}, + $data->{stats}{server_badauth}, $data->{stats}{server_badclnt}, + ) = split( /[\ \t]+/m, $line ); + } elsif ( $previous_line =~ /null\ +compound/ ) { + ( $data->{stats}{server_null}, $data->{stats}{server_compound}, ) = split( /[\ \t]+/m, $line ); + } elsif ( $previous_line =~ /op0\-unused\ +op1\-unused\ +op2\-future\ +access\ +close/ ) { + ( + $data->{stats}{'server_op0-unused'}, $data->{stats}{'server_op1-unused'}, + $data->{stats}{'server_op2-future'}, $data->{stats}{server_Access}, + $data->{stats}{server_Close}, + ) = split( /[\ \t]+/m, $line ); + } elsif ( $previous_line =~ /commit\ +create\ +delegpurge\ +delegreturn\ +getattr/ ) { + ( + $data->{stats}{server_Commit}, $data->{stats}{server_Create}, $data->{stats}{server_DelePurge}, + $data->{stats}{server_Delegs}, $data->{stats}{server_Getattr}, + ) = split( /[\ \t]+/m, $line ); + } elsif ( $previous_line =~ /getfh\ +link\ +lock\ +lockt\ +locku/ ) { + ( + $data->{stats}{server_GetFH}, $data->{stats}{server_Link}, $data->{stats}{server_Lock}, + $data->{stats}{server_LockT}, $data->{stats}{server_LockU}, + ) = split( /[\ \t]+/m, $line ); + } elsif ( $previous_line =~ /lookup\ +lookup_root\ +nverify\ +open\ +openattr/ ) { + ( + $data->{stats}{server_Lookup}, $data->{stats}{server_LookupP}, $data->{stats}{server_NVerify}, + $data->{stats}{server_Open}, $data->{stats}{server_OpenAttr}, + ) = split( /[\ \t]+/m, $line ); + } elsif ( $previous_line =~ /open_conf\ +open_dgrd\ +putfh\ putpubfh\ +putrootfh/ ) { + ( + $data->{stats}{server_OpenCfrm}, $data->{stats}{server_OpenDwnGr}, $data->{stats}{server_PutFH}, + $data->{stats}{server_PutPubFH}, $data->{stats}{server_PutRootFH}, + ) = split( /[\ \t]+/m, $line ); + } elsif ( $previous_line =~ /read\ +readdir\ +readlink\ +remove\ +rename/ ) { + ( + $data->{stats}{server_Read}, 
$data->{stats}{server_Readdir}, $data->{stats}{server_Readlink}, + $data->{stats}{server_Remove}, $data->{stats}{server_Rename}, + ) = split( /[\ \t]+/m, $line ); + } elsif ( $previous_line =~ /renew\ +restorefh\ +savefh\ +secinfo\ +setattr/ ) { + ( + $data->{stats}{server_Renew}, $data->{stats}{server_RestoreFH}, $data->{stats}{server_SaveFH}, + $data->{stats}{server_Secinfo}, $data->{stats}{server_Setattr}, + ) = split( /[\ \t]+/m, $line ); + } elsif ( $previous_line =~ /setcltid\ +setcltidconf\ +verify\ +write\ +rellockowner/ ) { + ( + $data->{stats}{server_SetClId}, $data->{stats}{server_SetClIdCf}, + $data->{stats}{server_Verify}, $data->{stats}{server_Write}, + $data->{stats}{server_RelLockOwn}, + ) = split( /[\ \t]+/m, $line ); + } elsif ( $previous_line =~ /bc_ctl\ +bind_conn\ +exchange_id\ +create_ses\ +destroy_ses/ ) { + ( + $data->{stats}{server_BackChannelCt}, $data->{stats}{server_BindConnToSes}, + $data->{stats}{server_ExchangeID}, $data->{stats}{server_CreateSess}, + $data->{stats}{server_DestroySess}, + ) = split( /[\ \t]+/m, $line ); + } elsif ( $previous_line =~ /free_stateid\ +getdirdeleg\ +getdevinfo\ +getdevlist\ +layoutcommit/ ) { + ( + $data->{stats}{server_FreeStateID}, $data->{stats}{server_GetDirDeleg}, + $data->{stats}{server_GetDevInfo}, $data->{stats}{server_GetDevList}, + $data->{stats}{server_layoutCommit}, + ) = split( /[\ \t]+/m, $line ); + } elsif ( $previous_line =~ /layoutget\ +layoutreturn\ +secinfononam\ +sequence\ +set_ssv/ ) { + ( + $data->{stats}{server_LayoutGet}, $data->{stats}{server_LayoutReturn}, + $data->{stats}{server_SecInfNoName}, $data->{stats}{server_Sequence}, + $data->{stats}{server_SetSSV}, + ) = split( /[\ \t]+/m, $line ); + } elsif ( $previous_line =~ /test_stateid\ +want_deleg\ +destroy_clid\ +reclaim_comp\ +allocate/ ) { + ( + $data->{stats}{server_TestStateID}, $data->{stats}{server_WantDeleg}, + $data->{stats}{server_DestroyClId}, $data->{stats}{server_ReclaimCompl}, + $data->{stats}{server_Allocate}, + ) = split( /[\ \t]+/m, $line ); + } elsif ( $previous_line =~ /copy\ +copy_notify\ +deallocate\ +ioadvise\ +layouterror/ ) { + ( + $data->{stats}{server_Copy}, $data->{stats}{server_CopyNotify}, + $data->{stats}{server_Deallocate}, $data->{stats}{server_IOAdvise}, + $data->{stats}{server_LayoutError}, + ) = split( /[\ \t]+/m, $line ); + } elsif ( $previous_line =~ /layoutstats\ +offloadcancel\ +offloadstatus\ +readplus\ +seek/ ) { + ( + $data->{stats}{server_Layouts}, $data->{stats}{server_OffloadCncl}, + $data->{stats}{server_OffloadStat}, $data->{stats}{server_ReadPlus}, + $data->{stats}{server_Seek}, + ) = split( /[\ \t]+/m, $line ); + } elsif ( $previous_line =~ /write_same/ ) { + ( $data->{stats}{server_WriteSame} ) = split( /[\ \t]+/m, $line ); + } elsif ( $previous_line =~ /calls\ +retrans\ +authrefrsh/ ) { + ( + $data->{stats}{client_rpc_info_Requests}, + $data->{stats}{client_rpc_info_Retries}, + $data->{stats}{client_rpc_info_X_Replies} + ) = split( /[\ \t]+/m, $line ); + } elsif ( $previous_line =~ /null\ +read\ +write\ +commit\ +open/ ) { + ( + $data->{stats}{client_rpc_null}, $data->{stats}{client_rpc_Read}, + $data->{stats}{client_rpc_Write}, $data->{stats}{client_rpc_Commit}, + $data->{stats}{client_rpc_Open}, + ) = split( /[\ \t]+/m, $line ); + } elsif ( $previous_line =~ /open_conf\ +open_noat\ +open_dgrd\ +close\ +setattr/ ) { + ( + $data->{stats}{client_rpc_OpenCfr}, $data->{stats}{client_rpc_OpenLayout}, + $data->{stats}{client_rpc_OpenDownGr}, $data->{stats}{client_rpc_Commit}, + 
$data->{stats}{client_rpc_Open}, + ) = split( /[\ \t]+/m, $line ); + } elsif ( $previous_line =~ /fsinfo\ +renew\ +setclntid\ +confirm\ +lock/ ) { + ( + $data->{stats}{client_rpc_FSinfo}, $data->{stats}{client_rpc_renew}, + $data->{stats}{client_rpc_SetClId}, $data->{stats}{client_rpc_confirm}, + $data->{stats}{client_rpc_Lock}, + ) = split( /[\ \t]+/m, $line ); + } elsif ( $previous_line =~ /lockt\ +locku\ +access\ +getattr\ +lookup/ ) { + ( + $data->{stats}{client_rpc_LockT}, $data->{stats}{client_rpc_LockU}, + $data->{stats}{client_rpc_Access}, $data->{stats}{client_rpc_Getattr}, + $data->{stats}{client_rpc_Lookup}, + ) = split( /[\ \t]+/m, $line ); + } elsif ( $previous_line =~ /lookup_root\ +remove\ +rename\ +link\ +symlink/ ) { + ( + $data->{stats}{client_rpc_LookOpen}, $data->{stats}{client_rpc_Remove}, + $data->{stats}{client_rpc_Rename}, $data->{stats}{client_rpc_Link}, + $data->{stats}{client_rpc_Symlink}, + ) = split( /[\ \t]+/m, $line ); + } elsif ( $previous_line =~ /create\ +pathconf\ +statfs\ +readlink\ +readdir/ ) { + ( + $data->{stats}{client_rpc_Create}, $data->{stats}{client_rpc_pathConf}, + $data->{stats}{client_rpc_statfs}, $data->{stats}{client_rpc_Readlink}, + $data->{stats}{client_rpc_Readlink}, + ) = split( /[\ \t]+/m, $line ); + } elsif ( $previous_line =~ /server_caps\ +delegreturn\ +getacl\ +setacl\ +fs_locations/ ) { + ( + $data->{stats}{client_rpc_server_caps}, $data->{stats}{client_rpc_DelegRet}, + $data->{stats}{client_rpc_GetAcl}, $data->{stats}{client_rpc_SetAcl}, + $data->{stats}{client_rpc_fs_locations}, + ) = split( /[\ \t]+/m, $line ); + } elsif ( $previous_line =~ /rel_lkowner\ +secinfo\ +fsid_present\ +exchange_id\ +create_session/ ) { + ( + $data->{stats}{client_rpc_RelLckOwn}, $data->{stats}{client_rpc_secinfo}, + $data->{stats}{client_rpc_fsid_present}, $data->{stats}{client_rpc_ExchangeId}, + $data->{stats}{client_rpc_CreateSess}, + ) = split( /[\ \t]+/m, $line ); + } elsif ( $previous_line =~ /destroy_session\ +sequence\ +get_lease_time\ +reclaim_comp\ +layoutget/ ) { + ( + $data->{stats}{client_rpc_DestroySess}, $data->{stats}{client_rpc_sequence}, + $data->{stats}{client_rpc_get_lease_time}, $data->{stats}{client_rpc_ReclaimCompl}, + $data->{stats}{client_rpc_LayoutGet}, + ) = split( /[\ \t]+/m, $line ); + } elsif ( $previous_line =~ /getdevinfo\ +layoutcommit\ +layoutreturn\ +secinfo_no\ +test_stateid/ ) { + ( + $data->{stats}{client_rpc_GetDevInfo}, $data->{stats}{client_rpc_LayoutCommit}, + $data->{stats}{client_rpc_LayoutReturn}, $data->{stats}{client_rpc_secinfo_no}, + $data->{stats}{client_rpc_test_stateid}, + ) = split( /[\ \t]+/m, $line ); + } elsif ( $previous_line =~ /free_stateid\ +getdevicelist\ +bind_conn_to_ses\ +destroy_clientid\ +seek/ ) { + ( + $data->{stats}{client_rpc_FreeStateID}, $data->{stats}{client_rpc_getdevicelist}, + $data->{stats}{client_rpc_BindConnSess}, $data->{stats}{client_rpc_DestroyClId}, + $data->{stats}{client_rpc_Seek}, + ) = split( /[\ \t]+/m, $line ); + } elsif ( $previous_line =~ /allocate\ +deallocate\ +layoutstats\ +clone/ ) { + ( + $data->{stats}{client_rpc_Allocate}, $data->{stats}{client_rpc_Deallocate}, + $data->{stats}{client_rpc_layoutstats}, $data->{stats}{client_rpc_clone}, + ) = split( /[\ \t]+/m, $line ); + } + $previous_line = $line; + } ## end foreach my $line (@output_split) +} ## end if ( $^O eq 'linux' ) #### #### @@ -601,7 +795,7 @@ if ( $data->{is_client} ) { } } push( - @{ $data->{mounted} }, + @{ $data->{mounts} }, { host => $host, rpath => $rpath, lpath => $lpath, flags => \@flags, 
opts => \%opts }
 			);
 		} ## end elsif ( $line =~ /\,/ && defined($host) && defined...)
@@ -638,7 +832,7 @@ if ( $data->{is_client} ) {
 			}
 		}
 		push(
-			@{ $data->{mounted} },
+			@{ $data->{mounts} },
 			{ host => $host, rpath => $rpath, lpath => $lpath, flags => \@flags, opts => \%opts }
 		);
 	} ## end elsif ( $line =~ /Flags\:[\ \t]+/ && defined(...))
From 052c7c59142f3d41c6fea3a43588c6f2e133cdc5 Mon Sep 17 00:00:00 2001
From: "Zane C. Bowers-Hadley"
Date: Sun, 3 Mar 2024 13:22:54 -0600
Subject: [PATCH 283/332] add nfs
---
 snmp/nfs | 110 +++++++++++++++++++++++++++++++++++++++++++++++++------
 1 file changed, 98 insertions(+), 12 deletions(-)

diff --git a/snmp/nfs b/snmp/nfs
index 3adad5bc9..82d9c449e 100755
--- a/snmp/nfs
+++ b/snmp/nfs
@@ -1,5 +1,59 @@
 #!/usr/bin/env perl
 
+=head1 NAME
+
+nfs - LibreNMS JSON style SNMP extend for NFS monitoring
+
+=head1 VERSION
+
+0.0.1
+
+=head1 SYNOPSIS
+
+nfs [B<-w>] [B<-b>] [B<-o> <path>]
+
+nfs --help|-h
+
+nfs --version|-v
+
+=head1 SNMPD CONFIG
+
+    extend nfs /etc/snmp/extends/nfs -b
+
+or if using cron...
+
+    extend nfs cat /var/cache/nfs.json.snmp
+
+=head1 DESCRIPTION
+
+Uses showmount and nfsstat to gather NFS information on the OSes below.
+
+    FreeBSD
+    Linux
+
+=head1 FLAGS
+
+=head2 -w
+
+Write the results out.
+
+=head2 -b
+
+Print out the compressed data if GZip+Base64 is smaller.
+
+=head2 -o <path>
+
+Where to write the results to. Defaults to '/var/cache/nfs.json',
+meaning it will be written out to these two locations.
+
+    /var/cache/nfs.json
+    /var/cache/nfs.json.snmp
+
+The latter is for use when returning data for SNMP, and will be
+compressed if possible.
+
+=cut
+
 ##
 ##
 ## General Notes
@@ -42,14 +96,13 @@ $ENV{PATH} = $ENV{PATH} . ':/sbin:/usr/sbin';
 my $pretty;
 my $cache_base = '/var/cache/nfs.json';
 my $write;
-my $compress = 1;
+my $compress;
 my $version;
 my $help;
 GetOptions(
-	p       => \$pretty,
-	'b=s'   => \$compress,
 	'o=s' => \$cache_base,
-	'w'   => \$write,
+	w     => \$write,
+	b     => \$compress,
 	v       => \$version,
 	version => \$version,
 	h       => \$help,
@@ -844,11 +897,44 @@ if ( $data->{is_client} ) {
 $to_return->{data} = $data;
 
 #finally render the JSON
-my $j = JSON->new;
-if ($pretty) {
-	$j->pretty(1);
-}
-print $j->encode($to_return);
-if ( !$pretty ) {
-	print "\n";
-}
+my $raw_json = encode_json($to_return);
+if ($write) {
+	write_file( $cache_base, $raw_json );
+
+	# compress and write to the cache file for it
+	my $compressed_string;
+	gzip \$raw_json => \$compressed_string;
+	my $compressed = encode_base64($compressed_string);
+	$compressed =~ s/\n//g;
+	$compressed = $compressed . "\n";
+	my $print_compressed = 0;
+	if ( length($compressed) > length($raw_json) ) {
+		write_file( $cache_base . '.snmp', $raw_json );
+	} else {
+		write_file( $cache_base . '.snmp', $compressed );
+		$print_compressed = 1;
+	}
+
+	if ( $compress && $print_compressed ) {
+		print $compressed;
+	} else {
+		print $raw_json;
+	}
+} else {
+	if ( !$compress ) {
+		print $raw_json. "\n";
+		exit;
+	}
+
+	# compress and write to the cache file for it
+	my $compressed_string;
+	gzip \$raw_json => \$compressed_string;
+	my $compressed = encode_base64($compressed_string);
+	$compressed =~ s/\n//g;
+	$compressed = $compressed . "\n";
+	my $print_compressed = 0;
+	if ( length($compressed) > length($raw_json) ) {
+		print $raw_json;
+	} else {
+		print $compressed;
+	}
+} ## end else [ if ($write) ]
From 29bccd464ebc38521710c5192aff63950a5573ec Mon Sep 17 00:00:00 2001
From: "Zane C.
Bowers-Hadley" Date: Thu, 7 Mar 2024 19:20:31 -0600 Subject: [PATCH 284/332] now null for not found and more cleanup --- snmp/nfs | 604 +++++++++++++++++++++++++++++-------------------------- 1 file changed, 316 insertions(+), 288 deletions(-) diff --git a/snmp/nfs b/snmp/nfs index 82d9c449e..a82159a24 100755 --- a/snmp/nfs +++ b/snmp/nfs @@ -132,222 +132,222 @@ my $data = { mounts => [], mounted_by => [], stats => { - client_rpc_clone => 0, - client_rpc_layoutstats => 0, - client_rpc_getdevicelist => 0, - client_rpc_test_stateid => 0, - client_rpc_secinfo_no => 0, - client_rpc_get_lease_time => 0, - client_rpc_sequence => 0, - client_rpc_fsid_present => 0, - client_rpc_secinfo => 0, - client_rpc_fs_locations => 0, - client_rpc_server_caps => 0, - client_rpc_renew => 0, - client_rpc_confirm => 0, - client_rpc_null => 0, - client_rpc_Getattr => 0, - client_rpc_Setattr => 0, - client_rpc_Lookup => 0, - client_rpc_Readlink => 0, - client_rpc_Read => 0, - client_rpc_Write => 0, - client_rpc_Create => 0, - client_rpc_Remove => 0, - client_rpc_Rename => 0, - client_rpc_Link => 0, - client_rpc_Symlink => 0, - client_rpc_Mkdir => 0, - client_rpc_Rmdir => 0, - client_rpc_Readdir => 0, - client_rpc_RdirPlus => 0, - client_rpc_Access => 0, - client_rpc_Mknod => 0, - client_rpc_Fsstat => 0, - client_rpc_FSinfo => 0, - client_rpc_pathConf => 0, - client_rpc_Commit => 0, - client_rpc_SetClId => 0, - client_rpc_SetClIdCf => 0, - client_rpc_Lock => 0, - client_rpc_LockT => 0, - client_rpc_LockU => 0, - client_rpc_Open => 0, - client_rpc_OpenCfr => 0, - client_rpc_OpenDownGr => 0, - client_rpc_Close => 0, - client_rpc_RelLckOwn => 0, - client_rpc_FreeStateID => 0, - client_rpc_PutRootFH => 0, - client_rpc_DelegRet => 0, - client_rpc_GetAcl => 0, - client_rpc_SetAcl => 0, - client_rpc_ExchangeId => 0, - client_rpc_CreateSess => 0, - client_rpc_DestroySess => 0, - client_rpc_DestroyClId => 0, - client_rpc_LayoutGet => 0, - client_rpc_GetDevInfo => 0, - client_rpc_LayoutCommit => 0, - client_rpc_LayoutReturn => 0, - client_rpc_ReclaimCompl => 0, - client_rpc_ReadDataS => 0, - client_rpc_WriteDataS => 0, - client_rpc_CommitDataS => 0, - client_rpc_OpenLayout => 0, - client_rpc_CreateLayout => 0, - client_rpc_BindConnSess => 0, - client_rpc_LookupOpen => 0, - client_rpc_IOAdvise => 0, - client_rpc_Allocate => 0, - client_rpc_Copy => 0, - client_rpc_Seek => 0, - client_rpc_SeekDataS => 0, - client_rpc_GetExtattr => 0, - client_rpc_SetExtattr => 0, - client_rpc_RmExtattr => 0, - client_rpc_ListExtattr => 0, - client_rpc_Deallocate => 0, - client_rpc_LayoutError => 0, - client_OpenOwner => 0, - client_Opens => 0, - client_LockOwner => 0, - client_Locks => 0, - client_Delegs => 0, - client_LocalOwn => 0, - client_LocalOpen => 0, - client_LocalLown => 0, - client_LocalLock => 0, - client_Layouts => 0, - client_rpc_info_TimedOut => 0, - client_rpc_info_Invalid => 0, - client_rpc_info_X_Replies => 0, - client_rpc_info_Retries => 0, - client_rpc_info_Requests => 0, - client_cache_Attr_Hits => 0, - client_cache_Attr_Misses => 0, - client_cache_Lkup_Hits => 0, - client_cache_Lkup_Misses => 0, - client_cache_BioR_Hits => 0, - client_cache_BioR_Misses => 0, - client_cache_BioW_Hits => 0, - client_cache_BioW_Misses => 0, - client_cache_BioRL_Hits => 0, - client_cache_BioRL_Misses => 0, - client_cache_BioD_Hits => 0, - client_cache_BioD_Misses => 0, - client_cache_DirE_Hits => 0, - client_cache_DirE_Misses => 0, - server_Getattr => 0, - server_Setattr => 0, - server_Lookup => 0, - server_Readlink => 0, - server_Read => 0, - 
server_Write => 0, - server_Create => 0, - server_Remove => 0, - server_Rename => 0, - server_Link => 0, - server_Symlink => 0, - server_Mkdir => 0, - server_Rmdir => 0, - server_Readdir => 0, - server_RdirPlus => 0, - server_Access => 0, - server_Mknod => 0, - server_Fsstat => 0, - server_FSinfo => 0, - server_pathConf => 0, - server_Commit => 0, - server_LookupP => 0, - server_SetClId => 0, - server_SetClIdCf => 0, - server_Open => 0, - server_OpenAttr => 0, - server_OpenDwnGr => 0, - server_OpenCfrm => 0, - server_DelePurge => 0, - server_DelRet => 0, - server_GetFH => 0, - server_Lock => 0, - server_LockT => 0, - server_LockU => 0, - server_Close => 0, - server_Verify => 0, - server_NVerify => 0, - server_PutFH => 0, - server_PutPubFH => 0, - server_PutRootFH => 0, - server_Renew => 0, - server_RestoreFH => 0, - server_SaveFH => 0, - server_Secinfo => 0, - server_RelLockOwn => 0, - server_V4Create => 0, - server_BackChannelCt => 0, - server_BindConnToSes => 0, - server_ExchangeID => 0, - server_CreateSess => 0, - server_DestroySess => 0, - server_FreeStateID => 0, - server_GetDirDeleg => 0, - server_GetDevInfo => 0, - server_GetDevList => 0, - server_layoutCommit => 0, - server_LayoutGet => 0, - server_LayoutReturn => 0, - server_GetDirDeleg => 0, - server_GetDevInfo => 0, - server_GetDevList => 0, - server_layoutCommit => 0, - server_LayoutGet => 0, - server_LayoutReturn => 0, - server_SecInfNoName => 0, - server_Sequence => 0, - server_SetSSV => 0, - server_TestStateID => 0, - server_WantDeleg => 0, - server_DestroyClId => 0, - server_ReclaimCompl => 0, - server_Allocate => 0, - server_Copy => 0, - server_CopyNotify => 0, - server_Deallocate => 0, - server_IOAdvise => 0, - server_LayoutError => 0, - server_LayoutStats => 0, - server_OffloadCncl => 0, - server_OffloadStat => 0, - server_ReadPlus => 0, - server_Seek => 0, - server_WriteSame => 0, - server_Clone => 0, - server_GetExtattr => 0, - server_SetExtattr => 0, - server_ListExtattr => 0, - server_RmExtattr => 0, - server_Clients => 0, - server_OpenOwner => 0, - server_Opens => 0, - server_LockOwner => 0, - server_Locks => 0, - server_Delegs => 0, - server_Layouts => 0, - server_cache_Inprog => 0, - 'server_cache_Non-idem' => 0, - server_cache_Misses => 0, - server_cache_CacheSize => 0, - server_cache_TCPPeak => 0, - server_calls => 0, - server_badcalls => 0, - server_badfmt => 0, - server_badauth => 0, - server_badclnt => 0, - server_null => 0, - server_compound => 0, - 'server_op0-unused' => 0, - 'server_op1-unused' => 0, - 'server_op2-future' => 0, + client_rpc_clone => undef, + client_rpc_layoutstats => undef, + client_rpc_getdevicelist => undef, + client_rpc_test_stateid => undef, + client_rpc_secinfo_no => undef, + client_rpc_get_lease_time => undef, + client_rpc_sequence => undef, + client_rpc_fsid_present => undef, + client_rpc_secinfo => undef, + client_rpc_fs_locations => undef, + client_rpc_server_caps => undef, + client_rpc_renew => undef, + client_rpc_confirm => undef, + client_rpc_null => undef, + client_rpc_Getattr => undef, + client_rpc_Setattr => undef, + client_rpc_Lookup => undef, + client_rpc_Readlink => undef, + client_rpc_Read => undef, + client_rpc_Write => undef, + client_rpc_Create => undef, + client_rpc_Remove => undef, + client_rpc_Rename => undef, + client_rpc_Link => undef, + client_rpc_Symlink => undef, + client_rpc_Mkdir => undef, + client_rpc_Rmdir => undef, + client_rpc_Readdir => undef, + client_rpc_RdirPlus => undef, + client_rpc_Access => undef, + client_rpc_Mknod => undef, + client_rpc_Fsstat 
=> undef, + client_rpc_FSinfo => undef, + client_rpc_pathConf => undef, + client_rpc_Commit => undef, + client_rpc_SetClId => undef, + client_rpc_SetClIdCf => undef, + client_rpc_Lock => undef, + client_rpc_LockT => undef, + client_rpc_LockU => undef, + client_rpc_Open => undef, + client_rpc_OpenCfr => undef, + client_rpc_OpenDownGr => undef, + client_rpc_Close => undef, + client_rpc_RelLckOwn => undef, + client_rpc_FreeStateID => undef, + client_rpc_PutRootFH => undef, + client_rpc_DelegRet => undef, + client_rpc_GetAcl => undef, + client_rpc_SetAcl => undef, + client_rpc_ExchangeId => undef, + client_rpc_CreateSess => undef, + client_rpc_DestroySess => undef, + client_rpc_DestroyClId => undef, + client_rpc_LayoutGet => undef, + client_rpc_GetDevInfo => undef, + client_rpc_LayoutCommit => undef, + client_rpc_LayoutReturn => undef, + client_rpc_ReclaimCompl => undef, + client_rpc_ReadDataS => undef, + client_rpc_WriteDataS => undef, + client_rpc_CommitDataS => undef, + client_rpc_OpenLayout => undef, + client_rpc_CreateLayout => undef, + client_rpc_BindConnSess => undef, + client_rpc_LookupOpen => undef, + client_rpc_IOAdvise => undef, + client_rpc_Allocate => undef, + client_rpc_Copy => undef, + client_rpc_Seek => undef, + client_rpc_SeekDataS => undef, + client_rpc_GetExtattr => undef, + client_rpc_SetExtattr => undef, + client_rpc_RmExtattr => undef, + client_rpc_ListExtattr => undef, + client_rpc_Deallocate => undef, + client_rpc_LayoutError => undef, + client_OpenOwner => undef, + client_Opens => undef, + client_LockOwner => undef, + client_Locks => undef, + client_Delegs => undef, + client_LocalOwn => undef, + client_LocalOpen => undef, + client_LocalLown => undef, + client_LocalLock => undef, + client_Layouts => undef, + client_rpc_info_TimedOut => undef, + client_rpc_info_Invalid => undef, + client_rpc_info_X_Replies => undef, + client_rpc_info_Retries => undef, + client_rpc_info_Requests => undef, + client_cache_Attr_Hits => undef, + client_cache_Attr_Misses => undef, + client_cache_Lkup_Hits => undef, + client_cache_Lkup_Misses => undef, + client_cache_BioR_Hits => undef, + client_cache_BioR_Misses => undef, + client_cache_BioW_Hits => undef, + client_cache_BioW_Misses => undef, + client_cache_BioRL_Hits => undef, + client_cache_BioRL_Misses => undef, + client_cache_BioD_Hits => undef, + client_cache_BioD_Misses => undef, + client_cache_DirE_Hits => undef, + client_cache_DirE_Misses => undef, + server_Getattr => undef, + server_Setattr => undef, + server_Lookup => undef, + server_Readlink => undef, + server_Read => undef, + server_Write => undef, + server_Create => undef, + server_Remove => undef, + server_Rename => undef, + server_Link => undef, + server_Symlink => undef, + server_Mkdir => undef, + server_Rmdir => undef, + server_Readdir => undef, + server_RdirPlus => undef, + server_Access => undef, + server_Mknod => undef, + server_Fsstat => undef, + server_FSinfo => undef, + server_pathConf => undef, + server_Commit => undef, + server_LookupP => undef, + server_SetClId => undef, + server_SetClIdCf => undef, + server_Open => undef, + server_OpenAttr => undef, + server_OpenDwnGr => undef, + server_OpenCfrm => undef, + server_DelePurge => undef, + server_DelRet => undef, + server_GetFH => undef, + server_Lock => undef, + server_LockT => undef, + server_LockU => undef, + server_Close => undef, + server_Verify => undef, + server_NVerify => undef, + server_PutFH => undef, + server_PutPubFH => undef, + server_PutRootFH => undef, + server_Renew => undef, + server_RestoreFH => undef, 
+ server_SaveFH => undef, + server_Secinfo => undef, + server_RelLockOwn => undef, + server_V4Create => undef, + server_BackChannelCt => undef, + server_BindConnToSes => undef, + server_ExchangeID => undef, + server_CreateSess => undef, + server_DestroySess => undef, + server_FreeStateID => undef, + server_GetDirDeleg => undef, + server_GetDevInfo => undef, + server_GetDevList => undef, + server_layoutCommit => undef, + server_LayoutGet => undef, + server_LayoutReturn => undef, + server_GetDirDeleg => undef, + server_GetDevInfo => undef, + server_GetDevList => undef, + server_layoutCommit => undef, + server_LayoutGet => undef, + server_LayoutReturn => undef, + server_SecInfNoName => undef, + server_Sequence => undef, + server_SetSSV => undef, + server_TestStateID => undef, + server_WantDeleg => undef, + server_DestroyClId => undef, + server_ReclaimCompl => undef, + server_Allocate => undef, + server_Copy => undef, + server_CopyNotify => undef, + server_Deallocate => undef, + server_IOAdvise => undef, + server_LayoutError => undef, + server_LayoutStats => undef, + server_OffloadCncl => undef, + server_OffloadStat => undef, + server_ReadPlus => undef, + server_Seek => undef, + server_WriteSame => undef, + server_Clone => undef, + server_GetExtattr => undef, + server_SetExtattr => undef, + server_ListExtattr => undef, + server_RmExtattr => undef, + server_Clients => undef, + server_OpenOwner => undef, + server_Opens => undef, + server_LockOwner => undef, + server_Locks => undef, + server_Delegs => undef, + server_Layouts => undef, + server_cache_Inprog => undef, + 'server_cache_Non-idem' => undef, + server_cache_Misses => undef, + server_cache_CacheSize => undef, + server_cache_TCPPeak => undef, + server_calls => undef, + server_badcalls => undef, + server_badfmt => undef, + server_badauth => undef, + server_badclnt => undef, + server_null => undef, + server_compound => undef, + 'server_op0-unused' => undef, + 'server_op1-unused' => undef, + 'server_op2-future' => undef, } }; @@ -360,60 +360,70 @@ if ( $^O eq 'freebsd' ) { my $output_raw = `nfsstat -E`; my @output_split = split( /\n/, $output_raw ); my $previous_line = ''; + my $mode = ''; foreach my $line (@output_split) { - if ( $previous_line =~ /Getattr\ +Setattr\ +Lookup\ +Readlink\ +Read\ +Write/ ) { + if ( $line =~ /^[Cc]lient/ ) { + $mode = 'client'; + } elsif ( $line =~ /^[Ss]erver/ ) { + $mode = 'server'; + } + if ( $previous_line =~ /Getattr\ +Setattr\ +Lookup\ +Readlink\ +Read\ +Write/ && $mode eq 'client' ) { $line =~ s/^ +//; ( $data->{stats}{client_rpc_Getattr}, $data->{stats}{client_rpc_Setattr}, $data->{stats}{client_rpc_Lookup}, $data->{stats}{client_rpc_Readlink}, $data->{stats}{client_rpc_Read}, $data->{stats}{client_rpc_Write} ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /Create\ +Remove\ +Rename\ +Link\ +Symlink\ +Mkdir/ ) { + } elsif ( $previous_line =~ /Create\ +Remove\ +Rename\ +Link\ +Symlink\ +Mkdir/ && $mode eq 'client' ) { $line =~ s/^ +//; ( $data->{stats}{client_rpc_Create}, $data->{stats}{client_rpc_Remove}, $data->{stats}{client_rpc_Rename}, $data->{stats}{client_rpc_Link}, $data->{stats}{client_rpc_Symlink}, $data->{stats}{client_rpc_Mkdir} ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /Rmdir\ +Readdir\ +RdirPlus\ +Access\ +Mknod\ +Fsstat/ ) { + } elsif ( $previous_line =~ /Rmdir\ +Readdir\ +RdirPlus\ +Access\ +Mknod\ +Fsstat/ && $mode eq 'client' ) { $line =~ s/^ +//; ( $data->{stats}{client_rpc_Rmdir}, $data->{stats}{client_rpc_Readdir}, $data->{stats}{client_rpc_RdirPlus}, 
$data->{stats}{client_rpc_Access}, $data->{stats}{client_rpc_Mknod}, $data->{stats}{client_rpc_Fsstat} ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /FSinfo\ +pathConf\ +Commit\ +SetClId\ +SetClIdCf\ +Lock/ ) { + } elsif ( $previous_line =~ /FSinfo\ +pathConf\ +Commit\ +SetClId\ +SetClIdCf\ +Lock/ && $mode eq 'client' ) { $line =~ s/^ +//; ( $data->{stats}{client_rpc_FSinfo}, $data->{stats}{client_rpc_pathConf}, $data->{stats}{client_rpc_Commit}, $data->{stats}{client_rpc_SetClId}, $data->{stats}{client_rpc_SetClIdCf}, $data->{stats}{client_rpc_Lock} ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /LockT\ +LockU\ +Open\ +OpenCfr/ ) { + } elsif ( $previous_line =~ /LockT\ +LockU\ +Open\ +OpenCfr/ && $mode eq 'client' ) { $line =~ s/^ +//; ( $data->{stats}{client_rpc_LockT}, $data->{stats}{client_rpc_LockU}, $data->{stats}{client_rpc_Open}, $data->{stats}{client_rpc_OpenCfr} ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /OpenDownGr\ +Close/ ) { + } elsif ( $previous_line =~ /OpenDownGr\ +Close/ && $mode eq 'client' ) { $line =~ s/^ +//; ( $data->{stats}{client_rpc_OpenDownGr}, $data->{stats}{client_rpc_Close}, ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /RelLckOwn\ +FreeStateID\ +PutRootFH\ +DelegRet\ +GetAcl\ +SetAcl/ ) { + } elsif ( $previous_line =~ /RelLckOwn\ +FreeStateID\ +PutRootFH\ +DelegRet\ +GetAcl\ +SetAcl/ + && $mode eq 'client' ) + { $line =~ s/^ +//; ( $data->{stats}{client_rpc_RelLckOwn}, $data->{stats}{client_rpc_FreeStateID}, $data->{stats}{client_rpc_PutRootFH}, $data->{stats}{client_rpc_DelegRet}, $data->{stats}{client_rpc_GetAcl}, $data->{stats}{client_rpc_SetAcl} ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /ExchangeId\ +CreateSess\ +DestroySess\ +DestroyClId\ +LayoutGet\ +GetDevInfo/ ) { + } elsif ( $previous_line =~ /ExchangeId\ +CreateSess\ +DestroySess\ +DestroyClId\ +LayoutGet\ +GetDevInfo/ + && $mode eq 'client' ) + { $line =~ s/^ +//; ( $data->{stats}{client_rpc_ExchangeId}, $data->{stats}{client_rpc_CreateSess}, $data->{stats}{client_rpc_DestroySess}, $data->{stats}{client_rpc_DestroyClId}, $data->{stats}{client_rpc_LayoutGet}, $data->{stats}{client_rpc_GetDevInfo} ) = split( / +/m, $line ); - } elsif ( - $previous_line =~ /LayoutCommit\ +LayoutReturn\ +ReclaimCompl\ +ReadDataS\ +WriteDataS\ +CommitDataS/ ) + } elsif ( $previous_line =~ /LayoutCommit\ +LayoutReturn\ +ReclaimCompl\ +ReadDataS\ +WriteDataS\ +CommitDataS/ + && $mode eq 'client' ) { $line =~ s/^ +//; ( @@ -421,117 +431,124 @@ if ( $^O eq 'freebsd' ) { $data->{stats}{client_rpc_ReclaimCompl}, $data->{stats}{client_rpc_ReadDataS}, $data->{stats}{client_rpc_WriteDataS}, $data->{stats}{client_rpc_CommitDataS} ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /OpenLayout\ +CreateLayout\ +BindConnSess\ +LookupOpen/ ) { + } elsif ( $previous_line =~ /OpenLayout\ +CreateLayout\ +BindConnSess\ +LookupOpen/ && $mode eq 'client' ) { $line =~ s/^ +//; ( $data->{stats}{client_rpc_OpenLayout}, $data->{stats}{client_rpc_CreateLayout}, $data->{stats}{client_rpc_BindConnSess}, $data->{stats}{client_rpc_LookupOpen} ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /IOAdvise\ +Allocate\ +Copy\ +Seek\ +SeekDataS\ +GetExtattr/ ) { + } elsif ( $previous_line =~ /IOAdvise\ +Allocate\ +Copy\ +Seek\ +SeekDataS\ +GetExtattr/ && $mode eq 'client' ) + { $line =~ s/^ +//; ( $data->{stats}{client_rpc_IOAdvise}, $data->{stats}{client_rpc_Allocate}, $data->{stats}{client_rpc_Copy}, $data->{stats}{client_rpc_Seek}, $data->{stats}{client_rpc_SeekDataS}, 
$data->{stats}{client_rpc_GetExtattr} ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /SetExtattr\ +RmExtattr\ +ListExtattr\ +Deallocate\ +LayoutError/ ) { + } elsif ( $previous_line =~ /SetExtattr\ +RmExtattr\ +ListExtattr\ +Deallocate\ +LayoutError/ + && $mode eq 'client' ) + { $line =~ s/^ +//; ( $data->{stats}{client_rpc_SetExtattr}, $data->{stats}{client_rpc_RmExtattr}, $data->{stats}{client_rpc_ListExtattr}, $data->{stats}{client_rpc_Deallocate}, $data->{stats}{client_rpc_LayoutError} ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /OpenOwner\ +Opens\ +LockOwner\ +Locks\ +Delegs\ +LocalOwn/ ) { + } elsif ( $previous_line =~ /OpenOwner\ +Opens\ +LockOwner\ +Locks\ +Delegs\ +LocalOwn/ && $mode eq 'client' ) { $line =~ s/^ +//; ( $data->{stats}{client_OpenOwner}, $data->{stats}{client_Opens}, $data->{stats}{client_LockOwner}, - $data->{stats}{client_Locks}, $data->{stats}{client_Delegs}, $data->{stats}{client_LockOwner} + $data->{stats}{client_Locks}, $data->{stats}{client_Delegs}, $data->{stats}{client_LocalOwn} ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /LocalOpen\ +LocalLown\ +LocalLock\ +Layouts/ ) { + } elsif ( $previous_line =~ /LocalOpen\ +LocalLown\ +LocalLock\ +Layouts/ && $mode eq 'client' ) { $line =~ s/^ +//; ( $data->{stats}{client_LocalOpen}, $data->{stats}{client_LocalLown}, $data->{stats}{client_LocalLock}, $data->{stats}{client_Layouts} ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /TimedOut\ +Invalid\ +X\ Replies\ +Retries\ +Requests/ ) { + } elsif ( $previous_line =~ /TimedOut\ +Invalid\ +X\ Replies\ +Retries\ +Requests/ && $mode eq 'client' ) { $line =~ s/^ +//; ( $data->{stats}{client_rpc_info_TimedOut}, $data->{stats}{client_rpc_info_Invalid}, $data->{stats}{client_rpc_info_X_Replies}, $data->{stats}{client_rpc_info_Retries}, $data->{stats}{client_rpc_info_Requests} ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /Attr\ Hits\ +Attr\ Misses\ +Lkup\ Hits\ +Lkup\ Misses/ ) { + } elsif ( $previous_line =~ /Attr\ Hits\ +Attr\ Misses\ +Lkup\ Hits\ +Lkup\ Misses/ && $mode eq 'client' ) { $line =~ s/^ +//; ( $data->{stats}{client_cache_Attr_Hits}, $data->{stats}{client_cache_Attr_Misses}, $data->{stats}{client_cache_Lkup_Hits}, $data->{stats}{client_cache_Lkup_Misses} ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /BioR\ Hits\ +BioR\ Misses\ +BioW\ Hits\ +BioW\ Misses/ ) { + } elsif ( $previous_line =~ /BioR\ Hits\ +BioR\ Misses\ +BioW\ Hits\ +BioW\ Misses/ && $mode eq 'client' ) { $line =~ s/^ +//; ( $data->{stats}{client_cache_BioR_Hits}, $data->{stats}{client_cache_BioR_Misses}, $data->{stats}{client_cache_BioW_Hits}, $data->{stats}{client_cache_BioW_Misses} ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /BioRL\ Hits\ +BioRL\ Misse\ +BioD\ Hits\ +BioD\ Misses/ ) { + } elsif ( $previous_line =~ /BioRL Hits\ +BioRL\ +Misses\ +BioD Hits\ +BioD Misses/ && $mode eq 'client' ) { $line =~ s/^ +//; ( $data->{stats}{client_cache_BioRL_Hits}, $data->{stats}{client_cache_BioRL_Misses}, - $data->{stats}{client_cache_BioD_Hits}, $data->{stats}{client_cache_BioD_Misses} + $data->{stats}{client_cache_BioD_Hits}, $data->{stats}{client_cache_BioD_Misses}, ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /DirE\ Hits\ +DirE\ Misses/ ) { + } elsif ( $previous_line =~ /DirE\ Hits\ +DirE\ Misses/ && $mode eq 'client' ) { $line =~ s/^ +//; ( $data->{stats}{client_cache_DirE_Hits}, $data->{stats}{client_cache_DirE_Misses}, ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /Getattr\ +Setattr\ +Lookup\ 
+Readlink\ +Read\ +Write/ ) { + } elsif ( $previous_line =~ /Getattr\ +Setattr\ +Lookup\ +Readlink\ +Read\ +Write/ && $mode eq 'server' ) { $line =~ s/^ +//; ( $data->{stats}{server_Getattr}, $data->{stats}{server_Setattr}, $data->{stats}{server_Lookup}, $data->{stats}{server_Readlink}, $data->{stats}{server_Read}, $data->{stats}{server_Write}, ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /Create\ +Remove\ +Rename\ +Link\ +Symlink\ +Mkdir/ ) { + } elsif ( $previous_line =~ /Create\ +Remove\ +Rename\ +Link\ +Symlink\ +Mkdir/ && $mode eq 'server' ) { $line =~ s/^ +//; ( $data->{stats}{server_Create}, $data->{stats}{server_Remove}, $data->{stats}{server_Rename}, $data->{stats}{server_Link}, $data->{stats}{server_Symlink}, $data->{stats}{server_Mkdir}, ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /Rmdir\ +Readdir\ +RdirPlus\ +Access\ +Mknod\ +Fsstat/ ) { + } elsif ( $previous_line =~ /Rmdir\ +Readdir\ +RdirPlus\ +Access\ +Mknod\ +Fsstat/ && $mode eq 'server' ) { $line =~ s/^ +//; ( $data->{stats}{server_Rmdir}, $data->{stats}{server_Readdir}, $data->{stats}{server_RdirPlus}, $data->{stats}{server_Access}, $data->{stats}{server_Mknod}, $data->{stats}{server_Fsstat}, ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /FSinfo\ +pathConf\ +Commit\ +LookupP\ +SetClId\ +SetClIdCf/ ) { + } elsif ( $previous_line =~ /FSinfo\ +pathConf\ +Commit\ +LookupP\ +SetClId\ +SetClIdCf/ && $mode eq 'server' ) + { $line =~ s/^ +//; ( $data->{stats}{server_FSinfo}, $data->{stats}{server_pathConf}, $data->{stats}{server_Commit}, $data->{stats}{server_LookupP}, $data->{stats}{server_SetClId}, $data->{stats}{server_SetClIdCf}, ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /Open\ +OpenAttr\ +OpenDwnGr\ +OpenCfrm\ +DelePurge\ +DelRet/ ) { + } elsif ( $previous_line =~ /Open\ +OpenAttr\ +OpenDwnGr\ +OpenCfrm\ +DelePurge\ +DelRet/ && $mode eq 'server' ) + { $line =~ s/^ +//; ( $data->{stats}{server_Open}, $data->{stats}{server_OpenAttr}, $data->{stats}{server_OpenDwnGr}, $data->{stats}{server_OpenCfrm}, $data->{stats}{server_DelePurge}, $data->{stats}{server_DelRet}, ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /GetFH\ +Lock\ +LockT\ +LockU\ +Close\ +Verify/ ) { + } elsif ( $previous_line =~ /GetFH\ +Lock\ +LockT\ +LockU\ +Close\ +Verify/ && $mode eq 'server' ) { $line =~ s/^ +//; ( $data->{stats}{server_GetFH}, $data->{stats}{server_Lock}, $data->{stats}{server_LockT}, $data->{stats}{server_LockU}, $data->{stats}{server_Close}, $data->{stats}{server_Verify}, ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /NVerify\ +PutFH\ +PutPubFH\ +PutRootFH\ +Renew\ +RestoreFH/ ) { + } elsif ( $previous_line =~ /NVerify\ +PutFH\ +PutPubFH\ +PutRootFH\ +Renew\ +RestoreFH/ && $mode eq 'server' ) + { $line =~ s/^ +//; ( $data->{stats}{server_NVerify}, $data->{stats}{server_PutFH}, $data->{stats}{server_PutPubFH}, $data->{stats}{server_PutRootFH}, $data->{stats}{server_Renew}, $data->{stats}{server_RestoreFH}, ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /SaveFH\ +Secinfo\ +RelLockOwn\ +V4Create/ ) { + } elsif ( $previous_line =~ /SaveFH\ +Secinfo\ +RelLockOwn\ +V4Create/ && $mode eq 'server' ) { $line =~ s/^ +//; ( $data->{stats}{server_SaveFH}, $data->{stats}{server_Secinfo}, $data->{stats}{server_RelLockOwn}, $data->{stats}{server_V4Create} ) = split( / +/m, $line ); - } elsif ( $previous_line - =~ /BackChannelCt\ *BindConnToSes\ +ExchangeID\ +CreateSess\ +DestroySess\ +FreeStateID/ ) + } elsif ( + $previous_line =~ /BackChannelCt\ *BindConnToSes\ +ExchangeID\ 
+CreateSess\ +DestroySess\ +FreeStateID/ + && $mode eq 'server' ) { $line =~ s/^ +//; ( @@ -540,7 +557,8 @@ if ( $^O eq 'freebsd' ) { $data->{stats}{server_DestroySess}, $data->{stats}{server_FreeStateID}, ) = split( / +/m, $line ); } elsif ( - $previous_line =~ /GetDirDeleg\ +GetDevInfo\ +GetDevList\ +[lL]ayoutCommit\ +LayoutGet\ +LayoutReturn/ ) + $previous_line =~ /GetDirDeleg\ +GetDevInfo\ +GetDevList\ +[lL]ayoutCommit\ +LayoutGet\ +LayoutReturn/ + && $mode eq 'server' ) { $line =~ s/^ +//; ( @@ -548,17 +566,21 @@ if ( $^O eq 'freebsd' ) { $data->{stats}{server_GetDevList}, $data->{stats}{server_layoutCommit}, $data->{stats}{server_LayoutGet}, $data->{stats}{server_LayoutReturn}, ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /SecInfNoName\ +Sequence\ +SetSSV\ +TestStateID\ +WantDeleg\ +DestroyClId/ ) { + } elsif ( $previous_line =~ /SecInfNoName\ +Sequence\ +SetSSV\ +TestStateID\ +WantDeleg\ +DestroyClId/ + && $mode eq 'server' ) + { $line =~ s/^ +//; ( $data->{stats}{server_SecInfNoName}, $data->{stats}{server_Sequence}, $data->{stats}{server_SetSSV}, $data->{stats}{server_TestStateID}, $data->{stats}{server_WantDeleg}, $data->{stats}{server_DestroyClId}, ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /ReclaimCompl/ ) { + } elsif ( $previous_line =~ /ReclaimCompl/ && $mode eq 'server' ) { $line =~ s/^ +//; ( $data->{stats}{server_ReclaimCompl} ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /Allocate\ +Copy\ +CopyNotify\ +Deallocate\ +IOAdvise\ +LayoutError/ ) { + } elsif ( $previous_line =~ /Allocate\ +Copy\ +CopyNotify\ +Deallocate\ +IOAdvise\ +LayoutError/ + && $mode eq 'server' ) + { $line =~ s/^ +//; ( $data->{stats}{server_Allocate}, $data->{stats}{server_Copy}, @@ -572,24 +594,24 @@ if ( $^O eq 'freebsd' ) { $data->{stats}{server_OffloadStat}, $data->{stats}{server_ReadPlus}, $data->{stats}{server_Seek}, $data->{stats}{server_WriteSame}, ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /Clone\ +GetExtattr\ +SetExtattr\ +ListExtattr\ +RmExtattr/ ) { + } elsif ( $previous_line =~ /Clone\ +GetExtattr\ +SetExtattr\ +ListExtattr\ +RmExtattr/ && $mode eq 'server' ) { $line =~ s/^ +//; ( $data->{stats}{server_Clone}, $data->{stats}{server_GetExtattr}, $data->{stats}{server_SetExtattr}, $data->{stats}{server_ListExtattr}, $data->{stats}{server_RmExtattr} ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /Clients\ +OpenOwner\ +Opens\ +LockOwner\ +Locks\ +Delegs/ ) { + } elsif ( $previous_line =~ /Clients\ +OpenOwner\ +Opens\ +LockOwner\ +Locks\ +Delegs/ && $mode eq 'server' ) { $line =~ s/^ +//; ( $data->{stats}{server_Clients}, $data->{stats}{server_OpenOwner}, $data->{stats}{server_Opens}, $data->{stats}{server_LockOwner}, $data->{stats}{server_Locks}, $data->{stats}{server_Delegs}, ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /^ *Layouts *$/ ) { + } elsif ( $previous_line =~ /^ +Layouts$/ && $mode eq 'server' ) { $line =~ s/^ +//; $line =~ s/ +$//; - $data->{stats}{server_Clients} = $line; - } elsif ( $previous_line =~ /Inprog\ +Non\-idem\ +Misses\ +CacheSize\ +TCPPeak/ ) { + $data->{stats}{server_Layouts} = $line; + } elsif ( $previous_line =~ /Inprog\ +Non\-idem\ +Misses\ +CacheSize\ +TCPPeak/ && $mode eq 'server' ) { $line =~ s/^ +//; ( $data->{stats}{server_cache_Inprog}, $data->{stats}{'server_cache_Non-idem'}, @@ -610,167 +632,173 @@ if ( $^O eq 'linux' ) { my $output_raw = `nfsstat | sed 's/[0-9\.]*\%//g'`; my @output_split = split( /\n/, $output_raw ); my $previous_line = ''; + my $mode = ''; foreach my $line 
(@output_split) { - if ( $previous_line =~ /calls\ +badcalls\ +badfmt\ +badauth\ +badclnt/ ) { + if ( $line =~ /^[Cc]lient/ ) { + $mode = 'client'; + } elsif ( $line =~ /^[Ss]erver/ ) { + $mode = 'server'; + } + if ( $previous_line =~ /calls\ +badcalls\ +badfmt\ +badauth\ +badclnt/ && $mode eq 'server' ) { ( $data->{stats}{server_calls}, $data->{stats}{'server_badcalls'}, $data->{stats}{server_badfmt}, $data->{stats}{server_badauth}, $data->{stats}{server_badclnt}, ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /null\ +compound/ ) { + } elsif ( $previous_line =~ /null\ +compound/ && $mode eq 'server' ) { ( $data->{stats}{server_null}, $data->{stats}{server_compound}, ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /op0\-unused\ +op1\-unused\ +op2\-future\ +access\ +close/ ) { + } elsif ( $previous_line =~ /op0\-unused\ +op1\-unused\ +op2\-future\ +access\ +close/ && $mode eq 'server' ) { ( $data->{stats}{'server_op0-unused'}, $data->{stats}{'server_op1-unused'}, $data->{stats}{'server_op2-future'}, $data->{stats}{server_Access}, $data->{stats}{server_Close}, ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /commit\ +create\ +delegpurge\ +delegreturn\ +getattr/ ) { + } elsif ( $previous_line =~ /commit\ +create\ +delegpurge\ +delegreturn\ +getattr/ && $mode eq 'server' ) { ( $data->{stats}{server_Commit}, $data->{stats}{server_Create}, $data->{stats}{server_DelePurge}, $data->{stats}{server_Delegs}, $data->{stats}{server_Getattr}, ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /getfh\ +link\ +lock\ +lockt\ +locku/ ) { + } elsif ( $previous_line =~ /getfh\ +link\ +lock\ +lockt\ +locku/ && $mode eq 'server' ) { ( $data->{stats}{server_GetFH}, $data->{stats}{server_Link}, $data->{stats}{server_Lock}, $data->{stats}{server_LockT}, $data->{stats}{server_LockU}, ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /lookup\ +lookup_root\ +nverify\ +open\ +openattr/ ) { + } elsif ( $previous_line =~ /lookup\ +lookup_root\ +nverify\ +open\ +openattr/ && $mode eq 'server' ) { ( $data->{stats}{server_Lookup}, $data->{stats}{server_LookupP}, $data->{stats}{server_NVerify}, $data->{stats}{server_Open}, $data->{stats}{server_OpenAttr}, ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /open_conf\ +open_dgrd\ +putfh\ putpubfh\ +putrootfh/ ) { + } elsif ( $previous_line =~ /open_conf\ +open_dgrd\ +putfh\ +putpubfh\ +putrootfh/ && $mode eq 'server' ) { ( $data->{stats}{server_OpenCfrm}, $data->{stats}{server_OpenDwnGr}, $data->{stats}{server_PutFH}, $data->{stats}{server_PutPubFH}, $data->{stats}{server_PutRootFH}, ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /read\ +readdir\ +readlink\ +remove\ +rename/ ) { + } elsif ( $previous_line =~ /read\ +readdir\ +readlink\ +remove\ +rename/ && $mode eq 'server' ) { ( $data->{stats}{server_Read}, $data->{stats}{server_Readdir}, $data->{stats}{server_Readlink}, $data->{stats}{server_Remove}, $data->{stats}{server_Rename}, ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /renew\ +restorefh\ +savefh\ +secinfo\ +setattr/ ) { + } elsif ( $previous_line =~ /renew\ +restorefh\ +savefh\ +secinfo\ +setattr/ && $mode eq 'server' ) { ( $data->{stats}{server_Renew}, $data->{stats}{server_RestoreFH}, $data->{stats}{server_SaveFH}, $data->{stats}{server_Secinfo}, $data->{stats}{server_Setattr}, ) = split( /[\ \t]+/m, $line );
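# A note on the parsing pattern used throughout this loop: nfsstat prints a
# header row followed by a value row, so the parser remembers the previous
# line and, when it matches a known header, splits the current line into the
# matching counters; the $mode flag added by this patch keeps identically
# named client and server headers from clobbering each other. A minimal
# self-contained sketch of the idea, over hypothetical sample text rather
# than real nfsstat output:
my %sk_counters;
my $sk_mode = '';
my $sk_prev = '';
foreach my $sk_line ( 'Server nfs:', '   read  write', '     10     20' ) {
    if    ( $sk_line =~ /^[Cc]lient/ ) { $sk_mode = 'client'; }
    elsif ( $sk_line =~ /^[Ss]erver/ ) { $sk_mode = 'server'; }
    if ( $sk_prev =~ /read\ +write/ && $sk_mode eq 'server' ) {
        ( my $sk_values = $sk_line ) =~ s/^ +//;
        ( $sk_counters{server_Read}, $sk_counters{server_Write} ) = split( / +/, $sk_values );
    }
    $sk_prev = $sk_line;
}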
- } elsif ( $previous_line =~ /setcltid\ +setcltidconf\ +verify\ +write\ +rellockowner/ ) { + } elsif ( $previous_line =~ /setcltid\ +setcltidconf\ +verify\ +write\ +rellockowner/ && $mode eq 'server' ) { ( $data->{stats}{server_SetClId}, $data->{stats}{server_SetClIdCf}, $data->{stats}{server_Verify}, $data->{stats}{server_Write}, $data->{stats}{server_RelLockOwn}, ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /bc_ctl\ +bind_conn\ +exchange_id\ +create_ses\ +destroy_ses/ ) { + } elsif ( $previous_line =~ /bc_ctl\ +bind_conn\ +exchange_id\ +create_ses\ +destroy_ses/ && $mode eq 'server' ) { ( $data->{stats}{server_BackChannelCt}, $data->{stats}{server_BindConnToSes}, $data->{stats}{server_ExchangeID}, $data->{stats}{server_CreateSess}, $data->{stats}{server_DestroySess}, ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /free_stateid\ +getdirdeleg\ +getdevinfo\ +getdevlist\ +layoutcommit/ ) { + } elsif ( $previous_line =~ /free_stateid\ +getdirdeleg\ +getdevinfo\ +getdevlist\ +layoutcommit/ && $mode eq 'server' ) { ( $data->{stats}{server_FreeStateID}, $data->{stats}{server_GetDirDeleg}, $data->{stats}{server_GetDevInfo}, $data->{stats}{server_GetDevList}, $data->{stats}{server_layoutCommit}, ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /layoutget\ +layoutreturn\ +secinfononam\ +sequence\ +set_ssv/ ) { + } elsif ( $previous_line =~ /layoutget\ +layoutreturn\ +secinfononam\ +sequence\ +set_ssv/ && $mode eq 'server' ) { ( $data->{stats}{server_LayoutGet}, $data->{stats}{server_LayoutReturn}, $data->{stats}{server_SecInfNoName}, $data->{stats}{server_Sequence}, $data->{stats}{server_SetSSV}, ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /test_stateid\ +want_deleg\ +destroy_clid\ +reclaim_comp\ +allocate/ ) { + } elsif ( $previous_line =~ /test_stateid\ +want_deleg\ +destroy_clid\ +reclaim_comp\ +allocate/ && $mode eq 'server' ) { ( $data->{stats}{server_TestStateID}, $data->{stats}{server_WantDeleg}, $data->{stats}{server_DestroyClId}, $data->{stats}{server_ReclaimCompl}, $data->{stats}{server_Allocate}, ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /copy\ +copy_notify\ +deallocate\ +ioadvise\ +layouterror/ ) { + } elsif ( $previous_line =~ /copy\ +copy_notify\ +deallocate\ +ioadvise\ +layouterror/ && $mode eq 'server' ) { ( $data->{stats}{server_Copy}, $data->{stats}{server_CopyNotify}, $data->{stats}{server_Deallocate}, $data->{stats}{server_IOAdvise}, $data->{stats}{server_LayoutError}, ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /layoutstats\ +offloadcancel\ +offloadstatus\ +readplus\ +seek/ ) { + } elsif ( $previous_line =~ /layoutstats\ +offloadcancel\ +offloadstatus\ +readplus\ +seek/ && $mode eq 'server' ) { ( $data->{stats}{server_Layouts}, $data->{stats}{server_OffloadCncl}, $data->{stats}{server_OffloadStat}, $data->{stats}{server_ReadPlus}, $data->{stats}{server_Seek}, ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /write_same/ ) { + } elsif ( $previous_line =~ /write_same/ && $mode eq 'server' ) { ( $data->{stats}{server_WriteSame} ) = split( /[\ \t]+/m, $line ); } elsif ( $previous_line =~ /calls\ +retrans\ +authrefrsh/ ) { ( $data->{stats}{client_rpc_info_Requests}, $data->{stats}{client_rpc_info_Retries}, $data->{stats}{client_rpc_info_X_Replies} ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /null\ +read\ +write\ +commit\ +open/ ) { + } elsif ( $previous_line =~ /null\ +read\ +write\ +commit\ +open/ && $mode eq 'client' ) { ( $data->{stats}{client_rpc_null}, $data->{stats}{client_rpc_Read}, $data->{stats}{client_rpc_Write}, $data->{stats}{client_rpc_Commit}, 
$data->{stats}{client_rpc_Open}, ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /open_conf\ +open_noat\ +open_dgrd\ +close\ +setattr/ ) { + } elsif ( $previous_line =~ /open_conf\ +open_noat\ +open_dgrd\ +close\ +setattr/ && $mode eq 'client' ) { ( $data->{stats}{client_rpc_OpenCfr}, $data->{stats}{client_rpc_OpenLayout}, $data->{stats}{client_rpc_OpenDownGr}, $data->{stats}{client_rpc_Commit}, $data->{stats}{client_rpc_Open}, ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /fsinfo\ +renew\ +setclntid\ +confirm\ +lock/ ) { + } elsif ( $previous_line =~ /fsinfo\ +renew\ +setclntid\ +confirm\ +lock/ && $mode eq 'client' ) { ( $data->{stats}{client_rpc_FSinfo}, $data->{stats}{client_rpc_renew}, $data->{stats}{client_rpc_SetClId}, $data->{stats}{client_rpc_confirm}, $data->{stats}{client_rpc_Lock}, ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /lockt\ +locku\ +access\ +getattr\ +lookup/ ) { + } elsif ( $previous_line =~ /lockt\ +locku\ +access\ +getattr\ +lookup/ && $mode eq 'client' ) { ( $data->{stats}{client_rpc_LockT}, $data->{stats}{client_rpc_LockU}, $data->{stats}{client_rpc_Access}, $data->{stats}{client_rpc_Getattr}, $data->{stats}{client_rpc_Lookup}, ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /lookup_root\ +remove\ +rename\ +link\ +symlink/ ) { + } elsif ( $previous_line =~ /lookup_root\ +remove\ +rename\ +link\ +symlink/ && $mode eq 'client' ) { ( $data->{stats}{client_rpc_LookOpen}, $data->{stats}{client_rpc_Remove}, $data->{stats}{client_rpc_Rename}, $data->{stats}{client_rpc_Link}, $data->{stats}{client_rpc_Symlink}, ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /create\ +pathconf\ +statfs\ +readlink\ +readdir/ ) { + } elsif ( $previous_line =~ /create\ +pathconf\ +statfs\ +readlink\ +readdir/ && $mode eq 'client' ) { ( $data->{stats}{client_rpc_Create}, $data->{stats}{client_rpc_pathConf}, $data->{stats}{client_rpc_statfs}, $data->{stats}{client_rpc_Readlink}, $data->{stats}{client_rpc_Readlink}, ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /server_caps\ +delegreturn\ +getacl\ +setacl\ +fs_locations/ ) { + } elsif ( $previous_line =~ /server_caps\ +delegreturn\ +getacl\ +setacl\ +fs_locations/ && $mode eq 'client' ) { ( $data->{stats}{client_rpc_server_caps}, $data->{stats}{client_rpc_DelegRet}, $data->{stats}{client_rpc_GetAcl}, $data->{stats}{client_rpc_SetAcl}, $data->{stats}{client_rpc_fs_locations}, ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /rel_lkowner\ +secinfo\ +fsid_present\ +exchange_id\ +create_session/ ) { + } elsif ( $previous_line =~ /rel_lkowner\ +secinfo\ +fsid_present\ +exchange_id\ +create_session/ && $mode eq 'client' ) { ( $data->{stats}{client_rpc_RelLckOwn}, $data->{stats}{client_rpc_secinfo}, $data->{stats}{client_rpc_fsid_present}, $data->{stats}{client_rpc_ExchangeId}, $data->{stats}{client_rpc_CreateSess}, ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /destroy_session\ +sequence\ +get_lease_time\ +reclaim_comp\ +layoutget/ ) { + } elsif ( $previous_line =~ /destroy_session\ +sequence\ +get_lease_time\ +reclaim_comp\ +layoutget/ && $mode eq 'client' ) { ( $data->{stats}{client_rpc_DestroySess}, $data->{stats}{client_rpc_sequence}, $data->{stats}{client_rpc_get_lease_time}, $data->{stats}{client_rpc_ReclaimCompl}, $data->{stats}{client_rpc_LayoutGet}, ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /getdevinfo\ +layoutcommit\ +layoutreturn\ +secinfo_no\ +test_stateid/ ) { + } elsif ( $previous_line =~ 
/getdevinfo\ +layoutcommit\ +layoutreturn\ +secinfo_no\ +test_stateid/ && $mode eq 'client' ) { ( $data->{stats}{client_rpc_GetDevInfo}, $data->{stats}{client_rpc_LayoutCommit}, $data->{stats}{client_rpc_LayoutReturn}, $data->{stats}{client_rpc_secinfo_no}, $data->{stats}{client_rpc_test_stateid}, ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /free_stateid\ +getdevicelist\ +bind_conn_to_ses\ +destroy_clientid\ +seek/ ) { + } elsif ( $previous_line =~ /free_stateid\ +getdevicelist\ +bind_conn_to_ses\ +destroy_clientid\ +seek/ && $mode eq 'client' ) { ( $data->{stats}{client_rpc_FreeStateID}, $data->{stats}{client_rpc_getdevicelist}, $data->{stats}{client_rpc_BindConnSess}, $data->{stats}{client_rpc_DestroyClId}, $data->{stats}{client_rpc_Seek}, ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /allocate\ +deallocate\ +layoutstats\ +clone/ ) { + } elsif ( $previous_line =~ /allocate\ +deallocate\ +layoutstats\ +clone/ && $mode eq 'client' ) { ( $data->{stats}{client_rpc_Allocate}, $data->{stats}{client_rpc_Deallocate}, $data->{stats}{client_rpc_layoutstats}, $data->{stats}{client_rpc_clone}, @@ -787,9 +815,9 @@ if ( $^O eq 'linux' ) { #### my @stat_keys = keys( %{ $data->{stats} } ); foreach my $item (@stat_keys) { - if ( $item =~ /^client/ && $data->{stats}{$item} > 0 ) { + if ( $item =~ /^client/ && defined($data->{stats}{$item}) && $data->{stats}{$item} > 0 ) { $data->{is_client} = 1; - } elsif ( $item =~ /^server/ && $data->{stats}{$item} > 0 ) { + } elsif ( $item =~ /^server/ && defined($data->{stats}{$item}) && $data->{stats}{$item} > 0 ) { $data->{is_server} = 1; } } From d1d4ae77111a33c49a8edd3ae1afff9256b45c41 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Sat, 9 Mar 2024 11:21:19 -0600 Subject: [PATCH 285/332] begin re-working nfs stats gather... 
freebsd done now for Linux --- snmp/nfs | 900 ++++++++++++++++++++++--------------------------------- 1 file changed, 358 insertions(+), 542 deletions(-) diff --git a/snmp/nfs b/snmp/nfs index a82159a24..87f518458 100755 --- a/snmp/nfs +++ b/snmp/nfs @@ -132,20 +132,6 @@ my $data = { mounts => [], mounted_by => [], stats => { - client_rpc_clone => undef, - client_rpc_layoutstats => undef, - client_rpc_getdevicelist => undef, - client_rpc_test_stateid => undef, - client_rpc_secinfo_no => undef, - client_rpc_get_lease_time => undef, - client_rpc_sequence => undef, - client_rpc_fsid_present => undef, - client_rpc_secinfo => undef, - client_rpc_fs_locations => undef, - client_rpc_server_caps => undef, - client_rpc_renew => undef, - client_rpc_confirm => undef, - client_rpc_null => undef, client_rpc_Getattr => undef, client_rpc_Setattr => undef, client_rpc_Lookup => undef, @@ -238,94 +224,94 @@ my $data = { client_cache_BioD_Misses => undef, client_cache_DirE_Hits => undef, client_cache_DirE_Misses => undef, - server_Getattr => undef, - server_Setattr => undef, - server_Lookup => undef, - server_Readlink => undef, - server_Read => undef, - server_Write => undef, - server_Create => undef, - server_Remove => undef, - server_Rename => undef, - server_Link => undef, - server_Symlink => undef, - server_Mkdir => undef, - server_Rmdir => undef, - server_Readdir => undef, - server_RdirPlus => undef, - server_Access => undef, - server_Mknod => undef, - server_Fsstat => undef, - server_FSinfo => undef, - server_pathConf => undef, - server_Commit => undef, - server_LookupP => undef, - server_SetClId => undef, - server_SetClIdCf => undef, - server_Open => undef, - server_OpenAttr => undef, - server_OpenDwnGr => undef, - server_OpenCfrm => undef, - server_DelePurge => undef, - server_DelRet => undef, - server_GetFH => undef, - server_Lock => undef, - server_LockT => undef, - server_LockU => undef, - server_Close => undef, - server_Verify => undef, - server_NVerify => undef, - server_PutFH => undef, - server_PutPubFH => undef, - server_PutRootFH => undef, - server_Renew => undef, - server_RestoreFH => undef, - server_SaveFH => undef, - server_Secinfo => undef, - server_RelLockOwn => undef, - server_V4Create => undef, - server_BackChannelCt => undef, - server_BindConnToSes => undef, - server_ExchangeID => undef, - server_CreateSess => undef, - server_DestroySess => undef, - server_FreeStateID => undef, - server_GetDirDeleg => undef, - server_GetDevInfo => undef, - server_GetDevList => undef, - server_layoutCommit => undef, - server_LayoutGet => undef, - server_LayoutReturn => undef, - server_GetDirDeleg => undef, - server_GetDevInfo => undef, - server_GetDevList => undef, - server_layoutCommit => undef, - server_LayoutGet => undef, - server_LayoutReturn => undef, - server_SecInfNoName => undef, - server_Sequence => undef, - server_SetSSV => undef, - server_TestStateID => undef, - server_WantDeleg => undef, - server_DestroyClId => undef, - server_ReclaimCompl => undef, - server_Allocate => undef, - server_Copy => undef, - server_CopyNotify => undef, - server_Deallocate => undef, - server_IOAdvise => undef, - server_LayoutError => undef, - server_LayoutStats => undef, - server_OffloadCncl => undef, - server_OffloadStat => undef, - server_ReadPlus => undef, - server_Seek => undef, - server_WriteSame => undef, - server_Clone => undef, - server_GetExtattr => undef, - server_SetExtattr => undef, - server_ListExtattr => undef, - server_RmExtattr => undef, + server_rpc_Getattr => undef, + server_rpc_Setattr => undef, 
+ server_rpc_Lookup => undef, + server_rpc_Readlink => undef, + server_rpc_Read => undef, + server_rpc_Write => undef, + server_rpc_Create => undef, + server_rpc_Remove => undef, + server_rpc_Rename => undef, + server_rpc_Link => undef, + server_rpc_Symlink => undef, + server_rpc_Mkdir => undef, + server_rpc_Rmdir => undef, + server_rpc_Readdir => undef, + server_rpc_RdirPlus => undef, + server_rpc_Access => undef, + server_rpc_Mknod => undef, + server_rpc_Fsstat => undef, + server_rpc_FSinfo => undef, + server_rpc_pathConf => undef, + server_rpc_Commit => undef, + server_rpc_LookupP => undef, + server_rpc_SetClId => undef, + server_rpc_SetClIdCf => undef, + server_rpc_Open => undef, + server_rpc_OpenAttr => undef, + server_rpc_OpenDwnGr => undef, + server_rpc_OpenCfrm => undef, + server_rpc_DelePurge => undef, + server_rpc_DelRet => undef, + server_rpc_GetFH => undef, + server_rpc_Lock => undef, + server_rpc_LockT => undef, + server_rpc_LockU => undef, + server_rpc_Close => undef, + server_rpc_Verify => undef, + server_rpc_NVerify => undef, + server_rpc_PutFH => undef, + server_rpc_PutPubFH => undef, + server_rpc_PutRootFH => undef, + server_rpc_Renew => undef, + server_rpc_RestoreFH => undef, + server_rpc_SaveFH => undef, + server_rpc_Secinfo => undef, + server_rpc_RelLockOwn => undef, + server_rpc_V4Create => undef, + server_rpc_BackChannelCt => undef, + server_rpc_BindConnToSes => undef, + server_rpc_ExchangeID => undef, + server_rpc_CreateSess => undef, + server_rpc_DestroySess => undef, + server_rpc_FreeStateID => undef, + server_rpc_GetDirDeleg => undef, + server_rpc_GetDevInfo => undef, + server_rpc_GetDevList => undef, + server_rpc_layoutCommit => undef, + server_rpc_LayoutGet => undef, + server_rpc_LayoutReturn => undef, + server_rpc_GetDirDeleg => undef, + server_rpc_GetDevInfo => undef, + server_rpc_GetDevList => undef, + server_rpc_layoutCommit => undef, + server_rpc_LayoutGet => undef, + server_rpc_LayoutReturn => undef, + server_rpc_SecInfNoName => undef, + server_rpc_Sequence => undef, + server_rpc_SetSSV => undef, + server_rpc_TestStateID => undef, + server_rpc_WantDeleg => undef, + server_rpc_DestroyClId => undef, + server_rpc_ReclaimCompl => undef, + server_rpc_Allocate => undef, + server_rpc_Copy => undef, + server_rpc_CopyNotify => undef, + server_rpc_Deallocate => undef, + server_rpc_IOAdvise => undef, + server_rpc_LayoutError => undef, + server_rpc_LayoutStats => undef, + server_rpc_OffloadCncl => undef, + server_rpc_OffloadStat => undef, + server_rpc_ReadPlus => undef, + server_rpc_Seek => undef, + server_rpc_WriteSame => undef, + server_rpc_Clone => undef, + server_rpc_GetExtattr => undef, + server_rpc_SetExtattr => undef, + server_rpc_ListExtattr => undef, + server_rpc_RmExtattr => undef, server_Clients => undef, server_OpenOwner => undef, server_Opens => undef, @@ -334,20 +320,10 @@ my $data = { server_Delegs => undef, server_Layouts => undef, server_cache_Inprog => undef, - 'server_cache_Non-idem' => undef, + server_cache_NonIdem => undef, server_cache_Misses => undef, server_cache_CacheSize => undef, server_cache_TCPPeak => undef, - server_calls => undef, - server_badcalls => undef, - server_badfmt => undef, - server_badauth => undef, - server_badclnt => undef, - server_null => undef, - server_compound => undef, - 'server_op0-unused' => undef, - 'server_op1-unused' => undef, - 'server_op2-future' => undef, } }; @@ -357,270 +333,272 @@ my $data = { #### #### if ( $^O eq 'freebsd' ) { - my $output_raw = `nfsstat -E`; - my @output_split = split( /\n/, $output_raw 
); - my $previous_line = ''; - my $mode = ''; - foreach my $line (@output_split) { - if ( $line =~ /^[Cc]lient/ ) { - $mode = 'client'; - } elsif ( $line =~ /^[Ss]erver/ ) { - $mode = 'server'; - } - if ( $previous_line =~ /Getattr\ +Setattr\ +Lookup\ +Readlink\ +Read\ +Write/ && $mode eq 'client' ) { - $line =~ s/^ +//; - ( - $data->{stats}{client_rpc_Getattr}, $data->{stats}{client_rpc_Setattr}, - $data->{stats}{client_rpc_Lookup}, $data->{stats}{client_rpc_Readlink}, - $data->{stats}{client_rpc_Read}, $data->{stats}{client_rpc_Write} - ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /Create\ +Remove\ +Rename\ +Link\ +Symlink\ +Mkdir/ && $mode eq 'client' ) { - $line =~ s/^ +//; - ( - $data->{stats}{client_rpc_Create}, $data->{stats}{client_rpc_Remove}, - $data->{stats}{client_rpc_Rename}, $data->{stats}{client_rpc_Link}, - $data->{stats}{client_rpc_Symlink}, $data->{stats}{client_rpc_Mkdir} - ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /Rmdir\ +Readdir\ +RdirPlus\ +Access\ +Mknod\ +Fsstat/ && $mode eq 'client' ) { - $line =~ s/^ +//; - ( - $data->{stats}{client_rpc_Rmdir}, $data->{stats}{client_rpc_Readdir}, - $data->{stats}{client_rpc_RdirPlus}, $data->{stats}{client_rpc_Access}, - $data->{stats}{client_rpc_Mknod}, $data->{stats}{client_rpc_Fsstat} - ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /FSinfo\ +pathConf\ +Commit\ +SetClId\ +SetClIdCf\ +Lock/ && $mode eq 'client' ) { - $line =~ s/^ +//; - ( - $data->{stats}{client_rpc_FSinfo}, $data->{stats}{client_rpc_pathConf}, - $data->{stats}{client_rpc_Commit}, $data->{stats}{client_rpc_SetClId}, - $data->{stats}{client_rpc_SetClIdCf}, $data->{stats}{client_rpc_Lock} - ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /LockT\ +LockU\ +Open\ +OpenCfr/ && $mode eq 'client' ) { - $line =~ s/^ +//; - ( - $data->{stats}{client_rpc_LockT}, $data->{stats}{client_rpc_LockU}, - $data->{stats}{client_rpc_Open}, $data->{stats}{client_rpc_OpenCfr} - ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /OpenDownGr\ +Close/ && $mode eq 'client' ) { - $line =~ s/^ +//; - ( $data->{stats}{client_rpc_OpenDownGr}, $data->{stats}{client_rpc_Close}, ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /RelLckOwn\ +FreeStateID\ +PutRootFH\ +DelegRet\ +GetAcl\ +SetAcl/ - && $mode eq 'client' ) - { - $line =~ s/^ +//; - ( - $data->{stats}{client_rpc_RelLckOwn}, $data->{stats}{client_rpc_FreeStateID}, - $data->{stats}{client_rpc_PutRootFH}, $data->{stats}{client_rpc_DelegRet}, - $data->{stats}{client_rpc_GetAcl}, $data->{stats}{client_rpc_SetAcl} - ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /ExchangeId\ +CreateSess\ +DestroySess\ +DestroyClId\ +LayoutGet\ +GetDevInfo/ - && $mode eq 'client' ) - { - $line =~ s/^ +//; - ( - $data->{stats}{client_rpc_ExchangeId}, $data->{stats}{client_rpc_CreateSess}, - $data->{stats}{client_rpc_DestroySess}, $data->{stats}{client_rpc_DestroyClId}, - $data->{stats}{client_rpc_LayoutGet}, $data->{stats}{client_rpc_GetDevInfo} - ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /LayoutCommit\ +LayoutReturn\ +ReclaimCompl\ +ReadDataS\ +WriteDataS\ +CommitDataS/ - && $mode eq 'client' ) - { - $line =~ s/^ +//; - ( - $data->{stats}{client_rpc_LayoutCommit}, $data->{stats}{client_rpc_LayoutReturn}, - $data->{stats}{client_rpc_ReclaimCompl}, $data->{stats}{client_rpc_ReadDataS}, - $data->{stats}{client_rpc_WriteDataS}, $data->{stats}{client_rpc_CommitDataS} - ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /OpenLayout\ +CreateLayout\ +BindConnSess\ +LookupOpen/ 
&& $mode eq 'client' ) { - $line =~ s/^ +//; - ( - $data->{stats}{client_rpc_OpenLayout}, $data->{stats}{client_rpc_CreateLayout}, - $data->{stats}{client_rpc_BindConnSess}, $data->{stats}{client_rpc_LookupOpen} - ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /IOAdvise\ +Allocate\ +Copy\ +Seek\ +SeekDataS\ +GetExtattr/ && $mode eq 'client' ) - { - $line =~ s/^ +//; - ( - $data->{stats}{client_rpc_IOAdvise}, $data->{stats}{client_rpc_Allocate}, - $data->{stats}{client_rpc_Copy}, $data->{stats}{client_rpc_Seek}, - $data->{stats}{client_rpc_SeekDataS}, $data->{stats}{client_rpc_GetExtattr} - ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /SetExtattr\ +RmExtattr\ +ListExtattr\ +Deallocate\ +LayoutError/ - && $mode eq 'client' ) - { - $line =~ s/^ +//; - ( - $data->{stats}{client_rpc_SetExtattr}, $data->{stats}{client_rpc_RmExtattr}, - $data->{stats}{client_rpc_ListExtattr}, $data->{stats}{client_rpc_Deallocate}, - $data->{stats}{client_rpc_LayoutError} - ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /OpenOwner\ +Opens\ +LockOwner\ +Locks\ +Delegs\ +LocalOwn/ && $mode eq 'client' ) { - $line =~ s/^ +//; - ( - $data->{stats}{client_OpenOwner}, $data->{stats}{client_Opens}, $data->{stats}{client_LockOwner}, - $data->{stats}{client_Locks}, $data->{stats}{client_Delegs}, $data->{stats}{client_LocalOwn} - ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /LocalOpen\ +LocalLown\ +LocalLock\ +Layouts/ && $mode eq 'client' ) { - $line =~ s/^ +//; - ( - $data->{stats}{client_LocalOpen}, $data->{stats}{client_LocalLown}, - $data->{stats}{client_LocalLock}, $data->{stats}{client_Layouts} - ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /TimedOut\ +Invalid\ +X\ Replies\ +Retries\ +Requests/ && $mode eq 'client' ) { - $line =~ s/^ +//; - ( - $data->{stats}{client_rpc_info_TimedOut}, $data->{stats}{client_rpc_info_Invalid}, - $data->{stats}{client_rpc_info_X_Replies}, $data->{stats}{client_rpc_info_Retries}, - $data->{stats}{client_rpc_info_Requests} - ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /Attr\ Hits\ +Attr\ Misses\ +Lkup\ Hits\ +Lkup\ Misses/ && $mode eq 'client' ) { - $line =~ s/^ +//; - ( - $data->{stats}{client_cache_Attr_Hits}, $data->{stats}{client_cache_Attr_Misses}, - $data->{stats}{client_cache_Lkup_Hits}, $data->{stats}{client_cache_Lkup_Misses} - ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /BioR\ Hits\ +BioR\ Misses\ +BioW\ Hits\ +BioW\ Misses/ && $mode eq 'client' ) { - $line =~ s/^ +//; - ( - $data->{stats}{client_cache_BioR_Hits}, $data->{stats}{client_cache_BioR_Misses}, - $data->{stats}{client_cache_BioW_Hits}, $data->{stats}{client_cache_BioW_Misses} - ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /BioRL Hits\ +BioRL\ +Misses\ +BioD Hits\ +BioD Misses/ && $mode eq 'client' ) { - $line =~ s/^ +//; - ( - $data->{stats}{client_cache_BioRL_Hits}, $data->{stats}{client_cache_BioRL_Misses}, - $data->{stats}{client_cache_BioD_Hits}, $data->{stats}{client_cache_BioD_Misses}, - ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /DirE\ Hits\ +DirE\ Misses/ && $mode eq 'client' ) { - $line =~ s/^ +//; - ( $data->{stats}{client_cache_DirE_Hits}, $data->{stats}{client_cache_DirE_Misses}, ) - = split( / +/m, $line ); - } elsif ( $previous_line =~ /Getattr\ +Setattr\ +Lookup\ +Readlink\ +Read\ +Write/ && $mode eq 'server' ) { - $line =~ s/^ +//; - ( - $data->{stats}{server_Getattr}, $data->{stats}{server_Setattr}, $data->{stats}{server_Lookup}, - $data->{stats}{server_Readlink}, $data->{stats}{server_Read}, 
$data->{stats}{server_Write}, - ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /Create\ +Remove\ +Rename\ +Link\ +Symlink\ +Mkdir/ && $mode eq 'server' ) { - $line =~ s/^ +//; - ( - $data->{stats}{server_Create}, $data->{stats}{server_Remove}, $data->{stats}{server_Rename}, - $data->{stats}{server_Link}, $data->{stats}{server_Symlink}, $data->{stats}{server_Mkdir}, - ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /Rmdir\ +Readdir\ +RdirPlus\ +Access\ +Mknod\ +Fsstat/ && $mode eq 'server' ) { - $line =~ s/^ +//; - ( - $data->{stats}{server_Rmdir}, $data->{stats}{server_Readdir}, $data->{stats}{server_RdirPlus}, - $data->{stats}{server_Access}, $data->{stats}{server_Mknod}, $data->{stats}{server_Fsstat}, - ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /FSinfo\ +pathConf\ +Commit\ +LookupP\ +SetClId\ +SetClIdCf/ && $mode eq 'server' ) - { - $line =~ s/^ +//; - ( - $data->{stats}{server_FSinfo}, $data->{stats}{server_pathConf}, $data->{stats}{server_Commit}, - $data->{stats}{server_LookupP}, $data->{stats}{server_SetClId}, $data->{stats}{server_SetClIdCf}, - ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /Open\ +OpenAttr\ +OpenDwnGr\ +OpenCfrm\ +DelePurge\ +DelRet/ && $mode eq 'server' ) - { - $line =~ s/^ +//; - ( - $data->{stats}{server_Open}, $data->{stats}{server_OpenAttr}, $data->{stats}{server_OpenDwnGr}, - $data->{stats}{server_OpenCfrm}, $data->{stats}{server_DelePurge}, $data->{stats}{server_DelRet}, - ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /GetFH\ +Lock\ +LockT\ +LockU\ +Close\ +Verify/ && $mode eq 'server' ) { - $line =~ s/^ +//; - ( - $data->{stats}{server_GetFH}, $data->{stats}{server_Lock}, $data->{stats}{server_LockT}, - $data->{stats}{server_LockU}, $data->{stats}{server_Close}, $data->{stats}{server_Verify}, - ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /NVerify\ +PutFH\ +PutPubFH\ +PutRootFH\ +Renew\ +RestoreFH/ && $mode eq 'server' ) - { - $line =~ s/^ +//; - ( - $data->{stats}{server_NVerify}, $data->{stats}{server_PutFH}, $data->{stats}{server_PutPubFH}, - $data->{stats}{server_PutRootFH}, $data->{stats}{server_Renew}, $data->{stats}{server_RestoreFH}, - ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /SaveFH\ +Secinfo\ +RelLockOwn\ +V4Create/ && $mode eq 'server' ) { - $line =~ s/^ +//; - ( - $data->{stats}{server_SaveFH}, $data->{stats}{server_Secinfo}, - $data->{stats}{server_RelLockOwn}, $data->{stats}{server_V4Create} - ) = split( / +/m, $line ); - } elsif ( - $previous_line =~ /BackChannelCt\ *BindConnToSes\ +ExchangeID\ +CreateSess\ +DestroySess\ +FreeStateID/ - && $mode eq 'server' ) - { - $line =~ s/^ +//; - ( - $data->{stats}{server_BackChannelCt}, $data->{stats}{server_BindConnToSes}, - $data->{stats}{server_ExchangeID}, $data->{stats}{server_CreateSess}, - $data->{stats}{server_DestroySess}, $data->{stats}{server_FreeStateID}, - ) = split( / +/m, $line ); - } elsif ( - $previous_line =~ /GetDirDeleg\ +GetDevInfo\ +GetDevList\ +[lL]ayoutCommit\ +LayoutGet\ +LayoutReturn/ - && $mode eq 'server' ) - { - $line =~ s/^ +//; - ( - $data->{stats}{server_GetDirDeleg}, $data->{stats}{server_GetDevInfo}, - $data->{stats}{server_GetDevList}, $data->{stats}{server_layoutCommit}, - $data->{stats}{server_LayoutGet}, $data->{stats}{server_LayoutReturn}, - ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /SecInfNoName\ +Sequence\ +SetSSV\ +TestStateID\ +WantDeleg\ +DestroyClId/ - && $mode eq 'server' ) - { - $line =~ s/^ +//; - ( - $data->{stats}{server_SecInfNoName}, 
$data->{stats}{server_Sequence}, - $data->{stats}{server_SetSSV}, $data->{stats}{server_TestStateID}, - $data->{stats}{server_WantDeleg}, $data->{stats}{server_DestroyClId}, - ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /ReclaimCompl/ && $mode eq 'server' ) { - $line =~ s/^ +//; - ( $data->{stats}{server_ReclaimCompl} ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /Allocate\ +Copy\ +CopyNotify\ +Deallocate\ +IOAdvise\ +LayoutError/ - && $mode eq 'server' ) - { - $line =~ s/^ +//; - ( - $data->{stats}{server_Allocate}, $data->{stats}{server_Copy}, - $data->{stats}{server_CopyNotify}, $data->{stats}{server_Deallocate}, - $data->{stats}{server_IOAdvise}, $data->{stats}{server_LayoutError}, - ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /LayoutStats\ +OffloadCncl\ +OffloadStat\ +ReadPlus\ +Seek\ +WriteSame/ ) { - $line =~ s/^ +//; - ( - $data->{stats}{server_LayoutStats}, $data->{stats}{server_OffloadCncl}, - $data->{stats}{server_OffloadStat}, $data->{stats}{server_ReadPlus}, - $data->{stats}{server_Seek}, $data->{stats}{server_WriteSame}, - ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /Clone\ +GetExtattr\ +SetExtattr\ +ListExtattr\ +RmExtattr/ && $mode eq 'server' ) { - $line =~ s/^ +//; - ( - $data->{stats}{server_Clone}, $data->{stats}{server_GetExtattr}, - $data->{stats}{server_SetExtattr}, $data->{stats}{server_ListExtattr}, - $data->{stats}{server_RmExtattr} - ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /Clients\ +OpenOwner\ +Opens\ +LockOwner\ +Locks\ +Delegs/ && $mode eq 'server' ) { - $line =~ s/^ +//; - ( - $data->{stats}{server_Clients}, $data->{stats}{server_OpenOwner}, $data->{stats}{server_Opens}, - $data->{stats}{server_LockOwner}, $data->{stats}{server_Locks}, $data->{stats}{server_Delegs}, - ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /^ +Layouts$/ && $mode eq 'server' ) { - $line =~ s/^ +//; - $line =~ s/ +$//; - $data->{stats}{server_Layouts} = $line; - } elsif ( $previous_line =~ /Inprog\ +Non\-idem\ +Misses\ +CacheSize\ +TCPPeak/ && $mode eq 'server' ) { - $line =~ s/^ +//; - ( - $data->{stats}{server_cache_Inprog}, $data->{stats}{'server_cache_Non-idem'}, - $data->{stats}{server_cache_Misses}, $data->{stats}{server_cache_CacheSize}, - $data->{stats}{server_cache_TCPPeak} - ) = split( / +/m, $line ); - } - $previous_line = $line; - } ## end foreach my $line (@output_split) + eval { + my $output_raw = `nfsstat -E --libxo json`; + my $stats_json = decode_json($output_raw); + $data->{stats}{client_rpc_Getattr} = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{getattr}; + $data->{stats}{client_rpc_Setattr} = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{setattr}; + $data->{stats}{client_rpc_Lookup} = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{lookup}; + $data->{stats}{client_rpc_Readlink} = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{readlink}; + $data->{stats}{client_rpc_Read} = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{read}; + $data->{stats}{client_rpc_Write} = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{write}; + $data->{stats}{client_rpc_Create} = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{create}; + $data->{stats}{client_rpc_Remove} = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{remove}; + $data->{stats}{client_rpc_Rename} = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{rename}; + $data->{stats}{client_rpc_Link} = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{link}; + 
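# The assignments in this eval walk the structure returned by decoding
# `nfsstat -E --libxo json` one field at a time. Where a stats key is just a
# case change of the JSON key, the same mapping could also be written
# table-driven; a small hypothetical sketch (only two mappings shown, using
# the same $data and $stats_json as the surrounding code):
my %sk_op_map = (
    client_rpc_Getattr => 'getattr',
    client_rpc_Setattr => 'setattr',
);
foreach my $sk_key ( keys(%sk_op_map) ) {
    $data->{stats}{$sk_key}
        = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{ $sk_op_map{$sk_key} };
}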
$data->{stats}{client_rpc_Symlink} = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{symlink}; + $data->{stats}{client_rpc_Mkdir} = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{mkdir}; + $data->{stats}{client_rpc_Rmdir} = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{rmdir}; + $data->{stats}{client_rpc_Readdir} = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{readdir}; + $data->{stats}{client_rpc_RdirPlus} = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{rdirplus}; + $data->{stats}{client_rpc_Access} = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{access}; + $data->{stats}{client_rpc_Mknod} = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{mknod}; + $data->{stats}{client_rpc_Fsstat} = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{fsstat}; + $data->{stats}{client_rpc_FSinfo} = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{fsinfo}; + $data->{stats}{client_rpc_pathConf} = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{pathconf}; + $data->{stats}{client_rpc_Commit} = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{commit}; + $data->{stats}{client_rpc_SetClId} = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{setclientid}; + $data->{stats}{client_rpc_SetClIdCf} + = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{setclientidcf}; + $data->{stats}{client_rpc_Lock} = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{lock}; + $data->{stats}{client_rpc_LockT} = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{lockt}; + $data->{stats}{client_rpc_LockU} = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{locku}; + $data->{stats}{client_rpc_Open} = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{open}; + $data->{stats}{client_rpc_OpenCfr} = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{opencfr}; + $data->{stats}{client_rpc_OpenDownGr} + = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{nfsv41}{opendowngr}; + $data->{stats}{client_rpc_Close} = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{nfsv41}{close}; + $data->{stats}{client_rpc_RelLckOwn} + = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{nfsv41}{rellckown}; + $data->{stats}{client_rpc_FreeStateID} + = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{nfsv41}{freestateid}; + $data->{stats}{client_rpc_PutRootFH} + = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{nfsv41}{putrootfh}; + $data->{stats}{client_rpc_DelegRet} + = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{nfsv41}{delegret}; + $data->{stats}{client_rpc_GetAcl} = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{nfsv41}{getacl}; + $data->{stats}{client_rpc_SetAcl} = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{nfsv41}{setacl}; + $data->{stats}{client_rpc_ExchangeId} + = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{nfsv41}{exchangeid}; + $data->{stats}{client_rpc_CreateSess} + = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{nfsv41}{createsess}; + $data->{stats}{client_rpc_DestroySess} + = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{nfsv41}{destroysess}; + $data->{stats}{client_rpc_DestroyClId} + = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{nfsv41}{destroyclid}; + $data->{stats}{client_rpc_LayoutGet} + = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{nfsv41}{layoutget}; + $data->{stats}{client_rpc_GetDevInfo} + = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{nfsv41}{getdevinfo}; + $data->{stats}{client_rpc_LayoutCommit} + = 
$stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{nfsv41}{layoutcomit}; + $data->{stats}{client_rpc_LayoutReturn} + = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{nfsv41}{layoutreturn}; + $data->{stats}{client_rpc_ReclaimCompl} + = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{nfsv41}{reclaimcompl}; + $data->{stats}{client_rpc_ReadDataS} + = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{nfsv41}{readdatas}; + $data->{stats}{client_rpc_WriteDataS} + = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{nfsv41}{writedatas}; + $data->{stats}{client_rpc_CommitDataS} + = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{nfsv41}{commitdatas}; + $data->{stats}{client_rpc_OpenLayout} + = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{nfsv41}{openlayout}; + $data->{stats}{client_rpc_CreateLayout} + = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{nfsv41}{createlayout}; + $data->{stats}{client_rpc_BindConnSess} + = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{nfsv41}{bindconnsess}; + $data->{stats}{client_rpc_LookupOpen} + = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{nfsv41}{lookupopen}; + $data->{stats}{client_rpc_IOAdvise} + = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{nfsv42}{ioadvise}; + $data->{stats}{client_rpc_Allocate} + = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{nfsv42}{allocate}; + $data->{stats}{client_rpc_Copy} = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{nfsv42}{copy}; + $data->{stats}{client_rpc_Seek} = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{nfsv42}{seek}; + $data->{stats}{client_rpc_SeekDataS} + = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{nfsv42}{seekdatas}; + $data->{stats}{client_rpc_GetExtattr} + = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{nfsv42}{getextattr}; + $data->{stats}{client_rpc_SetExtattr} + = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{nfsv42}{setextattr}; + $data->{stats}{client_rpc_RmExtattr} + = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{nfsv42}{rmextattr}; + $data->{stats}{client_rpc_ListExtattr} + = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{nfsv42}{listextattr}; + $data->{stats}{client_rpc_Deallocate} + = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{nfsv42}{deallocate}; + $data->{stats}{client_rpc_LayoutError} + = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{nfsv42}{layouterror}; + $data->{stats}{client_OpenOwner} = $stats_json->{nfsstat}{nfsv4}{clientstats}{client}{openowner}; + $data->{stats}{client_Opens} = $stats_json->{nfsstat}{nfsv4}{clientstats}{client}{opens}; + $data->{stats}{client_LockOwner} = $stats_json->{nfsstat}{nfsv4}{clientstats}{client}{lockowner}; + $data->{stats}{client_Locks} = $stats_json->{nfsstat}{nfsv4}{clientstats}{client}{locks}; + $data->{stats}{client_Delegs} = $stats_json->{nfsstat}{nfsv4}{clientstats}{client}{delegs}; + $data->{stats}{client_LocalOwn} = $stats_json->{nfsstat}{nfsv4}{clientstats}{client}{localown}; + $data->{stats}{client_LocalOpen} = $stats_json->{nfsstat}{nfsv4}{clientstats}{client}{localopen}; + $data->{stats}{client_LocalLown} = $stats_json->{nfsstat}{nfsv4}{clientstats}{client}{locallown}; + $data->{stats}{client_LocalLock} = $stats_json->{nfsstat}{nfsv4}{clientstats}{client}{locallock}; + $data->{stats}{client_Layouts} = $stats_json->{nfsstat}{nfsv4}{clientstats}{client}{layouts}; + $data->{stats}{client_rpc_info_TimedOut} = $stats_json->{nfsstat}{nfsv4}{clientstats}{rpc}{timedout}; + 
$data->{stats}{client_rpc_info_Invalid} = $stats_json->{nfsstat}{nfsv4}{clientstats}{rpc}{invalid}; + $data->{stats}{client_rpc_info_X_Replies} = $stats_json->{nfsstat}{nfsv4}{clientstats}{rpc}{timedout}; + $data->{stats}{client_rpc_info_Retries} = $stats_json->{nfsstat}{nfsv4}{clientstats}{rpc}{retries}; + $data->{stats}{client_rpc_info_Requests} = $stats_json->{nfsstat}{nfsv4}{clientstats}{rpc}{requests}; + $data->{stats}{client_cache_Attr_Hits} = $stats_json->{nfsstat}{nfsv4}{clientstats}{cache}{attrhits}; + $data->{stats}{client_cache_Attr_Misses} = $stats_json->{nfsstat}{nfsv4}{clientstats}{cache}{attrmisses}; + $data->{stats}{client_cache_Lkup_Hits} = $stats_json->{nfsstat}{nfsv4}{clientstats}{cache}{lkuphits}; + $data->{stats}{client_cache_Lkup_Misses} = $stats_json->{nfsstat}{nfsv4}{clientstats}{cache}{lkupmisses}; + $data->{stats}{client_cache_BioR_Hits} = $stats_json->{nfsstat}{nfsv4}{clientstats}{cache}{biorhits}; + $data->{stats}{client_cache_BioR_Misses} = $stats_json->{nfsstat}{nfsv4}{clientstats}{cache}{biormisses}; + $data->{stats}{client_cache_BioW_Hits} = $stats_json->{nfsstat}{nfsv4}{clientstats}{cache}{biowhits}; + $data->{stats}{client_cache_BioW_Misses} = $stats_json->{nfsstat}{nfsv4}{clientstats}{cache}{biowmisses}; + $data->{stats}{client_cache_BioRL_Hits} = $stats_json->{nfsstat}{nfsv4}{clientstats}{cache}{biorlhits}; + $data->{stats}{client_cache_BioRL_Misses} = $stats_json->{nfsstat}{nfsv4}{clientstats}{cache}{biorlmisses}; + $data->{stats}{client_cache_BioD_Hits} = $stats_json->{nfsstat}{nfsv4}{clientstats}{cache}{biodhits}; + $data->{stats}{client_cache_BioD_Misses} = $stats_json->{nfsstat}{nfsv4}{clientstats}{cache}{biodmisses}; + $data->{stats}{client_cache_DirE_Hits} = $stats_json->{nfsstat}{nfsv4}{clientstats}{cache}{direhits}; + $data->{stats}{client_cache_DirE_Misses} = $stats_json->{nfsstat}{nfsv4}{clientstats}{cache}{diremisses}; + $data->{stats}{server_rpc_Getattr} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{getattr}; + $data->{stats}{server_rpc_Setattr} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{setattr}; + $data->{stats}{server_rpc_Lookup} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{lookup}; + $data->{stats}{server_rpc_Readlink} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{readlink}; + $data->{stats}{server_rpc_Read} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{read}; + $data->{stats}{server_rpc_Write} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{write}; + $data->{stats}{server_rpc_Create} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{create}; + $data->{stats}{server_rpc_Remove} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{remove}; + $data->{stats}{server_rpc_Rename} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{rename}; + $data->{stats}{server_rpc_Link} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{link}; + $data->{stats}{server_rpc_Symlink} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{symlink}; + $data->{stats}{server_rpc_Mkdir} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{mkdir}; + $data->{stats}{server_rpc_Rmdir} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{rmdir}; + $data->{stats}{server_rpc_Readdir} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{readdir}; + $data->{stats}{server_rpc_RdirPlus} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{rdirplus}; + $data->{stats}{server_rpc_Access} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{access}; + 
$data->{stats}{server_rpc_Mknod} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{mknod}; + $data->{stats}{server_rpc_Fsstat} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{fsstat}; + $data->{stats}{server_rpc_FSinfo} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{fsinfo}; + $data->{stats}{server_rpc_pathConf} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{pathconf}; + $data->{stats}{server_rpc_Commit} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{commit}; + $data->{stats}{server_rpc_LookupP} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{lookupp}; + $data->{stats}{server_rpc_SetClId} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{setclientid}; + $data->{stats}{server_rpc_SetClIdCf} + = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{setclientidcfrm}; + $data->{stats}{server_rpc_Open} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{open}; + $data->{stats}{server_rpc_OpenAttr} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{openattr}; + $data->{stats}{server_rpc_OpenDwnGr} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{opendwgr}; + $data->{stats}{server_rpc_OpenCfrm} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{opencfrm}; + $data->{stats}{server_rpc_DelePurge} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{delepurge}; + $data->{stats}{server_rpc_DelRet} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{getattr}; + $data->{stats}{server_rpc_GetFH} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{getfh}; + $data->{stats}{server_rpc_Lock} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{lock}; + $data->{stats}{server_rpc_LockT} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{lockt}; + $data->{stats}{server_rpc_LockU} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{locku}; + $data->{stats}{server_rpc_Close} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{close}; + $data->{stats}{server_rpc_Verify} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{verify}; + $data->{stats}{server_rpc_NVerify} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nverify}; + $data->{stats}{server_rpc_PutFH} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{putfh}; + $data->{stats}{server_rpc_PutPubFH} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{putpubfh}; + $data->{stats}{server_rpc_PutRootFH} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{putrootfh}; + $data->{stats}{server_rpc_Renew} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{renew}; + $data->{stats}{server_rpc_RestoreFH} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{restore}; + $data->{stats}{server_rpc_SaveFH} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{savefh}; + $data->{stats}{server_rpc_Secinfo} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{secinfo}; + $data->{stats}{server_rpc_RelLockOwn} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{rellockown}; + $data->{stats}{server_rpc_V4Create} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{v4create}; + $data->{stats}{server_rpc_BackChannelCt} + = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv41}{backchannelctrl}; + $data->{stats}{server_rpc_BindConnToSes} + = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv41}{bindconntosess}; + $data->{stats}{server_rpc_ExchangeID} + = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv41}{exchangeid}; + $data->{stats}{server_rpc_CreateSess} + = 
$stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv41}{createsess}; + $data->{stats}{server_rpc_DestroySess} + = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv41}{destroysess}; + $data->{stats}{server_rpc_FreeStateID} + = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv41}{freestateid}; + $data->{stats}{server_rpc_GetDirDeleg} + = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv41}{getdirdeleg}; + $data->{stats}{server_rpc_GetDevInfo} + = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv41}{getdevinfo}; + $data->{stats}{server_rpc_GetDevList} + = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv41}{getdevlist}; + $data->{stats}{server_rpc_layoutCommit} + = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv41}{layoutcommit}; + $data->{stats}{server_rpc_LayoutGet} + = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv41}{layoutget}; + $data->{stats}{server_rpc_LayoutReturn} + = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv41}{layoutreturn}; + $data->{stats}{server_rpc_GetDirDeleg} + = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv41}{getdirdeleg}; + $data->{stats}{server_rpc_GetDevInfo} + = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv41}{getdevinfo}; + $data->{stats}{server_rpc_GetDevList} + = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv41}{getdevlist}; + $data->{stats}{server_rpc_layoutCommit} + = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv41}{layoutcommit}; + $data->{stats}{server_rpc_LayoutGet} + = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv41}{layoutget}; + $data->{stats}{server_rpc_LayoutReturn} + = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv41}{layoutreturn}; + $data->{stats}{server_rpc_SecInfNoName} + = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv41}{reclaimcompl}; + $data->{stats}{server_rpc_Sequence} + = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv41}{sequence}; + $data->{stats}{server_rpc_SetSSV} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv41}{setssv}; + $data->{stats}{server_rpc_TestStateID} + = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv41}{teststateid}; + $data->{stats}{server_rpc_WantDeleg} + = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv41}{wantdeleg}; + $data->{stats}{server_rpc_DestroyClId} + = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv41}{destroyclid}; + $data->{stats}{server_rpc_ReclaimCompl} + = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv41}{reclaimcompl}; + $data->{stats}{server_rpc_Allocate} + = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv42}{allocate}; + $data->{stats}{server_rpc_Copy} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv42}{copy}; + $data->{stats}{server_rpc_CopyNotify} + = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv42}{copynotify}; + $data->{stats}{server_rpc_Deallocate} + = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv42}{deallocate}; + $data->{stats}{server_rpc_IOAdvise} + = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv42}{ioadvise}; + $data->{stats}{server_rpc_LayoutError} + = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv42}{layouterror}; + $data->{stats}{server_rpc_LayoutStats} + = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv42}{layoutstats}; + $data->{stats}{server_rpc_OffloadCncl} + = 
$stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv42}{offloadcncl}; + $data->{stats}{server_rpc_OffloadStat} + = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv42}{offloadstat}; + $data->{stats}{server_rpc_ReadPlus} + = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv42}{readplus}; + $data->{stats}{server_rpc_Seek} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv42}{seek}; + $data->{stats}{server_rpc_WriteSame} + = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv42}{writesame}; + $data->{stats}{server_rpc_Clone} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv42}{clone}; + $data->{stats}{server_rpc_GetExtattr} + = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv42}{getextattr}; + $data->{stats}{server_rpc_SetExtattr} + = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv42}{setextattr}; + $data->{stats}{server_rpc_ListExtattr} + = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv42}{listextattr}; + $data->{stats}{server_rpc_RmExtattr} + = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv42}{rmextattr}; + $data->{stats}{server_Clients} = $stats_json->{nfsstat}{nfsv4}{serverstats}{server}{clients}; + $data->{stats}{server_OpenOwner} = $stats_json->{nfsstat}{nfsv4}{serverstats}{server}{openowner}; + $data->{stats}{server_Opens} = $stats_json->{nfsstat}{nfsv4}{serverstats}{server}{opens}; + $data->{stats}{server_LockOwner} = $stats_json->{nfsstat}{nfsv4}{serverstats}{server}{lockowner}; + $data->{stats}{server_Locks} = $stats_json->{nfsstat}{nfsv4}{serverstats}{server}{locks}; + $data->{stats}{server_Delegs} = $stats_json->{nfsstat}{nfsv4}{serverstats}{server}{delegs}; + $data->{stats}{server_Layouts} = $stats_json->{nfsstat}{nfsv4}{serverstats}{server}{layouts}; + $data->{stats}{server_cache_Inprog} = $stats_json->{nfsstat}{nfsv4}{serverstats}{cache}{inprog}; + $data->{stats}{server_cache_NonIdem} = $stats_json->{nfsstat}{nfsv4}{serverstats}{cache}{nonidem}; + $data->{stats}{server_cache_Misses} = $stats_json->{nfsstat}{nfsv4}{serverstats}{cache}{misses}; + $data->{stats}{server_cache_CacheSize} = $stats_json->{nfsstat}{nfsv4}{serverstats}{cache}{cachesize}; + $data->{stats}{server_cache_TCPPeak} = $stats_json->{nfsstat}{nfsv4}{serverstats}{cache}{tcppeak}; + }; } ## end if ( $^O eq 'freebsd' ) #### @@ -640,169 +618,7 @@ if ( $^O eq 'linux' ) { $mode = 'server'; } if ( $previous_line =~ /calls\ +badcalls\ +badfmt\ +badauth\ +badclnt/ && $mode eq 'server' ) { - ( - $data->{stats}{server_calls}, $data->{stats}{'server_badcalls'}, $data->{stats}{server_badfmt}, - $data->{stats}{server_badauth}, $data->{stats}{server_badclnt}, - ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /null\ +compound/ && $mode eq 'client' ) { - ( $data->{stats}{server_null}, $data->{stats}{server_compound}, ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /op0\-unused\ +op1\-unused\ +op2\-future\ +access\ +close/ && $mode eq 'server' ) { - ( - $data->{stats}{'server_op0-unused'}, $data->{stats}{'server_op1-unused'}, - $data->{stats}{'server_op2-future'}, $data->{stats}{server_Access}, - $data->{stats}{server_Close}, - ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /commit\ +create\ +delegpurge\ +delegreturn\ +getattr/ && $mode eq 'server' ) { - ( - $data->{stats}{server_Commit}, $data->{stats}{server_Create}, $data->{stats}{server_DelePurge}, - $data->{stats}{server_Delegs}, $data->{stats}{server_Getattr}, - ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ 
/getfh\ +link\ +lock\ +lockt\ +locku/ && $mode eq 'server' ) { - ( - $data->{stats}{server_GetFH}, $data->{stats}{server_Link}, $data->{stats}{server_Lock}, - $data->{stats}{server_LockT}, $data->{stats}{server_LockU}, - ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /lookup\ +lookup_root\ +nverify\ +open\ +openattr/ && $mode eq 'server' ) { - ( - $data->{stats}{server_Lookup}, $data->{stats}{server_LookupP}, $data->{stats}{server_NVerify}, - $data->{stats}{server_Open}, $data->{stats}{server_OpenAttr}, - ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /open_conf\ +open_dgrd\ +putfh\ putpubfh\ +putrootfh/ && $mode eq 'server' ) { - ( - $data->{stats}{server_OpenCfrm}, $data->{stats}{server_OpenDwnGr}, $data->{stats}{server_PutFH}, - $data->{stats}{server_PutPubFH}, $data->{stats}{server_PutRootFH}, - ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /read\ +readdir\ +readlink\ +remove\ +rename/ && $mode eq 'server' ) { - ( - $data->{stats}{server_Read}, $data->{stats}{server_Readdir}, $data->{stats}{server_Readlink}, - $data->{stats}{server_Remove}, $data->{stats}{server_Rename}, - ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /renew\ +restorefh\ +savefh\ +secinfo\ +setattr/ && $mode eq 'server' ) { - ( - $data->{stats}{server_Renew}, $data->{stats}{server_RestoreFH}, $data->{stats}{server_SaveFH}, - $data->{stats}{server_Secinfo}, $data->{stats}{server_Setattr}, - ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /setcltid\ +setcltidconf\ +verify\ +write\ +rellockowner/ && $mode eq 'server' ) { - ( - $data->{stats}{server_SetClId}, $data->{stats}{server_SetClIdCf}, - $data->{stats}{server_Verify}, $data->{stats}{server_Write}, - $data->{stats}{server_RelLockOwn}, - ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /bc_ctl\ +bind_conn\ +exchange_id\ +create_ses\ +destroy_ses/ && $mode eq 'server' ) { - ( - $data->{stats}{server_BackChannelCt}, $data->{stats}{server_BindConnToSes}, - $data->{stats}{server_ExchangeID}, $data->{stats}{server_CreateSess}, - $data->{stats}{server_DestroySess}, - ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /free_stateid\ +getdirdeleg\ +getdevinfo\ +getdevlist\ +layoutcommit/ && $mode eq 'server' ) { - ( - $data->{stats}{server_FreeStateID}, $data->{stats}{server_GetDirDeleg}, - $data->{stats}{server_GetDevInfo}, $data->{stats}{server_GetDevList}, - $data->{stats}{server_layoutCommit}, - ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /layoutget\ +layoutreturn\ +secinfononam\ +sequence\ +set_ssv/ && $mode eq 'server' ) { - ( - $data->{stats}{server_LayoutGet}, $data->{stats}{server_LayoutReturn}, - $data->{stats}{server_SecInfNoName}, $data->{stats}{server_Sequence}, - $data->{stats}{server_SetSSV}, - ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /test_stateid\ +want_deleg\ +destroy_clid\ +reclaim_comp\ +allocate/ && $mode eq 'server' ) { - ( - $data->{stats}{server_TestStateID}, $data->{stats}{server_WantDeleg}, - $data->{stats}{server_DestroyClId}, $data->{stats}{server_ReclaimCompl}, - $data->{stats}{server_Allocate}, - ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /copy\ +copy_notify\ +deallocate\ +ioadvise\ +layouterror/ && $mode eq 'server' ) { - ( - $data->{stats}{server_Copy}, $data->{stats}{server_CopyNotify}, - $data->{stats}{server_Deallocate}, $data->{stats}{server_IOAdvise}, - $data->{stats}{server_LayoutError}, - ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /layoutstats\ +offloadcancel\ 
+offloadstatus\ +readplus\ +seek/ && $mode eq 'server' ) { - ( - $data->{stats}{server_Layouts}, $data->{stats}{server_OffloadCncl}, - $data->{stats}{server_OffloadStat}, $data->{stats}{server_ReadPlus}, - $data->{stats}{server_Seek}, - ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /write_same/ ) { - ( $data->{stats}{server_WriteSame} ) = split( /[\ \t]+/m, $line && $mode eq 'client' ); - } elsif ( $previous_line =~ /calls\ +retrans\ +authrefrsh/ ) { - ( - $data->{stats}{client_rpc_info_Requests}, - $data->{stats}{client_rpc_info_Retries}, - $data->{stats}{client_rpc_info_X_Replies} - ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /null\ +read\ +write\ +commit\ +open/ && $mode eq 'client' ) { - ( - $data->{stats}{client_rpc_null}, $data->{stats}{client_rpc_Read}, - $data->{stats}{client_rpc_Write}, $data->{stats}{client_rpc_Commit}, - $data->{stats}{client_rpc_Open}, - ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /open_conf\ +open_noat\ +open_dgrd\ +close\ +setattr/ && $mode eq 'client' ) { - ( - $data->{stats}{client_rpc_OpenCfr}, $data->{stats}{client_rpc_OpenLayout}, - $data->{stats}{client_rpc_OpenDownGr}, $data->{stats}{client_rpc_Commit}, - $data->{stats}{client_rpc_Open}, - ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /fsinfo\ +renew\ +setclntid\ +confirm\ +lock/ && $mode eq 'client' ) { - ( - $data->{stats}{client_rpc_FSinfo}, $data->{stats}{client_rpc_renew}, - $data->{stats}{client_rpc_SetClId}, $data->{stats}{client_rpc_confirm}, - $data->{stats}{client_rpc_Lock}, - ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /lockt\ +locku\ +access\ +getattr\ +lookup/ && $mode eq 'client' ) { - ( - $data->{stats}{client_rpc_LockT}, $data->{stats}{client_rpc_LockU}, - $data->{stats}{client_rpc_Access}, $data->{stats}{client_rpc_Getattr}, - $data->{stats}{client_rpc_Lookup}, - ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /lookup_root\ +remove\ +rename\ +link\ +symlink/ && $mode eq 'client' ) { - ( - $data->{stats}{client_rpc_LookOpen}, $data->{stats}{client_rpc_Remove}, - $data->{stats}{client_rpc_Rename}, $data->{stats}{client_rpc_Link}, - $data->{stats}{client_rpc_Symlink}, - ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /create\ +pathconf\ +statfs\ +readlink\ +readdir/ && $mode eq 'client' ) { - ( - $data->{stats}{client_rpc_Create}, $data->{stats}{client_rpc_pathConf}, - $data->{stats}{client_rpc_statfs}, $data->{stats}{client_rpc_Readlink}, - $data->{stats}{client_rpc_Readlink}, - ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /server_caps\ +delegreturn\ +getacl\ +setacl\ +fs_locations/ && $mode eq 'client' ) { - ( - $data->{stats}{client_rpc_server_caps}, $data->{stats}{client_rpc_DelegRet}, - $data->{stats}{client_rpc_GetAcl}, $data->{stats}{client_rpc_SetAcl}, - $data->{stats}{client_rpc_fs_locations}, - ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /rel_lkowner\ +secinfo\ +fsid_present\ +exchange_id\ +create_session/ && $mode eq 'client' ) { - ( - $data->{stats}{client_rpc_RelLckOwn}, $data->{stats}{client_rpc_secinfo}, - $data->{stats}{client_rpc_fsid_present}, $data->{stats}{client_rpc_ExchangeId}, - $data->{stats}{client_rpc_CreateSess}, - ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /destroy_session\ +sequence\ +get_lease_time\ +reclaim_comp\ +layoutget/ && $mode eq 'client' ) { - ( - $data->{stats}{client_rpc_DestroySess}, $data->{stats}{client_rpc_sequence}, - $data->{stats}{client_rpc_get_lease_time}, 
$data->{stats}{client_rpc_ReclaimCompl}, - $data->{stats}{client_rpc_LayoutGet}, - ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /getdevinfo\ +layoutcommit\ +layoutreturn\ +secinfo_no\ +test_stateid/ && $mode eq 'client' ) { - ( - $data->{stats}{client_rpc_GetDevInfo}, $data->{stats}{client_rpc_LayoutCommit}, - $data->{stats}{client_rpc_LayoutReturn}, $data->{stats}{client_rpc_secinfo_no}, - $data->{stats}{client_rpc_test_stateid}, - ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /free_stateid\ +getdevicelist\ +bind_conn_to_ses\ +destroy_clientid\ +seek/ && $mode eq 'client' ) { - ( - $data->{stats}{client_rpc_FreeStateID}, $data->{stats}{client_rpc_getdevicelist}, - $data->{stats}{client_rpc_BindConnSess}, $data->{stats}{client_rpc_DestroyClId}, - $data->{stats}{client_rpc_Seek}, - ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /allocate\ +deallocate\ +layoutstats\ +clone/ && $mode eq 'client' ) { - ( - $data->{stats}{client_rpc_Allocate}, $data->{stats}{client_rpc_Deallocate}, - $data->{stats}{client_rpc_layoutstats}, $data->{stats}{client_rpc_clone}, - ) = split( /[\ \t]+/m, $line ); + } $previous_line = $line; } ## end foreach my $line (@output_split) @@ -815,9 +631,9 @@ if ( $^O eq 'linux' ) { #### my @stat_keys = keys( %{ $data->{stats} } ); foreach my $item (@stat_keys) { - if ( $item =~ /^client/ && defined($data->{stats}{$item}) && $data->{stats}{$item} > 0 ) { + if ( $item =~ /^client/ && defined( $data->{stats}{$item} ) && $data->{stats}{$item} > 0 ) { $data->{is_client} = 1; - } elsif ( $item =~ /^server/ && defined($data->{stats}{$item}) && $data->{stats}{$item} > 0 ) { + } elsif ( $item =~ /^server/ && defined( $data->{stats}{$item} ) && $data->{stats}{$item} > 0 ) { $data->{is_server} = 1; } } From 0b3531e1cccb7401b06cdf854e95f0eafeae4063 Mon Sep 17 00:00:00 2001 From: "Zane C. 
Bowers-Hadley" Date: Sat, 9 Mar 2024 16:46:40 -0600 Subject: [PATCH 286/332] massively re-work it and clean it up --- snmp/nfs | 822 ++++++++++++++++++++++++++++++++++++++++++------------- 1 file changed, 627 insertions(+), 195 deletions(-) diff --git a/snmp/nfs b/snmp/nfs index 87f518458..c6391d0a5 100755 --- a/snmp/nfs +++ b/snmp/nfs @@ -6,7 +6,7 @@ nfs - LibreNMS JSON style SNMP extend for NFS monitoring =head1 VERSION -0.0.1 +0.0.2 =head1 SYNOPSIS @@ -132,198 +132,253 @@ my $data = { mounts => [], mounted_by => [], stats => { - client_rpc_Getattr => undef, - client_rpc_Setattr => undef, - client_rpc_Lookup => undef, - client_rpc_Readlink => undef, - client_rpc_Read => undef, - client_rpc_Write => undef, - client_rpc_Create => undef, - client_rpc_Remove => undef, - client_rpc_Rename => undef, - client_rpc_Link => undef, - client_rpc_Symlink => undef, - client_rpc_Mkdir => undef, - client_rpc_Rmdir => undef, - client_rpc_Readdir => undef, - client_rpc_RdirPlus => undef, - client_rpc_Access => undef, - client_rpc_Mknod => undef, - client_rpc_Fsstat => undef, - client_rpc_FSinfo => undef, - client_rpc_pathConf => undef, - client_rpc_Commit => undef, - client_rpc_SetClId => undef, - client_rpc_SetClIdCf => undef, - client_rpc_Lock => undef, - client_rpc_LockT => undef, - client_rpc_LockU => undef, - client_rpc_Open => undef, - client_rpc_OpenCfr => undef, - client_rpc_OpenDownGr => undef, - client_rpc_Close => undef, - client_rpc_RelLckOwn => undef, - client_rpc_FreeStateID => undef, - client_rpc_PutRootFH => undef, - client_rpc_DelegRet => undef, - client_rpc_GetAcl => undef, - client_rpc_SetAcl => undef, - client_rpc_ExchangeId => undef, - client_rpc_CreateSess => undef, - client_rpc_DestroySess => undef, - client_rpc_DestroyClId => undef, - client_rpc_LayoutGet => undef, - client_rpc_GetDevInfo => undef, - client_rpc_LayoutCommit => undef, - client_rpc_LayoutReturn => undef, - client_rpc_ReclaimCompl => undef, - client_rpc_ReadDataS => undef, - client_rpc_WriteDataS => undef, - client_rpc_CommitDataS => undef, - client_rpc_OpenLayout => undef, - client_rpc_CreateLayout => undef, - client_rpc_BindConnSess => undef, - client_rpc_LookupOpen => undef, - client_rpc_IOAdvise => undef, - client_rpc_Allocate => undef, - client_rpc_Copy => undef, - client_rpc_Seek => undef, - client_rpc_SeekDataS => undef, - client_rpc_GetExtattr => undef, - client_rpc_SetExtattr => undef, - client_rpc_RmExtattr => undef, - client_rpc_ListExtattr => undef, - client_rpc_Deallocate => undef, - client_rpc_LayoutError => undef, - client_OpenOwner => undef, - client_Opens => undef, - client_LockOwner => undef, - client_Locks => undef, - client_Delegs => undef, - client_LocalOwn => undef, - client_LocalOpen => undef, - client_LocalLown => undef, - client_LocalLock => undef, - client_Layouts => undef, - client_rpc_info_TimedOut => undef, - client_rpc_info_Invalid => undef, - client_rpc_info_X_Replies => undef, - client_rpc_info_Retries => undef, - client_rpc_info_Requests => undef, - client_cache_Attr_Hits => undef, - client_cache_Attr_Misses => undef, - client_cache_Lkup_Hits => undef, - client_cache_Lkup_Misses => undef, - client_cache_BioR_Hits => undef, - client_cache_BioR_Misses => undef, - client_cache_BioW_Hits => undef, - client_cache_BioW_Misses => undef, - client_cache_BioRL_Hits => undef, - client_cache_BioRL_Misses => undef, - client_cache_BioD_Hits => undef, - client_cache_BioD_Misses => undef, - client_cache_DirE_Hits => undef, - client_cache_DirE_Misses => undef, - server_rpc_Getattr => undef, - 
server_rpc_Setattr => undef, - server_rpc_Lookup => undef, - server_rpc_Readlink => undef, - server_rpc_Read => undef, - server_rpc_Write => undef, - server_rpc_Create => undef, - server_rpc_Remove => undef, - server_rpc_Rename => undef, - server_rpc_Link => undef, - server_rpc_Symlink => undef, - server_rpc_Mkdir => undef, - server_rpc_Rmdir => undef, - server_rpc_Readdir => undef, - server_rpc_RdirPlus => undef, - server_rpc_Access => undef, - server_rpc_Mknod => undef, - server_rpc_Fsstat => undef, - server_rpc_FSinfo => undef, - server_rpc_pathConf => undef, - server_rpc_Commit => undef, - server_rpc_LookupP => undef, - server_rpc_SetClId => undef, - server_rpc_SetClIdCf => undef, - server_rpc_Open => undef, - server_rpc_OpenAttr => undef, - server_rpc_OpenDwnGr => undef, - server_rpc_OpenCfrm => undef, - server_rpc_DelePurge => undef, - server_rpc_DelRet => undef, - server_rpc_GetFH => undef, - server_rpc_Lock => undef, - server_rpc_LockT => undef, - server_rpc_LockU => undef, - server_rpc_Close => undef, - server_rpc_Verify => undef, - server_rpc_NVerify => undef, - server_rpc_PutFH => undef, - server_rpc_PutPubFH => undef, - server_rpc_PutRootFH => undef, - server_rpc_Renew => undef, - server_rpc_RestoreFH => undef, - server_rpc_SaveFH => undef, - server_rpc_Secinfo => undef, - server_rpc_RelLockOwn => undef, - server_rpc_V4Create => undef, - server_rpc_BackChannelCt => undef, - server_rpc_BindConnToSes => undef, - server_rpc_ExchangeID => undef, - server_rpc_CreateSess => undef, - server_rpc_DestroySess => undef, - server_rpc_FreeStateID => undef, - server_rpc_GetDirDeleg => undef, - server_rpc_GetDevInfo => undef, - server_rpc_GetDevList => undef, - server_rpc_layoutCommit => undef, - server_rpc_LayoutGet => undef, - server_rpc_LayoutReturn => undef, - server_rpc_GetDirDeleg => undef, - server_rpc_GetDevInfo => undef, - server_rpc_GetDevList => undef, - server_rpc_layoutCommit => undef, - server_rpc_LayoutGet => undef, - server_rpc_LayoutReturn => undef, - server_rpc_SecInfNoName => undef, - server_rpc_Sequence => undef, - server_rpc_SetSSV => undef, - server_rpc_TestStateID => undef, - server_rpc_WantDeleg => undef, - server_rpc_DestroyClId => undef, - server_rpc_ReclaimCompl => undef, - server_rpc_Allocate => undef, - server_rpc_Copy => undef, - server_rpc_CopyNotify => undef, - server_rpc_Deallocate => undef, - server_rpc_IOAdvise => undef, - server_rpc_LayoutError => undef, - server_rpc_LayoutStats => undef, - server_rpc_OffloadCncl => undef, - server_rpc_OffloadStat => undef, - server_rpc_ReadPlus => undef, - server_rpc_Seek => undef, - server_rpc_WriteSame => undef, - server_rpc_Clone => undef, - server_rpc_GetExtattr => undef, - server_rpc_SetExtattr => undef, - server_rpc_ListExtattr => undef, - server_rpc_RmExtattr => undef, - server_Clients => undef, - server_OpenOwner => undef, - server_Opens => undef, - server_LockOwner => undef, - server_Locks => undef, - server_Delegs => undef, - server_Layouts => undef, - server_cache_Inprog => undef, - server_cache_NonIdem => undef, - server_cache_Misses => undef, - server_cache_CacheSize => undef, - server_cache_TCPPeak => undef, + client_rpc_null => 0, + client_rpc_root => 0, + client_rpc_confirm => 0, + client_rpc_server_caps => 0, + client_rpc_fs_locations => 0, + client_rpc_secinfo => 0, + client_rpc_fsid_present => 0, + client_rpc_sequence => 0, + client_rpc_get_lease_time => 0, + client_rpc_test_stateid => 0, + client_rpc_secinfo_no => 0, + client_rpc_getdevicelist => 0, + client_rpc_layoutstats => 0, + client_rpc_wrcache => 
0, + client_rpc_Getattr => 0, + client_rpc_Setattr => 0, + client_rpc_Lookup => 0, + client_rpc_Readlink => 0, + client_rpc_Read => 0, + client_rpc_Write => 0, + client_rpc_Create => 0, + client_rpc_Remove => 0, + client_rpc_Rename => 0, + client_rpc_Link => 0, + client_rpc_Symlink => 0, + client_rpc_Mkdir => 0, + client_rpc_Rmdir => 0, + client_rpc_Readdir => 0, + client_rpc_RdirPlus => 0, + client_rpc_Access => 0, + client_rpc_Mknod => 0, + client_rpc_Fsstat => 0, + client_rpc_FSinfo => 0, + client_rpc_pathConf => 0, + client_rpc_Commit => 0, + client_rpc_SetClId => 0, + client_rpc_SetClIdCf => 0, + client_rpc_Lock => 0, + client_rpc_LockT => 0, + client_rpc_LockU => 0, + client_rpc_Open => 0, + client_rpc_OpenCfr => 0, + client_rpc_OpenDownGr => 0, + client_rpc_Close => 0, + client_rpc_RelLckOwn => 0, + client_rpc_FreeStateID => 0, + client_rpc_PutRootFH => 0, + client_rpc_DelegRet => 0, + client_rpc_GetAcl => 0, + client_rpc_SetAcl => 0, + client_rpc_ExchangeId => 0, + client_rpc_CreateSess => 0, + client_rpc_DestroySess => 0, + client_rpc_DestroyClId => 0, + client_rpc_LayoutGet => 0, + client_rpc_GetDevInfo => 0, + client_rpc_LayoutCommit => 0, + client_rpc_LayoutReturn => 0, + client_rpc_ReclaimCompl => 0, + client_rpc_ReadDataS => 0, + client_rpc_WriteDataS => 0, + client_rpc_CommitDataS => 0, + client_rpc_OpenLayout => 0, + client_rpc_CreateLayout => 0, + client_rpc_BindConnSess => 0, + client_rpc_LookupOpen => 0, + client_rpc_IOAdvise => 0, + client_rpc_Allocate => 0, + client_rpc_Copy => 0, + client_rpc_Seek => 0, + client_rpc_SeekDataS => 0, + client_rpc_GetExtattr => 0, + client_rpc_SetExtattr => 0, + client_rpc_RmExtattr => 0, + client_rpc_ListExtattr => 0, + client_rpc_Deallocate => 0, + client_rpc_LayoutError => 0, + client_OpenOwner => 0, + client_Opens => 0, + client_LockOwner => 0, + client_Locks => 0, + client_Delegs => 0, + client_LocalOwn => 0, + client_LocalOpen => 0, + client_LocalLown => 0, + client_LocalLock => 0, + client_Layouts => 0, + client_rpc_info_TimedOut => 0, + client_rpc_info_Invalid => 0, + client_rpc_info_X_Replies => 0, + client_rpc_info_Retries => 0, + client_rpc_info_Requests => 0, + client_rpc_info_authrefrsh => 0, + client_cache_Attr_Hits => 0, + client_cache_Attr_Misses => 0, + client_cache_Lkup_Hits => 0, + client_cache_Lkup_Misses => 0, + client_cache_BioR_Hits => 0, + client_cache_BioR_Misses => 0, + client_cache_BioW_Hits => 0, + client_cache_BioW_Misses => 0, + client_cache_BioRL_Hits => 0, + client_cache_BioRL_Misses => 0, + client_cache_BioD_Hits => 0, + client_cache_BioD_Misses => 0, + client_cache_DirE_Hits => 0, + client_cache_DirE_Misses => 0, + client_network_packets => 0, + client_network_udp => 0, + client_network_tcp => 0, + client_network_tcpconn => 0, + server_rpc_Getattr => 0, + server_rpc_Setattr => 0, + server_rpc_Lookup => 0, + server_rpc_Readlink => 0, + server_rpc_Read => 0, + server_rpc_Write => 0, + server_rpc_Create => 0, + server_rpc_Remove => 0, + server_rpc_Rename => 0, + server_rpc_Link => 0, + server_rpc_Symlink => 0, + server_rpc_Mkdir => 0, + server_rpc_Rmdir => 0, + server_rpc_Readdir => 0, + server_rpc_RdirPlus => 0, + server_rpc_Access => 0, + server_rpc_Mknod => 0, + server_rpc_Fsstat => 0, + server_rpc_FSinfo => 0, + server_rpc_pathConf => 0, + server_rpc_Commit => 0, + server_rpc_LookupP => 0, + server_rpc_SetClId => 0, + server_rpc_SetClIdCf => 0, + server_rpc_Open => 0, + server_rpc_OpenAttr => 0, + server_rpc_OpenDwnGr => 0, + server_rpc_OpenCfrm => 0, + server_rpc_DelePurge => 0, + server_rpc_DelRet => 
0, + server_rpc_GetFH => 0, + server_rpc_Lock => 0, + server_rpc_LockT => 0, + server_rpc_LockU => 0, + server_rpc_Close => 0, + server_rpc_Verify => 0, + server_rpc_NVerify => 0, + server_rpc_PutFH => 0, + server_rpc_PutPubFH => 0, + server_rpc_PutRootFH => 0, + server_rpc_Renew => 0, + server_rpc_RestoreFH => 0, + server_rpc_SaveFH => 0, + server_rpc_Secinfo => 0, + server_rpc_RelLockOwn => 0, + server_rpc_V4Create => 0, + server_rpc_BackChannelCt => 0, + server_rpc_BindConnToSes => 0, + server_rpc_ExchangeID => 0, + server_rpc_CreateSess => 0, + server_rpc_DestroySess => 0, + server_rpc_FreeStateID => 0, + server_rpc_GetDirDeleg => 0, + server_rpc_GetDevInfo => 0, + server_rpc_GetDevList => 0, + server_rpc_layoutCommit => 0, + server_rpc_LayoutGet => 0, + server_rpc_LayoutReturn => 0, + server_rpc_GetDirDeleg => 0, + server_rpc_GetDevInfo => 0, + server_rpc_GetDevList => 0, + server_rpc_layoutCommit => 0, + server_rpc_LayoutGet => 0, + server_rpc_LayoutReturn => 0, + server_rpc_SecInfNoName => 0, + server_rpc_Sequence => 0, + server_rpc_SetSSV => 0, + server_rpc_TestStateID => 0, + server_rpc_WantDeleg => 0, + server_rpc_DestroyClId => 0, + server_rpc_ReclaimCompl => 0, + server_rpc_Allocate => 0, + server_rpc_Copy => 0, + server_rpc_CopyNotify => 0, + server_rpc_Deallocate => 0, + server_rpc_IOAdvise => 0, + server_rpc_LayoutError => 0, + server_rpc_LayoutStats => 0, + server_rpc_OffloadCncl => 0, + server_rpc_OffloadStat => 0, + server_rpc_ReadPlus => 0, + server_rpc_Seek => 0, + server_rpc_WriteSame => 0, + server_rpc_Clone => 0, + server_rpc_GetExtattr => 0, + server_rpc_SetExtattr => 0, + server_rpc_ListExtattr => 0, + server_rpc_RmExtattr => 0, + server_Clients => 0, + server_OpenOwner => 0, + server_Opens => 0, + server_LockOwner => 0, + server_Locks => 0, + server_Delegs => 0, + server_Layouts => 0, + server_network_packets => 0, + server_network_udp => 0, + server_network_tcp => 0, + server_network_tcpconn => 0, + server_rpcStats_calls => 0, + server_rpcStats_badcalls => 0, + server_rpcStats_badfmt => 0, + server_rpcStats_badauth => 0, + server_rpcStats_badclnt => 0, + server_cache_Inprog => 0, + server_cache_NonIdem => 0, + server_cache_Misses => 0, + server_cache_CacheSize => 0, + server_cache_TCPPeak => 0, + server_cache_hits => 0, + server_cache_nocache => 0, + server_io_read => 0, + server_io_write => 0, + server_RAcache_0 => 0, + server_RAcache_1 => 0, + server_RAcache_2 => 0, + server_RAcache_3 => 0, + server_RAcache_4 => 0, + server_RAcache_5 => 0, + server_RAcache_6 => 0, + server_RAcache_7 => 0, + server_RAcache_8 => 0, + server_RAcache_9 => 0, + server_RAcache_notfound => 0, + server_FHcache_lookup => 0, + server_FHcache_anon => 0, + server_FHcache_ncachedir => 0, + server_FHcache_ncachenondir => 0, + server_FHcache_stale => 0, + server_rpc_null => 0, + server_rpc_root => 0, + server_rpc_wrcache => 0, + server_rpc_compound => 0, + server_rpc_op0_unused => 0, + server_rpc_op1_unused => 0, + server_rpc_op2_future => 0, } }; @@ -611,14 +666,391 @@ if ( $^O eq 'linux' ) { my @output_split = split( /\n/, $output_raw ); my $previous_line = ''; my $mode = ''; + foreach my $line (@output_split) { if ( $line =~ /^[Cc]lient/ ) { $mode = 'client'; } elsif ( $line =~ /^[Ss]erver/ ) { $mode = 'server'; } - if ( $previous_line =~ /calls\ +badcalls\ +badfmt\ +badauth\ +badclnt/ && $mode eq 'server' ) { - + if ( $previous_line =~ /packets\ +udp\ +tcp\ +tcpconn/ && $mode eq 'server' ) { + ( + $data->{stats}{server_network_packets}, $data->{stats}{server_network_udp}, + 
$data->{stats}{server_network_tcp}, $data->{stats}{server_network_tcpconn},
+ ) = split( /[\t\ ]+/, $line );
+ } elsif ( $previous_line =~ /calls\ +badcalls\ +badfmt\ +badauth\ +badclnt/ && $mode eq 'server' ) {
+ (
+ $data->{stats}{server_rpcStats_calls}, $data->{stats}{server_rpcStats_badcalls},
+ $data->{stats}{server_rpcStats_badfmt}, $data->{stats}{server_rpcStats_badauth},
+ $data->{stats}{server_rpcStats_badclnt},
+ ) = split( /[\t\ ]+/, $line );
+ } elsif ( $previous_line =~ /hits\ +misses\ +nocache/ && $mode eq 'server' ) {
+ (
+ $data->{stats}{server_cache_hits},
+ $data->{stats}{server_cache_Misses},
+ $data->{stats}{server_cache_nocache},
+ ) = split( /[\t\ ]+/, $line );
+ } elsif ( $previous_line =~ /read\ +write/ && $mode eq 'server' ) {
+ ( $data->{stats}{server_io_read}, $data->{stats}{server_io_write}, ) = split( /[\t\ ]+/, $line );
+ } elsif ( $previous_line
+ =~ /size\ +0\-10\%\ +10\-20\%\ +20\-30\%\ +30\-40\%\ +40\-50\%\ +50\-60\%\ +60\-70\%\ +70\-80\%\ +80\-90\%\ +90\-100\%\ +notfound/
+ && $mode eq 'server' )
+ {
+ (
+ $data->{stats}{server_RAcache_0}, $data->{stats}{server_RAcache_1},
+ $data->{stats}{server_RAcache_2}, $data->{stats}{server_RAcache_3},
+ $data->{stats}{server_RAcache_4}, $data->{stats}{server_RAcache_5},
+ $data->{stats}{server_RAcache_6}, $data->{stats}{server_RAcache_7},
+ $data->{stats}{server_RAcache_8}, $data->{stats}{server_RAcache_9},
+ $data->{stats}{server_RAcache_notfound}
+ ) = split( /[\t\ ]+/, $line );
+ } elsif ( $previous_line =~ /lookup\ +anon\ +ncachedir\ +ncachenondir\ +stale/ && $mode eq 'server' ) {
+ (
+ $data->{stats}{server_FHcache_lookup}, $data->{stats}{server_FHcache_anon},
+ $data->{stats}{server_FHcache_ncachedir}, $data->{stats}{server_FHcache_ncachenondir},
+ $data->{stats}{server_FHcache_stale},
+ ) = split( /[\t\ ]+/, $line );
+ } elsif ( $previous_line =~ /null\ +getattr\ +setattr\ +root\ +lookup/ && $mode eq 'server' ) {
+ my @split = split( /[\t\ ]+/, $line );
+ $data->{stats}{server_rpc_null} += $split[0];
+ $data->{stats}{server_rpc_Getattr} += $split[1];
+ $data->{stats}{server_rpc_Setattr} += $split[2];
+ $data->{stats}{server_rpc_root} += $split[3];
+ $data->{stats}{server_rpc_Lookup} += $split[4];
+ } elsif ( $previous_line =~ /readlink\ +read\ +wrcache\ +write\ +create/ && $mode eq 'server' ) {
+ my @split = split( /[\t\ ]+/, $line );
+ $data->{stats}{server_rpc_Readlink} += $split[0];
+ $data->{stats}{server_rpc_Read} += $split[1];
+ $data->{stats}{server_rpc_wrcache} += $split[2];
+ $data->{stats}{server_rpc_Write} += $split[3];
+ $data->{stats}{server_rpc_Create} += $split[4];
+ } elsif ( $previous_line =~ /remove\ +rename\ +link\ +symlink\ +mkdir/ && $mode eq 'server' ) {
+ my @split = split( /[\t\ ]+/, $line );
+ $data->{stats}{server_rpc_Remove} += $split[0];
+ $data->{stats}{server_rpc_Rename} += $split[1];
+ $data->{stats}{server_rpc_Link} += $split[2];
+ $data->{stats}{server_rpc_Symlink} += $split[3];
+ $data->{stats}{server_rpc_Mkdir} += $split[4];
+ } elsif ( $previous_line =~ /rmdir\ +readdir\ +fsstat/ && $mode eq 'server' ) {
+ my @split = split( /[\t\ ]+/, $line );
+ $data->{stats}{server_rpc_Rmdir} += $split[0];
+ $data->{stats}{server_rpc_Readdir} += $split[1];
+ $data->{stats}{server_rpc_Fsstat} += $split[2];
+ } elsif ( $previous_line =~ /null\ +getattr\ +setattr\ +lookup\ +access/ && $mode eq 'server' ) {
+ my @split = split( /[\t\ ]+/, $line );
+ $data->{stats}{server_rpc_null} += $split[0];
+ $data->{stats}{server_rpc_Getattr} += $split[1];
+ $data->{stats}{server_rpc_Setattr} += 
$split[2];
+ $data->{stats}{server_rpc_Lookup} += $split[3];
+ $data->{stats}{server_rpc_Access} += $split[4];
+ } elsif ( $previous_line =~ /readlink\ +read\ +write\ +create\ +mkdir/ && $mode eq 'server' ) {
+ my @split = split( /[\t\ ]+/, $line );
+ $data->{stats}{server_rpc_Readlink} += $split[0];
+ $data->{stats}{server_rpc_Read} += $split[1];
+ $data->{stats}{server_rpc_Write} += $split[2];
+ $data->{stats}{server_rpc_Create} += $split[3];
+ $data->{stats}{server_rpc_Mkdir} += $split[4];
+ } elsif ( $previous_line =~ /symlink\ +mknod\ +remove\ +rmdir\ +rename/ && $mode eq 'server' ) {
+ my @split = split( /[\t\ ]+/, $line );
+ $data->{stats}{server_rpc_Symlink} += $split[0];
+ $data->{stats}{server_rpc_Mknod} += $split[1];
+ $data->{stats}{server_rpc_Remove} += $split[2];
+ $data->{stats}{server_rpc_Rmdir} += $split[3];
+ $data->{stats}{server_rpc_Rename} += $split[4];
+ } elsif ( $previous_line =~ /link\ +readdir\ +readdirplus\ +fsstat\ +fsinfo/ && $mode eq 'server' ) {
+ my @split = split( /[\t\ ]+/, $line );
+ $data->{stats}{server_rpc_Link} += $split[0];
+ $data->{stats}{server_rpc_Readdir} += $split[1];
+ $data->{stats}{server_rpc_RdirPlus} += $split[2];
+ $data->{stats}{server_rpc_Fsstat} += $split[3];
+ $data->{stats}{server_rpc_FSinfo} += $split[4];
+ } elsif ( $previous_line =~ /pathconf\ +commit/ && $mode eq 'server' ) {
+ my @split = split( /[\t\ ]+/, $line );
+ $data->{stats}{server_rpc_pathConf} += $split[0];
+ $data->{stats}{server_rpc_Commit} += $split[1];
+ } elsif ( $previous_line =~ /null\ +compound/ && $mode eq 'server' ) {
+ my @split = split( /[\t\ ]+/, $line );
+ $data->{stats}{server_rpc_null} += $split[0];
+ $data->{stats}{server_rpc_compound} += $split[1];
+ } elsif ( $previous_line =~ /op0\-unused\ +op1\-unused\ +op2\-future\ +access\ +close/ && $mode eq 'server' ) {
+ my @split = split( /[\t\ ]+/, $line );
+ $data->{stats}{server_rpc_op0_unused} += $split[0];
+ $data->{stats}{server_rpc_op1_unused} += $split[1];
+ $data->{stats}{server_rpc_op2_future} += $split[2];
+ $data->{stats}{server_rpc_Access} += $split[3];
+ $data->{stats}{server_rpc_Close} += $split[4];
+ } elsif ( $previous_line =~ /commit\ +create\ +delegpurge\ +delegreturn\ +getattr/ && $mode eq 'server' ) {
+ my @split = split( /[\t\ ]+/, $line );
+ $data->{stats}{server_rpc_Commit} += $split[0];
+ $data->{stats}{server_rpc_Create} += $split[1];
+ $data->{stats}{server_rpc_DelePurge} += $split[2];
+ $data->{stats}{server_rpc_DelRet} += $split[3];
+ $data->{stats}{server_rpc_Getattr} += $split[4];
+ } elsif ( $previous_line =~ /getfh\ +link\ +lock\ +lockt\ +locku/ && $mode eq 'server' ) {
+ my @split = split( /[\t\ ]+/, $line );
+ $data->{stats}{server_rpc_GetFH} += $split[0];
+ $data->{stats}{server_rpc_Link} += $split[1];
+ $data->{stats}{server_rpc_Lock} += $split[2];
+ $data->{stats}{server_rpc_LockT} += $split[3];
+ $data->{stats}{server_rpc_LockU} += $split[4];
+ } elsif ( $previous_line =~ /lookup\ +lookup_root\ +nverify\ +open\ +openattr/ && $mode eq 'server' ) {
+ my @split = split( /[\t\ ]+/, $line );
+ $data->{stats}{server_rpc_Lookup} += $split[0];
+ $data->{stats}{server_rpc_LookupP} += $split[1];
+ $data->{stats}{server_rpc_NVerify} += $split[2];
+ $data->{stats}{server_rpc_Open} += $split[3];
+ $data->{stats}{server_rpc_OpenAttr} += $split[4];
+ } elsif ( $previous_line =~ /open_conf\ +open_dgrd\ +putfh\ +putpubfh\ +putrootfh/ && $mode eq 'server' ) {
+ my @split = split( /[\t\ ]+/, $line );
+ $data->{stats}{server_rpc_OpenCfrm} += $split[0];
+ $data->{stats}{server_rpc_OpenDwnGr} += 
$split[1]; + $data->{stats}{server_rpc_PutFH} += $split[2]; + $data->{stats}{server_rpc_PutPubFH} += $split[3]; + $data->{stats}{server_rpc_PutRootFH} += $split[4]; + } elsif ( $previous_line =~ /read\ +readdir\ +readlink\ +remove\ +rename/ && $mode eq 'server' ) { + my @split = split( /[\t\ ]+/, $line ); + $data->{stats}{server_rpc_Read} += $split[0]; + $data->{stats}{server_rpc_Readdir} += $split[1]; + $data->{stats}{server_rpc_Readlink} += $split[2]; + $data->{stats}{server_rpc_Remove} += $split[3]; + $data->{stats}{server_rpc_Rename} += $split[4]; + } elsif ( $previous_line =~ /renew\ +restorefh\ +savefh\ +secinfo\ +setattr/ && $mode eq 'server' ) { + my @split = split( /[\t\ ]+/, $line ); + $data->{stats}{server_rpc_Renew} += $split[0]; + $data->{stats}{server_rpc_RestoreFH} += $split[1]; + $data->{stats}{server_rpc_SaveFH} += $split[2]; + $data->{stats}{server_rpc_Secinfo} += $split[3]; + $data->{stats}{server_rpc_Setattr} += $split[4]; + } elsif ( $previous_line =~ /setcltid\ +setcltidconf\ +verify\ +write\ +rellockowner/ && $mode eq 'server' ) { + my @split = split( /[\t\ ]+/, $line ); + $data->{stats}{server_rpc_SetClId} += $split[0]; + $data->{stats}{server_rpc_SetClIdCf} += $split[1]; + $data->{stats}{server_rpc_Verify} += $split[2]; + $data->{stats}{server_rpc_Write} += $split[3]; + $data->{stats}{server_rpc_RelLockOwn} += $split[4]; + } elsif ( $previous_line =~ /bc_ctl\ +bind_conn\ +exchange_id\ +create_ses\ +destroy_ses/ && $mode eq 'server' ) + { + my @split = split( /[\t\ ]+/, $line ); + $data->{stats}{server_rpc_BackChannelCt} += $split[0]; + $data->{stats}{server_rpc_BindConnToSes} += $split[1]; + $data->{stats}{server_rpc_ExchangeID} += $split[2]; + $data->{stats}{server_rpc_CreateSess} += $split[3]; + $data->{stats}{server_rpc_DestroySess} += $split[4]; + } elsif ( $previous_line =~ /bc_ctl\ +bind_conn\ +exchange_id\ +create_ses\ +destroy_ses/ && $mode eq 'server' ) + { + my @split = split( /[\t\ ]+/, $line ); + $data->{stats}{server_rpc_BackChannelCt} += $split[0]; + $data->{stats}{server_rpc_BindConnToSes} += $split[1]; + $data->{stats}{server_rpc_ExchangeID} += $split[2]; + $data->{stats}{server_rpc_CreateSess} += $split[3]; + $data->{stats}{server_rpc_DestroySess} += $split[4]; + } elsif ( $previous_line =~ /free_stateid\ +getdirdeleg\ +getdevinfo\ +getdevlist\ +layoutcommit/ + && $mode eq 'server' ) + { + my @split = split( /[\t\ ]+/, $line ); + $data->{stats}{server_rpc_FreeStateID} += $split[0]; + $data->{stats}{server_rpc_GetDirDeleg} += $split[1]; + $data->{stats}{server_rpc_GetDevInfo} += $split[2]; + $data->{stats}{server_rpc_GetDevList} += $split[3]; + $data->{stats}{server_rpc_layoutCommit} += $split[4]; + } elsif ( $previous_line =~ /layoutget\ +layoutreturn\ +secinfononam\ +sequence\ +set_ssv/ + && $mode eq 'server' ) + { + my @split = split( /[\t\ ]+/, $line ); + $data->{stats}{server_rpc_LayoutGet} += $split[0]; + $data->{stats}{server_rpc_LayoutReturn} += $split[1]; + $data->{stats}{server_rpc_SecInfNoName} += $split[2]; + $data->{stats}{server_rpc_Sequence} += $split[3]; + $data->{stats}{server_rpc_SetSSV} += $split[4]; + } elsif ( $previous_line =~ /test_stateid\ +want_deleg\ +destroy_clid\ +reclaim_comp\ +allocate/ + && $mode eq 'server' ) + { + my @split = split( /[\t\ ]+/, $line ); + $data->{stats}{server_rpc_TestStateID} += $split[0]; + $data->{stats}{server_rpc_WantDeleg} += $split[1]; + $data->{stats}{server_rpc_DestroyClId} += $split[2]; + $data->{stats}{server_rpc_ReclaimCompl} += $split[3]; + $data->{stats}{server_rpc_Allocate} += 
$split[4];
+ } elsif ( $previous_line =~ /copy\ +copy_notify\ +deallocate\ +ioadvise\ +layouterror/ && $mode eq 'server' ) {
+ my @split = split( /[\t\ ]+/, $line );
+ $data->{stats}{server_rpc_Copy} += $split[0];
+ $data->{stats}{server_rpc_CopyNotify} += $split[1];
+ $data->{stats}{server_rpc_Deallocate} += $split[2];
+ $data->{stats}{server_rpc_IOAdvise} += $split[3];
+ $data->{stats}{server_rpc_LayoutError} += $split[4];
+ } elsif ( $previous_line =~ /layoutstats\ +offloadcancel\ +offloadstatus\ +readplus\ +seek/
+ && $mode eq 'server' )
+ {
+ my @split = split( /[\t\ ]+/, $line );
+ $data->{stats}{server_rpc_LayoutStats} += $split[0];
+ $data->{stats}{server_rpc_OffloadCncl} += $split[1];
+ $data->{stats}{server_rpc_OffloadStat} += $split[2];
+ $data->{stats}{server_rpc_ReadPlus} += $split[3];
+ $data->{stats}{server_rpc_Seek} += $split[4];
+ } elsif ( $previous_line =~ /write_same/ && $mode eq 'server' ) {
+ my @split = split( /[\t\ ]+/, $line );
+ $data->{stats}{server_rpc_WriteSame} += $split[0];
+ } elsif ( $previous_line =~ /packets\ +udp\ +tcp\ +tcpconn/ && $mode eq 'client' ) {
+ my @split = split( /[\t\ ]+/, $line );
+ $data->{stats}{client_network_packets} += $split[0];
+ $data->{stats}{client_network_udp} += $split[1];
+ $data->{stats}{client_network_tcp} += $split[2];
+ $data->{stats}{client_network_tcpconn} += $split[3];
+ } elsif ( $previous_line =~ /calls\ +retrans\ +authrefrsh/ && $mode eq 'client' ) {
+ my @split = split( /[\t\ ]+/, $line );
+ $data->{stats}{client_rpc_info_Requests} += $split[0];
+ $data->{stats}{client_rpc_info_Retries} += $split[1];
+ $data->{stats}{client_rpc_info_authrefrsh} += $split[2];
+ } elsif ( $previous_line =~ /null\ +getattr\ +setattr\ +root\ +lookup/ && $mode eq 'client' ) {
+ my @split = split( /[\t\ ]+/, $line );
+ $data->{stats}{client_rpc_null} += $split[0];
+ $data->{stats}{client_rpc_Getattr} += $split[1];
+ $data->{stats}{client_rpc_Setattr} += $split[2];
+ $data->{stats}{client_rpc_root} += $split[3];
+ $data->{stats}{client_rpc_Lookup} += $split[4];
+ } elsif ( $previous_line =~ /readlink\ +read\ +wrcache\ +write\ +create/ && $mode eq 'client' ) {
+ my @split = split( /[\t\ ]+/, $line );
+ $data->{stats}{client_rpc_Readlink} += $split[0];
+ $data->{stats}{client_rpc_Read} += $split[1];
+ $data->{stats}{client_rpc_wrcache} += $split[2];
+ $data->{stats}{client_rpc_Write} += $split[3];
+ $data->{stats}{client_rpc_Create} += $split[4];
+ } elsif ( $previous_line =~ /remove\ +rename\ +link\ +symlink\ +mkdir/ && $mode eq 'client' ) {
+ my @split = split( /[\t\ ]+/, $line );
+ $data->{stats}{client_rpc_Remove} += $split[0];
+ $data->{stats}{client_rpc_Rename} += $split[1];
+ $data->{stats}{client_rpc_Link} += $split[2];
+ $data->{stats}{client_rpc_Symlink} += $split[3];
+ $data->{stats}{client_rpc_Mkdir} += $split[4];
+ } elsif ( $previous_line =~ /rmdir\ +readdir\ +fsstat/ && $mode eq 'client' ) {
+ my @split = split( /[\t\ ]+/, $line );
+ $data->{stats}{client_rpc_Rmdir} += $split[0];
+ $data->{stats}{client_rpc_Readdir} += $split[1];
+ $data->{stats}{client_rpc_Fsstat} += $split[2];
+ } elsif ( $previous_line =~ /null\ +getattr\ +setattr\ +lookup\ +access/ && $mode eq 'client' ) {
+ my @split = split( /[\t\ ]+/, $line );
+ $data->{stats}{client_rpc_null} += $split[0];
+ $data->{stats}{client_rpc_Getattr} += $split[1];
+ $data->{stats}{client_rpc_Setattr} += $split[2];
+ $data->{stats}{client_rpc_Lookup} += $split[3];
+ $data->{stats}{client_rpc_Access} += $split[4];
+ } elsif ( $previous_line =~ /readlink\ +read\ +write\ +create\ 
+mkdir/ && $mode eq 'client' ) {
+ my @split = split( /[\t\ ]+/, $line );
+ $data->{stats}{client_rpc_Readlink} += $split[0];
+ $data->{stats}{client_rpc_Read} += $split[1];
+ $data->{stats}{client_rpc_Write} += $split[2];
+ $data->{stats}{client_rpc_Create} += $split[3];
+ $data->{stats}{client_rpc_Mkdir} += $split[4];
+ } elsif ( $previous_line =~ /symlink\ +mknod\ +remove\ +rmdir\ +rename/ && $mode eq 'client' ) {
+ my @split = split( /[\t\ ]+/, $line );
+ $data->{stats}{client_rpc_Symlink} += $split[0];
+ $data->{stats}{client_rpc_Mknod} += $split[1];
+ $data->{stats}{client_rpc_Remove} += $split[2];
+ $data->{stats}{client_rpc_Rmdir} += $split[3];
+ $data->{stats}{client_rpc_Rename} += $split[4];
+ } elsif ( $previous_line =~ /link\ +readdir\ +readdirplus\ +fsstat\ +fsinfo/ && $mode eq 'client' ) {
+ my @split = split( /[\t\ ]+/, $line );
+ $data->{stats}{client_rpc_Link} += $split[0];
+ $data->{stats}{client_rpc_Readdir} += $split[1];
+ $data->{stats}{client_rpc_RdirPlus} += $split[2];
+ $data->{stats}{client_rpc_Fsstat} += $split[3];
+ $data->{stats}{client_rpc_FSinfo} += $split[4];
+ } elsif ( $previous_line =~ /pathconf\ +commit/ && $mode eq 'client' ) {
+ my @split = split( /[\t\ ]+/, $line );
+ $data->{stats}{client_rpc_pathConf} += $split[0];
+ $data->{stats}{client_rpc_Commit} += $split[1];
+ } elsif ( $previous_line =~ /null\ +read\ +write\ +commit\ +open/ && $mode eq 'client' ) {
+ my @split = split( /[\t\ ]+/, $line );
+ $data->{stats}{client_rpc_null} += $split[0];
+ $data->{stats}{client_rpc_Read} += $split[1];
+ $data->{stats}{client_rpc_Write} += $split[2];
+ $data->{stats}{client_rpc_Commit} += $split[3];
+ $data->{stats}{client_rpc_Open} += $split[4];
+ } elsif ( $previous_line =~ /open_conf\ +open_noat\ +open_dgrd\ +close\ +setattr/ && $mode eq 'client' ) {
+ my @split = split( /[\t\ ]+/, $line );
+ $data->{stats}{client_rpc_OpenCfr} += $split[0];
+ $data->{stats}{client_rpc_OpenLayout} += $split[1];
+ $data->{stats}{client_rpc_OpenDownGr} += $split[2];
+ $data->{stats}{client_rpc_Close} += $split[3];
+ $data->{stats}{client_rpc_Setattr} += $split[4];
+ } elsif ( $previous_line =~ /fsinfo\ +renew\ +setclntid\ +confirm\ +lock/ && $mode eq 'client' ) {
+ my @split = split( /[\t\ ]+/, $line );
+ $data->{stats}{client_rpc_FSinfo} += $split[0];
+ $data->{stats}{client_rpc_renew} += $split[1];
+ $data->{stats}{client_rpc_SetClId} += $split[2];
+ $data->{stats}{client_rpc_confirm} += $split[3];
+ $data->{stats}{client_rpc_Lock} += $split[4];
+ } elsif ( $previous_line =~ /lockt\ +locku\ +access\ +getattr\ +lookup/ && $mode eq 'client' ) {
+ my @split = split( /[\t\ ]+/, $line );
+ $data->{stats}{client_rpc_LockT} += $split[0];
+ $data->{stats}{client_rpc_LockU} += $split[1];
+ $data->{stats}{client_rpc_Access} += $split[2];
+ $data->{stats}{client_rpc_Getattr} += $split[3];
+ $data->{stats}{client_rpc_Lookup} += $split[4];
+ } elsif ( $previous_line =~ /lookup_root\ +remove\ +rename\ +link\ +symlink/ && $mode eq 'client' ) {
+ my @split = split( /[\t\ ]+/, $line );
+ $data->{stats}{client_rpc_LookupOpen} += $split[0];
+ $data->{stats}{client_rpc_Remove} += $split[1];
+ $data->{stats}{client_rpc_Rename} += $split[2];
+ $data->{stats}{client_rpc_Link} += $split[3];
+ $data->{stats}{client_rpc_Symlink} += $split[4];
+ } elsif ( $previous_line =~ /create\ +pathconf\ +statfs\ +readlink\ +readdir/ && $mode eq 'client' ) {
+ my @split = split( /[\t\ ]+/, $line );
+ $data->{stats}{client_rpc_Create} += $split[0];
+ $data->{stats}{client_rpc_pathConf} += $split[1];
+ 
$data->{stats}{client_rpc_Fsstat} += $split[2]; + $data->{stats}{client_rpc_Readlink} += $split[3]; + $data->{stats}{client_rpc_Readdir} += $split[4]; + } elsif ( $previous_line =~ /server_caps\ +delegreturn\ +getacl\ +setacl\ +fs_locations/ && $mode eq 'client' ) + { + my @split = split( /[\t\ ]+/, $line ); + $data->{stats}{client_rpc_server_caps} += $split[0]; + $data->{stats}{client_rpc_DelegRet} += $split[1]; + $data->{stats}{client_rpc_Getattr} += $split[2]; + $data->{stats}{client_rpc_SetAcl} += $split[3]; + $data->{stats}{client_rpc_fs_locations} += $split[4]; + } elsif ( $previous_line =~ /rel_lkowner\ +secinfo\ +fsid_present\ +exchange_id\ +create_session/ + && $mode eq 'client' ) + { + my @split = split( /[\t\ ]+/, $line ); + $data->{stats}{client_rpc_RelLckOwn} += $split[0]; + $data->{stats}{client_rpc_secinfo} += $split[1]; + $data->{stats}{client_rpc_fsid_present} += $split[2]; + $data->{stats}{client_rpc_ExchangeId} += $split[3]; + $data->{stats}{client_rpc_CreateSess} += $split[4]; + } elsif ( $previous_line =~ /destroy_session\ +sequence\ +get_lease_time\ +reclaim_comp\ +layoutget/ + && $mode eq 'client' ) + { + my @split = split( /[\t\ ]+/, $line ); + $data->{stats}{client_rpc_DestroySess} += $split[0]; + $data->{stats}{client_rpc_sequence} += $split[1]; + $data->{stats}{client_rpc_get_lease_time} += $split[2]; + $data->{stats}{client_rpc_ReclaimCompl} += $split[3]; + $data->{stats}{client_rpc_LayoutGet} += $split[4]; + } elsif ( $previous_line =~ /getdevinfo\ +layoutcommit\ +layoutreturn\ +secinfo_no\ +test_stateid/ + && $mode eq 'client' ) + { + my @split = split( /[\t\ ]+/, $line ); + $data->{stats}{client_rpc_GetDevInfo} += $split[0]; + $data->{stats}{client_rpc_LayoutCommit} += $split[1]; + $data->{stats}{client_rpc_LayoutReturn} += $split[2]; + $data->{stats}{client_rpc_secinfo_no} += $split[3]; + $data->{stats}{client_rpc_test_stateid} += $split[4]; + } elsif ( $previous_line =~ /free_stateid\ +getdevicelist\ +bind_conn_to_ses\ +destroy_clientid\ +seek/ + && $mode eq 'client' ) + { + my @split = split( /[\t\ ]+/, $line ); + $data->{stats}{client_rpc_FreeStateID} += $split[0]; + $data->{stats}{client_rpc_getdevicelist} += $split[1]; + $data->{stats}{client_rpc_BindConnSess} += $split[2]; + $data->{stats}{client_rpc_DestroyClId} += $split[3]; + $data->{stats}{client_rpc_Seek} += $split[4]; + } elsif ( $previous_line =~ /allocate\ +deallocate\ +layoutstats\ +clone/ && $mode eq 'client' ) { + my @split = split( /[\t\ ]+/, $line ); + $data->{stats}{client_rpc_Allocate} += $split[0]; + $data->{stats}{client_rpc_Deallocate} += $split[1]; + $data->{stats}{client_rpc_layoutstats} += $split[2]; + $data->{stats}{client_rpc_Close} += $split[3]; } $previous_line = $line; } ## end foreach my $line (@output_split) From b5edc44ce83ecfc36298ff4277bf7ee79854927b Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Tue, 12 Mar 2024 22:34:48 -0500 Subject: [PATCH 287/332] fix linux stuff --- snmp/nfs | 136 +++++++++++++++++++++++-------------------------------- 1 file changed, 57 insertions(+), 79 deletions(-) diff --git a/snmp/nfs b/snmp/nfs index c6391d0a5..bc00b0e46 100755 --- a/snmp/nfs +++ b/snmp/nfs @@ -54,30 +54,6 @@ if possible. =cut -## -## -## General Notes -## -## -# -# FreeBSD used as the design basis given better stats produced and as well -# as actually documented. - -### -### -### Linux Notes -### -### -# -# What the following map to if if there is a FreeBSD equivalent is not clear. 
-# -# fs_locations -# test_stateid -# fsid_present -# open_conf -# confirm -# null - use strict; use warnings; use Getopt::Long; @@ -662,38 +638,40 @@ if ( $^O eq 'freebsd' ) { #### #### if ( $^O eq 'linux' ) { - my $output_raw = `nfsstat | sed 's/[0-9\.]*\%//g'`; + my $output_raw = `nfsstat -2 -3 -4 -v| sed 's/[0-9\.]*\%//g'`; my @output_split = split( /\n/, $output_raw ); my $previous_line = ''; my $mode = ''; foreach my $line (@output_split) { + $line =~ s/\t/\ /g; + $line =~ s/\ +$//g; if ( $line =~ /^[Cc]lient/ ) { $mode = 'client'; } elsif ( $line =~ /^[Ss]erver/ ) { $mode = 'server'; } - if ( $previous_line =~ /packets\ +udp\ +tcp\ +tcpconn/ && $mode eq 'server' ) { + if ( $previous_line =~ /^packets\ +udp\ +tcp\ +tcpconn/ && $mode eq 'server' ) { ( $data->{stats}{server_network_packets}, $data->{stats}{server_network_udp}, $data->{stats}{server_network_tcp}, $data->{stats}{server_network_tcpconn}, ) = split( /[\t\ ]+/, $line ); - } elsif ( $previous_line =~ /calls\ +badcalls\ +badfmt\ +badauth\ +badclnt/ && $mode eq 'server' ) { + } elsif ( $previous_line =~ /^calls\ +badcalls\ +badfmt\ +badauth\ +badclnt/ && $mode eq 'server' ) { ( $data->{stats}{server_rpcStats_calls}, $data->{stats}{server_rpcStats_badcalls}, $data->{stats}{server_rpcStats_badfmt}, $data->{stats}{server_rpcStats_badauth}, $data->{stats}{server_rpcStats_badclnt}, ) = split( /[\t\ ]+/, $line ); - } elsif ( $previous_line =~ /hits\ +misses\ +nocache/ && $mode eq 'server' ) { + } elsif ( $previous_line =~ /^hits\ +misses\ +nocache/ && $mode eq 'server' ) { ( $data->{stats}{server_cache_hits}, $data->{stats}{server_cache_Misses}, $data->{stats}{server_cache_nocache}, ) = split( /[\t\ ]+/, $line ); - } elsif ( $previous_line =~ /read\ +write/ && $mode eq 'server' ) { + } elsif ( $previous_line =~ /^read\ +write/ && $mode eq 'server' ) { ( $data->{stats}{server_io_read}, $data->{stats}{server_io_write}, ) = split( /[\t\ ]+/, $line ); } elsif ( $previous_line - =~ /size\ +0\-10\%\ +10\-20\%\ +20\-3\0%\ +30\-40\%\ +40\-50\%\ +50\-60\%\ +60\-70\%\ +70\-80\%\ +80\-90\%\ +90\-100\%\ +notfound/ + =~ /^size\ +0\-10\%\ +10\-20\%\ +20\-3\0%\ +30\-40\%\ +40\-50\%\ +50\-60\%\ +60\-70\%\ +70\-80\%\ +80\-90\%\ +90\-100\%\ +notfound/ && $mode eq 'server' ) { ( @@ -704,131 +682,131 @@ if ( $^O eq 'linux' ) { $data->{stats}{server_RAcache_8}, $data->{stats}{server_RAcache_9}, $data->{stats}{server_RAcache_notfound} ) = split( /[\t\ ]+/, $line ); - } elsif ( $previous_line =~ /lookup\ +anon\ +ncachedir\ +ncachenondir\ +stale/ && $mode eq 'server' ) { + } elsif ( $previous_line =~ /^lookup\ +anon\ +ncachedir\ +ncachenondir\ +stale/ && $mode eq 'server' ) { ( $data->{stats}{server_FHcache_lookup}, $data->{stats}{server_FHcache_anon}, $data->{stats}{server_FHcache_ncachedir}, $data->{stats}{server_FHcache_ncachenondir}, $data->{stats}{server_FHcache_stale}, ) = split( /[\t\ ]+/, $line ); - } elsif ( $previous_line =~ /null\ +getattr\ +setattr\ +root\ +lookup/ && $mode eq 'server' ) { + } elsif ( $previous_line =~ /^null\ +getattr\ +setattr\ +root\ +lookup/ && $mode eq 'server' ) { my @split = split( /[\t\ ]+/, $line ); $data->{stats}{server_rpc_null} += $split[0]; $data->{stats}{server_rpc_Getattr} += $split[1]; $data->{stats}{server_rpc_Setattr} += $split[2]; $data->{stats}{server_rpc_root} += $split[3]; $data->{stats}{server_rpc_Lookup} += $split[4]; - } elsif ( $previous_line =~ /readlink\ +read\ +wrcache\ +write\ +create/ && $mode eq 'server' ) { + } elsif ( $previous_line =~ /^readlink\ +read\ +wrcache\ +write\ +create/ && $mode eq 
'server' ) { my @split = split( /[\t\ ]+/, $line ); $data->{stats}{server_rpc_Readlink} += $split[0]; $data->{stats}{server_rpc_Read} += $split[1]; $data->{stats}{server_rpc_wrcache} += $split[2]; $data->{stats}{server_rpc_Write} += $split[3]; $data->{stats}{server_rpc_Create} += $split[4]; - } elsif ( $previous_line =~ /remove\ +rename\ +link\ +symlink\ +mkdir/ && $mode eq 'server' ) { + } elsif ( $previous_line =~ /^remove\ +rename\ +link\ +symlink\ +mkdir/ && $mode eq 'server' ) { my @split = split( /[\t\ ]+/, $line ); $data->{stats}{server_rpc_Remove} += $split[0]; $data->{stats}{server_rpc_Rename} += $split[1]; $data->{stats}{server_rpc_Link} += $split[2]; $data->{stats}{server_rpc_Symlink} += $split[3]; $data->{stats}{server_rpc_Mkdir} += $split[4]; - } elsif ( $previous_line =~ /rmdir\ +readdir\ +fsstat/ && $mode eq 'server' ) { + } elsif ( $previous_line =~ /^rmdir\ +readdir\ +fsstat/ && $mode eq 'server' ) { my @split = split( /[\t\ ]+/, $line ); $data->{stats}{server_rpc_Rmdir} += $split[0]; $data->{stats}{server_rpc_Readdir} += $split[1]; $data->{stats}{server_rpc_Fsstat} += $split[2]; - } elsif ( $previous_line =~ /null\ +getattr\ +setattr\ +lookup\ +access/ && $mode eq 'server' ) { + } elsif ( $previous_line =~ /^null\ +getattr\ +setattr\ +lookup\ +access/ && $mode eq 'server' ) { my @split = split( /[\t\ ]+/, $line ); $data->{stats}{server_rpc_null} += $split[0]; $data->{stats}{server_rpc_Getattr} += $split[1]; $data->{stats}{server_rpc_Setattr} += $split[2]; $data->{stats}{server_rpc_Lookup} += $split[3]; $data->{stats}{server_rpc_Access} += $split[4]; - } elsif ( $previous_line =~ /readlink\ +read\ +write\ +create\ +mkdir/ && $mode eq 'server' ) { + } elsif ( $previous_line =~ /^readlink\ +read\ +write\ +create\ +mkdir/ && $mode eq 'server' ) { my @split = split( /[\t\ ]+/, $line ); $data->{stats}{server_rpc_Readlink} += $split[0]; $data->{stats}{server_rpc_Read} += $split[1]; $data->{stats}{server_rpc_Write} += $split[2]; $data->{stats}{server_rpc_Create} += $split[3]; $data->{stats}{server_rpc_Mkdir} += $split[4]; - } elsif ( $previous_line =~ /symlink\ +mknod\ +remove\ +rmdir\ +rename/ && $mode eq 'server' ) { + } elsif ( $previous_line =~ /^symlink\ +mknod\ +remove\ +rmdir\ +rename/ && $mode eq 'server' ) { my @split = split( /[\t\ ]+/, $line ); $data->{stats}{server_rpc_Symlink} += $split[0]; $data->{stats}{server_rpc_Mknod} += $split[1]; $data->{stats}{server_rpc_Remove} += $split[2]; $data->{stats}{server_rpc_Rmdir} += $split[3]; $data->{stats}{server_rpc_Rename} += $split[4]; - } elsif ( $previous_line =~ /link\ +readdir\ +readdirplus\ +fsstat\ +fsinfo/ && $mode eq 'server' ) { + } elsif ( $previous_line =~ /^link\ +readdir\ +readdirplus\ +fsstat\ +fsinfo/ && $mode eq 'server' ) { my @split = split( /[\t\ ]+/, $line ); $data->{stats}{server_rpc_Link} += $split[0]; $data->{stats}{server_rpc_Readdir} += $split[1]; $data->{stats}{server_rpc_ReadPlus} += $split[2]; $data->{stats}{server_rpc_Fsstat} += $split[3]; $data->{stats}{server_rpc_FSinfo} += $split[4]; - } elsif ( $previous_line =~ /pathconf\ +commit/ && $mode eq 'server' ) { + } elsif ( $previous_line =~ /^pathconf\ +commit/ && $mode eq 'server' ) { my @split = split( /[\t\ ]+/, $line ); $data->{stats}{server_rpc_pathConf} += $split[0]; $data->{stats}{server_rpc_Commit} += $split[1]; - } elsif ( $previous_line =~ /null\ +compound/ && $mode eq 'server' ) { + } elsif ( $previous_line =~ /^null\ +compound/ && $mode eq 'server' ) { my @split = split( /[\t\ ]+/, $line ); $data->{stats}{server_rpc_null} += $split[0]; 
$data->{stats}{server_rpc_compound} += $split[1]; - } elsif ( $previous_line =~ /op0\-unused\ +op1\-unused\ +op2\-future\ +access\ +close/ && $mode eq 'server' ) { + } elsif ( $previous_line =~ /^op0\-unused\ +op1\-unused\ +op2\-future\ +access\ +close/ && $mode eq 'server' ) { my @split = split( /[\t\ ]+/, $line ); $data->{stats}{server_rpc_op0_unused} += $split[0]; $data->{stats}{server_rpc_op1_unused} += $split[1]; $data->{stats}{server_rpc_op2_future} += $split[2]; $data->{stats}{server_rpc_Access} += $split[3]; $data->{stats}{server_rpc_Close} += $split[4]; - } elsif ( $previous_line =~ /commit\ +create\ +delegpurge\ +delegreturn\ +getattr/ && $mode eq 'server' ) { + } elsif ( $previous_line =~ /^commit\ +create\ +delegpurge\ +delegreturn\ +getattr/ && $mode eq 'server' ) { my @split = split( /[\t\ ]+/, $line ); $data->{stats}{server_rpc_Commit} += $split[0]; $data->{stats}{server_rpc_Create} += $split[1]; $data->{stats}{server_rpc_DelePurge} = $split[2]; $data->{stats}{server_rpc_DelRet} = $split[3]; $data->{stats}{server_rpc_Getattr} += $split[4]; - } elsif ( $previous_line =~ /getfh\ +link\ +lock\ +lockt\ +locku/ && $mode eq 'server' ) { + } elsif ( $previous_line =~ /^getfh\ +link\ +lock\ +lockt\ +locku/ && $mode eq 'server' ) { my @split = split( /[\t\ ]+/, $line ); $data->{stats}{server_rpc_GetFH} += $split[0]; $data->{stats}{server_rpc_Link} += $split[1]; $data->{stats}{server_rpc_Lock} += $split[2]; $data->{stats}{server_rpc_LockT} += $split[3]; $data->{stats}{server_rpc_LockU} += $split[4]; - } elsif ( $previous_line =~ /lookup\ +lookup_root\ +nverify\ +open\ +openattr/ && $mode eq 'server' ) { + } elsif ( $previous_line =~ /^lookup\ +lookup_root\ +nverify\ +open\ +openattr/ && $mode eq 'server' ) { my @split = split( /[\t\ ]+/, $line ); $data->{stats}{server_rpc_Lookup} += $split[0]; $data->{stats}{server_rpc_LookupP} += $split[1]; $data->{stats}{server_rpc_NVerify} += $split[2]; $data->{stats}{server_rpc_Open} += $split[3]; $data->{stats}{server_rpc_OpeAttr} += $split[4]; - } elsif ( $previous_line =~ /open_conf\ +open_dgrd\ +putfh\ +putpubfh\ +putrootfh/ && $mode eq 'server' ) { + } elsif ( $previous_line =~ /^open_conf\ +open_dgrd\ +putfh\ +putpubfh\ +putrootfh/ && $mode eq 'server' ) { my @split = split( /[\t\ ]+/, $line ); $data->{stats}{server_rpc_OpenCfrm} += $split[0]; $data->{stats}{server_rpc_OpenDwnGr} += $split[1]; $data->{stats}{server_rpc_PutFH} += $split[2]; $data->{stats}{server_rpc_PutPubFH} += $split[3]; $data->{stats}{server_rpc_PutRootFH} += $split[4]; - } elsif ( $previous_line =~ /read\ +readdir\ +readlink\ +remove\ +rename/ && $mode eq 'server' ) { + } elsif ( $previous_line =~ /^read\ +readdir\ +readlink\ +remove\ +rename/ && $mode eq 'server' ) { my @split = split( /[\t\ ]+/, $line ); $data->{stats}{server_rpc_Read} += $split[0]; $data->{stats}{server_rpc_Readdir} += $split[1]; $data->{stats}{server_rpc_Readlink} += $split[2]; $data->{stats}{server_rpc_Remove} += $split[3]; $data->{stats}{server_rpc_Rename} += $split[4]; - } elsif ( $previous_line =~ /renew\ +restorefh\ +savefh\ +secinfo\ +setattr/ && $mode eq 'server' ) { + } elsif ( $previous_line =~ /^renew\ +restorefh\ +savefh\ +secinfo\ +setattr/ && $mode eq 'server' ) { my @split = split( /[\t\ ]+/, $line ); $data->{stats}{server_rpc_Renew} += $split[0]; $data->{stats}{server_rpc_RestoreFH} += $split[1]; $data->{stats}{server_rpc_SaveFH} += $split[2]; $data->{stats}{server_rpc_Secinfo} += $split[3]; $data->{stats}{server_rpc_Setattr} += $split[4]; - } elsif ( $previous_line =~ /setcltid\ 
+setcltidconf\ +verify\ +write\ +rellockowner/ && $mode eq 'server' ) { + } elsif ( $previous_line =~ /^setcltid\ +setcltidconf\ +verify\ +write\ +rellockowner/ && $mode eq 'server' ) { my @split = split( /[\t\ ]+/, $line ); $data->{stats}{server_rpc_SetClId} += $split[0]; $data->{stats}{server_rpc_SetClIdCf} += $split[1]; $data->{stats}{server_rpc_Verify} += $split[2]; $data->{stats}{server_rpc_Write} += $split[3]; $data->{stats}{server_rpc_RelLockOwn} += $split[4]; - } elsif ( $previous_line =~ /bc_ctl\ +bind_conn\ +exchange_id\ +create_ses\ +destroy_ses/ && $mode eq 'server' ) + } elsif ( $previous_line =~ /^bc_ctl\ +bind_conn\ +exchange_id\ +create_ses\ +destroy_ses/ && $mode eq 'server' ) { my @split = split( /[\t\ ]+/, $line ); $data->{stats}{server_rpc_BackChannelCt} += $split[0]; @@ -836,7 +814,7 @@ if ( $^O eq 'linux' ) { $data->{stats}{server_rpc_ExchangeID} += $split[2]; $data->{stats}{server_rpc_CreateSess} += $split[3]; $data->{stats}{server_rpc_DestroySess} += $split[4]; - } elsif ( $previous_line =~ /bc_ctl\ +bind_conn\ +exchange_id\ +create_ses\ +destroy_ses/ && $mode eq 'server' ) + } elsif ( $previous_line =~ /^bc_ctl\ +bind_conn\ +exchange_id\ +create_ses\ +destroy_ses/ && $mode eq 'server' ) { my @split = split( /[\t\ ]+/, $line ); $data->{stats}{server_rpc_BackChannelCt} += $split[0]; @@ -844,7 +822,7 @@ if ( $^O eq 'linux' ) { $data->{stats}{server_rpc_ExchangeID} += $split[2]; $data->{stats}{server_rpc_CreateSess} += $split[3]; $data->{stats}{server_rpc_DestroySess} += $split[4]; - } elsif ( $previous_line =~ /free_stateid\ +getdirdeleg\ +getdevinfo\ +getdevlist\ +layoutcommit/ + } elsif ( $previous_line =~ /^free_stateid\ +getdirdeleg\ +getdevinfo\ +getdevlist\ +layoutcommit/ && $mode eq 'server' ) { my @split = split( /[\t\ ]+/, $line ); @@ -853,7 +831,7 @@ if ( $^O eq 'linux' ) { $data->{stats}{server_rpc_GetDevInfo} += $split[2]; $data->{stats}{server_rpc_GetDevList} += $split[3]; $data->{stats}{server_rpc_layoutCommit} += $split[4]; - } elsif ( $previous_line =~ /layoutget\ +layoutreturn\ +secinfononam\ +sequence\ +set_ssv/ + } elsif ( $previous_line =~ /^layoutget\ +layoutreturn\ +secinfononam\ +sequence\ +set_ssv/ && $mode eq 'server' ) { my @split = split( /[\t\ ]+/, $line ); @@ -862,7 +840,7 @@ if ( $^O eq 'linux' ) { $data->{stats}{server_rpc_SecInfNoName} += $split[2]; $data->{stats}{server_rpc_Sequence} += $split[3]; $data->{stats}{server_rpc_SetSSV} += $split[4]; - } elsif ( $previous_line =~ /test_stateid\ +want_deleg\ +destroy_clid\ +reclaim_comp\ +allocate/ + } elsif ( $previous_line =~ /^test_stateid\ +want_deleg\ +destroy_clid\ +reclaim_comp\ +allocate/ && $mode eq 'server' ) { my @split = split( /[\t\ ]+/, $line ); @@ -871,14 +849,14 @@ if ( $^O eq 'linux' ) { $data->{stats}{server_rpc_DestroyClId} += $split[2]; $data->{stats}{server_rpc_ReclaimCompl} += $split[3]; $data->{stats}{server_rpc_Allocate} += $split[4]; - } elsif ( $previous_line =~ /copy\ +copy_notify\ +deallocate\ +ioadvise\ +layouterror/ && $mode eq 'server' ) { + } elsif ( $previous_line =~ /^copy\ +copy_notify\ +deallocate\ +ioadvise\ +layouterror/ && $mode eq 'server' ) { my @split = split( /[\t\ ]+/, $line ); $data->{stats}{server_rpc_Copy} += $split[0]; $data->{stats}{server_rpc_CopyNotify} += $split[1]; $data->{stats}{server_rpc_Deallocate} += $split[2]; $data->{stats}{server_rpc_IOAdvise} += $split[3]; $data->{stats}{server_rpc_LayoutError} += $split[4]; - } elsif ( $previous_line =~ /layoutstats\ +offloadcancel\ +offloadstatus\ +readplus\ +seek/ + } elsif ( $previous_line =~ 
/^layoutstats\ +offloadcancel\ +offloadstatus\ +readplus\ +seek/ && $mode eq 'server' ) { my @split = split( /[\t\ ]+/, $line ); @@ -887,10 +865,10 @@ if ( $^O eq 'linux' ) { $data->{stats}{server_rpc_OffloadStat} += $split[2]; $data->{stats}{server_rpc_ReadPlus} += $split[3]; $data->{stats}{server_rpc_Seek} += $split[4]; - } elsif ( $previous_line =~ /write_same/ && $mode eq 'server' ) { + } elsif ( $previous_line =~ /^write_same/ && $mode eq 'server' ) { my @split = split( /[\t\ ]+/, $line ); $data->{stats}{server_rpc_WriteSame} += $split[0]; - } elsif ( $previous_line =~ /packets\ +udp\ +tcp\ +tcpconn/ && $mode eq 'client' ) { + } elsif ( $previous_line =~ /^packets\ +udp\ +tcp\ +tcpconn/ && $mode eq 'client' ) { my @split = split( /[\t\ ]+/, $line ); $data->{stats}{client_network_packets} += $split[0]; $data->{stats}{client_network_udp} += $split[1]; @@ -901,107 +879,107 @@ if ( $^O eq 'linux' ) { $data->{stats}{client_rpc_info_calls} += $split[0]; $data->{stats}{client_rpc_info_Retries} += $split[1]; $data->{stats}{client_rpc_info_authrefrsh} += $split[2]; - } elsif ( $previous_line =~ /null\ +getattr\ +setattr\ +root\ +lookup/ && $mode eq 'client' ) { + } elsif ( $previous_line =~ /^null\ +getattr\ +setattr\ +root\ +lookup/ && $mode eq 'client' ) { my @split = split( /[\t\ ]+/, $line ); $data->{stats}{client_rpc_null} += $split[0]; $data->{stats}{client_rpc_Getattr} += $split[1]; $data->{stats}{client_rpc_Setattr} += $split[2]; $data->{stats}{client_rpc_root} += $split[3]; $data->{stats}{client_rpc_Lookup} += $split[4]; - } elsif ( $previous_line =~ /readlink\ +read\ +wrcache\ +write\ +create/ && $mode eq 'client' ) { + } elsif ( $previous_line =~ /^readlink\ +read\ +wrcache\ +write\ +create/ && $mode eq 'client' ) { my @split = split( /[\t\ ]+/, $line ); $data->{stats}{client_rpc_Readlink} += $split[0]; $data->{stats}{client_rpc_Read} += $split[1]; $data->{stats}{client_rpc_wrcache} += $split[2]; $data->{stats}{client_rpc_Write} += $split[3]; $data->{stats}{client_rpc_Create} += $split[4]; - } elsif ( $previous_line =~ /remove\ +rename\ +link\ +symlink\ +mkdir/ && $mode eq 'client' ) { + } elsif ( $previous_line =~ /^remove\ +rename\ +link\ +symlink\ +mkdir/ && $mode eq 'client' ) { my @split = split( /[\t\ ]+/, $line ); $data->{stats}{client_rpc_Remove} += $split[0]; $data->{stats}{client_rpc_Rename} += $split[1]; $data->{stats}{client_rpc_Link} += $split[2]; $data->{stats}{client_rpc_Symlink} += $split[3]; $data->{stats}{client_rpc_Mkdir} += $split[4]; - } elsif ( $previous_line =~ /rmdir\ +readdir\ +fsstat/ && $mode eq 'client' ) { + } elsif ( $previous_line =~ /^rmdir\ +readdir\ +fsstat/ && $mode eq 'client' ) { my @split = split( /[\t\ ]+/, $line ); $data->{stats}{client_rpc_Rmdir} += $split[0]; $data->{stats}{client_rpc_Readdir} += $split[1]; $data->{stats}{client_rpc_Fsstat} += $split[2]; - } elsif ( $previous_line =~ /null\ +getattr\ +setattr\ +lookup\ +access/ && $mode eq 'client' ) { + } elsif ( $previous_line =~ /^null\ +getattr\ +setattr\ +lookup\ +access/ && $mode eq 'client' ) { my @split = split( /[\t\ ]+/, $line ); $data->{stats}{client_rpc_null} += $split[0]; $data->{stats}{client_rpc_Getattr} += $split[1]; $data->{stats}{client_rpc_Setattr} += $split[2]; $data->{stats}{client_rpc_Lookup} += $split[3]; $data->{stats}{client_rpc_Access} += $split[4]; - } elsif ( $previous_line =~ /readlink\ +read\ +write\ +create\ +mkdir/ && $mode eq 'client' ) { + } elsif ( $previous_line =~ /^readlink\ +read\ +write\ +create\ +mkdir/ && $mode eq 'client' ) { my @split = split( 
/[\t\ ]+/, $line ); $data->{stats}{client_rpc_Readlink} += $split[0]; $data->{stats}{client_rpc_Read} += $split[1]; $data->{stats}{client_rpc_Write} += $split[2]; $data->{stats}{client_rpc_Create} += $split[3]; $data->{stats}{client_rpc_Mkdir} += $split[4]; - } elsif ( $previous_line =~ /symlink\ +mknod\ +remove\ +rmdir\ +rename/ && $mode eq 'client' ) { + } elsif ( $previous_line =~ /^symlink\ +mknod\ +remove\ +rmdir\ +rename/ && $mode eq 'client' ) { my @split = split( /[\t\ ]+/, $line ); $data->{stats}{client_rpc_Symlink} += $split[0]; $data->{stats}{client_rpc_Mknod} += $split[1]; $data->{stats}{client_rpc_Remove} += $split[2]; $data->{stats}{client_rpc_Rmdir} += $split[3]; $data->{stats}{client_rpc_Rename} += $split[4]; - } elsif ( $previous_line =~ /link\ +readdir\ +readdirplus\ +fsstat\ +fsinfo/ && $mode eq 'client' ) { + } elsif ( $previous_line =~ /^link\ +readdir\ +readdirplus\ +fsstat\ +fsinfo/ && $mode eq 'client' ) { my @split = split( /[\t\ ]+/, $line ); $data->{stats}{client_rpc_Link} += $split[0]; $data->{stats}{client_rpc_Readdir} += $split[1]; $data->{stats}{client_rpc_Readdir} += $split[2]; $data->{stats}{client_rpc_Fsstat} += $split[3]; $data->{stats}{client_rpc_FSinfo} += $split[4]; - } elsif ( $previous_line =~ /pathconf\ +commit/ && $mode eq 'client' ) { + } elsif ( $previous_line =~ /^pathconf\ +commit/ && $mode eq 'client' ) { my @split = split( /[\t\ ]+/, $line ); $data->{stats}{client_rpc_pathConf} += $split[0]; $data->{stats}{client_rpc_Commit} += $split[1]; - } elsif ( $previous_line =~ /null\ +read\ +write\ +commit\ +open/ && $mode eq 'client' ) { + } elsif ( $previous_line =~ /^null\ +read\ +write\ +commit\ +open/ && $mode eq 'client' ) { my @split = split( /[\t\ ]+/, $line ); $data->{stats}{client_rpc_null} += $split[0]; $data->{stats}{client_rpc_Read} += $split[1]; $data->{stats}{client_rpc_Write} += $split[2]; $data->{stats}{client_rpc_Commit} += $split[3]; $data->{stats}{client_rpc_Open} += $split[4]; - } elsif ( $previous_line =~ /open_conf\ +open_noat\ +open_dgrd\ +close\ +setattr/ && $mode eq 'client' ) { + } elsif ( $previous_line =~ /^open_conf\ +open_noat\ +open_dgrd\ +close\ +setattr/ && $mode eq 'client' ) { my @split = split( /[\t\ ]+/, $line ); $data->{stats}{client_rpc_OpenCfr} += $split[0]; $data->{stats}{client_rpc_OpenLayout} += $split[1]; $data->{stats}{client_rpc_OpenDownGr} += $split[2]; $data->{stats}{client_rpc_Close} += $split[3]; $data->{stats}{client_rpc_Setattr} += $split[4]; - } elsif ( $previous_line =~ /fsinfo\ +renew\ +setclntid\ +confirm\ +lock/ && $mode eq 'client' ) { + } elsif ( $previous_line =~ /^fsinfo\ +renew\ +setclntid\ +confirm\ +lock/ && $mode eq 'client' ) { my @split = split( /[\t\ ]+/, $line ); $data->{stats}{client_rpc_FSinfo} += $split[0]; $data->{stats}{client_rpc_Rename} += $split[1]; $data->{stats}{client_rpc_SetClId} += $split[2]; $data->{stats}{client_rpc_confirm} += $split[3]; $data->{stats}{client_rpc_Lock} += $split[4]; - } elsif ( $previous_line =~ /lockt\ +locku\ +access\ +getattr\ +lookup/ && $mode eq 'client' ) { + } elsif ( $previous_line =~ /^lockt\ +locku\ +access\ +getattr\ +lookup/ && $mode eq 'client' ) { my @split = split( /[\t\ ]+/, $line ); $data->{stats}{client_rpc_LockT} += $split[0]; $data->{stats}{client_rpc_LockU} += $split[1]; $data->{stats}{client_rpc_Access} += $split[2]; $data->{stats}{client_rpc_Getattr} += $split[3]; $data->{stats}{client_rpc_Lookup} += $split[4]; - } elsif ( $previous_line =~ /lookup_root\ +remove\ +rename\ +link\ +symlink/ && $mode eq 'client' ) { + } elsif ( 
$previous_line =~ /^lookup_root\ +remove\ +rename\ +link\ +symlink/ && $mode eq 'client' ) { my @split = split( /[\t\ ]+/, $line ); $data->{stats}{client_rpc_LookupOpen} += $split[0]; $data->{stats}{client_rpc_Remove} += $split[1]; $data->{stats}{client_rpc_Rename} += $split[2]; $data->{stats}{client_rpc_Link} += $split[3]; $data->{stats}{client_rpc_Symlink} += $split[4]; - } elsif ( $previous_line =~ /create\ +pathconf\ +statfs\ +readlink\ +readdir/ && $mode eq 'client' ) { + } elsif ( $previous_line =~ /^create\ +pathconf\ +statfs\ +readlink\ +readdir/ && $mode eq 'client' ) { my @split = split( /[\t\ ]+/, $line ); $data->{stats}{client_rpc_Create} += $split[0]; $data->{stats}{client_rpc_pathConf} += $split[1]; $data->{stats}{client_rpc_Fsstat} += $split[2]; $data->{stats}{client_rpc_Readlink} += $split[3]; $data->{stats}{client_rpc_Readdir} += $split[4]; - } elsif ( $previous_line =~ /server_caps\ +delegreturn\ +getacl\ +setacl\ +fs_locations/ && $mode eq 'client' ) + } elsif ( $previous_line =~ /^server_caps\ +delegreturn\ +getacl\ +setacl\ +fs_locations/ && $mode eq 'client' ) { my @split = split( /[\t\ ]+/, $line ); $data->{stats}{client_rpc_server_caps} += $split[0]; @@ -1009,7 +987,7 @@ if ( $^O eq 'linux' ) { $data->{stats}{client_rpc_Getattr} += $split[2]; $data->{stats}{client_rpc_SetAcl} += $split[3]; $data->{stats}{client_rpc_fs_locations} += $split[4]; - } elsif ( $previous_line =~ /rel_lkowner\ +secinfo\ +fsid_present\ +exchange_id\ +create_session/ + } elsif ( $previous_line =~ /^rel_lkowner\ +secinfo\ +fsid_present\ +exchange_id\ +create_session/ && $mode eq 'client' ) { my @split = split( /[\t\ ]+/, $line ); @@ -1018,7 +996,7 @@ if ( $^O eq 'linux' ) { $data->{stats}{client_rpc_fsid_present} += $split[2]; $data->{stats}{client_rpc_ExchangeId} += $split[3]; $data->{stats}{client_rpc_CreateSess} += $split[4]; - } elsif ( $previous_line =~ /destroy_session\ +sequence\ +get_lease_time\ +reclaim_comp\ +layoutget/ + } elsif ( $previous_line =~ /^destroy_session\ +sequence\ +get_lease_time\ +reclaim_comp\ +layoutget/ && $mode eq 'client' ) { my @split = split( /[\t\ ]+/, $line ); @@ -1027,7 +1005,7 @@ if ( $^O eq 'linux' ) { $data->{stats}{client_rpc_get_lease_time} += $split[2]; $data->{stats}{client_rpc_ReclaimCompl} += $split[3]; $data->{stats}{client_rpc_LayoutGet} += $split[4]; - } elsif ( $previous_line =~ /getdevinfo\ +layoutcommit\ +layoutreturn\ +secinfo_no\ +test_stateid/ + } elsif ( $previous_line =~ /^getdevinfo\ +layoutcommit\ +layoutreturn\ +secinfo_no\ +test_stateid/ && $mode eq 'client' ) { my @split = split( /[\t\ ]+/, $line ); @@ -1036,7 +1014,7 @@ if ( $^O eq 'linux' ) { $data->{stats}{client_rpc_LayoutReturn} += $split[2]; $data->{stats}{client_rpc_secinfo_no} += $split[3]; $data->{stats}{client_rpc_test_stateid} += $split[4]; - } elsif ( $previous_line =~ /free_stateid\ +getdevicelist\ +bind_conn_to_ses\ +destroy_clientid\ +seek/ + } elsif ( $previous_line =~ /^free_stateid\ +getdevicelist\ +bind_conn_to_ses\ +destroy_clientid\ +seek/ && $mode eq 'client' ) { my @split = split( /[\t\ ]+/, $line ); @@ -1045,7 +1023,7 @@ if ( $^O eq 'linux' ) { $data->{stats}{client_rpc_BindConnSess} += $split[2]; $data->{stats}{client_rpc_DestroyClId} += $split[3]; $data->{stats}{client_rpc_Seek} += $split[4]; - } elsif ( $previous_line =~ /allocate\ +deallocate\ +layoutstats\ +clone/ && $mode eq 'client' ) { + } elsif ( $previous_line =~ /^allocate\ +deallocate\ +layoutstats\ +clone/ && $mode eq 'client' ) { my @split = split( /[\t\ ]+/, $line ); 
$data->{stats}{client_rpc_Allocate} += $split[0]; $data->{stats}{client_rpc_Deallocate} += $split[1]; From 6562441fbe205a0c11943fa903137f46ab9ab469 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Sun, 24 Mar 2024 20:20:57 -0500 Subject: [PATCH 288/332] initial add for poudriere --- snmp/poudriere | 356 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 356 insertions(+) create mode 100755 snmp/poudriere diff --git a/snmp/poudriere b/snmp/poudriere new file mode 100755 index 000000000..9469bb385 --- /dev/null +++ b/snmp/poudriere @@ -0,0 +1,356 @@ +#!/usr/bin/env perl + +=head1 NAME + +poudriere - LibreNMS JSON style SNMP extend for monitoring Poudriere + +=head1 VERSION + +0.0.1 + +=head1 SYNOPSIS + +poudriere [B<-w>] [B<-b>] [B<-o> ] + +poudriere --help|-h + +poudriere --version|-v + +=head1 SNMPD CONFIG + + extend poudriere /etc/snmp/extends/poudriere -b + +or if using cron... + + extend poudriere cat /var/cache/poudriere.json.snmp + +=head1 DESCRIPTION + +Uses showmount and nfsstat to gather information for the OSes below for NFS. + + FreeBSD + Linux + +=head1 FLAGS + +=head2 -w + +Write the results out. + +=head2 -b + +Print out the compressed data if GZip+Base64 is smaller. + +=head2 -o + +Where to write the results to. Defaults to '/var/cache/poudriere.json', +meaning it will be written out to the two locations. + + /var/cache/poudriere.json + /var/cache/poudriere.json.snmp + +The later is for use with returning data for SNMP. Will be compressed +if possible. + +=cut + +use strict; +use warnings; +use Getopt::Long; +use File::Slurp; +use MIME::Base64; +use IO::Compress::Gzip qw(gzip $GzipError); +use Pod::Usage; +use JSON; + +sub time_to_seconds{ + my $time=$_[0]; + + if (!defined($time)) { + return 0; + } + + if ($time=~/^0\:[0-9]+\.[0-9]+$/) { + $time=~s/^0\://; + return $time; + }elsif ($time=~/^[0-9]+\:[0-9]+\.[0-9]+$/) { + my $minutes=$time; + $minutes=~s/\:.*//; + $time=~s/.*\://; + $time = ($minutes * 60) + $time; + return $time; + }elsif ($time=~/^[0-9]+D\:[0-9]+\:[0-9]+\.[0-9]+$/) { + my $days=$time; + $days=~s/D\:.*$//; + my $minutes=$time; + $minutes=~s/^.*D\://; + $minutes=~s/\:.*//; + $time = ($days * 86400) + ($minutes * 60) + $time; + return $time; + } + + # return 0 for anything unknown + return 0; +} + +#the version of returned data +my $VERSION = 1; + +# ensure sbin is in the path +$ENV{PATH} = $ENV{PATH} . 
':/sbin:/usr/sbin:/usr/local/sbin:/bin:/usr/bin:/usr/local/bin'; + +my $pretty; +my $cache_base = '/var/cache/poudriere.json'; +my $write; +my $compress; +my $version; +my $help; +GetOptions( + 'o=s' => \$cache_base, + w => \$write, + b => \$compress, + v => \$version, + version => \$version, + h => \$help, + help => \$help, +); + +if ($version) { + pod2usage( -exitval => 255, -verbose => 99, -sections => qw(VERSION), -output => \*STDOUT, ); + exit 255; +} + +if ($help) { + pod2usage( -exitval => 255, -verbose => 2, -output => \*STDOUT, ); + exit 255; +} + +#the data to return +my $to_return = { + 'version' => $VERSION, + 'error' => '0', + 'errorString' => '', +}; +my $data = { + status => '', + build_info => '', + not_done => 0, + stats => { + 'copy-on-write-faults' => 0, + 'cpu-time' => 0, + 'data-size' => 0, + 'elapsed-times' => 0, + 'involuntary-context-switches' => 0, + 'job-control-count' => 0, + 'major-faults' => 0, + 'minor-faults' => 0, + 'percent-cpu' => 0, + 'percent-memory' => 0, + 'read-blocks' => 0, + 'received-messages' => 0, + 'rss' => 0, + 'sent-messages' => 0, + 'stack-size' => 0, + 'swaps' => 0, + 'system-time' => 0, + 'text-size' => 0, + 'threads' => 0, + 'user-time' => 0, + 'voluntary-context-switches' => 0, + 'written-blocks' => 0, + 'QUEUE' => 0, + 'BUILT' => 0, + 'FAIL' => 0, + 'SKIP' => 0, + 'IGNORE' => 0, + 'FETCH' => 0, + 'REMAIN' => 0, + 'TIME' => 0, + }, + jailANDportsANDset => {} +}; + +my @ps_stats = ( + 'copy-on-write-faults', 'cpu-time', + 'data-size', 'elapsed-times', + 'involuntary-context-switches', 'job-control-count', + 'major-faults', 'minor-faults', + 'percent-cpu', 'percent-memory', + 'read-blocks', 'received-messages', + 'rss', 'sent-messages', 'stack-size', + 'swaps', 'system-time', + 'text-size', 'threads', + 'user-time', 'voluntary-context-switches', +); + +my @poudriere_stats = ( 'QUEUE', 'BUILT', 'FAIL', 'SKIP', 'IGNORE', 'FETCH', 'REMAIN', 'TIME' ); + +### +### +### get basic info via calling poudriere status +### +### + +my $status_raw = `poudriere -N status -f 2> /dev/null`; +if ( $? == 0 ) { + $data->{status} = $status_raw; + $data->{build_info} = `poudriere -N status -f -b 2>&1`; + + my @status_split = split( /\n/, $status_raw ); + my $status_split_int = 1; + while ( defined( $status_split[$status_split_int] ) ) { + + my $jls; + eval { $jls = decode_json(`jls --libxo json`); }; + if ($@) { + $jls = { 'jail-information' => { jail => [] } }; + } + + my $found = { + 'copy-on-write-faults' => 0, + 'cpu-time' => 0, + 'data-size' => 0, + 'elapsed-times' => 0, + 'involuntary-context-switches' => 0, + 'job-control-count' => 0, + 'major-faults' => 0, + 'minor-faults' => 0, + 'percent-cpu' => 0, + 'percent-memory' => 0, + 'read-blocks' => 0, + 'received-messages' => 0, + 'rss' => 0, + 'sent-messages' => 0, + 'stack-size' => 0, + 'swaps' => 0, + 'system-time' => 0, + 'text-size' => 0, + 'threads' => 0, + 'user-time' => 0, + 'voluntary-context-switches' => 0, + }; + ( + $found->{SET}, $found->{PORTS}, $found->{JAIL}, $found->{BUILD}, $found->{STATUS}, + $found->{QUEUE}, $found->{BUILT}, $found->{FAIL}, $found->{SKIP}, $found->{IGNORE}, + $found->{FETCH}, $found->{REMAIN}, $found->{TIME}, $found->{LOGS} + ) = split( / +/, $status_split[$status_split_int], 14 ); + + if ($found->{STATUS} ne 'done') { + $data->{not_done}=1; + } + + my $jailANDportsANDset; + if ( $found->{SET} eq '-' ) { + $jailANDportsANDset = $found->{JAIL} . '-' . $found->{PORTS}; + } else { + $jailANDportsANDset = $found->{JAIL} . '-' . $found->{PORTS} . '-' . 
$found->{SET}; + } + + foreach my $item (@poudriere_stats) { + if ($item eq 'TIME') { + $found->{$item} = time_to_seconds($found->{$item}); + } + $data->{stats}{$item} += $found->{$item}; + } + + ## + ## find the jails + ## + my @jails; + my $jail_regex='^'.$jailANDportsANDset.'-job-[0-9]+'; + my $jls_int=0; + while (defined( $jls->{'jail-information'}{jail}[$jls_int] )) { + if ( + $jls->{'jail-information'}{jail}[$jls_int]{hostname} eq $jailANDportsANDset || + $jls->{'jail-information'}{jail}[$jls_int]{hostname} =~ /$jail_regex/ + ) { + push(@jails, $jls->{'jail-information'}{jail}[$jls_int]{jid}); + } + $jls_int++; + } + + ## + ## if we have found jails, grab the information via ps + ## + if (defined($jails[0])) { + my $jails_string=join(',', @jails); + + my $ps; + eval { + $ps = decode_json(`ps -o 'jid %cpu %mem rss cow dsiz etimes inblk jobc majflt minflt msgrcv msgsnd nivcsw nlwp nsigs nswap nvcsw oublk ssiz systime time tsiz usertime' --libxo json -J $jails_string`); + }; + if ($@) { + $ps = { 'process-information' => { process => [] } }; + } + my $ps_int=0; + while (defined( $ps->{'process-information'}{process}[$ps_int] )) { + foreach my $item (@ps_stats) { + if ($item eq 'user-time' || $item eq 'cpu-time' || $item eq 'system-time') { + $ps->{'process-information'}{process}[$ps_int]{$item} = time_to_seconds($ps->{'process-information'}{process}[$ps_int]{$item}); + } + $data->{stats}{$item} += $ps->{'process-information'}{process}[$ps_int]{$item}; + $found->{$item} += $ps->{'process-information'}{process}[$ps_int]{$item}; + } + $ps_int++; + } + } + + $data->{jailANDportsANDset}{$jailANDportsANDset} = $found; + $status_split_int++; + } ## end while ( defined( $status_split[$status_split_int...])) +} else { + $to_return->{error} = 1; + $to_return->{errorString} = 'non-zero exit for "poudriere status -f"'; +} + +### +### +### finalize it +### +### + +#add the data has to the return hash +$to_return->{data} = $data; + +#finally render the JSON +my $raw_json = encode_json($to_return); +if ($write) { + write_file( $cache_base, $raw_json ); + # compress and write to the cache file for it + my $compressed_string; + gzip \$raw_json => \$compressed_string; + my $compressed = encode_base64($compressed_string); + $compressed =~ s/\n//g; + $compressed = $compressed . "\n"; + my $print_compressed = 0; + if ( length($compressed) > length($raw_json) ) { + write_file( $cache_base . '.snmp', $raw_json ); + } else { + write_file( $cache_base . '.snmp', $compressed ); + $print_compressed = 1; + } + + if ( $compress && $print_compressed ) { + print $compressed; + } else { + print $raw_json; + } +} else { + if ( !$compress ) { + print $raw_json. "\n"; + exit; + } + + # compress and write to the cache file for it + my $compressed_string; + gzip \$raw_json => \$compressed_string; + my $compressed = encode_base64($compressed_string); + $compressed =~ s/\n//g; + $compressed = $compressed . "\n"; + my $print_compressed = 0; + if ( length($compressed) > length($raw_json) ) { + print $raw_json; + } else { + print $compressed; + } +} ## end else [ if ($write) ] From b3e8132e84e9da21ab0e5a70f1bcab81350b79d9 Mon Sep 17 00:00:00 2001 From: "Zane C. 
Bowers-Hadley" Date: Mon, 25 Mar 2024 20:41:30 -0500 Subject: [PATCH 289/332] gatcher info on build stage --- snmp/poudriere | 193 ++++++++++++++++++++++++++++++++++--------------- 1 file changed, 136 insertions(+), 57 deletions(-) diff --git a/snmp/poudriere b/snmp/poudriere index 9469bb385..900fd9b64 100755 --- a/snmp/poudriere +++ b/snmp/poudriere @@ -26,11 +26,6 @@ or if using cron... =head1 DESCRIPTION -Uses showmount and nfsstat to gather information for the OSes below for NFS. - - FreeBSD - Linux - =head1 FLAGS =head2 -w @@ -63,35 +58,35 @@ use IO::Compress::Gzip qw(gzip $GzipError); use Pod::Usage; use JSON; -sub time_to_seconds{ - my $time=$_[0]; +sub time_to_seconds { + my $time = $_[0]; - if (!defined($time)) { + if ( !defined($time) ) { return 0; } - if ($time=~/^0\:[0-9]+\.[0-9]+$/) { - $time=~s/^0\://; + if ( $time =~ /^0\:[0-9]+\.[0-9]+$/ ) { + $time =~ s/^0\://; return $time; - }elsif ($time=~/^[0-9]+\:[0-9]+\.[0-9]+$/) { - my $minutes=$time; - $minutes=~s/\:.*//; - $time=~s/.*\://; - $time = ($minutes * 60) + $time; + } elsif ( $time =~ /^[0-9]+\:[0-9]+\.[0-9]+$/ ) { + my $minutes = $time; + $minutes =~ s/\:.*//; + $time =~ s/.*\://; + $time = ( $minutes * 60 ) + $time; return $time; - }elsif ($time=~/^[0-9]+D\:[0-9]+\:[0-9]+\.[0-9]+$/) { - my $days=$time; - $days=~s/D\:.*$//; - my $minutes=$time; - $minutes=~s/^.*D\://; - $minutes=~s/\:.*//; - $time = ($days * 86400) + ($minutes * 60) + $time; + } elsif ( $time =~ /^[0-9]+D\:[0-9]+\:[0-9]+\.[0-9]+$/ ) { + my $days = $time; + $days =~ s/D\:.*$//; + my $minutes = $time; + $minutes =~ s/^.*D\://; + $minutes =~ s/\:.*//; + $time = ( $days * 86400 ) + ( $minutes * 60 ) + $time; return $time; } # return 0 for anything unknown return 0; -} +} ## end sub time_to_seconds #the version of returned data my $VERSION = 1; @@ -133,8 +128,8 @@ my $to_return = { }; my $data = { status => '', - build_info => '', - not_done => 0, + build_info => '', + not_done => 0, stats => { 'copy-on-write-faults' => 0, 'cpu-time' => 0, @@ -166,21 +161,32 @@ my $data = { 'FETCH' => 0, 'REMAIN' => 0, 'TIME' => 0, + 'check-sanity' => 0, + 'pkg-depends' => 0, + 'fetch-depends' => 0, + 'fetch checksum' => 0, + 'extract-depends' => 0, + 'extract' => 0, + 'patch-depends' => 0, + 'patch' => 0, + 'build-depends' => 0, + 'lib-depends' => 0, + 'configure' => 0, + 'build' => 0, + 'run-depends' => 0, + 'stage' => 0, + 'package' => 0, }, jailANDportsANDset => {} }; my @ps_stats = ( - 'copy-on-write-faults', 'cpu-time', - 'data-size', 'elapsed-times', - 'involuntary-context-switches', 'job-control-count', - 'major-faults', 'minor-faults', - 'percent-cpu', 'percent-memory', - 'read-blocks', 'received-messages', - 'rss', 'sent-messages', 'stack-size', - 'swaps', 'system-time', - 'text-size', 'threads', - 'user-time', 'voluntary-context-switches', + 'copy-on-write-faults', 'cpu-time', 'data-size', 'elapsed-times', + 'involuntary-context-switches', 'job-control-count', 'major-faults', 'minor-faults', + 'percent-cpu', 'percent-memory', 'read-blocks', 'received-messages', + 'rss', 'sent-messages', 'stack-size', 'swaps', + 'system-time', 'text-size', 'threads', 'user-time', + 'voluntary-context-switches', ); my @poudriere_stats = ( 'QUEUE', 'BUILT', 'FAIL', 'SKIP', 'IGNORE', 'FETCH', 'REMAIN', 'TIME' ); @@ -228,6 +234,21 @@ if ( $? 
== 0 ) { 'threads' => 0, 'user-time' => 0, 'voluntary-context-switches' => 0, + 'check-sanity' => 0, + 'pkg-depends' => 0, + 'fetch-depends' => 0, + 'fetch checksum' => 0, + 'extract-depends' => 0, + 'extract' => 0, + 'patch-depends' => 0, + 'patch' => 0, + 'build-depends' => 0, + 'lib-depends' => 0, + 'configure' => 0, + 'build' => 0, + 'run-depends' => 0, + 'stage' => 0, + 'package' => 0, }; ( $found->{SET}, $found->{PORTS}, $found->{JAIL}, $found->{BUILD}, $found->{STATUS}, @@ -235,8 +256,8 @@ if ( $? == 0 ) { $found->{FETCH}, $found->{REMAIN}, $found->{TIME}, $found->{LOGS} ) = split( / +/, $status_split[$status_split_int], 14 ); - if ($found->{STATUS} ne 'done') { - $data->{not_done}=1; + if ( $found->{STATUS} ne 'done' ) { + $data->{not_done} = 1; } my $jailANDportsANDset; @@ -247,8 +268,8 @@ if ( $? == 0 ) { } foreach my $item (@poudriere_stats) { - if ($item eq 'TIME') { - $found->{$item} = time_to_seconds($found->{$item}); + if ( $item eq 'TIME' ) { + $found->{$item} = time_to_seconds( $found->{$item} ); } $data->{stats}{$item} += $found->{$item}; } @@ -257,14 +278,13 @@ if ( $? == 0 ) { ## find the jails ## my @jails; - my $jail_regex='^'.$jailANDportsANDset.'-job-[0-9]+'; - my $jls_int=0; - while (defined( $jls->{'jail-information'}{jail}[$jls_int] )) { - if ( - $jls->{'jail-information'}{jail}[$jls_int]{hostname} eq $jailANDportsANDset || - $jls->{'jail-information'}{jail}[$jls_int]{hostname} =~ /$jail_regex/ - ) { - push(@jails, $jls->{'jail-information'}{jail}[$jls_int]{jid}); + my $jail_regex = '^' . $jailANDportsANDset . '-job-[0-9]+'; + my $jls_int = 0; + while ( defined( $jls->{'jail-information'}{jail}[$jls_int] ) ) { + if ( $jls->{'jail-information'}{jail}[$jls_int]{hostname} eq $jailANDportsANDset + || $jls->{'jail-information'}{jail}[$jls_int]{hostname} =~ /$jail_regex/ ) + { + push( @jails, $jls->{'jail-information'}{jail}[$jls_int]{jid} ); } $jls_int++; } @@ -272,32 +292,91 @@ if ( $? 
== 0 ) { ## ## if we have found jails, grab the information via ps ## - if (defined($jails[0])) { - my $jails_string=join(',', @jails); + if ( defined( $jails[0] ) ) { + my $jails_string = join( ',', @jails ); my $ps; eval { - $ps = decode_json(`ps -o 'jid %cpu %mem rss cow dsiz etimes inblk jobc majflt minflt msgrcv msgsnd nivcsw nlwp nsigs nswap nvcsw oublk ssiz systime time tsiz usertime' --libxo json -J $jails_string`); - }; + $ps + = decode_json( + `ps -o 'jid %cpu %mem rss cow dsiz etimes inblk jobc majflt minflt msgrcv msgsnd nivcsw nlwp nsigs nswap nvcsw oublk ssiz systime time tsiz usertime' --libxo json -J $jails_string` + ); + }; if ($@) { $ps = { 'process-information' => { process => [] } }; } - my $ps_int=0; - while (defined( $ps->{'process-information'}{process}[$ps_int] )) { + my $ps_int = 0; + while ( defined( $ps->{'process-information'}{process}[$ps_int] ) ) { foreach my $item (@ps_stats) { - if ($item eq 'user-time' || $item eq 'cpu-time' || $item eq 'system-time') { - $ps->{'process-information'}{process}[$ps_int]{$item} = time_to_seconds($ps->{'process-information'}{process}[$ps_int]{$item}); + if ( $item eq 'user-time' || $item eq 'cpu-time' || $item eq 'system-time' ) { + $ps->{'process-information'}{process}[$ps_int]{$item} + = time_to_seconds( $ps->{'process-information'}{process}[$ps_int]{$item} ); } $data->{stats}{$item} += $ps->{'process-information'}{process}[$ps_int]{$item}; $found->{$item} += $ps->{'process-information'}{process}[$ps_int]{$item}; } $ps_int++; - } - } + } ## end while ( defined( $ps->{'process-information'}...)) + } ## end if ( defined( $jails[0] ) ) $data->{jailANDportsANDset}{$jailANDportsANDset} = $found; $status_split_int++; } ## end while ( defined( $status_split[$status_split_int...])) + + my @build_info_split = split( /\n/, $data->{build_info} ); + my $current_section; + foreach my $line (@build_info_split) { + if ( $line =~ /^\[.*\]\ \[.*\] .*Queued.*Built/ ) { + $current_section = $line; + $current_section =~ s/^\[//; + $current_section =~ s/\].*$//; + } elsif ( $line =~ /^\[.*\].*\:.*\|.*\:/ ) { + my $type; + if ( $line =~ /[\ \t]check\-sanity[\ \t]/ ) { + $type = 'check-sanity'; + } elsif ( $line =~ /[\ \t]pkg-depends[\ \t]/ ) { + $type = 'pkg-depends'; + } elsif ( $line =~ /[\ \t]fetch-depends[\ \t]/ ) { + $type = 'fetch-depends'; + } elsif ( $line =~ /[\ \t]fetch[\ \t]/ ) { + $type = 'fetch'; + } elsif ( $line =~ /[\ \t]checksum[\ \t]/ ) { + $type = 'checksum'; + } elsif ( $line =~ /[\ \t]extract\-depends[\ \t]/ ) { + $type = 'extract-depends'; + } elsif ( $line =~ /[\ \t]extract[\ \t]/ ) { + $type = 'extract'; + } elsif ( $line =~ /[\ \t]patch-depends[\ \t]/ ) { + $type = 'patch-depends'; + } elsif ( $line =~ /[\ \t]lib\-depends[\ \t]/ ) { + $type = 'lib-depends'; + } elsif ( $line =~ /[\ \t]configure[\ \t]/ ) { + $type = 'configure'; + } elsif ( $line =~ /[\ \t]build[\ \t]/ ) { + $type = 'build'; + } elsif ( $line =~ /[\ \t]build\-depends[\ \t]/ ) { + $type = 'build-depends'; + } elsif ( $line =~ /[\ \t]lib\-depends[\ \t]/ ) { + $type = 'lib-depends'; + } elsif ( $line =~ /[\ \t]configure[\ \t]/ ) { + $type = 'configure'; + } elsif ( $line =~ /[\ \t]build[\ \t]/ ) { + $type = 'build'; + } elsif ( $line =~ /[\ \t]run\-depends[\ \t]/ ) { + $type = 'run-depends'; + } elsif ( $line =~ /[\ \t]stage[\ \t]/ ) { + $type = 'stage'; + } elsif ( $line =~ /[\ \t]package[\ \t]/ ) { + $type = 'package'; + } + if (defined($type)) { + $data->{stats}{$type}++; + if (defined($data->{jailANDportsANDset}{$current_section})) { + 
$data->{jailANDportsANDset}{$current_section}{$type}++; + } + } + } ## end elsif ( $line =~ /^\[[0-9]+\].*\/.*\|.*-.*\:/) + } ## end foreach my $line (@build_info_split) } else { $to_return->{error} = 1; $to_return->{errorString} = 'non-zero exit for "poudriere status -f"'; From e5ec0480e6cd2256c98fffc9f4ef109e2e4edd16 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Tue, 26 Mar 2024 17:55:27 -0500 Subject: [PATCH 290/332] a few minor tweaks --- snmp/poudriere | 2 -- 1 file changed, 2 deletions(-) diff --git a/snmp/poudriere b/snmp/poudriere index 900fd9b64..b413fc895 100755 --- a/snmp/poudriere +++ b/snmp/poudriere @@ -24,8 +24,6 @@ or if using cron... extend poudriere cat /var/cache/poudriere.json.snmp -=head1 DESCRIPTION - =head1 FLAGS =head2 -w From 716519db9558249da85bd700fc54c65dcc1df033 Mon Sep 17 00:00:00 2001 From: Edwin Hoksberg Date: Tue, 23 Apr 2024 14:45:41 +0200 Subject: [PATCH 291/332] [add-opensearch-options] Add secure(-S) and disable hostname validation(-i) options for opensearch script --- snmp/opensearch | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/snmp/opensearch b/snmp/opensearch index 5b731b2eb..91f8752d4 100755 --- a/snmp/opensearch +++ b/snmp/opensearch @@ -35,6 +35,8 @@ Supported command line options are as below. Default: 127.0.0.1 -p The port to use. Default: 9200 + -S Use https instead of http. + -I Do not verify hostname (when used with -S). -P Pretty print. The last is only really relevant to the usage with SNMP. @@ -59,21 +61,27 @@ sub main::HELP_MESSAGE { . " Default: 127.0.0.1\n" . "-p The port to use.\n" . " Default: 9200\n" + . "-S Use https instead of http.\n" + . "-I Do not verify hostname (when used with -S).\n" . "-P Pretty print.\n"; } +my $protocol = 'http'; my $host = '127.0.0.1'; my $port = 9200; #gets the options my %opts; -getopts( 'h:p:P', \%opts ); +getopts( 'h:p:SIP', \%opts ); if ( defined( $opts{h} ) ) { $host = $opts{h}; } if ( defined( $opts{p} ) ) { $port = $opts{p}; } +if ( $opts{S} ) { + $protocol = 'https'; +} # my $to_return = { @@ -83,8 +91,8 @@ my $to_return = { date => {}, }; -my $stats_url = 'http://' . $host . ':' . $port . '/_stats'; -my $health_url = 'http://' . $host . ':' . $port . '/_cluster/health'; +my $stats_url = $protocol . '://' . $host . ':' . $port . '/_stats'; +my $health_url = $protocol . '://' . $host . ':' . $port . '/_cluster/health'; my $json = JSON->new->allow_nonref->canonical(1); if ( $opts{P} ) { @@ -93,6 +101,10 @@ if ( $opts{P} ) { my $ua = LWP::UserAgent->new( timeout => 10 ); +if ( $opts{I} ) { + $ua->ssl_opts( verify_hostname => 0, SSL_verify_mode => 0x00 ); +} + my $stats_response = $ua->get($stats_url); my $stats_json; if ( $stats_response->is_success ) { From 68da0b47cd353cc79e22e815d5762e32ddaf6c20 Mon Sep 17 00:00:00 2001 From: Marco Valle Date: Sun, 28 Apr 2024 18:50:24 +0200 Subject: [PATCH 292/332] HTTPS and CA file validation implemented. --- snmp/opensearch | 22 ++++++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) diff --git a/snmp/opensearch b/snmp/opensearch index 5b731b2eb..300115a32 100755 --- a/snmp/opensearch +++ b/snmp/opensearch @@ -31,11 +31,14 @@ Add this to snmpd.conf as below and restart snmpd. Supported command line options are as below. + -c CA file path. + Default: empty -h The host to connect to. Default: 127.0.0.1 -p The port to use. Default: 9200 -P Pretty print. + -S Use HTTPS. The last is only really relevant to the usage with SNMP. 
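For reference, a hypothetical snmpd.conf extend line using the TLS options added in the two patches above might look like the following; the paths and the choice of flags are illustrative, not taken from the patches themselves:

    # self-signed certificate, skip hostname verification
    extend opensearch /etc/snmp/opensearch -S -I
    # or validate against a specific CA bundle
    extend opensearch /etc/snmp/opensearch -S -c /usr/local/etc/ssl/opensearch-ca.pem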
@@ -55,25 +58,31 @@ sub main::VERSION_MESSAGE {

 sub main::HELP_MESSAGE {
 	print "\n"
+		. "-c   CA file path.\n"
 		. "-h   The host to connect to.\n"
 		. "     Default: 127.0.0.1\n"
 		. "-p   The port to use.\n"
 		. "     Default: 9200\n"
-		. "-P   Pretty print.\n";
+		. "-P   Pretty print.\n"
+		. "-S   Use HTTPS.\n";
 }

 my $host = '127.0.0.1';
 my $port = 9200;
+my $schema = 'http';

 #gets the options
 my %opts;
-getopts( 'h:p:P', \%opts );
+getopts( 'c:h:p:PS', \%opts );
 if ( defined( $opts{h} ) ) {
 	$host = $opts{h};
 }
 if ( defined( $opts{p} ) ) {
 	$port = $opts{p};
 }
+if ( $opts{S} ) {
+	$schema = 'https';
+}

 #
 my $to_return = {
@@ -83,8 +92,8 @@ my $to_return = {
 	date  => {},
 };

-my $stats_url  = 'http://' . $host . ':' . $port . '/_stats';
-my $health_url = 'http://' . $host . ':' . $port . '/_cluster/health';
+my $stats_url  = $schema . '://' . $host . ':' . $port . '/_stats';
+my $health_url = $schema . '://' . $host . ':' . $port . '/_cluster/health';

 my $json = JSON->new->allow_nonref->canonical(1);
 if ( $opts{P} ) {
@@ -93,6 +102,11 @@ if ( $opts{P} ) {

 my $ua = LWP::UserAgent->new( timeout => 10 );

+if ( defined( $opts{c} ) ) {
+	# set ca file
+	$ua->ssl_opts( SSL_ca_file => $opts{c});
+}
+
 my $stats_response = $ua->get($stats_url);
 my $stats_json;
 if ( $stats_response->is_success ) {

From 5dac45c386d1df8bef3cd80483de3125f10c76f0 Mon Sep 17 00:00:00 2001
From: Marco Valle
Date: Sun, 28 Apr 2024 18:58:30 +0200
Subject: [PATCH 293/332] Authorization header from file implemented.

---
 snmp/opensearch | 28 ++++++++++++++++++++++++++--
 1 file changed, 25 insertions(+), 3 deletions(-)

diff --git a/snmp/opensearch b/snmp/opensearch
index 300115a32..cac3d5093 100755
--- a/snmp/opensearch
+++ b/snmp/opensearch
@@ -31,6 +31,7 @@ Add this to snmpd.conf as below and restart snmpd.

 Supported command line options are as below.

+    -a   Auth token path.
     -c   CA file path.
          Default: empty
     -h   The host to connect to.
@@ -58,6 +59,7 @@ sub main::VERSION_MESSAGE {

 sub main::HELP_MESSAGE {
 	print "\n"
+		. "-a   Auth token path.\n"
 		. "-c   CA file path.\n"
 		. "-h   The host to connect to.\n"
 		. "     Default: 127.0.0.1\n"
@@ -73,7 +75,7 @@ my $schema = 'http';

 #gets the options
 my %opts;
-getopts( 'c:h:p:PS', \%opts );
+getopts( 'a:c:h:p:PS', \%opts );
 if ( defined( $opts{h} ) ) {
 	$host = $opts{h};
 }
@@ -84,6 +86,14 @@ if ( $opts{S} ) {
 	$schema = 'https';
 }

+my $auth_token;
+if ( defined( $opts{a} ) ) {
+	open my $auth_file, '<', $opts{a};
+	$auth_token = <$auth_file>;
+	close $auth_file;
+	chomp $auth_token;
+}
+
 #
 my $to_return = {
 	error       => 0,
@@ -107,7 +117,13 @@ if ( defined( $opts{c} ) ) {
 	$ua->ssl_opts( SSL_ca_file => $opts{c});
 }

-my $stats_response = $ua->get($stats_url);
+my $stats_response;
+if ( defined( $opts{a} ) ) {
+	$stats_response = $ua->get($stats_url, "Authorization" => $auth_token,);
+} else {
+	$stats_response = $ua->get($stats_url);
+}
+
 my $stats_json;
 if ( $stats_response->is_success ) {
 	eval { $stats_json = decode_json( $stats_response->decoded_content ); };
@@ -131,7 +147,13 @@ else {
 	exit;
 }

-my $health_response = $ua->get($health_url);
+my $health_response;
+if ( defined( $opts{a} ) ) {
+	$health_response = $ua->get($health_url, "Authorization" => $auth_token,);
+} else {
+	$health_response = $ua->get($health_url);
+}
+
 my $health_json;
 if ( $health_response->is_success ) {
 	eval { $health_json = decode_json( $health_response->decoded_content ); };
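Since the first line of the token file is sent verbatim as the Authorization header value, a hypothetical setup for HTTP basic auth could look like the following; the username, password, and file path are illustrative, not from the patch:

    printf 'Basic %s\n' "$(printf 'monitor:secret' | base64)" > /etc/snmp/opensearch.auth
    chmod 600 /etc/snmp/opensearch.auth
    # then in snmpd.conf
    extend opensearch /etc/snmp/opensearch -S -a /etc/snmp/opensearch.auth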
Bowers-Hadley" Date: Sun, 12 May 2024 10:21:20 -0500 Subject: [PATCH 294/332] begin work on redis.pl --- snmp/redis.pl | 104 ++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 104 insertions(+) create mode 100644 snmp/redis.pl diff --git a/snmp/redis.pl b/snmp/redis.pl new file mode 100644 index 000000000..490ef6d81 --- /dev/null +++ b/snmp/redis.pl @@ -0,0 +1,104 @@ +#!/usr/bin/env perl + +=head1 NAME + +logsize - LinbreNMS JSON extend for redis. + +=head1 SYNOPSIS + +logsize [B<-B>] + +=head1 SWITCHES + +=head2 -B + +Do not the return output via GZip+Base64. + +=head1 SETUP + +Install the depends. + + # FreeBSD + pkg install p5-JSON p5-TOML p5-MIME-Base64 + # Debian + apt-get install libjson-perl libmime-base64-perl + +Create the cache dir, by default "/var/cache/". + +Then set it up in SNMPD. + + # if running it via cron + extend redis /usr/local/etc/snmp/redis.pl + +=cut + +use warnings; +use strict; +use JSON; +use Getopt::Std; +use MIME::Base64; +use IO::Compress::Gzip qw(gzip $GzipError); +use File::Slurp; + +$Getopt::Std::STANDARD_HELP_VERSION = 1; + +sub main::VERSION_MESSAGE { + print "LibreNMS redis extend 0.0.1\n"; +} + +sub main::HELP_MESSAGE { + print ' + +-B Do not use Gzip+Base64 for the output. +'; +} + +my $return_json = { + error => 0, + errorString => '', + version => 1, + data => { + }, +}; + +#gets the options +my %opts = (); +getopts( 'B', \%opts ); + +# ensure that $ENV{PATH} has has it +$ENV{PATH}=$ENV{PATH}.':/usr/bin:/usr/sbin:/usr/local/sbin:/usr/local/bin'; + +my $output_raw=`redis-cli info 2> /dev/null`; +if ($? != 0) { + $return_json->{error}=1; + $return_json->{error}='redis-cli info exited non-zero'; + print encode_json($return_json)."\n"; +} + +$output_raw=~s/\r//g; +my $section; +foreach my $line (split(/\n/, $output_raw)) { + if ($line ne '' && $line =~ /^# /) { + $line =~ s/^# //; + $section= $line; + $return_json->{data}{$section}={}; + }elsif ($line ne '' && defined($section)) { + my ($key, $value)=split(/\:/, $line); + if (defined($key) && defined($value)) { + $return_json->{data}{$section}{$key}=$value; + } + } +} + +my $return_json_raw=encode_json($return_json); +if ($opts{B}) { + print $return_json_raw."\n"; + exit 0; +} + +my $toReturnCompressed; +gzip \$return_json_raw => \$toReturnCompressed; +my $compressed = encode_base64($toReturnCompressed); +$compressed =~ s/\n//g; +$compressed = $compressed . "\n"; +print $compressed; From bf89867d6d44dfe7861b5e83058c3829aad8032f Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Sun, 12 May 2024 10:41:14 -0500 Subject: [PATCH 295/332] add help/version info --- snmp/redis.pl | 70 +++++++++++++++++++++++++++++++-------------------- 1 file changed, 43 insertions(+), 27 deletions(-) mode change 100644 => 100755 snmp/redis.pl diff --git a/snmp/redis.pl b/snmp/redis.pl old mode 100644 new mode 100755 index 490ef6d81..3d4a80830 --- a/snmp/redis.pl +++ b/snmp/redis.pl @@ -2,7 +2,7 @@ =head1 NAME -logsize - LinbreNMS JSON extend for redis. +redis.pl - LinbreNMS JSON extend for redis. =head1 SYNOPSIS @@ -14,6 +14,14 @@ =head2 -B Do not the return output via GZip+Base64. +=head2 -h|--help + +Print help info. + +=head2 -v|--version + +Print version info. + =head1 SETUP Install the depends. @@ -39,6 +47,7 @@ =head1 SETUP use MIME::Base64; use IO::Compress::Gzip qw(gzip $GzipError); use File::Slurp; +use Pod::Usage; $Getopt::Std::STANDARD_HELP_VERSION = 1; @@ -47,52 +56,59 @@ sub main::VERSION_MESSAGE { } sub main::HELP_MESSAGE { - print ' - --B Do not use Gzip+Base64 for the output. 
-'; + pod2usage( -exitval => 255, -verbose => 2, -output => \*STDOUT, ); } my $return_json = { error => 0, errorString => '', version => 1, - data => { - }, + data => {}, }; #gets the options my %opts = (); -getopts( 'B', \%opts ); +getopts( 'Bhv', \%opts ); + +if ( $opts{v} ) { + main::VERSION_MESSAGE; + exit 256; +} + +if ( $opts{h} ) { + main::VERSION_MESSAGE; + pod2usage( -exitval => 255, -verbose => 2, -output => \*STDOUT, ); + exit 256; +} # ensure that $ENV{PATH} has has it -$ENV{PATH}=$ENV{PATH}.':/usr/bin:/usr/sbin:/usr/local/sbin:/usr/local/bin'; +$ENV{PATH} = $ENV{PATH} . ':/usr/bin:/usr/sbin:/usr/local/sbin:/usr/local/bin'; -my $output_raw=`redis-cli info 2> /dev/null`; -if ($? != 0) { - $return_json->{error}=1; - $return_json->{error}='redis-cli info exited non-zero'; - print encode_json($return_json)."\n"; +my $output_raw = `redis-cli info 2> /dev/null`; +if ( $? != 0 ) { + $return_json->{error} = 1; + $return_json->{error} = 'redis-cli info exited non-zero'; + print encode_json($return_json) . "\n"; } -$output_raw=~s/\r//g; +$output_raw =~ s/\r//g; my $section; -foreach my $line (split(/\n/, $output_raw)) { - if ($line ne '' && $line =~ /^# /) { +foreach my $line ( split( /\n/, $output_raw ) ) { + if ( $line ne '' && $line =~ /^# / ) { $line =~ s/^# //; - $section= $line; - $return_json->{data}{$section}={}; - }elsif ($line ne '' && defined($section)) { - my ($key, $value)=split(/\:/, $line); - if (defined($key) && defined($value)) { - $return_json->{data}{$section}{$key}=$value; + $section = $line; + $return_json->{data}{$section} = {}; + } elsif ( $line ne '' && defined($section) ) { + my ( $key, $value ) = split( /\:/, $line ); + if ( defined($key) && defined($value) ) { + $return_json->{data}{$section}{$key} = $value; } } -} +} ## end foreach my $line ( split( /\n/, $output_raw ) ) -my $return_json_raw=encode_json($return_json); -if ($opts{B}) { - print $return_json_raw."\n"; +my $return_json_raw = encode_json($return_json); +if ( $opts{B} ) { + print $return_json_raw. "\n"; exit 0; } From 3bb1624fdd90ce943fcccbb2c3cc049245e47032 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Tue, 4 Jun 2024 00:51:47 -0500 Subject: [PATCH 296/332] ignore inactive arrays for now --- snmp/mdadm | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/snmp/mdadm b/snmp/mdadm index b0c9b3c5f..023af68a5 100755 --- a/snmp/mdadm +++ b/snmp/mdadm @@ -1,7 +1,7 @@ #!/usr/bin/env bash # MDADM SNMP extension for LibreNMS # Version -extendVer='2.0.0' +extendVer='2' # Initial portion of json mdadmSNMPOutput='{ "data": [' @@ -40,6 +40,10 @@ main() { [[ "${mdadmArray}" =~ '/dev/md'[[:digit:]]+'p' ]] && continue mdadmName="$(basename "$(realpath "${mdadmArray}")")" + + # Ignore inactive arrays + [[ $(grep "^${mdadmName}" /proc/mdstat) =~ 'inactive' ]] && continue + mdadmSysDev="/sys/block/${mdadmName}" degraded=$(maybe_get "${mdadmSysDev}/md/degraded") From e5cf71e1a3a205469d9b74a95640c2b777dc4753 Mon Sep 17 00:00:00 2001 From: "Zane C. 
Bowers-Hadley" Date: Sun, 9 Jun 2024 22:56:30 -0500 Subject: [PATCH 297/332] add new php-fpm extend that supports multiple instances (#525) --- snmp/php-fpm | 235 +++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 235 insertions(+) create mode 100755 snmp/php-fpm diff --git a/snmp/php-fpm b/snmp/php-fpm new file mode 100755 index 000000000..7413a5aa3 --- /dev/null +++ b/snmp/php-fpm @@ -0,0 +1,235 @@ +#!/usr/bin/env perl + +=head1 NAME + +php-fpm - LibreNMS JSON SNMP extend for gathering information for php-fpm + +=head1 VERSION + +0.0.1 + +=head1 DESCRIPTION + +For more information, see L. + +=head1 SWITCHES + +=head1 -c + +The config file to use. + +Default: /usr/local/etc/php-fpm_extend.json + +=head2 -C + +Do not compress the information return using GZip+Base64. + +=head1 -h|--help + +Print help info. + +=head1 -v|--version + +Print version info. + +=head1 CONFIG FILE + +The config file is a JSON file. + + - .instances :: An hash of instances to fetch. The key represents the + instance name and value is the URL to fetch, minus the '?json' bit. + Default :: undef + + - .use_exec :: A boolean for instances values should be treated as a command + instead of a URL. All instances must be a command and can not be a lone URL. + The returned data is expected to be parsable JSON data. + Default :: 0 + +Example... + + { + "instances": { + "thefrog": "https://thefrog/fpm-status", + "foobar": "https://foo.bar/fpm-status" + } + } + +A use_exec example... + + { + "instances": { + "thefrog": "curl 'https://thefrog/fpm-status?json' 2> /dev/null", + "foobar": "curl 'https://foo.bar/fpm-status?json' 2> /dev/null", + }, + "use_exec": 1 + } + +=cut + +use strict; +use warnings; +use JSON; +use Getopt::Long; +use File::Slurp; +use IO::Compress::Gzip qw(gzip $GzipError); +use MIME::Base64; +use Pod::Usage; +use String::ShellQuote; + +sub return_the_data { + my $to_return = $_[0]; + my $do_not_compress = $_[1]; + + my $to_return_string = encode_json($to_return); + + if ($do_not_compress) { + print $to_return_string . "\n"; + return; + } + + my $toReturnCompressed; + gzip \$to_return_string => \$toReturnCompressed; + my $compressed = encode_base64($toReturnCompressed); + $compressed =~ s/\n//g; + $compressed = $compressed . 
"\n"; + print $compressed; +} ## end sub return_the_data + +#gets the options +my %opts; +my $do_not_compress; +my $version; +my $help; +my $config_file = '/usr/local/etc/php-fpm_extend.json'; +GetOptions( + C => \$do_not_compress, + v => \$version, + version => \$version, + h => \$help, + help => \$help, +); + +if ($version) { + pod2usage( -exitval => 255, -verbose => 99, -sections => qw(VERSION), -output => \*STDOUT, ); +} + +if ($help) { + pod2usage( -exitval => 255, -verbose => 2, -output => \*STDOUT, ); +} + +my @to_total = ( + "accepted conn", + "active processes", + "idle processes", + "listen queue", + "listen queue len", + "max active processes", + "max children reached", + "max listen queue", + "slow requests", + "total processes", +); + +my $to_return = { + data => { + instances => {}, + instance_errors => {}, + errored => 0, + totals => { + "accepted conn" => 0, + "active processes" => 0, + "idle processes" => 0, + "listen queue" => 0, + "listen queue len" => 0, + "max active processes" => 0, + "max children reached" => 0, + "max listen queue" => 0, + "slow requests" => 0, + "total processes" => 0, + }, + }, + version => 1, + error => 0, + errorString => '', +}; + +# error if the config does not exist +if ( !-f $config_file ) { + $to_return->{errorString} = 'Config file, "' . $config_file . '", does not exist'; + $to_return->{error} = 1; + return_the_data( $to_return, $do_not_compress ); + exit 1; +} + +# read the config and decode it +my $config; +eval { + my $raw_config = read_file($config_file); + $config = decode_json($raw_config); +}; +if ($@) { + $to_return->{errorString} = 'Reading config errored... ' . $@; + $to_return->{error} = 2; + return_the_data( $to_return, $do_not_compress ); + exit 1; +} + +# ensure the config is basically sane +if ( !defined( $config->{instances} ) ) { + $to_return->{errorString} = '.instances does not exist in the config'; + $to_return->{error} = 3; + return_the_data( $to_return, $do_not_compress ); + exit 1; +} +if ( ref( $config->{instances} ) ne 'HASH' ) { + $to_return->{errorString} = '.instances is not a hash'; + $to_return->{error} = 3; + return_the_data( $to_return, $do_not_compress ); + exit 1; +} +if ( defined( $config->{use_exec} ) && ref( $config->{use_exec} ) ne '' ) { + $to_return->{errorString} = '.use_exec is defined and is a hash or array'; + $to_return->{error} = 3; + return_the_data( $to_return, $do_not_compress ); + exit 1; +} + +# get a list of instances and process each instance +my @instances = keys( %{ $config->{instances} } ); +foreach my $item (@instances) { + if ( ref( $config->{instances}{$item} ) eq '' ) { + my $command; + if ( !$config->{use_exec} ) { + $command = 'curl ' . shell_quote( $config->{instances}{$item} . '?json' ) . ' 2> /dev/null'; + } else { + $command = $config->{instances}{$item}; + } + eval { + my $instance_data_raw = `$command`; + if ( $? ne 0 ) { + $command =~ s/\"/\\\"/g; + die( 'command "' . $command . '" exited non-zero returnining... ' . 
$instance_data_raw ); + } + my $instance_data; + $to_return->{data}{instances}{$item} = decode_json($instance_data_raw); + }; + # if + if ($@) { + $to_return->{data}{instances}{$item} = {}; + $to_return->{data}{instance_errors}{$item} = $@; + $to_return->{data}{errored} = 1; + }else { + # add the the instance to the totals + foreach my $total_item (@to_total) { + if (defined($to_return->{data}{instances}{$item}{$total_item}) + && $to_return->{data}{instances}{$item}{$total_item} =~ /^\d+$/ + ) { + $to_return->{data}{totals}{$total_item} += $to_return->{data}{instances}{$item}{$total_item}; + } + } + } + } ## end if ( ref( $config->{instances}{$item} ) eq...) +} ## end foreach my $item (@instances) + +return_the_data( $to_return, $do_not_compress ); +exit 0; From e447b9209ef4f1f989cd67c56fa9f24ff74e82ec Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Sat, 15 Jun 2024 04:10:10 -0500 Subject: [PATCH 298/332] php-fpm: change instances to pools and add start since min total (#527) * php-fpm cleanup * add start since min * remove use line for Statistics::Lite --- snmp/php-fpm | 93 ++++++++++++++++++++++++++++++++-------------------- 1 file changed, 57 insertions(+), 36 deletions(-) diff --git a/snmp/php-fpm b/snmp/php-fpm index 7413a5aa3..0510b8b6d 100755 --- a/snmp/php-fpm +++ b/snmp/php-fpm @@ -36,19 +36,19 @@ Print version info. The config file is a JSON file. - - .instances :: An hash of instances to fetch. The key represents the - instance name and value is the URL to fetch, minus the '?json' bit. + - .pools :: An hash of pools to fetch. The key represents the + pool name and value is the URL to fetch, minus the '?json' bit. Default :: undef - - .use_exec :: A boolean for instances values should be treated as a command - instead of a URL. All instances must be a command and can not be a lone URL. + - .use_exec :: A boolean for pools values should be treated as a command + instead of a URL. All poolss must be a command and can not be a lone URL. The returned data is expected to be parsable JSON data. Default :: 0 Example... { - "instances": { + "pools": { "thefrog": "https://thefrog/fpm-status", "foobar": "https://foo.bar/fpm-status" } @@ -57,7 +57,7 @@ Example... A use_exec example... 
{ - "instances": { + "pools": { "thefrog": "curl 'https://thefrog/fpm-status?json' 2> /dev/null", "foobar": "curl 'https://foo.bar/fpm-status?json' 2> /dev/null", }, @@ -130,12 +130,15 @@ my @to_total = ( "total processes", ); +my @to_migrate = @to_total; +push( @to_migrate, 'start since', 'start time', 'pool', 'process manager' ); + my $to_return = { data => { - instances => {}, - instance_errors => {}, - errored => 0, - totals => { + pools => {}, + pool_errors => {}, + errored => 0, + totals => { "accepted conn" => 0, "active processes" => 0, "idle processes" => 0, @@ -146,6 +149,7 @@ my $to_return = { "max listen queue" => 0, "slow requests" => 0, "total processes" => 0, + 'start since min' => undef, }, }, version => 1, @@ -175,14 +179,14 @@ if ($@) { } # ensure the config is basically sane -if ( !defined( $config->{instances} ) ) { - $to_return->{errorString} = '.instances does not exist in the config'; +if ( !defined( $config->{pools} ) ) { + $to_return->{errorString} = '.pools does not exist in the config'; $to_return->{error} = 3; return_the_data( $to_return, $do_not_compress ); exit 1; } -if ( ref( $config->{instances} ) ne 'HASH' ) { - $to_return->{errorString} = '.instances is not a hash'; +if ( ref( $config->{pools} ) ne 'HASH' ) { + $to_return->{errorString} = '.pools is not a hash'; $to_return->{error} = 3; return_the_data( $to_return, $do_not_compress ); exit 1; @@ -194,42 +198,59 @@ if ( defined( $config->{use_exec} ) && ref( $config->{use_exec} ) ne '' ) { exit 1; } -# get a list of instances and process each instance -my @instances = keys( %{ $config->{instances} } ); -foreach my $item (@instances) { - if ( ref( $config->{instances}{$item} ) eq '' ) { +# get a list of pools and process each pool +my @pools = keys( %{ $config->{pools} } ); +foreach my $item (@pools) { + if ( ref( $config->{pools}{$item} ) eq '' ) { my $command; if ( !$config->{use_exec} ) { - $command = 'curl ' . shell_quote( $config->{instances}{$item} . '?json' ) . ' 2> /dev/null'; + $command = 'curl ' . shell_quote( $config->{pools}{$item} . '?json' ) . ' 2> /dev/null'; } else { - $command = $config->{instances}{$item}; + $command = $config->{pools}{$item}; } eval { - my $instance_data_raw = `$command`; + my $pool_data_raw = `$command`; if ( $? ne 0 ) { $command =~ s/\"/\\\"/g; - die( 'command "' . $command . '" exited non-zero returnining... ' . $instance_data_raw ); + die( 'command "' . $command . '" exited non-zero returnining... ' . 
$pool_data_raw ); + } + my $pool_data = decode_json($pool_data_raw); + $to_return->{data}{pools}{$item} = {}; + # ensure the hash only includes what we want and nothing unexpected + foreach my $migrate_item (@to_migrate) { + if ( defined( $pool_data->{$migrate_item} ) && ref( $pool_data->{$migrate_item} ) eq '' ) { + $to_return->{data}{pools}{$item}{$migrate_item} = $pool_data->{$migrate_item}; + } } - my $instance_data; - $to_return->{data}{instances}{$item} = decode_json($instance_data_raw); }; # if if ($@) { - $to_return->{data}{instances}{$item} = {}; - $to_return->{data}{instance_errors}{$item} = $@; - $to_return->{data}{errored} = 1; - }else { - # add the the instance to the totals + $to_return->{data}{pools}{$item} = {}; + $to_return->{data}{pool_errors}{$item} = $@; + $to_return->{data}{errored} = 1; + } else { + # add the the pool to the totals foreach my $total_item (@to_total) { - if (defined($to_return->{data}{instances}{$item}{$total_item}) - && $to_return->{data}{instances}{$item}{$total_item} =~ /^\d+$/ - ) { - $to_return->{data}{totals}{$total_item} += $to_return->{data}{instances}{$item}{$total_item}; + if ( defined( $to_return->{data}{pools}{$item}{$total_item} ) + && $to_return->{data}{pools}{$item}{$total_item} =~ /^\d+$/ ) + { + $to_return->{data}{totals}{$total_item} += $to_return->{data}{pools}{$item}{$total_item}; } } - } - } ## end if ( ref( $config->{instances}{$item} ) eq...) -} ## end foreach my $item (@instances) + + # handle start since min + if ( defined( $to_return->{data}{pools}{$item}{'start since'} ) + && $to_return->{data}{pools}{$item}{'start since'} =~ /^\d+$/ ) + { + if ( !defined( $to_return->{data}{totals}{'start since min'} ) + || $to_return->{data}{pools}{$item}{'start since'} < $to_return->{data}{totals}{'start since min'} ) + { + $to_return->{data}{totals}{'start since min'} = $to_return->{data}{pools}{$item}{'start since'}; + } + } + } ## end else [ if ($@) ] + } ## end if ( ref( $config->{pools}{$item} ) eq '' ) +} ## end foreach my $item (@pools) return_the_data( $to_return, $do_not_compress ); exit 0; From 317e93ed2b6668bc730fdde7336c9c39566a5556 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Sat, 15 Jun 2024 15:13:05 -0500 Subject: [PATCH 299/332] php-fpm: add last request cpu (#529) --- snmp/php-fpm | 70 +++++++++++++++++++++++++++++++--------------------- 1 file changed, 42 insertions(+), 28 deletions(-) diff --git a/snmp/php-fpm b/snmp/php-fpm index 0510b8b6d..05a71dc2f 100755 --- a/snmp/php-fpm +++ b/snmp/php-fpm @@ -58,8 +58,8 @@ A use_exec example... 
{ "pools": { - "thefrog": "curl 'https://thefrog/fpm-status?json' 2> /dev/null", - "foobar": "curl 'https://foo.bar/fpm-status?json' 2> /dev/null", + "thefrog": "curl 'https://thefrog/fpm-status?json&full' 2> /dev/null", + "foobar": "curl 'https://foo.bar/fpm-status?json&full' 2> /dev/null", }, "use_exec": 1 } @@ -118,16 +118,17 @@ if ($help) { } my @to_total = ( - "accepted conn", - "active processes", - "idle processes", - "listen queue", - "listen queue len", - "max active processes", - "max children reached", - "max listen queue", - "slow requests", - "total processes", + 'accepted conn', + 'active processes', + 'idle processes', + 'listen queue', + 'listen queue len', + 'max active processes', + 'max children reached', + 'max listen queue', + 'slow requests', + 'total processes', + 'last request cpu', ); my @to_migrate = @to_total; @@ -139,17 +140,18 @@ my $to_return = { pool_errors => {}, errored => 0, totals => { - "accepted conn" => 0, - "active processes" => 0, - "idle processes" => 0, - "listen queue" => 0, - "listen queue len" => 0, - "max active processes" => 0, - "max children reached" => 0, - "max listen queue" => 0, - "slow requests" => 0, - "total processes" => 0, - 'start since min' => undef, + 'accepted conn' => 0, + 'active processes' => 0, + 'idle processes' => 0, + 'listen queue' => 0, + 'listen queue len' => 0, + 'max active processes' => 0, + 'max children reached' => 0, + 'max listen queue' => 0, + 'slow requests' => 0, + 'total processes' => 0, + 'start since' => undef, + 'last request cpu' => 0, }, }, version => 1, @@ -204,7 +206,7 @@ foreach my $item (@pools) { if ( ref( $config->{pools}{$item} ) eq '' ) { my $command; if ( !$config->{use_exec} ) { - $command = 'curl ' . shell_quote( $config->{pools}{$item} . '?json' ) . ' 2> /dev/null'; + $command = 'curl ' . shell_quote( $config->{pools}{$item} . '?json&full' ) . 
' 2> /dev/null'; } else { $command = $config->{pools}{$item}; } @@ -222,6 +224,18 @@ foreach my $item (@pools) { $to_return->{data}{pools}{$item}{$migrate_item} = $pool_data->{$migrate_item}; } } + + if (defined($pool_data->{'processes'}) && ref($pool_data->{'processes'}) eq 'ARRAY') { + $to_return->{data}{pools}{$item}{'last request cpu'} = 0; + foreach my $proc_item (@{ $pool_data->{'processes'} }) { + if (defined( $proc_item->{'last request cpu'}) && + ref($proc_item->{'last request cpu'}) eq '' && + $proc_item->{'last request cpu'} =~ /\d+\.\d+/ + ) { + $to_return->{data}{pools}{$item}{'last request cpu'} += $proc_item->{'last request cpu'}; + } + } + } }; # if if ($@) { @@ -232,7 +246,7 @@ foreach my $item (@pools) { # add the the pool to the totals foreach my $total_item (@to_total) { if ( defined( $to_return->{data}{pools}{$item}{$total_item} ) - && $to_return->{data}{pools}{$item}{$total_item} =~ /^\d+$/ ) + && $to_return->{data}{pools}{$item}{$total_item} =~ /^(\d+|\d+\.\d+)$/ ) { $to_return->{data}{totals}{$total_item} += $to_return->{data}{pools}{$item}{$total_item}; } @@ -242,10 +256,10 @@ foreach my $item (@pools) { if ( defined( $to_return->{data}{pools}{$item}{'start since'} ) && $to_return->{data}{pools}{$item}{'start since'} =~ /^\d+$/ ) { - if ( !defined( $to_return->{data}{totals}{'start since min'} ) - || $to_return->{data}{pools}{$item}{'start since'} < $to_return->{data}{totals}{'start since min'} ) + if ( !defined( $to_return->{data}{totals}{'start since'} ) + || $to_return->{data}{pools}{$item}{'start since'} < $to_return->{data}{totals}{'start since'} ) { - $to_return->{data}{totals}{'start since min'} = $to_return->{data}{pools}{$item}{'start since'}; + $to_return->{data}{totals}{'start since'} = $to_return->{data}{pools}{$item}{'start since'}; } } } ## end else [ if ($@) ] From c4fbffd0cde48a2f3d7b3d0163e2b9e65d39314c Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Fri, 5 Jul 2024 15:22:23 -0500 Subject: [PATCH 300/332] snmp/redis.pl now handles multiple instances and has a config file (#530) --- snmp/redis.pl | 197 ++++++++++++++++++++++++++++++++++++++++---------- 1 file changed, 159 insertions(+), 38 deletions(-) diff --git a/snmp/redis.pl b/snmp/redis.pl index 3d4a80830..812c98f8b 100755 --- a/snmp/redis.pl +++ b/snmp/redis.pl @@ -2,14 +2,24 @@ =head1 NAME -redis.pl - LinbreNMS JSON extend for redis. +redis - LinbreNMS JSON extend for redis. =head1 SYNOPSIS -logsize [B<-B>] +redis [B<-B>] [B<-c> ] + +redis [B<-v>|B<--version>] + +redis [B<-h>|B<--help>] =head1 SWITCHES +=head2 -c + +Config file to use. + +Default: /usr/local/etc/redis_extend.json + =head2 -B Do not the return output via GZip+Base64. @@ -38,6 +48,36 @@ =head1 SETUP # if running it via cron extend redis /usr/local/etc/snmp/redis.pl +If for multiple instances or the default of 'redis-cli info' +won't work, a config file will be needed. The config format +is JSON. + +The config entries are as below. + + - command :: If single instance, the command to use. + Type :: String + Default :: redis-cli + + - instances :: A hash where the keys are the instances names + and the values for each key are the command to use. + +The default config would be like below, which will be what is used +if no config file is specified/found. + + { + "command": "redis-cli info" + } + +For something with two instances, "foo" on port 6379 and "bar" on port 6380 +it would be like below. 
+ + { + "instances": { + "foo": "redis-cli -p 6379", + "bar": "redis-cli -p 6380" + } + } + =cut use warnings; @@ -59,16 +99,39 @@ sub main::HELP_MESSAGE { pod2usage( -exitval => 255, -verbose => 2, -output => \*STDOUT, ); } +sub return_the_data { + my $to_return = $_[0]; + my $do_not_compress = $_[1]; + + my $to_return_string = encode_json($to_return); + + if ($do_not_compress) { + print $to_return_string . "\n"; + return; + } + + my $toReturnCompressed; + gzip \$to_return_string => \$toReturnCompressed; + my $compressed = encode_base64($toReturnCompressed); + $compressed =~ s/\n//g; + $compressed = $compressed . "\n"; + print $compressed; +} ## end sub return_the_data + my $return_json = { error => 0, errorString => '', - version => 1, - data => {}, + version => 2, + data => { 'extend_errors' => [] }, }; #gets the options my %opts = (); -getopts( 'Bhv', \%opts ); +getopts( 'Bhvc:', \%opts ); + +if ( !defined( $opts{c} ) ) { + $opts{c} = '/usr/local/etc/redis_extend.json'; +} if ( $opts{v} ) { main::VERSION_MESSAGE; @@ -81,40 +144,98 @@ sub main::HELP_MESSAGE { exit 256; } -# ensure that $ENV{PATH} has has it -$ENV{PATH} = $ENV{PATH} . ':/usr/bin:/usr/sbin:/usr/local/sbin:/usr/local/bin'; - -my $output_raw = `redis-cli info 2> /dev/null`; -if ( $? != 0 ) { - $return_json->{error} = 1; - $return_json->{error} = 'redis-cli info exited non-zero'; - print encode_json($return_json) . "\n"; -} - -$output_raw =~ s/\r//g; -my $section; -foreach my $line ( split( /\n/, $output_raw ) ) { - if ( $line ne '' && $line =~ /^# / ) { - $line =~ s/^# //; - $section = $line; - $return_json->{data}{$section} = {}; - } elsif ( $line ne '' && defined($section) ) { - my ( $key, $value ) = split( /\:/, $line ); - if ( defined($key) && defined($value) ) { - $return_json->{data}{$section}{$key} = $value; +my $single = 1; +my $config = { command => 'redis-cli info' }; +if ( -f $opts{c} ) { + eval { + my $raw_config = read_file( $opts{c} ); + $config = decode_json($raw_config); + if ( !defined( $config->{instances} ) ) { + if ( !defined( $config->{command} ) ) { + $config->{command} = 'redis-cli info'; + } + } elsif ( ref( $config->{instances} ) ne 'HASH' ) { + die( '.instances is defined and is not a hash but ref type ' . ref( $config->{instances} ) ); + } else { + $single = 0; } + }; + if ($@) { + push( @{ $return_json->{data}{extend_errors} }, $@ ); + return_the_data( $return_json, $opts{B} ); + exit 0; } -} ## end foreach my $line ( split( /\n/, $output_raw ) ) +} ## end if ( -f $opts{c} ) -my $return_json_raw = encode_json($return_json); -if ( $opts{B} ) { - print $return_json_raw. "\n"; - exit 0; -} +# ensure that $ENV{PATH} has has it +$ENV{PATH} = $ENV{PATH} . ':/usr/bin:/usr/sbin:/usr/local/sbin:/usr/local/bin'; -my $toReturnCompressed; -gzip \$return_json_raw => \$toReturnCompressed; -my $compressed = encode_base64($toReturnCompressed); -$compressed =~ s/\n//g; -$compressed = $compressed . "\n"; -print $compressed; +if ($single) { + my $command = $config->{command}; + my $output_raw = `$command 2> /dev/null`; + if ( $? != 0 ) { + push( + @{ $return_json->{data}{extend_errors} }, + '"' . $command . '" exited non-zero for with... ' . 
$output_raw + ); + } else { + $output_raw =~ s/\r//g; + my $section; + foreach my $line ( split( /\n/, $output_raw ) ) { + if ( $line ne '' && $line =~ /^# / ) { + $line =~ s/^# //; + $section = $line; + $return_json->{data}{$section} = {}; + } elsif ( $line ne '' && defined($section) ) { + my ( $key, $value ) = split( /\:/, $line ); + if ( defined($key) && defined($value) ) { + $return_json->{data}{$section}{$key} = $value; + } + } + } ## end foreach my $line ( split( /\n/, $output_raw ) ) + } ## end else [ if ( $? != 0 ) ] +} else { + my @instances = keys( %{ $config->{instances} } ); + $return_json->{data}{instances} = {}; + foreach my $instance (@instances) { + if ( ref( $config->{instances}{$instance} ) ne '' ) { + push( + @{ $return_json->{data}{extend_errors} }, + 'instance "' . $instance . '" is ref type ' . ref( $config->{instances}{$instance} ) + ); + } elsif ( $instance =~ /^[\-\_]/ ) { + push( @{ $return_json->{data}{extend_errors} }, 'instance "' . $instance . '" matches /^[\-\_]/' ); + } elsif ( $instance =~ /[\-\_\n\s\"\']$/ ) { + push( @{ $return_json->{data}{extend_errors} }, + 'instance "' . $instance . '" matches /[\-\_\n\s\'\\\"]$/' ); + } else { + my $command = $config->{instances}{$instance}; + my $output_raw = `$command 2> /dev/null`; + if ( $? != 0 ) { + push( + @{ $return_json->{data}{extend_errors} }, + '"' . $command . '" exited non-zero for instance "' . $instance . '" with... ' . $output_raw + ); + } else { + $output_raw =~ s/\r//g; + my $section; + $return_json->{data}{instances}{$instance} = {}; + foreach my $line ( split( /\n/, $output_raw ) ) { + if ( $line ne '' && $line =~ /^# / ) { + $line =~ s/^# //; + $section = $line; + $return_json->{data}{instances}{$instance}{$section} = {}; + } elsif ( $line ne '' && defined($section) ) { + my ( $key, $value ) = split( /\:/, $line ); + if ( defined($key) && defined($value) ) { + $return_json->{data}{instances}{$instance}{$section}{$key} = $value; + } + } + } ## end foreach my $line ( split( /\n/, $output_raw ) ) + } ## end else [ if ( $? != 0 ) ] + } ## end else [ if ( ref( $config->{instances}{$instance} ...))] + } ## end foreach my $instance (@instances) +} ## end else [ if ($single) ] + +return_the_data( $return_json, $opts{B} ); +exit 0; From 97ce2e32c346570cbd5d38cf29d5fd0acb917244 Mon Sep 17 00:00:00 2001 From: "Zane C. 
Bowers-Hadley" Date: Sat, 6 Jul 2024 22:16:14 -0500 Subject: [PATCH 301/332] add new wireguard extend (#531) * add new wireguard extend * add perl tidy and perl critic configs * disable jscpd as it resusts in false positives * note why jscpd is disabled --- .github/workflows/linter.yml | 3 + .perlcriticrc | 1 + .perltidyrc | 29 ++ snmp/wireguard.pl | 528 +++++++++++++++++++++++++++++++++++ 4 files changed, 561 insertions(+) create mode 100644 .perlcriticrc create mode 100644 .perltidyrc create mode 100755 snmp/wireguard.pl diff --git a/.github/workflows/linter.yml b/.github/workflows/linter.yml index 797b4f20f..0aa1ee5a5 100644 --- a/.github/workflows/linter.yml +++ b/.github/workflows/linter.yml @@ -30,6 +30,9 @@ jobs: VALIDATE_PHP_PHPCS: false VALIDATE_PHP_PSALM: false + # errors on sanity checking in snmp/wireguard.pl + VALIDATE_JSCPD: false + SHELLCHECK_OPTS: --severity=warning DEFAULT_BRANCH: master diff --git a/.perlcriticrc b/.perlcriticrc new file mode 100644 index 000000000..ab2e45531 --- /dev/null +++ b/.perlcriticrc @@ -0,0 +1 @@ +exclude = ProhibitExplicitReturnUndef ProhibitOneArgBless ProhibitStringyEval diff --git a/.perltidyrc b/.perltidyrc new file mode 100644 index 000000000..87f068e2f --- /dev/null +++ b/.perltidyrc @@ -0,0 +1,29 @@ +-l=120 +-i=4 +-ci=4 +-st +-se + +-et=4 + +#-aws +-xci +#-dws +-vt=0 +-cti=0 +-bt=1 +-sbt=1 +-bbt=0 +-nsfs +-nolq +-ce +-csc +-csci=10 +-csct=40 +-cb +-iscl +-sbc +-nbbc + +-wbb="% + - * / x != == >= <= =~ !~ < > | & >= < = **= += *= &= <<= &&= -= /= |= >>= ||= .= %= ^= x=" + diff --git a/snmp/wireguard.pl b/snmp/wireguard.pl new file mode 100755 index 000000000..d032c772a --- /dev/null +++ b/snmp/wireguard.pl @@ -0,0 +1,528 @@ +#!/usr/bin/env perl + +use warnings; +use strict; + +=head1 NAME + +wireguard - LinbreNMS JSON extend for wireguard. + +=head1 VERSION + +0.0.1 + +=cut + +our $VERSION = '0.0.1'; + +=head1 SYNOPSIS + +wireguard [B<-B>] [B<-c> ] [B<-p><0|1>] [B<-r> ] [B<-s><0|1>] + +wireguard [B<-v>|B<--version>] + +wireguard [B<-h>|B<--help>] + +=head1 SWITCHES + +=head2 -c + +Config file to use. + +Default: /usr/local/etc/wireguard_extend.json + +=head2 -p <0|1> + +Include the public key. + +Overrides the config item .include_pubey . + +=head2 -r + +A string of resolvers to use. + +Overrides the config item .pubkey_resolvers . + +=head2 -s <0|1> + +Use short hostnames + +Overrides the config item .use_short_hostname . + +=head2 -B + +Do not the return output via GZip+Base64. + +=head2 -h|--help + +Print help info. + +=head2 -v|--version + +Print version info. + +=head1 INSTALL + +Install the depends. + + # FreeBSD + pkg install p5-JSON p5-TOML p5-MIME-Base64 + # Debian + apt-get install libjson-perl libmime-base64-perl + +Then set it up in SNMPD. + + # if running it via cron + extend wireguard /usr/local/etc/snmp/wireguard + +=head1 CONFIG + +The default config is /usr/local/etc/wireguard_extend.json . + +The keys for it are as below. + + - include_pubkey :: Include the pubkey with the return. + values :: 0|1 + default :: 0 + + - use_short_hostname :: If the hostname should be shortname to just the first bit. + values :: 0|1 + default :: 1 + + - public_key_to_arbitrary_name :: An array of pubkys to name mappings. + default :: {} + + - pubkey_resolvers :: A list of resolvers to use to convert pubkeys to names. The + value is a comma seperated string. 
+ default :: config,endpoint_if_first_allowed_is_subnet_use_hosts,endpoint_if_first_allowed_is_subnet_use_ip,first_allowed_use_hosts,first_allowed_use_ip + +=head2 PUBKEY RESOLVERS + +=head3 config + +Use the mappings from .public_key_to_arbitrary_name . + +The names are unaffected by .use_short_names . + +=head3 endpoint_if_first_allowed_is_subnet_use_hosts + +If the first allowed IP is a subnet, see if a matching IP can +be found in hosts for the endpoint. + +=head3 endpoint_if_first_allowed_is_subnet_use_getent + +If the first allowed IP is a subnet, see if a hit can be +found for the endpoint IP via getent hosts. + +This will possible use reverse DNS. + +=head3 endpoint_if_first_allowed_is_subnet_use_ip + +If the first allowed IP is a subnet, use the endpoint +IP for the name. + +=head3 first_allowed_use_hosts + +See if a match can be found in hosts for the first allowed IP. + +=head3 first_allowed_use_getent + +Use getent hosts to see try to fetch a match for the first +allowed IP. + +This will possible use reverse DNS. + +=head3 first_allowed_use_ip + +Use the first allowed IP as the name. + +=cut + +use JSON; +use Getopt::Std; +use MIME::Base64; +use IO::Compress::Gzip qw(gzip $GzipError); +use File::Slurp; +use Pod::Usage; +use Socket; + +$Getopt::Std::STANDARD_HELP_VERSION = 1; + +sub main::VERSION_MESSAGE { + print 'wireguard LibreNMS extend v. '.$VERSION."\n"; +} + +sub main::HELP_MESSAGE { + pod2usage( -exitval => 255, -verbose => 2, -output => \*STDOUT, ); +} + +sub return_the_data { + my $to_return = $_[0]; + my $do_not_compress = $_[1]; + + my $to_return_string = encode_json($to_return); + + if ($do_not_compress) { + print $to_return_string . "\n"; + return; + } + + my $toReturnCompressed; + gzip \$to_return_string => \$toReturnCompressed; + my $compressed = encode_base64($toReturnCompressed); + $compressed =~ s/\n//g; + $compressed = $compressed . 
"\n"; + print $compressed; +} ## end sub return_the_data + +# arg[0]: string +# return[0]: host +# return[1]: port +sub host_port_split { + my $string = $_[0]; + if ( !defined($string) || $string =~ /\([Nn][Oo][Nn][Ee]\)/ ) { + return undef, undef; + } + + my $host = $string; + my $port = $string; + if ( $string =~ /^\[/ ) { + $host =~ s/^\[//; + $host =~ s/\]\:.*$//; + $port =~ s/^.*\]\://; + } else { + $host =~ s/\:.*$//; + $port =~ s/^.*\://; + } + + return $host, $port; +} ## end sub host_port_split + +my $return_json = { + error => 0, + errorString => '', + version => 2, + data => {}, +}; + +#gets the options +my %opts = (); +getopts( 'Bhvc:r:s:p:', \%opts ); + +if ( !defined( $opts{c} ) ) { + $opts{c} = '/usr/local/etc/wireguard_extend.json'; +} + +if ( $opts{v} ) { + &main::VERSION_MESSAGE; + exit 1; +} + +if ( $opts{h} ) { + &main::HELP_MESSAGE; + exit 1; +} + +## +## +## real in the config +## +## +our $config = { + include_pubkey => 0, + pubkey_resolvers => + 'config,endpoint_if_first_allowed_is_subnet_use_hosts,endpoint_if_first_allowed_is_subnet_use_ip,first_allowed_use_hosts,first_allowed_use_ip', + use_short_hostname => 1, + public_key_to_arbitrary_name => {}, +}; +if ( -f $opts{c} ) { + eval { + my $raw_config = read_file( $opts{c} ); + my $parsed_config = decode_json($raw_config); + if ( defined( $parsed_config->{public_key_to_arbitrary_name} ) + && ref( $parsed_config->{public_key_to_arbitrary_name} ) eq 'HASH' ) + { + $config->{public_key_to_arbitrary_name} = $parsed_config->{public_key_to_arbitrary_name}; + } + if ( defined( $parsed_config->{include_pubkey} ) && ref( $parsed_config->{include_pubkey} ) eq '' ) { + $config->{include_pubkey} = $parsed_config->{include_pubkey}; + } + if ( defined( $parsed_config->{pubkey_resolvers} ) && ref( $parsed_config->{pubkey_resolvers} ) eq '' ) { + $config->{pubkey_resolvers} = $parsed_config->{pubkey_resolvers}; + $config->{pubkey_resolvers} =~ s/\ //g; + } + if ( defined( $parsed_config->{pubkey_resolver_cache_file} ) + && ref( $parsed_config->{pubkey_resolver_cache_file} ) eq '' ) + { + $config->{pubkey_resolver_cache_file} = $parsed_config->{pubkey_resolver_cache_file}; + } + if ( defined( $parsed_config->{use_short_hostname} ) && ref( $parsed_config->{use_short_hostname} ) eq '' ) + { + $config->{use_short_hostname} = $parsed_config->{use_short_hostname}; + } + }; + if ($@) { + $return_json->{error} = 1; + $return_json->{errorString} = $@; + return_the_data( $return_json, $opts{B} ); + exit 0; + } +} ## end if ( -f $opts{c} ) + +if ( defined( $opts{p} ) ) { + $config->{include_pubkey} = $opts{p}; +} + +if ( defined( $opts{s} ) ) { + $config->{use_short_hostname} = $opts{s}; +} + +if ( defined( $opts{r} ) ) { + $config->{pubkey_resolvers} = $opts{r}; + $config->{pubkey_resolvers} =~ s/\ //g; +} + +# ensure that $ENV{PATH} has has it +$ENV{PATH} = $ENV{PATH} . ':/usr/bin:/usr/sbin:/usr/local/sbin:/usr/local/bin'; + +## +## +## get all the information +## +## +my $wg_info = {}; + +# get endpoint info +my $command_raw = `wg show all endpoints 2> /dev/null`; +if ( $? 
== 0 ) { + my @command_split = split( /\n/, $command_raw ); + my $interface; + foreach my $line (@command_split) { + my $pubkey; + my $host; + my $port; + + my @line_split = split( /[\t\ ]+/, $line ); + if ( defined( $line_split[2] ) ) { + $interface = $line_split[0]; + $pubkey = $line_split[1]; + ( $host, $port ) = host_port_split( $line_split[2] ); + } else { + $pubkey = $line_split[0]; + ( $host, $port ) = host_port_split( $line_split[1] ); + } + + $wg_info->{$interface} = { + $pubkey => { + endpoint_host => $host, + endpoint_port => $port, + allowed_ips => [], + } + }; + } ## end foreach my $line (@command_split) +} ## end if ( $? == 0 ) + +# get the transfer info +$command_raw = `wg show all transfer 2> /dev/null`; +if ( $? == 0 ) { + my @command_split = split( /\n/, $command_raw ); + foreach my $line (@command_split) { + my ( $interface, $pubkey, $recv, $sent ) = split( /[\t\ ]+/, $line ); + if ( defined($sent) ) { + $wg_info->{$interface}{$pubkey}{bytes_rcvd} = $recv; + $wg_info->{$interface}{$pubkey}{bytes_sent} = $sent; + } + } +} ## end if ( $? == 0 ) + +# get the handshake info +$command_raw = `wg show all latest-handshakes 2> /dev/null`; +my $current_time = time; +if ( $? == 0 ) { + my @command_split = split( /\n/, $command_raw ); + foreach my $line (@command_split) { + my ( $interface, $pubkey, $when ) = split( /[\t\ ]+/, $line ); + if ( $when == 0 ) { + $wg_info->{$interface}{$pubkey}{minutes_since_last_handshake} = undef; + } else { + $wg_info->{$interface}{$pubkey}{minutes_since_last_handshake} = ( $current_time - $when ) / 60; + } + } +} ## end if ( $? == 0 ) + +# get allowed subnets +$command_raw = `wg show all allowed-ips 2> /dev/null`; +if ( $? == 0 ) { + my @command_split = split( /\n/, $command_raw ); + foreach my $line (@command_split) { + my @line_split = split( /[\t\ ]+/, $line ); + my $int = 2; + while ( defined( $line_split[$int] ) ) { + if ( $line_split[$int] =~ /^[0-9\.]+\/32$/ ) { + $line_split[$int] =~ s/\/32//; + } elsif ( $line_split[$int] =~ /^[A-Fa-f0-9\:]+\/128$/ ) { + $line_split[$int] =~ s/\/128//; + } + push( @{ $wg_info->{ $line_split[0] }{ $line_split[1] }{allowed_ips} }, $line_split[$int] ); + $int++; + } + } ## end foreach my $line (@command_split) +} ## end if ( $? == 0 ) + +## +## +## try to translate pubkeys to a name +## +## +sub getent_hosts { + my $ip = $_[0]; + if ( !defined($ip) ) { + return undef; + } + # a bit of sanity checking, but this should never hit... wg should only return IPs for what this is used for + if ( $ip !~ /^[a-fA-F\:\.0-9]+$/ ) { + return undef; + } + my $command_raw = `getent hosts $ip 2> /dev/null`; + if ( $? 
!= 0 ) { + return undef; + } + my @command_split = split( /\n/, $command_raw ); + if ( defined( $command_split[0] ) ) { + my @line_split = split( /[\t\ ]+/, $command_split[0] ); + if ( defined( $line_split[1] ) ) { + $line_split[1] =~ s/^\.//; + if ( $config->{use_short_hostname} ) { + $line_split[1] =~ s/\..*$//; + } + return $line_split[1]; + } + } ## end if ( defined( $command_split[0] ) ) + return undef; +} ## end sub getent_hosts +our $hosts_read = 0; +our $hosts = {}; + +sub hosts { + my $ip = $_[0]; + if ( !defined($ip) ) { + return undef; + } + if ( !$hosts_read ) { + $hosts_read = 1; + eval { + my $hosts_raw = read_file('/etc/hosts'); + my @hosts_split = grep( !/^[\t\ ]*$/, grep( !/^[\ \t]*\#/, split( /\n/, $hosts_raw ) ) ); + foreach my $line (@hosts_split) { + my @line_split = split( /[\t\ ]+/, $line ); + if ( defined( $line_split[0] ) && defined( $line_split[1] ) ) { + $line_split[1] =~ s/^\.//; + if ( $config->{use_short_hostname} ) { + $line_split[1] =~ s/\..*$//; + } + + $hosts->{ $line_split[0] } = $line_split[1]; + } + } ## end foreach my $line (@hosts_split) + }; + } ## end if ( !$hosts_read ) + if ( defined( $hosts->{$ip} ) ) { + return $hosts->{$ip}; + } + return undef; +} ## end sub hosts +my @interfaces = keys( %{$wg_info} ); +my @resolvers = split( /\,+/, $config->{pubkey_resolvers} ); +foreach my $interface (@interfaces) { + my @pubkeys = keys( %{ $wg_info->{$interface} } ); + foreach my $pubkey (@pubkeys) { + my $matched = 0; + my $resolvers_int = 0; + while ( !$matched && defined( $resolvers[$resolvers_int] ) ) { + my $resolver = $resolvers[$resolvers_int]; + if ( !$matched && $resolver eq 'config' ) { + if ( defined( $config->{public_key_to_arbitrary_name}{$pubkey} ) ) { + $wg_info->{$interface}{$pubkey}{name} = $config->{public_key_to_arbitrary_name}{$pubkey}; + $matched = 1; + } + } elsif ( !$matched && $resolver eq 'endpoint_if_first_allowed_is_subnet_use_getent' ) { + if ( defined( $wg_info->{$interface}{$pubkey}{allowed_ips}[0] ) + && $wg_info->{$interface}{$pubkey}{allowed_ips}[0] =~ /\// + && defined( $wg_info->{$interface}{$pubkey}{endpoint_host} ) ) + { + my $name = getent_hosts( $wg_info->{$interface}{$pubkey}{endpoint_host} ); + if ( defined($name) ) { + $wg_info->{$interface}{$pubkey}{name} = $name; + $matched = 1; + } + } ## end if ( defined( $wg_info->{$interface}{$pubkey...})) + } elsif ( !$matched && $resolver eq 'endpoint_if_first_allowed_is_subnet_use_hosts' ) { + if ( defined( $wg_info->{$interface}{$pubkey}{allowed_ips}[0] ) + && $wg_info->{$interface}{$pubkey}{allowed_ips}[0] =~ /\// + && defined( $wg_info->{$interface}{$pubkey}{endpoint_host} ) ) + { + my $name = hosts( $wg_info->{$interface}{$pubkey}{endpoint_host} ); + if ( defined($name) ) { + $wg_info->{$interface}{$pubkey}{name} = $name; + $matched = 1; + } + } ## end if ( defined( $wg_info->{$interface}{$pubkey...})) + } elsif ( !$matched && $resolver eq 'endpoint_if_first_allowed_is_subnet_use_ip' ) { + if ( defined( $wg_info->{$interface}{$pubkey}{allowed_ips}[0] ) + && $wg_info->{$interface}{$pubkey}{allowed_ips}[0] =~ /\// + && defined( $wg_info->{$interface}{$pubkey}{endpoint_host} ) ) + { + $wg_info->{$interface}{$pubkey}{name} = $wg_info->{$interface}{$pubkey}{endpoint_host}; + $matched = 1; + } + } elsif ( !$matched && $resolver eq 'first_allowed_use_getent' ) { + if ( defined( $wg_info->{$interface}{$pubkey}{allowed_ips}[0] ) ) { + my $host = $wg_info->{$interface}{$pubkey}{allowed_ips}[0]; + my $name = getent_hosts($host); + if ( defined($name) ) { + 
$wg_info->{$interface}{$pubkey}{name} = $name;
+                        $matched = 1;
+                    }
+                }
+            } elsif ( !$matched && $resolver eq 'first_allowed_use_hosts' ) {
+                if ( defined( $wg_info->{$interface}{$pubkey}{allowed_ips}[0] ) ) {
+                    my $host = $wg_info->{$interface}{$pubkey}{allowed_ips}[0];
+                    my $name = hosts($host);
+                    if ( defined($name) ) {
+                        $wg_info->{$interface}{$pubkey}{name} = $name;
+                        $matched = 1;
+                    }
+                }
+            } elsif ( !$matched && $resolver eq 'first_allowed_use_ip' ) {
+                $wg_info->{$interface}{$pubkey}{name} = $wg_info->{$interface}{$pubkey}{allowed_ips}[0];
+                $matched = 1;
+            }
+            $resolvers_int++;
+        } ## end while ( !$matched && defined( $resolvers[$resolvers_int...]))
+    } ## end foreach my $pubkey (@pubkeys)
+} ## end foreach my $interface (@interfaces)
+
+##
+##
+## translate found information to output info
+##
+##
+
+foreach my $interface (@interfaces) {
+    my @pubkeys = keys( %{ $wg_info->{$interface} } );
+    foreach my $pubkey (@pubkeys) {
+        if ( defined( $wg_info->{$interface}{$pubkey}{name} ) ) {
+            if ( !defined( $return_json->{data}{$interface} ) ) {
+                $return_json->{data}{$interface} = {};
+            }
+            $return_json->{data}{$interface}{ $wg_info->{$interface}{$pubkey}{name} } = $wg_info->{$interface}{$pubkey};
+            if ($config->{include_pubkey}) {
+                $return_json->{data}{$interface}{ $wg_info->{$interface}{$pubkey}{name} }{pubkey} = $pubkey;
+            }else {
+                $return_json->{data}{$interface}{ $wg_info->{$interface}{$pubkey}{name} }{pubkey} = undef;
+            }
+        }
+    }
+} ## end foreach my $interface (@interfaces)
+
+return_the_data( $return_json, $opts{B} );

From f5184a43f804cad3b8beb3f7ba39dfae5ddfb002 Mon Sep 17 00:00:00 2001
From: "Zane C. Bowers-Hadley" 
Date: Sat, 6 Jul 2024 22:59:29 -0500
Subject: [PATCH 302/332] cleanup php-fpm a bit and fix being able to use a
 custom config path (#532)

---
 snmp/php-fpm | 23 ++++++++++++-----------
 1 file changed, 12 insertions(+), 11 deletions(-)

diff --git a/snmp/php-fpm b/snmp/php-fpm
index 05a71dc2f..324abdc49 100755
--- a/snmp/php-fpm
+++ b/snmp/php-fpm
@@ -6,7 +6,7 @@ php-fpm - LibreNMS JSON SNMP extend for gathering information for php-fpm
 
 =head1 VERSION
 
-0.0.1
+0.0.2
 
 =head1 DESCRIPTION
 
@@ -14,7 +14,7 @@ For more information, see L
 
 =head1 SWITCHES
 
-=head1 -c 
+=head2 -f 
 
 The config file to use.
 
@@ -24,11 +24,11 @@ Default: /usr/local/etc/php-fpm_extend.json
 
 Do not compress the information return using GZip+Base64.
 
-=head1 -h|--help
+=head2 -h|--help
 
 Print help info.
 
-=head1 -v|--version
+=head2 -v|--version
 
 Print version info.
 
@@ -103,6 +103,7 @@ my $help;
 my $config_file = '/usr/local/etc/php-fpm_extend.json';
 GetOptions(
 	C       => \$do_not_compress,
+	'f=s'   => \$config_file,
 	v       => \$version,
 	version => \$version,
 	h       => \$help,
@@ -225,17 +226,17 @@ foreach my $item (@pools) {
 				}
 			}
 
-			if (defined($pool_data->{'processes'}) && ref($pool_data->{'processes'}) eq 'ARRAY') {
+			if ( defined( $pool_data->{'processes'} ) && ref( $pool_data->{'processes'} ) eq 'ARRAY' ) {
 				$to_return->{data}{pools}{$item}{'last request cpu'} = 0;
-				foreach my $proc_item (@{ $pool_data->{'processes'} }) {
-					if (defined( $proc_item->{'last request cpu'}) &&
-						ref($proc_item->{'last request cpu'}) eq '' &&
-						$proc_item->{'last request cpu'} =~ /\d+\.\d+/
-					) {
+				foreach my $proc_item ( @{ $pool_data->{'processes'} } ) {
+					if (   defined( $proc_item->{'last request cpu'} )
+						&& ref( $proc_item->{'last request cpu'} ) eq ''
+						&& $proc_item->{'last request cpu'} =~ /\d+\.\d+/ )
+					{
 						$to_return->{data}{pools}{$item}{'last request cpu'} += $proc_item->{'last request cpu'};
 					}
 				}
-			}
+			} ## end if ( defined( $pool_data->{'processes'} ) ...)
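+			# the 'processes' array handled above comes from php-fpm's full
+			# status JSON; a single entry looks roughly like
+			#   { "last request cpu": 12.34, ... }   (illustrative values)
+			# and entries whose value is not a decimal number fail the regex
+			# check and are skipped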
}; # if if ($@) { From e1efa7b9ccc43af5f56d6261513fab693001421f Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Sat, 6 Jul 2024 23:38:06 -0500 Subject: [PATCH 303/332] wireguard: fix depend info and remove use socket as that is not being used (#533) --- snmp/wireguard.pl | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/snmp/wireguard.pl b/snmp/wireguard.pl index d032c772a..271fefe2b 100755 --- a/snmp/wireguard.pl +++ b/snmp/wireguard.pl @@ -66,9 +66,9 @@ =head1 INSTALL Install the depends. # FreeBSD - pkg install p5-JSON p5-TOML p5-MIME-Base64 + pkg install p5-JSON p5-File-Slurp p5-MIME-Base64 # Debian - apt-get install libjson-perl libmime-base64-perl + apt-get install libjson-perl libmime-base64-perl libfile-slurp-perl Then set it up in SNMPD. @@ -144,7 +144,6 @@ =head3 first_allowed_use_ip use IO::Compress::Gzip qw(gzip $GzipError); use File::Slurp; use Pod::Usage; -use Socket; $Getopt::Std::STANDARD_HELP_VERSION = 1; From e0a263b5a913c86505cceb2253c97b4e80cfd622 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Wed, 10 Jul 2024 03:31:56 -0500 Subject: [PATCH 304/332] endpoint stuff now works (#534) --- snmp/wireguard.pl | 108 ++++++++++++++++++++++++---------------------- 1 file changed, 56 insertions(+), 52 deletions(-) diff --git a/snmp/wireguard.pl b/snmp/wireguard.pl index 271fefe2b..5a35197e8 100755 --- a/snmp/wireguard.pl +++ b/snmp/wireguard.pl @@ -9,11 +9,11 @@ =head1 NAME =head1 VERSION -0.0.1 +0.0.2 =cut -our $VERSION = '0.0.1'; +our $VERSION = '0.0.2'; =head1 SYNOPSIS @@ -148,7 +148,7 @@ =head3 first_allowed_use_ip $Getopt::Std::STANDARD_HELP_VERSION = 1; sub main::VERSION_MESSAGE { - print 'wireguard LibreNMS extend v. '.$VERSION."\n"; + print 'wireguard LibreNMS extend v. ' . $VERSION . "\n"; } sub main::HELP_MESSAGE { @@ -228,7 +228,7 @@ sub host_port_split { ## ## our $config = { - include_pubkey => 0, + include_pubkey => 0, pubkey_resolvers => 'config,endpoint_if_first_allowed_is_subnet_use_hosts,endpoint_if_first_allowed_is_subnet_use_ip,first_allowed_use_hosts,first_allowed_use_ip', use_short_hostname => 1, @@ -289,7 +289,7 @@ sub host_port_split { ## get all the information ## ## -my $wg_info = {}; +my %wg_info; # get endpoint info my $command_raw = `wg show all endpoints 2> /dev/null`; @@ -308,15 +308,19 @@ sub host_port_split { ( $host, $port ) = host_port_split( $line_split[2] ); } else { $pubkey = $line_split[0]; - ( $host, $port ) = host_port_split( $line_split[1] ); + if ( $line_split[1] =~ /^[\[\]0-9\.A-Fa-f]+\:[0-9]+$/ ) { + ( $host, $port ) = host_port_split( $line_split[1] ); + } } - $wg_info->{$interface} = { - $pubkey => { - endpoint_host => $host, - endpoint_port => $port, - allowed_ips => [], - } + if ( !defined( $wg_info{$interface} ) ) { + $wg_info{$interface} = {}; + } + + $wg_info{$interface}{$pubkey} = { + endpoint_host => $host, + endpoint_port => $port, + allowed_ips => [], }; } ## end foreach my $line (@command_split) } ## end if ( $? == 0 ) @@ -328,8 +332,8 @@ sub host_port_split { foreach my $line (@command_split) { my ( $interface, $pubkey, $recv, $sent ) = split( /[\t\ ]+/, $line ); if ( defined($sent) ) { - $wg_info->{$interface}{$pubkey}{bytes_rcvd} = $recv; - $wg_info->{$interface}{$pubkey}{bytes_sent} = $sent; + $wg_info{$interface}{$pubkey}{bytes_rcvd} = $recv; + $wg_info{$interface}{$pubkey}{bytes_sent} = $sent; } } } ## end if ( $? 
== 0 ) @@ -342,9 +346,9 @@ sub host_port_split { foreach my $line (@command_split) { my ( $interface, $pubkey, $when ) = split( /[\t\ ]+/, $line ); if ( $when == 0 ) { - $wg_info->{$interface}{$pubkey}{minutes_since_last_handshake} = undef; + $wg_info{$interface}{$pubkey}{minutes_since_last_handshake} = undef; } else { - $wg_info->{$interface}{$pubkey}{minutes_since_last_handshake} = ( $current_time - $when ) / 60; + $wg_info{$interface}{$pubkey}{minutes_since_last_handshake} = ( $current_time - $when ) / 60; } } } ## end if ( $? == 0 ) @@ -362,7 +366,7 @@ sub host_port_split { } elsif ( $line_split[$int] =~ /^[A-Fa-f0-9\:]+\/128$/ ) { $line_split[$int] =~ s/\/128//; } - push( @{ $wg_info->{ $line_split[0] }{ $line_split[1] }{allowed_ips} }, $line_split[$int] ); + push( @{ $wg_info{ $line_split[0] }{ $line_split[1] }{allowed_ips} }, $line_split[$int] ); $int++; } } ## end foreach my $line (@command_split) @@ -430,10 +434,10 @@ sub hosts { } return undef; } ## end sub hosts -my @interfaces = keys( %{$wg_info} ); +my @interfaces = keys(%wg_info); my @resolvers = split( /\,+/, $config->{pubkey_resolvers} ); foreach my $interface (@interfaces) { - my @pubkeys = keys( %{ $wg_info->{$interface} } ); + my @pubkeys = keys( %{ $wg_info{$interface} } ); foreach my $pubkey (@pubkeys) { my $matched = 0; my $resolvers_int = 0; @@ -441,59 +445,59 @@ sub hosts { my $resolver = $resolvers[$resolvers_int]; if ( !$matched && $resolver eq 'config' ) { if ( defined( $config->{public_key_to_arbitrary_name}{$pubkey} ) ) { - $wg_info->{$interface}{$pubkey}{name} = $config->{public_key_to_arbitrary_name}{$pubkey}; + $wg_info{$interface}{$pubkey}{name} = $config->{public_key_to_arbitrary_name}{$pubkey}; $matched = 1; } } elsif ( !$matched && $resolver eq 'endpoint_if_first_allowed_is_subnet_use_getent' ) { - if ( defined( $wg_info->{$interface}{$pubkey}{allowed_ips}[0] ) - && $wg_info->{$interface}{$pubkey}{allowed_ips}[0] =~ /\// - && defined( $wg_info->{$interface}{$pubkey}{endpoint_host} ) ) + if ( defined( $wg_info{$interface}{$pubkey}{allowed_ips}[0] ) + && $wg_info{$interface}{$pubkey}{allowed_ips}[0] =~ /\// + && defined( $wg_info{$interface}{$pubkey}{endpoint_host} ) ) { - my $name = getent_hosts( $wg_info->{$interface}{$pubkey}{endpoint_host} ); + my $name = getent_hosts( $wg_info{$interface}{$pubkey}{endpoint_host} ); if ( defined($name) ) { - $wg_info->{$interface}{$pubkey}{name} = $name; + $wg_info{$interface}{$pubkey}{name} = $name; $matched = 1; } - } ## end if ( defined( $wg_info->{$interface}{$pubkey...})) + } ## end if ( defined( $wg_info{$interface}{$pubkey...})) } elsif ( !$matched && $resolver eq 'endpoint_if_first_allowed_is_subnet_use_hosts' ) { - if ( defined( $wg_info->{$interface}{$pubkey}{allowed_ips}[0] ) - && $wg_info->{$interface}{$pubkey}{allowed_ips}[0] =~ /\// - && defined( $wg_info->{$interface}{$pubkey}{endpoint_host} ) ) + if ( defined( $wg_info{$interface}{$pubkey}{allowed_ips}[0] ) + && $wg_info{$interface}{$pubkey}{allowed_ips}[0] =~ /\// + && defined( $wg_info{$interface}{$pubkey}{endpoint_host} ) ) { - my $name = hosts( $wg_info->{$interface}{$pubkey}{endpoint_host} ); + my $name = hosts( $wg_info{$interface}{$pubkey}{endpoint_host} ); if ( defined($name) ) { - $wg_info->{$interface}{$pubkey}{name} = $name; + $wg_info{$interface}{$pubkey}{name} = $name; $matched = 1; } - } ## end if ( defined( $wg_info->{$interface}{$pubkey...})) + } ## end if ( defined( $wg_info{$interface}{$pubkey...})) } elsif ( !$matched && $resolver eq 'endpoint_if_first_allowed_is_subnet_use_ip' ) { - 
if ( defined( $wg_info->{$interface}{$pubkey}{allowed_ips}[0] ) - && $wg_info->{$interface}{$pubkey}{allowed_ips}[0] =~ /\// - && defined( $wg_info->{$interface}{$pubkey}{endpoint_host} ) ) + if ( defined( $wg_info{$interface}{$pubkey}{allowed_ips}[0] ) + && $wg_info{$interface}{$pubkey}{allowed_ips}[0] =~ /\// + && defined( $wg_info{$interface}{$pubkey}{endpoint_host} ) ) { - $wg_info->{$interface}{$pubkey}{name} = $wg_info->{$interface}{$pubkey}{endpoint_host}; + $wg_info{$interface}{$pubkey}{name} = $wg_info{$interface}{$pubkey}{endpoint_host}; $matched = 1; } } elsif ( !$matched && $resolver eq 'first_allowed_use_getent' ) { - if ( defined( $wg_info->{$interface}{$pubkey}{allowed_ips}[0] ) ) { - my $host = $wg_info->{$interface}{$pubkey}{allowed_ips}[0]; + if ( defined( $wg_info{$interface}{$pubkey}{allowed_ips}[0] ) ) { + my $host = $wg_info{$interface}{$pubkey}{allowed_ips}[0]; my $name = getent_hosts($host); if ( defined($name) ) { - $wg_info->{$interface}{$pubkey}{name} = $name; + $wg_info{$interface}{$pubkey}{name} = $name; $matched = 1; } } } elsif ( !$matched && $resolver eq 'first_allowed_use_hosts' ) { - if ( defined( $wg_info->{$interface}{$pubkey}{allowed_ips}[0] ) ) { - my $host = $wg_info->{$interface}{$pubkey}{allowed_ips}[0]; + if ( defined( $wg_info{$interface}{$pubkey}{allowed_ips}[0] ) ) { + my $host = $wg_info{$interface}{$pubkey}{allowed_ips}[0]; my $name = hosts($host); if ( defined($name) ) { - $wg_info->{$interface}{$pubkey}{name} = $name; + $wg_info{$interface}{$pubkey}{name} = $name; $matched = 1; } } } elsif ( !$matched && $resolver eq 'first_allowed_use_ip' ) { - $wg_info->{$interface}{$pubkey}{name} = $wg_info->{$interface}{$pubkey}{allowed_ips}[0]; + $wg_info{$interface}{$pubkey}{name} = $wg_info{$interface}{$pubkey}{allowed_ips}[0]; $matched = 1; } $resolvers_int++; @@ -508,20 +512,20 @@ sub hosts { ## foreach my $interface (@interfaces) { - my @pubkeys = keys( %{ $wg_info->{$interface} } ); + my @pubkeys = keys( %{ $wg_info{$interface} } ); foreach my $pubkey (@pubkeys) { - if ( defined( $wg_info->{$interface}{$pubkey}{name} ) ) { + if ( defined( $wg_info{$interface}{$pubkey}{name} ) ) { if ( !defined( $return_json->{data}{$interface} ) ) { $return_json->{data}{$interface} = {}; } - $return_json->{data}{$interface}{ $wg_info->{$interface}{$pubkey}{name} } = $wg_info->{$interface}{$pubkey}; - if ($config->{include_pubkey}) { - $return_json->{data}{$interface}{ $wg_info->{$interface}{$pubkey}{name} }{pubkey} = $pubkey; - }else { - $return_json->{data}{$interface}{ $wg_info->{$interface}{$pubkey}{name} }{pubkey} = undef; + $return_json->{data}{$interface}{ $wg_info{$interface}{$pubkey}{name} } = $wg_info{$interface}{$pubkey}; + if ( $config->{include_pubkey} ) { + $return_json->{data}{$interface}{ $wg_info{$interface}{$pubkey}{name} }{pubkey} = $pubkey; + } else { + $return_json->{data}{$interface}{ $wg_info{$interface}{$pubkey}{name} }{pubkey} = undef; } - } - } + } ## end if ( defined( $wg_info{$interface}{$pubkey...})) + } ## end foreach my $pubkey (@pubkeys) } ## end foreach my $interface (@interfaces) return_the_data( $return_json, $opts{B} ); From eafd3417b112f822d50f7e10eca8a316d5a7d74f Mon Sep 17 00:00:00 2001 From: "Zane C. 
Bowers-Hadley" Date: Wed, 10 Jul 2024 10:10:06 -0500 Subject: [PATCH 305/332] now save the hostname if we got it (#535) --- snmp/wireguard.pl | 52 ++++++++++++++++++++++++++--------------------- 1 file changed, 29 insertions(+), 23 deletions(-) diff --git a/snmp/wireguard.pl b/snmp/wireguard.pl index 5a35197e8..49e0c0ec6 100755 --- a/snmp/wireguard.pl +++ b/snmp/wireguard.pl @@ -9,11 +9,11 @@ =head1 NAME =head1 VERSION -0.0.2 +0.0.3 =cut -our $VERSION = '0.0.2'; +our $VERSION = '0.0.3'; =head1 SYNOPSIS @@ -395,14 +395,12 @@ sub getent_hosts { my @line_split = split( /[\t\ ]+/, $command_split[0] ); if ( defined( $line_split[1] ) ) { $line_split[1] =~ s/^\.//; - if ( $config->{use_short_hostname} ) { - $line_split[1] =~ s/\..*$//; - } return $line_split[1]; } - } ## end if ( defined( $command_split[0] ) ) + } return undef; } ## end sub getent_hosts + our $hosts_read = 0; our $hosts = {}; @@ -420,13 +418,9 @@ sub hosts { my @line_split = split( /[\t\ ]+/, $line ); if ( defined( $line_split[0] ) && defined( $line_split[1] ) ) { $line_split[1] =~ s/^\.//; - if ( $config->{use_short_hostname} ) { - $line_split[1] =~ s/\..*$//; - } - $hosts->{ $line_split[0] } = $line_split[1]; } - } ## end foreach my $line (@hosts_split) + } }; } ## end if ( !$hosts_read ) if ( defined( $hosts->{$ip} ) ) { @@ -434,6 +428,7 @@ sub hosts { } return undef; } ## end sub hosts + my @interfaces = keys(%wg_info); my @resolvers = split( /\,+/, $config->{pubkey_resolvers} ); foreach my $interface (@interfaces) { @@ -445,8 +440,9 @@ sub hosts { my $resolver = $resolvers[$resolvers_int]; if ( !$matched && $resolver eq 'config' ) { if ( defined( $config->{public_key_to_arbitrary_name}{$pubkey} ) ) { - $wg_info{$interface}{$pubkey}{name} = $config->{public_key_to_arbitrary_name}{$pubkey}; - $matched = 1; + $wg_info{$interface}{$pubkey}{name} = $config->{public_key_to_arbitrary_name}{$pubkey}; + $wg_info{$interface}{$pubkey}{hostname} = undef; + $matched = 1; } } elsif ( !$matched && $resolver eq 'endpoint_if_first_allowed_is_subnet_use_getent' ) { if ( defined( $wg_info{$interface}{$pubkey}{allowed_ips}[0] ) @@ -455,7 +451,7 @@ sub hosts { { my $name = getent_hosts( $wg_info{$interface}{$pubkey}{endpoint_host} ); if ( defined($name) ) { - $wg_info{$interface}{$pubkey}{name} = $name; + $wg_info{$interface}{$pubkey}{hostname} = $name; $matched = 1; } } ## end if ( defined( $wg_info{$interface}{$pubkey...})) @@ -466,7 +462,7 @@ sub hosts { { my $name = hosts( $wg_info{$interface}{$pubkey}{endpoint_host} ); if ( defined($name) ) { - $wg_info{$interface}{$pubkey}{name} = $name; + $wg_info{$interface}{$pubkey}{hostname} = $name; $matched = 1; } } ## end if ( defined( $wg_info{$interface}{$pubkey...})) @@ -475,7 +471,7 @@ sub hosts { && $wg_info{$interface}{$pubkey}{allowed_ips}[0] =~ /\// && defined( $wg_info{$interface}{$pubkey}{endpoint_host} ) ) { - $wg_info{$interface}{$pubkey}{name} = $wg_info{$interface}{$pubkey}{endpoint_host}; + $wg_info{$interface}{$pubkey}{hostname} = $wg_info{$interface}{$pubkey}{endpoint_host}; $matched = 1; } } elsif ( !$matched && $resolver eq 'first_allowed_use_getent' ) { @@ -483,7 +479,7 @@ sub hosts { my $host = $wg_info{$interface}{$pubkey}{allowed_ips}[0]; my $name = getent_hosts($host); if ( defined($name) ) { - $wg_info{$interface}{$pubkey}{name} = $name; + $wg_info{$interface}{$pubkey}{hostname} = $name; $matched = 1; } } @@ -492,12 +488,12 @@ sub hosts { my $host = $wg_info{$interface}{$pubkey}{allowed_ips}[0]; my $name = hosts($host); if ( defined($name) ) { - 
$wg_info{$interface}{$pubkey}{name} = $name;
+						$wg_info{$interface}{$pubkey}{hostname} = $name;
 						$matched = 1;
 					}
 				}
 			} elsif ( !$matched && $resolver eq 'first_allowed_use_ip' ) {
-				$wg_info{$interface}{$pubkey}{name} = $wg_info{$interface}{$pubkey}{allowed_ips}[0];
+				$wg_info{$interface}{$pubkey}{hostname} = $wg_info{$interface}{$pubkey}{allowed_ips}[0];
 				$matched = 1;
 			}
 			$resolvers_int++;
@@ -514,15 +510,25 @@ sub hosts {
 foreach my $interface (@interfaces) {
 	my @pubkeys = keys( %{ $wg_info{$interface} } );
 	foreach my $pubkey (@pubkeys) {
-		if ( defined( $wg_info{$interface}{$pubkey}{name} ) ) {
+		if ( defined( $wg_info{$interface}{$pubkey}{name} ) || $wg_info{$interface}{$pubkey}{hostname} ) {
 			if ( !defined( $return_json->{data}{$interface} ) ) {
 				$return_json->{data}{$interface} = {};
 			}
-			$return_json->{data}{$interface}{ $wg_info{$interface}{$pubkey}{name} } = $wg_info{$interface}{$pubkey};
+			my $name;
+			if ( defined( $wg_info{$interface}{$pubkey}{name} ) ) {
+				$name = $wg_info{$interface}{$pubkey}{name};
+				delete( $wg_info{$interface}{$pubkey}{name} );
+			} else {
+				$name = $wg_info{$interface}{$pubkey}{hostname};
+				if ( $config->{use_short_hostname} ) {
+					$name =~ s/\..*$//;
+				}
+			}
+			$return_json->{data}{$interface}{$name} = $wg_info{$interface}{$pubkey};
 			if ( $config->{include_pubkey} ) {
-				$return_json->{data}{$interface}{ $wg_info{$interface}{$pubkey}{name} }{pubkey} = $pubkey;
+				$return_json->{data}{$interface}{$name}{pubkey} = $pubkey;
 			} else {
-				$return_json->{data}{$interface}{ $wg_info{$interface}{$pubkey}{name} }{pubkey} = undef;
+				$return_json->{data}{$interface}{$name}{pubkey} = undef;
 			}
-		}
-	}
+		} ## end if ( defined( $wg_info{$interface}{$pubkey...}))
+	} ## end foreach my $pubkey (@pubkeys)
 } ## end foreach my $interface (@interfaces)

From 5cc135a8366eaa0395737062f43ba51e55527928 Mon Sep 17 00:00:00 2001
From: "Zane C. Bowers-Hadley" 
Date: Wed, 10 Jul 2024 14:27:32 -0500
Subject: [PATCH 306/332] don't shorten the hostname for wireguard.pl if it is
 an IPv4 address (#536)

---
 snmp/wireguard.pl | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/snmp/wireguard.pl b/snmp/wireguard.pl
index 49e0c0ec6..45f6c01a8 100755
--- a/snmp/wireguard.pl
+++ b/snmp/wireguard.pl
@@ -520,7 +520,7 @@ sub hosts {
 				delete( $wg_info{$interface}{$pubkey}{name} );
 			} else {
 				$name = $wg_info{$interface}{$pubkey}{hostname};
-				if ( $config->{use_short_hostname} ) {
+				if ( $config->{use_short_hostname} && $name !~ /^[0-9\.]+$/) {
 					$name =~ s/\..*$//;
 				}
 			}

From 4d33a236f5f288e2b7a424da743342fae0b57a44 Mon Sep 17 00:00:00 2001
From: "Zane C. Bowers-Hadley" 
Date: Sun, 14 Jul 2024 12:18:39 -0500
Subject: [PATCH 307/332] more work on the poudriere extend (#513)

* more cleanup

* add package and log size

* fix checksum and add in some checks to make sure the value passed to
  read_dir is defined

* more cleanup
---
 snmp/poudriere | 98 +++++++++++++++++++++++++++++++++++++++++++-------
 1 file changed, 85 insertions(+), 13 deletions(-)

diff --git a/snmp/poudriere b/snmp/poudriere
index b413fc895..36244c4f5 100755
--- a/snmp/poudriere
+++ b/snmp/poudriere
@@ -1,4 +1,4 @@
-#!/usr/bin/env perl
+#!/usr/local/bin/perl
 
 =head1 NAME
 
@@ -6,7 +6,7 @@ poudriere - LibreNMS JSON style SNMP extend for monitoring Poudriere
 
 =head1 VERSION
 
-0.0.1
+0.1.0
 
 =head1 SYNOPSIS
 
@@ -45,6 +45,12 @@ meaning it will be written out to the two locations. The latter is for use with
 returning data for SNMP. Will be compressed if possible.
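+
+As a rough illustration (assuming the compressed form was returned; the
+variable holding the extend output is hypothetical), the result can be
+unpacked like so...
+
+    use MIME::Base64 qw(decode_base64);
+    use IO::Uncompress::Gunzip qw(gunzip $GunzipError);
+    use JSON qw(decode_json);
+
+    # undo the Base64, then the GZip, then parse the JSON
+    my $gzipped = decode_base64($extend_output);
+    my $json_text;
+    gunzip \$gzipped => \$json_text or die "gunzip failed: " . $GunzipError;
+    my $stats = decode_json($json_text);
+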
+=head1 REQUIREMENTS + + p5-File-Slurp + p5-MIME-Base64 + p5-JSON + =cut use strict; @@ -55,6 +61,7 @@ use MIME::Base64; use IO::Compress::Gzip qw(gzip $GzipError); use Pod::Usage; use JSON; +use Cwd 'abs_path'; sub time_to_seconds { my $time = $_[0]; @@ -66,13 +73,23 @@ sub time_to_seconds { if ( $time =~ /^0\:[0-9]+\.[0-9]+$/ ) { $time =~ s/^0\://; return $time; - } elsif ( $time =~ /^[0-9]+\:[0-9]+\.[0-9]+$/ ) { + } elsif ( $time =~ /^[0-9]+\:[0-9]+\.[0-9]+$/ + || $time =~ /^[0-9]+\:[0-9]+$/ ) + { my $minutes = $time; $minutes =~ s/\:.*//; $time =~ s/.*\://; $time = ( $minutes * 60 ) + $time; return $time; - } elsif ( $time =~ /^[0-9]+D\:[0-9]+\:[0-9]+\.[0-9]+$/ ) { + } elsif ( $time =~ /^[0-9]+\:[0-9]+\:[0-9]+\.[0-9]+$/ + || $time =~ /^[0-9]+\:[0-9]+\:[0-9]+$/ ) + { + my ( $hours, $minutes, $seconds ) = split( /:/, $time ); + $time = ( $hours * 3600 ) + ( $minutes * 60 ) + $seconds; + return $time; + } elsif ( $time =~ /^[0-9]+D\:[0-9]+\:[0-9]+\.[0-9]+$/ + || $time =~ /^[0-9]+D\:[0-9]+\:[0-9]+$/ ) + { my $days = $time; $days =~ s/D\:.*$//; my $minutes = $time; @@ -80,7 +97,7 @@ sub time_to_seconds { $minutes =~ s/\:.*//; $time = ( $days * 86400 ) + ( $minutes * 60 ) + $time; return $time; - } + } ## end elsif ( $time =~ /^[0-9]+D\:[0-9]+\:[0-9]+\.[0-9]+$/...) # return 0 for anything unknown return 0; @@ -162,7 +179,8 @@ my $data = { 'check-sanity' => 0, 'pkg-depends' => 0, 'fetch-depends' => 0, - 'fetch checksum' => 0, + 'fetch' => 0, + 'checksum' => 0, 'extract-depends' => 0, 'extract' => 0, 'patch-depends' => 0, @@ -174,6 +192,12 @@ my $data = { 'run-depends' => 0, 'stage' => 0, 'package' => 0, + 'package_size_all' => 0, + 'package_size_latest' => 0, + 'package_size_building' => 0, + 'log_size_latest' => 0, + 'log_size_done' => 0, + 'log_size_per_package' => 0, }, jailANDportsANDset => {} }; @@ -195,10 +219,10 @@ my @poudriere_stats = ( 'QUEUE', 'BUILT', 'FAIL', 'SKIP', 'IGNORE', 'FETCH', 'RE ### ### -my $status_raw = `poudriere -N status -f 2> /dev/null`; +my $status_raw = `poudriere -N status -f -l 2> /dev/null`; if ( $? == 0 ) { $data->{status} = $status_raw; - $data->{build_info} = `poudriere -N status -f -b 2>&1`; + $data->{build_info} = `poudriere -N status -f -b -l 2>&1`; my @status_split = split( /\n/, $status_raw ); my $status_split_int = 1; @@ -235,7 +259,8 @@ if ( $? == 0 ) { 'check-sanity' => 0, 'pkg-depends' => 0, 'fetch-depends' => 0, - 'fetch checksum' => 0, + 'fetch' => 0, + 'checksum' => 0, 'extract-depends' => 0, 'extract' => 0, 'patch-depends' => 0, @@ -247,6 +272,12 @@ if ( $? == 0 ) { 'run-depends' => 0, 'stage' => 0, 'package' => 0, + 'package_size_all' => 0, + 'package_size_latest' => 0, + 'package_size_building' => 0, + 'log_size_latest' => 0, + 'log_size_done' => 0, + 'log_size_per_package' => 0, }; ( $found->{SET}, $found->{PORTS}, $found->{JAIL}, $found->{BUILD}, $found->{STATUS}, @@ -265,11 +296,52 @@ if ( $? == 0 ) { $jailANDportsANDset = $found->{JAIL} . '-' . $found->{PORTS} . '-' . $found->{SET}; } + $found->{packages_dir_all} = $found->{LOGS} . '/../../../../packages/' . $jailANDportsANDset . '/All'; + $found->{packages_dir_latest} = $found->{LOGS} . '/../../../../packages/' . $jailANDportsANDset . '/Latest'; + $found->{packages_dir_building} + = $found->{LOGS} . '/../../../../packages/' . $jailANDportsANDset . '/.building'; + $found->{logs_dir_latest} = $found->{LOGS} . '/logs'; + $found->{logs_dir_done} = $found->{LOGS} . '/../latest-done/logs'; + $found->{logs_dir_per_package} = $found->{LOGS} . 
'/../latest-per-pkg/'; + my %dir_size_stats = ( + 'logs_dir_per_package' => 'log_size_per_package', + 'logs_dir_done' => 'log_size_done', + 'logs_dir_latest' => 'log_size_latest', + 'packages_dir_building' => 'package_size_building', + 'packages_dir_latest' => 'package_size_latest', + 'packages_dir_all' => 'package_size_all', + ); + + foreach my $item ( keys(%dir_size_stats) ) { + eval { + if ( defined( $found->{$item} ) ) { + $found->{$item} = abs_path( $found->{$item} ); + if ( defined( $found->{$item} ) ) { + if ( -d $found->{$item} ) { + my @files = read_dir( $found->{$item} ); + foreach my $to_stat (@files) { + if ( -f $found->{$item} . '/' . $to_stat ) { + my ( + $dev, $ino, $mode, $nlink, $uid, $gid, $rdev, + $size, $atime, $mtime, $ctime, $blksize, $blocks + ) = stat( $found->{$item} . '/' . $to_stat ); + $found->{ $dir_size_stats{$item} } += $size; + } + } + $data->{stats}{ $dir_size_stats{$item} } = $found->{ $dir_size_stats{$item} }; + } ## end if ( -d $found->{$item} ) + } ## end if ( defined( $found->{$item} ) ) + } ## end if ( defined( $found->{$item} ) ) + }; + } ## end foreach my $item ( keys(%dir_size_stats) ) + foreach my $item (@poudriere_stats) { if ( $item eq 'TIME' ) { $found->{$item} = time_to_seconds( $found->{$item} ); } - $data->{stats}{$item} += $found->{$item}; + if ( $item =~ /^\d+$/ ) { + $data->{stats}{$item} += $found->{$item}; + } } ## @@ -367,13 +439,13 @@ if ( $? == 0 ) { } elsif ( $line =~ /[\ \t]package[\ \t]/ ) { $type = 'package'; } - if (defined($type)) { + if ( defined($type) ) { $data->{stats}{$type}++; - if (defined($data->{jailANDportsANDset}{$current_section})) { + if ( defined( $data->{jailANDportsANDset}{$current_section} ) ) { $data->{jailANDportsANDset}{$current_section}{$type}++; } } - } ## end elsif ( $line =~ /^\[[0-9]+\].*\/.*\|.*-.*\:/) + } ## end elsif ( $line =~ /^\[.*\].*\:.*\|.*\:/ ) } ## end foreach my $line (@build_info_split) } else { $to_return->{error} = 1; From 28f1fd2b675a26347ba09d061693c2ca06e58681 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Sat, 20 Jul 2024 07:26:53 -0500 Subject: [PATCH 308/332] for poudriere add -a for .data.history and -z to zero status stats when not building (#537) * add -a for .data.history * add -z --- snmp/poudriere | 37 ++++++++++++++++++++++++++++++++++--- 1 file changed, 34 insertions(+), 3 deletions(-) diff --git a/snmp/poudriere b/snmp/poudriere index 36244c4f5..2a76a929c 100755 --- a/snmp/poudriere +++ b/snmp/poudriere @@ -6,11 +6,11 @@ poudriere - LibreNMS JSON style SNMP extend for monitoring Poudriere =head1 VERSION -0.1.0 +0.2.0 =head1 SYNOPSIS -poudriere [B<-w>] [B<-b>] [B<-o> ] +poudriere [B<-w>] [B<-b>] [B<-o> ] [B<-a>] [B<-z>] poudriere --help|-h @@ -26,6 +26,10 @@ or if using cron... =head1 FLAGS +=head2 -a + +Include `poudriere status -a` as .data.history . + =head2 -w Write the results out. @@ -34,6 +38,11 @@ Print out the compressed data if GZip+Base64 is smaller. +=head2 -z + +Zero the stats from `poudriere status` if the status for a jail/ports/set +set is not building. + =head2 -o Where to write the results to. Defaults to '/var/cache/poudriere.json', @@ -115,10 +124,14 @@ my $write; my $compress; my $version; my $help; +my $history; +my $zero_non_build; GetOptions( + a => \$history, 'o=s' => \$cache_base, w => \$write, b => \$compress, + z => \$zero_non_build, v => \$version, version => \$version, h => \$help, help => \$help, ); @@ -285,6 +298,17 @@ if ( $?
== 0 ) { $found->{FETCH}, $found->{REMAIN}, $found->{TIME}, $found->{LOGS} ) = split( / +/, $status_split[$status_split_int], 14 ); + if ( $zero_non_build && $found->{STATUS} !~ /build/ ) { + $found->{QUEUE} = 0; + $found->{BUILT} = 0; + $found->{FAIL} = 0; + $found->{SKIP} = 0; + $found->{IGNORE} = 0; + $found->{FETCH} = 0; + $found->{REMAIN} = 0; + $found->{TIME} = 0; + } + if ( $found->{STATUS} ne 'done' ) { $data->{not_done} = 1; } @@ -339,7 +363,7 @@ if ( $? == 0 ) { if ( $item eq 'TIME' ) { $found->{$item} = time_to_seconds( $found->{$item} ); } - if ( $item =~ /^\d+$/ ) { + if ( $found->{$item} =~ /^\d+$/ ) { $data->{stats}{$item} += $found->{$item}; } } @@ -447,6 +471,13 @@ if ( $? == 0 ) { } } ## end elsif ( $line =~ /^\[.*\].*\:.*\|.*\:/ ) } ## end foreach my $line (@build_info_split) + + # + # include this history if asked to + # + if ($history) { + $data->{history} = `poudriere -N status -a 2> /dev/null`; + } } else { $to_return->{error} = 1; $to_return->{errorString} = 'non-zero exit for "poudriere status -f"'; From 1779ef36910e6df3e95a6a953860735c4ae11060 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Mon, 22 Jul 2024 13:42:13 -0500 Subject: [PATCH 309/332] add -q to quite the results and a bit of cleanup (#538) --- snmp/poudriere | 50 +++++++++++++++++++++++++------------------------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/snmp/poudriere b/snmp/poudriere index 2a76a929c..b030da77d 100755 --- a/snmp/poudriere +++ b/snmp/poudriere @@ -6,11 +6,13 @@ poudriere - LibreNMS JSON style SNMP extend for monitoring Poudriere =head1 VERSION -0.2.0 +0.3.0 =head1 SYNOPSIS -poudriere [B<-w>] [B<-b>] [B<-o> ] [B<-a>] [B<-z>] +poudriere B<-w> [B<-o> ] [B<-a>] [B<-z>] [B<-q>] + +poudriere [<-b>] [B<-a>] [B<-z>] poudriere --help|-h @@ -18,10 +20,14 @@ poudriere --version|-v =head1 SNMPD CONFIG - extend poudriere /etc/snmp/extends/poudriere -b + extend poudriere /usr/local/etc/snmp/poudriere -b -a -z or if using cron... + # cron + 4/5 * * * * root /usr/local/etc/snmp/poudriere -b -a -z -q + + # snmpd.conf extend poudriere cat /var/cache/poudriere.json.snmp =head1 FLAGS @@ -30,13 +36,17 @@ or if using cron... Include `poudriere status -a` as .data.history . -=head2 -w +=head2 -b -Write the results out. +Encapsulate the result in GZip+Base64 if -w is not used. -=head2 -b +=head2 -q -Print out the compressed data if GZip+Base64 is smaller. +If -w is specified, do not print the results to stdout. + +=head2 -w + +Write the results out. =head2 -z @@ -126,10 +136,12 @@ my $version; my $help; my $history; my $zero_non_build; +my $if_write_be_quiet; GetOptions( a => \$history, 'o=s' => \$cache_base, w => \$write, + q => \$if_write_be_quiet, b => \$compress, z => \$zero_non_build, v => \$version, @@ -306,8 +318,8 @@ if ( $? == 0 ) { $found->{IGNORE} = 0; $found->{FETCH} = 0; $found->{REMAIN} = 0; - $found->{TIME} = 0; - } + $found->{TIME} = 0; + } ## end if ( $zero_non_build && $found->{STATUS} !~...) if ( $found->{STATUS} ne 'done' ) { $data->{not_done} = 1; @@ -393,7 +405,7 @@ if ( $? 
== 0 ) { eval { $ps = decode_json( - `ps -o 'jid %cpu %mem rss cow dsiz etimes inblk jobc majflt minflt msgrcv msgsnd nivcsw nlwp nsigs nswap nvcsw oublk ssiz systime time tsiz usertime' --libxo json -J $jails_string` + `ps -o 'jid %cpu %mem rss cow dsiz etimes inblk jobc majflt minflt msgrcv msgsnd nivcsw nlwp nsigs nswap nvcsw oublk ssiz systime time tsiz usertime' --libxo json -J $jails_string 2> /dev/null` ); }; if ($@) { @@ -503,16 +515,9 @@ if ($write) { $compressed =~ s/\n//g; $compressed = $compressed . "\n"; my $print_compressed = 0; - if ( length($compressed) > length($raw_json) ) { - write_file( $cache_base . '.snmp', $raw_json ); - } else { - write_file( $cache_base . '.snmp', $compressed ); - $print_compressed = 1; - } + write_file( $cache_base . '.snmp', $compressed ); - if ( $compress && $print_compressed ) { - print $compressed; - } else { + if ( !$if_write_be_quiet ) { print $raw_json; } } else { @@ -527,10 +532,5 @@ if ($write) { my $compressed = encode_base64($compressed_string); $compressed =~ s/\n//g; $compressed = $compressed . "\n"; - my $print_compressed = 0; - if ( length($compressed) > length($raw_json) ) { - print $raw_json; - } else { - print $compressed; - } + print $compressed; } ## end else [ if ($write) ] From 7aeb463eda333ed2a83deae69aaa0cd50a83847d Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Wed, 24 Jul 2024 19:16:25 -0500 Subject: [PATCH 310/332] poudriere: add debugging via -d (#540) --- snmp/poudriere | 145 +++++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 127 insertions(+), 18 deletions(-) diff --git a/snmp/poudriere b/snmp/poudriere index b030da77d..6ef1b6314 100755 --- a/snmp/poudriere +++ b/snmp/poudriere @@ -6,13 +6,13 @@ poudriere - LibreNMS JSON style SNMP extend for monitoring Poudriere =head1 VERSION -0.3.0 +0.4.0 =head1 SYNOPSIS -poudriere B<-w> [B<-o> ] [B<-a>] [B<-z>] [B<-q>] +poudriere B<-w> [B<-o> ] [B<-a>] [B<-z>] [B<-q>] [B<-d>] -poudriere [<-b>] [B<-a>] [B<-z>] +poudriere [<-b>] [B<-a>] [B<-z>] [B<-d>] poudriere --help|-h @@ -40,6 +40,10 @@ Include `poudriere status -a` as .data.history . Encapsulate the result in GZip+Base64 if -w is not used. +=head2 -d + +Debug mode. This is noisy + =head2 -q If -w is specified, do not print the results to stdout. @@ -137,19 +141,26 @@ my $help; my $history; my $zero_non_build; my $if_write_be_quiet; +my $debug; GetOptions( a => \$history, + b => \$compress, + d => \$debug, + h => \$help, + help => \$help, 'o=s' => \$cache_base, - w => \$write, q => \$if_write_be_quiet, - b => \$compress, - z => \$zero_non_build, v => \$version, + w => \$write, version => \$version, - h => \$help, - help => \$help, + z => \$zero_non_build, ); +# include for dumping variables at parts +if ($debug) { + eval "use Data::Dumper; \$Data::Dumper::Sortkeys = 1;"; +} + if ($version) { pod2usage( -exitval => 255, -verbose => 99, -sections => qw(VERSION), -output => \*STDOUT, ); exit 255; @@ -246,17 +257,45 @@ my @poudriere_stats = ( 'QUEUE', 'BUILT', 'FAIL', 'SKIP', 'IGNORE', 'FETCH', 'RE my $status_raw = `poudriere -N status -f -l 2> /dev/null`; if ( $? == 0 ) { + if ($debug) { + print "###\n###\n### poudriere -N status -f -l 2> /dev/null \n###\n###\n" . $status_raw . "\n\n\n"; + } + $data->{status} = $status_raw; - $data->{build_info} = `poudriere -N status -f -b -l 2>&1`; + $data->{build_info} = `poudriere -N status -f -b -l 2>\&1`; + + if ($debug) { + print "###\n###\n### poudriere -N status -f -b -l 2>\&1 \n###\n###\n" + . $data->{build_info} + . 
"\n\n\n###\n###\n###\n### jls --libxo json \n###\n###\n###\n"; + } + + my $jls; + eval { $jls = decode_json(`jls --libxo json`); }; + if ($@) { + $jls = { 'jail-information' => { jail => [] } }; + if ($debug) { + print "# failed to parse JSON... using empty hash... \n \$@ = " + . $@ + . "\n\$jls = " + . Dumper($jls) + . "\n\n\n"; + } + } else { + if ($debug) { + print "\$jls = " . Dumper($jls) . "\n\n\n"; + } + } + + if ($debug) { + print "###\n###\n###\n### starting line processing for status \n###\n###\n###\n"; + } my @status_split = split( /\n/, $status_raw ); my $status_split_int = 1; while ( defined( $status_split[$status_split_int] ) ) { - - my $jls; - eval { $jls = decode_json(`jls --libxo json`); }; - if ($@) { - $jls = { 'jail-information' => { jail => [] } }; + if ($debug) { + print '#\n#\n# processing line ' . $status_split_int . ': ' . $status_split[$status_split_int] . "\n#\n#\n"; } my $found = { @@ -319,7 +358,13 @@ if ( $? == 0 ) { $found->{FETCH} = 0; $found->{REMAIN} = 0; $found->{TIME} = 0; - } ## end if ( $zero_non_build && $found->{STATUS} !~...) + + if ($debug) { + print '# zeroing... $zero_non_build = true && status = ' . $found->{STATUS} . " !~ /build/\n"; + } + } elsif ($debug) { + print '# not zeroing ... $zero_non_build = false || status = ' . $found->{STATUS} . " =~ /build/\n"; + } if ( $found->{STATUS} ne 'done' ) { $data->{not_done} = 1; @@ -331,6 +376,9 @@ if ( $? == 0 ) { } else { $jailANDportsANDset = $found->{JAIL} . '-' . $found->{PORTS} . '-' . $found->{SET}; } + if ($debug) { + print '# $jailANDportsANDset = ' . $jailANDportsANDset . "\n"; + } $found->{packages_dir_all} = $found->{LOGS} . '/../../../../packages/' . $jailANDportsANDset . '/All'; $found->{packages_dir_latest} = $found->{LOGS} . '/../../../../packages/' . $jailANDportsANDset . '/Latest'; @@ -386,14 +434,30 @@ if ( $? == 0 ) { my @jails; my $jail_regex = '^' . $jailANDportsANDset . '-job-[0-9]+'; my $jls_int = 0; + if ($debug) { + print '# looking for jails matching... /' . $jail_regex . '/ or \'' . $jailANDportsANDset . "'\n"; + } while ( defined( $jls->{'jail-information'}{jail}[$jls_int] ) ) { if ( $jls->{'jail-information'}{jail}[$jls_int]{hostname} eq $jailANDportsANDset || $jls->{'jail-information'}{jail}[$jls_int]{hostname} =~ /$jail_regex/ ) { push( @jails, $jls->{'jail-information'}{jail}[$jls_int]{jid} ); + if ($debug) { + print 'match $jls->{"jail-information"}{"jail"}[' + . $jls_int + . ']{hostname} = ' + . $jls->{'jail-information'}{jail}[$jls_int]{hostname} . "\n"; + } + } else { + if ($debug) { + print '!match $jls->{"jail-information"}{"jail"}[' + . $jls_int + . ']{hostname} = ' + . $jls->{'jail-information'}{jail}[$jls_int]{hostname} . "\n"; + } } $jls_int++; - } + } ## end while ( defined( $jls->{'jail-information'}{jail...})) ## ## if we have found jails, grab the information via ps @@ -401,8 +465,16 @@ if ( $? == 0 ) { if ( defined( $jails[0] ) ) { my $jails_string = join( ',', @jails ); + if ($debug) { + print "# \$jails[0] defined \n# \$jails_string = " . $jails_string . 
"\n"; + } + my $ps; eval { + if ($debug) { + print + "##\n##\n## ps -o 'jid \%cpu \%mem rss cow dsiz etimes inblk jobc majflt minflt msgrcv msgsnd nivcsw nlwp nsigs nswap nvcsw oublk ssiz systime time tsiz usertime' --libxo json -J $jails_string 2> /dev/null\n##\n##\n"; + } $ps = decode_json( `ps -o 'jid %cpu %mem rss cow dsiz etimes inblk jobc majflt minflt msgrcv msgsnd nivcsw nlwp nsigs nswap nvcsw oublk ssiz systime time tsiz usertime' --libxo json -J $jails_string 2> /dev/null` @@ -410,6 +482,12 @@ if ( $? == 0 ) { }; if ($@) { $ps = { 'process-information' => { process => [] } }; + if ($debug) { + print '# JSON parsing errored... using default... ' . $@ . "\n"; + } + } + if ($debug) { + print '$ps = ' . Dumper($ps) . "\n"; } my $ps_int = 0; while ( defined( $ps->{'process-information'}{process}[$ps_int] ) ) { @@ -423,19 +501,38 @@ if ( $? == 0 ) { } $ps_int++; } ## end while ( defined( $ps->{'process-information'}...)) - } ## end if ( defined( $jails[0] ) ) + } else { + if ($debug) { + print "# \$jails[0] is undef\n"; + } + } $data->{jailANDportsANDset}{$jailANDportsANDset} = $found; $status_split_int++; + + if ($debug) { + print "\$data->{jailANDportsANDset}{$jailANDportsANDset} = " + . Dumper( $data->{jailANDportsANDset}{$jailANDportsANDset} ) . " \n\n"; + } } ## end while ( defined( $status_split[$status_split_int...])) + if ($debug) { + print "#\n#\n# processing \$data->{build_info}\n#\n#\n"; + } + my @build_info_split = split( /\n/, $data->{build_info} ); my $current_section; foreach my $line (@build_info_split) { + if ($debug) { + print "# processing line: " . $line . "\n"; + } if ( $line =~ /^\[.*\]\ \[.*\] .*Queued.*Built/ ) { $current_section = $line; $current_section =~ s/^\[//; $current_section =~ s/\].*$//; + if ($debug) { + print '# found section line... \$current_section = ' . $current_section . "\n"; + } } elsif ( $line =~ /^\[.*\].*\:.*\|.*\:/ ) { my $type; if ( $line =~ /[\ \t]check\-sanity[\ \t]/ ) { @@ -480,6 +577,11 @@ if ( $? == 0 ) { if ( defined( $data->{jailANDportsANDset}{$current_section} ) ) { $data->{jailANDportsANDset}{$current_section}{$type}++; } + if ($debug) { + print '# type line found... $type = ' . $type . "\n"; + } + } elsif ($debug) { + print "# line not matched"; } } ## end elsif ( $line =~ /^\[.*\].*\:.*\|.*\:/ ) } ## end foreach my $line (@build_info_split) @@ -489,10 +591,17 @@ if ( $? == 0 ) { # if ($history) { $data->{history} = `poudriere -N status -a 2> /dev/null`; + if ($debug) { + print "#\n#\n# including as .data.history ... poudriere -N status -a 2> /dev/null\n#\n"; + } + } else { + if ($debug) { + print "#\n#\n# not including as .data.history ... poudriere -N status -a 2> /dev/null"; + } } } else { $to_return->{error} = 1; - $to_return->{errorString} = 'non-zero exit for "poudriere status -f"'; + $to_return->{errorString} = 'non-zero exit for "poudriere -N status -f -l"'; } ### From de1bfc6b0012960b30df828332200f668eebb5a0 Mon Sep 17 00:00:00 2001 From: "Zane C. 
Bowers-Hadley" Date: Wed, 24 Jul 2024 19:17:31 -0500 Subject: [PATCH 311/332] zfs: handle a edge case that seems to affect some Linux installs for ZFS #523 --- snmp/zfs | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/snmp/zfs b/snmp/zfs index 6a14acdad..7b2412ced 100755 --- a/snmp/zfs +++ b/snmp/zfs @@ -6,7 +6,7 @@ zfs - LibreNMS JSON SNMP extend for gathering backups for ZFS =head1 VERSION -0.1.0 +0.1.1 =head1 DESCRIPTION @@ -80,8 +80,6 @@ use MIME::Base64; use IO::Compress::Gzip qw(gzip $GzipError); use Pod::Usage; -#$Getopt::Std::STANDARD_HELP_VERSION = 1; - sub main::VERSION_MESSAGE { pod2usage( -exitval => 255, -verbose => 99, -sections => qw(VERSION), -output => \*STDOUT, ); } @@ -289,6 +287,13 @@ my $recently_used_percent; my $frequently_used_percent; if ( !defined( $stats_stuff->{p} ) && defined( $stats_stuff->{mfu_size} ) ) { $stats_stuff->{p} = $stats_stuff->{size} - $stats_stuff->{mfu_size}; +} elsif ( !defined( $stats_stuff->{p} ) + && !defined( $stats_stuff->{mfu_size} ) + && defined( $stats_stuff->{pd} && defined( $stats_stuff->{pm} ) ) ) +{ + # see https://github.com/librenms/librenms-agent/issues/518 + # this should set the value for p in those cases + $stats_stuff->{p} = $stats_stuff->{pd} + $stats_stuff->{pm}; } if ( $stats_stuff->{size} >= $stats_stuff->{c} ) { if ( !defined($mfu_size) ) { From d83ec419c22b053e28d5904687503b625273e918 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Fri, 26 Jul 2024 19:36:35 -0500 Subject: [PATCH 312/332] add snmp/http_access_log_combined (#541) --- snmp/http_access_log_combined | 656 ++++++++++++++++++++++++++++++++++ 1 file changed, 656 insertions(+) create mode 100755 snmp/http_access_log_combined diff --git a/snmp/http_access_log_combined b/snmp/http_access_log_combined new file mode 100755 index 000000000..af7b841a4 --- /dev/null +++ b/snmp/http_access_log_combined @@ -0,0 +1,656 @@ +#!/usr/local/bin/perl + +=head1 NAME + +http_access_log_combined - LibreNMS JSON style SNMP extend for monitoring Apache style combined HTTP access logs + +=head1 VERSION + +0.1.0 + +=head1 SYNOPSIS + +http_access_log_combined B<-w> [B<-o> ] [B<-a>] [B<-q>] [B<-c> ] + +http_access_log_combined [<-b>] [B<-a>] [B<-z>] [B<-c> ] + +http_access_log_combined --help|-h + +http_access_log_combined --version|-v + +=head1 SNMPD CONFIG + + extend http_access_log_combined /usr/local/etc/snmp/poudriere -b + +or if using cron... + + # cron + 4/5 * * * * root /usr/local/etc/snmp/http_access_log_combined -b -q + + # snmpd.conf + extend poudriere cat /var/cache/http_access_log_combined.json.snmp + +=head1 FLAGS + +=head2 -b + +Encapsulate the result in GZip+Base64 if -w is not used. + +=head2 -c + +Config file to use. + +Default is /usr/local/etc/http_access_log_combined_extend.json . + +=head2 -q + +If -w is specified, do not print the results to stdout. + +=head2 -w + +Write the results out. + +=head2 -o + +Where to write the results to. Defaults to '/var/cache/http_access_log_combined.json', +meaning it will be written out to the two locations. + + /var/cache/http_access_log_combined.json + /var/cache/http_access_log_combined.json.snmp + +The later is for use with returning data for SNMP. Will be compressed +if possible. + +=head1 CONFIG + +=head2 manual + + - access :: An hash of files to process. + type :: hash + defualt :: {} + + - errors :: An hash of error files to get the size of. The key is matched keys in the access hash. 
+ type :: hash + default :: {} + + # an example with a log named foo + { + "access":{ + "foo":"/var/log/www/foo.log" + }, + "errors":{ + "foo":"/var/log/www/foo-error.log" + } + } + +=head2 auto + +Auto will attempt to generate a list of log files to process. It will look under the directory specified +for files matching the built regexp. The regexp is built by joining the access/error regexps to the end regexp, +so for access it would become '-access.log$'. + + - auto :: If auto mode should be used or not. If not defined and .access + is not defined, then it will default to 1. Otherwise it is undef, false. + + - auto_dir :: The dir to look for files in. + default :: /var/log/apache/ + + - auto_end_regex :: What to match files ending in. + default :: .log$ + + - auto_access_regex :: What will be prepended to the end regexp for looking for access log files. + default :: -access + + - auto_error_regex :: What will be prepended to the end regexp for looking for error log files. + default :: -error + + # default + { + "auto": 1, + "auto_dir": "/var/log/apache/", + "auto_end_regex": ".log$", + "auto_access_regex": "-access", + "auto_error_regex": "-error", + } + +=head1 REQUIREMENTS + + File::Slurp + MIME-Base64 + JSON + Statistics::Lite + File::ReadBackwards + + # FreeBSD + pkg install p5-File-Slurp p5-MIME-Base64 p5-JSON p5-Statistics-Lite p5-File-ReadBackwards + + # Debian + apt-get install libfile-slurp-perl libmime-base64-perl libjson-perl libstatistics-lite-perl libfile-readbackwards-perl + +=cut + +use strict; +use warnings; +use Getopt::Long; +use File::Slurp; +use MIME::Base64; +use IO::Compress::Gzip qw(gzip $GzipError); +use Pod::Usage; +use JSON; +use File::ReadBackwards; +use Time::Piece; +use Statistics::Lite qw(:all); + +# get what to use for the target time +my $current_time = time; +my $target_time = $current_time - 300; + +# +# parses the specified line +# +sub parse { + my $line_to_parse = shift; + + my $to_return; + my $rest_of_line; + my $rest_of_line_p2; + + ( $to_return->{host}, $to_return->{user}, $to_return->{date}, $rest_of_line ) + = $line_to_parse =~ m,^([^\s]+)\s+-\s+([^ ]+)\s+\[(.*?)\]\s+(.*),; + + my @date_split = split( /\s+/, $to_return->{date} ); + my $log_time; + eval { + $log_time = Time::Piece->strptime( $date_split[0] .
$date_split[1], '%d/%h/%Y:%H:%M:%S%z' ); + $to_return->{timestamp} = $log_time->epoch; + }; + if ($@) { + $to_return->{timestamp} = 0; + } + + if ( defined($rest_of_line) ) { + ( + $to_return->{method}, $to_return->{path}, $to_return->{proto}, + $to_return->{code}, $to_return->{bytes}, $rest_of_line_p2 + ) = split( /\s/, $rest_of_line, 6 ); + $to_return->{method} =~ tr/\"//d; + $to_return->{proto} =~ tr/\"//d; + + if ( defined($rest_of_line_p2) ) { + my @rest_of_line_p2_split = split( /\"/, $rest_of_line_p2 ); + $to_return->{refer} = $rest_of_line_p2_split[1]; + $to_return->{agent} = $rest_of_line_p2_split[3]; + } + } ## end if ( defined($rest_of_line) ) + + return $to_return; +} ## end sub parse + +#the version of returned data +my $VERSION = 1; + +my $pretty; +my $cache_base = '/var/cache/http_access_log_combined.json'; +my $write; +my $compress; +my $version; +my $help; +my $history; +my $if_write_be_quiet; +my $debug; +my $config_file = '/usr/local/etc/http_access_log_combined_extend.json'; +GetOptions( + b => \$compress, + 'c=s' => \$config_file, + h => \$help, + help => \$help, + 'o=s' => \$cache_base, + q => \$if_write_be_quiet, + v => \$version, + w => \$write, + version => \$version, +); + +if ($version) { + pod2usage( -exitval => 255, -verbose => 99, -sections => qw(VERSION), -output => \*STDOUT, ); + exit 255; +} + +if ($help) { + pod2usage( -exitval => 255, -verbose => 2, -output => \*STDOUT, ); + exit 255; +} + +# read in the config file +my $config; +if ( -f $config_file && !-r $config_file ) { + die( $config_file . ' is not readable' ); +} elsif ( -f $config_file ) { + $config = decode_json( read_file($config_file) ); + if ( ref($config) ne 'HASH' ) { + die( '"' . ref($config) . '" is the base ref type for the config instead of HASH' ); + } +} else { + $config = {}; +} +if ( !defined( $config->{access} ) ) { + $config->{auto} = 1; + $config->{access} = {}; +} +if ( !defined( $config->{errors} ) ) { + $config->{errors} = {}; +} +if ( $config->{auto} ) { + if ( !defined( $config->{auto_dir} ) ) { + $config->{auto_dir} = '/var/log/apache/'; + } + if ( !defined( $config->{auto_end_regex} ) ) { + $config->{auto_end_regex} = '.log$'; + } + if ( !defined( $config->{auto_access_regex} ) ) { + $config->{auto_access_regex} = '-access'; + } + if ( !defined( $config->{auto_error_regex} ) ) { + $config->{auto_error_regex} = '-error'; + } + if ( -d $config->{auto_dir} && -r $config->{auto_dir} ) { + my $access_log_regex = $config->{auto_access_regex} . $config->{auto_end_regex}; + my $error_log_regex = $config->{auto_error_regex} . $config->{auto_end_regex}; + my @dir = read_dir( $config->{auto_dir} ); + foreach my $dir_entry (@dir) { + my $full_path = $config->{auto_dir} . '/' . $dir_entry; + if ( -f $full_path && -r $full_path && $dir_entry =~ /$access_log_regex/ ) { + my $name = $dir_entry; + $name =~ s/$access_log_regex//; + $config->{access}{$name} = $full_path; + } elsif ( -f $full_path && -r $full_path && $dir_entry =~ /$error_log_regex/ ) { + my $name = $dir_entry; + $name =~ s/$error_log_regex//; + $config->{errors}{$name} = $full_path; + } + } ## end foreach my $dir_entry (@dir) + } ## end if ( -d $config->{auto_dir} && -r $config->...)
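+        # For example, with the defaults above (names here are purely
+        # illustrative), '/var/log/apache/foo-access.log' would be picked up
+        # as the access log for 'foo' and '/var/log/apache/foo-error.log' as
+        # the matching entry in errors, just as if both had been listed in
+        # the config file.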
+} ## end if ( $config->{auto} ) + +#the data to return +my $to_return = { + 'version' => $VERSION, + 'error' => '0', + 'errorString' => '', +}; +my $data = { + totals => { + 'GET' => 0, + 'HEAD' => 0, + 'POST' => 0, + 'PUT' => 0, + 'DELETE' => 0, + 'CONNECT' => 0, + 'OPTIONS' => 0, + 'PATCH' => 0, + refer => 0, + no_refer => 0, + user => 0, + no_user => 0, + bytes => 0, + bytes_min => 0, + bytes_max => 0, + bytes_range => 0, + bytes_mean => 0, + bytes_median => 0, + bytes_mode => 0, + http1_0 => 0, + http1_1 => 0, + http2 => 0, + http3 => 0, + '1xx' => 0, + '100' => 0, + '101' => 0, + '102' => 0, + '103' => 0, + '2xx' => 0, + '200' => 0, + '201' => 0, + '202' => 0, + '203' => 0, + '204' => 0, + '205' => 0, + '206' => 0, + '207' => 0, + '208' => 0, + '218' => 0, + '226' => 0, + '3xx' => 0, + '301' => 0, + '302' => 0, + '303' => 0, + '304' => 0, + '305' => 0, + '306' => 0, + '307' => 0, + '308' => 0, + '4xx' => 0, + '400' => 0, + '401' => 0, + '402' => 0, + '403' => 0, + '404' => 0, + '405' => 0, + '406' => 0, + '407' => 0, + '408' => 0, + '409' => 0, + '410' => 0, + '411' => 0, + '412' => 0, + '413' => 0, + '414' => 0, + '415' => 0, + '416' => 0, + '417' => 0, + '419' => 0, + '420' => 0, + '421' => 0, + '422' => 0, + '423' => 0, + '424' => 0, + '425' => 0, + '429' => 0, + '431' => 0, + '444' => 0, + '451' => 0, + '494' => 0, + '495' => 0, + '496' => 0, + '497' => 0, + '499' => 0, + '5xx' => 0, + '500' => 0, + '501' => 0, + '502' => 0, + '503' => 0, + '504' => 0, + '505' => 0, + '506' => 0, + '507' => 0, + '508' => 0, + '509' => 0, + '510' => 0, + '511' => 0, + size => 0, + error_size => 0, + }, + logs => { + + }, +}; +my @bytes_total; + +foreach my $log_name ( keys( %{ $config->{access} } ) ) { + my @bytes_log; + my $new_entry = { + GET => 0, + HEAD => 0, + POST => 0, + PUT => 0, + DELETE => 0, + CONNECT => 0, + OPTIONS => 0, + PATCH => 0, + refer => 0, + no_refer => 0, + user => 0, + no_user => 0, + bytes => 0, + bytes_min => 0, + bytes_max => 0, + bytes_range => 0, + bytes_mean => 0, + bytes_median => 0, + bytes_mode => 0, + http1_0 => 0, + http1_1 => 0, + http2 => 0, + http3 => 0, + '1xx' => 0, + '100' => 0, + '101' => 0, + '102' => 0, + '103' => 0, + '2xx' => 0, + '200' => 0, + '201' => 0, + '202' => 0, + '203' => 0, + '204' => 0, + '205' => 0, + '206' => 0, + '207' => 0, + '208' => 0, + '218' => 0, + '226' => 0, + '3xx' => 0, + '301' => 0, + '302' => 0, + '303' => 0, + '304' => 0, + '305' => 0, + '306' => 0, + '307' => 0, + '308' => 0, + '4xx' => 0, + '400' => 0, + '401' => 0, + '402' => 0, + '403' => 0, + '404' => 0, + '405' => 0, + '406' => 0, + '407' => 0, + '408' => 0, + '409' => 0, + '410' => 0, + '411' => 0, + '412' => 0, + '413' => 0, + '414' => 0, + '415' => 0, + '416' => 0, + '417' => 0, + '419' => 0, + '420' => 0, + '421' => 0, + '422' => 0, + '423' => 0, + '424' => 0, + '425' => 0, + '429' => 0, + '431' => 0, + '444' => 0, + '451' => 0, + '494' => 0, + '495' => 0, + '496' => 0, + '497' => 0, + '499' => 0, + '5xx' => 0, + '500' => 0, + '501' => 0, + '502' => 0, + '503' => 0, + '504' => 0, + '505' => 0, + '506' => 0, + '507' => 0, + '508' => 0, + '509' => 0, + '510' => 0, + '511' => 0, + size => 0, + error_size => 0, + }; + + eval { + my ( $dev, $ino, $mode, $nlink, $uid, $gid, $rdev, $size, $atime, $mtime, $ctime, $blksize, $blocks ) + = stat( $config->{access}{$log_name} ); + $new_entry->{size} = $size; + $data->{totals}{size} = $data->{totals}{size} + $size; + + if ( defined( $config->{errors}{$log_name} ) ) { + if ( -f $config->{errors}{$log_name} ) { + ( $dev, $ino, 
$mode, $nlink, $uid, $gid, $rdev, $size, $atime, $mtime, $ctime, $blksize, $blocks ) + = stat( $config->{errors}{$log_name} ); + $new_entry->{error_size} = $size; + $data->{totals}{error_size} = $data->{totals}{error_size} + $size; + } + } + }; + + eval { + my $bw = File::ReadBackwards->new( $config->{access}{$log_name} ); + my $line = $bw->readline; + my $process_log = 1; + while ( $process_log && defined($line) ) { + my $parsed = parse($line); + + # if the timestamp is 0 we failed to parse the log time... don't process this entry + if ( $parsed->{timestamp} < $target_time ) { + # if true, then don't continue processing this log file as we are now before the target time + $process_log = 0; + } else { + if ( defined( $parsed->{bytes} ) && $parsed->{bytes} =~ /^[0-9]+$/ ) { + $data->{totals}{bytes} += $parsed->{bytes}; + $new_entry->{bytes} += $parsed->{bytes}; + push( @bytes_total, $parsed->{bytes} ); + push( @bytes_log, $parsed->{bytes} ); + } + } + + if ( defined( $parsed->{proto} ) && defined( $new_entry->{ $parsed->{proto} } ) ) { + $new_entry->{ $parsed->{proto} }++; + $data->{totals}{ $parsed->{proto} }++; + } + + if ( defined( $parsed->{method} ) && defined( $new_entry->{ $parsed->{method} } ) ) { + $new_entry->{ $parsed->{method} }++; + $data->{totals}{ $parsed->{method} }++; + } + + if ( defined( $parsed->{code} ) ) { + if ( defined( $new_entry->{ $parsed->{code} } ) ) { + $new_entry->{ $parsed->{code} }++; + $data->{totals}{ $parsed->{code} }++; + } + if ( $parsed->{code} =~ /^1\d\d$/ ) { + $new_entry->{'1xx'}++; + $data->{totals}{'1xx'}++; + } elsif ( $parsed->{code} =~ /^2\d\d$/ ) { + $new_entry->{'2xx'}++; + $data->{totals}{'2xx'}++; + } elsif ( $parsed->{code} =~ /^3\d\d$/ ) { + $new_entry->{'3xx'}++; + $data->{totals}{'3xx'}++; + } elsif ( $parsed->{code} =~ /^4\d\d$/ ) { + $new_entry->{'4xx'}++; + $data->{totals}{'4xx'}++; + } elsif ( $parsed->{code} =~ /^5\d\d$/ ) { + $new_entry->{'5xx'}++; + $data->{totals}{'5xx'}++; + } + } ## end if ( defined( $parsed->{code} ) ) + + if ( defined( $parsed->{proto} ) ) { + if ( $parsed->{proto} eq 'HTTP/1.0' ) { + $new_entry->{'http1_0'}++; + $data->{totals}{'http1_0'}++; + } elsif ( $parsed->{proto} eq 'HTTP/1.1' ) { + $new_entry->{'http1_1'}++; + $data->{totals}{'http1_1'}++; + } elsif ( $parsed->{proto} eq 'HTTP/2' ) { + $new_entry->{'http2'}++; + $data->{totals}{'http2'}++; + } elsif ( $parsed->{proto} eq 'HTTP/3' ) { + $new_entry->{'http3'}++; + $data->{totals}{'http3'}++; + } + } ## end if ( defined( $parsed->{proto} ) ) + + if ( defined( $parsed->{user} ) ) { + if ( $parsed->{user} eq '-' ) { + $new_entry->{'no_user'}++; + $data->{totals}{'no_user'}++; + } else { + $new_entry->{'user'}++; + $data->{totals}{'user'}++; + } + } + + if ( defined( $parsed->{refer} ) ) { + if ( $parsed->{refer} eq '-' ) { + $new_entry->{'no_refer'}++; + $data->{totals}{'no_refer'}++; + } else { + $new_entry->{'refer'}++; + $data->{totals}{'refer'}++; + } + } + + if ($process_log) { + $line = $bw->readline; + } + } ## end while ( $process_log && defined($line) ) + }; + if ( defined( $bytes_log[0] ) ) { + $new_entry->{bytes_min} = min(@bytes_log); + $new_entry->{bytes_max} = max(@bytes_log); + $new_entry->{bytes_mean} = mean(@bytes_log); + $new_entry->{bytes_median} = median(@bytes_log); + $new_entry->{bytes_mode} = mode(@bytes_log); + $new_entry->{bytes_range} = range(@bytes_log); + } + $data->{logs}{$log_name} = $new_entry; + +} ## end foreach my $log_name ( keys( %{ $config->{access...}})) + +if ( defined( $bytes_total[0] ) ) { +
$data->{totals}{bytes_min} = min(@bytes_total); + $data->{totals}{bytes_max} = max(@bytes_total); + $data->{totals}{bytes_mean} = mean(@bytes_total); + $data->{totals}{bytes_median} = median(@bytes_total); + $data->{totals}{bytes_mode} = mode(@bytes_total); + $data->{totals}{bytes_range} = range(@bytes_total); +} + +#add the data has to the return hash +$to_return->{data} = $data; + +#finally render the JSON +my $raw_json = encode_json($to_return); +if ($write) { + write_file( $cache_base, $raw_json ); + # compress and write to the cache file for it + my $compressed_string; + gzip \$raw_json => \$compressed_string; + my $compressed = encode_base64($compressed_string); + $compressed =~ s/\n//g; + $compressed = $compressed . "\n"; + my $print_compressed = 0; + write_file( $cache_base . '.snmp', $compressed ); + + if ( !$if_write_be_quiet ) { + print $raw_json; + } +} else { + if ( !$compress ) { + print $raw_json. "\n"; + exit; + } + + # compress and write to the cache file for it + my $compressed_string; + gzip \$raw_json => \$compressed_string; + my $compressed = encode_base64($compressed_string); + $compressed =~ s/\n//g; + $compressed = $compressed . "\n"; + print $compressed; +} ## end else [ if ($write) ] From a9559eb6ec79295a32267c95678cfb0aafeed45c Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Sat, 27 Jul 2024 10:54:19 -0500 Subject: [PATCH 313/332] http_access_log_combined: add 300, 426, and 428 response codes and a few POD fixes (#542) --- snmp/http_access_log_combined | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/snmp/http_access_log_combined b/snmp/http_access_log_combined index af7b841a4..dfb8cfc12 100755 --- a/snmp/http_access_log_combined +++ b/snmp/http_access_log_combined @@ -20,7 +20,7 @@ http_access_log_combined --version|-v =head1 SNMPD CONFIG - extend http_access_log_combined /usr/local/etc/snmp/poudriere -b + extend http_access_log_combined /usr/local/etc/snmp/http_access_log_combined -b or if using cron... @@ -28,7 +28,7 @@ or if using cron... 4/5 * * * * root /usr/local/etc/snmp/http_access_log_combined -b -q # snmpd.conf - extend poudriere cat /var/cache/http_access_log_combined.json.snmp + extend http_access_log_combined cat /var/cache/http_access_log_combined.json.snmp =head1 FLAGS @@ -321,6 +321,7 @@ my $data = { '218' => 0, '226' => 0, '3xx' => 0, + '300' => 0, '301' => 0, '302' => 0, '303' => 0, @@ -355,6 +356,8 @@ my $data = { '423' => 0, '424' => 0, '425' => 0, + '426' => 0, + '428' => 0, '429' => 0, '431' => 0, '444' => 0, @@ -430,6 +433,7 @@ foreach my $log_name ( keys( %{ $config->{access} } ) ) { '218' => 0, '226' => 0, '3xx' => 0, + '300' => 0, '301' => 0, '302' => 0, '303' => 0, @@ -464,6 +468,8 @@ foreach my $log_name ( keys( %{ $config->{access} } ) ) { '423' => 0, '424' => 0, '425' => 0, + '426' => 0, + '428' => 0, '429' => 0, '431' => 0, '444' => 0, From c4f9f35c7e4308849e561d0b11db4fe0770d8220 Mon Sep 17 00:00:00 2001 From: "Zane C. 
Bowers-Hadley" Date: Sun, 28 Jul 2024 12:48:44 -0500 Subject: [PATCH 314/332] add extend for Samba (#543) * start work on samba * finalize Samba extend --- snmp/samba | 444 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 444 insertions(+) create mode 100755 snmp/samba diff --git a/snmp/samba b/snmp/samba new file mode 100755 index 000000000..418335974 --- /dev/null +++ b/snmp/samba @@ -0,0 +1,444 @@ +#!/usr/bin/env perl + +=head1 NAME + +samba - LibreNMS JSON style SNMP extend for monitoring Samba + +=head1 VERSION + +0.1.0 + +=head1 SYNOPSIS + +samba B<-w> [B<-o> ] [B<-q>] + +samba [<-b>] [B<-d>] + +samba --help|-h + +samba --version|-v + +=head1 SNMPD CONFIG + + extend samba /usr/local/etc/snmp/samba -b -a -z + +or if using cron... + + # cron + 4/5 * * * * root /usr/local/etc/snmp/samba -b -a -z -q + + # snmpd.conf + extend samba cat /var/cache/samba.json.snmp + +=head1 FLAGS + +=head2 -b + +Encapsulate the result in GZip+Base64 if -w is not used. + +=head2 -q + +If -w is specified, do not print the results to stdout. + +=head2 -w + +Write the results out. + +=head2 -o + +Where to write the results to. Defaults to '/var/cache/samba.json', +meaning it will be written out to the two locations. + + /var/cache/samba.json + /var/cache/samba.json.snmp + +The later is for use with returning data for SNMP. Will be compressed +if possible. + +=head1 REQUIREMENTS + + File::Slurp + MIME::Base64 + JSON + + # FreeBSD + pkg add p5-File-Slurp p5-MIME-Base64 p5-JSON + + # Debian + apt-get install libfile-slurp-perl libmime-base64-perl libjson-perl + +=cut + +use strict; +use warnings; +use Getopt::Long; +use File::Slurp; +use MIME::Base64; +use IO::Compress::Gzip qw(gzip $GzipError); +use Pod::Usage; +use JSON; + +#the version of returned data +my $VERSION = 1; + +# ensure sbin is in the path +$ENV{PATH} = $ENV{PATH} . 
':/sbin:/usr/sbin:/usr/local/sbin:/bin:/usr/bin:/usr/local/bin'; + +my $pretty; +my $cache_base = '/var/cache/samba.json'; +my $write; +my $compress; +my $version; +my $help; +my $if_write_be_quiet; +GetOptions( + b => \$compress, + h => \$help, + help => \$help, + 'o=s' => \$cache_base, + q => \$if_write_be_quiet, + v => \$version, + w => \$write, + version => \$version, +); + +if ($version) { + pod2usage( -exitval => 255, -verbose => 99, -sections => qw(VERSION), -output => \*STDOUT, ); + exit 255; +} + +if ($help) { + pod2usage( -exitval => 255, -verbose => 2, -output => \*STDOUT, ); + exit 255; +} + +#the data to return +my $to_return = { + 'version' => $VERSION, + 'error' => '0', + 'errorString' => '', +}; +my $data = { + general => { + connect_count => undef, + disconnect_count => undef, + idle_count => undef, + cpu_user_time => undef, + cpu_system_time => undef, + request_count => undef, + push_sec_ctx_count => undef, + push_sec_ctx_time => undef, + set_sec_ctx_count => undef, + set_sec_ctx_time => undef, + set_root_sec_ctx_count => undef, + set_root_sec_ctx_time => undef, + pop_sec_ctx_count => undef, + pop_sec_ctx_time => undef, + syscall_count => 0, + syscall_time => 0, + syscall_idle => 0, + syscall_bytes => 0, + syscall_read_bytes => 0, + syscall_read_time => 0, + syscall_read_idle => 0, + syscall_read_count => 0, + syscall_write_bytes => 0, + syscall_write_count => 0, + syscall_write_time => 0, + syscall_write_idle => 0, + syscall_other_count => 0, + syscall_other_time => 0, + acl_count => 0, + acl_time => 0, + acl_get_count => 0, + acl_get_time => 0, + acl_set_count => 0, + acl_set_time => 0, + statcache_lookups_count => undef, + statcache_misses_count => undef, + statcache_hits_count => undef, + smb_count => 0, + smb_time => 0, + smb_read_count => 0, + smb_read_time => 0, + smb_write_count => 0, + smb_write_time => 0, + smb_other_count => 0, + smb_other_time => 0, + smb2_count => 0, + smb2_time => 0, + smb2_bytes => 0, + smb2_idle => 0, + smb2_read_count => 0, + smb2_read_time => 0, + smb2_read_bytes => 0, + smb2_read_idle => 0, + smb2_write_count => 0, + smb2_write_time => 0, + smb2_write_bytes => 0, + smb2_write_idle => 0, + smb2_other_count => 0, + smb2_other_time => 0, + trans2_time => 0, + trans2_count => 0, + nt_transact_time => 0, + nt_transact_count => 0, + }, + procs => [], + shares => [], +}; + +### +### +### get profiling info via smbstatus -P +### +### +my @profiling_lines = grep( !/^\*/, split( /\n/, `smbstatus -P 2> /dev/null` ) ); +foreach my $line (@profiling_lines) { + $line =~ s/\s//g; + my @line_split = split( /\:/, $line ); + if ( $line_split[1] =~ /^[0-9]+$/ ) { + if ( $line_split[0] =~ /^syscall_/ ) { + if ( $line_split[0] =~ /read/ || $line_split[0] =~ /recv/ ) { + if ( $line_split[0] =~ /count/ ) { + $data->{general}{syscall_count} = $data->{general}{syscall_count} + $line_split[1]; + $data->{general}{syscall_read_count} = $data->{general}{syscall_read_count} + $line_split[1]; + } elsif ( $line_split[0] =~ /bytes/ ) { + $data->{general}{syscall_bytes} = $data->{general}{syscall_bytes} + $line_split[1]; + $data->{general}{syscall_read_bytes} = $data->{general}{syscall_read_bytes} + $line_split[1]; + } elsif ( $line_split[0] =~ /time/ ) { + $data->{general}{syscall_time} = $data->{general}{syscall_time} + $line_split[1]; + $data->{general}{syscall_read_time} = $data->{general}{syscall_read_time} + $line_split[1]; + } elsif ( $line_split[0] =~ /idle/ ) { + $data->{general}{syscall_idle} = $data->{general}{syscall_idle} + $line_split[1]; + 
$data->{general}{syscall_read_idle} = $data->{general}{syscall_read_idle} + $line_split[1]; + } + } elsif ( $line_split[0] =~ /write/ || $line_split[0] =~ /send/ ) { + if ( $line_split[0] =~ /count/ ) { + $data->{general}{syscall_count} = $data->{general}{syscall_count} + $line_split[1]; + $data->{general}{syscall_write_count} = $data->{general}{syscall_write_count} + $line_split[1]; + } elsif ( $line_split[0] =~ /bytes/ ) { + $data->{general}{syscall_bytes} = $data->{general}{syscall_bytes} + $line_split[1]; + $data->{general}{syscall_write_bytes} = $data->{general}{syscall_write_bytes} + $line_split[1]; + } elsif ( $line_split[0] =~ /time/ ) { + $data->{general}{syscall_time} = $data->{general}{syscall_time} + $line_split[1]; + $data->{general}{syscall_write_time} = $data->{general}{syscall_write_time} + $line_split[1]; + } + } elsif ( $line_split[0] =~ /idle/ ) { + $data->{general}{syscall_idle} = $data->{general}{syscall_idle} + $line_split[1]; + $data->{general}{syscall_write_idle} = $data->{general}{syscall_write_idle} + $line_split[1]; + } else { + if ( $line_split[0] =~ /count/ ) { + $data->{general}{syscall_count} = $data->{general}{syscall_count} + $line_split[1]; + $data->{general}{syscall_other_count} = $data->{general}{syscall_other_count} + $line_split[1]; + } elsif ( $line_split[0] =~ /time/ ) { + $data->{general}{syscall_time} = $data->{general}{syscall_time} + $line_split[1]; + $data->{general}{syscall_other_time} = $data->{general}{syscall_other_time} + $line_split[1]; + } elsif ( $line_split[0] =~ /idle/ ) { + $data->{general}{syscall_idle} = $data->{general}{syscall_idle} + $line_split[1]; + $data->{general}{syscall_other_idle} = $data->{general}{syscall_other_idle} + $line_split[1]; + } + } ## end else [ if ( $line_split[0] =~ /read/ || $line_split...)] + } elsif ( $line_split[0] =~ /^[fgs]+et_nt_acl/ ) { + if ( $line_split[0] =~ /get/ ) { + if ( $line_split[0] =~ /count/ ) { + $data->{general}{acl_count} = $data->{general}{acl_count} + $line_split[1]; + $data->{general}{acl_get_count} = $data->{general}{acl_get_count} + $line_split[1]; + } elsif ( $line_split[0] =~ /time/ ) { + $data->{general}{acl_time} = $data->{general}{acl_time} + $line_split[1]; + $data->{general}{acl_get_time} = $data->{general}{acl_get_time} + $line_split[1]; + } + } elsif ( $line_split[0] =~ /set/ ) { + if ( $line_split[0] =~ /count/ ) { + $data->{general}{acl_count} = $data->{general}{acl_count} + $line_split[1]; + $data->{general}{acl_set_count} = $data->{general}{acl_set_count} + $line_split[1]; + } elsif ( $line_split[0] =~ /time/ ) { + $data->{general}{acl_time} = $data->{general}{acl_time} + $line_split[1]; + $data->{general}{acl_set_time} = $data->{general}{acl_set_time} + $line_split[1]; + } + } + } elsif ( $line_split[0] =~ /^SMB/ ) { + # Samba apparent does not have byte counters for these... 
that said looks like this one is not really used any more + if ( $line_split[0] =~ /read/ ) { + if ( $line_split[0] =~ /count/ ) { + $data->{general}{smb_count} = $data->{general}{smb_count} + $line_split[1]; + $data->{general}{smb_read_count} = $data->{general}{smb_read_count} + $line_split[1]; + } elsif ( $line_split[0] =~ /time/ ) { + $data->{general}{smb_time} = $data->{general}{smb_time} + $line_split[1]; + $data->{general}{smb_read_time} = $data->{general}{smb_read_time} + $line_split[1]; + } + } elsif ( $line_split[0] =~ /write/ ) { + if ( $line_split[0] =~ /count/ ) { + $data->{general}{smb_count} = $data->{general}{smb_count} + $line_split[1]; + $data->{general}{smb_write_count} = $data->{general}{smb_write_count} + $line_split[1]; + } elsif ( $line_split[0] =~ /time/ ) { + $data->{general}{smb_time} = $data->{general}{smb_time} + $line_split[1]; + $data->{general}{smb_write_time} = $data->{general}{smb_write_time} + $line_split[1]; + } + } else { + if ( $line_split[0] =~ /count/ ) { + $data->{general}{smb_count} = $data->{general}{smb_count} + $line_split[1]; + $data->{general}{smb_other_count} = $data->{general}{smb_other_count} + $line_split[1]; + } elsif ( $line_split[0] =~ /time/ ) { + $data->{general}{smb_time} = $data->{general}{smb_time} + $line_split[1]; + $data->{general}{smb_other_time} = $data->{general}{smb_other_time} + $line_split[1]; + } + } + } elsif ( $line_split[0] =~ /^Trans2_/ ) { + # Samba does not appear to have any that are read/write for this really... also no bytes coutners + if ( $line_split[0] =~ /count/ ) { + $data->{general}{trans2_count} = $data->{general}{trans2_count} + $line_split[1]; + } elsif ( $line_split[0] =~ /time/ ) { + $data->{general}{trans2_time} = $data->{general}{trans2_time} + $line_split[1]; + } + } elsif ( $line_split[0] =~ /^NT_transact_/ ) { + if ( $line_split[0] =~ /count/ ) { + $data->{general}{nt_transact_count} = $data->{general}{nt_transact_count} + $line_split[1]; + } elsif ( $line_split[0] =~ /time/ ) { + $data->{general}{nt_transact_time} = $data->{general}{nt_transact_time} + $line_split[1]; + } + } elsif ( $line_split[0] =~ /^smb2_/ ) { + if ( $line_split[0] =~ /read/ || $line_split[0] =~ /recv/ ) { + if ( $line_split[0] =~ /count/ ) { + $data->{general}{smb2_count} = $data->{general}{smb2_count} + $line_split[1]; + $data->{general}{smb2_read_count} = $data->{general}{smb2_read_count} + $line_split[1]; + } elsif ( $line_split[0] =~ /bytes/ ) { + $data->{general}{smb2_bytes} = $data->{general}{smb2_bytes} + $line_split[1]; + $data->{general}{smb2_read_bytes} = $data->{general}{smb2_read_bytes} + $line_split[1]; + } elsif ( $line_split[0] =~ /time/ ) { + $data->{general}{smb2_time} = $data->{general}{smb2_time} + $line_split[1]; + $data->{general}{smb2_read_time} = $data->{general}{smb2_read_time} + $line_split[1]; + } elsif ( $line_split[0] =~ /idle/ ) { + $data->{general}{smb2_idle} = $data->{general}{smb2_idle} + $line_split[1]; + $data->{general}{smb2_read_idle} = $data->{general}{smb2_read_idle} + $line_split[1]; + } + } elsif ( $line_split[0] =~ /write/ || $line_split[0] =~ /send/ ) { + if ( $line_split[0] =~ /count/ ) { + $data->{general}{smb2_count} = $data->{general}{smb2_count} + $line_split[1]; + $data->{general}{smb2_write_count} = $data->{general}{smb2_write_count} + $line_split[1]; + } elsif ( $line_split[0] =~ /bytes/ ) { + $data->{general}{smb2_bytes} = $data->{general}{smb2_bytes} + $line_split[1]; + $data->{general}{smb2_write_bytes} = $data->{general}{smb2_write_bytes} + $line_split[1]; + } elsif ( 
$line_split[0] =~ /time/ ) { + $data->{general}{smb2_time} = $data->{general}{smb2_time} + $line_split[1]; + $data->{general}{smb2_write_time} = $data->{general}{smb2_write_time} + $line_split[1]; + } + } elsif ( $line_split[0] =~ /idle/ ) { + $data->{general}{smb2_idle} = $data->{general}{smb2_idle} + $line_split[1]; + $data->{general}{smb2_write_idle} = $data->{general}{smb2_write_idle} + $line_split[1]; + } else { + if ( $line_split[0] =~ /count/ ) { + $data->{general}{smb2_count} = $data->{general}{smb2_count} + $line_split[1]; + $data->{general}{smb2_other_count} = $data->{general}{smb2_other_count} + $line_split[1]; + } elsif ( $line_split[0] =~ /time/ ) { + $data->{general}{smb2_time} = $data->{general}{smb2_time} + $line_split[1]; + $data->{general}{smb2_other_time} = $data->{general}{smb2_other_time} + $line_split[1]; + } elsif ( $line_split[0] =~ /idle/ ) { + $data->{general}{smb2_idle} = $data->{general}{smb2_idle} + $line_split[1]; + $data->{general}{smb2_other_idle} = $data->{general}{smb2_other_idle} + $line_split[1]; + } + } ## end else [ if ( $line_split[0] =~ /read/ || $line_split...)] + } else { + $data->{general}{ $line_split[0] } = $line_split[1]; + } + } ## end if ( $line_split[1] =~ /^[0-9]+$/ ) +} ## end foreach my $line (@profiling_lines) + +### +### +### get process info via smbstatus -p +### +### +my @process_lines = grep( /^\d/, split( /\n/, `smbstatus -p 2> /dev/null` ) ); +foreach my $line (@process_lines) { +# lines look like this +# 5420 bar foo 192.168.1.2 (ipv4:192.168.1.2:497) SMB3_11 - partial(AES-128-CMAC) + my $new_proc = {}; + my $client_info; + ( + $new_proc->{pid}, $new_proc->{user}, $new_proc->{group}, $new_proc->{machine}, + $client_info, $new_proc->{version}, $new_proc->{encryption}, $new_proc->{signing}, + ) = split( /\s+/, $line, 8 ); + $client_info =~ s/^\(//; + $client_info =~ s/\)$//; + $new_proc->{ip} = $client_info; + $new_proc->{ip} =~ s/^[a-zA-Z0-9]+\://; + $new_proc->{ip} =~ s/:\d+$//; + $new_proc->{ip} =~ s/[\[\]]//g; + $new_proc->{port} = $client_info; + $new_proc->{port} =~ s/.*\]//g; + $new_proc->{port} =~ s/.*\://g; + + push( @{ $data->{procs} }, $new_proc ); +} ## end foreach my $line (@process_lines) + +### +### +### get share info via smbstatus -S +### +### +my @share_lines = grep( /^\w+\s+\d+/, split( /\n/, `smbstatus -S 2> /dev/null` ) ); +foreach my $line (@share_lines) { + # lines look like... 
sometimes spaces on the end + # foo 5423 192.168.1.2 Tue Jul 16 02:39:53 2024 CDT - - + my $new_share = {}; + my $rest_of_line; + ( $new_share->{service}, $new_share->{pid}, $new_share->{machine}, $rest_of_line ) = split( /\s+/, $line, 4 ); + $rest_of_line =~ s/\s+$//; + # reverse it to make parsing out the date easy + $rest_of_line = reverse $rest_of_line; + ( $new_share->{signing}, $new_share->{encryption}, $new_share->{connected_at} ) = split( /\s+/, $rest_of_line, 3 ); + $new_share->{signing} = reverse $new_share->{signing}; + $new_share->{encryption} = reverse $new_share->{encryption}; + $new_share->{connected_at} = reverse $new_share->{connected_at}; + + push( @{ $data->{shares} }, $new_share ); +} ## end foreach my $line (@share_lines) + +### +### +### get locks info via smbstatus -L +### +### +my @lock_lines = grep( /^\d+\s+/, split( /\n/, `smbstatus -L 2> /dev/null` ) ); +$data->{general}{lock_count} = $#lock_lines + 1; + +### +### +### finalize it +### +### + +#add the data has to the return hash +$to_return->{data} = $data; + +#finally render the JSON +my $raw_json = encode_json($to_return); +if ($write) { + write_file( $cache_base, $raw_json ); + # compress and write to the cache file for it + my $compressed_string; + gzip \$raw_json => \$compressed_string; + my $compressed = encode_base64($compressed_string); + $compressed =~ s/\n//g; + $compressed = $compressed . "\n"; + my $print_compressed = 0; + write_file( $cache_base . '.snmp', $compressed ); + + if ( !$if_write_be_quiet ) { + print $raw_json; + } +} else { + if ( !$compress ) { + print $raw_json. "\n"; + exit; + } + + # compress and write to the cache file for it + my $compressed_string; + gzip \$raw_json => \$compressed_string; + my $compressed = encode_base64($compressed_string); + $compressed =~ s/\n//g; + $compressed = $compressed . "\n"; + print $compressed; +} ## end else [ if ($write) ] From 6d406eae05414c0f4898e7b8e11bdd7245c15f3d Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Tue, 30 Jul 2024 13:29:59 -0500 Subject: [PATCH 315/332] snmp/samba: for generic vars, ensure we have a value (#544) --- snmp/samba | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/snmp/samba b/snmp/samba index 418335974..f3c6428c8 100755 --- a/snmp/samba +++ b/snmp/samba @@ -339,7 +339,9 @@ foreach my $line (@profiling_lines) { } } ## end else [ if ( $line_split[0] =~ /read/ || $line_split...)] } else { - $data->{general}{ $line_split[0] } = $line_split[1]; + if (defined($line_split[1])) { + $data->{general}{ $line_split[0] } = $line_split[1]; + } } } ## end if ( $line_split[1] =~ /^[0-9]+$/ ) } ## end foreach my $line (@profiling_lines) From b9795274b79cc99b9217495c1a7c01ad3b4ec8b1 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Tue, 30 Jul 2024 13:30:41 -0500 Subject: [PATCH 316/332] http_access_log_combined: use env to call perl (#545) --- snmp/http_access_log_combined | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/http_access_log_combined b/snmp/http_access_log_combined index dfb8cfc12..93c82e89a 100755 --- a/snmp/http_access_log_combined +++ b/snmp/http_access_log_combined @@ -1,4 +1,4 @@ -#!/usr/local/bin/perl +#!/usr/bin/env perl =head1 NAME From 5b59645f5d376d06f3ff1e1e0bee8177b37b72ae Mon Sep 17 00:00:00 2001 From: Tr4sK Date: Tue, 1 Oct 2024 16:37:34 +0200 Subject: [PATCH 317/332] Gather vlan names from /interface vlan (#524) thanks! 
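As an illustration (interface and VLAN names invented for the example), a tagged entry that was previously emitted as `100,bridge1` is now emitted as `100,bridge1,vlan100`, the third field being the name pulled from the matching /interface vlan entry.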
--- snmp/Routeros/LNMS_vlans.scr | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/snmp/Routeros/LNMS_vlans.scr b/snmp/Routeros/LNMS_vlans.scr index 3ac920ed5..dd12825de 100644 --- a/snmp/Routeros/LNMS_vlans.scr +++ b/snmp/Routeros/LNMS_vlans.scr @@ -9,13 +9,19 @@ :foreach i in [/interface bridge vlan find] do={ :local intf [/interface bridge vlan get $i bridge] :local vlid [/interface bridge vlan get $i vlan-ids] + :local vname + + :foreach i in [/interface vlan find where vlan-id=$vlid] do={ + :local intname [/interface vlan get $i name] + :set $vname ($intname) + } :foreach t in [/interface bridge vlan get $i tagged] do={ - :set $vlanst ($vlanst, "$vlid,$t") + :set $vlanst ($vlanst, "$vlid,$t,$vname") } :foreach u in [/interface bridge vlan get $i current-untagged] do={ - :set $vlansu ($vlansu, "$vlid,$u") + :set $vlansu ($vlansu, "$vlid,$u,$vname") } :foreach u in [/interface bridge port find where bridge=$intf and pvid=$vlid] do={ @@ -28,7 +34,7 @@ } } :if ( $fl != 1 ) do={ - :set $vlansu ($vlansu, "$vlid,$iu") + :set $vlansu ($vlansu, "$vlid,$iu,$vname") } } } @@ -36,6 +42,7 @@ :foreach vl in [/interface vlan find ] do={ :local intf [/interface vlan get $vl interface] :local vlid [/interface vlan get $vl vlan-id] + :local vname [/interface vlan get $vl name] :local fl 0 :foreach tmp in $vlanst do={ @@ -45,7 +52,7 @@ } } :if ( $fl != 1 ) do={ - :set $vlanst ($vlanst, "$vlid,$intf") + :set $vlanst ($vlanst, "$vlid,$intf,$vname") } } From bb7385a6849368f6557803e092edad5b5dd435d9 Mon Sep 17 00:00:00 2001 From: samburney Date: Wed, 2 Oct 2024 00:12:03 +0930 Subject: [PATCH 318/332] Update powerdns.py to use 'list' instead of 'show *' (#514) thanks! --- snmp/powerdns.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/powerdns.py b/snmp/powerdns.py index 088273da7..74e9517b0 100755 --- a/snmp/powerdns.py +++ b/snmp/powerdns.py @@ -6,7 +6,7 @@ pdnscontrol = "/usr/bin/pdns_control" process = subprocess.Popen( - [pdnscontrol, "show", "*"], stdout=subprocess.PIPE, stderr=subprocess.PIPE + [pdnscontrol, "list"], stdout=subprocess.PIPE, stderr=subprocess.PIPE ) input = process.communicate() stdout = input[0].decode() From 11092a2ea89217519a67aa27e32fe6af79caf974 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Wed, 2 Oct 2024 10:56:22 -0500 Subject: [PATCH 319/332] opensearch extend update add -w (#547) * rework to use pod2doc and do compression * tweak this a bit * derp, fix the compress stuff a bit more * finish some doc stuff --- snmp/opensearch | 197 +++++++++++++++++++++++++++++++++++------------- 1 file changed, 146 insertions(+), 51 deletions(-) diff --git a/snmp/opensearch b/snmp/opensearch index 11b57edd4..7341072ea 100755 --- a/snmp/opensearch +++ b/snmp/opensearch @@ -1,6 +1,6 @@ #!/usr/bin/env perl -#Copyright (c) 2023, Zane C. Bowers-Hadley +#Copyright (c) 2024, Zane C. Bowers-Hadley #All rights reserved. # #Redistribution and use in source and binary forms, with or without modification, @@ -23,63 +23,143 @@ #OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF #THE POSSIBILITY OF SUCH DAMAGE. -=for comment +use warnings; +use strict; + +=pod + +=head1 NAME + +opensearch - LibreNMS JSON SNMP extend for gathering backups for borg + +=head1 VERSION + +0.1.0 + +=cut -Add this to snmpd.conf as below and restart snmpd. 
+our $VERSION = '0.1.0';
+
+=head1 SYNOPSIS
+
+opensearch [B<-a> <auth token path>] [B<-c> <CA file path>] [B<-h> <host>] [B<-p> <port>] [B<-S>]
+[B<-I>] [B<-P>] [B<-S>] [B<-w>] [B<-o> <output base>]
+
+opensearch [B<--help>]
+
+opensearch [B<--version>]
+
+=head1 DESCRIPTION
+
+Needs to be enabled in snmpd.conf like below.
 
 	extend opensearch /etc/snmp/extends/opensearch
 
+If you have issues with it taking too long to poll and
+occasionally timing out, you can set it up in cron like this.
+
+    */5 * * * * /etc/snmp/extends/opensearch -q -w
+
+And then in snmpd.conf like below.
+
+    extend opensearch /bin/cat /var/cache/opensearch_extend.json.snmp
+
+Installing the depends can be done like below.
+
+    # FreeBSD
+    pkg install p5-JSON p5-File-Slurp p5-MIME-Base64 p5-libwww p5-LWP-Protocol-https
+
+    # Debian
+    apt-get install libjson-perl libfile-slurp-perl liblwp-protocol-https-perl
+
+=head1 FLAGS
+
+=head2 -a
+
+Auth token path.
+
+=head2 -c
+
+CA file path.
+
+Default: empty
+
+=head2 -h
+
+The host to connect to.
+
+Default: 127.0.0.1
+
+=head2 -I
+
+Do not verify hostname (when used with -S).
+
+=head2 -o
+
+The base name for the output.
+
+Default: /var/cache/opensearch_extend.json
 
-	-a   Auth token path.
-	-c   CA file path.
-	     Default: empty
-	-h   The host to connect to.
-	     Default: 127.0.0.1
-	-p   The port to use.
-	     Default: 9200
-	-S   Use https instead of http.
-	-I   Do not verify hostname (when used with -S).
-	-P   Pretty print.
-	-S   Use HTTPS.
+=head2 -p
+
+The port to use.
+
+Default: 9200
+
+=head2 -P
+
+Pretty print.
+
+=head2 -q
+
+Do not print the output.
+
+Useful with -w.
+
+=head2 -S
+
+Use HTTPS.
 
 The last is only really relevant to the usage with SNMP.
 
+=head2 -w
+
+Write the results out to two files based on what is specified
+via -o.
+
+Default Raw JSON: /var/cache/opensearch_extend.json
+
+Default SNMP Return: /var/cache/opensearch_extend.json.snmp
+
 =cut
 
-use warnings;
-use strict;
 use Getopt::Std;
 use JSON;
 use LWP::UserAgent ();
+use File::Slurp;
+use Pod::Usage;
+use MIME::Base64;
+use IO::Compress::Gzip qw(gzip $GzipError);
 
 $Getopt::Std::STANDARD_HELP_VERSION = 1;
 
 sub main::VERSION_MESSAGE {
-	print "Elastic/Opensearch SNMP extend 0.0.0\n";
+	print 'opensearch LibreNMS extend version '.$VERSION."\n";
 }
 
 sub main::HELP_MESSAGE {
-	print "\n"
-		. "-a   Auth token path.\n"
-		. "-c   CA file path.\n"
-		. "-h   The host to connect to.\n"
-		. "     Default: 127.0.0.1\n"
-		. "-p   The port to use.\n"
-		. "     Default: 9200\n"
-		. "-S   Use https instead of http.\n"
-		. "-I   Do not verify hostname (when used with -S).\n"
-		.
"-P Pretty print.\n"; + pod2usage( -exitval => 255, -verbose => 2, -output => \*STDOUT, ); } -my $protocol = 'http'; -my $host = '127.0.0.1'; -my $port = 9200; -my $schema = 'http'; +my $protocol = 'http'; +my $host = '127.0.0.1'; +my $port = 9200; +my $schema = 'http'; +my $output_base = '/var/cache/opensearch_extend.json'; #gets the options my %opts; -getopts( 'a:c:h:p:PIS', \%opts ); +getopts( 'a:c:h:p:PISqo:w', \%opts ); if ( defined( $opts{h} ) ) { $host = $opts{h}; } @@ -89,6 +169,9 @@ if ( defined( $opts{p} ) ) { if ( $opts{S} ) { $schema = 'https'; } +if ( defined( $opts{o} ) ) { + $output_base = $opts{o}; +} my $auth_token; if ( defined( $opts{a} ) ) { @@ -124,12 +207,11 @@ my $stats_response = $ua->get($stats_url); if ( defined( $opts{c} ) ) { # set ca file - $ua->ssl_opts( SSL_ca_file => $opts{c}); + $ua->ssl_opts( SSL_ca_file => $opts{c} ); } -my $stats_response; if ( defined( $opts{a} ) ) { - $stats_response = $ua->get($stats_url, "Authorization" => $auth_token,); + $stats_response = $ua->get( $stats_url, "Authorization" => $auth_token, ); } else { $stats_response = $ua->get($stats_url); } @@ -146,8 +228,7 @@ if ( $stats_response->is_success ) { } exit; } -} -else { +} else { $to_return->{errorString} = 'Failed to get "' . $stats_url . '"... ' . $stats_response->status_line; $to_return->{error} = 1; print $json->encode($to_return); @@ -159,7 +240,7 @@ else { my $health_response; if ( defined( $opts{a} ) ) { - $health_response = $ua->get($health_url, "Authorization" => $auth_token,); + $health_response = $ua->get( $health_url, "Authorization" => $auth_token, ); } else { $health_response = $ua->get($health_url); } @@ -176,8 +257,7 @@ if ( $health_response->is_success ) { } exit; } -} -else { +} else { $to_return->{errorString} = 'Failed to get "' . $health_url . '"... ' . $health_response->status_line; $to_return->{error} = 1; print $json->encode($to_return); @@ -212,14 +292,11 @@ $to_return->{data}{c_act_shards_perc} = $health_json->{active_shards_percent_as # unknown = 3 if ( $health_json->{status} =~ /[Gg][Rr][Ee][Ee][Nn]/ ) { $to_return->{data}{status} = 0; -} -elsif ( $health_json->{status} =~ /[Yy][Ee][Ll][Ll][Oo][Ww]/ ) { +} elsif ( $health_json->{status} =~ /[Yy][Ee][Ll][Ll][Oo][Ww]/ ) { $to_return->{data}{status} = 1; -} -elsif ( $health_json->{status} =~ /[Rr][Ee][Dd]/ ) { +} elsif ( $health_json->{status} =~ /[Rr][Ee][Dd]/ ) { $to_return->{data}{status} = 2; -} -else { +} else { $to_return->{data}{status} = 3; } @@ -244,8 +321,7 @@ if ( defined( $stats_json->{_all}{total}{indexing}{is_throttled} ) && $stats_json->{_all}{total}{indexing}{is_throttled} eq 'true' ) { $to_return->{data}{ti_throttled} = 1; -} -else { +} else { $to_return->{data}{ti_throttled} = 0; } @@ -316,8 +392,27 @@ $to_return->{data}{trc_misses} = $stats_json->{_all}{total}{request_cache}{mi $to_return->{data}{tst_size} = $stats_json->{_all}{total}{store}{size_in_bytes}; $to_return->{data}{tst_res_size} = $stats_json->{_all}{total}{store}{reserved_in_bytes}; -print $json->encode($to_return); +my $raw_json = $json->encode($to_return); if ( !$opts{P} ) { - print "\n"; + $raw_json = $raw_json . "\n"; } + +if ( !$opts{q} ) { + print $raw_json; +} + +if ( !$opts{w} ) { + exit 0; +} + +write_file( $output_base, { atomic => 1 }, $raw_json ); + +my $compressed_string; +gzip \$raw_json => \$compressed_string; +my $compressed = encode_base64($compressed_string); +$compressed =~ s/\n//g; +$compressed = $compressed . "\n"; + +write_file( $output_base . 
'.snmp', { atomic => 1 }, $compressed );
+
 exit 0;

From 04896e17675b6c8ece78224432d81501d9705ee4 Mon Sep 17 00:00:00 2001
From: adamus1red
Date: Thu, 3 Oct 2024 16:36:25 +0100
Subject: [PATCH 320/332] Add initial IPMItool functionality for Powermon
 extension. (#405)

* Add initial IPMItool functionality

created `getIPMIdata()` based on existing `getHPASMData()`. Leverages ipmitool to gather power usage data.

* fix some lint errors
---
 snmp/powermon-snmp.py | 45 +++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 43 insertions(+), 2 deletions(-)

diff --git a/snmp/powermon-snmp.py b/snmp/powermon-snmp.py
index d9f179c1b..c98f1ed5d 100755
--- a/snmp/powermon-snmp.py
+++ b/snmp/powermon-snmp.py
@@ -62,8 +62,9 @@
 # 20210204 - v1.2 - added top-level reading, librenms option
 # 20210205 - v1.3 - added cents per kWh
 # 20210205 - v1.4 - improvement to UI
+# 20220513 - v1.5 - Add initial IPMItool method
 
-version = 1.4
+version = 1.5
 
 ### Libraries
 
@@ -97,7 +98,7 @@
     + " [-m|--method <method>] [-N|--no-librenms] [-p|--pretty]"
     + " [-v|--verbose] [-w|--warnings] | -l|--list-methods | -h|--help"
 )
-methods = ["sensors", "hpasmcli"]
+methods = ["sensors", "hpasmcli", "ipmitool"]
 
 # costPerkWh = 0.15  # <<<< CHANGE
 
 ### General functions
@@ -138,6 +139,10 @@ def getData(method):
 
     elif method == "hpasmcli":
         data = getHPASMData()
+
+    elif method == "ipmitool":
+        data = getIPMIdata()
+
     else:
         usageError("You must specify a method.")
 
@@ -290,6 +295,42 @@ def getHPASMData():
     return hdata
 
 
+def getIPMIdata():
+    global error, errorString
+    error = 2
+    errorString = "No power sensor found"
+
+    exe = shutil.which("ipmitool")
+    # NOTE: shutil.which returns None if ipmitool is not in the PATH
+    cmd = [exe, "dcmi", "power", "reading"]
+    warningMsg("ipmitool only runs as root")
+
+    try:
+        output = subprocess.run(
+            cmd, capture_output=True, check=True, text=True, timeout=2
+        )
+
+    except subprocess.CalledProcessError as e:
+        errorMsg(str(e) + ": " + str(e.stdout).strip("\n"))
+        sys.exit(1)
+
+    psu_reading = r"^\s+Instantaneous power reading:\s+"
+
+    rawdata = str(output.stdout).replace("\t", " ").replace("\n ", "\n").split("\n")
+
+    hdata = {}
+    hdata["psu"] = {}  # Init PSU data structure
+    hdata["psu"][0] = {}  # Only one value is returned.
+
+    for line in rawdata:
+        if re.match(psu_reading, line):
+            verboseMsg("found power meter reading: " + line)
+            junk, meter_reading = line.split(":", 1)
+            hdata["psu"][0]["reading"] = meter_reading.replace("Watts", "").strip()
+
+    return hdata
+
+
 # Argument Parsing
 try:
     opts, args = getopt.gnu_getopt(

From 3a5d2ff6cca957b6cb3fdd8ffeaae6414145f6bb Mon Sep 17 00:00:00 2001
From: "Zane C. Bowers-Hadley"
Date: Thu, 3 Oct 2024 11:46:20 -0500
Subject: [PATCH 321/332] "Failed in segment" is now considered a read failure
 for smart (#548)

* add check for "Failed in segment" for self test

* minor version bump
---
 snmp/smart-v1 | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/snmp/smart-v1 b/snmp/smart-v1
index 545282b99..935ed7a62 100755
--- a/snmp/smart-v1
+++ b/snmp/smart-v1
@@ -1,5 +1,5 @@
 #!/usr/bin/env perl
-#Copyright (c) 2023, Zane C. Bowers-Hadley
+#Copyright (c) 2024, Zane C. Bowers-Hadley
 #All rights reserved.
# #Redistribution and use in source and binary forms, with or without modification, @@ -113,7 +113,7 @@ my $useSN = 1; $Getopt::Std::STANDARD_HELP_VERSION = 1; sub main::VERSION_MESSAGE { - print "SMART SNMP extend 0.3.1\n"; + print "SMART SNMP extend 0.3.2\n"; } sub main::HELP_MESSAGE { @@ -784,6 +784,8 @@ foreach my $line (@disks) { $IDs{'interrupted'} = scalar @interrupted; my @read_failure = grep( /read failure/, @outputA ); $IDs{'read_failure'} = scalar @read_failure; + my @read_failure2 = grep( /Failed in segment/, @outputA ); + $IDs{'read_failure'} = $IDs{'read_failure'} + scalar @read_failure2; my @unknown_failure = grep( /unknown failure/, @outputA ); $IDs{'unknown_failure'} = scalar @unknown_failure; my @extended = grep( /\d.*\ ([Ee]xtended|[Ll]ong).*(?![Dd]uration)/, @outputA ); From eb6b60a64790c3cd628f3b816e7cb596eede3164 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Fri, 4 Oct 2024 01:20:52 -0500 Subject: [PATCH 322/332] add read, write, and checksum error gathering for zpools (#549) --- snmp/zfs | 57 +++++++++++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 48 insertions(+), 9 deletions(-) diff --git a/snmp/zfs b/snmp/zfs index 7b2412ced..4033553c5 100755 --- a/snmp/zfs +++ b/snmp/zfs @@ -6,7 +6,7 @@ zfs - LibreNMS JSON SNMP extend for gathering backups for ZFS =head1 VERSION -0.1.1 +0.2.0 =head1 DESCRIPTION @@ -79,6 +79,7 @@ use File::Slurp; use MIME::Base64; use IO::Compress::Gzip qw(gzip $GzipError); use Pod::Usage; +use Scalar::Util qw(looks_like_number); sub main::VERSION_MESSAGE { pod2usage( -exitval => 255, -verbose => 99, -sections => qw(VERSION), -output => \*STDOUT, ); @@ -124,14 +125,18 @@ if ($help) { my $zpool_output = `/sbin/zpool list -pH`; my @pools = split( /\n/, $zpool_output ); my $pools_int = 0; -$tojson{online} = 0; -$tojson{degraded} = 0; -$tojson{offline} = 0; -$tojson{faulted} = 0; -$tojson{health} = 1; -$tojson{unavail} = 0; -$tojson{removed} = 0; -$tojson{unknown} = 0; +$tojson{online} = 0; +$tojson{degraded} = 0; +$tojson{offline} = 0; +$tojson{faulted} = 0; +$tojson{health} = 1; +$tojson{unavail} = 0; +$tojson{removed} = 0; +$tojson{unknown} = 0; +$tojson{read_errors} = 0; +$tojson{write_errors} = 0; +$tojson{checksum_errors} = 0; +$tojson{total_errors} = 0; my @toShoveIntoJSON; while ( defined( $pools[$pools_int] ) ) { @@ -211,6 +216,40 @@ while ( defined( $pools[$pools_int] ) ) { } } + # get read/write/checksum info for spools + $newPool{read_errors} = 0; + $newPool{write_errors} = 0; + $newPool{checksum_errors} = 0; + my $pool_status = `zpool status $newPool{name}`; + my @pool_status_split = split(/\n/, $pool_status); + my $pool_config_start; + foreach my $line (@pool_status_split) { + if ($pool_config_start && $line =~ /^[\ \t]*$/) { + $pool_config_start = 0; + } elsif ($line =~ /NAME[\ \t]+STATE[\ \t]+READ[\ \t]+WRITE[\ \t]+CKSUM/) { + $pool_config_start = 1; + } elsif ($pool_config_start) { + my @pool_line_split = split(/[\ \t]+/, $line); + if ( + defined($pool_line_split[3]) && + looks_like_number($pool_line_split[3]) && + defined($pool_line_split[4]) && + looks_like_number($pool_line_split[4]) && + defined($pool_line_split[5]) && + looks_like_number($pool_line_split[5]) + ) { + $newPool{read_errors} = $newPool{read_errors} + $pool_line_split[3]; + $newPool{write_errors} = $newPool{write_errors} + $pool_line_split[4]; + $newPool{checksum_errors} = $newPool{checksum_errors} + $pool_line_split[5]; + } + } + } + $newPool{total_errors} = $newPool{read_errors} + $newPool{write_errors} + $newPool{checksum_errors}; + 
$tojson{read_errors} = $tojson{read_errors} + $newPool{read_errors}; + $tojson{write_errors} = $tojson{write_errors} + $newPool{write_errors}; + $tojson{checksum_errors} = $tojson{checksum_errors} + $newPool{checksum_errors}; + $tojson{total_errors} = $tojson{total_errors} + $newPool{total_errors}; + push( @toShoveIntoJSON, \%newPool ); $pools_int++; From f1c5153fd668e5348b59adb3456af12d2f745362 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Sat, 19 Oct 2024 07:57:04 -0500 Subject: [PATCH 323/332] fixes for ZFS for getting perf stats (#550) * off by one fix and formatting cleanup * add a missing value --- snmp/zfs | 68 ++++++++++++++++++++++++++++++-------------------------- 1 file changed, 36 insertions(+), 32 deletions(-) diff --git a/snmp/zfs b/snmp/zfs index 4033553c5..0273eb7b0 100755 --- a/snmp/zfs +++ b/snmp/zfs @@ -6,7 +6,7 @@ zfs - LibreNMS JSON SNMP extend for gathering backups for ZFS =head1 VERSION -0.2.0 +0.3.0 =head1 DESCRIPTION @@ -193,22 +193,27 @@ while ( defined( $pools[$pools_int] ) ) { my $iostat = `zpool iostat -l -q -p -H $newPool{name}`; chomp($iostat); - $iostat =~ s/\t/,/g; + $iostat =~ s/\t+/,/g; $iostat =~ s/\,\-\,\-\,/\,0\,0\,/g; $iostat =~ s/\%//g; $iostat =~ s/\,([0-1\.]*)x\,/,$1,/; chomp($iostat); my $parsed; ( - $parsed, $parsed, $newPool{operations_r}, $newPool{operations_w}, - $newPool{bandwidth_r}, $newPool{bandwidth_w}, $newPool{total_wait_r}, $newPool{total_wait_w}, - $newPool{disk_wait_r}, $newPool{disk_wait_w}, $newPool{syncq_wait_r}, $newPool{syncq_wait_w}, - $newPool{asyncq_wait_w}, $newPool{scrub_wait}, $newPool{trim_wait}, $newPool{syncq_read_p}, - $newPool{syncq_read_a}, $newPool{syncq_write_p}, $newPool{syncq_write_a}, $newPool{asyncq_read_p}, - $newPool{asyncq_read_a}, $newPool{asyncq_write_p}, $newPool{asyncq_write_a}, $newPool{scrubq_read_p}, - $newPool{scrubq_read_a}, $newPool{trimq_write_p}, $newPool{trimq_write_a}, + $parsed, $parsed, $parsed, $newPool{operations_r}, + $newPool{operations_w}, $newPool{bandwidth_r}, $newPool{bandwidth_w}, $newPool{total_wait_r}, + $newPool{total_wait_w}, $newPool{disk_wait_r}, $newPool{disk_wait_w}, $newPool{syncq_wait_r}, + $newPool{syncq_wait_w}, $newPool{asyncq_wait_r}, $newPool{asyncq_wait_w}, $newPool{scrub_wait}, + $newPool{trim_wait}, $newPool{syncq_read_p}, $newPool{syncq_read_a}, $newPool{syncq_write_p}, + $newPool{syncq_write_a}, $newPool{asyncq_read_p}, $newPool{asyncq_read_a}, $newPool{asyncq_write_p}, + $newPool{asyncq_write_a}, $newPool{scrubq_read_p}, $newPool{scrubq_read_a}, $newPool{trimq_write_p}, + $newPool{trimq_write_a}, ) = split( /\,/, $iostat ); + if ( $newPool{trim_wait} eq '-' ) { + $newPool{trim_wait} = 0; + } + my @pool_keys = keys(%newPool); foreach my $item (@pool_keys) { if ( $item ne 'altroot' && $newPool{$item} eq '-' ) { @@ -220,35 +225,34 @@ while ( defined( $pools[$pools_int] ) ) { $newPool{read_errors} = 0; $newPool{write_errors} = 0; $newPool{checksum_errors} = 0; - my $pool_status = `zpool status $newPool{name}`; - my @pool_status_split = split(/\n/, $pool_status); + my $pool_status = `zpool status $newPool{name}`; + my @pool_status_split = split( /\n/, $pool_status ); my $pool_config_start; foreach my $line (@pool_status_split) { - if ($pool_config_start && $line =~ /^[\ \t]*$/) { + if ( $pool_config_start && $line =~ /^[\ \t]*$/ ) { $pool_config_start = 0; - } elsif ($line =~ /NAME[\ \t]+STATE[\ \t]+READ[\ \t]+WRITE[\ \t]+CKSUM/) { + } elsif ( $line =~ /NAME[\ \t]+STATE[\ \t]+READ[\ \t]+WRITE[\ \t]+CKSUM/ ) { $pool_config_start = 1; } elsif 
($pool_config_start) { - my @pool_line_split = split(/[\ \t]+/, $line); - if ( - defined($pool_line_split[3]) && - looks_like_number($pool_line_split[3]) && - defined($pool_line_split[4]) && - looks_like_number($pool_line_split[4]) && - defined($pool_line_split[5]) && - looks_like_number($pool_line_split[5]) - ) { - $newPool{read_errors} = $newPool{read_errors} + $pool_line_split[3]; - $newPool{write_errors} = $newPool{write_errors} + $pool_line_split[4]; + my @pool_line_split = split( /[\ \t]+/, $line ); + if ( defined( $pool_line_split[3] ) + && looks_like_number( $pool_line_split[3] ) + && defined( $pool_line_split[4] ) + && looks_like_number( $pool_line_split[4] ) + && defined( $pool_line_split[5] ) + && looks_like_number( $pool_line_split[5] ) ) + { + $newPool{read_errors} = $newPool{read_errors} + $pool_line_split[3]; + $newPool{write_errors} = $newPool{write_errors} + $pool_line_split[4]; $newPool{checksum_errors} = $newPool{checksum_errors} + $pool_line_split[5]; - } - } - } - $newPool{total_errors} = $newPool{read_errors} + $newPool{write_errors} + $newPool{checksum_errors}; - $tojson{read_errors} = $tojson{read_errors} + $newPool{read_errors}; - $tojson{write_errors} = $tojson{write_errors} + $newPool{write_errors}; + } ## end if ( defined( $pool_line_split[3] ) && looks_like_number...) + } ## end elsif ($pool_config_start) + } ## end foreach my $line (@pool_status_split) + $newPool{total_errors} = $newPool{read_errors} + $newPool{write_errors} + $newPool{checksum_errors}; + $tojson{read_errors} = $tojson{read_errors} + $newPool{read_errors}; + $tojson{write_errors} = $tojson{write_errors} + $newPool{write_errors}; $tojson{checksum_errors} = $tojson{checksum_errors} + $newPool{checksum_errors}; - $tojson{total_errors} = $tojson{total_errors} + $newPool{total_errors}; + $tojson{total_errors} = $tojson{total_errors} + $newPool{total_errors}; push( @toShoveIntoJSON, \%newPool ); @@ -474,7 +478,7 @@ $tojson{l2_access_total} = $tojson{l2_hits} + $tojson{l2_misses}; my %head_hash; $head_hash{data} = \%tojson; -$head_hash{version} = 3; +$head_hash{version} = 4; $head_hash{error} = 0; $head_hash{errorString} = ''; From ac68be87ca1e3d297d1d5ef15c73bfaa7159406d Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Fri, 25 Oct 2024 15:08:41 -0500 Subject: [PATCH 324/332] linux_softnet_stat nolonger uses Gzip::Faster... uses IO::Compress::Gzip as it comes default (#551) --- snmp/linux_softnet_stat | 32 ++++++++++++++++++++------------ 1 file changed, 20 insertions(+), 12 deletions(-) diff --git a/snmp/linux_softnet_stat b/snmp/linux_softnet_stat index f7987a391..cc547a1f3 100755 --- a/snmp/linux_softnet_stat +++ b/snmp/linux_softnet_stat @@ -1,10 +1,15 @@ #!/usr/bin/env perl +use strict; +use warnings; + =head1 DESCRIPTION This is a SNMP extend for monitoring /proc/net/softnet_stat on Linux for use with LibreNMS. -For more information, see L. +This just needs added to snmpd.conf like below. + + extend linux_softnet_stat /etc/snmp/linux_softnet_stat -b =head1 SWITCHES @@ -16,24 +21,29 @@ Pretty print the JSON. If used with -b, this switch will be ignored. Gzip the output and convert to Base64. +=head1 VERSION + +0.1.0 + =cut -use strict; -use warnings; +our $VERSION = '0.1.0'; + use JSON; use Getopt::Std; use File::Slurp; use MIME::Base64; -use Gzip::Faster; +use IO::Compress::Gzip qw(gzip $GzipError); +use Pod::Usage; $Getopt::Std::STANDARD_HELP_VERSION = 1; sub main::VERSION_MESSAGE { - print "Linux softnet stats extend 0.0.1\n"; + print 'Linux softnet stats extend ' . $VERSION . 
"\n"; } sub main::HELP_MESSAGE { - + pod2usage( -exitval => 255, -verbose => 2, -output => \*STDOUT, ); } #this will be dumped to json at the end @@ -132,13 +142,11 @@ if ( !$opts{p} && !$opts{b} ) { exit 0; } -my $compressed = encode_base64( gzip($return_string) ); +my $toReturnCompressed; +gzip \$return_string => \$toReturnCompressed; +my $compressed = encode_base64($toReturnCompressed); $compressed =~ s/\n//g; $compressed = $compressed . "\n"; -if ( length($compressed) > length($return_string) ) { - print $return_string. "\n"; -} else { - print $compressed; -} +print $compressed; exit 0; From ca187e197e3c9c27f95712f385cae954d58075f8 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Fri, 25 Oct 2024 20:31:55 -0500 Subject: [PATCH 325/332] privoxy: doc update, no longer needs Gzip::Faster, add -w (#552) --- snmp/privoxy | 124 +++++++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 110 insertions(+), 14 deletions(-) diff --git a/snmp/privoxy b/snmp/privoxy index 26e87cddd..4af9a00a5 100755 --- a/snmp/privoxy +++ b/snmp/privoxy @@ -1,6 +1,6 @@ #!/usr/bin/env perl -#Copyright (c) 2023, Zane C. Bowers-Hadley +#Copyright (c) 2024, Zane C. Bowers-Hadley #All rights reserved. # #Redistribution and use in source and binary forms, with or without modification, @@ -23,32 +23,91 @@ #OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF #THE POSSIBILITY OF SUCH DAMAGE. -=for comment +use warnings; +use strict; + +=head1 NAME + +privoxy - LibreNMS JSON style SNMP extend for monitoring Privoxy + +=head1 VERSION + +0.2.0 + +=cut + +our $VERSION = '0.2.0'; + +=head1 SYNOPSIS + +privoxy B<-w> [B<-o> ] [B<-f> ] [B<-p>] + +privoxy [B<-o> ] [B<-f> ] [B<-p>] + +=head1 SNMPD CONFIG Add this to snmpd.conf as below and restart snmpd. extend privoxy /etc/snmp/extends/privoxy -Supported command line options are as below. +Or if using cron... + + # cron + */5 * * * * root /etc/snmp/privoxy -w > /dev/null + + # snmpd.conf + extend privoxy /bin/cat /var/cache/privoxy_extend.json.snmp + +=head1 FLAGS + +=head2 -f + +The Privoxy logfile. + +Default: /var/log/privoxy/logfile + +=head2 -c + +Use gzip+base64 LibreNMS style compression. + +=head2 -p + +Pretty print. + +=head2 -o + +Where to write it out to. + +Default: /var/cache/privoxy_extend.json - -f Logfile. - Default: /var/log/privoxy/logfile - -c gzip+base64 compression - -p Pretty print. +=head2 -w -The last is only really relevant to the usage with SNMP. +Write out. Implies -c + +=head1 INSTALL + +FreeBSD... + + pkg install p5-JSON p5-MIME-Base64 p5-File-Slurp p5-File-ReadBackwards p5-IPC-Run3 p5-Time-Piece + +Debian... + + apt-get install libjson-perl libmime-base64-perl libfile-slurp-perl libfile-readbackwards-perl libipc-run3-perl cpanminus + cpanm Time::Piece =cut -use strict; -use warnings; use Getopt::Std; use File::ReadBackwards; use JSON; use Time::Piece; use IPC::Run3; use MIME::Base64; -use Gzip::Faster; +use IO::Compress::Gzip qw(gzip $GzipError); +use File::Slurp; +use Pod::Usage; + +$Getopt::Std::STANDARD_HELP_VERSION = 1; # get the current time my $t = localtime; @@ -71,7 +130,7 @@ my $compress; #gets the options my %opts; -getopts( 'f:cp', \%opts ); +getopts( 'f:cpwo', \%opts ); if ( defined( $opts{f} ) ) { $logfile = $opts{f}; } @@ -79,6 +138,22 @@ if ( defined( $opts{c} ) ) { $compress = 1; } +if ($opts{w}) { + $opts{c} = 1; +} + +sub main::VERSION_MESSAGE { + print 'privoxy LibreNMS extend v. ' . $VERSION . 
"\n"; +} + +sub main::HELP_MESSAGE { + pod2usage( -exitval => 255, -verbose => 2, -output => \*STDOUT, ); +} + +if ( !defined( $opts{o} ) ) { + $opts{o} = '/var/cache/privoxy_extend.json'; +} + my $json = JSON->new->allow_nonref->canonical(1); if ( $opts{p} ) { $json->pretty(); @@ -426,8 +501,29 @@ if ($compress) { exit 0; } ## end if ($compress) -print $json->encode($to_return); +my $raw_json_return = $json->encode($to_return); if ( !$opts{p} ) { - print "\n"; + $raw_json_return = $raw_json_return . "\n"; } + +if ( $opts{w} ) { + write_file( $opts{o}, $raw_json_return ); +} + +if ( $opts{c} ) { + # compress and write to the cache file for it + my $compressed_string; + gzip \$raw_json_return => \$compressed_string; + my $compressed = encode_base64($compressed_string); + $compressed =~ s/\n//g; + $compressed = $compressed . "\n"; + print $compressed; + + if ( $opts{w} ) { + write_file( $opts{o} . '.snmp', $compressed ); + } +} else { + print $raw_json_return; +} + exit 0; From 9be4b6c1b7ab8582bb321b4a4ae272b30f884b85 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Sun, 17 Nov 2024 21:10:01 -0600 Subject: [PATCH 326/332] add a extend for nextcloud (#554) * very early initial work * checkpoint * add last_seen handling --- snmp/nextcloud | 337 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 337 insertions(+) create mode 100644 snmp/nextcloud diff --git a/snmp/nextcloud b/snmp/nextcloud new file mode 100644 index 000000000..e7d648737 --- /dev/null +++ b/snmp/nextcloud @@ -0,0 +1,337 @@ +#!/usr/bin/env perl + +=head1 NAME + +nextcloud - LibreNMS JSON SNMP extend for gathering backups for Nextcloud + +=head1 VERSION + +0.0.1 + +=head1 DESCRIPTION + +For more information, see L. + +=head1 SWITCHES + +=head2 -i + +Dir location for the Nextcloud install. + +The defaults are as below. + +FreeBSD: /usr/local/www/nextcloud +Linux: /var/www/nextcloud + +=head2 -m + +If set, does consider the user directories to not all be under the same mountpoint. + +=head2 -o + +Where to write the output to. + +Default: /var/cache/nextcloud_extend + +=head2 -q + +Don't print the JSON results when done. + +=head1 SETUP + +Create the required directory to write to. + + mkdir /var/cache/nextcloud_extend + chown -R $nextcloud_user /var/cache/nextcloud_extend + +snmpd.conf + + extend nextcloud /bin/cat /var/cache/nextcloud_extend/snmp + +cron, specify -o or -i if needed/desired + + */5 * * * * /etc/snmpd/nextcloud -q 2> /dev/null + +=head1 REQUIREMENTS + +Debian... + + apt-get install libjson-perl libfile-slurp-perl libmime-base64-perl cpanminus + cpanm Time::Piece + +FreeBSD... + + pkg install p5-JSON p5-File-Slurp p5-MIME-Base64 p5-Time-Piece + +Generic cpanm... + + cpanm JSON File::Slurp Mime::Base64 + +=cut + +#Copyright (c) 2024, Zane C. Bowers-Hadley +#All rights reserved. +# +#Redistribution and use in source and binary forms, with or without modification, +#are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. 
+# +#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +#ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +#WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +#IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, +#INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +#BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +#LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +#OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +#THE POSSIBILITY OF SUCH DAMAGE. + +# Many thanks to Ben Rockwood, Jason J. Hellenthal, and Martin Matuska +# for zfs-stats and figuring out the math for all the stats +# +# Thanks to dlangille for pointing out the issues on 14 and Bobzikwick figuring out the fix in issues/501 + +use strict; +use warnings; +use JSON; +use Getopt::Long; +use File::Slurp; +use MIME::Base64; +use IO::Compress::Gzip qw(gzip $GzipError); +use Pod::Usage; +use String::ShellQuote; +use Time::Piece; + +sub main::VERSION_MESSAGE { + pod2usage( -exitval => 255, -verbose => 99, -sections => qw(VERSION), -output => \*STDOUT, ); +} + +sub main::HELP_MESSAGE { + pod2usage( -exitval => 255, -verbose => 2, -output => \*STDOUT, ); +} + +#this will be dumped to json at the end +my %tojson; +$tojson{total} = 0; +$tojson{user_count} = 0; +$tojson{free} = 0; +$tojson{used} = 0; +$tojson{enabled_apps} = 0; +$tojson{disabled_apps} = 0; +$tojson{encryption_enabled} = 0; +$tojson{calendars} = 0; +$tojson{multimount} = 0; +$tojson{users} = {}; + +# current user +my $current_user = $ENV{LOGNAME} || $ENV{USER} || getpwuid($<); + +#gets the options +my %opts; +my $be_quiet; +my $output_dir = '/var/cache/nextcloud_extend'; +my $install_dir; +my $version; +my $help; +my $multimount; +GetOptions( + q => \$be_quiet, + 'o=s' => \$output_dir, + 'i=s' => \$install_dir, + v => \$version, + version => \$version, + h => \$help, + help => \$help, + m => \$multimount, +); + +if ($version) { + pod2usage( -exitval => 255, -verbose => 99, -sections => qw(VERSION), -output => \*STDOUT, ); +} + +if ($help) { + pod2usage( -exitval => 255, -verbose => 2, -output => \*STDOUT, ); +} + +if ($multimount) { + $tojson{multimount} = 1; +} + +# get what to use for the install dir if not specified +if ( !defined($install_dir) ) { + if ( $^O eq 'freebsd' ) { + $install_dir = '/usr/local/www/nextcloud'; + } elsif ( $^O eq 'linux' ) { + $install_dir = '/var/www/nextcloud'; + } else { + die('-i not specified for the install dir for Nextcloud'); + } +} + +# ensure the install dir exists +if ( !-d $install_dir ) { + die( 'the Nextcloud install directory, "' . $install_dir . '", is not a directory or does not exist' ); +} + +# change to the install dir +chdir($install_dir) || die( 'failed to chdir to the Nextcloud install dir, "' . $install_dir . '",' ); + +# ensure the config exists +if ( !-f './config/config.php' ) { + die( '"./config/config.php" does not exist under the Nextcloud install dir ,"' . $install_dir . '",' ); +} + +# ensure ./occ happens +if ( !-f './occ' ) { + die( '"./occ" does not exist under the Nextcloud install dir ,"' . $install_dir . '",' ); +} + +# ensure the install dir exists and try to create it if it does not +if ( !-d $output_dir ) { + mkdir($output_dir) || die( '"' . $output_dir . 
'" does not exist and could not be created' ); +} + +### +### +### get user info +### +### +my $user_list_raw = `php occ user:list --output=json`; +if ( $? != 0 ) { + die( '"php occ user:list" existed non-zero with.... ' . "\n" . $user_list_raw . "\n..." ); +} +my @users; +eval { + my $decodes_users = decode_json($user_list_raw); + @users = keys( %{$decodes_users} ); +}; + +foreach my $user (@users) { + my $quoted_user = shell_quote($user); + my $user_info_raw = `php occ user:info --output=json $quoted_user`; + eval { + my $user_info = decode_json($user_info_raw); + if ( defined( $user_info->{user_id} ) + && defined( $user_info->{storage} ) + && ref( $user_info->{storage} ) eq 'HASH' + && defined( $user_info->{last_seen} ) ) + { + my $last_seen = $user_info->{last_seen}; + if ( $last_seen eq '1970-01-01T00:00:00+00:00' ) { + $last_seen = -1; + } else { + eval { + $last_seen =~ s/(\d+)\:(\d+)$/$1$2/; + my $t1 = gmtime; + my $t2 = Time::Piece->strptime( $last_seen, "%Y-%m-%dT%H:%M:%S%z" ); + $last_seen = $t1->epoch - $t2->epoch; + }; + if ($@) { + $last_seen = undef; + } + } ## end else [ if ( $last_seen eq '1970-01-01T00:00:00+00:00')] + $tojson{users}{$user} = { + 'free' => $user_info->{storage}{free}, + 'quota' => $user_info->{storage}{quota}, + 'relative' => $user_info->{storage}{relative}, + 'total' => $user_info->{storage}{total}, + 'used' => $user_info->{storage}{used}, + 'last_seen' => $last_seen, + 'calendars' => 0, + }; + $tojson{free} = $user_info->{storage}{free}; + $tojson{used} = $tojson{used} + $user_info->{storage}{used}; + if ( $user_info->{storage}{quota} > 0 ) { + $tojson{quota} = $tojson{quota} + $user_info->{storage}{quota}; + } + $tojson{user_count}++; + # does not currently support output options + my $calendar_info_raw = `php occ dav:list-calendars $quoted_user 2> /dev/null`; + if ( $? == 0 ) { + # if the table has more than 4 lines the other lines contain calender info + # so given it is zero index the number of calendars can be fournd via subtracting 3 + my @calendar_info_split = split( /\n/, $calendar_info_raw ); + if ( $#calendar_info_split > 3 ) { + $tojson{users}{$user}{'calendars'} = $#calendar_info_split - 3; + $tojson{calendars} = $tojson{'calendars'} + $tojson{users}{$user}{'calendars'}; + } + } + } ## end if ( defined( $user_info->{user_id} ) && defined...) + }; +} ## end foreach my $user (@users) + +### +### +### get app info +### +### +my $app_info_raw = `php occ app:list --output=json`; +if ( $? == 0 ) { + eval { + my $app_info = decode_json($app_info_raw); + if ( defined( $app_info->{disabled} ) + && ref( $app_info->{disabled} ) eq 'HASH' ) + { + my @disabled_apps = keys( %{ $app_info->{disabled} } ); + $tojson{disabled_apps} = $#disabled_apps + 1; + } + if ( defined( $app_info->{enabled} ) + && ref( $app_info->{enabled} ) eq 'HASH' ) + { + my @disabled_apps = keys( %{ $app_info->{enabled} } ); + $tojson{enabled_apps} = $#disabled_apps + 1; + } + }; +} ## end if ( $? == 0 ) + +### +### +### get encryption status +### +### +my $encrption_info_raw = `php occ encryption:status --output=json`; +if ( $? == 0 ) { + eval { + my $encrption_info = decode_json($encrption_info_raw); + if ( defined($encrption_info) + && ref( $encrption_info->{enabled} ) eq '' + && $encrption_info->{enabled} =~ /^(1|[Tt][Rr][Uu][Ee])$/ ) + { + $tojson{encryption_enabled} = 1; + } + }; +} ## end if ( $? 
== 0 ) + +my %head_hash; +$head_hash{data} = \%tojson; +$head_hash{version} = 1; +$head_hash{error} = 0; +$head_hash{errorString} = ''; + +my $json_output = encode_json( \%head_hash ); + +if ( !$be_quiet ) { + print $json_output. "\n"; +} + +eval { write_file( $output_dir . '/json', $json_output ); }; +if ($@) { + warn( 'failed to write out "' . $output_dir . '/json" ... ' . $@ ); +} + +my $toReturnCompressed; +gzip \$json_output => \$toReturnCompressed; +my $compressed = encode_base64($toReturnCompressed); +$compressed =~ s/\n//g; +$compressed = $compressed . "\n"; + +eval { write_file( $output_dir . '/snmp', $compressed ); }; +if ($@) { + warn( 'failed to write out "' . $output_dir . '/snmp" ... ' . $@ ); +} From 6d3527451df0943338657a7f79956412913fbf9b Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Sat, 23 Nov 2024 19:18:37 -0600 Subject: [PATCH 327/332] add last_seen_string for nextcloud user info (#555) --- snmp/nextcloud | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/snmp/nextcloud b/snmp/nextcloud index e7d648737..6d0d231e5 100644 --- a/snmp/nextcloud +++ b/snmp/nextcloud @@ -237,13 +237,14 @@ foreach my $user (@users) { } } ## end else [ if ( $last_seen eq '1970-01-01T00:00:00+00:00')] $tojson{users}{$user} = { - 'free' => $user_info->{storage}{free}, - 'quota' => $user_info->{storage}{quota}, - 'relative' => $user_info->{storage}{relative}, - 'total' => $user_info->{storage}{total}, - 'used' => $user_info->{storage}{used}, - 'last_seen' => $last_seen, - 'calendars' => 0, + 'free' => $user_info->{storage}{free}, + 'quota' => $user_info->{storage}{quota}, + 'relative' => $user_info->{storage}{relative}, + 'total' => $user_info->{storage}{total}, + 'used' => $user_info->{storage}{used}, + 'last_seen' => $last_seen, + 'last_seen_string' => $user_info->{last_seen}, + 'calendars' => 0, }; $tojson{free} = $user_info->{storage}{free}; $tojson{used} = $tojson{used} + $user_info->{storage}{used}; From 178931b861eae4a0728de02d5060efb0f8ef3535 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Mon, 25 Nov 2024 03:05:24 -0600 Subject: [PATCH 328/332] save the total for nextcloud storage info (#556) --- snmp/nextcloud | 1 + 1 file changed, 1 insertion(+) diff --git a/snmp/nextcloud b/snmp/nextcloud index 6d0d231e5..df670b50f 100644 --- a/snmp/nextcloud +++ b/snmp/nextcloud @@ -247,6 +247,7 @@ foreach my $user (@users) { 'calendars' => 0, }; $tojson{free} = $user_info->{storage}{free}; + $tojson{total} = $user_info->{storage}{total}; $tojson{used} = $tojson{used} + $user_info->{storage}{used}; if ( $user_info->{storage}{quota} > 0 ) { $tojson{quota} = $tojson{quota} + $user_info->{storage}{quota}; From 87995812196db5bcb7667bc1256b5a4e1ca91a5f Mon Sep 17 00:00:00 2001 From: "Zane C. 
Bowers-Hadley" Date: Mon, 25 Nov 2024 03:36:01 -0600 Subject: [PATCH 329/332] ensure quota will always be present for a total for nextcloud (#557) --- snmp/nextcloud | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/snmp/nextcloud b/snmp/nextcloud index df670b50f..37fdddd70 100644 --- a/snmp/nextcloud +++ b/snmp/nextcloud @@ -128,6 +128,7 @@ $tojson{encryption_enabled} = 0; $tojson{calendars} = 0; $tojson{multimount} = 0; $tojson{users} = {}; +$tojson{quota} = 0; # current user my $current_user = $ENV{LOGNAME} || $ENV{USER} || getpwuid($<); @@ -246,9 +247,9 @@ foreach my $user (@users) { 'last_seen_string' => $user_info->{last_seen}, 'calendars' => 0, }; - $tojson{free} = $user_info->{storage}{free}; + $tojson{free} = $user_info->{storage}{free}; $tojson{total} = $user_info->{storage}{total}; - $tojson{used} = $tojson{used} + $user_info->{storage}{used}; + $tojson{used} = $tojson{used} + $user_info->{storage}{used}; if ( $user_info->{storage}{quota} > 0 ) { $tojson{quota} = $tojson{quota} + $user_info->{storage}{quota}; } From 206f3f0f1911a4d03acf1d45975c502f42c1f17f Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Sat, 30 Nov 2024 00:55:02 -0600 Subject: [PATCH 330/332] a bit of cleanup for user count and saving the files is now atomic for nextcloud (#558) --- snmp/nextcloud | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/snmp/nextcloud b/snmp/nextcloud index 37fdddd70..449c98e3f 100644 --- a/snmp/nextcloud +++ b/snmp/nextcloud @@ -212,10 +212,12 @@ eval { my $decodes_users = decode_json($user_list_raw); @users = keys( %{$decodes_users} ); }; +$tojson{user_count} = $#users; +$tojson{user_count}++; foreach my $user (@users) { my $quoted_user = shell_quote($user); - my $user_info_raw = `php occ user:info --output=json $quoted_user`; + my $user_info_raw = `php occ user:info --output=json $quoted_user 2> /dev/null`; eval { my $user_info = decode_json($user_info_raw); if ( defined( $user_info->{user_id} ) @@ -253,7 +255,6 @@ foreach my $user (@users) { if ( $user_info->{storage}{quota} > 0 ) { $tojson{quota} = $tojson{quota} + $user_info->{storage}{quota}; } - $tojson{user_count}++; # does not currently support output options my $calendar_info_raw = `php occ dav:list-calendars $quoted_user 2> /dev/null`; if ( $? == 0 ) { @@ -274,7 +275,7 @@ foreach my $user (@users) { ### get app info ### ### -my $app_info_raw = `php occ app:list --output=json`; +my $app_info_raw = `php occ app:list --output=json 2> /dev/null`; if ( $? == 0 ) { eval { my $app_info = decode_json($app_info_raw); @@ -298,7 +299,7 @@ if ( $? == 0 ) { ### get encryption status ### ### -my $encrption_info_raw = `php occ encryption:status --output=json`; +my $encrption_info_raw = `php occ encryption:status --output=json 2> /dev/null`; if ( $? == 0 ) { eval { my $encrption_info = decode_json($encrption_info_raw); @@ -323,7 +324,7 @@ if ( !$be_quiet ) { print $json_output. "\n"; } -eval { write_file( $output_dir . '/json', $json_output ); }; +eval { write_file( $output_dir . '/json', { atomic => 1 }, $json_output ); }; if ($@) { warn( 'failed to write out "' . $output_dir . '/json" ... ' . $@ ); } @@ -334,7 +335,7 @@ my $compressed = encode_base64($toReturnCompressed); $compressed =~ s/\n//g; $compressed = $compressed . "\n"; -eval { write_file( $output_dir . '/snmp', $compressed ); }; +eval { write_file( $output_dir . '/snmp', { atomic => 1 }, $compressed ); }; if ($@) { warn( 'failed to write out "' . $output_dir . '/snmp" ... ' . 
$@ ); } From 20715999451402e0dd291ecafdc7e07937844dd7 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Sun, 1 Dec 2024 17:45:26 -0600 Subject: [PATCH 331/332] add text_blob extend and update perlcritic options (#559) * ignore ProhibitBitwiseOperators * add text_blob extend --- .perlcriticrc | 2 +- snmp/text_blob | 370 +++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 371 insertions(+), 1 deletion(-) create mode 100755 snmp/text_blob diff --git a/.perlcriticrc b/.perlcriticrc index ab2e45531..732ce4851 100644 --- a/.perlcriticrc +++ b/.perlcriticrc @@ -1 +1 @@ -exclude = ProhibitExplicitReturnUndef ProhibitOneArgBless ProhibitStringyEval +exclude = ProhibitExplicitReturnUndef ProhibitOneArgBless ProhibitStringyEval ProhibitBitwiseOperators diff --git a/snmp/text_blob b/snmp/text_blob new file mode 100755 index 000000000..030a583bd --- /dev/null +++ b/snmp/text_blob @@ -0,0 +1,370 @@ +#!/usr/bin/env perl + +use warnings; +use strict; + +=head1 NAME + +text_blob - LinbreNMS JSON extend for text blob stuff. + +=head1 VERSION + +0.0.1 + +=cut + +our $VERSION = '0.0.1'; + +=head1 SYNOPSIS + +wireguard [B<-c> ] [B<-q>] + +wireguard [B<-v>|B<--version>] + +wireguard [B<-h>|B<--help>] + +=head1 SWITCHES + +=head2 -c + +Config file to use. + +Default: /usr/local/etc/text_blob_extend.json + +=head2 -h|--help + +Print help info. + +=head2 -q + +Be quiet when running it. + +=head2 -v|--version + +Print version info. + +=head1 INSTALL + +Install the depends. + + # FreeBSD + pkg install p5-JSON p5-File-Slurp p5-MIME-Base64 + + # Debian + apt-get install libjson-perl libmime-base64-perl libfile-slurp-perl + +Then set it up in SNMPD. + +=head1 CONFIG + +The default config is /usr/local/etc/text_blob_extend.json . + + - .blobs :: A hash of commands to run. The key values are the name of the blob. + + - .global_envs :: A hash of enviromental values set. + + - .blob_envs :: A hash of per blob env values. The key name of the blob and each value is + a sub hash of enviromental values to set. + + - .output_dir :: Output directory to use. + - Default :: /var/cache/text_blob_extend + +Example + + { + "blobs":{ + "jls": "jls", + "dmesg": "dmesg", + "top_io": "top -b -m io -j", + "top_cpu": "top -b -m cpu -w -j", + "ps": "ps axuw", + "netstat": "netstat -rn" + } + } + +=cut + +use JSON; +use Getopt::Std; +use MIME::Base64; +use IO::Compress::Gzip qw(gzip $GzipError); +use File::Slurp; +use Pod::Usage; + +sub main::VERSION_MESSAGE { + print 'text_blob LibreNMS extend v. ' . $VERSION . 
"\n"; +} + +sub main::HELP_MESSAGE { + pod2usage( -exitval => 255, -verbose => 2, -output => \*STDOUT, ); +} + +$Getopt::Std::STANDARD_HELP_VERSION = 1; + +#gets the options +my %opts = (); +getopts( 'c:qvh', \%opts ); + +if ( $opts{v} ) { + &main::VERSION_MESSAGE; + exit 255; +} + +if ( $opts{h} ) { + &main::HELP_MESSAGE; + exit 255; +} + +if ( !defined( $opts{c} ) ) { + $opts{c} = '/usr/local/etc/text_blob_extend.json'; +} + +my $return_json = { + error => 0, + errorString => '', + version => 2, + data => { + non_zero_exits => 0, + warns => [], + blobs => {}, + blob_exit_val => {}, + blob_exit_signal => {}, + blob_has_coredump => {}, + }, +}; + +## +## +## get original env stuff +## +## +my @original_envs = keys(%ENV); +my %original_envs_vals; +foreach my $item (@original_envs) { + $original_envs_vals{$item} = $ENV{$item}; +} + +## +## +## real in the config +## +## +our $config = { + global_envs => {}, + blob_envs => {}, + blobs => {}, + output_dir => '/var/cache/text_blob_extend', +}; +my @global_envs; +my @blobs; +if ( -f $opts{c} ) { + eval { + my $raw_config = read_file( $opts{c} ); + my $parsed_config = decode_json($raw_config); + # process .global_envs if it exists + if ( defined( $parsed_config->{global_envs} ) + && ref( $parsed_config->{global_envs} ) eq 'HASH' ) + { + @global_envs = keys( %{ $parsed_config->{global_envs} } ); + foreach my $item (@global_envs) { + if ( ref( $parsed_config->{global_envs}{$item} ) ne '' ) { + my $warning + = '".global_envs.' + . $item + . '" has a ref value of ' + . ref( $parsed_config->{global_envs}{$item} ) + . ' and not ""'; + warn($warning); + push( @{ $return_json->{data}{warns} }, $warning ); + } else { + push( @global_envs, $item ); + $config->{global_envs}{$item} = $parsed_config->{global_envs}{$item}; + } + } ## end foreach my $item (@global_envs) + } elsif ( defined( $parsed_config->{global_envs} ) + && ref( $parsed_config->{global_envs} ) ne 'HASH' ) + { + my $warning = '.global_envs is not a hash but "' . ref( $parsed_config->{global_envs} ) . '"'; + warn($warning); + push( @{ $return_json->{data}{warns} }, $warning ); + } + # process .blob_envs + if ( defined( $parsed_config->{blob_envs} ) + && ref( $parsed_config->{blob_envs} ) eq 'HASH' ) + { + # ensure all .blob_envs are hashes + my @blob_envs = keys( %{ $parsed_config->{blob_envs} } ); + foreach my $item (@blob_envs) { + if ( ref( $parsed_config->{blob_envs}{$item} ) ne 'HASH' ) { + my $warning + = '".blob_envs.' + . $item + . '" has a ref value of ' + . ref( $parsed_config->{blob_envs}{$item} ) + . ' and not "HASH"'; + warn($warning); + push( @{ $return_json->{data}{warns} }, $warning ); + } else { + my @envs_for_blobs = keys( %{ $parsed_config->{blob_envs}{$item} } ); + # only create the hash if we have actual keys + if ( defined( $envs_for_blobs[0] ) ) { + $config->{blob_envs}{$item} = {}; + # we have keys, so only add scalars + foreach my $item2 (@envs_for_blobs) { + if ( ref( $parsed_config->{blob_envs}{$item}{$item2} ) ne '' ) { + my $warning + = '".blob_envs.' + . $item . '.' + . $item2 + . '" has a ref value of ' + . ref( $parsed_config->{blob_envs}{$item}{$item2} ) + . 
' and not ""'; + warn($warning); + push( @{ $return_json->{data}{warns} }, $warning ); + } else { + $config->{blob_envs}{$item}{$item2} = $parsed_config->{blob_envs}{$item}{$item2}; + } + } ## end foreach my $item2 (@envs_for_blobs) + } ## end if ( defined( $envs_for_blobs[0] ) ) + } ## end else [ if ( ref( $parsed_config->{blob_envs}{$item...}))] + } ## end foreach my $item (@blob_envs) + } elsif ( defined( $parsed_config->{blob_envs} ) + && ref( $parsed_config->{blob_envs} ) ne 'HASH' ) + { + my $warning = '.blob_envs is not a hash but "' . ref( $parsed_config->{blob_envs} ) . '"'; + warn($warning); + push( @{ $return_json->{data}{warns} }, $warning ); + } + # process .blobs + if ( defined( $parsed_config->{blobs} ) + && ref( $parsed_config->{blobs} ) eq 'HASH' ) + { + # if here, it is a hash, now to check to make sure it is all sane + my @blobs_check = keys( %{ $parsed_config->{blobs} } ); + if ( !defined( $blobs_check[0] ) ) { + my $warning = '.blobs has no keys defined under it'; + warn($warning); + push( @{ $return_json->{data}{warns} }, $warning ); + } else { + # process + foreach my $item (@blobs_check) { + if ( ref( $parsed_config->{blobs}{$item} ) ne '' ) { + my $warning + = '".blobs.' + . $item + . '" has a ref value of ' + . ref( $parsed_config->{senvs}{$item} ) + . ' and not ""'; + warn($warning); + push( @{ $return_json->{data}{warns} }, $warning ); + } else { + push( @blobs, $item ); + $config->{blobs}{$item} = $parsed_config->{blobs}{$item}; + } + } ## end foreach my $item (@blobs_check) + } ## end else [ if ( !defined( $blobs_check[0] ) ) ] + } elsif ( defined( $parsed_config->{blobs} ) + && ref( $parsed_config->{blobs} ) ne 'HASH' ) + { + # .blobs must always be a hash + die( '.blobs is not a hash but "' . ref( $parsed_config->{blob_envs} ) . '"' ); + } else { + # .blobs must always be defined and a hash + die('.blobs not defined and not a hash'); + } + # process .output_dir + if ( defined( $parsed_config->{output_dir} ) + && ref( $parsed_config->{output_dir} ) eq '' ) + { + # defined and is a scalar, so save it + $config->{output_dir} = $parsed_config->{output_dir}; + } elsif ( defined( $parsed_config->{output_dir} ) + && ref( $parsed_config->{output_dir} ) ne '' ) + { + # hash or array, so die + die( '.output_dir is not a string but a ref type of "' . ref( $parsed_config->{output_dir} ) . '"' ); + } + }; + if ($@) { + $return_json->{error} = 1; + $return_json->{errorString} = $@; + return_the_data( $return_json, $opts{B} ); + exit 0; + } +} else { + my $warning = 'Config file, "' . $opts{c} . '", does not exist or is not a file'; + warn($warning); + push( @{ $return_json->{data}{warns} }, $warning ); +} + +if ( -e $config->{output_dir} && !-d $config->{output_dir} ) { + die( 'Output dir, "' . $config->{output_dir} . '", is not a directory but it exists' ); +} elsif ( !-e $config->{output_dir} ) { + mkdir( $config->{output_dir} ) || die( 'Output dir, "' . $config->{output_dir} . 
'", could not be created' ); +} + +## +## +## process each specified text blob +## +## +foreach my $blob (@blobs) { + # + # reset default envs from run time + # + foreach my $item ( keys(%ENV) ) { + if ( !defined( $original_envs_vals{$item} ) ) { + delete( $ENV{$item} ); + } else { + $ENV{$item} = $original_envs_vals{$item}; + } + } + # + # set the global vars + # + foreach my $item (@global_envs) { + $ENV{$item} = $config->{global_envs}{$item}; + } + # + # set the blob envs + # + if ( defined( $config->{ blob_envs} { $blob } ) ) { + foreach my $item ( keys( %{ $config->{blob_envs}{$blob} } ) ) { + $ENV{$item} = $config->{blob_envs}{$blob}{$item}; + } + } + # + # run the command and get the stdout + # + my $command = $config->{blobs}{$blob}; + my $output = `$command`; + if ($? != 0) { + $return_json->{data}{non_zero_exits}++; + } + $return_json->{data}{blobs}{$blob} = $output; + $return_json->{data}{blob_exit_val}{$blob} = $? >> 8; + $return_json->{data}{blob_exit_signal}{$blob} = $? & 127; + $return_json->{data}{blob_has_coredump}{$blob} = $? & 128; +} ## end foreach my $blob (@blobs) + +## +## +## write the output +## +## + +my $raw_json = encode_json($return_json); + +if ( !$opts{q} ) { + print $raw_json. "\n"; +} + +write_file( $config->{output_dir} . '/json', { atomic => 1 }, $raw_json . "\n" ); + +my $compressed_string; +gzip \$raw_json => \$compressed_string; +my $compressed = encode_base64($compressed_string); +$compressed =~ s/\n//g; +$compressed = $compressed . "\n"; +my $print_compressed = 0; +write_file( $config->{output_dir} . '/snmp', { atomic => 1 }, $compressed ); From 1ea82243cf736fe6f4bfceb4aa91811cd566b341 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Sun, 1 Dec 2024 19:45:14 -0600 Subject: [PATCH 332/332] POD cleanups and fix handling of global_envs (#560) --- snmp/text_blob | 34 +++++++++++++++++++++------------- 1 file changed, 21 insertions(+), 13 deletions(-) diff --git a/snmp/text_blob b/snmp/text_blob index 030a583bd..24bf6aec1 100755 --- a/snmp/text_blob +++ b/snmp/text_blob @@ -9,19 +9,19 @@ text_blob - LinbreNMS JSON extend for text blob stuff. =head1 VERSION -0.0.1 +0.0.2 =cut -our $VERSION = '0.0.1'; +our $VERSION = '0.0.2'; =head1 SYNOPSIS -wireguard [B<-c> ] [B<-q>] +text_blob [B<-c> ] [B<-q>] -wireguard [B<-v>|B<--version>] +text_blob [B<-v>|B<--version>] -wireguard [B<-h>|B<--help>] +text_blob [B<-h>|B<--help>] =head1 SWITCHES @@ -55,6 +55,14 @@ Install the depends. Then set it up in SNMPD. + extend text_blob /bin/cat /var/cache/text_blob_extend/snmp + +Setup cron... + + */5 * * * * /etc/snmp/text_blob -q + +Create a config file at /usr/local/etc/text_blob_extend.json . + =head1 CONFIG The default config is /usr/local/etc/text_blob_extend.json . @@ -72,13 +80,17 @@ The default config is /usr/local/etc/text_blob_extend.json . 
Example { + "global_envs":{ + "NO_COLOR": 1 + }, "blobs":{ "jls": "jls", "dmesg": "dmesg", "top_io": "top -b -m io -j", "top_cpu": "top -b -m cpu -w -j", "ps": "ps axuw", - "netstat": "netstat -rn" + "routes": "netstat -rn", + "netstat": "ncnetstat -n --pct 2> /dev/null" } } @@ -177,7 +189,6 @@ if ( -f $opts{c} ) { warn($warning); push( @{ $return_json->{data}{warns} }, $warning ); } else { - push( @global_envs, $item ); $config->{global_envs}{$item} = $parsed_config->{global_envs}{$item}; } } ## end foreach my $item (@global_envs) @@ -286,10 +297,7 @@ if ( -f $opts{c} ) { } }; if ($@) { - $return_json->{error} = 1; - $return_json->{errorString} = $@; - return_the_data( $return_json, $opts{B} ); - exit 0; + die($@); } } else { my $warning = 'Config file, "' . $opts{c} . '", does not exist or is not a file'; @@ -328,7 +336,7 @@ foreach my $blob (@blobs) { # # set the blob envs # - if ( defined( $config->{ blob_envs} { $blob } ) ) { + if ( defined( $config->{blob_envs}{$blob} ) ) { foreach my $item ( keys( %{ $config->{blob_envs}{$blob} } ) ) { $ENV{$item} = $config->{blob_envs}{$blob}{$item}; } @@ -338,7 +346,7 @@ foreach my $blob (@blobs) { # my $command = $config->{blobs}{$blob}; my $output = `$command`; - if ($? != 0) { + if ( $? != 0 ) { $return_json->{data}{non_zero_exits}++; } $return_json->{data}{blobs}{$blob} = $output;