Add debian common role

This commit is contained in:
2023-05-05 15:47:27 -04:00
parent b387d68eda
commit bf6bfe2809
79 changed files with 3166 additions and 0 deletions

View File

@ -0,0 +1,11 @@
#!/bin/bash
# Backup check for Check_MK
# Installed by BLSE 2.x ansible
#
# Emits the <<<backup>>> section: one "<share> <status>" line per share
# listed in the share-list file, where <status> is the content of the
# share's .backup marker file.

# Print the <<<backup>>> agent section.
#   $1 - file listing one share directory per line
print_backup_section() {
    local share_list_file=$1
    local share
    echo "<<<backup>>>"
    # Missing/unreadable share list: emit the (empty) section header only,
    # instead of leaking a cat error and word-splitting garbage.
    [ -r "$share_list_file" ] || return 0
    # read -r preserves backslashes; quoting keeps paths with spaces intact
    # (the original array round-trip split share paths on whitespace).
    while IFS= read -r share; do
        [ -n "$share" ] || continue
        echo "${share} $( cat "${share}/.backup" )"
    done < "$share_list_file"
}

print_backup_section /var/backups/shares

View File

@ -0,0 +1,15 @@
# CephFS mount health section for the Check_MK agent.
# NOTE(review): this fragment has no shebang and references $STAT_VERSION and
# $STAT_BROKE without defining them — presumably it is embedded in the main
# agent script, which sets them; confirm before running it standalone.
echo '<<<cephfsmounts>>>'
# Extract the mount point (2nd field) of every " ceph "/" cephfs " entry
# in /proc/mounts.
sed -n '/ ceph\? /s/[^ ]* \([^ ]*\) .*/\1/p' < /proc/mounts |
# /proc/mounts encodes spaces in mount points as the octal escape \040.
sed 's/\\040/ /g' |
# NOTE(review): MP is read without -r and expanded unquoted below — mount
# points containing backslashes or needing glob protection would misbehave.
while read MP
do
if [ ! -r $MP ]; then
echo "$MP Permission denied"
elif [ $STAT_VERSION != $STAT_BROKE ]; then
# waitmax kills stat (SIGKILL) after 2s so a hung CephFS mount cannot
# stall the whole agent run; report the mount as hanging on failure.
waitmax -s 9 2 stat -f -c "$MP ok %b %f %a %s" "$MP" || \
echo "$MP hanging 0 0 0 0"
else
# Workaround branch for the known-broken stat version: append the
# newline that this stat build omits.
waitmax -s 9 2 stat -f -c "$MP ok %b %f %a %s" "$MP" && \
printf '\n'|| echo "$MP hanging 0 0 0 0"
fi
done

View File

@ -0,0 +1,33 @@
#!/bin/bash
# Apt and dpkg status check for Check_MK
# Installed by BLSE 2.x ansible
#
# Emits the <<<dpkg>>> section: Debian release, total package count,
# upgradable packages, packages in a non-"ii" dpkg state, and leftover
# configuration files from old package versions.

# Full dpkg table; COLUMNS=200 keeps dpkg from truncating package names.
TMP_DPKG="$( COLUMNS=200 dpkg --list )"
# Awk output: line 1 is the total package count (the table has a 5-line
# header); remaining lines are "package[state]" for non-"ii" packages.
# NOTE(review): asort() is GNU awk specific — this requires gawk.
TMP_AWK="$( awk '
{ if (NR>5) {
if ($1 != "ii") bad_package[$2]=$1;
}}
END {
print NR-5;
bad_package_count=asort(bad_package,junk)
if (bad_package_count) {
for (package in bad_package)
print package "[" bad_package[package] "]"
exit 1
}
}
' <<<"$TMP_DPKG" )"
DEBIAN_VERSION="$( cat /etc/debian_version )"
TOTAL_PACKAGES=$( head --lines=1 <<<"${TMP_AWK}" )
# One "pkg[candidate<>installed]" token per upgradable package; apt's
# "Listing..." banner is dropped and the trailing "]" stripped by gsub.
UPGRADABLE_PACKAGES=( $( apt list --upgradable 2>/dev/null | grep -v '^Listing' | awk '{ gsub(/\]/,"",$NF); print $1 "[" $NF "<>" $2 "]" }' ) )
INCONSISTENT_PACKAGES=( $( tail --lines=+2 <<<"${TMP_AWK}" ) )
# Leftover config files from upgrades/removals; ionice -c3 runs the find
# with idle I/O priority so the scan does not impact production load.
OLD_CONFIG_FILES=( $( ionice -c3 find /etc -type f -a \( -name '*.dpkg-*' -o -name '*.ucf-*' -o -name '*.update-*' \) 2>/dev/null ) )
echo "<<<dpkg>>>"
echo "debian_version ${DEBIAN_VERSION}"
echo "total_packages ${TOTAL_PACKAGES}"
echo "upgradable_packages ${#UPGRADABLE_PACKAGES[*]} ${UPGRADABLE_PACKAGES[*]}"
echo "inconsistent_packages ${#INCONSISTENT_PACKAGES[*]} ${INCONSISTENT_PACKAGES[*]}"
echo "obsolete_configuration_files ${#OLD_CONFIG_FILES[*]} ${OLD_CONFIG_FILES[*]}"

View File

@ -0,0 +1,16 @@
#!/bin/bash
# Entropy availability check for Check_MK
# Installed by BLSE 2.x ansible
#
# Reports the kernel's available entropy and pool size, but only on
# kernels that expose the counters under /proc.
entropy_file=/proc/sys/kernel/random/entropy_avail
poolsize_file=/proc/sys/kernel/random/poolsize

if [ -e "$entropy_file" ]; then
    echo '<<<entropy_avail>>>'
    printf 'entropy_avail '
    cat "$entropy_file"
    printf 'poolsize '
    cat "$poolsize_file"
fi

View File

@ -0,0 +1,103 @@
#!/usr/bin/env python3
# Check for freshness of various components (kernel, microcode, services,
# containers, user sessions) using needrestart's batch output.
#
# Fixes over the previous revision:
#   - shebang now python3 (subprocess.run is Python 3 only)
#   - stderr was mistakenly decoded from nrout.stdout (and never used)
#   - f-string prefixes were used on two regex literals by mistake
#   - parsing is factored out so the module can be imported/tested
import subprocess
import re
import json

# Example "needrestart -b" output:
# NEEDRESTART-VER: 3.4
# NEEDRESTART-KCUR: 4.19.0-6-amd64
# NEEDRESTART-KEXP: 4.19.0-20-amd64
# NEEDRESTART-KSTA: 3
# NEEDRESTART-UCSTA: 2
# NEEDRESTART-UCCUR: 0xb000038
# NEEDRESTART-UCEXP: 0xb000040
# NEEDRESTART-SVC: cron
# NEEDRESTART-CONT: LXC web1
# NEEDRESTART-SESS: root @ session #28017
#
# KSTA/UCSTA state values:
#   0: unknown or failed to detect
#   1: no pending upgrade
#   2: ABI compatible upgrade pending
#   3: version upgrade pending


def new_data():
    """Return the output data structure with all fields at their defaults."""
    return {
        'kernel': {
            'current': None,   # running kernel version (KCUR)
            'pending': None,   # expected/installed kernel version (KEXP)
            'state': 0,        # KSTA value, see legend above
        },
        'microcode': {
            'current': None,   # loaded microcode revision (UCCUR)
            'pending': None,   # available microcode revision (UCEXP)
            'state': 0,        # UCSTA value, see legend above
        },
        'services': {'count': 0, 'list': []},
        'containers': {'count': 0, 'list': []},
        'sessions': {'count': 0, 'list': []},
    }


def parse_needrestart_output(lines):
    """Parse the stdout lines of 'needrestart -b' into the data structure."""
    data = new_data()
    for line in lines:
        # Kernel version
        if line.startswith('NEEDRESTART-KSTA'):
            data['kernel']['state'] = int(line.split(': ')[-1])
        elif line.startswith('NEEDRESTART-KCUR'):
            data['kernel']['current'] = line.split(': ')[-1]
        elif line.startswith('NEEDRESTART-KEXP'):
            data['kernel']['pending'] = line.split(': ')[-1]
        # Microcode version
        elif line.startswith('NEEDRESTART-UCSTA'):
            data['microcode']['state'] = int(line.split(': ')[-1])
        elif line.startswith('NEEDRESTART-UCCUR'):
            data['microcode']['current'] = line.split(': ')[-1]
        elif line.startswith('NEEDRESTART-UCEXP'):
            data['microcode']['pending'] = line.split(': ')[-1]
        # Services needing restart
        elif line.startswith('NEEDRESTART-SVC'):
            data['services']['count'] += 1
            data['services']['list'].append(' '.join(line.split(': ')[1:]))
        # Containers needing restart
        elif line.startswith('NEEDRESTART-CONT'):
            data['containers']['count'] += 1
            data['containers']['list'].append(' '.join(line.split(': ')[1:]))
        # Sessions needing restart
        elif line.startswith('NEEDRESTART-SESS'):
            data['sessions']['count'] += 1
            data['sessions']['list'].append(' '.join(line.split(': ')[1:]))
    return data


def main():
    """Run needrestart and print the <<<freshness>>> agent section."""
    try:
        nrout = subprocess.run(["/usr/sbin/needrestart", "-b"], timeout=5,
                               stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    except subprocess.TimeoutExpired:
        raise SystemExit(2)
    except Exception:
        raise SystemExit(1)
    stdout = nrout.stdout.decode("ascii").split('\n')
    data = parse_needrestart_output(stdout)
    print("<<<freshness>>>")
    print(json.dumps(data))
    raise SystemExit(0)


if __name__ == '__main__':
    main()

View File

@ -0,0 +1,51 @@
#!/usr/bin/env python3
# Kernel taint status check for Check_MK: decodes the bitmask in
# /proc/sys/kernel/tainted and reports WARN when any error-class taint
# bit is set.
#
# Fixes over the previous revision:
#   - range(0, 18) skipped bit 18 even though it is defined in the map
#   - dead "xor_val = i ** 2" computation removed
#   - WARN used all() over the error flags, so a mix of error and
#     non-error taints reported OK; any() is used instead
#   - unknown (future) taint bits no longer raise KeyError

# Map of taint bit -> description and whether it is treated as an error.
taint_map = {
    0: { "text": "Proprietary module was loaded", "error": False },
    1: { "text": "Module was force loaded", "error": True },
    2: { "text": "Kernel running on an out of specification system", "error": True },
    3: { "text": "Module was force unloaded", "error": True },
    4: { "text": "Processor reported a Machine Check Exception (MCE)", "error": True },
    5: { "text": "Bad page referenced or some unexpected page flags", "error": True },
    6: { "text": "Taint requested by userspace application", "error": True },
    7: { "text": "Kernel died recently (OOPS or BUG)", "error": True },
    8: { "text": "ACPI table overridden by user", "error": True },
    9: { "text": "Kernel issued warning", "error": True },
    10: { "text": "Staging driver was loaded", "error": False },
    11: { "text": "Workaround for bug in platform firmware applied", "error": True },
    12: { "text": "Externally-built module was loaded", "error": False },
    13: { "text": "Unsigned module was loaded", "error": False },
    14: { "text": "Soft lockup occurred", "error": True },
    15: { "text": "Kernel has been live patched", "error": True },
    16: { "text": "Auxiliary taint", "error": True },
    17: { "text": "Kernel was built with the struct randomization plugin", "error": True },
    18: { "text": "An in-kernel test has been run", "error": True },
}


def classify_taints(taint_val):
    """Decode the taint bitmask into parallel lists (messages, error_flags).

    Bits not present in taint_map (newer kernels) are reported as unknown
    and treated as errors.  An untainted kernel yields a single
    informational entry with error flag False.
    """
    taints_msg = []
    taints_err = []
    for bit in range(taint_val.bit_length()):
        if not (taint_val >> bit) & 1:
            continue
        taint = taint_map.get(
            bit, {"text": "Unknown taint bit %d" % bit, "error": True})
        taints_msg.append(taint['text'])
        taints_err.append(taint['error'])
    if not taints_msg:
        taints_msg = ["Kernel is untainted"]
        taints_err = [False]
    return taints_msg, taints_err


def main():
    """Read /proc and print the <<<kernel_taint>>> agent section."""
    with open("/proc/sys/kernel/tainted") as tfh:
        taint_val = int(tfh.read().strip())
    taints_msg, taints_err = classify_taints(taint_val)
    state = "WARN" if any(taints_err) else "OK"
    print("<<<kernel_taint>>>")
    print(f'{state} {"; ".join(taints_msg)}')


if __name__ == "__main__":
    main()

View File

@ -0,0 +1,564 @@
#!/usr/bin/python
# -*- encoding: utf-8; py-indent-offset: 4 -*-
# +------------------------------------------------------------------+
# | ____ _ _ __ __ _ __ |
# | / ___| |__ ___ ___| | __ | \/ | |/ / |
# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
# | | |___| | | | __/ (__| < | | | | . \ |
# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
# | |
# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de |
# +------------------------------------------------------------------+
#
# This file is part of Check_MK.
# The official homepage is at http://mathias-kettner.de/check_mk.
#
# check_mk is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation in version 2. check_mk is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more de-
# tails. You should have received a copy of the GNU General Public
# License along with GNU Make; see the file COPYING. If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA.
# Call with -d for debug mode: colored output, no saving of status
import sys, os, re, time, glob
# .--MEI-Cleanup---------------------------------------------------------.
# | __ __ _____ ___ ____ _ |
# | | \/ | ____|_ _| / ___| | ___ __ _ _ __ _ _ _ __ |
# | | |\/| | _| | |_____| | | |/ _ \/ _` | '_ \| | | | '_ \ |
# | | | | | |___ | |_____| |___| | __/ (_| | | | | |_| | |_) | |
# | |_| |_|_____|___| \____|_|\___|\__,_|_| |_|\__,_| .__/ |
# | |_| |
# +----------------------------------------------------------------------+
# In case the program crashes or is killed in a hard way, the frozen binary .exe
# may leave temporary directories named "_MEI..." in the temporary path. Clean them
# up to prevent eating disk space over time.
########################################################################
############## DUPLICATE CODE WARNING ##################################
### This code is also used in the cmk-update-agent frozen binary #######
### Any changes to this class should also be made in cmk-update-agent ##
### In the bright future we will move this code into a library #########
########################################################################
class MEIFolderCleaner(object):
    # Removes stale PyInstaller "_MEI<PID><NR>" temp directories that a
    # crashed/killed frozen .exe left behind.  Windows-only code path
    # (uses ctypes/windll and win32file).

    def pid_running(self, pid):
        # Probe the PID via the Win32 API: OpenProcess() returns a
        # non-zero handle only while the process still exists.
        import ctypes
        kernel32 = ctypes.windll.kernel32
        SYNCHRONIZE = 0x100000
        process = kernel32.OpenProcess(SYNCHRONIZE, 0, pid)
        if process != 0:
            kernel32.CloseHandle(process)
            return True
        else:
            return False

    def find_and_remove_leftover_folders(self, hint_filenames):
        # Only relevant when running as a frozen (PyInstaller) binary.
        if not hasattr(sys, "frozen"):
            return
        import win32file # pylint: disable=import-error
        import tempfile
        base_path = tempfile.gettempdir()
        for f in os.listdir(base_path):
            try:
                path = os.path.join(base_path, f)
                if not os.path.isdir(path):
                    continue
                # Only care about directories related to our program
                invalid_dir = False
                for hint_filename in hint_filenames:
                    if not os.path.exists(os.path.join(path, hint_filename)):
                        invalid_dir = True
                        break
                if invalid_dir:
                    continue
                pyinstaller_tmp_path = win32file.GetLongPathName(sys._MEIPASS).lower() # pylint: disable=no-member
                if pyinstaller_tmp_path == path.lower():
                    continue # Skip our own directory
                # Extract the process id from the directory and check whether or not it is still
                # running. Don't delete directories of running processes!
                # The name of the temporary directories is "_MEI<PID><NR>". We try to extract the PID
                # by stripping of a single digit from the right. In the hope the NR is a single digit
                # in all relevant cases.
                pid = int(f[4:-1])
                if self.pid_running(pid):
                    continue
                # NOTE(review): shutil is never imported in this scope, so this
                # call looks like it would raise NameError, silently swallowed
                # by the blanket except below — confirm and import shutil here.
                shutil.rmtree(path)
            except Exception, e:
                # TODO: introduce verbose mode for mk_logwatch
                pass
#.

# Best-effort OS detection; anything unexpected falls back to "linux".
os_type = "linux"
try:
    import platform
    os_type = platform.system().lower()
except:
    pass

# Debug mode (-d/--debug): colored terminal output and no saving of state.
if '-d' in sys.argv[1:] or '--debug' in sys.argv[1:]:
    tty_red = '\033[1;31m'
    tty_green = '\033[1;32m'
    tty_yellow = '\033[1;33m'
    tty_blue = '\033[1;34m'
    tty_normal = '\033[0m'
    debug = True
else:
    tty_red = ''
    tty_green = ''
    tty_yellow = ''
    tty_blue = ''
    tty_normal = ''
    debug = False

# The configuration file and status file are searched
# in the directory named by the environment variable
# LOGWATCH_DIR. If that is not set, MK_CONFDIR is used.
# If that is not set either, the current directory ist
# used.
logwatch_dir = os.getenv("LOGWATCH_DIR")
if logwatch_dir:
    mk_confdir = logwatch_dir
    mk_vardir = logwatch_dir
else:
    mk_confdir = os.getenv("MK_CONFDIR") or "."
    mk_vardir = os.getenv("MK_VARDIR") or os.getenv("MK_STATEDIR") or "."

sys.stdout.write("<<<logwatch>>>\n")

config_filename = mk_confdir + "/logwatch.cfg"
config_dir = mk_confdir + "/logwatch.d/*.cfg"

# Determine the name of the state file
# $REMOTE set -> logwatch.state.$REMOTE
# $REMOTE not set and a tty -> logwatch.state.local
# $REMOTE not set and not a tty -> logwatch.state
remote_hostname = os.getenv("REMOTE", "")
remote_hostname = remote_hostname.replace(":", "_")
if remote_hostname != "":
    status_filename = "%s/logwatch.state.%s" % (mk_vardir, remote_hostname)
else:
    if sys.stdout.isatty():
        status_filename = "%s/logwatch.state.local" % mk_vardir
    else:
        status_filename = "%s/logwatch.state" % mk_vardir

# Copy the last known state from the logwatch.state when there is no status_filename yet.
if not os.path.exists(status_filename) and os.path.exists("%s/logwatch.state" % mk_vardir):
    import shutil
    shutil.copy("%s/logwatch.state" % mk_vardir, status_filename)
def is_not_comment(line):
    """Return True when *line* carries configuration, i.e. it is neither
    blank nor a '#' comment line."""
    stripped = line.strip()
    if not stripped:
        return False
    if stripped.startswith('#'):
        return False
    return True
def parse_filenames(line):
    """Split a logfile-declaration line into its whitespace-separated
    tokens (filename globs plus any key=value options)."""
    tokens = line.split()
    return tokens
def parse_pattern(level, pattern, line):
    """Validate a pattern line's level character and compile its regex.

    level   -- one of 'C' (critical), 'W' (warning), 'I' (ignore), 'O' (ok)
    pattern -- the regular expression text
    line    -- the full config line, used only for error messages
    Returns (level, compiled_regex); raises Exception on a bad level or
    an invalid regex.
    """
    if level not in [ 'C', 'W', 'I', 'O' ]:
        raise Exception("Invalid pattern line '%s'" % line)
    try:
        compiled = re.compile(pattern)
    # Narrowed from a bare except: only a regex syntax error should be
    # reported as a configuration problem.
    except re.error:
        raise Exception("Invalid regular expression in line '%s'" % line)
    return (level, compiled)
def read_config():
    """Read logwatch.cfg plus all logwatch.d/*.cfg files into a list of
    (filenames, patterns) tuples.

    Config format: an unindented line declares logfile globs (plus
    key=value options); indented lines below it are pattern lines
    ("<level> <regex>"), where level 'A' adds a continuation pattern and
    'R' a rewrite pattern to the most recent match pattern.
    """
    config_lines = []
    try:
        config_lines += [ line.rstrip() for line in filter(is_not_comment, file(config_filename).readlines()) ]
    except IOError, e:
        # A missing main config file is tolerated: logwatch.d may supply
        # the whole configuration. In debug mode surface the error.
        if debug:
            raise
    # Add config from a logwatch.d folder
    for config_file in glob.glob(config_dir):
        config_lines += [ line.rstrip() for line in filter(is_not_comment, file(config_file).readlines()) ]
    have_filenames = False
    config = []
    cont_list = []
    rewrite_list = []
    for line in config_lines:
        if line[0].isspace(): # pattern line
            if not have_filenames:
                raise Exception("Missing logfile names")
            level, pattern = line.split(None, 1)
            if level == 'A':
                cont_list.append(parse_cont_pattern(pattern))
            elif level == 'R':
                rewrite_list.append(pattern)
            else:
                level, compiled = parse_pattern(level, pattern, line)
                # New pattern for line matching => clear continuation and rewrite patterns
                cont_list = []
                rewrite_list = []
                # TODO: Fix the code and remove the pragma below!
                # NOTE(review): 'patterns' is bound by the filename branch
                # below; a pattern line before any filename line would be a
                # NameError if not for the have_filenames guard above.
                patterns.append((level, compiled, cont_list, rewrite_list)) # pylint: disable=used-before-assignment
        else: # filename line
            patterns = []
            cont_list = [] # Clear list of continuation patterns from last file
            rewrite_list = [] # Same for rewrite patterns
            config.append((parse_filenames(line), patterns))
            have_filenames = True
    return config
def parse_cont_pattern(pattern):
    """Parse an 'A' (continuation) pattern.

    A continuation pattern is either an integer (absorb that many
    following lines) or a regular expression (absorb while it matches).
    Returns int or a compiled regex; raises Exception when the text is
    neither.
    """
    try:
        return int(pattern)
    # Narrowed from bare excepts: non-numeric text falls through to the
    # regex interpretation; only a regex syntax error is a config error.
    except ValueError:
        try:
            return re.compile(pattern)
        except re.error:
            if debug:
                raise
            raise Exception("Invalid regular expression in line '%s'" % pattern)
# structure of statusfile
# # LOGFILE OFFSET INODE
# /var/log/messages|7767698|32455445
# /var/test/x12134.log|12345|32444355
# structure of statusfile
# # LOGFILE OFFSET INODE
# /var/log/messages|7767698|32455445
# /var/test/x12134.log|12345|32444355
def read_status():
    """Load the status file into {logfile: (offset, inode)}.

    In debug mode an empty status is returned on purpose so every file is
    rescanned from its current position without consuming saved offsets.
    """
    if debug:
        return {}
    status = {}
    # open() + try/finally instead of the Python-2-only file() with a
    # leaked handle; behavior is otherwise unchanged.
    f = open(status_filename)
    try:
        for line in f:
            # TODO: Remove variants with spaces. rsplit is
            # not portable. split fails if logfilename contains
            # spaces
            inode = -1
            try:
                # Preferred format: LOGFILE|OFFSET[|INODE]
                parts = line.split('|')
                filename = parts[0]
                offset = parts[1]
                if len(parts) >= 3:
                    inode = parts[2]
            # No '|' separator -> legacy whitespace-separated formats.
            except IndexError:
                try:
                    filename, offset = line.rsplit(None, 1)
                except ValueError:
                    filename, offset = line.split(None, 1)
            status[filename] = int(offset), int(inode)
    finally:
        f.close()
    return status
def save_status(status):
    """Persist the per-logfile scan positions to the status file, one
    "logfile|offset|inode" line per entry (see read_status)."""
    # open() + try/finally instead of the Python-2-only file(): the
    # original never closed the handle, so a crash between writes could
    # leave the status file unflushed.
    f = open(status_filename, "w")
    try:
        for filename, (offset, inode) in status.items():
            f.write("%s|%d|%d\n" % (filename, offset, inode))
    finally:
        f.close()
# One-line lookahead buffer: a line consumed while probing continuation
# patterns is stored here and returned by the next call to next_line().
pushed_back_line = None

def next_line(file_handle):
    """Return the next complete line of the logfile, or None at EOF or
    when the trailing line is still being written (no newline yet)."""
    global pushed_back_line
    if pushed_back_line != None:
        line = pushed_back_line
        pushed_back_line = None
        return line
    else:
        try:
            # NOTE(review): .next() is the Python 2 iterator protocol.
            line = file_handle.next()
            # Avoid parsing of (yet) incomplete lines (when acutal application
            # is just in the process of writing)
            # Just check if the line ends with a \n. This handles \n and \r\n
            if not line.endswith("\n"):
                # Rewind the underlying fd so the partial line is re-read
                # on the next agent run once it has been completed.
                begin_of_line_offset = file_handle.tell() - len(line)
                os.lseek(file_handle.fileno(), begin_of_line_offset, 0)
                return None
            return line
        except:
            return None
def is_inode_cabable(path):
    """Report whether the filesystem holding *path* provides usable inode
    numbers (needed to detect logfile rotation)."""
    if "linux" in os_type:
        return True
    if "windows" not in os_type:
        return False
    # Windows: only NTFS volumes expose stable file-index information.
    drive = "%s:\\\\" % path.split(":", 1)[0]
    import win32api  # pylint: disable=import-error
    volume_info = win32api.GetVolumeInformation(drive)
    filesystem_name = volume_info[-1]
    return "ntfs" in filesystem_name.lower()
def process_logfile(logfile, patterns):
    """Scan the new portion of one logfile against its pattern list and
    print classified lines.

    Reads and updates the global 'status' dict ({logfile: (offset, inode)})
    and relies on the module-level opt_* option globals set by the main
    loop.  Output is buffered and only emitted when at least one line was
    classified C/W/O.
    """
    global pushed_back_line
    # Look at which file offset we have finished scanning
    # the logfile last time. If we have never seen this file
    # before, we set the offset to -1
    offset, prev_inode = status.get(logfile, (-1, -1))
    try:
        file_desc = os.open(logfile, os.O_RDONLY)
        if not is_inode_cabable(logfile):
            inode = 1 # Create a dummy inode
        else:
            inode = os.fstat(file_desc)[1] # 1 = st_ino
    except:
        if debug:
            raise
        sys.stdout.write("[[[%s:cannotopen]]]\n" % logfile)
        return
    sys.stdout.write("[[[%s]]]\n" % logfile)
    # Seek to the current end in order to determine file size
    current_end = os.lseek(file_desc, 0, 2) # os.SEEK_END not available in Python 2.4
    status[logfile] = current_end, inode
    # If we have never seen this file before, we just set the
    # current pointer to the file end. We do not want to make
    # a fuss about ancient log messages...
    if offset == -1:
        if not debug:
            return
        else:
            offset = 0
    # If the inode of the logfile has changed it has appearently
    # been started from new (logfile rotation). At least we must
    # assume that. In some rare cases (restore of a backup, etc)
    # we are wrong and resend old log messages
    if prev_inode >= 0 and inode != prev_inode:
        offset = 0
    # Our previously stored offset is the current end ->
    # no new lines in this file
    if offset == current_end:
        return # nothing new
    # If our offset is beyond the current end, the logfile has been
    # truncated or wrapped while keeping the same inode. We assume
    # that it contains all new data in that case and restart from
    # offset 0.
    if offset > current_end:
        offset = 0
    # now seek to offset where interesting data begins
    os.lseek(file_desc, offset, 0) # os.SEEK_SET not available in Python 2.4
    if os_type == "windows":
        import io # Available with python 2.6
        import codecs
        # Some windows files are encoded in utf_16
        # Peak the first two bytes to determine the encoding...
        peak_handle = os.fdopen(file_desc, "rb")
        first_two_bytes = peak_handle.read(2)
        use_encoding = None
        if first_two_bytes == "\xFF\xFE":
            use_encoding = "utf_16"
        elif first_two_bytes == "\xFE\xFF":
            use_encoding = "utf_16_be"
        os.lseek(file_desc, offset, 0) # os.SEEK_SET not available in Python 2.4
        file_handle = io.open(file_desc, encoding = use_encoding)
    else:
        file_handle = os.fdopen(file_desc)
    # worst holds the most severe level seen so far:
    # -1 = nothing/informational, 0 = O, 1 = W, 2 = C.
    worst = -1
    outputtxt = ""
    lines_parsed = 0
    start_time = time.time()
    while True:
        line = next_line(file_handle)
        if line == None:
            break # End of file
        # Handle option maxlinesize
        if opt_maxlinesize != None and len(line) > opt_maxlinesize:
            line = line[:opt_maxlinesize] + "[TRUNCATED]\n"
        lines_parsed += 1
        # Check if maximum number of new log messages is exceeded
        if opt_maxlines != None and lines_parsed > opt_maxlines:
            outputtxt += "%s Maximum number (%d) of new log messages exceeded.\n" % (
                opt_overflow, opt_maxlines)
            worst = max(worst, opt_overflow_level)
            os.lseek(file_desc, 0, 2) # Seek to end of file, skip all other messages
            break
        # Check if maximum processing time (per file) is exceeded. Check only
        # every 100'th line in order to save system calls
        if opt_maxtime != None and lines_parsed % 100 == 10 \
            and time.time() - start_time > opt_maxtime:
            outputtxt += "%s Maximum parsing time (%.1f sec) of this log file exceeded.\n" % (
                opt_overflow, opt_maxtime)
            worst = max(worst, opt_overflow_level)
            os.lseek(file_desc, 0, 2) # Seek to end of file, skip all other messages
            break
        level = "."
        # First matching pattern wins; later patterns are not consulted.
        for lev, pattern, cont_patterns, replacements in patterns:
            matches = pattern.search(line[:-1])
            if matches:
                level = lev
                levelint = {'C': 2, 'W': 1, 'O': 0, 'I': -1, '.': -1}[lev]
                worst = max(levelint, worst)
                # Check for continuation lines
                for cont_pattern in cont_patterns:
                    if type(cont_pattern) == int: # add that many lines
                        for _unused_x in range(cont_pattern):
                            cont_line = next_line(file_handle)
                            if cont_line == None: # end of file
                                break
                            # \1 joins continuation lines; expanded to
                            # "CONT:" in debug output below.
                            line = line[:-1] + "\1" + cont_line
                    else: # pattern is regex
                        while True:
                            cont_line = next_line(file_handle)
                            if cont_line == None: # end of file
                                break
                            elif cont_pattern.search(cont_line[:-1]):
                                line = line[:-1] + "\1" + cont_line
                            else:
                                pushed_back_line = cont_line # sorry for stealing this line
                                break
                # Replacement
                for replace in replacements:
                    line = replace.replace('\\0', line.rstrip()) + "\n"
                    for nr, group in enumerate(matches.groups()):
                        line = line.replace('\\%d' % (nr+1), group)
                break # matching rule found and executed
        color = {'C': tty_red, 'W': tty_yellow, 'O': tty_green, 'I': tty_blue, '.': ''}[level]
        if debug:
            line = line.replace("\1", "\nCONT:")
        # 'I' (ignore) lines are demoted to context ('.') for output.
        if level == "I":
            level = "."
        if opt_nocontext and level == '.':
            continue
        outputtxt += "%s%s %s%s\n" % (color, level, line[:-1], tty_normal)
    # Remember how far we have read this time (may be before current_end
    # if maxlines/maxtime aborted the scan early).
    new_offset = os.lseek(file_desc, 0, 1) # os.SEEK_CUR not available in Python 2.4
    status[logfile] = new_offset, inode
    # output all lines if at least one warning, error or ok has been found
    if worst > -1:
        sys.stdout.write(outputtxt)
        sys.stdout.flush()
    # Handle option maxfilesize, regardless of warning or errors that have happened
    if opt_maxfilesize != None and (offset / opt_maxfilesize) < (new_offset / opt_maxfilesize):
        sys.stdout.write("%sW Maximum allowed logfile size (%d bytes) exceeded for the %dth time.%s\n" %
            (tty_yellow, opt_maxfilesize, new_offset / opt_maxfilesize, tty_normal))
# ---- Main program ----------------------------------------------------
try:
    # This removes leftover folders which may be generated by crashing frozen binaries
    folder_cleaner = MEIFolderCleaner()
    folder_cleaner.find_and_remove_leftover_folders(hint_filenames = ["mk_logwatch.exe.manifest"])
except Exception, e:
    sys.stdout.write("ERROR WHILE DOING FOLDER: %s\n" % e)
    sys.exit(1)

try:
    config = read_config()
except Exception, e:
    if debug:
        raise
    sys.stdout.write("CANNOT READ CONFIG FILE: %s\n" % e)
    sys.exit(1)

# Simply ignore errors in the status file. In case of a corrupted status file we simply begin
# with an empty status. That keeps the monitoring up and running - even if we might lose a
# message in the extreme case of a corrupted status file.
try:
    status = read_status()
except Exception, e:
    status = {}

logfile_patterns = {}
# The filename line may contain options like 'maxlines=100' or 'maxtime=10'
for filenames, patterns in config:
    # Initialize options with default values
    opt_maxlines = None
    opt_maxtime = None
    opt_maxlinesize = None
    opt_maxfilesize = None
    opt_regex = None
    opt_overflow = 'C'
    opt_overflow_level = 2
    opt_nocontext = False
    try:
        # Tokens containing '=' on a filename line are options, not globs.
        options = [ o.split('=', 1) for o in filenames if '=' in o ]
        for key, value in options:
            if key == 'maxlines':
                opt_maxlines = int(value)
            elif key == 'maxtime':
                opt_maxtime = float(value)
            elif key == 'maxlinesize':
                opt_maxlinesize = int(value)
            elif key == 'maxfilesize':
                opt_maxfilesize = int(value)
            elif key == 'overflow':
                if value not in [ 'C', 'I', 'W', 'O' ]:
                    raise Exception("Invalid value %s for overflow. Allowed are C, I, O and W" % value)
                opt_overflow = value
                opt_overflow_level = {'C':2, 'W':1, 'O':0, 'I':0}[value]
            elif key == 'regex':
                opt_regex = re.compile(value)
            elif key == 'iregex':
                opt_regex = re.compile(value, re.I)
            elif key == 'nocontext':
                opt_nocontext = True
            else:
                raise Exception("Invalid option %s" % key)
    except Exception, e:
        if debug:
            raise
        sys.stdout.write("INVALID CONFIGURATION: %s\n" % e)
        sys.exit(1)
    # Map every logfile matched by the globs to its accumulated pattern
    # list; unmatched globs are reported as missing.
    for glob_pattern in filenames:
        if '=' in glob_pattern:
            continue
        logfiles = glob.glob(glob_pattern)
        if opt_regex:
            logfiles = [ f for f in logfiles if opt_regex.search(f) ]
        if len(logfiles) == 0:
            sys.stdout.write('[[[%s:missing]]]\n' % glob_pattern)
        else:
            for logfile in logfiles:
                logfile_patterns[logfile] = logfile_patterns.get(logfile, []) + patterns

# NOTE(review): process_logfile() reads the opt_* globals, but by the time
# this loop runs they hold the values of the LAST config section only —
# looks like per-section options do not apply per file as intended; confirm.
for logfile, patterns in logfile_patterns.items():
    process_logfile(logfile, patterns)

# Persist offsets/inodes unless running in debug mode.
if not debug:
    save_status(status)

View File

@ -0,0 +1,71 @@
#!/bin/bash
# File ownership check for Check_MK
# Ensures that no files outside of homedirs are owned by administrative users
# Installed by BLSE 2.x ansible

# Bounds of the administrative UID range (see allocation table below).
# NOTE(review): find is later invoked with "-uid +MIN -uid -MAX", which
# matches UIDs strictly BETWEEN the bounds (201-598) — confirm whether the
# endpoints are meant to be included.
ADMIN_UID_MIN=200
ADMIN_UID_MAX=599
# http://www.debian.org/doc/debian-policy/ch-opersys.html
# 0-99: Globally allocated by the Debian project
# 100-199: (BLSE) Dynamically allocated system users and groups
# 200-299: (BLSE) BLSE service users and groups
# 300-499: (BLSE) reserved
# 500-599: (BLSE) system administrators
# 600-999: (BLSE) Dynamically allocated system users and groups
# 64045: (BLSE) ceph
# Return 0 if the first argument equals any of the remaining arguments,
# 1 otherwise.
#   $1    - value to search for
#   $2... - list elements
# Quoting the test operands makes the comparison safe for empty values
# and (when callers quote their arguments) values containing spaces;
# existing callers that pass unquoted ${arr[*]} are unaffected.
function is_element_of {
    local TO_FIND=$1
    shift
    local ARRAY_ELEMENT
    for ARRAY_ELEMENT in "$@"
    do
        if test "$TO_FIND" = "$ARRAY_ELEMENT"
        then
            return 0
        fi
    done
    return 1
}
# Exit codes understood by the Check_MK caller.
OK=0
WARNING=1
# Filesystem roots to scan; each is searched with -xdev, so nested mounts
# are only covered if listed here themselves.
FILESYSTEMs=(/ /var/log)
# All currently mounted mount points (field 2 of /proc/mounts).
MOUNTs=($(awk '{print $2}' '/proc/mounts'))
FILEs=()
for FILESYSTEM in ${FILESYSTEMs[@]}; do
while IFS= read -r -d $'\0' FILE
do
# Skip hits that are mount points of other filesystems (unless they are
# one of our scan roots): their contents belong to a different scan.
if ! is_element_of "$FILE" ${FILESYSTEMs[*]}; then
if is_element_of $FILE ${MOUNTs[*]}; then
continue
fi
fi
# NOTE(review): unquoted expansion — a path containing whitespace would
# word-split into multiple array entries here.
FILEs+=($FILE)
# Find files owned by an administrative UID outside the whitelisted
# locations; -print0 keeps arbitrary filenames intact through the loop.
done < <( find ${FILESYSTEM} -xdev -uid +${ADMIN_UID_MIN} -uid -${ADMIN_UID_MAX} \
-not \( -type d -a \( -path /media -o -path /mnt \) \) \
-not \( -name '.*.swp' -a -mtime -3 \) \
-not \( -path '*/.git' -o -path '*/.git/*' \) \
-not \( -path '*.dirtrack.Storable' \) \
-not \( -path '/home/*' \) \
-not \( -path '/tmp/*' \) \
-not \( -path '/var/home/*' \) \
-not \( -path '/var/log/gitlab/*' \) \
-not \( -path '/var/spool/cron/crontabs/*' \) \
-print0 2>/dev/null )
done
echo "<<<file_ownership>>>"
if ! test ${#FILEs[*]} -eq 0; then
echo -n "${#FILEs[*]} file(s) found with invalid ownership (must be UID outside of ${ADMIN_UID_MIN}-${ADMIN_UID_MAX}): "
echo "${FILEs[*]}"
exit $WARNING
else
echo "All files have valid ownership"
exit $OK
fi

Binary file not shown.