Initial commit of PVC Ansible role

This commit is contained in:
2023-09-01 15:42:19 -04:00
commit 6dfaf433dc
92 changed files with 4709 additions and 0 deletions

13
roles/pvc/README.md Normal file
View File

@ -0,0 +1,13 @@
# package-pvc
This role configures the PVC virtual cluster system.
# Supplemental variables
## Configurable
### `ceph_storage_secret_key`: The Ceph storage secret key in base64 format.
* Should be obtained from Ceph cluster.
### `ceph_storage_secret_uuid`: A UUID for the Ceph secret in libvirt.
* Should be unique per cluster.

View File

@ -0,0 +1,57 @@
---
# Default variables for the PVC role; override per cluster/host as needed.

# Ceph storage
# Both values must be supplied per deployment (see README): the key is the
# base64-encoded Ceph secret key, the uuid identifies the secret in libvirt.
ceph_storage_secret_key: ""
ceph_storage_secret_uuid: ""

# Database
pvc_dns_database_name: "pvcdns"
pvc_dns_database_user: "pvcdns"
# NOTE(review): weak plaintext default password — override this (e.g. with
# Ansible Vault) in any real deployment.
pvc_dns_database_password: "PVCdnsPassw0rd"

# Coordinators
# One entry per cluster node; consumed by the templates (e.g. frr.conf.j2
# matches hostname against ansible_hostname and uses router_id/is_coordinator).
pvc_nodes:
  - hostname: "pvc1"
    is_coordinator: yes
    node_id: 1
    router_id: "10.0.0.1"
    # "by-id" appears to mean "derive this IP from node_id" — TODO confirm
    cluster_ip: "by-id"
    storage_ip: "by-id"
    upstream_ip: ""
    ipmi_host: "pvc1-lom"
    ipmi_user: ""
    ipmi_password: ""
  - hostname: "pvc2"
    is_coordinator: yes
    node_id: 2
    router_id: "10.0.0.2"
    cluster_ip: "by-id"
    storage_ip: "by-id"
    upstream_ip: ""
    ipmi_host: "pvc2-lom"
    ipmi_user: ""
    ipmi_password: ""
  - hostname: "pvc3"
    is_coordinator: yes
    node_id: 3
    router_id: "10.0.0.3"
    cluster_ip: "by-id"
    storage_ip: "by-id"
    upstream_ip: ""
    ipmi_host: "pvc3-lom"
    ipmi_user: ""
    ipmi_password: ""

# Networks
# Private BGP AS number used for the EVPN mesh in frr.conf.j2.
pvc_asn: "65001"
# Upstream BGP peers; empty by default.
pvc_routers:
  - ""
pvc_cluster_device: "eth0"
pvc_cluster_domain: "pvc.local"
pvc_cluster_subnet: "10.0.0.0/24"
pvc_cluster_floatingip: "10.0.0.251/24"
pvc_storage_device: "eth1"
pvc_storage_domain: "pvc.storage"
pvc_storage_subnet: "10.0.1.0/24"
pvc_storage_floatingip: "10.0.1.251/24"
# Upstream values default to empty; fill in to connect the cluster to
# external routers.
pvc_upstream_device: "eth2"
pvc_upstream_domain: ""
pvc_upstream_subnet: ""
pvc_upstream_floatingip: ""
pvc_upstream_gatewayip: ""

View File

@ -0,0 +1,90 @@
#!/bin/bash
# +------------------------------------------------------------------+
# | ____ _ _ __ __ _ __ |
# | / ___| |__ ___ ___| | __ | \/ | |/ / |
# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
# | | |___| | | | __/ (__| < | | | | . \ |
# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
# | |
# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de |
# +------------------------------------------------------------------+
#
# This file is part of Check_MK.
# The official homepage is at http://mathias-kettner.de/check_mk.
#
# check_mk is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation in version 2. check_mk is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more de-
# tails. You should have received a copy of the GNU General Public
# License along with GNU Make; see the file COPYING. If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA.
# gets optional socket as argument
# Emit the check_mk sections (<<<mysql_ping>>>, <<<mysql>>>, <<<mysql_capacity>>>,
# <<<mysql_slave>>>) for one mysqld instance.
# $1 - optional "--socket=<path>" argument passed through to mysql/mysqladmin
#      (empty string for the default instance).
# Credentials come from /root/.my.cnf via --defaults-extra-file.
function do_query() {
    # Extract the socket path from the "--socket=<path>" argument, if any.
    INSTANCE=$(echo $1|awk -v FS="=" '{print $2}')
    # Count running mysqld processes that were started with an explicit socket.
    COUNT=$(ps -efww | grep [/]usr/sbin/mysqld | grep socket | wc -l)
    if [ $COUNT -gt 1 ]
    then
        # Multiple instances: name this section after the socket path basename.
        INSTANCE_NAME=$(ps -efww|grep socket|grep "${INSTANCE}"|grep "[u]ser" | sed -ne 's/.*socket=\([^.]*\).*/\1/p')
        INSTANCE_NAME="[[${INSTANCE_NAME##*/}]]"
    else
        # Single instance: name this section after the mysqld --user= value.
        INSTANCE_NAME="[[$(ps -efww|grep socket|grep "${INSTANCE}"|grep "[u]ser" | sed -ne 's/.*user=\([^ ]*\).*/\1/p')]]"
    fi
    # Check if mysqld is running and root password setup
    echo "<<<mysql_ping>>>"
    echo $INSTANCE_NAME
    mysqladmin --defaults-extra-file=/root/.my.cnf $1 ping 2>&1
    if [ $? -eq 0 ]; then
        # Ping succeeded: dump global status/variables, per-schema capacity,
        # and replication status for this instance.
        echo "<<<mysql>>>"
        echo $INSTANCE_NAME
        mysql --defaults-extra-file=/root/.my.cnf $1 -sN \
            -e "show global status ; show global variables ;"
        echo "<<<mysql_capacity>>>"
        echo $INSTANCE_NAME
        mysql --defaults-extra-file=/root/.my.cnf $1 -sN \
            -e "SELECT table_schema, sum(data_length + index_length), sum(data_free)
                FROM information_schema.TABLES GROUP BY table_schema"
        echo "<<<mysql_slave>>>"
        echo $INSTANCE_NAME
        mysql --defaults-extra-file=/root/.my.cnf $1 -s \
            -e "show slave status\G"
    fi
}
# Only produce output when mysqladmin is installed on this host.
if which mysqladmin >/dev/null
then
    # Discover instance sockets: prefer those listed in /root/.my.cnf,
    # fall back to sockets of running mysqld processes.
    mysql_sockets=$(fgrep socket /root/.my.cnf|sed -ne 's/.*socket=\([^ ]*\).*/\1/p')
    if [ -z "$mysql_sockets" ] ; then
        mysql_sockets=$(ps -efww | grep mysqld | grep "[s]ocket" | sed -ne 's/.*socket=\([^ ]*\).*/\1/p')
    fi
    if [ -z "$mysql_sockets" ] ; then
        # No socket found anywhere: query the default instance.
        do_query ""
    else
        for socket in $mysql_sockets ; do
            do_query "--socket="$socket
        done
    fi
    #echo "<<<mysql_version>>>"
    #mysql -V
    echo "<<<mysql_port>>>"
    # Pair each mysqld's --user= value with its --port= value
    # (xargs -n2 joins them into two-column lines).
    ps -efww|grep mysqld|while read LINE; do echo $LINE|grep "[u]ser" | sed -ne 's/.*user=\([^ ]*\).*/\1/p'; echo $LINE|grep mysqld | grep "[p]ort"|sed -ne 's/.*port=\([^ ]*\).*/\1/p' ; done|xargs -n2
    #echo "<<<mysql_instances>>>"
    #mysql --defaults-extra-file=/root/.my.cnf $1 -s \
    #    -e "show INSTANCES"
fi

485
roles/pvc/files/patroni/postgres Executable file
View File

@ -0,0 +1,485 @@
#!/bin/bash
# +------------------------------------------------------------------+
# | ____ _ _ __ __ _ __ |
# | / ___| |__ ___ ___| | __ | \/ | |/ / |
# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
# | | |___| | | | __/ (__| < | | | | . \ |
# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
# | |
# | Copyright Mathias Kettner 2015 mk@mathias-kettner.de |
# +------------------------------------------------------------------+
#
# This file is part of Check_MK.
# The official homepage is at http://mathias-kettner.de/check_mk.
#
# check_mk is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation in version 2. check_mk is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more de-
# tails. You should have received a copy of the GNU General Public
# License along with GNU Make; see the file COPYING. If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA.
# TODO postgres_connections output format
# .--common funcs--------------------------------------------------------.
# | __ |
# | ___ ___ _ __ ___ _ __ ___ ___ _ __ / _|_ _ _ __ ___ ___ |
# | / __/ _ \| '_ ` _ \| '_ ` _ \ / _ \| '_ \ | |_| | | | '_ \ / __/ __| |
# || (_| (_) | | | | | | | | | | | (_) | | | || _| |_| | | | | (__\__ \ |
# | \___\___/|_| |_| |_|_| |_| |_|\___/|_| |_||_| \__,_|_| |_|\___|___/ |
# | |
# '----------------------------------------------------------------------'
# Return 0 when $1 >= $2, else 1.
# The comparison is delegated to awk, which compares the two fields
# numerically when both look numeric (e.g. "9.2" vs "9.0").
compare_version_greater_equal() {
    local winner
    winner=$(echo "$1 $2" | awk '{print ($1 >= $2) ? $1 : $2}')
    [ "$winner" == "$1" ]
}
#.
# .--section funcs-------------------------------------------------------.
# | _ _ __ |
# | ___ ___ ___| |_(_) ___ _ __ / _|_ _ _ __ ___ ___ |
# | / __|/ _ \/ __| __| |/ _ \| '_ \ | |_| | | | '_ \ / __/ __| |
# | \__ \ __/ (__| |_| | (_) | | | | | _| |_| | | | | (__\__ \ |
# | |___/\___|\___|\__|_|\___/|_| |_| |_| \__,_|_| |_|\___|___/ |
# | |
# '----------------------------------------------------------------------'
# Emit the <<<postgres_instances>>> section header, optionally tagged with an
# instance name ($1), followed by the command lines of all running postgres
# server processes (matched on "bin/postgres").
postgres_instances() {
    printf '%s\n' '<<<postgres_instances>>>'
    # If we have no instances we take db id (pqsql/postgres) because
    # ps output may be unreadable
    # In case of instances ps output shows them readable
    if [ -n "${1}" ]; then
        printf '[[[%s]]]\n' "${1}"
    fi
    pgrep -laf bin/postgres
}
# Emit the <<<postgres_sessions>>> section: count of sessions grouped by
# whether they are idle ("t") or active ("f").
# Globals read: INSTANCE_SECTION, DBUSER, export_PGPASSFILE, psql,
#               PGDATABASE, EXTRA_ARGS.
function postgres_sessions() {
    # Postgres 9.2 uses 'query' instead of 'current_query'
    local OUTPUT
    # The inner subselect picks whichever activity column name exists on this
    # server version, so one query works on both old and new releases.
    OUTPUT="$(echo "\echo '<<<postgres_sessions>>>${INSTANCE_SECTION}'
SELECT (
SELECT column_name
FROM information_schema.columns
WHERE table_name='pg_stat_activity' AND column_name in ('query', 'current_query')
) = '<IDLE>' as query, count(*)
FROM pg_stat_activity
GROUP BY (query = '<IDLE>');" |\
    sudo -u "$DBUSER" $export_PGPASSFILE $psql -X --variable ON_ERROR_STOP=1 -d $PGDATABASE ${EXTRA_ARGS} -A -t -F' ' 2>/dev/null)"
    echo "$OUTPUT"
    # line with number of idle sessions is sometimes missing on Postgres 8.x. This can lead
    # to an altogether empty section and thus the check disappearing.
    echo "$OUTPUT" | grep -q '^t ' || echo "t 0"
}
# Emit four sections in a single psql run: <<<postgres_stat_database>>>
# (per-DB transaction/IO counters and size), <<<postgres_locks>>>,
# <<<postgres_query_duration>>> (currently running queries) and
# <<<postgres_connections>>> (connection counts vs. max_connections).
# Globals read: POSTGRES_VERSION, INSTANCE_SECTION, ECHO_DATABASES, DBUSER,
#               export_PGPASSFILE, psql, PGDATABASE, EXTRA_ARGS.
function postgres_simple_queries() {
    # Querytime
    # Supports versions >= 8.3, > 9.1
    local QUERYTIME_QUERY
    if compare_version_greater_equal "$POSTGRES_VERSION" "9.2" ; then
        # 9.2+: pg_stat_activity exposes pid/query/state columns.
        QUERYTIME_QUERY="SELECT datname, datid, usename, client_addr, state AS state, COALESCE(ROUND(EXTRACT(epoch FROM now()-query_start)),0) AS seconds,
pid, regexp_replace(query, E'[\\n\\r\\u2028]+', ' ', 'g' ) AS current_query FROM pg_stat_activity WHERE (query_start IS NOT NULL AND (state NOT LIKE 'idle%' OR state IS NULL)) ORDER BY query_start, pid DESC;"
    else
        # Pre-9.2: the equivalent columns are procpid/current_query, no state.
        QUERYTIME_QUERY="SELECT datname, datid, usename, client_addr, '' AS state, COALESCE(ROUND(EXTRACT(epoch FROM now()-query_start)),0) AS seconds,
procpid as pid, regexp_replace(current_query, E'[\\n\\r\\u2028]+', ' ', 'g' ) AS current_query FROM pg_stat_activity WHERE (query_start IS NOT NULL AND current_query NOT LIKE '<IDLE>%') ORDER BY query_start, procpid DESC;"
    fi
    # Number of current connections per database
    # We need to output the databases, too.
    # This query does not report databases without an active query
    local CONNECTIONS_QUERY
    if compare_version_greater_equal "$POSTGRES_VERSION" "9.2" ; then
        CONNECTIONS_QUERY="SELECT COUNT(datid) AS current,
(SELECT setting AS mc FROM pg_settings WHERE name = 'max_connections') AS mc,
d.datname
FROM pg_database d
LEFT JOIN pg_stat_activity s ON (s.datid = d.oid) WHERE state <> 'idle'
GROUP BY 2,3
ORDER BY datname;"
    else
        CONNECTIONS_QUERY="SELECT COUNT(datid) AS current,
(SELECT setting AS mc FROM pg_settings WHERE name = 'max_connections') AS mc,
d.datname
FROM pg_database d
LEFT JOIN pg_stat_activity s ON (s.datid = d.oid) WHERE current_query <> '<IDLE>'
GROUP BY 2,3
ORDER BY datname;"
    fi
    # All four sections share one psql invocation; each section lists the
    # known databases between [databases_start]/[databases_end] markers.
    echo "\pset footer off
\echo '<<<postgres_stat_database:sep(59)>>>${INSTANCE_SECTION}'
SELECT datid, datname, numbackends, xact_commit, xact_rollback, blks_read, blks_hit, tup_returned, tup_fetched, tup_inserted, tup_updated, tup_deleted, pg_database_size(datname) AS datsize FROM pg_stat_database;
\echo '<<<postgres_locks:sep(59)>>>${INSTANCE_SECTION}'
\echo '[databases_start]'
$ECHO_DATABASES
\echo '[databases_end]'
SELECT datname, granted, mode FROM pg_locks l RIGHT JOIN pg_database d ON (d.oid=l.database) WHERE d.datallowconn;
\echo '<<<postgres_query_duration:sep(59)>>>${INSTANCE_SECTION}'
\echo '[databases_start]'
$ECHO_DATABASES
\echo '[databases_end]'
$QUERYTIME_QUERY
\echo '<<<postgres_connections:sep(59)>>>${INSTANCE_SECTION}'
\echo '[databases_start]'
$ECHO_DATABASES
\echo '[databases_end]'
$CONNECTIONS_QUERY" \
    | sudo -u "$DBUSER" $export_PGPASSFILE $psql -X -d $PGDATABASE ${EXTRA_ARGS} -q -A -F';'
}
# Emit the <<<postgres_stats>>> section: last (auto)vacuum and last
# (auto)analyze timestamp per table, for every database in $DATABASES
# (-1 means "never happened").
# Globals read: DATABASES, ECHO_DATABASES, INSTANCE_SECTION, DBUSER,
#               export_PGPASSFILE, psql, EXTRA_ARGS.
function postgres_stats() {
    # Contains last vacuum time and analyze time
    # v = newest of last_vacuum/last_autovacuum; g = newest of
    # last_analyze/last_autoanalyze.
    # BUGFIX(review): atime previously extracted epoch FROM v (the vacuum
    # time) instead of g, so vtime and atime were always identical.
    local LASTVACUUM="SELECT current_database() AS datname, nspname AS sname, relname AS tname,
CASE WHEN v IS NULL THEN -1 ELSE round(extract(epoch FROM v)) END AS vtime,
CASE WHEN g IS NULL THEN -1 ELSE round(extract(epoch FROM g)) END AS atime
FROM (SELECT nspname, relname, GREATEST(pg_stat_get_last_vacuum_time(c.oid), pg_stat_get_last_autovacuum_time(c.oid)) AS v,
GREATEST(pg_stat_get_last_analyze_time(c.oid), pg_stat_get_last_autoanalyze_time(c.oid)) AS g
FROM pg_class c, pg_namespace n
WHERE relkind = 'r' AND n.oid = c.relnamespace AND n.nspname <> 'information_schema'
ORDER BY 3) AS foo;"
    local FIRST=
    # 30 s statement timeout so a hung table cannot stall the whole agent run.
    local QUERY="\pset footer off
BEGIN;
SET statement_timeout=30000;
COMMIT;
\echo '<<<postgres_stats:sep(59)>>>${INSTANCE_SECTION}'
\echo '[databases_start]'
$ECHO_DATABASES
\echo '[databases_end]'"
    for db in $DATABASES ; do
        QUERY="$QUERY
\c $db
$LASTVACUUM
"
        # Keep column headers for the first database only; tuples-only after.
        if [ -z $FIRST ] ; then
            FIRST=false
            QUERY="$QUERY
\pset tuples_only on
"
        fi
    done
    # Strip the transaction-control echo lines from the output.
    echo "$QUERY" | sudo -u "$DBUSER" $export_PGPASSFILE $psql -X ${EXTRA_ARGS} -q -A -F';' | grep -v -e 'COMMIT$' -e 'SET$' -e 'BEGIN$'
}
# Emit <<<postgres_version>>> (the server's version() string) and
# <<<postgres_conn_time>>> (wall time of that query, captured by the 'time'
# builtin whose output lands on the merged 2>&1 stream).
function postgres_version() {
    # Postgres version and connection time
    echo -e "<<<postgres_version:sep(1)>>>${INSTANCE_SECTION}"
    (TIMEFORMAT='%3R'; time echo "SELECT version() AS v" |\
    sudo -u "$DBUSER" $export_PGPASSFILE $psql -X -d $PGDATABASE ${EXTRA_ARGS} -t -A -F';'; echo -e "<<<postgres_conn_time>>>${INSTANCE_SECTION}") 2>&1
}
# Emit the <<<postgres_bloat>>> section: estimated table and index bloat for
# every database in $DATABASES (top 10 most wasteful relations per DB).
# Globals read: POSTGRES_VERSION, DATABASES, ECHO_DATABASES,
#               INSTANCE_SECTION, DBUSER, export_PGPASSFILE, psql, EXTRA_ARGS.
function postgres_bloat() {
    # Bloat index and tables
    # Supports versions <9.0, >=9.0
    # This huge query has been gratefully taken from Greg Sabino Mullane's check_postgres.pl
    local BLOAT_QUERY
    if compare_version_greater_equal "$POSTGRES_VERSION" "9.0" ; then
        # >= 9.0 variant: byte counts for wastedsize, honors s.inherited.
        BLOAT_QUERY="SELECT
current_database() AS db, schemaname, tablename, reltuples::bigint AS tups, relpages::bigint AS pages, otta,
ROUND(CASE WHEN otta=0 OR sml.relpages=0 OR sml.relpages=otta THEN 0.0 ELSE sml.relpages/otta::numeric END,1) AS tbloat,
CASE WHEN relpages < otta THEN 0 ELSE relpages::bigint - otta END AS wastedpages,
CASE WHEN relpages < otta THEN 0 ELSE bs*(sml.relpages-otta)::bigint END AS wastedbytes,
CASE WHEN relpages < otta THEN 0 ELSE (bs*(relpages-otta))::bigint END AS wastedsize,
iname, ituples::bigint AS itups, ipages::bigint AS ipages, iotta,
ROUND(CASE WHEN iotta=0 OR ipages=0 OR ipages=iotta THEN 0.0 ELSE ipages/iotta::numeric END,1) AS ibloat,
CASE WHEN ipages < iotta THEN 0 ELSE ipages::bigint - iotta END AS wastedipages,
CASE WHEN ipages < iotta THEN 0 ELSE bs*(ipages-iotta) END AS wastedibytes,
CASE WHEN ipages < iotta THEN 0 ELSE (bs*(ipages-iotta))::bigint END AS wastedisize,
CASE WHEN relpages < otta THEN
CASE WHEN ipages < iotta THEN 0 ELSE bs*(ipages-iotta::bigint) END
ELSE CASE WHEN ipages < iotta THEN bs*(relpages-otta::bigint)
ELSE bs*(relpages-otta::bigint + ipages-iotta::bigint) END
END AS totalwastedbytes
FROM (
SELECT
nn.nspname AS schemaname,
cc.relname AS tablename,
COALESCE(cc.reltuples,0) AS reltuples,
COALESCE(cc.relpages,0) AS relpages,
COALESCE(bs,0) AS bs,
COALESCE(CEIL((cc.reltuples*((datahdr+ma-
(CASE WHEN datahdr%ma=0 THEN ma ELSE datahdr%ma END))+nullhdr2+4))/(bs-20::float)),0) AS otta,
COALESCE(c2.relname,'?') AS iname, COALESCE(c2.reltuples,0) AS ituples, COALESCE(c2.relpages,0) AS ipages,
COALESCE(CEIL((c2.reltuples*(datahdr-12))/(bs-20::float)),0) AS iotta -- very rough approximation, assumes all cols
FROM
pg_class cc
JOIN pg_namespace nn ON cc.relnamespace = nn.oid AND nn.nspname <> 'information_schema'
LEFT JOIN
(
SELECT
ma,bs,foo.nspname,foo.relname,
(datawidth+(hdr+ma-(case when hdr%ma=0 THEN ma ELSE hdr%ma END)))::numeric AS datahdr,
(maxfracsum*(nullhdr+ma-(case when nullhdr%ma=0 THEN ma ELSE nullhdr%ma END))) AS nullhdr2
FROM (
SELECT
ns.nspname, tbl.relname, hdr, ma, bs,
SUM((1-coalesce(null_frac,0))*coalesce(avg_width, 2048)) AS datawidth,
MAX(coalesce(null_frac,0)) AS maxfracsum,
hdr+(
SELECT 1+count(*)/8
FROM pg_stats s2
WHERE null_frac<>0 AND s2.schemaname = ns.nspname AND s2.tablename = tbl.relname
) AS nullhdr
FROM pg_attribute att
JOIN pg_class tbl ON att.attrelid = tbl.oid
JOIN pg_namespace ns ON ns.oid = tbl.relnamespace
LEFT JOIN pg_stats s ON s.schemaname=ns.nspname
AND s.tablename = tbl.relname
AND s.inherited=false
AND s.attname=att.attname,
(
SELECT
(SELECT current_setting('block_size')::numeric) AS bs,
CASE WHEN SUBSTRING(SPLIT_PART(v, ' ', 2) FROM '#\[0-9]+.[0-9]+#\%' for '#')
IN ('8.0','8.1','8.2') THEN 27 ELSE 23 END AS hdr,
CASE WHEN v ~ 'mingw32' OR v ~ '64-bit' THEN 8 ELSE 4 END AS ma
FROM (SELECT version() AS v) AS foo
) AS constants
WHERE att.attnum > 0 AND tbl.relkind='r'
GROUP BY 1,2,3,4,5
) AS foo
) AS rs
ON cc.relname = rs.relname AND nn.nspname = rs.nspname
LEFT JOIN pg_index i ON indrelid = cc.oid
LEFT JOIN pg_class c2 ON c2.oid = i.indexrelid
) AS sml
WHERE sml.relpages - otta > 0 OR ipages - iotta > 10 ORDER BY totalwastedbytes DESC LIMIT 10;"
    else
        # < 9.0 variant: wastedsize reported as '<n> bytes' text, no
        # s.inherited column available.
        BLOAT_QUERY="SELECT
current_database() AS db, schemaname, tablename, reltuples::bigint AS tups, relpages::bigint AS pages, otta,
ROUND(CASE WHEN otta=0 OR sml.relpages=0 OR sml.relpages=otta THEN 0.0 ELSE sml.relpages/otta::numeric END,1) AS tbloat,
CASE WHEN relpages < otta THEN 0 ELSE relpages::bigint - otta END AS wastedpages,
CASE WHEN relpages < otta THEN 0 ELSE bs*(sml.relpages-otta)::bigint END AS wastedbytes,
CASE WHEN relpages < otta THEN '0 bytes'::text ELSE (bs*(relpages-otta))::bigint || ' bytes' END AS wastedsize,
iname, ituples::bigint AS itups, ipages::bigint AS ipages, iotta,
ROUND(CASE WHEN iotta=0 OR ipages=0 OR ipages=iotta THEN 0.0 ELSE ipages/iotta::numeric END,1) AS ibloat,
CASE WHEN ipages < iotta THEN 0 ELSE ipages::bigint - iotta END AS wastedipages,
CASE WHEN ipages < iotta THEN 0 ELSE bs*(ipages-iotta) END AS wastedibytes,
CASE WHEN ipages < iotta THEN '0 bytes' ELSE (bs*(ipages-iotta))::bigint || ' bytes' END AS wastedisize,
CASE WHEN relpages < otta THEN
CASE WHEN ipages < iotta THEN 0 ELSE bs*(ipages-iotta::bigint) END
ELSE CASE WHEN ipages < iotta THEN bs*(relpages-otta::bigint)
ELSE bs*(relpages-otta::bigint + ipages-iotta::bigint) END
END AS totalwastedbytes
FROM (
SELECT
nn.nspname AS schemaname,
cc.relname AS tablename,
COALESCE(cc.reltuples,0) AS reltuples,
COALESCE(cc.relpages,0) AS relpages,
COALESCE(bs,0) AS bs,
COALESCE(CEIL((cc.reltuples*((datahdr+ma-
(CASE WHEN datahdr%ma=0 THEN ma ELSE datahdr%ma END))+nullhdr2+4))/(bs-20::float)),0) AS otta,
COALESCE(c2.relname,'?') AS iname, COALESCE(c2.reltuples,0) AS ituples, COALESCE(c2.relpages,0) AS ipages,
COALESCE(CEIL((c2.reltuples*(datahdr-12))/(bs-20::float)),0) AS iotta -- very rough approximation, assumes all cols
FROM
pg_class cc
JOIN pg_namespace nn ON cc.relnamespace = nn.oid AND nn.nspname <> 'information_schema'
LEFT JOIN
(
SELECT
ma,bs,foo.nspname,foo.relname,
(datawidth+(hdr+ma-(case when hdr%ma=0 THEN ma ELSE hdr%ma END)))::numeric AS datahdr,
(maxfracsum*(nullhdr+ma-(case when nullhdr%ma=0 THEN ma ELSE nullhdr%ma END))) AS nullhdr2
FROM (
SELECT
ns.nspname, tbl.relname, hdr, ma, bs,
SUM((1-coalesce(null_frac,0))*coalesce(avg_width, 2048)) AS datawidth,
MAX(coalesce(null_frac,0)) AS maxfracsum,
hdr+(
SELECT 1+count(*)/8
FROM pg_stats s2
WHERE null_frac<>0 AND s2.schemaname = ns.nspname AND s2.tablename = tbl.relname
) AS nullhdr
FROM pg_attribute att
JOIN pg_class tbl ON att.attrelid = tbl.oid
JOIN pg_namespace ns ON ns.oid = tbl.relnamespace
LEFT JOIN pg_stats s ON s.schemaname=ns.nspname
AND s.tablename = tbl.relname
AND s.attname=att.attname,
(
SELECT
(SELECT current_setting('block_size')::numeric) AS bs,
CASE WHEN SUBSTRING(SPLIT_PART(v, ' ', 2) FROM '#\"[0-9]+.[0-9]+#\"%' for '#')
IN ('8.0','8.1','8.2') THEN 27 ELSE 23 END AS hdr,
CASE WHEN v ~ 'mingw32' OR v ~ '64-bit' THEN 8 ELSE 4 END AS ma
FROM (SELECT version() AS v) AS foo
) AS constants
WHERE att.attnum > 0 AND tbl.relkind='r'
GROUP BY 1,2,3,4,5
) AS foo
) AS rs
ON cc.relname = rs.relname AND nn.nspname = rs.nspname
LEFT JOIN pg_index i ON indrelid = cc.oid
LEFT JOIN pg_class c2 ON c2.oid = i.indexrelid
) AS sml
WHERE sml.relpages - otta > 0 OR ipages - iotta > 10 ORDER BY totalwastedbytes DESC LIMIT 10;"
    fi
    local FIRST=
    local QUERY="\pset footer off
\echo '<<<postgres_bloat:sep(59)>>>${INSTANCE_SECTION}'
\echo '[databases_start]'
$ECHO_DATABASES
\echo '[databases_end]'"
    # Run the bloat query once per database in a single psql session.
    for db in $DATABASES ; do
        QUERY="$QUERY
\c $db
$BLOAT_QUERY
"
        # Keep column headers for the first database only; tuples-only after.
        if [ -z $FIRST ] ; then
            FIRST=false
            QUERY="$QUERY
\pset tuples_only on
"
        fi
    done
    echo "$QUERY" | sudo -u "$DBUSER" $export_PGPASSFILE $psql -X ${EXTRA_ARGS} -q -A -F';'
}
#.
# .--main----------------------------------------------------------------.
# | _ |
# | _ __ ___ __ _(_)_ __ |
# | | '_ ` _ \ / _` | | '_ \ |
# | | | | | | | (_| | | | | | |
# | |_| |_| |_|\__,_|_|_| |_| |
# | |
# '----------------------------------------------------------------------'
### postgres.cfg ##
# DBUSER=OS_USER_NAME
# INSTANCE=/home/postgres/db1.env:USER_NAME:/PATH/TO/.pgpass
# INSTANCE=/home/postgres/db2.env:USER_NAME:/PATH/TO/.pgpass
# TODO @dba USERNAME in .pgpass ?
# INSTANCE=/home/postgres/db2.env:/PATH/TO/.pgpass
# Collect every section for one instance. Silently exits unless both DBUSER
# and PGDATABASE are set. Builds EXTRA_ARGS (-U/-p) and export_PGPASSFILE
# from the optional PGUSER/PGPORT/PGPASSFILE globals, then discovers the
# database list and server version used by the section functions.
function postgres_main() {
    if [ -z "$DBUSER" ] || [ -z "$PGDATABASE" ] ; then
        exit 0
    fi
    EXTRA_ARGS=""
    if [ ! -z "$PGUSER" ]; then
        EXTRA_ARGS=$EXTRA_ARGS" -U $PGUSER"
    fi
    if [ ! -z "$PGPORT" ]; then
        EXTRA_ARGS=$EXTRA_ARGS" -p $PGPORT"
    fi
    if [ ! -z "$PGPASSFILE" ]; then
        export_PGPASSFILE="export PGPASSFILE=$PGPASSFILE; "
    fi
    # All non-template databases; ECHO_DATABASES is the same list as psql
    # \echo lines for the [databases_start]/[databases_end] headers.
    DATABASES="$(echo "SELECT datname FROM pg_database WHERE datistemplate = false;" |\
    sudo -u "$DBUSER" $export_PGPASSFILE $psql -X -d $PGDATABASE ${EXTRA_ARGS} -t -A -F';')"
    ECHO_DATABASES="$(echo "$DATABASES" | sed 's/^/\\echo /')"
    # e.g. "11.2" extracted from "psql (PostgreSQL) 11.2".
    POSTGRES_VERSION=$(sudo -u "$DBUSER" $psql -X -V -d $PGDATABASE ${EXTRA_ARGS} | egrep -o '[0-9]{1,}\.[0-9]{1,}')
    postgres_sessions
    postgres_simple_queries
    #postgres_stats
    postgres_version
    postgres_bloat
}
# Entry point. With a postgres.cfg, each "INSTANCE=<env file>:<user>:<pgpass>"
# line describes one environment-file based instance; without one, fall back
# to the default local instance owned by the pgsql or postgres OS user.
MK_CONFFILE=$MK_CONFDIR/postgres.cfg
if [ -e "$MK_CONFFILE" ]; then
    postgres_instances
    DBUSER=$(grep DBUSER "$MK_CONFFILE" | sed 's/.*=//g')
    cat "$MK_CONFFILE" | while read line
    do
        case $line in
        INSTANCE*)
            instance=$line
            ;;
        *)
            instance=
            ;;
        esac
        if [ ! -z "$instance" ]; then
            # Split INSTANCE=<env file>:<pg user>:<pgpass file>.
            instance_path=$(echo "$instance" | sed 's/.*=\(.*\):.*:.*$/\1/g')
            # Instance name = env file basename without the .env suffix.
            instance_name=$(echo "$instance_path" | sed -e 's/.*\/\(.*\)/\1/g' -e 's/\.env$//g')
            if [ ! -z "$instance_name" ]; then
                INSTANCE_SECTION="\n[[[$instance_name]]]"
            else
                INSTANCE_SECTION=""
            fi
            # psql is expected under /<DBUSER>/<PGVERSION>/bin — TODO confirm
            # this layout convention for the target installations.
            psql="/$DBUSER/$(grep "^export PGVERSION=" "$instance_path" |
            sed -e 's/.*=//g' -e 's/\s*#.*$//g')/bin/psql"
            PGUSER=$(echo "$instance" | sed 's/.*=.*:\(.*\):.*$/\1/g')
            PGPASSFILE="$(echo "$instance" | sed 's/.*=.*:.*:\(.*\)$/\1/g')"
            PGDATABASE=$(grep "^export PGDATABASE=" "$instance_path" |
            sed -e 's/.*=//g' -e 's/\s*#.*$//g')
            PGPORT=$(grep "^export PGPORT=" "$instance_path" |
            sed -e 's/.*=//g' -e 's/\s*#.*$//g')
            # Fallback
            if [ ! -f "$psql" ]; then
                psql="$(cat $instance_path | grep "^export PGHOME=" |
                sed -e 's/.*=//g' -e 's/\s*#.*$//g')/psql"
            fi
            postgres_main
        fi
    done
else
    # No config file: detect the local postgres superuser account.
    if id pgsql >/dev/null 2>&1; then
        DBUSER=pgsql
    elif id postgres >/dev/null 2>&1; then
        DBUSER=postgres
    else
        # Neither default OS user exists: nothing to monitor on this host.
        exit 0
    fi
    INSTANCE_SECTION=""
    postgres_instances "$DBUSER"
    psql="psql"
    PGDATABASE=postgres
    postgres_main
fi

View File

@ -0,0 +1,94 @@
-- PowerDNS generic PostgreSQL backend schema, imported into the PVC DNS
-- database by roles/pvc/tasks/patroni.yml.

-- One row per DNS zone; zone names are forced to lower case by the CHECK.
CREATE TABLE domains (
  id SERIAL PRIMARY KEY,
  name VARCHAR(255) NOT NULL,
  master VARCHAR(128) DEFAULT NULL,
  last_check INT DEFAULT NULL,
  type VARCHAR(6) NOT NULL,
  notified_serial INT DEFAULT NULL,
  account VARCHAR(40) DEFAULT NULL,
  CONSTRAINT c_lowercase_name CHECK (((name)::TEXT = LOWER((name)::TEXT)))
);
CREATE UNIQUE INDEX name_index ON domains(name);

-- Resource records; rows cascade away when their zone is deleted.
CREATE TABLE records (
  id BIGSERIAL PRIMARY KEY,
  domain_id INT DEFAULT NULL,
  name VARCHAR(255) DEFAULT NULL,
  type VARCHAR(10) DEFAULT NULL,
  content VARCHAR(65535) DEFAULT NULL,
  ttl INT DEFAULT NULL,
  prio INT DEFAULT NULL,
  disabled BOOL DEFAULT 'f',
  ordername VARCHAR(255),
  auth BOOL DEFAULT 't',
  CONSTRAINT domain_exists
  FOREIGN KEY(domain_id) REFERENCES domains(id)
  ON DELETE CASCADE,
  CONSTRAINT c_lowercase_name CHECK (((name)::TEXT = LOWER((name)::TEXT)))
);
CREATE INDEX rec_name_index ON records(name);
CREATE INDEX nametype_index ON records(name,type);
CREATE INDEX domain_id ON records(domain_id);
-- text_pattern_ops supports the ordered/LIKE lookups done on ordername.
CREATE INDEX recordorder ON records (domain_id, ordername text_pattern_ops);

-- Hosts allowed to auto-provision slave zones on this server.
CREATE TABLE supermasters (
  ip INET NOT NULL,
  nameserver VARCHAR(255) NOT NULL,
  account VARCHAR(40) NOT NULL,
  PRIMARY KEY(ip, nameserver)
);

-- Free-form comments attached to record sets.
CREATE TABLE comments (
  id SERIAL PRIMARY KEY,
  domain_id INT NOT NULL,
  name VARCHAR(255) NOT NULL,
  type VARCHAR(10) NOT NULL,
  modified_at INT NOT NULL,
  account VARCHAR(40) DEFAULT NULL,
  comment VARCHAR(65535) NOT NULL,
  CONSTRAINT domain_exists
  FOREIGN KEY(domain_id) REFERENCES domains(id)
  ON DELETE CASCADE,
  CONSTRAINT c_lowercase_name CHECK (((name)::TEXT = LOWER((name)::TEXT)))
);
CREATE INDEX comments_domain_id_idx ON comments (domain_id);
CREATE INDEX comments_name_type_idx ON comments (name, type);
CREATE INDEX comments_order_idx ON comments (domain_id, modified_at);

-- Per-zone metadata key/value pairs.
CREATE TABLE domainmetadata (
  id SERIAL PRIMARY KEY,
  domain_id INT REFERENCES domains(id) ON DELETE CASCADE,
  kind VARCHAR(32),
  content TEXT
);
CREATE INDEX domainidmetaindex ON domainmetadata(domain_id);

-- Per-zone DNSSEC key material.
CREATE TABLE cryptokeys (
  id SERIAL PRIMARY KEY,
  domain_id INT REFERENCES domains(id) ON DELETE CASCADE,
  flags INT NOT NULL,
  active BOOL,
  content TEXT
);
CREATE INDEX domainidindex ON cryptokeys(domain_id);

-- TSIG keys for authenticated zone transfers.
CREATE TABLE tsigkeys (
  id SERIAL PRIMARY KEY,
  name VARCHAR(255),
  algorithm VARCHAR(50),
  secret VARCHAR(255),
  CONSTRAINT c_lowercase_name CHECK (((name)::TEXT = LOWER((name)::TEXT)))
);
CREATE UNIQUE INDEX namealgoindex ON tsigkeys(name, algorithm);

View File

@ -0,0 +1,25 @@
---
# Service restart handlers for the PVC role, notified by the task files when
# a configuration template changes.
- name: restart zookeeper
  service:
    name: zookeeper
    state: restarted
- name: restart libvirtd
  service:
    name: libvirtd
    state: restarted
- name: restart frr
  service:
    name: frr
    state: restarted
- name: restart patroni
  service:
    name: patroni
    state: restarted
- name: restart pvcd
  service:
    name: pvcd
    state: restarted

48
roles/pvc/tasks/ceph.yml Normal file
View File

@ -0,0 +1,48 @@
---
# Install and configure the Ceph storage layer for PVC.
- name: create ceph group
  group:
    name: ceph
    # Fixed gid, presumably so uid/gid match across all cluster nodes —
    # TODO confirm
    gid: 64046
    state: present
- name: install packages
  apt:
    name:
      - ceph-osd
      - ceph-mds
      - ceph-mon
      - ceph-mgr
      - radosgw
      - libjemalloc2
    state: latest
- name: install sysctl tweaks
  template:
    src: ceph/sysctl.conf.j2
    dest: /etc/sysctl.d/pvc-ceph.conf
- name: activate sysctl tweaks
  # Apply immediately rather than waiting for the next boot.
  command: sysctl -p /etc/sysctl.d/pvc-ceph.conf
- name: install user limits overrides
  template:
    src: ceph/limits.conf.j2
    dest: /etc/security/limits.d/99-pvc-ceph.conf
- name: install ceph default config
  template:
    src: ceph/default.conf.j2
    dest: /etc/default/ceph
- name: create ceph configuration directory
  file:
    dest: /etc/ceph
    state: directory
- name: install ceph cluster configurations
  template:
    src: ceph/{{ item }}.j2
    dest: /etc/ceph/{{ item }}
  with_items:
    - ceph.conf
    - ceph.client.admin.keyring

23
roles/pvc/tasks/frr.yml Normal file
View File

@ -0,0 +1,23 @@
---
# Install and configure FRRouting for the BGP EVPN mesh. The service is
# installed but left disabled here (managed elsewhere).
- name: install frr packages
  apt:
    name:
      - frr
    state: latest
- name: install frr configuration
  template:
    src: frr/{{ item }}.j2
    dest: /etc/frr/{{ item }}
  with_items:
    - daemons
    - frr.conf
  notify: restart frr
  # NOTE(review): ignore_errors hides any template failure here — confirm
  # this is intentional and not masking real errors.
  ignore_errors: true
- name: disable services
  service:
    name: "{{ item }}"
    enabled: no
  with_items:
    - frr

View File

@ -0,0 +1,43 @@
---
# Install and configure libvirt/QEMU for PVC, including the libvirt secret
# that carries the Ceph storage key for RBD-backed disks.
- name: install libvirt packages
  apt:
    name:
      - libvirt-daemon-system
      - qemu-kvm
      - qemu-utils
      - qemu-block-extra
      - vhostmd
      - ceph-common
      - libjemalloc2
    state: latest
- name: install libvirt configuration
  template:
    src: libvirt/{{ item }}.j2
    dest: /etc/libvirt/{{ item }}
  with_items:
    - libvirtd.conf
    - ceph-secret.xml
  notify: restart libvirtd
- name: define ceph secret
  command: virsh secret-define /etc/libvirt/ceph-secret.xml
  # Fails harmlessly when the secret is already defined.
  ignore_errors: true
- name: set ceph secret value
  # NOTE(review): the base64 key is passed on the command line and is
  # therefore briefly visible in the process list (and possibly logs).
  command: virsh secret-set-value --secret {{ ceph_storage_secret_uuid }} --base64 {{ ceph_storage_secret_key }}
  ignore_errors: true
- name: configure libvirt for listening
  # Enable the TCP listener configured in libvirtd.conf.
  replace:
    dest: /etc/default/libvirtd
    regexp: '#libvirtd_opts=""'
    replace: 'libvirtd_opts="--listen"'
  notify: restart libvirtd
- name: disable services
  service:
    name: "{{ item }}"
    enabled: no
  with_items:
    - libvirtd

26
roles/pvc/tasks/main.yml Normal file
View File

@ -0,0 +1,26 @@
---
# Entry point for the PVC role: blacklist conflicting kernel modules, then
# configure each subsystem in dependency order.
- name: add module blacklist
  template:
    src: system/blacklist.j2
    dest: /etc/modprobe.d/blacklist.conf
- include_tasks: ceph.yml
  tags: pvc-ceph
- include_tasks: zookeeper.yml
  tags: pvc-zookeeper
- include_tasks: libvirt.yml
  tags: pvc-libvirt
- include_tasks: frr.yml
  tags: pvc-frr
- include_tasks: patroni.yml
  tags: pvc-patroni
- include_tasks: pvc.yml
  tags: pvc-pvc
  # Run the PVC bootstrap once, delegated in turn to every host in the play.
  run_once: true
  delegate_to: "{{ item }}"
  with_items: "{{ play_hosts }}"

128
roles/pvc/tasks/patroni.yml Normal file
View File

@ -0,0 +1,128 @@
---
# Install and configure a Patroni-managed PostgreSQL 11 cluster and, on the
# first host only, create the PVC DNS (PowerDNS) database.
- name: install patroni packages via apt
  apt:
    name:
      - python-psycopg2
      - python3-kazoo
      - patroni
      - postgresql-11
    state: latest
    # BUGFIX(review): the apt module's parameter is "update_cache";
    # "update-cache" (with a hyphen) is not a valid module argument name.
    update_cache: yes
- name: first run check
  # Drops a marker file; reports "changed" only on the very first run thanks
  # to args.creates, which gates the one-time bootstrap tasks below.
  shell: "echo 'bootstrapped' > /etc/postgresql/pvc"
  register: newinstance
  args:
    creates: /etc/postgresql/pvc
- name: stop and disable postgresql
  # Patroni manages PostgreSQL itself; the stock Debian units must not run.
  service:
    name: "{{ item }}"
    state: stopped
    enabled: no
  with_items:
    - postgresql
    - postgresql@11-main
  when: newinstance.changed
- name: remove obsolete database directories
  # The stock Debian cluster is replaced by the Patroni-managed data dir.
  file:
    dest: "{{ item }}"
    state: absent
  with_items:
    - /etc/postgresql/11
    - /var/lib/postgresql/11
  when: newinstance.changed
- name: create patroni database directory
  file:
    dest: /var/lib/postgresql/patroni/pvc
    state: directory
    owner: postgres
    mode: 0700
  when: newinstance.changed
- name: install postgresql customization configuration file
  template:
    src: patroni/postgresql.pvc.conf.j2
    dest: /etc/postgresql/postgresql.pvc.conf
    owner: postgres
    group: sudo
    mode: 0640
  notify: restart patroni
- name: install patroni configuration file
  template:
    src: patroni/patroni.yml.j2
    dest: /etc/patroni/config.yml
    owner: postgres
    group: postgres
    mode: 0640
  notify: restart patroni
- name: install check_mk agent check
  copy:
    src: patroni/postgres
    dest: /usr/lib/check_mk_agent/plugins/postgres
    mode: 0755
- name: ensure patroni services are enabled and started
  service:
    name: "{{ item }}.service"
    state: started
    enabled: yes
  with_items:
    - patroni
- name: install initial schema files
  copy:
    src: "{{ item.src }}"
    dest: "{{ item.dest }}"
    owner: postgres
    group: sudo
    mode: 0640
  with_items:
    - { src: "patroni/powerdns-schema.sql", dest: "/etc/postgresql/powerdns-schema.sql" }
- name: set up PVC DNS database on first host
  block:
    - name: wait 15s for cluster to initialize
      pause:
        seconds: 15
    - name: create user for role
      postgresql_user:
        name: "{{ pvc_dns_database_user }}"
        password: "{{ pvc_dns_database_password }}"
        state: present
        login_host: /run/postgresql
    - name: create database for role
      postgresql_db:
        name: "{{ pvc_dns_database_name }}"
        owner: "{{ pvc_dns_database_user }}"
        encoding: utf8
        state: present
        login_host: /run/postgresql
    - name: set user privs for role
      postgresql_user:
        name: "{{ pvc_dns_database_user }}"
        db: "{{ pvc_dns_database_name }}"
        priv: ALL
        login_host: /run/postgresql
    - name: create extensions
      postgresql_ext:
        name: "{{ item }}"
        db: "{{ pvc_dns_database_name }}"
        login_host: /run/postgresql
      with_items: "{{ extensions }}"
      when: extensions is defined
    - name: import dns database schema
      command: "psql -U {{ pvc_dns_database_user }} -f /etc/postgresql/powerdns-schema.sql {{ pvc_dns_database_name }}"
      become: yes
      become_user: postgres
  # NOTE(review): compares host_id to the *string* '1' while pvc.yml compares
  # it to the integer 1 — confirm the fact's type and make these consistent.
  when: newinstance.changed and ansible_local.host_id == '1'

43
roles/pvc/tasks/pvc.yml Normal file
View File

@ -0,0 +1,43 @@
---
# Install the PVC daemon and clients and bootstrap the cluster on node 1.
- name: install pvc packages
  apt:
    name:
      - pvc-daemon
      - pvc-client-cli
      - pvc-client-common
    state: latest
- name: install pvc configuration
  template:
    src: pvc/{{ item }}.j2
    dest: /etc/pvc/{{ item }}
  with_items:
    - pvcd.yaml
  notify: restart pvcd
- name: verify if cluster has been started
  # rc == 0 (grep matched "Node does not exist") means the /nodes tree has
  # never been created in Zookeeper, i.e. this is a fresh cluster.
  shell: "/usr/share/zookeeper/bin/zkCli.sh stat /nodes 2>&1 | grep -q 'Node does not exist'"
  register: cluster_init
  # Never fail the play here; only the registered rc is inspected below.
  failed_when: no
- name: bootstrap a fresh cluster
  shell: /usr/bin/pvc init
  # NOTE(review): patroni.yml compares ansible_local.host_id to the string
  # '1' while this compares to the integer 1 — confirm which type the custom
  # fact actually has.
  when: cluster_init.rc == 0 and ansible_local.host_id == 1
- name: stop and disable unneccessary services
  # PowerDNS is started on demand by PVC, not at boot.
  service:
    name: "{{ item }}"
    state: stopped
    enabled: no
  with_items:
    - pdns.service
- name: start and enable services
  service:
    name: "{{ item }}"
    state: started
    enabled: yes
  with_items:
    - pvc-flush.service
    - pvcd.service
    - pvcd.target

View File

@ -0,0 +1,26 @@
---
# Install and configure Zookeeper (the PVC cluster coordination store).
# The stock service is left disabled; it is managed elsewhere.
- name: install zookeeper packages
  apt:
    name:
      - zookeeperd
      - zookeeper-bin
    state: latest
- name: install zookeeper configuration
  template:
    src: zookeeper/{{ item }}.j2
    dest: /etc/zookeeper/conf/{{ item }}
  with_items:
    - configuration.xsl
    - environment
    - log4j.properties
    - myid
    - zoo.cfg
  notify: restart zookeeper
- name: disable services
  service:
    name: "{{ item }}"
    enabled: no
  with_items:
    - zookeeper

View File

@ -0,0 +1,4 @@
# Environment file for ceph daemon systemd unit files.
# {{ ansible_managed }}
# Preload jemalloc for the ceph daemons.
# BUGFIX(review): this role installs the libjemalloc2 package (ceph.yml),
# which ships libjemalloc.so.2 — libjemalloc.so.1 belongs to the older
# libjemalloc1 package and does not exist here, making the preload a no-op.
LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so.2

View File

@ -0,0 +1,4 @@
# Limits for ceph processes
# {{ ansible_managed }}
# Raise process and open-file soft limits for the ceph user.
# NOTE(review): only *soft* limits are raised; hard limits stay at the
# distribution defaults — confirm that is sufficient.
ceph soft nproc unlimited
ceph soft nofile unlimited

View File

@ -0,0 +1,4 @@
# sysctl: tweak settings for Ceph
# {{ ansible_managed }}
# Avoid swapping out daemon memory; swap only under extreme pressure.
vm.swappiness = 0

View File

@ -0,0 +1,16 @@
# frr daemon status
# {{ ansible_managed }}
# Only zebra (kernel interface) and bgpd (the BGP EVPN mesh, see frr.conf)
# are enabled; all other routing daemons stay off.
zebra=yes
bgpd=yes
ospfd=no
ospf6d=no
ripd=no
ripngd=no
isisd=no
pimd=no
ldpd=no
nhrpd=no
eigrpd=no
babeld=no
sharpd=no
pbrd=no

View File

@ -0,0 +1,53 @@
! frr main configuration
! {{ ansible_managed }}
!
frr version 4.0
frr defaults traditional
! Hostname comes from the node itself (was hard-coded to 'cloud-14', a
! leftover machine name that would be rendered onto every node).
hostname {{ ansible_hostname }}
no ipv6 forwarding
! NOTE(review): 'username cumulus nopassword' looks like a leftover from a
! Cumulus Linux example config — confirm whether this vtysh user is needed.
username cumulus nopassword
!
service integrated-vtysh-config
!
log syslog informational
!
line vty
!
! BGP EVPN mesh configuration
!
router bgp {{ pvc_asn }}
 bgp router-id {% for node in pvc_nodes if node.hostname == ansible_hostname %}{{ node.router_id }}{% endfor %}
 no bgp default ipv4-unicast
 ! BGP sessions with route reflectors
 neighbor fabric peer-group
 neighbor fabric remote-as {{ pvc_asn }}
 neighbor fabric capability extended-nexthop
{% for node in pvc_nodes if node.is_coordinator %}
 neighbor {{ node.router_id }} peer-group fabric
{% endfor %}
 ! BGP sessions with upstream routers
 neighbor upstream peer-group
 neighbor upstream remote-as {{ pvc_asn }}
 neighbor upstream capability extended-nexthop
{% for router in pvc_routers %}
 neighbor {{ router }} peer-group upstream
{% endfor %}
 !
 address-family l2vpn evpn
  neighbor fabric activate
  advertise-all-vni
 exit-address-family
 address-family ipv4 unicast
  neighbor fabric activate
  neighbor upstream activate
  redistribute connected
 exit-address-family
 address-family ipv6 unicast
  neighbor fabric activate
  neighbor upstream activate
  redistribute connected
 exit-address-family
!
exit
!

View File

@ -0,0 +1,6 @@
<!-- Libvirt secret holding the Ceph client key; the UUID is referenced by
     VM disk definitions and must be unique per cluster (see role README). -->
<secret ephemeral='no' private='no'>
<uuid>{{ ceph_storage_secret_uuid }}</uuid>
<usage type='ceph'>
<name>client.libvirt secret</name>
</usage>
</secret>

View File

@ -0,0 +1,7 @@
# PVC libvirt daemon configuration file
# {{ ansible_managed }}
# Plain TCP without authentication, presumably to allow live migration
# between cluster nodes. SECURITY NOTE(review): port 16509 must only be
# reachable on the trusted cluster network — confirm it is firewalled.
listen_tls = 0
listen_tcp = 1
tcp_port = "16509"
auth_tcp = "none"

View File

@ -0,0 +1,63 @@
# Patroni configuration for the PVC DNS PostgreSQL cluster
# {{ ansible_managed }}
scope: pvcdns
namespace: /patroni
name: {{ ansible_hostname }}
restapi:
  listen: '0.0.0.0:8008'
  connect_address: '{{ ansible_fqdn }}:8008'
# The cluster's existing ZooKeeper ensemble is reused as Patroni's DCS.
zookeeper:
  hosts: [ {% for host in groups[ansible_local.host_group] %}'{{ host }}.{{ ansible_domain }}:2181',{% endfor %} ]
bootstrap:
  dcs:
    ttl: 30
    loop_wait: 10
    retry_timeout: 10
    maximum_lag_on_failover: 1048576
    postgresql:
      use_pg_rewind: true
  initdb:
    - encoding: UTF8
    - data-checksums
  pg_hba:
    - local all all peer
    - host replication replicator 127.0.0.1/32 trust
{% for host in groups[ansible_local.host_group] %}
    - host replication replicator {{ host }}.{{ ansible_domain }} trust
{% endfor %}
    - host all all 0.0.0.0/0 md5
  users:
    admin:
      # NOTE(review): default admin/admin credentials — confirm these are
      # overridden or access-restricted in production.
      password: admin
      options:
        - createrole
        - createdb
postgresql:
  listen: '0.0.0.0:5432'
  connect_address: '{{ ansible_fqdn }}:5432'
  log_destination: 'stderr'
  log_min_messages: INFO
  custom_conf: /etc/postgresql/postgresql.pvc.conf
  bin_dir: /usr/lib/postgresql/11/bin
  data_dir: /var/lib/postgresql/patroni/pvc
  pgpass: /tmp/pgpass
  authentication:
    replication:
      # NOTE(review): pvc_replication_* / pvc_superuser_* variables are not
      # in this role's visible defaults — confirm group/host vars supply them.
      username: '{{ pvc_replication_database_user }}'
      password: '{{ pvc_replication_database_password }}'
    superuser:
      username: '{{ pvc_superuser_database_user }}'
      password: '{{ pvc_superuser_database_password }}'
  parameters:
    unix_socket_directories: '/run/postgresql'
tags:
  nofailover: false
  noloadbalance: false
  clonefrom: false
  nosync: false

View File

@ -0,0 +1,21 @@
# Additional PostgreSQL tuning parameters for PVC Patroni instance
# {{ ansible_managed }}
# Conservative sizing for a small database co-resident with the hypervisor.
max_connections = 100
shared_buffers = 64MB
effective_cache_size = 256MB
dynamic_shared_memory_type = posix
# Equal random/sequential cost — assumes fast (SSD/Ceph) storage; confirm.
random_page_cost = 1
seq_page_cost = 1
log_timezone = 'localtime'
datestyle = 'iso, dmy'
timezone = 'localtime'
# NOTE(review): locale hard-coded to en_CA — confirm for other deployments.
lc_messages = 'en_CA.UTF-8'
lc_monetary = 'en_CA.UTF-8'
lc_numeric = 'en_CA.UTF-8'
lc_time = 'en_CA.UTF-8'
default_text_search_config = 'pg_catalog.english'

View File

@ -0,0 +1,75 @@
---
# pvcd cluster configuration
# {{ ansible_managed }}
pvc:
  # This node's entry in pvc_nodes, matched by hostname.
  node: {% for node in pvc_nodes if node.hostname == ansible_hostname %}{{ node.hostname }}{% endfor %}

  functions:
    enable_hypervisor: True
    enable_networking: True
    enable_storage: False
  cluster:
    coordinators:
{% for node in pvc_nodes if node.is_coordinator %}
      - {{ node.hostname }}
{% endfor %}
    networks:
      cluster:
        domain: {{ pvc_cluster_domain }}
        network: {{ pvc_cluster_subnet }}
        floating_ip: {{ pvc_cluster_floatingip }}
      storage:
        domain: {{ pvc_storage_domain }}
        network: {{ pvc_storage_subnet }}
        floating_ip: {{ pvc_storage_floatingip }}
      upstream:
        domain: {{ pvc_upstream_domain }}
        network: {{ pvc_upstream_subnet }}
        floating_ip: {{ pvc_upstream_floatingip }}
        gateway: {{ pvc_upstream_gatewayip }}
  coordinator:
    dns:
      database:
        host: localhost
        port: 5432
        # Use the role variables so group/host-var overrides propagate here
        # as well as to the database-creation tasks (these were hard-coded
        # to the default values).
        name: {{ pvc_dns_database_name }}
        user: {{ pvc_dns_database_user }}
        pass: {{ pvc_dns_database_password }}
  system:
    fencing:
      intervals:
        keepalive_interval: 5
        fence_intervals: 6
        suicide_intervals: 0
      actions:
        successful_fence: migrate
        # NOTE(review): 'None' renders as a literal string, not a YAML null
        # — confirm that is what the pvcd config parser expects.
        failed_fence: None
      ipmi:
        host: {% for node in pvc_nodes if node.hostname == ansible_hostname %}{{ node.ipmi_host }}{% endfor %}

        user: {% for node in pvc_nodes if node.hostname == ansible_hostname %}{{ node.ipmi_user }}{% endfor %}

        pass: {% for node in pvc_nodes if node.hostname == ansible_hostname %}{{ node.ipmi_password }}{% endfor %}

    migration:
      target_selector: mem
    configuration:
      directories:
        dynamic_directory: "/run/pvc"
        log_directory: "/var/log/pvc"
        console_log_directory: "/var/log/libvirt"
      logging:
        file_logging: True
        stdout_logging: True
        console_log_lines: 1000
      networking:
        devices:
          cluster: {{ pvc_cluster_device }}
          storage: {{ pvc_storage_device }}
          upstream: {{ pvc_upstream_device }}
        addresses:
          cluster: {% for node in pvc_nodes if node.hostname == ansible_hostname %}{{ node.cluster_ip }}{% endfor %}

          storage: {% for node in pvc_nodes if node.hostname == ansible_hostname %}{{ node.storage_ip }}{% endfor %}

          upstream: {% for node in pvc_nodes if node.hostname == ansible_hostname %}{{ node.upstream_ip }}{% endfor %}

View File

@ -0,0 +1,11 @@
# modprobe blacklist
# {{ ansible_managed }}
# Blacklist GPU drivers
# (hypervisor nodes have no use for GPU or HDA audio drivers — presumably
# headless; confirm before deploying to nodes with passthrough GPUs)
blacklist nouveau
blacklist radeon
blacklist amdgpu
blacklist snd_hda_intel
# Blacklist HP Proliant management
# NOTE(review): hpwdt is the HP watchdog driver — presumably blacklisted to
# avoid unwanted watchdog resets; confirm.
blacklist hpwdt

View File

@ -0,0 +1,25 @@
<?xml version="1.0"?>
<!-- {{ ansible_managed }} -->
<!-- ZooKeeper stylesheet rendering configuration properties as an HTML
     table. The XML declaration must be the very first bytes of the file;
     the banner comment previously preceded it, making the rendered output
     ill-formed XML. -->
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
<xsl:output method="html"/>
<xsl:template match="configuration">
<html>
<body>
<table border="1">
<tr>
<td>name</td>
<td>value</td>
<td>description</td>
</tr>
<xsl:for-each select="property">
<tr>
<td><a name="{name}"><xsl:value-of select="name"/></a></td>
<td><xsl:value-of select="value"/></td>
<td><xsl:value-of select="description"/></td>
</tr>
</xsl:for-each>
</table>
</body>
</html>
</xsl:template>
</xsl:stylesheet>

View File

@ -0,0 +1,10 @@
# {{ ansible_managed }}
# Environment for the zookeeperd service wrapper.
ZOOMAIN=org.apache.zookeeper.server.quorum.QuorumPeerMain
ZOOCFGDIR=/etc/zookeeper/conf
ZOOCFG=/etc/zookeeper/conf/zoo.cfg
ZOO_LOG_DIR=/var/log/zookeeper
# Select the ROLLINGFILE appender defined in log4j.properties.
ZOO_LOG4J_PROP=INFO,ROLLINGFILE
JMXLOCALONLY=false
JAVA_OPTS=""
JAVA=/usr/bin/java
CLASSPATH="/etc/zookeeper/conf:/usr/share/java/jline.jar:/usr/share/java/log4j-1.2.jar:/usr/share/java/xercesImpl.jar:/usr/share/java/xmlParserAPIs.jar:/usr/share/java/netty.jar:/usr/share/java/slf4j-api.jar:/usr/share/java/slf4j-log4j12.jar:/usr/share/java/zookeeper.jar"

View File

@ -0,0 +1,50 @@
# ZooKeeper Logging Configuration
# {{ ansible_managed }}
# Format is "<default threshold> (, <appender>)+
# The effective root logger is injected via the zookeeper.root.logger system
# property (set from ZOO_LOG4J_PROP in the environment file).
log4j.rootLogger=${zookeeper.root.logger}
# Example: console appender only
# log4j.rootLogger=INFO, CONSOLE
# Example with rolling log file
#log4j.rootLogger=DEBUG, CONSOLE, ROLLINGFILE
# Example with rolling log file and tracing
#log4j.rootLogger=TRACE, CONSOLE, ROLLINGFILE, TRACEFILE
#
# Log INFO level and above messages to the console
#
log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender
log4j.appender.CONSOLE.Threshold=INFO
log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout
log4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n
#
# Add ROLLINGFILE to rootLogger to get log file output
# Log DEBUG level and above messages to a log file
log4j.appender.ROLLINGFILE=org.apache.log4j.RollingFileAppender
log4j.appender.ROLLINGFILE.Threshold=DEBUG
log4j.appender.ROLLINGFILE.File=${zookeeper.log.dir}/zookeeper.log
# Max log file size of 10MB
log4j.appender.ROLLINGFILE.MaxFileSize=10MB
# uncomment the next line to limit number of backup files
#log4j.appender.ROLLINGFILE.MaxBackupIndex=10
log4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout
log4j.appender.ROLLINGFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n
#
# Add TRACEFILE to rootLogger to get log file output
# Log DEBUG level and above messages to a log file
log4j.appender.TRACEFILE=org.apache.log4j.FileAppender
log4j.appender.TRACEFILE.Threshold=TRACE
log4j.appender.TRACEFILE.File=${zookeeper.log.dir}/zookeeper_trace.log
log4j.appender.TRACEFILE.layout=org.apache.log4j.PatternLayout
### Notice we are including log4j's NDC here (%x)
log4j.appender.TRACEFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L][%x] - %m%n

View File

@ -0,0 +1 @@
{{ ansible_local.host_id }}

View File

@ -0,0 +1,13 @@
# PVC Zookeeper configuration
# {{ ansible_managed }}
# tickTime is halved from the stock 2000 ms, tightening failure detection.
tickTime=1000
initLimit=10
syncLimit=5
dataDir=/var/lib/zookeeper
clientPort=2181
# One ensemble entry per coordinator; 2888 = peer port, 3888 = election port.
{% for node in pvc_nodes if node.is_coordinator %}
server.{{ node.node_id }}={{ node.hostname }}:2888:3888
{% endfor %}