lot of stuff

ben 2020-12-18 14:46:01 +01:00
parent da7073c62e
commit 94151b727d
56 changed files with 1027 additions and 251 deletions

View File

@ -14,7 +14,7 @@
- name: start container
docker_container:
name: bitcoind
image: kylemanna/bitcoind
image: kylemanna/bitcoind:latest
auto_remove: yes # auto-remove and no restart policy work better across reboots
detach: yes
pull: yes

View File

@ -1,13 +1,72 @@
---
- name: set hostname
hostname:
name: "{{ item }}"
name: "{{ inventory_hostname }}"
use: systemd
with_items:
- "{{ inventory_hostname }}"
tags:
- hostname
- name: set image hostname
hostname:
name: "sensor-image"
use: systemd
tags:
- never
- image
- name: copy ssh keys if known
copy:
src: "{{ item }}"
dest: /etc/ssh/
with_fileglob: "private/ssh_host_keys/{{ inventory_hostname }}/ssh_host_*"
tags:
- ssh
- name: copy sensor-image keys when relevant
copy:
src: "{{ item }}"
dest: /etc/ssh/
with_fileglob: "private/ssh_host_keys/sensor-image/ssh_host_*"
tags:
- never
- image
- name: make dir for host keys
file:
state: directory
path: "private/ssh_host_keys/{{ inventory_hostname }}"
owner: "{{ myusername }}"
group: "{{ myusername }}"
delegate_to: localhost
tags:
- ssh
- name: save host keys
fetch:
src: "/etc/ssh/{{ item }}"
dest: "private/ssh_host_keys/{{ inventory_hostname }}/"
flat: yes
with_items:
- ssh_host_ecdsa_key
- ssh_host_ecdsa_key.pub
- ssh_host_ed25519_key
- ssh_host_ed25519_key.pub
- ssh_host_rsa_key
- ssh_host_rsa_key.pub
tags:
- ssh
- name: chown ssh_host_keys
file:
path: private/ssh_host_keys
owner: "{{ myusername }}"
group: "{{ myusername }}"
mode: 0700
recurse: yes
delegate_to: localhost
become: true
tags:
- ssh
- name: Set timezone to Europe/Berlin
timezone:
name: Europe/Berlin
@ -46,33 +105,40 @@
- alternatives
when: ansible_distribution == "Ubuntu"
- name: remove packages that should only be on mainframe (temp and enable me pls)
- name: remove packages that should only be on mainframe
apt:
state: absent
purge: yes
name:
- autoconf # emacs-build
- gnupg2
- gnutls-bin # emacs-build
- irssi
when: "{{ inventory_hostname }} != mainframe.sudo.is"
- kpcli
- libgnutls28-dev # emacs-build
- libncurses-dev # emacs-build
- pkg-config # emacs-build
- texinfo # emacs-build
- libffi-dev
- libssl-dev
- cmake # emacs vterm
- libtool # emacs vterm
- libtool-bin
when: 'inventory_hostname != "mainframe.sudo.is"'
tags:
- remove-packages
- packages
- name: install packages
apt:
state: latest
name:
- autoconf # emacs-build
- texinfo # emacs-build
- gnutls-bin # emacs-build
- libgnutls28-dev # emacs-build
- pkg-config # emacs-build
- libncurses-dev # emacs-build
- apt-transport-https
- aptitude
- lsb-release
- lsb-base
- at
- cmake # emacs vterm
- libtool # emacs vterm
- libtool-bin
- build-essential
- ca-certificates
- cbm
@ -80,7 +146,6 @@
- curl
- dnsutils
- git
- gnupg2
- procmail
- acl
- sshfs
@ -93,9 +158,6 @@
- iotop
- iptraf
- jq
- kpcli
- libffi-dev
- libssl-dev
- lsof
- lvm2
- molly-guard
@ -104,7 +166,6 @@
- nethogs
- nmap
#- ntp
- openssh-server
- openssl
#- python-netaddr
- rsync
@ -127,9 +188,18 @@
- update-motd
- landscape-client
- landscape-common
autoremove: yes
state: absent
purge: yes
tags: packages
- name: make sure openssh-server is installed
apt:
name: openssh-server
state: present
tags:
- packages
- name: remove ntp package (systemd...)
apt:
name: ntp
@ -163,18 +233,22 @@
- name: check if hosts file needs fixing
command: grep {{ inventory_hostname }} /etc/hosts
register: grephosts
tags: etchosts
failed_when: grephosts.rc >= 2
changed_when: grephosts.rc != 0
tags:
- image
- etchosts
- name: fix hosts file
lineinfile:
path: /etc/hosts
line: "127.0.0.1 {{ inventory_hostname }}"
create: no
tags: etchosts
ignore_errors: yes # fails in --check
when: grephosts.rc != 0
tags:
- etchosts
- image
- name: disable password authentication
replace:
@ -209,36 +283,66 @@
recurse: yes
when: ansible_distribution == "Ubuntu"
- name: install unattended-upgrades
- name: install/remove unattended-upgrades
apt:
name: unattended-upgrades
state: present
state: "{% if unatt_enabled|bool %}present{% else %}absent{%endif%}"
update_cache: yes
when: unatt_enabled|bool
tags:
- unatt
- packages
- name: install pip packages
- name: install common pip packages
pip:
name:
- ansible
- poetry
- netaddr
- certbot
- influxdb
state: latest
executable: pip3
tags:
- pip
- packages
- name: install zmq pip package
- name: remove pip packages that are only needed on mainframe
pip:
name: zmq
name:
- ansible
- certbot
- netaddr
state: absent
executable: pip3
when: 'inventory_hostname != "mainframe.sudo.is"'
tags:
- remove-packages
- pip
- packages
when: zmq_install
- name: use grafana instead of emails to notify of reboots needed
template:
src: "{{ item.src }}"
dest: "{{ item.dest }}"
owner: root
group: root
mode: "{{ item.mode }}"
with_items:
- src: reboot_required.py.j2
dest: /usr/local/bin/reboot_required.py
mode: "0770"
- src: reboot_required-cron.j2
dest: /etc/cron.d/reboot_required
mode: "0644"
tags:
- rebootrequired
# - name: install zmq pip package
# pip:
# name: zmq
# executable: pip3
# tags:
# - pip
# - packages
# when: zmq_install
# TODO: Decide on which way...
- name: enable unattended-upgrades (following codenames)
@ -251,3 +355,14 @@
when: unatt_enabled|bool
tags:
- unatt
- name: clean up unattended-upgrades config if not used
file:
path: "/etc/apt/apt.conf.d/{{ item }}"
state: absent
with_items:
- 50unattended-upgrades
- 20auto-upgrades
when: not unatt_enabled|bool
tags:
- unatt2

View File

@ -8,6 +8,7 @@
tags:
- systemusers
- humanusers
- sshkeys
- name: list human users
debug:
@ -25,6 +26,7 @@
- systemusers
- humanusers
- users
- sshkeys
- name: set root shell to zsh
@ -78,6 +80,16 @@
with_items: "{{ human_users }}"
tags: human
- name: chmod and chown for .ssh
file:
path: "~{{ item }}/.ssh"
state: directory
owner: "{{ item }}"
group: "{{ item }}"
mode: 0700
with_items: "{{ human_users }}"
tags: human
- name: move homedirs if host uses alternative location
user:
name: "{{ userlist[item]['username'] }}"
@ -96,11 +108,14 @@
when: is_local is not defined
- name: set authorized_keys for users with local pubkey files
authorized_key:
user: "{{ item }}"
state: present
key: "{{ lookup('file', 'private/sshkeys/' + item + '.pub') }}"
manage_dir: yes
template:
src: "private/sshkeys/{{ item }}.pub"
dest: "~/.ssh/authorized_keys"
owner: "{{ item }}"
group: "{{ item }}"
mode: 0600
become: true
become_user: "{{ item }}"
with_items: "{{ human_users }}"
when:
- userlist[item]['sshkey']
@ -109,18 +124,39 @@
- name: set up system users
block:
- name: remove system users that shouldn't be on this system
user:
name: "{{ item.key }}"
state: absent
remove: yes
loop_control:
label: "{{ item.key }}"
with_dict: "{{ systemuserlist }}"
when: item.key not in system_users
- name: remove system groups that shouldn't be on this system
group:
name: "{{ item.key }}"
state: absent
loop_control:
label: "{{ item.key }}"
with_dict: "{{ systemuserlist }}"
when: item.key not in system_users
- name: create system user groups
group:
state: "{% if item.key in system_users %}present{% else %}absent{%endif%}"
name: "{{ item.key }}"
gid: "{{ item.value.gid | default(item.value.uid) }}"
with_dict: "{{ systemuserlist }}"
when: item.key in system_users
loop_control:
label: "[{% if item.key in system_users %}present{% else %}absent{%endif%}] {{ item.key }}"
- name: create system users
user:
state: "{% if item.key in system_users %}present{% else %}absent{%endif%}"
state: present
name: "{{ item.key }}"
group: "{{ item.key }}"
groups: "{{ item.value.groups | default([]) }}"
@ -131,6 +167,7 @@
move_home: yes
append: yes
system: yes
when: item.key in system_users
loop_control:
label: "[{% if item.key in system_users %}present{% else %}absent{%endif%}] {{ item.key }}"
with_dict: "{{ systemuserlist }}"

View File

@ -100,7 +100,7 @@ Unattended-Upgrade::AutoFixInterruptedDpkg "true";
// If empty or unset then no email is sent, make sure that you
// have a working mail setup on your system. A package that provides
// 'mailx' must be installed. E.g. "user@example.com"
Unattended-Upgrade::Mail "{{ systems_email }}";
// Unattended-Upgrade::Mail "{{ systems_email }}";
// Set this value to "true" to get emails only on errors. Default
// is to always send a mail if Unattended-Upgrade::Mail is set

View File

@ -0,0 +1,9 @@
# distributed from ansible
@reboot root /usr/local/bin/reboot_required.py
# m h dom mon dow
30 */1 * * * root /usr/local/bin/reboot_required.py
#

View File

@ -0,0 +1,41 @@
#!/usr/bin/env python3
import time
import os
import sys
import socket
from datetime import datetime, timezone
from influxdb import InfluxDBClient
client = InfluxDBClient(
    host='{{ influxdb_url }}',
    port=443,
    username='metrics',
    password="{{ influxdb_pass.metrics }}",
    ssl=True,
    verify_ssl=True,
    database='metrics'
)
try:
    ctime = os.stat('/var/run/reboot-required').st_ctime
    age = time.time() - ctime
    reboot_required = True
except FileNotFoundError:
    age = 0
    reboot_required = False
datapoints = [{
    "measurement": "reboot_required",
    "tags": {'host': socket.gethostname()},
    "time": datetime.now(timezone.utc).isoformat(),
    "fields": {
        'reboot_required': reboot_required,
        'age': float(age)
    }
}]
if "-v" in sys.argv:
    print(datapoints)
client.write_points(datapoints, time_precision='h')

View File

@ -0,0 +1,3 @@
---
macvlan: []

View File

@ -78,7 +78,7 @@
- name: set docker gid
group:
name: docker
gid: 998
gid: "{{ docker_gid | default(998) }}"
state: present
- name: install pip modules for ansible tasks
@ -102,3 +102,26 @@
internal: no
tags:
- docker-network
- name: optional macvlan bridge to host networks
when: macvlan|length > 0
tags:
- macvlan
- docker-network
block:
- name: set up macvlan
docker_network:
name: "macvlan-local"
driver: macvlan
ipam_config:
- subnet: "{{ item.net }}"
gateway: "{{ item.gw }}"
iprange: "{{ item.range }}"
driver_options:
parent: "{{ item.iface }}"
internal: no
with_items: "{{ macvlan }}"

View File

@ -12,7 +12,7 @@
mode: 0750
owner: "{{ myusername }}"
group: "{{ myusername }}"
recurse: yes
- name: pull my dotfiles repo
@ -27,6 +27,10 @@
ignore_errors: yes
register: gitpull
- name: check if the dotfiles repo is managed in ~/projects/dotfiles
stat:
path: "~{{ myusername }}/projects/dotfiles"
register: projects_dotfiles
- name: put my dotfiles in place
copy:
@ -36,7 +40,9 @@
with_items:
- "{{ dotfiles }}"
become_user: "{{ myusername }}"
when: gitpull.changed
when:
- gitpull.changed
- not projects_dotfiles.stat.exists
- name: put dotfiles for root in place
copy:
@ -47,4 +53,6 @@
group: root
with_items:
- "{{ dotfiles }}"
when: gitpull.changed
when:
- gitpull.changed
- not projects_dotfiles.stat.exists

View File

@ -0,0 +1,44 @@
---
- name: create dir structure
file:
path: "{{ systemuserlist.gitea.home }}/{{ item }}"
state: directory
mode: 0750
owner: gitea
group: gitea
with_items:
- data
- name: start container
docker_container:
name: gitea
image: gitea/gitea:1
detach: yes
pull: yes
restart_policy: "no"
state: started
user:
ports:
- 7999:22/tcp
volumes:
- "{{ systemuserlist.gitea.home }}/data:/data"
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
env:
USER_UID: "{{ systemuserlist.gitea.uid }}"
USER_GID: "{{ systemuserlist.gitea.gid }}"
DB_TYPE: mysql
DB_HOST: "{{ ansible_docker0.ipv4.address }}:3306"
DB_NAME: gitea
DB_USER: gitea
DB_PASSWD: "{{ systemuserlist.gitea.mariadb_pass }}"
DISABLE_REGISTRATION: "true"
networks_cli_compatible: no
networks:
- name: bridgewithdns
ipv4_address: "{{ bridgewithdns.gitea }}"
tags:
- gitea-container
- docker-containers
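
This task pulls its settings from the systemuserlist.gitea entry; a sketch of the keys it reads (values, including the password placeholder, are illustrative):

systemuserlist:
  gitea:
    uid: 980
    gid: 980
    home: /var/lib/gitea
    mariadb_pass: changeme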

View File

@ -0,0 +1,3 @@
---
- include: gitea.yml
tags: gitea

View File

@ -66,6 +66,7 @@
tags:
- jellyfin-conf
- jellyfin-users
- jellyfin-auth
- nginx-conf
with_items:
- users.digest

View File

@ -1,10 +1,58 @@
---
# should be installed on e.g. ber0 as well,
# so we need some sort of task to install packages on
# human-used systems
- name: install packages
apt:
update_cache: yes
name:
- irssi
- autoconf # emacs-build
- ffmpeg
- gnupg2
- gnutls-bin # emacs-build
- irssi
- kpcli
- libgnutls28-dev # emacs-build
- libncurses-dev # emacs-build
- pkg-config # emacs-build
- texinfo # emacs-build
- libffi-dev
- libssl-dev
- cmake # emacs vterm
- libtool # emacs vterm
- libtool-bin
tags:
- packages
- name: install pip packages
pip:
name:
- ansible
- certbot
- netaddr
state: latest
executable: pip3
tags:
- pip
- packages
- name: maintain master copy of repos
template:
src: "{{ item.src }}"
dest: "{{ item.dest }}"
owner: root
group: root
mode: 0770
tags:
- unatt
with_items:
- src: git-cron.j2
dest: /etc/cron.d/git-cron
- src: git-cron.sh.j2
dest: /usr/local/sbin/git-cron.sh
- src: ansible-convergence.sh.j2
dest: /usr/local/sbin/ansible-convergence.sh
- src: tf-convergence.sh.j2
dest: /usr/local/sbin/tf-convergence.sh
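
These cron and convergence templates are all driven by a git_cron variable; a minimal sketch of the expected shape (URLs and paths are illustrative; git-cron.sh additionally expects an infra repo checked out under base_path):

git_cron:
  base_path: /srv/git
  convergence: site.yml
  private: private
  repos:
    - src: https://git.example.com/infra.git
      dest: infra
      private_links:
        - vars.yml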

View File

@ -0,0 +1,21 @@
#!/bin/bash
set -e
{% set base = git_cron.base_path %}
cd {{ base }}/infra
if [ "$1" = "sensors" ]; then
ansible-playbook \
-i hosts-sensors.yml \
sensors.yml \
--diff \
--check > {{ base }}/ansible-convergence-sensors.txt
else
ansible-playbook \
-i hosts.yml \
{{ git_cron.convergence }} \
--diff \
--check > {{ base }}/ansible-convergence.txt
fi

View File

@ -0,0 +1,13 @@
# distributed from ansible
# m h dom mon dow
15 */1 * * * root /usr/local/sbin/git-cron.sh
#13 * * * * root /usr/local/sbin/ansible-convergence.sh
13 3 * * * root /usr/local/sbin/ansible-convergence.sh sensors
17 */3 * * * root /usr/local/sbin/tf-convergence.sh
#

View File

@ -0,0 +1,41 @@
#!/bin/bash
set -e
cd /root
{% set base = git_cron.base_path %}
{% for repo in git_cron.repos %}
if [ ! -d {{ base }}/{{ repo.dest }}/.git ]; then
git clone {{ repo.src }} {{ base }}/{{ repo.dest }}
fi
{% endfor %}
(
{% for repo in git_cron.repos %}
echo "{{ repo.dest }}"
cd {{ base }}/{{ repo.dest }}
git pull
{% endfor %}
) > /dev/null
(
{% for repo in git_cron.repos %}
{% for link_src in repo.private_links|default([]) %}
{% set link = '/'.join([base, git_cron.private, link_src]) %}
ln -s {{ link }} {{ base }}/{{ repo.dest }}/
{% endfor %}
{% endfor %}
mkdir -p /root/.zsh.d/
ln -s /root/dotfiles/zsh/.zsh.d/jreese2.zsh-theme /root/.zsh.d/
ln -s /root/dotfiles/zsh/.zshrc /root/.zshrc
ln -s /root/dotfiles/emacs/.emacs /root/.emacs
ln -s /root/dotfiles/ssh/config /root/.ssh/config
) &> /dev/null

View File

@ -0,0 +1,7 @@
#!/bin/bash
set -e
{% set base = git_cron.base_path %}
{{ base }}/tf/tf.py plan > {{ base }}/tf-convergence.txt

View File

@ -22,6 +22,8 @@
src: users.sql.j2
dest: /usr/local/etc/users.sql
notify: reload mariadb users
tags:
- users
- name: create dump destination
file:

View File

@ -9,18 +9,6 @@
tags:
- grafana-config
# renewal cron job is set in the nginx role (not the best place)
- name: get letsencrypt cert
command: "/usr/local/bin/certbot certonly -n --nginx -d {{ grafana_url }} --agree-tos --email {{ myemail }}"
args:
creates: "/etc/letsencrypt/live/{{ grafana_url }}/fullchain.pem"
environment:
PATH: "{{ ansible_env.PATH }}:/sbin:/usr/sbin"
tags:
- letsencrypt
- influxdb-letsencrypt
- name: nginx vhost
template:
src: 01-grafana.j2

View File

@ -22,18 +22,6 @@
- influxdb-scripts
# renewal cron job is set in the nginx role (not the best place)
- name: get letsencrypt cert
command: "/usr/local/bin/certbot certonly -n --nginx -d {{ influxdb_url }} --agree-tos --email {{ myemail }}"
args:
creates: "/etc/letsencrypt/live/{{ influxdb_url }}/fullchain.pem"
environment:
PATH: "{{ ansible_env.PATH }}:/sbin:/usr/sbin"
tags:
- letsencrypt
- influxdb-letsencrypt
- name: nginx vhost
template:
src: 01-influxdb.j2

View File

@ -6,13 +6,13 @@
owner: "{{ myusername }}"
group: www-data
mode: 0755
path: "/var/www/{{ item }}"
path: "/var/www/{{ item.name }}"
with_items: "{{ static_sites }}"
- name: template static website configs
template:
src: nginx-static-vhost.j2
dest: /etc/nginx/sites-enabled/02-{{ item }}
dest: /etc/nginx/sites-enabled/02-{{ item.name }}
owner: www-data
group: www-data
with_items: "{{ static_sites }}"

View File

@ -1,6 +1,6 @@
server {
listen 80;
server_name {{ item }};
server_name {{ item.name }} {{ item.aliases | default([]) | join(" ") }};
return 301 https://$host$request_uri;
}
@ -9,9 +9,9 @@ server {
listen 443 ssl;
listen [::]:443 ssl; # listen for ipv6
server_name {{ item }};
server_name {{ item.name }} {{ item.aliases | default([]) | join(" ") }};
root /var/www/{{ item }};
root /var/www/{{ item.name }};
location / {
autoindex on;
}
@ -19,13 +19,13 @@ server {
add_header Content-Type text/plain;
}
access_log /var/log/nginx/{{ item }}.log main;
error_log /var/log/nginx/{{ item }}_error.log warn;
access_log /var/log/nginx/access_{{ item.name }}.log main;
error_log /var/log/nginx/error_{{ item.name }}.log warn;
ssl_session_timeout 5m;
# using wildcard for our domain, but cloudflare doesn't care
{% set domain = '.'.join(item.split('.')[-2:]) %}
{% set domain = '.'.join(item.name.split('.')[-2:]) %}
ssl_certificate /etc/letsencrypt/live/{{ domain }}/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/{{ domain }}/privkey.pem;

View File

@ -24,6 +24,8 @@
group: root
mode: "{{ item['mode'] }}"
path: "{{ item['path'] }}"
tags:
- letsencrypt
with_items:
- { path: /etc/letsencrypt, mode: '0755' }
- { path: /etc/letsencrypt/live, mode: '0700' }
@ -35,6 +37,9 @@
owner: root
group: root
mode: 0755
tags:
- letsencrypt
notify: reload nginx
with_items: "{{ letsencrypt_domains }}"
- name: install nginx
@ -125,55 +130,29 @@
enabled: yes
state: started
- name: install certbot from pip (currently the easiest way to get certbot-nginx)
- name: remove certbot, should only be on mainframe
pip:
name:
- certbot
- certbot-nginx
state: absent
executable: pip3
when: 'inventory_hostname != "mainframe.sudo.is"'
tags:
- letsencrypt
- packages
# The command in the nextcloud role might be better than this
# - name: lets encrypt
# command: "/usr/local/bin/certbot -n --nginx -d {{ item }} --agree-tos --email {{ myemail }}"
# args:
# creates: "/etc/letsencrypt/live/{{ item.split(' ')[0] }}/fullchain.pem"
# environment:
# PATH: "{{ ansible_env.PATH }}:/sbin:/usr/sbin"
# with_items: "{{ letsencrypt_defaults }} + ['{{ inventory_hostname }}'] "
# tags:
# - letsencrypt
- name: remove letsencrypt renewal cron job from the cron module
cron:
name: letsencrypt_renewal
weekday: "0"
minute: "0"
hour: "16"
job: "/usr/local/bin/certbot renew -n" # send via telegram
state: absent
tags: letsencrypt
ignore_errors: yes
# - name: get letsencrypt cert for hostname (can be used by other roles as well)
# command: "/usr/local/bin/certbot -n certonly --standalone -d {{ inventory_hostname }} --agree-tos --email {{ myemail }}"
# args:
# creates: "/etc/letsencrypt/live/{{ inventory_hostname }}/fullchain.pem"
# environment:
# PATH: "{{ ansible_env.PATH }}:/sbin:/usr/sbin"
# tags:
# - letsencrypt
# - postfix
# - nginx
# when:
# - "letsencrypt_hostname|default(True)"
- name: template certbot cron renewal job
template:
src: letsencrypt.cron
dest: /etc/cron.d/letsencrypt
- name: remove letsencrypt cron file
file:
path: /etc/cron.d/letsencrypt
state: absent
tags:
- letsencrypt

View File

@ -3,7 +3,7 @@ server {
listen 443 ssl;
listen [::]:443 ssl; # listen for ipv6
server_name {{ vhost.name }};
server_name {{ vhost.name }} {{ vhost.aliases | default([]) | join(" ") }};
{% if 'ws' in vhost %}
location /{{ vhost.ws }}/ {

View File

@ -2,7 +2,9 @@
- name: include private vars
include_vars: private/vars.yml
tags: docker
tags:
- docker
- pihole-container
- name: create dir structure
file:
@ -24,7 +26,7 @@
owner: root
group: root
mode: 0644
notify: restart pihole
#notify: restart pihole
tags:
- pihole-config
- pihole-overrides
@ -39,7 +41,7 @@
tags:
- pihole-config
when: pihole_dhcp
notify: restart pihole
#notify: restart pihole
- name: install dhcp-helper
apt:
@ -128,6 +130,42 @@
#
# TODO: does pihole run with uid 999? if so, change it, since telegraf wants that uid
# - name: start docker container
# docker_container:
# name: "pihole"
# image: "pihole/pihole:{{ pihole_version | default('latest') }}"
# auto_remove: no
# detach: yes
# pull: yes
# restart_policy: "unless-stopped"
# state: started
# capabilities: "{% if pihole_dhcp %}['NET_ADMIN']{% else %}[]{% endif %}"
# env:
# TZ: "Europe/Berlin"
# WEBPASSWORD: "{{ pihole_web_password }}"
# DNS1: "{{ upstream_dns_1 }}"
# DNS2: "{{ upstream_dns_2 }}"
# CONDITIONAL_FORWARDING: "{{ pihole_cond|string }}"
# CONDITIONAL_FORWARDING_IP: "{{ pihole_cond_ip }}"
# CONDITIONAL_FORWARDING_DOMAIN: "{{ pihole_cond_domain }}"
# ports:
# - "53:53/tcp"
# - "53:53/udp"
# - "{{ pihole_http_inet }}:{{ pihole_http_port }}:80/tcp"
# dns_servers:
# - 127.0.0.1
# - pihole_dns_1
# - pihole_dns_2
# hostname: "{{ inventory_hostname.split('.')[0] }}-pihole"
# networks_cli_compatible: no
# networks:
# - name: bridgewithdns
# ipv4_address: "{{ pihole_docker_ip }}"
# volumes:
# - "{{ pihole_root }}/etc/pihole:/etc/pihole"
# - "{{ pihole_root }}/etc/dnsmasq.d:/etc/dnsmasq.d"
# tags: pihole-container
- name: start docker container
docker_container:
name: "pihole"
@ -137,7 +175,9 @@
pull: yes
restart_policy: "unless-stopped"
state: started
capabilities: "{% if pihole_dhcp %}['NET_ADMIN']{% else %}[]{% endif %}"
network_mode: host
capabilities:
- NET_ADMIN
env:
TZ: "Europe/Berlin"
WEBPASSWORD: "{{ pihole_web_password }}"
@ -147,25 +187,66 @@
CONDITIONAL_FORWARDING_IP: "{{ pihole_cond_ip }}"
CONDITIONAL_FORWARDING_DOMAIN: "{{ pihole_cond_domain }}"
ports:
- "53:53/tcp"
- "53:53/udp"
- "{{ pihole_http_inet }}:{{ pihole_http_port }}:80/tcp"
- "53/tcp"
- "53/udp"
- "80/tcp"
dns_servers:
- 127.0.0.1
- pihole_dns_1
- pihole_dns_2
networks_cli_compatible: no
networks:
- name: bridgewithdns
ipv4_address: "{{ pihole_docker_ip }}"
hostname: "{{ inventory_hostname.split('.')[0] }}-pihole"
volumes:
- "{{ pihole_root }}/etc/pihole:/etc/pihole"
- "{{ pihole_root }}/etc/dnsmasq.d:/etc/dnsmasq.d"
tags: pihole-container
- name: wait for {{ pihole_docker_ip }}:53
# - name: start docker container
# docker_container:
# name: "pihole"
# image: "pihole/pihole:{{ pihole_version | default('latest') }}"
# auto_remove: no
# detach: yes
# pull: yes
# restart_policy: "unless-stopped"
# state: started
# capabilities:
# - NET_ADMIN
# env:
# TZ: "Europe/Berlin"
# WEBPASSWORD: "{{ pihole_web_password }}"
# DNS1: "{{ upstream_dns_1 }}"
# DNS2: "{{ upstream_dns_2 }}"
# CONDITIONAL_FORWARDING: "{{ pihole_cond|string }}"
# CONDITIONAL_FORWARDING_IP: "{{ pihole_cond_ip }}"
# CONDITIONAL_FORWARDING_DOMAIN: "{{ pihole_cond_domain }}"
# ports:
# - "53/tcp"
# - "53/udp"
# - "80/tcp"
# dns_servers:
# - 127.0.0.1
# - upstream_dns_1
# - upstream_dns_2
# hostname: "{{ inventory_hostname.split('.')[0] }}-pihole"
# networks_cli_compatible: no
# networks:
# - name: macvlan-local
# ipv4_address: "{{ pihole_docker_ip }}"
# volumes:
# - "{{ pihole_root }}/etc/pihole:/etc/pihole"
# - "{{ pihole_root }}/etc/dnsmasq.d:/etc/dnsmasq.d"
# tags: pihole-container
# - name: wait for {{ pihole_docker_ip }}:53
# wait_for:
# port: 53
# host: "{{ pihole_docker_ip }}"
# sleep: 4
# tags: pihole-container
- name: wait for port 53
wait_for:
port: 53
host: "{{ pihole_docker_ip }}"
host: localhost
sleep: 4
tags: pihole-container

View File

@ -46,44 +46,3 @@
src: aliases.j2
dest: /etc/aliases
notify: newaliases
- name: set permissions so telegraf can read if enabled
file:
dest: "/var/spool/postfix/{{ item }}"
group: mail
mode: 0750
recurse: yes
with_items:
- active
- deferred
- hold
- incoming
notify:
- restart postfix
tags:
- telegraf
when: enable_telegraf
- name: set permissions on maildrop dir
file:
dest: /var/spool/postfix/maildrop
mode: u=rwx,g=rwx,+t
recurse: yes
notify: restart postfix
tags:
- telegraf
when: enable_telegraf
- name: adding telegraf user to postfix related groups
user:
name: telegraf
append: yes
groups:
- postfix
- mail
- postdrop
tags:
- telegraf
when: enable_telegraf
notify: restart telegraf

View File

@ -0,0 +1,14 @@
---
- name: restart sudoisbot temp_pub
systemd:
state: restarted
daemon_reload: yes
name: "sudoisbot@temp_pub"
when: "'temp' in sensors"
- name: restart sudoisbot rain_pub
systemd:
state: restarted
daemon_reload: yes
name: "sudoisbot@rain_pub"
when: "'rain' in sensors"

View File

@ -1,11 +1,29 @@
---
# probably: libyaml-dev
- name: add {{ myusername }} to groups for sensors
user:
name: "{{ myusername }}"
groups:
- gpio
- dialout
- sensors
append: yes
- name: dhcpd hook for route to vpn
template:
src: 40-route.j2
dest: /lib/dhcpcd/dhcpcd-hooks/40-route
tags:
- vpnclient
- name: enable w1 in bootloader config
lineinfile:
path: /boot/config.txt
line: "dtoverlay=w1-gpio"
create: no
#when: "'ds18b20' in sensors|map(attribute='type')"
when: "'ds18b20' in sensors.temp|map(attribute='kind')"
register: bootconfig
# molly-guard is usually installed, but it symlinks the real binaries
@ -19,43 +37,122 @@
- "/sbin"
when:
- bootconfig.changed
#- "'ds18b20' in sensors|map(attribute='type')"
- "'ds18b20' in sensors.temp|map(attribute='kind')"
- name: install dependencies for dht.c
apt:
name: wiringpi
state: present
when:
- "'dht' in sensors|map(attribute='type')"
- "'temp' in sensors"
- "'dht' in sensors.temp|map(attribute='kind')"
tags: dht
- name: find /dev/hidraw* files
find:
paths: /dev
patterns: 'hidraw*'
recurse: no
file_type: any
when:
- "'temp' in sensors"
- "'temper' in sensors.temp|map(attribute='kind')"
register: hidraw
tags:
- hidraw
- name: set /dev/hidraw permissions
file:
path: "{{ item.path }}"
group: sensors
mode: 0660
notify: restart sudoisbot temp_pub
when:
- "'temp' in sensors"
- "'temper' in sensors.temp|map(attribute='kind')"
with_items: "{{ hidraw.files }}"
loop_control:
label: "{{ item.path }}"
tags:
- hidraw
- name: copy dht.c
copy:
src: dht.c
dest: /root/dht.c
when:
- "'dht' in sensors|map(attribute='type')"
- "'temp' in sensors"
- "'dht' in sensors.temp|map(attribute='kind')"
register: dht_c
tags:
- dht
- dht-update
- deploy
- name: compile and install dht.c
command: cc -Wall /root/dht.c -o /usr/local/bin/dht -lwiringPi
when:
- dht_c.changed
- "'dht' in sensors|map(attribute='type')"
- "'temp' in sensors"
- "'dht' in sensors.temp|map(attribute='kind')"
tags:
- dht
- dht-update
- deploy
notify:
- restart sudoisbot temp_pub
- restart sudoisbot rain_pub
- name: template sudoisbot config (wrong location right now)
- name: template sudoisbot config to standard location
template:
src: sudoisbot.yml.j2
dest: /tmp/wrongsudoisbot.yml
dest: /usr/local/etc/sudoisbot.yml
owner: root
group: root
mode: 0644
tags:
- sensor-config
- sensors-config
- sensors-sudoisbot
- deploy
notify:
- restart sudoisbot temp_pub
- restart sudoisbot rain_pub
- name: install sudoisbot as pip package
pip:
name: sudoisbot
state: latest
executable: pip3
tags:
- sensors-sudoisbot
- deploy
notify:
- restart sudoisbot temp_pub
- restart sudoisbot rain_pub
- name: install systemd config for sudoisbot
template:
src: sudoisbot.service.j2
dest: "/etc/systemd/system/sudoisbot@.service"
tags:
- sensors-config
- sensors-systemd
- deploy
notify:
- restart sudoisbot temp_pub
- restart sudoisbot rain_pub
- name: enable sudoisbot
systemd:
state: started
enabled: yes
daemon_reload: yes
name: "sudoisbot@{{ item.key }}_pub"
loop_control:
label: "sudoisbot@{{ item.key }}_pub"
with_dict:
- "{{ sensors }}"
tags:
- sensors-config
- sensors-systemd
- deploy
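
The sensors variable these tasks and the sudoisbot templates iterate over is now a dict keyed by sensor type (temp, rain, ...), each holding a list of sensors with a kind; a sketch (names, ids and the rain sensor kind are illustrative; the kinds checked above are ds18b20, dht and temper):

sensors:
  temp:
    - name: livingroom
      kind: ds18b20
      id: 28-0000000000ff
    - name: balcony
      kind: dht
  rain:
    - name: roof
      kind: tilt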

View File

@ -0,0 +1,2 @@
ip route add {{ wireguard_cidr }} via {{ vpn_gw }}
ip route add {{ wireguard_clients['ber0.sudo.is'].ip }}/32 via {{ vpn_gw }}

View File

@ -0,0 +1,14 @@
[Unit]
Description=sudoisbot (sensor)
After=syslog.target
[Service]
User=sensors
Group=sensors
WorkingDirectory={{ systemuserlist['sensors']['home'] }}
ExecStart=/usr/local/bin/sudoisbot %I
Restart=on-success
RestartSec=0
[Install]
WantedBy=multi-user.target

View File

@ -1,22 +1,22 @@
---
# needs to be fixed in the code: it expects a file logger, so this will throw an error
logging:
level: ERROR
broker: tcp://{{ broker }}:5559
frequency: {{ frequency | default(240) }}
location: {{ location | default(inventory_hostname.split('.')[1]) }}
sensor_log_no: {{ temp_log_no | default(9) }}
{% if sensors is defined %}
sensors:
{% for sensor in sensors %}
- name: {{ sensor.name }}
type: {{ sensor.type }}
{% if 'id' in sensor %}id: {{ sensor.id }}{% endif %}
{% for sensortype, sensors in sensors.items() %}
{{ sensortype }}:
{% for sensor in sensors %}
- {% for key, value in sensor.items() %} {{ key }}: {{ value }}
{% endfor %}
{% endfor %}
{% endfor %}
{% endif %}
pub:
broker: tcp://{{ broker }}:5559
interval: {{ pub_interval | default(240) }}
#logging:
# level: ERROR

View File

@ -0,0 +1,5 @@
---
- name: restart sudoisbot-proxy
command: docker restart sudoisbot_proxy
become: yes

View File

@ -0,0 +1,3 @@
---
- include: sudoisbot-proxy.yml
tags: sudoisbot-proxy

View File

@ -0,0 +1,23 @@
---
- name: template config
template:
src: sudoisbot-proxy.yml.j2
dest: /usr/local/etc/sudoisbot-proxy.yml
notify: restart sudoisbot-proxy
- name: start docker container
docker_container:
name: "sudoisbot_proxy"
image: "benediktkr/sudoisbot-proxy:latest"
auto_remove: no
detach: yes
pull: yes
restart_policy: "unless-stopped"
state: started
ports:
- "{{ zmq_proxy_backend_port }}:{{ zmq_proxy_backend_port }}/tcp"
- "{{ zmq_proxy_frontend_port }}:{{ zmq_proxy_frontend_port }}/tcp"
volumes:
- /usr/local/etc/sudoisbot-proxy.yml:/etc/sudoisbot-proxy.yml
tags: sudoisbot-proxy-container

View File

@ -0,0 +1 @@

View File

@ -0,0 +1,5 @@
---
- name: restart sudoisbot-sink
command: docker restart sudoisbot_sink
become: yes

View File

@ -0,0 +1,3 @@
---
- include: sudoisbot-sink.yml
tags: sudoisbot-sink

View File

@ -0,0 +1,25 @@
---
- name: template config
template:
src: sudoisbot-sink.yml.j2
dest: /usr/local/etc/sudoisbot-sink.yml
notify: restart sudoisbot-sink
- name: start docker container
docker_container:
name: "sudoisbot_sink"
image: "benediktkr/sudoisbot-sink:latest"
auto_remove: no
detach: yes
pull: yes
restart_policy: "unless-stopped"
state: started
env:
SUDOISBOT_CONF: /etc/sudoisbot-sink.yml
ports:
- "{{ zmq_sink_backend_port }}:{{ zmq_sink_frontend_port }}/tcp"
- "{{ zmq_sink_frontend_port }}:{{ zmq_sink_frontend_port }}/tcp"
volumes:
- /usr/local/etc/sudoisbot-sink.yml:/etc/sudoisbot-sink.yml
tags: sudoisbot-sink-container

View File

@ -0,0 +1,12 @@
---
logging:
dir: "/var/log"
rotation: "50 MB"
level: "DEBUG"
backtrace: False
diagnose: False
proxy_pubsub:
zmq_frontend: tcp://*:{{ zmq_proxy_frontend_port }}
zmq_backend: tcp://*:{{ zmq_proxy_backend_port }}

View File

@ -2,3 +2,4 @@
zmq_proxy_frontend_port: 5559
zmq_proxy_backend_port: 5560
zmq_proxy_capture_port: 5561

View File

@ -1,10 +1,17 @@
---
- name: add {{ myusername }} to groups for sensors
user:
name: "{{ myusername }}"
groups:
- sudoisbot
append: yes
- name: create directory for data
file:
path: /srv/sudoisbot/{{ item }}
path: "{{ sudoisbot.fsroot }}/{{ item }}"
state: directory
mode: '0755'
mode: '0775'
owner: sudoisbot
group: sudoisbot
recurse: yes
@ -15,26 +22,28 @@
src: "{{ item }}.yml.j2"
dest: /usr/local/etc/sudoisbot-{{ item }}.yml
with_items: "{{ sudoisbot.docker_containers }}"
tags:
- sudoisbot-config
- name: start docker containers
docker_container:
name: "{{ item }}"
image: "benediktkr/sudoisbot:latest"
auto_remove: no
detach: yes
pull: yes
restart_policy: "no"
state: started
env:
SUDOISBOT_CONF: "/etc/sudoisbot.yml"
SUDOISBOT_LOGFILE: "/data/sudoisbot.log"
volumes:
- /usr/local/etc/sudoisbot-{{ item }}.yml:/etc/sudoisbot.yml
- /srv/sudoisbot/{{ item }}:/data
command: "{{ item }}"
networks_cli_compatible: no
networks:
- name: bridge
- name: bridgewithdns
# - name: start docker containers
# docker_container:
# name: "{{ item }}"
# image: "benediktkr/sudoisbot:latest"
# auto_remove: no
# detach: yes
# pull: yes
# restart_policy: "no"
# state: started
# env:
# SUDOISBOT_CONF: "/etc/sudoisbot.yml"
# SUDOISBOT_LOGFILE: "/data/sudoisbot.log"
# volumes:
# - /usr/local/etc/sudoisbot-{{ item }}.yml:/etc/sudoisbot.yml
# - "{{ sudoisbot.fsroot }}/{{ item }}:/data"
# command: "{{ item }}"
# networks_cli_compatible: no
# networks:
# - name: bridge
# - name: bridgewithdns
with_items: "{{ sudoisbot.docker_containers }}"
# with_items: "{{ sudoisbot.docker_containers }}"
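
For context, the role reads a sudoisbot dict; a partial sketch of the keys referenced in these tasks and templates (values illustrative):

sudoisbot:
  fsroot: /srv/sudoisbot
  docker_containers:
    - temp_pub
    - rain_pub
  sink:
    topics:
      - temp
      - rain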

View File

@ -2,3 +2,4 @@
frontend_addr: tcp://*:{{ zmq_proxy_frontend_port }}
backend_addr: tcp://*:{{ zmq_proxy_backend_port }}
capture_addr: tcp://*:{{ zmq_proxy_capture_port }}

View File

@ -0,0 +1,14 @@
---
addr: tcp://{{ broker }}:5559
screen:
weather_location: fhain
rotation: 0
msgs:
- "wash hands & 'void the 'rona!"
- "the dog goes woof"
- "doggie doggie what now!?"
- "wash hands and shoes off"
#logging:
# level: ERROR

View File

@ -1,8 +1,12 @@
---
zflux:
addr: "{{ sudoisbot.sink.zflux.addr }}"
topic: "zflux:{{ zflux.zmq.topic }}"
topic: {{ zflux.zmq.topic }}
{% if zflux.addr is defined %}
addr: {{ zflux.addr }}
{% else %}
addr: "tcp://{{ broker }}:5559"
{% endif %}
sink:
addr: tcp://{{ broker }}:5560
@ -11,3 +15,17 @@ sink:
{% for topic in sudoisbot.sink.topics -%}
- {{ topic }}
{% endfor %}
mysql:
host: "{{ mysql }}"
database: sink
user: sink
password: "{{ systemuserlist.sink.mariadb_pass }}"
screen:
rotation: 0
msgs:
- "wash hands & 'void the 'rona!"
- "the dog goes woof"
- "doggie doggie what now!?"
- "wash hands and shoes off"

View File

@ -0,0 +1,17 @@
---
broker: tcp://{{ broker }}:5559
frequency: {{ frequency | default(240) }}
location: {{ location | default(inventory_hostname.split('.')[1]) }}
{% if sensors is defined %}
sensors:
{% for sensor in sensors %}
- {% for key, value in sensor.items() %} {{ key }}: {{ value }}
{% endfor %}
{% endfor %}
{% endif %}
#logging:
# level: ERROR

View File

@ -0,0 +1,8 @@
---
broker: tcp://{{ broker }}:5559
rotation: 0
msg: "wash hands n' avoid the rona!"
#logging:
# level: ERROR

View File

@ -0,0 +1,18 @@
---
addr: tcp://{{ broker }}:5559
location: s21
unifi:
url: {{ sudoisbot.unifi.url }}
username: {{ sudoisbot.unifi.username }}
password: {{ sudoisbot.unifi.password }}
people:
{% for initials, devices in sudoisbot.unifi.people.items() %}
{{ initials }}:
{% for device in devices %}
- {{ device }}
{% endfor %}
{% endfor %}

View File

@ -2,6 +2,7 @@
addr: tcp://{{ broker }}:5559
owm_token: {{ sudoisbot.weather.owm_token }}
frequency: {{ sudoisbot.weather.frequency | default(300) }}
locations:
{% for location in sudoisbot.weather.locations %}

View File

@ -42,17 +42,10 @@
name: telegraf
state: latest
update_cache: true
tags:
- packages
notify: restart telegraf
- name: enable telegraf
service:
name: telegraf
enabled: yes
notify: restart telegraf
- name: template telegraf config
template:
src: telegraf.conf.j2
@ -61,3 +54,56 @@
- restart telegraf
tags:
- telegraf-config
- name: enable telegraf
service:
name: telegraf
enabled: yes
notify: restart telegraf
- name: adding telegraf to docker group
user:
name: telegraf
append: yes
groups:
- docker
when: "'docker' in group_names"
notify: restart telegraf
- name: set postfix permissions for telegraf
file:
dest: "/var/spool/postfix/{{ item }}"
group: mail
mode: 0750
recurse: yes
with_items:
- active
- deferred
- hold
- incoming
notify:
- restart postfix
tags:
- telegraf
- name: set permissions on maildrop dir for telegraf
file:
dest: /var/spool/postfix/maildrop
mode: u=rwx,g=rwx,+t
recurse: yes
notify: restart postfix
tags:
- telegraf
- name: adding telegraf user to postfix related groups
user:
name: telegraf
append: yes
groups:
- postfix
- mail
- postdrop
tags:
- telegraf
when: enable_telegraf
notify: restart telegraf

View File

@ -7,7 +7,7 @@
[agent]
interval = "60s"
interval = "{{ telegraf_interval | default('60s') }}"
## Rounds collection interval to 'interval'
## ie, if interval="10s" then always collect on :00, :10, :20, etc.
round_interval = true
@ -227,9 +227,9 @@
# fielddrop = ["uptime_format"]
{% if 'docker' in role_names %}
#[[inputs.docker]]
# timeout = "5s"
{% if 'docker' in group_names %}
[[inputs.docker]]
timeout = "5s"
{% endif %}

View File

@ -1,5 +1,4 @@
{% set wg_client = wireguard_clients[inventory_hostname] %}
{% set endpoint = wireguard_endpoints[wg_client['endpoint']] %}
{% set wg_main = wireguard_clients[wireguard_main]['ip'] %}
[Interface]
@ -7,10 +6,13 @@ Address = {{ wg_client['ip'] }}/32
{% if wg_client['vpn_dns'] | default(false) %}
DNS = {{ wireguard_dns }}
{% endif %}
{% if wg_client['listen'] | default(false) %}
ListenPort = {{ wireguard_port }}
{% endif %}
PrivateKey = {{ lookup('file', 'private/wireguard/' + inventory_hostname ) }}
PostUp = /usr/local/bin/wg-post-up.sh
{% set endpoint = wireguard_endpoints[wg_client['endpoint']] %}
[Peer]
{% if 'endpoint_hostname' in wg_client %}
Endpoint = {{ wg_client['endpoint_hostname'] }}:{{ wireguard_port }}
@ -24,3 +26,12 @@ AllowedIPs = 0.0.0.0/0
AllowedIPs = {{ wireguard_cidr }}
{% endif %}
PersistentKeepalive = 240
{% for peername in wg_client.clients | default([]) %}
{% set peer = wireguard_clients[peername] %}
# {{ peername }}
[Peer]
PublicKey = {{ lookup('file', 'private/wireguard/' + peername + '.pub' ) }}
AllowedIPs = {{ peer.ip }}/32
{% endfor %}
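
The template resolves everything through wireguard_clients (and wireguard_endpoints); a sketch of one client entry with the keys used above (hostnames and addresses are illustrative):

wireguard_clients:
  sensor01.sudo.is:
    ip: 10.10.0.21
    endpoint: ber
    vpn_dns: true
    listen: false
    clients:
      - sensor02.sudo.is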

View File

@ -1,5 +1,14 @@
---
- name: create keys if they dont exist
shell:
cmd: "wg genkey | tee {{ inventory_hostname }} | wg pubkey > {{ inventory_hostname }}.pub"
chdir: "{{ wireguard_keydir }}"
creates: "{{ wireguard_keydir }}/{{ inventory_hostname }}"
delegate_to: localhost
tags:
- wg-keygen
- name: template iptables rule file
template:
src: "{{ iptables_rules|default('iptables.rules.j2') }}"

View File

@ -15,8 +15,10 @@ ip route add {{ upstream_dns_2 }}/32 via $dgw metric 0
{% endif %}
{% for host in wireguard_endpoints.values() %}
{% set serverip = wireguard_clients[host]['ip'] %}
{% if 'endpoint_for' in wireguard_clients[host] %}
{% set serverip = wireguard_clients[host]['ip'] %}
ping -c 1 {{ serverip }} || true
{% endif %}
{% endfor %}
exit 0

View File

@ -28,7 +28,9 @@ PersistentKeepalive = 240
# {{ peer }}
[Peer]
PublicKey = {{ lookup('file', 'private/wireguard/' + peer + '.pub' ) }}
AllowedIPs = {{ d['ip'] }}/32
{% set client_ips = d.clients | default([]) | map('extract', wireguard_clients) | map(attribute='ip') | list %}
{% set ips = ([d.ip] + client_ips) | map('regex_replace', '$', '/32') | list %}
AllowedIPs = {{ ips | join(", ") }}
{% endif %}
{% endfor %}

View File

@ -4,26 +4,26 @@
template:
src: zflux.yml.j2
dest: /usr/local/etc/zflux.yml
notify: restart zflux
#notify: restart zflux
- name: start docker container
docker_container:
name: "zflux"
image: "benediktkr/zflux:latest"
auto_remove: no
detach: yes
pull: yes
restart_policy: "unless-stopped"
state: started
env:
ZFLUX_LOGFILE: /data/zflux.log
ports:
- "{{ zflux.zmq.bind_port }}:{{ zflux.zmq.bind_port }}/tcp"
volumes:
- /usr/local/etc/zflux.yml:/etc/zflux.yml
- /srv/zflux:/data
networks_cli_compatible: no
networks:
- name: bridge
- name: bridgewithdns
tags: zflux-container
# - name: start docker container
# docker_container:
# name: "zflux"
# image: "benediktkr/zflux:latest"
# auto_remove: no
# detach: yes
# pull: yes
# restart_policy: "unless-stopped"
# state: started
# env:
# ZFLUX_LOGFILE: /data/zflux.log
# ports:
# - "{{ zflux.zmq.bind_port }}:{{ zflux.zmq.bind_port }}/tcp"
# volumes:
# - /usr/local/etc/zflux.yml:/etc/zflux.yml
# - /srv/zflux:/data
# networks_cli_compatible: no
# networks:
# - name: bridge
# - name: bridgewithdns
# tags: zflux-container

View File

@ -1,13 +1,17 @@
---
# should be capable of both bind and connect tbh
zmq:
topic: 'zflux:{{ zflux.zmq.topic }}'
{% if zflux.zmq.bind_port is defined %}
bind: 'tcp://*:{{ zflux.zmq.bind_port|default("5558") }}'
topic: {{ zflux.zmq.topic }}
{% if zflux.bind is defined %}
# bind: 'tcp://*:{{ zflux.zmq.bind_port|default("5558") }}'
bind: {{ zflux.bind }}
{% else %}
connect: 'tcp://{{ broker }}:5560'
{% endif %}
influxdb:
host: {{ ingest }}
db: {{ zflux.influxdb.user }}