catching up changes to some misc roles #56

Merged
ben merged 27 commits from misc into main 2023-04-30 23:10:24 +00:00
54 changed files with 1251 additions and 188 deletions

30
.gitignore vendored
View File

@ -67,32 +67,4 @@ group_vars/
host_vars/ host_vars/
playbooks/ playbooks/
/hosts.yml /*.yml
/common.yml
/homeservers.yml
/hosts-sensors.yml
/hosts.yml
/letsencrypt.yml
/lychener54.yml
/monitoring-server.yml
/nextcloud.yml
/sensors.yml
/site.yml
/vpnservers.yml
/site2.yml
/mathom.yml
/haproxy.yml
/auth.yml
/mainframe.yml
/nginx.yml
/common.yml
/turn.yml
/sensnet.yml
/backup.yml
/jenkins.yml
/kvm.yml
/paperless.yml
/vaultwarden.yml
/mirrors.yml
/lb.yml
/gitea-proxy.yml

View File

@ -8,7 +8,6 @@
- name: restart authelia container - name: restart authelia container
docker_container: docker_container:
name: authelia name: authelia
image: authelia/authelia
state: started state: started
restart: true restart: true

View File

@ -105,7 +105,7 @@
- name: start container - name: start container
docker_container: docker_container:
name: authelia name: authelia
image: authelia/authelia:latest image: ghcr.io/authelia/authelia:master
restart_policy: "unless-stopped" restart_policy: "unless-stopped"
auto_remove: false auto_remove: false
detach: true detach: true

View File

@ -17,7 +17,7 @@ def parse_args():
parser.add_argument('--config', default='/usr/local/etc/restic.json') parser.add_argument('--config', default='/usr/local/etc/restic.json')
parser.add_argument('--log-level', default='INFO') parser.add_argument('--log-level', default='INFO')
parser.add_argument('--dry-run', action='store_true') parser.add_argument('--dry-run', action='store_true')
parser.add_argument('--no-excludes', action='store_true') parser.add_argument('--non-interactive', action='store_true')
parser.add_argument('--log-file', parser.add_argument('--log-file',
default='/var/log/backup/restic-backups.log') default='/var/log/backup/restic-backups.log')
@ -85,19 +85,19 @@ def list_sshfs_mounts():
return [a['target'] for a in mounts['filesystems']] return [a['target'] for a in mounts['filesystems']]
def run_restic(repo_url, restic_args, no_excludes, dry_run): def run_restic(repo_url, restic_args, dry_run, non_interactive):
restic_cmd = ["restic", "-r", repo_url] restic_cmd = ["restic", "-r", repo_url]
restic_cmd.extend(restic_args) restic_cmd.extend(restic_args)
if "backup" in restic_args and not no_excludes: if "backup" in restic_args:
restic_cmd.extend([ restic_cmd.extend([
"--exclude-file", "/usr/local/etc/backup-excludes.txt" "--exclude-file", "/usr/local/etc/backup-excludes.txt"
]) ])
for item in list_sshfs_mounts(): for item in list_sshfs_mounts():
restic_cmd.extend([ restic_cmd.extend([
"--exclude", item "--exclude", item
]) ])
logger.debug(" ".join(restic_cmd)) logger.debug(" ".join(restic_cmd))
if dry_run: if dry_run:
@ -128,7 +128,11 @@ def main():
repo_config = config[args.repo] repo_config = config[args.repo]
prepare_env(args.repo, repo_config) prepare_env(args.repo, repo_config)
return run_restic( return run_restic(
repo_config['url'], restic_args, args.no_excludes, args.dry_run) repo_config['url'],
restic_args,
args.dry_run,
args.non_interactive
)
if __name__ == "__main__": if __name__ == "__main__":

View File

@ -47,18 +47,21 @@
- dns-provider-domains.json - dns-provider-domains.json
tags: tags:
- certbot-dns-config - certbot-dns-config
- certbot-domains
- name: template renewal configs # - name: template renewal configs
template: # template:
src: renewal.ini.j2 # src: renewal.ini.j2
dest: /etc/letsencrypt/renewal/{{ item.name }}.conf # dest: /etc/letsencrypt/renewal/{{ item.name }}.conf
owner: root # owner: root
group: root # group: root
mode: 0644 # mode: 0644
with_items: "{{ letsencrypt_sni }}" # with_items: "{{ letsencrypt_sni }}"
loop_control: # loop_control:
label: "{{ item.name }}" # label: "{{ item.name }}"
when: false # when: false
# tags:
# - certbot-domains
# - name: temp force renew all certs # - name: temp force renew all certs
# command: /usr/local/bin/certbot certonly --force-renewal -d {{ item.name }} # command: /usr/local/bin/certbot certonly --force-renewal -d {{ item.name }}
@ -79,6 +82,8 @@
- letsencrypt.sh - letsencrypt.sh
- letsencrypt-hook.py - letsencrypt-hook.py
- letsencrypt-new.py - letsencrypt-new.py
tags:
- certbot-scripts
- name: cron file - name: cron file
template: template:

View File

@ -15,23 +15,27 @@ def renewed_cert(name):
dest_dir = os.path.join(cert_repo, name) dest_dir = os.path.join(cert_repo, name)
os.makedirs(dest_dir, exist_ok=True) os.makedirs(dest_dir, exist_ok=True)
for fname in filenames: try:
src = os.path.join(src_dir, fname) for fname in filenames:
dest = os.path.join(dest_dir, fname) src = os.path.join(src_dir, fname)
shutil.copy(src, dest) dest = os.path.join(dest_dir, fname)
shutil.copy(src, dest)
privkey = os.path.join(dest_dir, 'privkey.pem') privkey = os.path.join(dest_dir, 'privkey.pem')
os.chmod(privkey, 0o640) os.chmod(privkey, 0o640)
shutil.chown(privkey, group="adm") shutil.chown(privkey, group="adm")
print(f"renewed: {name}")
matrixmsg.send(f"cert: `{name}`")
except FileNotFoundError:
matrixmsg.send(f"no files found for `{name}` in `RENEWED_DOAINS`")
print(f"renewed: {name}")
matrixmsg.send(f"cert: `{name}`")
def main(): def main():
try: try:
for name in os.environ['RENEWED_DOMAINS'].split(" "): for name in os.environ['RENEWED_DOMAINS'].split(" "):
renewed_cert(name) renewed_cert(name)
except IndexError: except KeyError:
print("error: no 'RENEWED_DOMAINS' env var present!") print("error: no 'RENEWED_DOMAINS' env var present!")

View File

@ -3,21 +3,32 @@
import subprocess import subprocess
import sys import sys
import json import json
import argparse
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("fqdn", help="domain to request a cert for")
parser.add_argument("--wildcard", action="store_true", help="wildcard cert")
parser.add_argument("--www", action="store_true", help="include www.")
return parser.parse_args()
def main(): def main():
try: args = parse_args()
fqdn = sys.argv[1] return certbot_new(args.fqdn, wildcard=args.wildcard, www=args.www)
except IndexError:
print(f"usage: {sys.argv[0]} <fqdn>")
sys.exit(1)
return certbot_new(fqdn)
def certbot_new(fqdn):
def certbot_new(fqdn, wildcard, www):
dns_file = "dns-provider-domains.json" dns_file = "dns-provider-domains.json"
with open(f'/usr/local/etc/letsencrypt/{dns_file}', 'r') as f: with open(f'/usr/local/etc/letsencrypt/{dns_file}', 'r') as f:
domains = json.load(f) domains = json.load(f)
# add a --with-www or something instead of guessing like this
# and make 'domain' a list 'domains', and append/extend 'certbot_cmd'
# with a -d for each.
dotted = fqdn.split('.') dotted = fqdn.split('.')
domain = ".".join(dotted[1:]) domain = ".".join(dotted[1:])
@ -31,9 +42,13 @@ def certbot_new(fqdn):
certbot_cmd = [ certbot_cmd = [
"/usr/local/bin/certbot", "/usr/local/bin/certbot",
"certonly", "certonly",
"-d", fqdn, dns_flag,
dns_flag "-d", fqdn
] ]
if wildcard:
certbot_cmd.extend(["-d", f"*.{fqdn}"])
if www:
certbot_cmd.extend(["-d", f"www.{fqdn}"])
print(" ".join(certbot_cmd)) print(" ".join(certbot_cmd))

View File

@ -1,30 +1,5 @@
--- ---
- name: ensure hostname letsencrypt cert exists
command:
cmd: /usr/local/bin/letsencrypt-new.py {{ inventory_hostname }}
creates: /usr/local/etc/letsencrypt/live/{{ inventory_hostname }}/fullchain.pem
delegate_to: localhost
tags:
- letsencrypt-certs
- letsencrypt-hostname-cert
- name: install hostname cert
copy:
src: "/usr/local/etc/letsencrypt/live/{{ item }}"
dest: "/usr/local/etc/certs/"
owner: root
group: root
mode: 0755
tags:
- letsencrypt
- letsencrypt-certs
#notify: reload nginx
vars:
prediff_cmd: echo
with_items:
- "{{ inventory_hostname }}"
- name: install current letsencrypt wildcards where they should be installed - name: install current letsencrypt wildcards where they should be installed
copy: copy:
src: "/usr/local/etc/letsencrypt/live/{{ item }}" src: "/usr/local/etc/letsencrypt/live/{{ item }}"
@ -35,6 +10,7 @@
tags: tags:
- letsencrypt - letsencrypt
- letsencrypt-wildcard - letsencrypt-wildcard
- letsencrypt-certs
#notify: reload nginx #notify: reload nginx
vars: vars:
prediff_cmd: echo prediff_cmd: echo
@ -57,3 +33,4 @@
tags: tags:
- letsencrypt - letsencrypt
- letsencrypt-wildcard - letsencrypt-wildcard
- letsencrypt-certs

View File

@ -6,6 +6,26 @@
tags: tags:
- hostname - hostname
- name: ensure hostname in /etc/hosts
lineinfile:
dest: /etc/hosts
line: "{{ etc_hosts_ip }} {{ inventory_hostname }} {{ inventory_hostname.split('.')[0] }}"
state: present
when: etc_hosts_ip is defined
tags:
- etc-hosts
- name: ensure no 127.0.0.0/8 entries for hostname in /etc/hosts (when required)
lineinfile:
dest: /etc/hosts
regexp: "^127.*{{ item }}.*"
state: absent
with_items:
- "{{ inventory_hostname }}"
- "{{ inventory_hostname.split('.')[0] }}"
when: etc_hosts_rm|default(false)
tags: etc-hosts
# - name: set image hostname # - name: set image hostname
# hostname: # hostname:
# name: "sensor-image" # name: "sensor-image"
@ -238,6 +258,7 @@
- dnsutils - dnsutils
- file - file
- git - git
- gnupg
- haveged - haveged
- htop - htop
- iotop - iotop
@ -292,13 +313,14 @@
- requests - requests
- psutil - psutil
- humanize # added because of telegraf/vnstat.py - humanize # added because of telegraf/vnstat.py
state: latest state: present
executable: pip3 executable: pip3
tags: tags:
- pip - pip
- common-pip - common-pip
- common-pip-packages - common-pip-packages
- packages - packages
- pip-latest
- name: remove pip packages that are only needed on mainframe - name: remove pip packages that are only needed on mainframe
pip: pip:
@ -346,7 +368,7 @@
- update-motd - update-motd
- landscape-client - landscape-client
- landscape-common - landscape-common
autoremove: false autoremove: true
state: absent state: absent
purge: true purge: true
tags: packages tags: packages
@ -590,3 +612,14 @@
tags: tags:
- packages - packages
- apt.sudo.is - apt.sudo.is
- name: ensure /deadspace exists
file:
path: /deadspace
state: directory
owner: root
group: "{{ grouplist.media.gid }}"
mode: "0775"
tags:
- deadspace
- deadspace-dir

View File

@ -3,3 +3,4 @@
- import_tasks: common.yml - import_tasks: common.yml
tags: tags:
- common - common
- common-base

View File

@ -1,10 +1,6 @@
# distributed from ansible # distributed from ansible
# m h dom mon dow # m h dom mon dow
*/30 * * * * root /usr/local/bin/reboot_required.py 0 8 * * * root /usr/local/bin/reboot_required.py
# @reboot /usr/local/bin/reboot_required.py
# #

View File

@ -104,10 +104,13 @@
- docker - docker
- docker-compose - docker-compose
executable: pip3 executable: pip3
state: present
tags: tags:
- packages - packages
- pip - pip
- docker-compose - docker-compose
- docker-pip
- pip-latest
- name: set up bridged network with dns - name: set up bridged network with dns
docker_network: docker_network:
@ -119,6 +122,7 @@
internal: no internal: no
tags: tags:
- docker-network - docker-network
- docker-network-bridgewithdns
- name: install systemd config for container - name: install systemd config for container
template: template:
@ -187,6 +191,8 @@
file: file:
state: directory state: directory
path: "{{ systemuserlist[ansible_user].home }}/.docker" path: "{{ systemuserlist[ansible_user].home }}/.docker"
owner: "{{ systemuserlist[ansible_user].uid }}"
group: "{{ systemuserlist[ansible_user].gid }}"
mode: 0700 mode: 0700
tags: tags:
- docker-auth - docker-auth
@ -195,6 +201,8 @@
copy: copy:
dest: "{{ systemuserlist[ansible_user].home }}/.docker/config.json" dest: "{{ systemuserlist[ansible_user].home }}/.docker/config.json"
mode: 0750 mode: 0750
owner: "{{ systemuserlist[ansible_user].uid }}"
group: "{{ systemuserlist[ansible_user].gid }}"
content: "{{ docker_config | to_nice_json }}" content: "{{ docker_config | to_nice_json }}"
tags: tags:
- docker-auth - docker-auth

View File

@ -1,12 +1,14 @@
dotfiles_path: "/srv/dotfiles" dotfiles_path: "/srv/dotfiles"
dotfiles_repo: "https://git.sudo.is/ben/dotfiles.git" dotfiles_repo: "https://git.sudo.is/ben/dotfiles.git"
dotfiles: dotfiles:
- { src: 'zsh/.zshrc', dest: '.zshrc', root: yes } - { src: 'zsh/.zshrc', dest: '.zshrc', root: false }
- { src: 'zsh/jreese2.zsh-theme', dest: '.zsh.d/', root: yes} - { src: 'zsh/jreese2.zsh-theme', dest: '.zsh.d/', root: false }
- { src: 'emacs/.emacs', dest: '.emacs', root: yes } - { src: '.emacs.d', dest: '.emacs.d', root: false }
- { src: 'tmux/.tmux.simple.conf', dest: '.tmux.conf', root: yes} - { src: 'tmux/.tmux.simple.conf', dest: '.tmux.conf', root: true}
- { src: 'tmux/.tmux.remote.conf', dest: '.tmux.remote.conf', root: yes} - { src: 'tmux/.tmux.remote.conf', dest: '.tmux.remote.conf', root: true}
- { src: 'ssh/config', dest: '.ssh/config', root: no } - { src: 'ssh/config', dest: '.ssh/config', root: false }
dotfiles_rm:
- .emacs
github_hostkey: "|1|UkAzHpv3U4bZhaeBPmtZZi80Dos=|UEE2HqFXOa7wHG34fg6S2qC6n4g= ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==" github_hostkey: "|1|UkAzHpv3U4bZhaeBPmtZZi80Dos=|UEE2HqFXOa7wHG34fg6S2qC6n4g= ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ=="
sudo_hostkey: "|1|JmGLvQJOAO/V8IIDJZgluT1CY84=|Isb+G3aPnbW0wdLidEphxiOppdM= ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDBqctfVYBZfEuZj2JWrMjzh3y5QZsdrh40yv29gHaE3Yx4mOkB2eCUVm898r9Qm1e/5bYey/NHp6ZSEy1y7ZA6jJ8lbzFXxbYPXDysw5rvIyGKbe/A0WeFF88ZgZejcxvMZQVNDY/oOKw2lr6LDiCEne7wkaVKHxnztyw47nq963aSATrysCc45TNG93bGbNEGGqp47MUdL/ZAy2+zQnDtSq2JrQD9mgIiBOOdWMnAA87QgcHH536Zq7X3/JX309cDJjUfkedBMcrwcCy9juQEaQdKb6RdKICcI3vXvPZs877mskjptYcacWEMnTBRdutaYcBCF9DlehaqOY2ERdcR" sudo_hostkey: "|1|JmGLvQJOAO/V8IIDJZgluT1CY84=|Isb+G3aPnbW0wdLidEphxiOppdM= ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDBqctfVYBZfEuZj2JWrMjzh3y5QZsdrh40yv29gHaE3Yx4mOkB2eCUVm898r9Qm1e/5bYey/NHp6ZSEy1y7ZA6jJ8lbzFXxbYPXDysw5rvIyGKbe/A0WeFF88ZgZejcxvMZQVNDY/oOKw2lr6LDiCEne7wkaVKHxnztyw47nq963aSATrysCc45TNG93bGbNEGGqp47MUdL/ZAy2+zQnDtSq2JrQD9mgIiBOOdWMnAA87QgcHH536Zq7X3/JX309cDJjUfkedBMcrwcCy9juQEaQdKb6RdKICcI3vXvPZs877mskjptYcacWEMnTBRdutaYcBCF9DlehaqOY2ERdcR"

View File

@ -3,7 +3,7 @@
lineinfile: lineinfile:
path: /etc/ssh/ssh_known_hosts path: /etc/ssh/ssh_known_hosts
line: "{{ item.key }}" line: "{{ item.key }}"
create: yes create: true
loop_control: loop_control:
label: "{{ item.name }}" label: "{{ item.name }}"
with_items: with_items:
@ -38,7 +38,7 @@
- name: put my dotfiles in place if dotfiles repo isnt human-managed - name: put my dotfiles in place if dotfiles repo isnt human-managed
copy: copy:
remote_src: yes remote_src: true
src: "{{ dotfiles_path }}/dotfiles/{{ item.src }}" src: "{{ dotfiles_path }}/dotfiles/{{ item.src }}"
dest: "~/{{ item.dest }}" dest: "~/{{ item.dest }}"
with_items: with_items:
@ -52,7 +52,7 @@
- name: put dotfiles for root in place - name: put dotfiles for root in place
copy: copy:
remote_src: yes remote_src: true
src: "{{ dotfiles_path }}/dotfiles/{{ item.src }}" src: "{{ dotfiles_path }}/dotfiles/{{ item.src }}"
dest: "/root/{{ item.dest }}" dest: "/root/{{ item.dest }}"
owner: root owner: root
@ -75,3 +75,13 @@
- "{{ dotfiles }}" - "{{ dotfiles }}"
when: when:
- not item.root|bool - not item.root|bool
- name: rm outdated dotfile links - name: rm outdated dotfile links
file:
path: "~/{{ item }}"
state: absent
loop_control:
label: "{{ item }}"
become_user: "{{ myusername }}"
with_items:
- "{{ dotfiles_rm }}"

View File

@ -37,7 +37,7 @@
- name: authorized_keys for dropbear - name: authorized_keys for dropbear
template: template:
src: "private/sshkeys/{{ myusername }}.authorized_keys" src: "private/sshkeys/{{ myusername }}.authorized_keys"
dest: /etc/dropbear/initramfs/authorized_keys dest: "{{ dropbear_initramfs_dir|default('/etc/dropbear/initramfs') }}/authorized_keys"
mode: 0700 mode: 0700
force: true force: true
notify: update initramfs notify: update initramfs

View File

@ -35,7 +35,7 @@
# from role/gitea/files/tmpl/ # from role/gitea/files/tmpl/
- name: data/gitea/templates - name: data/gitea/templates
- name: data/gitea/templates/custom - name: data/gitea/templates/custom
- name: data/gitea/templates/user/dashboard #- name: data/gitea/templates/user/dashboard
- name: data/gitea/conf - name: data/gitea/conf
- name: data/gitea/tmp - name: data/gitea/tmp
@ -92,8 +92,8 @@
- home.tmpl - home.tmpl
- custom/extra_links.tmpl - custom/extra_links.tmpl
- custom/extra_tabs.tmpl - custom/extra_tabs.tmpl
- user/dashboard/feeds.tmpl #- user/dashboard/feeds.tmpl
- user/dashboard/repolist.tmpl #- user/dashboard/repolist.tmpl
tags: tags:
- gitea-templates - gitea-templates
when: gitea_custom_tmpl_enabled when: gitea_custom_tmpl_enabled

View File

@ -67,6 +67,10 @@ server {
# gitea itself can also serve this file # gitea itself can also serve this file
alias {{ gitea_user.home }}/data/gitea/sitemap.xml; alias {{ gitea_user.home }}/data/gitea/sitemap.xml;
} }
location /user/login {
return 302 /user/oauth2/{{ gitea_oidc_name }};
}
location /user/forgot_password { location /user/forgot_password {
return 302 https://{{ authelia_login_url }}/reset-password/step1; return 302 https://{{ authelia_login_url }}/reset-password/step1;

View File

@ -1,6 +1,11 @@
<?xml version="1.0" encoding="UTF-8"?> <?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.sitemaps.org/schemas/sitemap/0.9 http://www.sitemaps.org/schemas/sitemap/0.9/sitemap.xsd"> <urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.sitemaps.org/schemas/sitemap/0.9 http://www.sitemaps.org/schemas/sitemap/0.9/sitemap.xsd">
<url>
<loc>https://www.{{ domain }}/</loc>
<priority>1.00</priority>
</url>
<url> <url>
<loc>https://{{ gitea_url }}/</loc> <loc>https://{{ gitea_url }}/</loc>
<priority>1.00</priority> <priority>1.00</priority>

View File

@ -0,0 +1,4 @@
---
dependencies:
- haproxy

View File

@ -0,0 +1,3 @@
---
- import_tasks: jellyfin-cache.yml
tags: jellyfin-cache

View File

@ -0,0 +1,135 @@
# also change haproxy role like this
#
# haproxy role should set up haproxy, not configure it
#
# this role templates its copy of haproxy.cfg
# and then the lb role templates a different copy
# but both have haproxy as dependency
#
# haproxy does not support including config files
# or splitting the config into multiple parts.
# https://www.haproxy.com/de/blog/haproxy-log-customization/
global
chroot /var/lib/haproxy
maxconn 60000
# default:
{# #stats socket /run/haproxy/admin.sock mode 660 level admin expose-fd listeners
# #stats socket /run/haproxy/admin.sock user haproxy group haproxy mode 660 level admin
# #stats timeout 30s #}
user haproxy
group haproxy
daemon
{# # number of processes
# #nbproc 2
# # number of threads
# #nbthread 4
#
# # Default SSL material locations
# ca-base /etc/ssl/certs
# crt-base /etc/ssl/private
#
# # See: https://ssl-config.mozilla.org/#server=haproxy&server-version=2.0.3&config=intermediate
# ssl-default-bind-ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384
# ssl-default-bind-ciphersuites TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256
# ssl-default-bind-options ssl-min-ver TLSv1.2 no-tls-tickets #}
defaults
timeout connect 5000
timeout client 50000
timeout server 50000
{# defaults
#
#
# # mode http
# #option dontlognull
# #option log-health-checks
# errorfile 400 /etc/haproxy/errors/400.http
# errorfile 403 /etc/haproxy/errors/403.http
# errorfile 408 /etc/haproxy/errors/408.http
# errorfile 500 /etc/haproxy/errors/500.http
# errorfile 502 /etc/haproxy/errors/502.http
# errorfile 503 /etc/haproxy/errors/503.http
# errorfile 504 /etc/haproxy/errors/504.http
# #log-format '{"type":"haproxy","timestamp":%Ts,"http_status":%ST,"http_request":"%r","remote_addr":"%ci","bytes_read":%B,"upstream_addr":"%si","backend_name":"%b","retries":%rc,"bytes_uploaded":%U,"upstream_response_time":"%Tr","upstream_connect_time":"%Tc","session_duration":"%Tt","termination_state":"%ts"}'
#
# # http:
# # log-format '{"pid":%pid,"haproxy_frontend_type":"http","haproxy_process_concurrent_connections":%ac,"haproxy_frontend_concurrent_connections":%fc,"haproxy_backend_concurrent_connections":%bc,"haproxy_server_concurrent_connections":%sc,"haproxy_backend_queue":%bq,"haproxy_server_queue":%sq,"haproxy_client_request_send_time":%Tq,"haproxy_queue_wait_time":%Tw,"haproxy_server_wait_time":%Tc,"haproxy_server_response_send_time":%Tr,"response_time":%Td,"session_duration":%Tt,"request_termination_state":"%tsc","haproxy_server_connection_retries":%rc,"remote_addr":"%ci","remote_port":%cp,"frontend_addr":"%fi","frontend_port":%fp,"frontend_ssl_version":"%sslv","frontend_ssl_ciphers":"%sslc","request_method":"%HM","request_uri":"%[capture.req.uri,json(utf8s)]","request_http_version":"%HV","host":"%[capture.req.hdr(0)]","referer":"%[capture.req.hdr(1),json(utf8s)]","haproxy_frontend_name":"%f","haproxy_backend_name":"%b","haproxy_server_name":"%s","status":%ST,"response_size":%B,"request_size":%U}' #}
listen health
bind :{{ haproxy_stats_port }}
# interface wg0 interface lo
mode http
acl health_allowed src {{ wg_clients[ansible_control_host].ip }}/32
acl health_allowed src 127.0.0.1/32
http-request deny unless health_allowed
stats enable
stats uri /stats
stats refresh 10s
stats admin unless FALSE {# unless LOCALHOST #}
monitor-uri /health
option httpchk
option dontlognull
frontend https-redirect
mode http
bind :80
http-request redirect scheme https
frontend {{ lb_url }}
bind *:443
mode tcp
log /dev/log local0
option tcplog
log-format '{"pid":%pid,"server_name":"%b", "haproxy_frontend_type":"tcp","haproxy_process_concurrent_connections":%ac,"haproxy_frontend_concurrent_connections":%fc,"haproxy_backend_concurrent_connections":%bc,"haproxy_server_concurrent_connections":%sc,"haproxy_backend_queue":%bq,"haproxy_server_queue":%sq,"haproxy_queue_wait_time":%Tw,"haproxy_server_wait_time":%Tc,"response_time":%Td,"session_duration":%Tt,"request_termination_state":"%tsc","haproxy_server_connection_retries":%rc,"remote_addr":"%ci","remote_port":%cp,"frontend_addr":"%fi","frontend_port":%fp,"frontend_ssl_version":"%sslv","frontend_ssl_ciphers":"%sslc","haproxy_frontend_name":"%f","haproxy_backend_name":"%b","haproxy_server_name":"%s","response_size":%B,"request_size":%U}'
tcp-request inspect-delay 5s
tcp-request content accept if { req_ssl_hello_type 1 }
{% for item in lb_tcp -%}
use_backend {{ item.fqdn }} if { req.ssl_sni -i {{ item.fqdn }} }
{% endfor %}
{# balance leastconn #}
{#
# from backend (not used):
default-server check maxconn 20
#}
{% for item in lb_tcp -%}
{% if item.proxy_protocol|default(true) -%}
backend {{ item.fqdn }}
mode tcp
balance leastconn
{% if item.tcp_keepalive|default(false) -%}
option tcpka
{% endif -%}
{% for origin in item.origins -%}
server {{ origin.name }} {{ origin.name }}:{{ origin.port | default(40443) }} send-proxy-v2 check
{% endfor %}
{% else -%}
backend {{ item.fqdn }}
mode tcp
option ssl-hello-chk
{% if item.tcp_keepalive|default(false) -%}
option tcpka
{% endif -%}
{% for origin in item.origins -%}
server {{ origin.name }} {{ origin.name }}:{{ origin.port | default(443) }} check
{% endfor %}
{% endif %}
{% endfor %}
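A sketch of the lb_tcp structure this template assumes (hypothetical hostnames and ports, inferred from the loops above): each entry names an SNI-routed frontend and its origin servers, with optional proxy_protocol and tcp_keepalive toggles.

# group_vars sketch (hypothetical values)
lb_tcp:
  - fqdn: git.example.com
    proxy_protocol: true        # default true: origins get send-proxy-v2, default port 40443
    tcp_keepalive: false
    origins:
      - name: web1
        port: 40443
  - fqdn: www.example.com
    proxy_protocol: false       # plain TCP pass-through with ssl-hello-chk, default port 443
    origins:
      - name: web2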

View File

@ -8,10 +8,13 @@ server {
# listen [::]:443 ssl; # listen [::]:443 ssl;
include listen-proxy-protocol.conf; include listen-proxy-protocol.conf;
include /etc/nginx/authelia_internal.conf;
server_name {{ apt_url }}; server_name {{ apt_url }};
root /var/www/{{ apt_url }}; root /var/www/{{ apt_url }};
location / { location / {
include /etc/nginx/require_auth.conf;
index index.html index.htm; index index.html index.htm;
autoindex on; autoindex on;
autoindex_exact_size off; autoindex_exact_size off;

15
roles/lb/tasks/main.yml Normal file
View File

@ -0,0 +1,15 @@
---
# - name: sshd AllowedUsers config
# template:
# src: 10-lb.conf.j2
# dest: /etc/ssh/sshd_config.d/10-lb.conf
# owner: root
# group: root
# mode: '0644'
# tags:
# - sshd
# - gitea
# notify:
# - reload ssh
# - restart ssh

View File

@ -0,0 +1,3 @@
# {{ ansible_managed }}
AllowUsers {{ lb_sshd_allow_users | join(" ") }}
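
The template assumes lb_sshd_allow_users is a simple list of usernames joined into one AllowUsers line, for example (hypothetical values):

# group_vars sketch (hypothetical)
lb_sshd_allow_users:
  - ben
  - git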

View File

@ -217,6 +217,8 @@
- nginx - nginx
- mainframe-nginx - mainframe-nginx
- static-sites - static-sites
- static
- www
notify: reload nginx notify: reload nginx
- name: archives log dir and cache - name: archives log dir and cache

View File

@ -34,11 +34,16 @@ server {
ssl_session_timeout 5m; ssl_session_timeout 5m;
add_header Referrer-Policy "no-referrer" always;
add_header X-Content-Type-Options "nosniff" always;
add_header X-Download-Options "noopen" always;
add_header X-Frame-Options "SAMEORIGIN" always;
add_header X-Permitted-Cross-Domain-Policies "none" always;
add_header X-Robots-Tag "none" always; add_header X-Robots-Tag "none" always;
add_header X-XSS-Protection "1; mode=block" always;
# for /assets
#add_header X-Content-Type-Options "nosniff" always;
#add_header X-Permitted-Cross-Domain-Policies "none" always;
#add_header Referrer-Policy "no-referrer" always;
#add_header X-Download-Options "noopen" always;
#add_header X-Frame-Options "SAMEORIGIN" always;
#add_header X-XSS-Protection "1; mode=block" always;
add_header "Access-Control-Allow-Origin" "*" always;
} }

View File

@ -150,7 +150,7 @@ innodb_io_capacity=4000
# you can put MariaDB-only options here # you can put MariaDB-only options here
[mariadb] [mariadb]
# This group is only read by MariaDB-10.3 servers. # This group is only read by MariaDB-10.6 servers.
# If you use the same .cnf file for MariaDB of different versions, # If you use the same .cnf file for MariaDB of different versions,
# use this group for options that older servers don't understand # use this group for options that older servers don't understand
# [mariadb-10.3] # [mariadb-10.6]

View File

@ -26,7 +26,7 @@
- uwsgi - uwsgi
- uwsgi-plugin-python3 - uwsgi-plugin-python3
- apache2-utils - apache2-utils
- wwwsudois #- wwwsudois
update_cache: true update_cache: true
state: latest state: latest
environment: environment:
@ -34,10 +34,24 @@
when: not skip_apt|default(false) when: not skip_apt|default(false)
tags: tags:
- packages - packages
- wwwsudois # - wwwsudois
- www.sudo.is # - www.sudo.is
- apt.sudo.is - apt.sudo.is
- name: copy nginx_default_cert
copy:
src: "private/nginx_default_cert"
dest: "/usr/local/etc/certs/"
owner: root
group: root
mode: 0755
tags:
- nginx-cert
- nginx-default-vhost
- nginx-default-cert
notify: reload nginx
vars:
prediff_cmd: echo
Review

avoiding hostname certs
- name: make cache dir - name: make cache dir
file: file:
@ -103,6 +117,8 @@
tags: tags:
- nginx-config - nginx-config
- nginx-default-vhost - nginx-default-vhost
- nginx-default-cert
- nginx-cert
- name: cleanup - name: cleanup
file: file:

View File

@ -64,8 +64,13 @@ server {
ssl_session_timeout 5m; ssl_session_timeout 5m;
{% if inventory_hostname == ansible_control_host -%}
ssl_certificate /usr/local/etc/certs/{{ inventory_hostname }}/fullchain.pem; ssl_certificate /usr/local/etc/certs/{{ inventory_hostname }}/fullchain.pem;
ssl_certificate_key /usr/local/etc/certs/{{ inventory_hostname }}/privkey.pem; ssl_certificate_key /usr/local/etc/certs/{{ inventory_hostname }}/privkey.pem;
{% else -%}
ssl_certificate /usr/local/etc/certs/nginx_default_cert/pubkey.pem;
ssl_certificate_key /usr/local/etc/certs/nginx_default_cert/privkey.pem;
{% endif %}
add_header Referrer-Policy "no-referrer" always; add_header Referrer-Policy "no-referrer" always;
add_header X-Content-Type-Options "nosniff" always; add_header X-Content-Type-Options "nosniff" always;

View File

@ -8,6 +8,9 @@ openldap_root: "{{ systemuserlist.openldap.home }}"
ldap_human_users: "{{ userlist.values()|selectattr('ldap_enabled', 'true') }}" ldap_human_users: "{{ userlist.values()|selectattr('ldap_enabled', 'true') }}"
ldap_human_users_in_groups: "{{ ldap_human_users | selectattr('ldap_groups') }}" ldap_human_users_in_groups: "{{ ldap_human_users | selectattr('ldap_groups') }}"
ldap_system_users: "{{ systemuserlist.values()|selectattr('ldap_enabled', 'true') }}"
ldap_system_users_in_groups: "{{ ldap_system_users | selectattr('ldap_groups') }}"
ldap_only_users_enabled: "{{ ldap_only_users.values() | selectattr('ldap_enabled', 'true') }}" ldap_only_users_enabled: "{{ ldap_only_users.values() | selectattr('ldap_enabled', 'true') }}"
ldap_linux_usernames_disabled: "{{ userlist.values()|selectattr('ldap_enabled', 'false') | map(attribute='username') }}" ldap_linux_usernames_disabled: "{{ userlist.values()|selectattr('ldap_enabled', 'false') | map(attribute='username') }}"
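
The selectattr filters above assume systemuserlist entries shaped roughly like the sketch below (hypothetical name and ids; field names taken from the ldap_entry task that consumes ldap_system_users later in this change).

# hypothetical systemuserlist entry picked up by ldap_system_users
systemuserlist:
  gitea:
    username: gitea
    uid: 990
    gid: 990
    home: /var/lib/gitea
    shell: /usr/sbin/nologin
    ldap_enabled: true
    ldap_groups:
      - services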

View File

@ -187,11 +187,13 @@
bind_pw: "{{ openldap_admin_pass }}" bind_pw: "{{ openldap_admin_pass }}"
tags: tags:
- ldap-users - ldap-users
- ldap-system-users
with_items: with_items:
- users - users
- usergroups - usergroups
- groups - groups
- services - services
- systems
- hosts - hosts
# this module doesnt change existing users if they exist, so it is setting anything that a user # this module doesnt change existing users if they exist, so it is setting anything that a user
@ -242,6 +244,31 @@
- ldap-users - ldap-users
- ldap-only-users - ldap-only-users
- name: add inventory system users
community.general.ldap_entry:
dn: "uid={{ item.username }},ou=systems,{{ openldap_dc }}"
objectClass:
- inetOrgPerson
- posixAccount
state: present
attributes:
sn: "{{ item.username }}"
cn: "{{ item.username }}"
mail: "{{ item.username }}@{{ openldap_domain }}"
displayName: "{{ item.username }}"
uidNumber: "{{ item.uid }}"
gidNumber: "{{ item.gid }}"
homeDirectory: "{{ item.home }}"
loginShell: "{{ item.shell | default('/dev/nologin')}}"
server_uri: ldap://{{ openldap_url }}/
bind_dn: "{{ openldap_admin_user }}"
bind_pw: "{{ openldap_admin_pass }}"
loop_control:
label: "{{ item.username }}"
with_items: "{{ ldap_system_users }}" # see role/openldap/defaults/main.yml
tags:
- ldap-users
- ldap-system-users
- name: add inventory user groups - name: add inventory user groups
community.general.ldap_entry: community.general.ldap_entry:
@ -299,6 +326,26 @@
tags: tags:
- ldap-users - ldap-users
- name: adding system users to groups
community.general.ldap_attrs:
dn: "cn={{ item[0] }},ou=groups,{{ openldap_dc }}"
attributes:
uniqueMember: "uid={{ item[1].username }},ou=systems,{{ openldap_dc }}"
state: "{%if item[0] in item[1].ldap_groups|default([])%}present{%else%}absent{%endif%}"
server_uri: ldap://{{ openldap_url }}/
bind_dn: "{{ openldap_admin_user }}"
bind_pw: "{{ openldap_admin_pass }}"
loop_control:
label: "{{ item[1].username }}, group={{ item[0] }}, present={{ item[0] in item[1].ldap_groups|default([]) }}"
with_nested:
- "{{ openldap_groups }}"
- "{{ ldap_system_users }}"
# when:
# - item[1] in item[0].ldap_groups|default([])
tags:
- ldap-users
- ldap-system-users
- name: adding inventory ldap_only_users to groups - name: adding inventory ldap_only_users to groups
community.general.ldap_attrs: community.general.ldap_attrs:
dn: "cn={{ item[0] }},ou=groups,{{ openldap_dc }}" dn: "cn={{ item[0] }},ou=groups,{{ openldap_dc }}"

View File

@ -115,6 +115,7 @@
- pihole-config - pihole-config
with_items: with_items:
- "99-edns.conf" - "99-edns.conf"
- "02-zone-forward.conf"
notify: restart pihole notify: restart pihole
- name: template unbound config - name: template unbound config
@ -126,6 +127,7 @@
mode: 0644 mode: 0644
tags: tags:
- unbound - unbound
- a-records
with_items: with_items:
- unbound.conf - unbound.conf
- a-records.conf - a-records.conf

View File

@ -0,0 +1,3 @@
{% for item in pihole_zone_forwards -%}
server=/{{ item.zone | trim }}/{{ item.resolver | trim }}
{% endfor %}
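
The pihole_zone_forwards list this template iterates over could look like the following (hypothetical zones and resolvers); each entry becomes one dnsmasq server=/zone/resolver line.

# group_vars sketch (hypothetical)
pihole_zone_forwards:
  - zone: internal.example.com
    resolver: 10.10.0.1
  - zone: 10.in-addr.arpa
    resolver: 10.10.0.1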

View File

@ -0,0 +1,6 @@
- name: restart podgrab container
docker_container:
name: podgrab
state: started
restart: true
when: podgrab_container is not defined or not podgrab_container.changed

View File

@ -0,0 +1,3 @@
---
- import_tasks: podgrab.yml
tags: podgrab

View File

@ -0,0 +1,47 @@
---
- name: create dir structure
file:
path: "{{ systemuserlist.archives.home }}/podgrab/{{ item.name }}"
state: directory
mode: "{{ item.mode | default('0755') }}"
owner: "{{ systemuserlist.archives.username }}"
group: media
loop_control:
label: "{{ item.name }}"
with_items:
- name: data
- name: config
tags:
- podgrab-dirs
- name: start podgrab container
docker_container:
name: podgrab
image: akhilrex/podgrab
user: "{{ systemuserlist.archives.uid }}:{{ grouplist.media.gid }}"
detach: true
pull: true
restart_policy: "no"
state: "{{ podgrab_container_state | default('started') }}"
container_default_behavior: compatibility
networks_cli_compatible: false
network_mode: bridgewithdns
networks:
- name: bridgewithdns
env:
BASE_URL: "https://{{ hass_url }}/podcasts"
CHECK_FREQUENCY: "240"
ports:
- "127.0.0.1:{{ podgrab_port }}:8080"
mounts:
- type: bind
source: "{{ systemuserlist.archives.home }}/podgrab/data"
target: /assets
- type: bind
source: "{{ systemuserlist.archives.home }}/podgrab/config"
target: /config
tags:
- podgrab-container
- docker-containers
register: podgrab_container

Binary file not shown.

View File

@ -0,0 +1,4 @@
---
- name: update grub
command: update-grub

View File

@ -0,0 +1,4 @@
---
- import_tasks: proxmox.yml
tags: proxmox

View File

@ -0,0 +1,74 @@
---
- name: add apt repo
apt_repository:
repo: "deb [arch=amd64] http://download.proxmox.com/{{ ansible_lsb.id | lower }}/pve {{ ansible_lsb.codename | lower}} pve-no-subscription"
state: present
update_cache: false
filename: pve-install-repo
tags:
- packages
- proxmox-repo
- name: copy gpg key for repo
copy:
src: proxmox-release-bullseye.gpg
dest: /etc/apt/trusted.gpg.d/proxmox-release-bullseye.gpg
owner: root
group: root
mode: "0644"
tags:
- packages
- name: apt-get update
apt:
update_cache: true
tags:
- packages
# NOTE: not in ansible yet, docs say to do an "apt full-upgrade" now.
- name: install proxmox kernel
apt:
name: pve-kernel-5.15
state: present
tags:
- packages
# NOTE: also not in ansible, reboot after this.
- name: install proxmox ve
apt:
name:
- proxmox-ve
#- postfix
- open-iscsi
state: present
tags:
- packages
- name: remove the enterprise repo that gets installed
file:
path: /etc/apt/sources.list.d/pve-enterprise.list
state: absent
tags:
- packages
register: enterprise_removed
- name: update apt if enterprise repo removed
apt:
update_cache: true
tags:
- packages
when: enterprise_removed.changed
- name: remove debian kernel and os-prober
apt:
name:
- linux-image-amd64
- 'linux-image-5.10*'
- os-prober
state: absent
notify: update grub
tags:
- packages
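
The two NOTE comments above leave the "apt full-upgrade" and the reboot as manual steps. If they were ever folded into the role, a minimal sketch (not part of this change, timings hypothetical) could look like:

# hypothetical tasks for the manual steps noted above
- name: full upgrade after adding the proxmox repo
  apt:
    upgrade: full
    update_cache: true

- name: reboot into the pve kernel
  reboot:
    reboot_timeout: 600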

View File

@ -0,0 +1,242 @@
#
# Sample configuration file for the Samba suite for Debian GNU/Linux.
#
#
# This is the main Samba configuration file. You should read the
# smb.conf(5) manual page in order to understand the options listed
# here. Samba has a huge number of configurable options most of which
# are not shown in this example
#
# Some options that are often worth tuning have been included as
# commented-out examples in this file.
# - When such options are commented with ";", the proposed setting
# differs from the default Samba behaviour
# - When commented with "#", the proposed setting is the default
# behaviour of Samba but the option is considered important
# enough to be mentioned here
#
# NOTE: Whenever you modify this file you should run the command
# "testparm" to check that you have not made any basic syntactic
# errors.
#======================= Global Settings =======================
[global]
## Browsing/Identification ###
# Change this to the workgroup/NT-domain name your Samba server will part of
workgroup = WORKGROUP
# server string is the equivalent of the NT Description field
server string = %h server (Samba, Ubuntu)
#### Networking ####
# The specific set of interfaces / networks to bind to
# This can be either the interface name or an IP address/netmask;
# interface names are normally preferred
; interfaces = 127.0.0.0/8 eth0
# Only bind to the named interfaces and/or networks; you must use the
# 'interfaces' option above to use this.
# It is recommended that you enable this feature if your Samba machine is
# not protected by a firewall or is a firewall itself. However, this
# option cannot handle dynamic or non-broadcast interfaces correctly.
; bind interfaces only = yes
#### Debugging/Accounting ####
# This tells Samba to use a separate log file for each machine
# that connects
log file = /var/log/samba/log.%m
# Cap the size of the individual log files (in KiB).
max log size = 1000
# We want Samba to only log to /var/log/samba/log.{smbd,nmbd}.
# Append syslog@1 if you want important messages to be sent to syslog too.
logging = file
# Do something sensible when Samba crashes: mail the admin a backtrace
panic action = /usr/share/samba/panic-action %d
####### Authentication #######
# Server role. Defines in which mode Samba will operate. Possible
# values are "standalone server", "member server", "classic primary
# domain controller", "classic backup domain controller", "active
# directory domain controller".
#
# Most people will want "standalone server" or "member server".
# Running as "active directory domain controller" will require first
# running "samba-tool domain provision" to wipe databases and create a
# new domain.
server role = standalone server
obey pam restrictions = yes
# This boolean parameter controls whether Samba attempts to sync the Unix
# password with the SMB password when the encrypted SMB password in the
# passdb is changed.
unix password sync = yes
# For Unix password sync to work on a Debian GNU/Linux system, the following
# parameters must be set (thanks to Ian Kahan <<kahan@informatik.tu-muenchen.de> for
# sending the correct chat script for the passwd program in Debian Sarge).
passwd program = /usr/bin/passwd %u
passwd chat = *Enter\snew\s*\spassword:* %n\n *Retype\snew\s*\spassword:* %n\n *password\supdated\ssuccessfully* .
# This boolean controls whether PAM will be used for password changes
# when requested by an SMB client instead of the program listed in
# 'passwd program'. The default is 'no'.
pam password change = yes
# This option controls how unsuccessful authentication attempts are mapped
# to anonymous connections
map to guest = bad user
########## Domains ###########
#
# The following settings only takes effect if 'server role = classic
# primary domain controller', 'server role = classic backup domain controller'
# or 'domain logons' is set
#
# It specifies the location of the user's
# profile directory from the client point of view) The following
# required a [profiles] share to be setup on the samba server (see
# below)
; logon path = \\%N\profiles\%U
# Another common choice is storing the profile in the user's home directory
# (this is Samba's default)
# logon path = \\%N\%U\profile
# The following setting only takes effect if 'domain logons' is set
# It specifies the location of a user's home directory (from the client
# point of view)
; logon drive = H:
# logon home = \\%N\%U
# The following setting only takes effect if 'domain logons' is set
# It specifies the script to run during logon. The script must be stored
# in the [netlogon] share
# NOTE: Must be store in 'DOS' file format convention
; logon script = logon.cmd
# This allows Unix users to be created on the domain controller via the SAMR
# RPC pipe. The example command creates a user account with a disabled Unix
# password; please adapt to your needs
; add user script = /usr/sbin/adduser --quiet --disabled-password --gecos "" %u
# This allows machine accounts to be created on the domain controller via the
# SAMR RPC pipe.
# The following assumes a "machines" group exists on the system
; add machine script = /usr/sbin/useradd -g machines -c "%u machine account" -d /var/lib/samba -s /bin/false %u
# This allows Unix groups to be created on the domain controller via the SAMR
# RPC pipe.
; add group script = /usr/sbin/addgroup --force-badname %g
############ Misc ############
# Using the following line enables you to customise your configuration
# on a per machine basis. The %m gets replaced with the netbios name
# of the machine that is connecting
; include = /home/samba/etc/smb.conf.%m
# Some defaults for winbind (make sure you're not using the ranges
# for something else.)
; idmap config * : backend = tdb
; idmap config * : range = 3000-7999
; idmap config YOURDOMAINHERE : backend = tdb
; idmap config YOURDOMAINHERE : range = 100000-999999
; template shell = /bin/bash
# Setup usershare options to enable non-root users to share folders
# with the net usershare command.
# Maximum number of usershare. 0 means that usershare is disabled.
# usershare max shares = 100
# Allow users who've been granted usershare privileges to create
# public shares, not just authenticated ones
usershare allow guests = yes
#======================= Share Definitions =======================
# Un-comment the following (and tweak the other settings below to suit)
# to enable the default home directory shares. This will share each
# user's home directory as \\server\username
;[homes]
; comment = Home Directories
; browseable = no
# By default, the home directories are exported read-only. Change the
# next parameter to 'no' if you want to be able to write to them.
; read only = yes
# File creation mask is set to 0700 for security reasons. If you want to
# create files with group=rw permissions, set next parameter to 0775.
; create mask = 0700
# Directory creation mask is set to 0700 for security reasons. If you want to
# create dirs. with group=rw permissions, set next parameter to 0775.
; directory mask = 0700
# By default, \\server\username shares can be connected to by anyone
# with access to the samba server.
# Un-comment the following parameter to make sure that only "username"
# can connect to \\server\username
# This might need tweaking when using external authentication schemes
; valid users = %S
# Un-comment the following and create the netlogon directory for Domain Logons
# (you need to configure Samba to act as a domain controller too.)
;[netlogon]
; comment = Network Logon Service
; path = /home/samba/netlogon
; guest ok = yes
; read only = yes
# Un-comment the following and create the profiles directory to store
# users profiles (see the "logon path" option above)
# (you need to configure Samba to act as a domain controller too.)
# The path below should be writable by all users so that their
# profile directory may be created the first time they log on
;[profiles]
; comment = Users profiles
; path = /home/samba/profiles
; guest ok = no
; browseable = no
; create mask = 0600
; directory mask = 0700
[printers]
comment = All Printers
browseable = no
path = /var/spool/samba
printable = yes
guest ok = no
read only = yes
create mask = 0700
# Windows clients look for this share name as a source of downloadable
# printer drivers
[print$]
comment = Printer Drivers
path = /var/lib/samba/printers
browseable = yes
read only = yes
guest ok = no
# Uncomment to allow remote administration of Windows print drivers.
# You may need to replace 'lpadmin' with the name of the group your
# admin users are members of.
# Please note that you also need to set appropriate Unix permissions
# to the drivers directory for these users to have write rights in it
; write list = root, @lpadmin

View File

@ -0,0 +1,9 @@
---
- name: restart samba
service:
name: "{{ item }}"
state: restarted
with_items:
- smbd
- nmbd

View File

@ -0,0 +1,3 @@
---
- import_tasks: samba.yml
tags: samba

View File

@ -0,0 +1,49 @@
---
- name: create dirs
file:
state: directory
path: /srv/samba/{{ item.path }}
owner: "{{ item.owner }}"
group: "{{ item.group }}"
mode: "{{ item.mode }}"
loop_control:
label: "{{ item.path }}"
with_items:
- path: ""
owner: root
group: root
mode: '0775'
- path: printers
owner: nobody
group: nogroup
mode: '0775'
- path: drivers
owner: nobody
group: nogroup
mode: '0775'
- path: drivers/hp1010
owner: nobody
group: nogroup
mode: '0775'
- name: install samba
apt:
state: latest
name:
- samba
- smbclient
tags:
- packages
- name: template smb.conf file
template:
src: smb.conf.j2
dest: /etc/samba/smb.conf
owner: root
group: root
mode: 0644
#validate: /usr/bin/testparm -s %s
notify: restart samba
tags:
- smb.conf

View File

@ -0,0 +1,270 @@
# distributed from ansible
#
# NOTE: Whenever you modify this file you should run the command
# "testparm" to check that you have not made any basic syntactic
# errors.
#======================= Global Settings =======================
[global]
workgroup = WORKGROUP
#netbios name = {{ inventory_hostname.split('.')[0].upper() }}
#server string = %h server (Samba)
server string = {{ inventory_hostname.split('.')[0] }}
interfaces = 127.0.0.0/8 {{ samba_interfaces | default([]) | join(" ") }}
bind interfaces only = yes
log file = /var/log/samba/log.%m
max log size = 1000
logging = file
panic action = /usr/share/samba/panic-action %d
# Possible values are "standalone server", "member server",
# "classic primary domain controller", "classic backup domain
# controller", "active directory domain controller".
server role = standalone server
obey pam restrictions = yes
unix password sync = yes
passwd program = /usr/bin/passwd %u
passwd chat = *Enter\snew\s*\spassword:* %n\n *Retype\snew\s*\spassword:* %n\n *password\supdated\ssuccessfully* .
pam password change = yes
map to guest = bad user
invalid users = root
unix charset = UTF-8
# possible values: auto, user, domain, ads
security = user
local master = yes
preferred master = yes
domain master = yes
# printing = CUPS
# printcap name = sambacap
# load printers = yes
# printcap = cups
####### Authentication #######
# Server role. Defines in which mode Samba will operate.
#
# Most people will want "standalone server" or "member server".
# Running as "active directory domain controller" will require first
# running "samba-tool domain provision" to wipe databases and create a
# new domain.
# server role = standalone server
#
# This boolean parameter controls whether Samba attempts to sync the Unix
# password with the SMB password when the encrypted SMB password in the
# passdb is changed.
unix password sync = no
# For Unix password sync to work on a Debian GNU/Linux system, the following
# parameters must be set (thanks to Ian Kahan <<kahan@informatik.tu-muenchen.de> for
# sending the correct chat script for the passwd program in Debian Sarge).
passwd program = /usr/bin/passwd %u
passwd chat = *Enter\snew\s*\spassword:* %n\n *Retype\snew\s*\spassword:* %n\n *password\supdated\ssuccessfully* .
# This boolean controls whether PAM will be used for password changes
# when requested by an SMB client instead of the program listed in
# 'passwd program'. The default is 'no'.
pam password change = no
########## Domains ###########
#
# The following settings only takes effect if 'server role = classic
# primary domain controller', 'server role = classic backup domain controller'
# or 'domain logons' is set
#
# It specifies the location of the user's
# profile directory from the client point of view) The following
# required a [profiles] share to be setup on the samba server (see
# below)
; logon path = \\%N\profiles\%U
# Another common choice is storing the profile in the user's home directory
# (this is Samba's default)
# logon path = \\%N\%U\profile
# The following setting only takes effect if 'domain logons' is set
# It specifies the location of a user's home directory (from the client
# point of view)
; logon drive = H:
# logon home = \\%N\%U
# The following setting only takes effect if 'domain logons' is set
# It specifies the script to run during logon. The script must be stored
# in the [netlogon] share
# NOTE: Must be store in 'DOS' file format convention
; logon script = logon.cmd
# This allows Unix users to be created on the domain controller via the SAMR
# RPC pipe. The example command creates a user account with a disabled Unix
# password; please adapt to your needs
; add user script = /usr/sbin/adduser --quiet --disabled-password --gecos "" %u
# This allows machine accounts to be created on the domain controller via the
# SAMR RPC pipe.
# The following assumes a "machines" group exists on the system
; add machine script = /usr/sbin/useradd -g machines -c "%u machine account" -d /var/lib/samba -s /bin/false %u
# This allows Unix groups to be created on the domain controller via the SAMR
# RPC pipe.
; add group script = /usr/sbin/addgroup --force-badname %g
############ Misc ############
# Using the following line enables you to customise your configuration
# on a per machine basis. The %m gets replaced with the netbios name
# of the machine that is connecting
; include = /home/samba/etc/smb.conf.%m
# Some defaults for winbind (make sure you're not using the ranges
# for something else.)
; idmap config * : backend = tdb
; idmap config * : range = 3000-7999
; idmap config YOURDOMAINHERE : backend = tdb
; idmap config YOURDOMAINHERE : range = 100000-999999
; template shell = /bin/bash
# Setup usershare options to enable non-root users to share folders
# with the net usershare command.
# Maximum number of usershare. 0 means that usershare is disabled.
# usershare max shares = 100
# Allow users who've been granted usershare privileges to create
# public shares, not just authenticated ones
usershare allow guests = no
#======================= Share Definitions =======================
# Un-comment the following (and tweak the other settings below to suit)
# to enable the default home directory shares. This will share each
# user's home directory as \\server\username
;[homes]
; comment = Home Directories
; browseable = no
# By default, the home directories are exported read-only. Change the
# next parameter to 'no' if you want to be able to write to them.
; read only = yes
# File creation mask is set to 0700 for security reasons. If you want to
# create files with group=rw permissions, set next parameter to 0775.
; create mask = 0700
# Directory creation mask is set to 0700 for security reasons. If you want to
# create dirs. with group=rw permissions, set next parameter to 0775.
; directory mask = 0700
# By default, \\server\username shares can be connected to by anyone
# with access to the samba server.
# Un-comment the following parameter to make sure that only "username"
# can connect to \\server\username
# This might need tweaking when using external authentication schemes
; valid users = %S
# Un-comment the following and create the netlogon directory for Domain Logons
# (you need to configure Samba to act as a domain controller too.)
;[netlogon]
; comment = Network Logon Service
; path = /home/samba/netlogon
; guest ok = yes
; read only = yes
# Un-comment the following and create the profiles directory to store
# users profiles (see the "logon path" option above)
# (you need to configure Samba to act as a domain controller too.)
# The path below should be writable by all users so that their
# profile directory may be created the first time they log on
;[profiles]
; comment = Users profiles
; path = /home/samba/profiles
; guest ok = no
; browseable = no
; create mask = 0600
; directory mask = 0700
# [music]
# comment = music files
# path = /deadspace/music
# guest ok = yes
# browseable = yes
# read only = yes
# [drivers]
# comment = printer files
# path = /srv/samba/drivers
# guest ok = yes
# browsable = yes
# read only = no
# writable = yes
# [printers]
# comment = All Printers
# path = /var/spool/samba
# guest ok = yes
# printable = yes
# Windows clients look for this share name as a source of downloadable
# printer drivers
[print$]
comment = Printer Drivers
path = /srv/samba/printers
guest ok = yes
browseable = yes
read only = no
writable = yes
# [sambaprint]
# comment = "raw printer for samba"
# path = /var/spool/samba
# guest ok = yes
# #guest only = yes
# writable = no
# printable = yes
# printer name = samba
# #force user = nobody
# [shredder]
# comment = "in the study"
# path = /var/spool/samba
# guest ok = yes
# writable = no
# printable = yes
# printer name = hp-LaserJet-1010
#guest only = yes
#force user = nobody
# [sambajet]
# path = /var/spool/samba
# guest ok = yes
# guest only = yes
# force user = nobody
# printable = yes
# browseable = yes
# Uncomment to allow remote administration of Windows print drivers.
# You may need to replace 'lpadmin' with the name of the group your
# admin users are members of.
# Please note that you also need to set appropriate Unix permissions
# to the drivers directory for these users to have write rights in it
; write list = root, @lpadmin

Binary file not shown.

View File

@ -36,11 +36,12 @@
- name: install humanize - name: install humanize
pip: pip:
name: humanize name: humanize
state: latest state: present
executable: pip3 executable: pip3
tags: tags:
- pip - pip
- packages - packages
- pip-latest
- name: template vnstat script - name: template vnstat script
template: template:
@ -63,32 +64,45 @@
# with_items: "{{ systemuserlist.telegraf.groups }}" # with_items: "{{ systemuserlist.telegraf.groups }}"
# when: 'item in etc_groups.stdout_lines' # when: 'item in etc_groups.stdout_lines'
- name: add apt key for influxdata to install telegraf # sudo gpg --no-default-keyring --keyring /usr/share/keyrings/influxdb-archive-keyring.gpg --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys D8FF8E1F7DF8B07E
apt_key: # - name: add apt key for influxdata to install telegraf
url: https://repos.influxdata.com/influxdb.key # apt_key:
state: present # url: https://repos.influxdata.com/influxdb.key
tags: # state: present
- packages # tags:
# - packages
- name: set distro to debian if raspbian - name: set distro to debian if raspbian
set_fact: set_fact:
distro: debian distro: debian
when: ansible_lsb.id == "Raspbian" when: ansible_lsb.id == "Raspbian"
tags:
- influx-repo
- packages
- name: otherwise use lsb id - name: otherwise use lsb id
set_fact: set_fact:
distro: "{{ ansible_lsb.id }}" distro: "{{ ansible_lsb.id }}"
when: ansible_lsb.id != "Raspbian" when: ansible_lsb.id != "Raspbian"
tags:
- influx-repo
- packages
- name: set codename to bullseye if bookworm - name: set codename to bullseye if bookworm
set_fact: set_fact:
codename: bullseye codename: bullseye
when: ansible_lsb.codename == "bookworm" when: ansible_lsb.codename == "bookworm"
tags:
- influx-repo
- packages
- name: set codename to focal if hirsute - name: set codename to focal if hirsute
set_fact: set_fact:
codename: focal codename: focal
when: ansible_lsb.codename == "hirsute" when: ansible_lsb.codename == "hirsute"
tags:
- influx-repo
- packages
- name: otherwise use lsb codename - name: otherwise use lsb codename
set_fact: set_fact:
@ -96,14 +110,45 @@
when: when:
- ansible_lsb.codename != "bookworm" - ansible_lsb.codename != "bookworm"
- ansible_lsb.codename != "hirsute" - ansible_lsb.codename != "hirsute"
tags:
- influx-repo
- packages
- name: get influxdb key
get_url:
url: https://repos.influxdata.com/influxdb.key
dest: /etc/apt/trusted.gpg.d/influxdb.asc
tags:
- influx-repo
- packages
- name: get dearmored influxdb key (the key downloaded in the other task doesnt work)
command:
cmd: gpg --no-default-keyring --keyring /usr/share/keyrings/influxdb-archive-keyring.gpg --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys D8FF8E1F7DF8B07E
creates: /usr/share/keyrings/influxdb-archive-keyring.gpg
tags:
- influx-repo
- packages
- name: remove repo definition without signed-by
apt_repository:
repo: deb https://repos.influxdata.com/{{ distro | lower }} {{ codename }} stable
state: absent
update_cache: false
tags:
- packages
- influx-repo
- name: add repo for influxdata to install telegraf - name: add repo for influxdata to install telegraf
apt_repository: apt_repository:
repo: deb https://repos.influxdata.com/{{ distro | lower }} {{ codename }} stable repo: deb [signed-by=/usr/share/keyrings/influxdb-archive-keyring.gpg] https://repos.influxdata.com/{{ distro | lower }} {{ codename }} stable
#repo: deb [signed-by=/etc/apt/trusted.gpg.d/influxdb.asc] https://repos.influxdata.com/{{ distro | lower }} {{ codename }} stable
state: present state: present
update_cache: false
when: not skip_apt|default(false) when: not skip_apt|default(false)
tags: tags:
- packages - packages
- influx-repo
- name: install telegraf - name: install telegraf
apt: apt:
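This hunk moves the InfluxData repo to a signed-by keyring fetched from a keyserver, while the plain .asc copy in /etc/apt/trusted.gpg.d is kept only for the commented-out repo line. A quick way to confirm the keyring really holds the expected key before apt refreshes is sketched below; this is a hypothetical verification task, not part of the role, and the key id is taken from the comment at the top of the hunk.

- name: check that the influxdata keyring holds the expected key   # hypothetical check, not in this role
  command:
    cmd: gpg --no-default-keyring --keyring /usr/share/keyrings/influxdb-archive-keyring.gpg --list-keys D8FF8E1F7DF8B07E
  register: influx_key_check
  changed_when: false
  tags:
    - influx-repo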

View File

@ -30,12 +30,20 @@
- name: create dir structure - name: create dir structure
file: file:
path: "{{ unifi_root }}"
state: directory state: directory
mode: 0750 path: "{{ unifi_root }}/{{ item.name }}"
owner: unifi owner: "{{ systemuserlist.unifi.uid }}"
group: unifi group: "{{ systemuserlist.unifi.gid }}"
recurse: no mode: "{{ item.mode | default('0770') }}"
tags:
- unifi-dirs
loop_control:
label: "{{ item.name }}"
with_items:
- name: data
- name: log
- name: run
mode: "0775"
# to adopt # to adopt
# ssh unifi-ap # ssh unifi-ap
@ -74,20 +82,32 @@
state: started state: started
init: true init: true
pull: true pull: true
#user: "{{ systemuserlist['unifi']['uid'] }}:{{ systemuserlist['unifi']['gid'] }}" user: "{{ systemuserlist['unifi']['uid'] }}:{{ systemuserlist['unifi']['gid'] }}"
env: env:
TZ: "Europe/Berlin" TZ: "Europe/Berlin"
RUNAS_UID0: "false" RUNAS_UID0: "false"
UNIFI_UID: "{{ systemuserlist['unifi']['uid'] }}" UNIFI_UID: "{{ systemuserlist['unifi']['uid'] }}"
UNIFI_GID: "{{ systemuserlist['unifi']['gid'] }}" UNIFI_GID: "{{ systemuserlist['unifi']['gid'] }}"
UNIFI_HTTPS_PORT: "443"
container_default_behavior: compatibility container_default_behavior: compatibility
volumes: network_mode: bridgewithdns
- "{{ unifi_root }}:/unifi" networks:
- name: bridgewithdns
ipv4_address: "{{ bridgewithdns.unifi }}"
mounts:
- type: bind
source: "{{ unifi_root }}/data"
target: "/unifi/data"
- type: bind
source: "{{ unifi_root }}/run"
target: "/unifi/run"
- type: bind
source: "{{ unifi_root }}/log"
target: "/unifi/log"
ports: ports:
- "3478:3478/udp" # STUN - "3478:3478/udp" # STUN
- "6789:6789/tcp" # Speed test - "6789:6789/tcp" # Speed test
- "8080:8080/tcp" # Device/ controller comm. - "8080:8080/tcp" # Device/ controller comm.
- "127.0.0.1:8443:8443/tcp" # Controller GUI/API as seen in a web browser
- "10001:10001/udp" # AP discovery - "10001:10001/udp" # AP discovery
tags: tags:
- unifi-container - unifi-container
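With the 8443 host port no longer published and the GUI reached only via the bridgewithdns address, a smoke test against the container's own HTTPS port is one way to confirm it came up under the unifi user. A minimal sketch, assuming the controller still answers its usual unauthenticated /status endpoint on port 443 (set via UNIFI_HTTPS_PORT above):

- name: smoke-test the unifi controller on its bridgewithdns address   # hypothetical check, not part of the role
  uri:
    url: "https://{{ bridgewithdns.unifi }}:443/status"
    validate_certs: false   # the controller ships a self-signed certificate
    status_code: 200
  tags:
    - unifi-container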

View File

@ -2,30 +2,40 @@
server { server {
listen 443 ssl; listen 443 ssl;
listen [::]:443 ssl; # listen for ipv6 server_name {{ unifi_url }} {{ unifi_url2 }};
server_name {{ unifi_url }}; ssl_session_timeout 5m;
ssl_certificate /usr/local/etc/certs/{{ domain }}/fullchain.pem;
ssl_certificate_key /usr/local/etc/certs/{{ domain }}/privkey.pem;
location /wss/ { include /etc/nginx/authelia_internal.conf;
proxy_pass https://localhost:8443;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "Upgrade";
proxy_set_header Host $host;
}
location /inform { location /wss/ {
proxy_pass http://localhost:8080; #include /etc/nginx/require_auth.conf;
proxy_pass https://{{ bridgewithdns.unifi }}:443;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "Upgrade";
proxy_set_header Host $host;
}
location /inform {
proxy_pass http://localhost:8080;
proxy_redirect off;
proxy_set_header Host $host; proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Host $server_name;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header Host $host;
}
} location / {
#include /etc/nginx/require_auth.conf;
proxy_pass https://{{ bridgewithdns.unifi }}:443;
location / {
proxy_pass https://localhost:8443/;
proxy_redirect off; proxy_redirect off;
proxy_set_header Host $host; proxy_set_header Host $host;
@ -34,16 +44,11 @@ server {
proxy_set_header X-Forwarded-Host $server_name; proxy_set_header X-Forwarded-Host $server_name;
proxy_set_header X-Forwarded-Proto $scheme; proxy_set_header X-Forwarded-Proto $scheme;
} }
access_log /var/log/nginx/access_{{ unifi_url }}.log main; access_log /var/log/nginx/access_{{ unifi_url }}.log main;
error_log /var/log/nginx/error_{{ unifi_url }}.log warn; error_log /var/log/nginx/error_{{ unifi_url }}.log warn;
ssl_session_timeout 5m;
ssl_certificate /usr/local/etc/certs/{{ domain }}/fullchain.pem;
ssl_certificate_key /usr/local/etc/certs/{{ domain }}/privkey.pem;
add_header Referrer-Policy "no-referrer" always; add_header Referrer-Policy "no-referrer" always;
add_header X-Content-Type-Options "nosniff" always; add_header X-Content-Type-Options "nosniff" always;
add_header X-Download-Options "noopen" always; add_header X-Download-Options "noopen" always;
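Since the vhost now proxies /wss/ and / to the container address and pulls in the authelia include, validating the rendered config before nginx is reloaded is cheap insurance. A minimal sketch of such a check, separate from whatever handler the repo already uses:

- name: validate nginx configuration after templating the unifi vhost   # hypothetical check task
  command:
    cmd: nginx -t
  changed_when: false
  tags:
    - nginx
    - unifi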

View File

@ -189,20 +189,20 @@
- vaultwarden-ldap - vaultwarden-ldap
- docker-containers - docker-containers
# - name: install certs - name: install certs
# copy: copy:
# src: "/usr/local/etc/letsencrypt/live/{{ item }}" src: "/usr/local/etc/letsencrypt/live/{{ item }}"
# dest: "/usr/local/etc/certs/" dest: "/usr/local/etc/certs/"
# owner: root owner: root
# group: root group: root
# mode: 0755 mode: 0755
# tags: tags:
# - letsencrypt-certs - letsencrypt-certs
# notify: reload nginx notify: reload nginx
# vars: vars:
# prediff_cmd: echo prediff_cmd: echo
# with_items: with_items:
# - "{{ vaultwarden_url }}" - "{{ vaultwarden_url }}"
- name: template nginx vhost for vaultwarden - name: template nginx vhost for vaultwarden
template: template:
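The install-certs task is re-enabled here and notifies "reload nginx" after copying the live letsencrypt material into /usr/local/etc/certs/. For reference, a handler of that name typically looks like the sketch below; the repo's actual handler may differ.

- name: reload nginx   # sketch of the notified handler; the repo's real handler may differ
  service:
    name: nginx
    state: reloaded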

View File

@ -9,6 +9,7 @@ upstream {{ item.container_name }}-ws {
server {{ bridgewithdns[item.container_name] }}:3012; server {{ bridgewithdns[item.container_name] }}:3012;
keepalive 2; keepalive 2;
} }
server { server {
listen 443 ssl http2; listen 443 ssl http2;
{% if inventory_hostname in wg_clients -%} {% if inventory_hostname in wg_clients -%}
@ -21,12 +22,6 @@ server {
include /etc/nginx/authelia_internal.conf; include /etc/nginx/authelia_internal.conf;
include /etc/nginx/sudo-known.conf; include /etc/nginx/sudo-known.conf;
# Specify SSL Config when needed
#ssl_certificate /path/to/certificate/letsencrypt/live/vaultwarden.example.tld/fullchain.pem;
#ssl_certificate_key /path/to/certificate/letsencrypt/live/vaultwarden.example.tld/privkey.pem;
#ssl_trusted_certificate /path/to/certificate/letsencrypt/live/vaultwarden.example.tld/fullchain.pem;
client_max_body_size 128M; client_max_body_size 128M;
location / { location / {
@ -85,8 +80,8 @@ server {
error_log /var/log/nginx/error_{{ vaultwarden_url }}.log warn; error_log /var/log/nginx/error_{{ vaultwarden_url }}.log warn;
ssl_session_timeout 5m; ssl_session_timeout 5m;
ssl_certificate /usr/local/etc/certs/{{ domain }}/fullchain.pem; ssl_certificate /usr/local/etc/certs/{{ vaultwarden_url }}/fullchain.pem;
ssl_certificate_key /usr/local/etc/certs/{{ domain }}/privkey.pem; ssl_certificate_key /usr/local/etc/certs/{{ vaultwarden_url }}/privkey.pem;
fastcgi_hide_header X-Powered-By; fastcgi_hide_header X-Powered-By;
} }
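The vhost now points ssl_certificate at the per-host directory copied in by the install-certs task instead of the shared {{ domain }} path. Reading back the certificate's subject and expiry is a quick way to confirm the copied material is current; a minimal sketch, assuming the same path the template uses, and not part of the role:

- name: show subject and expiry of the copied vaultwarden certificate   # hypothetical check
  command:
    cmd: openssl x509 -noout -subject -enddate -in /usr/local/etc/certs/{{ vaultwarden_url }}/fullchain.pem
  changed_when: false
  tags:
    - letsencrypt-certs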

View File

@ -7,6 +7,10 @@ iptables -t nat -D POSTROUTING -s {{ wireguard_cidr }} -o {{ ansible_default_ipv
iptables -t nat -A POSTROUTING -s {{ wireguard_cidr }} -o {{ ansible_default_ipv4.interface }} -j MASQUERADE iptables -t nat -A POSTROUTING -s {{ wireguard_cidr }} -o {{ ansible_default_ipv4.interface }} -j MASQUERADE
{% endif %} {% endif %}
{% if wg_clients[inventory_hostname].multicast_enabled|default(false) -%}
ip link set multicast on dev wg0
{% endif -%}
{% if wg_post_up_dns_route|default(false) %} {% if wg_post_up_dns_route|default(false) %}
sleep 5 sleep 5

View File

@ -44,11 +44,13 @@ server {
include /etc/nginx/well-known.conf; include /etc/nginx/well-known.conf;
include /etc/nginx/sudo-known.conf; include /etc/nginx/sudo-known.conf;
location /{{ www_stream }} {
proxy_pass https://{{ owntone_url }}/{{ www_stream_owntone }};
}
location {{ coolcats }} { location {{ coolcats }} {
include /etc/nginx/require_auth.conf; include /etc/nginx/require_auth.conf;
autoindex on; autoindex on;
} }
location ~* ^.+\.json$ { location ~* ^.+\.json$ {
add_header Content-Type application/json; add_header Content-Type application/json;
} }
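The new /{{ www_stream }} location hands the stream path off to owntone, while .json files keep their explicit Content-Type. A minimal smoke test for the proxied path is sketched below; the vhost's server_name is not visible in this hunk, so using the inventory hostname as the target is an assumption.

- name: smoke-test the owntone stream path   # hypothetical check; target host is an assumption
  uri:
    url: "https://{{ inventory_hostname }}/{{ www_stream }}"
    validate_certs: false
    status_code: [200, 301, 302]
  tags:
    - nginx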