jellyfin version bump, hass with nginx and network_mode=host, more settings in yaml, basic ufw, and long-awaited readonly grafana-proxy #11

Merged
ben merged 12 commits from dev into main 2022-10-02 21:41:37 +00:00
12 changed files with 403 additions and 34 deletions

View File

@ -0,0 +1 @@
---

View File

@ -0,0 +1,18 @@
---
- name: reload nginx
service:
name: nginx
state: reloaded
- name: restart nginx
service:
name: nginx
state: restarted
- name: restart hass container
docker_container:
name: hass
state: started
restart: true
when: not hass_container.changed
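
The `when: not hass_container.changed` guard prevents a double restart: the container task in tasks/main.yml registers `hass_container`, so if that task already recreated the container in the same run, the notified handler becomes a no-op. A minimal sketch of the pattern (options abbreviated from tasks/main.yml):

- name: start hass container
  docker_container:
    name: hass
    # ... full options live in tasks/main.yml
  register: hass_container      # consulted by the handler's `when` guard

- name: home assistant main configuration.yaml
  template:
    src: configuration.yaml.j2
    dest: "{{ systemuserlist.hass.home }}/home-assistant/config/configuration.yaml"
  notify: restart hass container   # skipped when the container task already recreated it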

View File

@ -1,7 +1,70 @@
---
- name: allow ssh
ufw:
rule: allow
to_port: "22"
direction: in
state: enabled
tags:
- ufw
- name: allow loopback
ufw:
rule: allow
interface: lo
direction: in
state: enabled
tags:
- ufw
- name: default policy
ufw:
policy: allow
state: enabled
tags:
- ufw
- name: deny hass cloud port stuff
ufw:
# drops packets
rule: deny
to_port: '42161'
direction: in
state: enabled
tags:
- ufw
- name: reject zwavejs ws and hass ports (loopback only)
ufw:
# connection refused
rule: reject
to_port: "{{ item }}"
direction: in
state: enabled
with_items:
- "8091"
- "8123"
tags:
- ufw
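
The deny/reject split above is deliberate: `deny` silently drops packets so remote probes time out, while `reject` answers immediately so loopback clients get "connection refused" instead of hanging. A rough CLI equivalent, for reference only (a sketch, not part of the play):

ufw deny in to any port 42161     # drop: hass cloud port, probes time out
ufw reject in to any port 8091    # refuse: zwavejs ui fails fast
ufw reject in to any port 8123    # refuse: hass http fails fast
ufw status verbose                # verify the resulting ruleset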
# hass + zwave
- name: install certs
copy:
src: "/usr/local/etc/letsencrypt/live/{{ item }}"
dest: "/usr/local/etc/certs/"
owner: root
group: root
mode: 0755
tags:
- letsencrypt-certs
notify: reload nginx
vars:
prediff_cmd: echo
with_items:
- "{{ hass_url }}"
- name: create dir structure
file:
path: "{{ systemuserlist.hass.home }}/{{ item }}"
@ -11,37 +74,67 @@
group: hass
with_items:
- home-assistant
- home-assistant/config
- home-assistant/.config
- home-assistant/media
- zwavejs
- zwavejs/app
- zwavejs/app/store
- name: home assistant main configuration.yaml
template:
src: configuration.yaml.j2
dest: "{{ systemuserlist.hass.home }}/home-assistant/config/configuration.yaml"
owner: "{{ systemuserlist.hass.uid }}"
group: "{{ systemuserlist.hass.gid }}"
mode: 0644
notify: restart hass container
- name: home assistant secrets file
template:
src: secrets.yaml.j2
dest: "{{ systemuserlist.hass.home }}/home-assistant/config/secrets.yaml"
owner: "{{ systemuserlist.hass.uid }}"
group: "{{ systemuserlist.hass.gid }}"
mode: 0644
no_log: true
notify: restart hass container
# docker run --rm -it -p 8091:8091 -p 3000:3000 --network
#bridgewithdns --device /dev/ttyACM0:/dev/zwave -v
# /home/ben/zwavejs:/usr/src/app/store zwavejs/zwavejs2mqtt:latest
# the name has changed to zwave-js-ui:
# https://github.com/zwave-js/zwave-js-ui/pull/2650
- name: start zwavejs container
docker_container:
name: zwavejs
image: zwavejs/zwave-js-ui:latest
detach: true
pull: true
restart_policy: "unless-stopped"
state: "{{ container_state | default('started') }}"
container_default_behavior: compatibility
user: "{{ systemuserlist.hass.uid }}:dialout"
networks_cli_compatible: false
network_mode: bridgewithdns
networks:
- name: bridgewithdns
ipv4_address: "{{ bridgewithdns.zwavejs }}"
devices:
- "/dev/serial/by-id/usb-0658_0200-if00:/dev/zwave:rwm"
ports:
# ws for hass<->zwavejs
# hass is configured to use localhost:3000 to talk to zwavejs. it could also
# use {{ bridgewithdns.zwavejs }}, but hass is fragile and needs manual
# intervention if it can't reach zwavejs because the ip/dns changed or the
# container moved networks. the address isn't kept in a config file either,
# so using localhost is the least fragile strategy.
- "127.0.0.1:3000:3000"
mounts:
- type: bind
source: "{{ systemuserlist.hass.home }}/zwavejs/app/store"
target: /usr/src/app/store
tags:
- zwavejs
- zwavejs-container
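
The stick is mapped via /dev/serial/by-id rather than the raw /dev/ttyACM0 from the docker run comment above, so the container keeps working if the device re-enumerates. To look up the by-id path for a controller (sketch):

ls -l /dev/serial/by-id/
# each entry is a stable symlink to the current ttyACM*/ttyUSB* node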
@ -62,20 +155,48 @@
state: "{{ container_state | default('started') }}" state: "{{ container_state | default('started') }}"
container_default_behavior: compatibility container_default_behavior: compatibility
user: "{{ systemuserlist.hass.uid }}:{{ systemuserlist.hass.gid }}" user: "{{ systemuserlist.hass.uid }}:{{ systemuserlist.hass.gid }}"
network_mode: host
env:
TZ: "Etc/UTC"
mounts:
- type: bind
source: "{{ systemuserlist.hass.home }}/home-assistant/config"
target: /config
- type: bind
source: "{{ systemuserlist.hass.home }}/home-assistant/.config"
target: /.config
- type: bind
source: "{{ systemuserlist.hass.home }}/home-assistant/media"
target: /usr/var/media
tags:
- home-assistant
- home-assistant-container
- hass-container
- docker-containers
register: hass_container
- name: template nginx vhost for hass
template:
src: 01-hass.j2
dest: /etc/nginx/sites-enabled/01-hass
owner: root
group: root
mode: 0644
tags:
- nginx
- hass-nginx
- zwave-nginx
notify: restart nginx
# different task because it's better for the hass config to restart nginx
- name: template nginx vhost for grafana-proxy
template:
src: 01-grafana-proxy.j2
dest: /etc/nginx/sites-enabled/01-grafana
owner: root
group: root
mode: 0644
tags:
- nginx
- grafana-proxy-nginx
notify: reload nginx
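
On the reload-vs-restart split: `state: reloaded` has nginx re-read its config without dropping established connections, while `state: restarted` does a full stop/start. Roughly equivalent to (sketch):

systemctl reload nginx     # SIGHUP to the master: new config, old workers drain
systemctl restart nginx    # full stop/start: heavier, briefly drops connections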

View File

@ -0,0 +1,28 @@
server {
listen 443 ssl http2;
include /etc/nginx/sudo-known.conf;
server_name {{ hass_grafana_proxy_url }};
location / {
proxy_set_header Host {{ grafana_url }};
proxy_set_header Authorization "{{ hass_grafana_proxy_auth_header }}";
{# strip header from request #}
{# proxy_set_header Referer ""; #}
proxy_pass https://{{ hass_grafana_proxy_pass }};
}
access_log /var/log/nginx/access_{{ hass_grafana_proxy_url }}.log main;
error_log /var/log/nginx/error_{{ hass_grafana_proxy_url }}.log warn;
ssl_session_timeout 5m;
ssl_certificate /usr/local/etc/certs/{{ domain }}/fullchain.pem;
ssl_certificate_key /usr/local/etc/certs/{{ domain }}/privkey.pem;
add_header Content-Security-Policy "frame-ancestors 'self' {{ cast_refer }} https://*.{{ domain }};" always;
fastcgi_hide_header X-Powered-By;
}
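
The proxy is read-only because nginx overwrites whatever Authorization header the client sends with a fixed credential, presumably for a viewer-role Grafana account. A hedged sketch of how the header variable could be defined in host/vault vars, assuming basic auth and a dedicated viewer user (both names hypothetical):

hass_grafana_proxy_auth_header: "Basic {{ ('grafana-viewer:' ~ grafana_viewer_password) | b64encode }}"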

View File

@ -0,0 +1,65 @@
map $http_upgrade $connection_upgrade {
default upgrade;
'' upgrade;
}
server {
listen 443 ssl http2;
{% if inventory_hostname in wg_clients -%}
listen {{ wg_clients[inventory_hostname].ip }}:443 ssl http2;
{% endif -%}
include /etc/nginx/authelia_internal.conf;
include listen-proxy-protocol.conf;
include /etc/nginx/sudo-known.conf;
server_name {{ hass_url }};
location / {
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Host $host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_pass http://127.0.0.1:8123;
}
location = {{ nginx_zwavejs_path }} {
# zwavejs needs to be accessed with a trailing / to respond.
#
# temporary redirects don't get remembered by the browser
# and redirect issues are no fun
return 302 https://{{ hass_url }}{{ nginx_zwavejs_path }}/;
}
location {{ nginx_zwavejs_path }}/ {
include /etc/nginx/require_auth.conf;
proxy_set_header X-External-Path {{ nginx_zwavejs_path }};
rewrite ^ $request_uri;
rewrite '^{{ nginx_zwavejs_path }}(/.*)$' $1 break;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
proxy_pass http://{{ bridgewithdns.zwavejs }}:8091$uri;
# for the special dashboard
# https://zwave-js.github.io/zwave-js-ui/#/usage/reverse-proxy?id=using-an-http-header
# proxy_set_header X-External-Path $http_x_ingress_path;
}
access_log /var/log/nginx/access_{{ hass_url }}.log main;
error_log /var/log/nginx/error_{{ hass_url }}.log warn;
ssl_session_timeout 5m;
ssl_certificate /usr/local/etc/certs/{{ hass_url }}/fullchain.pem;
ssl_certificate_key /usr/local/etc/certs/{{ hass_url }}/privkey.pem;
fastcgi_hide_header X-Powered-By;
}
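
The rewrite pair in the zwavejs location does the prefix stripping: `rewrite ^ $request_uri;` resets the rewrite target to the raw request URI, and the second rewrite keeps only what follows {{ nginx_zwavejs_path }}, so the `$uri` in proxy_pass carries just the internal path. Assuming the path renders to /zwavejs (value hypothetical), a request maps like:

https://{{ hass_url }}/zwavejs/settings  ->  http://{{ bridgewithdns.zwavejs }}:8091/settings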

View File

@ -0,0 +1,123 @@
# Loads default set of integrations. Including the cloud crap. Do remove.
# haven't gotten that to work though, hass doesn't load properly
default_config:
#
# the dict contains this:
# https://github.com/home-assistant/core/blob/dev/homeassistant/components/default_config/manifest.json
#
# the cloud thing listens on (at least) port 42161.
# since we need to run in host mode, and don't have network/port isolation by default
# we'll kill this stuff.
#
# for some reason the settings dialog for it is still at /config/cloud/login, but
# we aren't listening on port 42161 anymore (yay!). (but hass doesn't start)
#
# for now we just block the ports with iptables/ufw
#
# config:
# application_credentials:
# automation:
# bluetooth:
# # there is no cloud, just other people's computers..
# #cloud:
# counter:
# dhcp:
# energy:
# frontend:
# hardware:
# history:
# homeassistant_alerts:
# input_boolean:
# input_button:
# input_datetime:
# input_number:
# input_select:
# input_text:
# logbook:
# map:
# media_source:
# mobile_app:
# my:
# network:
# person:
# scene:
# schedule:
# script:
# ssdp:
# # kind of undocumented, but didn't help
# stream:
# sun:
# system_health:
# tag:
# timer:
# usb:
# webhook:
# zeroconf:
# zone:
# Text to speech
tts:
- platform: google_translate
automation: !include automations.yaml
script: !include scripts.yaml
scene: !include scenes.yaml
calendar:
- platform: caldav
username: !secret caldav_user
password: !secret caldav_passwd
url: !secret caldav_url
http:
# container runs with network_mode=host, so no network isolation. the docs say
# not to do this, and it doesn't work as expected either.
# using ufw/iptables for now....
#
#server_host: 127.0.0.1
trusted_proxies:
- 127.0.0.1
- {{ bridgewithdns.host }}
- {{ bridgewithdns_cidr }}
use_x_forwarded_for: true
homeassistant:
name: Home
currency: EUR
unit_system: metric
time_zone: "Europe/Berlin"
external_url: https://{{ hass_url }}
internal_url: https://{{ hass_url }}
allowlist_external_dirs:
- "/usr/var/media"
allowlist_external_urls:
- "https://{{ static_url }}"
- "https://{{ hass_notflix_url }}"
media_dirs:
media: "/usr/var/media"
sensor:
# https://www.home-assistant.io/integrations/dwd_weather_warnings/
# https://www.dwd.de/DE/leistungen/opendata/help/warnungen/warning_codes_pdf.pdf?__blob=publicationFile&v=5
# https://www.dwd.de/DE/leistungen/opendata/help/warnungen/cap_warncellids_csv.html
# 111000000;Berlin;DE300;Berlin;BXX
# 711000002;Berlin - Friedrichshain-Kreuzberg;;B-Friedrh./Kbg.;BXB
# 711000003;Berlin - Pankow;;B-Pankow;BXG
# 711000011;Berlin - Lichtenberg;;B-Lichtenberg;BXC
# 811000000;Stadt Berlin;;Berlin;
# 911000000;Berlin;;Land Berlin;LBE
# 911100000;Berlin;;Berlin;BXZ
# 995000000;Brandenburg/Berlin;;Berlin/Brandenb;DWPD
- platform: dwd_weather_warnings
# Berlin - Friedrichshain-Kreuzberg
region_name: 711000002
- platform: dwd_weather_warnings
# Berlin - Pankow
region_name: 711000003
- platform: dwd_weather_warnings
# Stadt Berlin
region_name: 811000000
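
Each dwd_weather_warnings entry exposes current/advance warning-level sensors for its region, which the included automations.yaml can react to. A hedged sketch of such an automation (the entity id is an assumption; check the real name under Developer Tools -> States):

- alias: notify on DWD weather warning
  trigger:
    - platform: numeric_state
      entity_id: sensor.dwd_weather_warnings_711000002_current_warning_level
      above: 1
  action:
    - service: persistent_notification.create
      data:
        message: "DWD warning active for Friedrichshain-Kreuzberg"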

View File

@ -0,0 +1,8 @@
# Use this file to store secrets like usernames and passwords.
# Learn more at https://www.home-assistant.io/docs/configuration/secrets/
some_password: welcome
caldav_user: "{{ hass_caldav.user }}"
caldav_passwd: "{{ hass_caldav.passwd }}"
caldav_url: https://{{ nextcloud_url }}/remote.php/dav/principals/users/{{ hass_caldav.user }}/

View File

@ -87,7 +87,7 @@
- name: start container
docker_container:
name: jellyfin
image: jellyfin/jellyfin:10.8.5
auto_remove: no
detach: yes
pull: yes

View File

@ -66,18 +66,6 @@ server {
return 403;
}
# so /web/#!/ works instead of having to go to /web/index.html/#!/
location = /web/ {
proxy_pass http://127.0.0.1:{{ jellyfin_port }}/web/index.html;
@ -102,6 +90,20 @@ server {
proxy_set_header X-Forwarded-Host $http_host;
}
{% for item in ["/health", "/GetUtcTime"] -%}
location {{ item }} {
allow 127.0.0.1;
allow {{ my_public_ips[inventory_hostname] }}/32;
allow {{ my_public_ips[ansible_control_host] }}/32;
allow {{ wireguard_cidr }};
deny all;
proxy_pass http://127.0.0.1:{{ jellyfin_port }};
proxy_set_header Host $host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
}
{% endfor %}
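
Rendered, the loop emits one IP-allowlisted location per unauthenticated Jellyfin endpoint; for /health it comes out roughly as follows (IPs are documentation-range placeholders, and the port assumes the Jellyfin default):

location /health {
    allow 127.0.0.1;
    allow 203.0.113.10/32;      # my_public_ips[inventory_hostname], placeholder
    allow 203.0.113.20/32;      # my_public_ips[ansible_control_host], placeholder
    allow 10.200.0.0/24;        # wireguard_cidr, placeholder
    deny all;
    proxy_pass http://127.0.0.1:8096;   # assuming jellyfin_port is the default 8096
    proxy_set_header Host $host;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
}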
{# location /videos/ {
# # cache video streams: https://jellyfin.org/docs/general/networking/nginx.html#cache-video-streams
# proxy_cache cWEB;
@ -116,6 +118,7 @@ server {
# proxy_cache_key "{{ jellyfin_url }}$uri?MediaSourceId=$arg_MediaSourceId&VideoCodec=$arg_VideoCodec&AudioCodec=$arg_AudioCodec&AudioStreamIndex=$arg_AudioStreamIndex&VideoBitrate=$arg_VideoBitrate&AudioBitrate=$arg_AudioBitrate&SubtitleMethod=$arg_SubtitleMethod&TranscodingMaxAudioChannels=$arg_TranscodingMaxAudioChannels&RequireAvc=$arg_RequireAvc&SegmentContainer=$arg_SegmentContainer&MinSegments=$arg_MinSegments&BreakOnNonKeyFrames=$arg_BreakOnNonKeyFrames&h264-profile=$h264Profile&h264-level=$h264Level";
# proxy_cache_valid 200 301 302 30d;
# } #}
}
server {

View File

@ -22,6 +22,7 @@
dest: /etc/nginx/sites-enabled/01-grafana
tags:
- grafana-config
- grafana-nginx
- nginx-grafana
- nginx
notify: reload nginx

View File

@ -51,6 +51,7 @@ server {
add_header Referrer-Policy "no-referrer" always;
add_header X-Download-Options "noopen" always;
add_header X-Robots-Tag "none" always;
add_header Content-Security-Policy "frame-ancestors 'self' https://*.{{ domain }};" always;
fastcgi_hide_header X-Powered-By;
}

View File

@ -2,7 +2,7 @@
server {
server_name {%- for d in server_names %} www.{{ d }} {{ d }}{% endfor %};
{% if inventory_hostname in wg_clients -%}
listen {{ wg_clients[inventory_hostname].ip }}:443 ssl http2;
@ -53,7 +53,7 @@ server {
add_header Referrer-Policy "no-referrer" always;
add_header X-Content-Type-Options "nosniff" always;
add_header X-Download-Options "noopen" always;
add_header Content-Security-Policy "frame-ancestors 'self' {{ cast_refer }} {%- for d in server_names %} https://*.{{ d }}{% endfor %};" always;
add_header X-Permitted-Cross-Domain-Policies "none" always;
# add_header X-Robots-Tag "none" always;
add_header X-XSS-Protection "1; mode=block" always;
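
Swapping `X-Frame-Options "SAMEORIGIN"` for a `frame-ancestors` directive is the modern equivalent: browsers that support CSP level 2 prefer frame-ancestors when both headers are present, and it can allow an explicit list of origins rather than only same-origin. With hypothetical values (server_names containing example.org, cast_refer rendering to a cast origin), the header would come out as:

Content-Security-Policy: frame-ancestors 'self' https://cast.example.net https://*.example.org;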