Diffstat (limited to 'playbooks/roles/outbound')
-rw-r--r--  playbooks/roles/outbound/tasks/main.yml                                      | 160
-rw-r--r--  playbooks/roles/outbound/templates/headscale/config/config.yaml              |   7
-rw-r--r--  playbooks/roles/outbound/templates/headscale/docker-compose.yml              |   2
-rw-r--r--  playbooks/roles/outbound/templates/proxy/docker-compose.yml                  |  53
-rwxr-xr-x  playbooks/roles/outbound/templates/proxy/headscale/headscale-client.sh       |  14
-rw-r--r--  playbooks/roles/outbound/templates/proxy/nginx/conf.d/bin.conf (renamed from playbooks/roles/outbound/templates/proxy/sites-enabled/bin.conf)         |   0
-rw-r--r--  playbooks/roles/outbound/templates/proxy/nginx/conf.d/default.conf (renamed from playbooks/roles/outbound/templates/proxy/sites-enabled/default.conf) |   4
-rw-r--r--  playbooks/roles/outbound/templates/proxy/nginx/conf.d/idm.conf (renamed from playbooks/roles/outbound/templates/proxy/sites-enabled/idm.conf)         |   0
-rw-r--r--  playbooks/roles/outbound/templates/proxy/nginx/conf.d/kanban.conf (renamed from playbooks/roles/outbound/templates/proxy/sites-enabled/kanban.conf)   |   0
-rw-r--r--  playbooks/roles/outbound/templates/proxy/nginx/conf.d/mail.conf (renamed from playbooks/roles/outbound/templates/proxy/sites-enabled/mail.conf)       |   0
-rw-r--r--  playbooks/roles/outbound/templates/proxy/nginx/conf.d/notes.conf (renamed from playbooks/roles/outbound/templates/proxy/sites-enabled/notes.conf)     |   0
-rwxr-xr-x  playbooks/roles/outbound/templates/proxy/nginx/dont-die-until-conn-closed.sh |  56
-rw-r--r--  playbooks/roles/outbound/templates/proxy/nginx/nginx.conf                    |  30
-rw-r--r--  playbooks/roles/outbound/templates/proxy/nginx/toplevel.conf.d/stream.conf   |  56
-rwxr-xr-x  playbooks/roles/outbound/templates/proxy/nginx/wait-for-bridge.sh            |  33
-rw-r--r--  playbooks/roles/outbound/templates/proxy/toplevel.conf.d/stream.conf         |  38
16 files changed, 304 insertions, 149 deletions
diff --git a/playbooks/roles/outbound/tasks/main.yml b/playbooks/roles/outbound/tasks/main.yml
index 107e71a..45540b4 100644
--- a/playbooks/roles/outbound/tasks/main.yml
+++ b/playbooks/roles/outbound/tasks/main.yml
@@ -1,119 +1,83 @@
---
-# Headscale setup
-- name: Build headscale compose dirs and files
- ansible.builtin.file:
- state: directory
- dest: '/etc/docker/compose/headscale/{{ item.path }}'
- with_filetree: '../templates/headscale'
- when: item.state == 'directory'
-
-- name: Build headscale compose templates
- ansible.builtin.template:
- src: '{{ item.src }}'
- dest: '/etc/docker/compose/headscale/{{ item.path }}'
- with_filetree: '../templates/headscale'
- when: item.state == 'file'
-
-- name: Daemon-reload and enable headscale
- ansible.builtin.systemd_service:
+- name: Deploy Headscale
+ ansible.builtin.import_tasks: manage-docker-compose-service.yml
+ vars:
+ service_name: headscale
+ template_render_dir: "../templates/headscale"
+ service_destination_dir: "{{ headscale_base }}"
state: started
- enabled: true
- daemon_reload: true
- name: docker-compose@headscale
-
-- name: Perform rollout for headscale
- ansible.builtin.shell:
- cmd: "/usr/local/bin/docker-rollout rollout -f docker-compose.yml headscale"
- chdir: "/etc/docker/compose/headscale"
-
-# User API Key
-- name: Generate API key if homelab build
- ansible.builtin.shell:
- cmd: docker compose exec -it headscale headscale apikeys create --expiration "{{ api_key_expiration }}"
- chdir: /etc/docker/compose/headscale
- register: api_key_result
- when: generate_api_key
+ rollout_services:
+ - name: headscale
-- name: Store and display API key
- when: generate_api_key
+- name: Generate Headscale API key (if requested)
+ when: generate_api_key | default(false)
block:
- - name: Define API Key Variable
- set_fact:
- headscale_api_key: "{{ api_key_result.stdout }}"
+ - name: Execute API key generation command
+ ansible.builtin.command:
+ cmd: "docker compose exec headscale headscale apikeys create --expiration {{ api_key_expiration }}"
+ chdir: /etc/docker/compose/headscale
+ register: api_key_result
+ changed_when: true
+
+ - name: Store and display newly generated API key
+ block:
+ - name: Store API Key in fact
+ ansible.builtin.set_fact:
+ headscale_api_key: "{{ api_key_result.stdout }}"
- - name: Echo new key
- ansible.builtin.debug:
- msg: "Please store this API Key! {{ headscale_api_key }}"
+ - name: Display API Key (Requires User Action)
+ ansible.builtin.debug:
+ msg: "IMPORTANT: Please store this newly generated Headscale API Key! {{ headscale_api_key }}"
- - name: Pause until user confirms
- ansible.builtin.pause:
- prompt: "Press return when ready!"
+ - name: Pause for user confirmation (API Key)
+ ansible.builtin.pause:
+ prompt: "API Key displayed. Press return to continue..."
+ when: api_key_result.rc == 0 # Only proceed if key generation succeeded
-# System user auth key
-- name: Create system key user and auth key if homelab build
- when: generate_auth_key
+- name: Create Headscale system user and auth key (if requested)
+ when: generate_auth_key | default(false) # Default to false if var is undefined
block:
- - name: Create system key user
- ansible.builtin.shell:
- cmd: docker compose exec -it headscale headscale users create "{{ auth_key_user }}"
+ # Note: These steps might not be fully idempotent. Re-running will attempt creation again.
+ - name: Create system key user '{{ auth_key_user }}'
+    ansible.builtin.command: # command avoids invoking a shell on the templated values
+ cmd: "docker compose exec headscale headscale users create {{ auth_key_user }}"
chdir: /etc/docker/compose/headscale
+ register: user_create_result
+ changed_when: "'User created' in user_create_result.stdout"
+ failed_when: user_create_result.rc != 0 and 'Cannot create user' not in user_create_result.stderr
- - name: Create auth key preauthkey
- ansible.builtin.shell:
- cmd: docker compose exec -it headscale headscale preauthkeys create --reusable --expiration "{{ auth_key_expiration }}" --user "{{ auth_key_user }}"
+ - name: Create auth key for user '{{ auth_key_user }}'
+    ansible.builtin.command: # command avoids invoking a shell on the templated values
+ cmd: "docker compose exec headscale headscale preauthkeys create --reusable --expiration {{ auth_key_expiration }} --user {{ auth_key_user }}"
chdir: /etc/docker/compose/headscale
register: auth_key_result
+ changed_when: true
- - name: Store and display Auth Key
+ - name: Store and display newly generated Auth Key
block:
- - name: Define Auth Key Variable
- set_fact:
+ # This stores the *newly generated* key. Be aware of Ansible variable precedence
+ # if 'headscale_user_auth_key' is also defined elsewhere (like vaults).
+ # This fact is primarily for immediate display and user interaction below.
+ - name: Store Auth Key in fact
+ ansible.builtin.set_fact:
headscale_user_auth_key: "{{ auth_key_result.stdout }}"
- - name: Echo new auth key
+ - name: Display Auth Key (Requires User Action)
ansible.builtin.debug:
- msg: "Please store this Auth Key for user {{ auth_key_user }}! {{ headscale_user_auth_key }}"
+ msg: "IMPORTANT: Please store this newly generated Auth Key for user '{{ auth_key_user }}'! {{ headscale_user_auth_key }}"
- - name: Pause until user confirms
+ - name: Pause for user confirmation (Auth Key)
ansible.builtin.pause:
- prompt: "Press return when ready!"
-
-# Proxy setup (AFTER API key generation)
-- name: Build proxy compose dirs and files
- ansible.builtin.file:
- state: directory
- dest: '/etc/docker/compose/proxy/{{ item.path }}'
- with_filetree: '../templates/proxy'
- when: item.state == 'directory'
-
-- name: Build proxy compose templates
- ansible.builtin.template:
- src: '{{ item.src }}'
- dest: '/etc/docker/compose/proxy/{{ item.path }}'
- with_filetree: '../templates/proxy'
- when: item.state == 'file'
-
-- name: Allow mail ports
- with_items:
- - "25"
- - "587"
- - "465"
- - "993"
- - "4190"
- community.general.ufw:
- rule: allow
- port: "{{ item }}"
- state: "enabled"
-
-- name: Daemon-reload and enable proxy
- ansible.builtin.systemd_service:
+ prompt: "Auth Key displayed. Press return to continue..."
+ when: auth_key_result.rc == 0
+
+- name: Deploy Open Internet -> Headnet Proxy
+ ansible.builtin.import_tasks: manage-docker-compose-service.yml
+ vars:
+ service_name: proxy
+ template_render_dir: "../templates/proxy"
+ service_destination_dir: "{{ proxy_base }}"
state: started
- enabled: true
- daemon_reload: true
- name: docker-compose@proxy
-
-- name: Perform rollout for proxy
- ansible.builtin.shell:
- cmd: "/usr/local/bin/docker-rollout rollout -f docker-compose.yml proxy"
- chdir: "/etc/docker/compose/proxy"
+ rollout_services:
+ - name: "{{ vpn_proxy_filter_container_name }}"
diff --git a/playbooks/roles/outbound/templates/headscale/config/config.yaml b/playbooks/roles/outbound/templates/headscale/config/config.yaml
index 6bfbfb9..2586848 100644
--- a/playbooks/roles/outbound/templates/headscale/config/config.yaml
+++ b/playbooks/roles/outbound/templates/headscale/config/config.yaml
@@ -135,11 +135,11 @@ unix_socket_permission: "0770"
oidc:
only_start_if_oidc_is_available: false
- issuer: "https://{{ idm_domain }}"
+ issuer: "https://{{ idm_domain }}/oauth2/openid/headscale"
client_id: "headscale"
client_secret: "{{ headscale_oidc_secret }}"
- scope: ["openid", "profile", "email"]
+ scope: ["openid", "profile", "email", "groups"]
pkce:
# Enable or disable PKCE support (default: false)
enabled: true
@@ -150,7 +150,8 @@ oidc:
allowed_domains:
- {{ domain }}
- allowed_users: {{ headscale_allowed_users }}
+ allowed_groups:
+ - vpn@{{ idm_domain }}
strip_email_domain: true
# Logtail configuration
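The new issuer path follows Kanidm's per-client OIDC layout, and the allowed_groups entry assumes a vpn group with that exact spn exists in the IdM. If those assumptions hold, the discovery document Headscale will consume can be sanity-checked with (idm.example.com standing in for {{ idm_domain }}):

    curl -s https://idm.example.com/oauth2/openid/headscale/.well-known/openid-configuration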
diff --git a/playbooks/roles/outbound/templates/headscale/docker-compose.yml b/playbooks/roles/outbound/templates/headscale/docker-compose.yml
index ee140fb..04b3d9f 100644
--- a/playbooks/roles/outbound/templates/headscale/docker-compose.yml
+++ b/playbooks/roles/outbound/templates/headscale/docker-compose.yml
@@ -12,6 +12,7 @@ services:
networks:
- proxy
environment:
+ - DEPLOYMENT_TIME={{ now() }}
- VIRTUAL_HOST={{ headscale_host }}
- VIRTUAL_PORT={{ headscale_port }}
- LETSENCRYPT_HOST={{ headscale_host }}
@@ -28,6 +29,7 @@ services:
networks:
- proxy
environment:
+ - DEPLOYMENT_TIME={{ now() }}
- VIRTUAL_HOST={{ headscale_host }}
- VIRTUAL_PORT={{ headscale_port }}
- LETSENCRYPT_HOST={{ headscale_host }}
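DEPLOYMENT_TIME is presumably there only so the rendered compose file differs on every play run, forcing docker-rollout to see a changed service and replace the containers; nothing in the images is expected to read it. The value baked in by the last run can be inspected with:

    docker compose -f /etc/docker/compose/headscale/docker-compose.yml config | grep DEPLOYMENT_TIME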
diff --git a/playbooks/roles/outbound/templates/proxy/docker-compose.yml b/playbooks/roles/outbound/templates/proxy/docker-compose.yml
index 9642d6a..c5aa3ac 100644
--- a/playbooks/roles/outbound/templates/proxy/docker-compose.yml
+++ b/playbooks/roles/outbound/templates/proxy/docker-compose.yml
@@ -1,36 +1,69 @@
+---
+
services:
headscale-client:
image: tailscale/tailscale:latest
environment:
+ - DEPLOYMENT_TIME={{ now() }}
- TS_AUTHKEY={{ headscale_user_auth_key }}
- TS_EXTRA_ARGS=--login-server=https://{{ headscale_host }} --accept-routes --accept-dns --stateful-filtering=false
- TS_STATE_DIR=/var/lib/tailscale
- TS_USERSPACE=false
- TZ={{ timezone }}
-
- - VIRTUAL_HOST=*.{{ domain }},{{ domain }}
- - VIRTUAL_PORT=80
- - LETSENCRYPT_HOST=*.{{ domain }},{{ domain }}
- hostname: headscale-outbound
restart: unless-stopped
+ healthcheck:
+ test: ["CMD-SHELL", "tailscale status"]
+ interval: 1s
+ timeout: 5s
+ retries: 10
+ hostname: headscale-outbound
cap_add:
- NET_ADMIN
- SYS_ADMIN
volumes:
- ./data:/var/lib/tailscale
+ - ./headscale/headscale-client.sh:/headscale-client.sh
- /dev/net/tun:/dev/net/tun
networks:
- - proxy
- proxy:
+      - internal_subnet_router # bridge shared with the nginx filter, which routes its traffic via this container
+ sysctls:
+ - net.ipv4.ip_forward=1
+ entrypoint: ["/bin/sh"]
+ command: /headscale-client.sh
+
+ {{ vpn_proxy_filter_container_name }}:
image: nginx:latest
- network_mode: service:headscale-client
+ entrypoint: ["/bin/sh"]
+ command: /wait-for-bridge.sh
+ pre_stop:
+ - command: /dont-die-until-conn-closed.sh
+ cap_add:
+ - NET_ADMIN # to modify the routing table
+ environment:
+ - DEPLOYMENT_TIME={{ now() }}
+ - VIRTUAL_HOST=*.{{ domain }},{{ domain }}
+ - VIRTUAL_PORT=80
+ - LETSENCRYPT_HOST=*.{{ domain }},{{ domain }}
+ healthcheck:
+ test: ["CMD", "curl", "http://localhost/health"]
+ interval: 10s
+ timeout: 5s
+ retries: 3
depends_on:
- headscale-client
volumes:
- - ./sites-enabled:/etc/nginx/conf.d
- - ./toplevel.conf.d:/etc/nginx/toplevel.conf.d
+ - ./nginx/nginx.conf:/etc/nginx/nginx.conf
+ - ./nginx/conf.d:/etc/nginx/conf.d
+ - ./nginx/toplevel.conf.d:/etc/nginx/toplevel.conf.d
+ - ./nginx/wait-for-bridge.sh:/wait-for-bridge.sh
+ - ./nginx/dont-die-until-conn-closed.sh:/dont-die-until-conn-closed.sh
+ networks:
+ - proxy
+ - internal_subnet_router
networks:
+ internal_subnet_router:
+ driver: bridge
proxy:
external: true
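The proxy is now split into a tailscale client acting as the egress hop and an nginx filter container, joined by the internal_subnet_router bridge; nginx is expected to push all upstream traffic through the tailscale container. With both services up, the wiring can be checked roughly like this (<proxy_filter> stands for the rendered {{ vpn_proxy_filter_container_name }}):

    # nginx should default-route via the headscale-client bridge address
    docker compose exec <proxy_filter> ip route show default
    # the tailscale side should be logged in to the headnet
    docker compose exec headscale-client tailscale status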
diff --git a/playbooks/roles/outbound/templates/proxy/headscale/headscale-client.sh b/playbooks/roles/outbound/templates/proxy/headscale/headscale-client.sh
new file mode 100755
index 0000000..1ce0acb
--- /dev/null
+++ b/playbooks/roles/outbound/templates/proxy/headscale/headscale-client.sh
@@ -0,0 +1,14 @@
+#!/bin/sh
+
+/usr/local/bin/containerboot &
+
+while ! tailscale status > /dev/null 2>&1; do
+ sleep 1
+ echo '[+] Waiting for tailscale status to be up...'
+done
+
+echo '[+] Tailscale is up. Enabling NAT...'
+iptables -t nat -A POSTROUTING -o tailscale0 -j MASQUERADE
+echo '[+] Done.'
+
+tail -f /dev/null
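The wrapper backgrounds containerboot, waits for the tailscale client to come up, then masquerades bridge traffic out of tailscale0 so packets from the nginx container get a valid source address on the headnet. Whether the rule landed can be confirmed with:

    docker compose exec headscale-client iptables -t nat -S POSTROUTING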
diff --git a/playbooks/roles/outbound/templates/proxy/sites-enabled/bin.conf b/playbooks/roles/outbound/templates/proxy/nginx/conf.d/bin.conf
index 3c5682d..3c5682d 100644
--- a/playbooks/roles/outbound/templates/proxy/sites-enabled/bin.conf
+++ b/playbooks/roles/outbound/templates/proxy/nginx/conf.d/bin.conf
diff --git a/playbooks/roles/outbound/templates/proxy/sites-enabled/default.conf b/playbooks/roles/outbound/templates/proxy/nginx/conf.d/default.conf
index d127cc5..f4a8007 100644
--- a/playbooks/roles/outbound/templates/proxy/sites-enabled/default.conf
+++ b/playbooks/roles/outbound/templates/proxy/nginx/conf.d/default.conf
@@ -4,4 +4,8 @@ server {
location / {
return 404;
}
+
+ location /health {
+ return 200;
+ }
}
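The added /health location is what the compose healthcheck for the nginx container polls. Assuming curl is available in the image (the healthcheck already relies on it), the endpoint can be exercised by hand:

    docker compose exec <proxy_filter> curl -fsS -o /dev/null -w '%{http_code}\n' http://localhost/health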
diff --git a/playbooks/roles/outbound/templates/proxy/sites-enabled/idm.conf b/playbooks/roles/outbound/templates/proxy/nginx/conf.d/idm.conf
index c85ebcf..c85ebcf 100644
--- a/playbooks/roles/outbound/templates/proxy/sites-enabled/idm.conf
+++ b/playbooks/roles/outbound/templates/proxy/nginx/conf.d/idm.conf
diff --git a/playbooks/roles/outbound/templates/proxy/sites-enabled/kanban.conf b/playbooks/roles/outbound/templates/proxy/nginx/conf.d/kanban.conf
index b668310..b668310 100644
--- a/playbooks/roles/outbound/templates/proxy/sites-enabled/kanban.conf
+++ b/playbooks/roles/outbound/templates/proxy/nginx/conf.d/kanban.conf
diff --git a/playbooks/roles/outbound/templates/proxy/sites-enabled/mail.conf b/playbooks/roles/outbound/templates/proxy/nginx/conf.d/mail.conf
index c810f5a..c810f5a 100644
--- a/playbooks/roles/outbound/templates/proxy/sites-enabled/mail.conf
+++ b/playbooks/roles/outbound/templates/proxy/nginx/conf.d/mail.conf
diff --git a/playbooks/roles/outbound/templates/proxy/sites-enabled/notes.conf b/playbooks/roles/outbound/templates/proxy/nginx/conf.d/notes.conf
index f7937dd..f7937dd 100644
--- a/playbooks/roles/outbound/templates/proxy/sites-enabled/notes.conf
+++ b/playbooks/roles/outbound/templates/proxy/nginx/conf.d/notes.conf
diff --git a/playbooks/roles/outbound/templates/proxy/nginx/dont-die-until-conn-closed.sh b/playbooks/roles/outbound/templates/proxy/nginx/dont-die-until-conn-closed.sh
new file mode 100755
index 0000000..967c2c0
--- /dev/null
+++ b/playbooks/roles/outbound/templates/proxy/nginx/dont-die-until-conn-closed.sh
@@ -0,0 +1,56 @@
+#!/bin/bash
+
+echo "[/] sleeping to wait for some time for container to be marked as stop."
+# https://stackoverflow.com/a/45146086
+sleep 3
+
+pid_file="/run/nginx.pid"
+max_wait_seconds=30
+
+if [ ! -f "$pid_file" ]; then
+ echo "[!] Nginx PID file not found at $pid_file. Assuming Nginx not running or already stopped."
+ exit 0
+fi
+
+PID=$(cat "$pid_file")
+
+# Validate PID
+if [ -z "$PID" ] || ! [[ "$PID" =~ ^[0-9]+$ ]]; then
+ echo "[!] Invalid PID found in $pid_file: '$PID'"
+ exit 1
+fi
+
+# Check if the process actually exists before sending quit
+# kill -0 PID checks if a signal can be sent.
+if ! kill -0 "$PID" 2>/dev/null; then
+ echo "[!] Nginx process $PID not found or already stopped."
+ exit 0 # Exit successfully
+fi
+
+echo "[/] sending signal to nginx (PID: $PID) to quit"
+nginx -s quit
+
+start_time=$SECONDS
+echo "[/] Waiting for Nginx (PID: $PID) to stop (max ${max_wait_seconds}s)..."
+
+while [ -d /proc/$PID ]; do
+ current_time=$SECONDS
+ elapsed_time=$((current_time - start_time))
+
+ if [ "$elapsed_time" -ge "$max_wait_seconds" ]; then
+ echo "[!] Timeout: Nginx process $PID did not stop within ${max_wait_seconds} seconds."
+ echo "[!] Sending SIGKILL to PID $PID."
+ kill -9 "$PID" 2>/dev/null
+
+ exit 1
+ fi
+
+ sleep 0.5
+  if [ $((elapsed_time % 5)) -eq 0 ]; then # plain shell arithmetic; bc is not in the nginx image
+ echo "[/] Nginx (PID: $PID) still running (waited ${elapsed_time}s)..."
+ fi
+done
+
+echo "[+] Nginx process $PID stopped gracefully."
+echo "[+] done. goodbye."
+exit 0
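This script is wired in as the compose pre_stop hook (which needs a Compose release recent enough to support lifecycle hooks): it asks nginx to quit gracefully, waits up to 30 seconds for the master process to drain connections, and only then falls back to SIGKILL. A rough manual test is to stop the service while tailing its logs:

    docker compose stop <proxy_filter> &
    docker compose logs -f <proxy_filter>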
diff --git a/playbooks/roles/outbound/templates/proxy/nginx/nginx.conf b/playbooks/roles/outbound/templates/proxy/nginx/nginx.conf
new file mode 100644
index 0000000..32feb3a
--- /dev/null
+++ b/playbooks/roles/outbound/templates/proxy/nginx/nginx.conf
@@ -0,0 +1,30 @@
+user www-data;
+worker_processes 4;
+pid /run/nginx.pid;
+# load_module modules/ndk_http_module.so;
+# load_module modules/ngx_http_set_misc_module.so;
+
+events {
+ worker_connections 768;
+}
+
+include /etc/nginx/toplevel.conf.d/*.conf;
+
+http {
+ charset utf-8;
+ sendfile on;
+ tcp_nopush on;
+ tcp_nodelay on;
+ keepalive_timeout 65;
+ types_hash_max_size 2048;
+ include /etc/nginx/mime.types;
+ default_type application/octet-stream;
+
+ access_log /var/log/nginx/access.log;
+ error_log /var/log/nginx/error.log;
+
+ gzip on;
+ gzip_disable "msie6";
+
+ include /etc/nginx/conf.d/*.conf;
+}
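Shipping a full nginx.conf is what allows the stream configuration to live under toplevel.conf.d: a stream {} block must sit at the main configuration level, outside the http {} context where conf.d is included. The assembled configuration can be validated in place with:

    docker compose exec <proxy_filter> nginx -t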
diff --git a/playbooks/roles/outbound/templates/proxy/nginx/toplevel.conf.d/stream.conf b/playbooks/roles/outbound/templates/proxy/nginx/toplevel.conf.d/stream.conf
new file mode 100644
index 0000000..193e65a
--- /dev/null
+++ b/playbooks/roles/outbound/templates/proxy/nginx/toplevel.conf.d/stream.conf
@@ -0,0 +1,56 @@
+stream {
+ log_format basic '$proxy_protocol_addr - [$time_local] '
+ '$protocol $status $bytes_sent $bytes_received '
+ '$session_time';
+ upstream imaps {
+ server {{ loadbalancer_ip }}:993;
+ }
+ upstream smtps {
+ server {{ loadbalancer_ip }}:465;
+ }
+ upstream smtptls {
+ server {{ loadbalancer_ip }}:587;
+ }
+ upstream smtp {
+ server {{ loadbalancer_ip }}:25;
+ }
+ upstream managesieve {
+ server {{ loadbalancer_ip }}:4190;
+ }
+
+ server {
+ set_real_ip_from {{ docker_network }};
+ listen 993 proxy_protocol;
+
+ proxy_pass imaps;
+ proxy_protocol on;
+ }
+ server {
+ set_real_ip_from {{ docker_network }};
+ listen 25 proxy_protocol;
+
+ proxy_pass smtp;
+ proxy_protocol on;
+ }
+ server {
+ set_real_ip_from {{ docker_network }};
+ listen 587 proxy_protocol;
+
+ proxy_pass smtptls;
+ proxy_protocol on;
+ }
+ server {
+ set_real_ip_from {{ docker_network }};
+ listen 465 proxy_protocol;
+
+ proxy_pass smtps;
+ proxy_protocol on;
+ }
+ server {
+ set_real_ip_from {{ docker_network }};
+ listen 4190 proxy_protocol;
+
+ proxy_pass managesieve;
+ proxy_protocol on;
+ }
+}
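Compared with the deleted sites-enabled version further down, every mail listener now both accepts PROXY protocol from the front proxy (listen ... proxy_protocol with set_real_ip_from) and re-emits it towards the load balancer (proxy_protocol on), so the original client address survives the extra hop. That the listeners picked the option up can be confirmed with:

    docker compose exec <proxy_filter> nginx -T | grep -A2 'listen.*proxy_protocol'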
diff --git a/playbooks/roles/outbound/templates/proxy/nginx/wait-for-bridge.sh b/playbooks/roles/outbound/templates/proxy/nginx/wait-for-bridge.sh
new file mode 100755
index 0000000..da273a9
--- /dev/null
+++ b/playbooks/roles/outbound/templates/proxy/nginx/wait-for-bridge.sh
@@ -0,0 +1,33 @@
+#!/bin/sh
+
+set -e
+
+echo "[+] Waiting for headscale-client to be resolvable..."
+
+# Loop until headscale-client IP is found or timeout
+timeout=30
+start_time=$(date +%s)
+
+HEADSCALE_IP=""
+while [ -z "$HEADSCALE_IP" ]; do
+ HEADSCALE_IP=$(getent hosts headscale-client | awk '{ print $1 }' | head -n 1)
+ current_time=$(date +%s)
+ if [ $((current_time - start_time)) -ge $timeout ]; then
+ echo "[-] Timeout waiting for headscale-client DNS resolution." >&2
+ exit 1
+ fi
+ if [ -z "$HEADSCALE_IP" ]; then
+ sleep 1
+ fi
+done
+
+echo "[+] Found headscale-client IP: $HEADSCALE_IP"
+echo "[+] Attempting to modify routing table..."
+
+apt update && apt install -y iproute2
+ip route del default || echo "[-] Warning: Failed to delete default route (maybe none existed)."
+ip route add default via $HEADSCALE_IP
+echo "[+] Default route set via $HEADSCALE_IP."
+
+echo "[+] Starting Nginx..."
+nginx -g "daemon off;"
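The entrypoint resolves the headscale-client service by name, installs iproute2 at start-up (the stock nginx image does not ship the ip tool), and replaces the default route before starting nginx in the foreground. Whether a headnet destination actually goes through that hop can be checked with something like (<loadbalancer_ip> standing for the templated value):

    docker compose exec <proxy_filter> ip route get <loadbalancer_ip>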
diff --git a/playbooks/roles/outbound/templates/proxy/toplevel.conf.d/stream.conf b/playbooks/roles/outbound/templates/proxy/toplevel.conf.d/stream.conf
deleted file mode 100644
index 68d5445..0000000
--- a/playbooks/roles/outbound/templates/proxy/toplevel.conf.d/stream.conf
+++ /dev/null
@@ -1,38 +0,0 @@
-stream {
- upstream imaps {
- server {{ loadbalancer_ip }}:993;
- }
- upstream smtps {
- server {{ loadbalancer_ip }}:465;
- }
- upstream smtptls {
- server {{ loadbalancer_ip }}:587;
- }
- upstream smtp {
- server {{ loadbalancer_ip }}:25;
- }
- upstream managesieve {
- server {{ loadbalancer_ip }}:4190;
- }
-
- server {
- listen 993;
- proxy_pass imaps;
- }
- server {
- listen 25;
- proxy_pass smtp;
- }
- server {
- listen 587;
- proxy_pass smtptls;
- }
- server {
- listen 465;
- proxy_pass smtps;
- }
- server {
- listen 4190;
- proxy_pass managesieve;
- }
-}