| author | Elizabeth Hunt <me@liz.coffee> | 2025-04-27 21:15:30 -0700 |
|---|---|---|
| committer | Elizabeth Hunt <me@liz.coffee> | 2025-04-27 21:25:52 -0700 |
| commit | daef0cf448af17357b552245f39067a9d340ce3d (patch) | |
| tree | f65a660f7232f057b0c14e477c166006bfb83f87 | |
| parent | 1dcdfe34a74708f88aad68af965f4bb5c79adff1 (diff) | |
| download | infra-daef0cf448af17357b552245f39067a9d340ce3d.tar.gz, infra-daef0cf448af17357b552245f39067a9d340ce3d.zip | |
Waow
56 files changed, 841 insertions, 457 deletions
@@ -129,29 +129,12 @@ class RoleGenerator:
         task_file.write_text(textwrap.dedent(f"""\
             ---
-            - name: Build {self.service} compose dirs
-              ansible.builtin.file:
-                state: directory
-                dest: '{{{{ {self.service}_base }}}}/{{{{ item.path }}}}'
-                owner: 1000
-                group: 1000
-                mode: 0755
-              with_filetree: '../templates'
-              when: item.state == 'directory'
-
-            - name: Build {self.service} compose files
-              ansible.builtin.template:
-                src: '{{{{ item.src }}}}'
-                dest: '{{{{ {self.service}_base }}}}/{{{{ item.path }}}}'
-                owner: 1000
-                group: 1000
-                mode: 0755
-              with_filetree: '../templates'
-              when: item.state == 'file'
-
-            - name: Deploy {self.service} stack
-              ansible.builtin.command:
-                cmd: 'docker stack deploy -c {{{{ {self.service}_base }}}}/stacks/docker-compose.yml {self.service}'
+            - name: Deploy {self.service}
+              ansible.builtin.import_tasks: manage-docker-swarm-service.yml
+              vars:
+                service_name: {self.service}
+                template_render_dir: "../templates"
+                service_destination_dir: "{{{{ {self.service}_base }}}}"
             """))
 
     def create_compose_template(self):
@@ -165,10 +148,23 @@ class RoleGenerator:
                   - {{{{ {self.service}_base }}}}/volumes/data:/data
                 environment:
                   - TZ={{{{ timezone }}}}
+                  - DEPLOYMENT_TIME={{{{ now() }}}}
                 networks:
                   - proxy
+                healthcheck:
+                  test: ["CMD", "curl", "--fail", "http://localhost:8000"]
+                  timeout: 5s
+                  interval: 30s
+                  retries: 2
+                  start_period: 8s
                 deploy:
                   mode: replicated
+                  update_config:
+                    parallelism: 1
+                    failure_action: rollback
+                    order: start-first
+                    delay: 15s
+                    monitor: 10s
                   replicas: 1
                   labels:
                     - traefik.enable=true
@@ -21,9 +21,6 @@
 - name: Traefik
   ansible.builtin.import_playbook: playbooks/traefik.yml
 
-- name: Portainer
-  ansible.builtin.import_playbook: playbooks/portainer.yml
-
 - name: Keepalived
   ansible.builtin.import_playbook: playbooks/keepalived.yml
 
@@ -36,6 +33,12 @@
 - name: Kanidm
   ansible.builtin.import_playbook: playbooks/kanidm.yml
 
+- name: mail
+  ansible.builtin.import_playbook: playbooks/mail.yml
+
+- name: Portainer
+  ansible.builtin.import_playbook: playbooks/portainer.yml
+
 - name: Kanboard
   ansible.builtin.import_playbook: playbooks/kanboard.yml
 
@@ -44,6 +47,3 @@
 
 - name: bin
   ansible.builtin.import_playbook: playbooks/bin.yml
-
-- name: mail
-  ansible.builtin.import_playbook: playbooks/mail.yml
diff --git a/group_vars/all.yml b/group_vars/all.yml
index 8b31f3f..cef1f9b 100644
--- a/group_vars/all.yml
+++ b/group_vars/all.yml
@@ -7,9 +7,11 @@ homelab_build: false
 ansible_user: serve
 
 loadbalancer_ip: "10.128.0.200"
+homelab_network: "10.0.0.0/8"
+docker_network: "172.16.0.0/12"
 rfc1918_cgnat_networks:
-  - 10.0.0.0/8
-  - 172.16.0.0/12
+  - "{{ homelab_network }}"
+  - "{{ docker_network }}"
   - 192.168.0.0/16
   - 100.64.0.0/10
diff --git a/group_vars/mail.yml b/group_vars/mail.yml
index 3cfbeb3..22d72c7 100644
--- a/group_vars/mail.yml
+++ b/group_vars/mail.yml
@@ -35,7 +35,8 @@ roundcube_default_host: "ssl://{{ mail_domain }}"
 roundcube_default_port: 993
 roundcube_smtp_host: "ssl://{{ mail_domain }}"
 roundcube_smtp_port: 465
-roundcube_plugins: "archive,zipdownload,managesieve,markasjunk,enigma"
+roundcube_plugins: "archive,zipdownload,managesieve,markasjunk,enigma,roundcube_skins"
+roundcube_composer_plugins: "texxasrulez/roundcube_skins"
 
 roundcube_oauth2_auth_uri: "https://{{ idm_domain }}/ui/oauth2"
 roundcube_oauth2_user_uri: >
diff --git a/group_vars/outbound.yml b/group_vars/outbound.yml
index 3a9a51a..2c7c6c8 100644
--- a/group_vars/outbound.yml
+++ b/group_vars/outbound.yml
@@ -2,20 +2,19 @@
 headscale_url: 'https://{{ headscale_host }}'
 headscale_base_domain: '{{ headscale_host }}'
+headscale_base: '/etc/docker/compose/headscale'
 headscale_port: '8080'
 headscale_listen_addr: '0.0.0.0:{{ headscale_port }}'
 headscale_dns_for_connected_clients_1: '{{ loadbalancer_ip }}'
 headscale_dns_for_connected_clients_2: '1.0.0.1'
 
+vpn_proxy_filter_container_name: 'headscale-proxy'
+proxy_base: '/etc/docker/compose/proxy'
+
 generate_api_key: '{{ homelab_build }}'
-api_key_expiration: '1y'
+api_key_expiration: '2y'
 
 generate_auth_key: '{{ homelab_build }}'
-auth_key_expiration: '1y'
+auth_key_expiration: '2y'
 auth_key_user: 'pocketmonsters'
-
-headscale_allowed_users:
-  - liz
-  - lucina
-  - riley
-  - "{{ auth_key_user }}"
diff --git a/playbooks/roles/bin/tasks/main.yml b/playbooks/roles/bin/tasks/main.yml
index 5254826..568703a 100644
--- a/playbooks/roles/bin/tasks/main.yml
+++ b/playbooks/roles/bin/tasks/main.yml
@@ -1,19 +1,9 @@
 ---
-- name: Build bin compose dirs
-  ansible.builtin.file:
-    state: directory
-    dest: '{{ bin_base }}/{{ item.path }}'
-  with_filetree: '../templates'
-  when: item.state == 'directory'
+- name: Deploy Bin
+  ansible.builtin.import_tasks: manage-docker-swarm-service.yml
+  vars:
+    service_name: bin
+    template_render_dir: "../templates"
+    service_destination_dir: "{{ bin_base }}"
 
-- name: Build bin compose files
-  ansible.builtin.template:
-    src: '{{ item.src }}'
-    dest: '{{ bin_base }}/{{ item.path }}'
-  with_filetree: '../templates'
-  when: item.state == 'file'
-
-- name: Deploy bin stack
-  ansible.builtin.command:
-    cmd: 'docker stack deploy -c {{ bin_base }}/stacks/docker-compose.yml bin'
diff --git a/playbooks/roles/bin/templates/stacks/docker-compose.yml b/playbooks/roles/bin/templates/stacks/docker-compose.yml
index 75f48f4..f218b74 100644
--- a/playbooks/roles/bin/templates/stacks/docker-compose.yml
+++ b/playbooks/roles/bin/templates/stacks/docker-compose.yml
@@ -5,15 +5,28 @@ services:
       - {{ bin_base }}/volumes/data:/data
     environment:
       - TZ={{ timezone }}
+      - DEPLOYMENT_TIME={{ now() }}
       - TRUST_PROXY=true
       - API_URL=https://{{ bin_domain }}
       - DATA_DIRECTORY=/data
       - DATABASE_URL=file:/data/pingvin-share.db?connection_limit=1
+    healthcheck:
+      test: ["CMD", "curl", "--fail", "http://localhost:3000/api/configs"]
+      timeout: 3s
+      interval: 1m
+      retries: 2
+      start_period: 10s
     networks:
       - proxy
     deploy:
       mode: replicated
       replicas: 1
+      update_config:
+        parallelism: 1
+        failure_action: rollback
+        order: start-first
+        delay: 10s
+        monitor: 10s
       labels:
         - traefik.enable=true
        - traefik.swarm.network=proxy
diff --git a/playbooks/roles/ceph/tasks/main.yml b/playbooks/roles/ceph/tasks/main.yml
index 69a769a..7b0a56d 100644
--- a/playbooks/roles/ceph/tasks/main.yml
+++ b/playbooks/roles/ceph/tasks/main.yml
@@ -13,6 +13,7 @@
       [global]
       fsid = {{ ceph_fsid }}
       mon_host = {{ ceph_mon_host }}
+      client_permissions = false
     dest: /etc/ceph/ceph.conf
     mode: '0644'
 
@@ -36,7 +37,7 @@
   ansible.builtin.lineinfile:
     path: /etc/fstab
     regexp: '{{ ceph_base }}\w+fuse.ceph'
-    line: "none {{ ceph_base }} fuse.ceph ceph.id={{ ceph_client_name }},_netdev,defaults,umask=000 0 0"
+    line: "none {{ ceph_base }} fuse.ceph ceph.id={{ ceph_client_name }},_netdev,defaults 0 0"
     create: true
     mode: "0644"
diff --git a/playbooks/roles/common/files/authorized_keys b/playbooks/roles/common/files/authorized_keys
index 6d49a82..abc559d 100644
--- a/playbooks/roles/common/files/authorized_keys
+++ b/playbooks/roles/common/files/authorized_keys
@@ -1 +1,2 @@
 ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPnLAE5TrdYF8QWCSkvgUp15XKcwQJ9393a/CghSo8dG serve@ansible
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDRHu3h9mDjQyFbojcxGKW0hPUDfgUmb2WCzd4Dv2qISM3GGt9LjD8o0IbWRNaTf5UyId5lu7wNHtygs5ZDfUVnlfxrI1CmoExuqkYFjy+R9Cu0x1J2w7+MrKPBd5akLCuKTTnXbyv79T0tLb07rCpGHojW8HH6wdDtg0siVqsPqZVTjg7WGbBYqiqlA5p8s+V9xN1q8lTOZrRI0PdgoU8W+1oIr9OHSG1ZeUBQx60izTEwMnWBxY2aA8SQolIVvsJCcMMc/EAnaz/rdJ5IkeqXGslIhUI7WCPHnPWN8CSdwMOLi5BNaOAK7Y2FkfKTUlO7I52BL87Cl3YpMxR0mTDrfSJTSp0B3ZAbUIXDA7biSh04YLwGQVI799vcyJf355A60btPaiuiBgI0am3h0WxnOACg7K6eV023EiUQ24UjlQ8pufHcJ1oDW8v6LHlp/atCWOl9KQIun9UUg8DD8/BLPprc0wzAV6Nco0ZIedouxZuUhduYYvUrLJ+ICpaZg6oPGitVJPIgyyI+WTfjRN4WTj/Z3Yhuj0RqF8b5ea4FNWuJtfF724t7SVnZsYlZGSCqL8gaEzbIATVe3THn5VwbK+S4ELD/9W6MOd6aZcTOK2yP3jlwjcjnW8sLuX+2qNwtSVVa4o5VsRZU40Da+3flzoBsyUwSE3H2PsFPH29lIQ== lizzy@yubikey
diff --git a/playbooks/roles/docker/handlers/main.yml b/playbooks/roles/docker/handlers/main.yml
index 2db0186..8725d1e 100644
--- a/playbooks/roles/docker/handlers/main.yml
+++ b/playbooks/roles/docker/handlers/main.yml
@@ -6,3 +6,6 @@
     state: restarted
     enabled: true
 
+- name: Reload ufw
+  community.general.ufw:
+    state: reloaded
diff --git a/playbooks/roles/docker/tasks/main.yml b/playbooks/roles/docker/tasks/main.yml
index 8b91f6a..a156e4e 100644
--- a/playbooks/roles/docker/tasks/main.yml
+++ b/playbooks/roles/docker/tasks/main.yml
@@ -53,3 +53,10 @@
     state: directory
     mode: 0700
 
+- name: Allow all traffic from Docker subnets
+  community.general.ufw:
+    rule: allow
+    from_ip: "172.16.0.0/12"
+    to_ip: "any"
+  notify:
+    - Reload ufw
diff --git a/playbooks/roles/kanboard/tasks/main.yml b/playbooks/roles/kanboard/tasks/main.yml
index 3d1efb8..70f0f59 100644
--- a/playbooks/roles/kanboard/tasks/main.yml
+++ b/playbooks/roles/kanboard/tasks/main.yml
@@ -1,19 +1,9 @@
 ---
-- name: Build kanboard compose dirs
-  ansible.builtin.file:
-    state: directory
-    dest: '{{ kanboard_base }}/{{ item.path }}'
-  with_filetree: '../templates'
-  when: item.state == 'directory'
+- name: Deploy Kanboard
+  ansible.builtin.import_tasks: manage-docker-swarm-service.yml
+  vars:
+    service_name: kanboard
+    template_render_dir: "../templates"
+    service_destination_dir: "{{ kanboard_base }}"
 
-- name: Build kanboard compose files
-  ansible.builtin.template:
-    src: '{{ item.src }}'
-    dest: '{{ kanboard_base }}/{{ item.path }}'
-  with_filetree: '../templates'
-  when: item.state == 'file'
-
-- name: Deploy kanboard stack
-  ansible.builtin.command:
-    cmd: "docker stack deploy -c {{ kanboard_base }}/stacks/docker-compose.yml kanboard"
diff --git a/playbooks/roles/kanboard/templates/stacks/docker-compose.yml b/playbooks/roles/kanboard/templates/stacks/docker-compose.yml
index abed6ce..1055c25 100644
--- a/playbooks/roles/kanboard/templates/stacks/docker-compose.yml
+++ b/playbooks/roles/kanboard/templates/stacks/docker-compose.yml
@@ -1,13 +1,17 @@
-version: '3.2'
-
 services:
   kanboard:
     image: kanboard/kanboard:latest
     volumes:
       - {{ kanboard_base }}/volumes/data:/var/www/app/data
       - {{ kanboard_base }}/volumes/plugins:/var/www/app/plugins
+    healthcheck:
+      test: ["CMD-SHELL", "curl --fail http://localhost:80/login"]
+      retries: 3
+      timeout: 5s
+      start_period: 5s
     environment:
       - TZ={{ timezone }}
+      - DEPLOYMENT_TIME={{ now() }}
       - LOG_DRIVER=syslog
       - LDAP_AUTH={{ kanboard_ldap_auth }}
       - LDAP_SERVER={{ kanboard_ldap_server }}
@@ -29,6 +33,11 @@ services:
     deploy:
       mode: replicated
       replicas: 1
+      update_config:
+        parallelism: 1
+        order: start-first
+        failure_action: rollback
+        monitor: 5s
       labels:
         - traefik.enable=true
         - traefik.swarm.network=proxy
diff --git a/playbooks/roles/kanidm/tasks/main.yml b/playbooks/roles/kanidm/tasks/main.yml
index a004910..7d7adc1 100644
--- a/playbooks/roles/kanidm/tasks/main.yml
+++ b/playbooks/roles/kanidm/tasks/main.yml
@@ -1,19 +1,9 @@
 ---
-- name: Build kanidm compose dirs
-  ansible.builtin.file:
-    state: directory
-    dest: '{{ kanidm_base }}/{{ item.path }}'
-  with_filetree: '../templates'
-  when: item.state == 'directory'
+- name: Deploy kanidm
+  ansible.builtin.import_tasks: manage-docker-swarm-service.yml
+  vars:
+    service_name: kanidm
+    template_render_dir: "../templates"
+    service_destination_dir: "{{ kanidm_base }}"
 
-- name: Build kanidm compose files
-  ansible.builtin.template:
-    src: '{{ item.src }}'
-    dest: '{{ kanidm_base }}/{{ item.path }}'
-  with_filetree: '../templates'
-  when: item.state == 'file'
-
-- name: Deploy Kanidm stack
-  ansible.builtin.command:
-    cmd: "docker stack deploy -c {{ kanidm_base }}/stacks/docker-compose.yml kanidm"
diff --git a/playbooks/roles/kanidm/templates/stacks/docker-compose.yml b/playbooks/roles/kanidm/templates/stacks/docker-compose.yml
index 7f8bfe2..183d77e 100644
--- a/playbooks/roles/kanidm/templates/stacks/docker-compose.yml
+++ b/playbooks/roles/kanidm/templates/stacks/docker-compose.yml
@@ -4,6 +4,8 @@ services:
     volumes:
       - {{ kanidm_base }}/volumes/data:/data
       - {{ letsencrypt_certs }}:/certs:ro
+    ports:
+      - 3636:3636
     networks:
       - proxy
 {% if homelab_build %}
@@ -15,10 +17,24 @@ services:
       /sbin/kanidmd server -c /data/server.toml
     healthcheck:
       disable: true
+{% else %}
+    healthcheck:
+      test: ["CMD-SHELL", "curl --fail -k https://localhost:8443/status"]
+      retries: 1
+      timeout: 2s
+      interval: 30s
 {% endif %}
+    environment:
+      - TZ={{ timezone }}
+      - DEPLOYMENT_TIME={{ now() }}
     deploy:
       mode: replicated
       replicas: 1
+      update_config:
+        parallelism: 1
+        order: start-first
+        failure_action: rollback
+        monitor: 5s
       labels:
         - traefik.enable=true
         - traefik.swarm.network=proxy
@@ -28,12 +44,6 @@ services:
         - traefik.http.routers.kanidm.entrypoints=websecure
         - traefik.http.services.kanidm.loadbalancer.server.port=8443
         - traefik.http.services.kanidm.loadbalancer.server.scheme=https
-        # ldap
-        - traefik.tcp.routers.kanidm-ldaps.tls.passthrough=true
-        - traefik.tcp.routers.kanidm-ldaps.rule=HostSNI(`*`)
-        - traefik.tcp.routers.kanidm-ldaps.entrypoints=ldaps
-        - traefik.tcp.routers.kanidm-ldaps.service=kanidm-ldaps
-        - traefik.tcp.services.kanidm-ldaps.loadbalancer.server.port=3636
 
 networks:
   proxy:
diff --git a/playbooks/roles/mail/tasks/main.yml b/playbooks/roles/mail/tasks/main.yml
index 4576500..b2a7ea8 100644
--- a/playbooks/roles/mail/tasks/main.yml
+++ b/playbooks/roles/mail/tasks/main.yml
@@ -1,19 +1,9 @@
 ---
-- name: Build mail compose dirs
-  ansible.builtin.file:
-    state: directory
-    dest: '{{ mail_base }}/{{ item.path }}'
-  with_filetree: '../templates'
-  when: item.state == 'directory'
+- name: Deploy mail
+  ansible.builtin.import_tasks: manage-docker-swarm-service.yml
+  vars:
+    service_name: mail
+    template_render_dir: "../templates"
+    service_destination_dir: "{{ mail_base }}"
 
-- name: Build mail compose files
-  ansible.builtin.template:
-    src: '{{ item.src }}'
-    dest: '{{ mail_base }}/{{ item.path }}'
-  with_filetree: '../templates'
-  when: item.state == 'file'
-
-- name: Deploy mail stack
-  ansible.builtin.command:
-    cmd: 'docker stack deploy -c {{ mail_base }}/stacks/docker-compose.yml mail'
diff --git a/playbooks/roles/mail/templates/stacks/docker-compose.yml b/playbooks/roles/mail/templates/stacks/docker-compose.yml
index 50108c1..b4cc3e0 100644
--- a/playbooks/roles/mail/templates/stacks/docker-compose.yml
+++ b/playbooks/roles/mail/templates/stacks/docker-compose.yml
@@ -4,11 +4,13 @@ services:
     restart: always
     volumes:
       - {{ mail_base }}/volumes/data/roundcube/db:/var/roundcube/db
-      - {{ mail_base }}/volumes/data/roundcube/config:/var/roundcube/config
+      - {{ mail_base }}/volumes/data/roundcube/config:/var/roundcube/config/
     environment:
+      - DEPLOYMENT_TIME={{ now() }}
       - ROUNDCUBEMAIL_DB_TYPE=sqlite
-      - ROUNDCUBEMAIL_SKIN=elastic
+      - ROUNDCUBEMAIL_SKIN={{ roundcube_skin | default('elastic') }}
       - ROUNDCUBEMAIL_PLUGINS={{ roundcube_plugins }}
+      - ROUNDCUBEMAIL_COMPOSER_PLUGINS={{ roundcube_composer_plugins }}
       - ROUNDCUBEMAIL_DEFAULT_HOST={{ roundcube_default_host }}
       - ROUNDCUBEMAIL_DEFAULT_PORT={{ roundcube_default_port }}
       - ROUNDCUBEMAIL_SMTP_SERVER={{ roundcube_smtp_host }}
@@ -16,6 +18,11 @@ services:
     networks:
       - proxy
       - roundcube
+    healthcheck:
+      test: ["CMD", "curl", "--fail", "http://localhost:8000"]
+      timeout: 3s
+      interval: 30s
+      retries: 2
     deploy:
       mode: replicated
       replicas: 1
@@ -31,70 +38,62 @@ services:
   mailserver:
     image: ghcr.io/docker-mailserver/docker-mailserver:latest
     hostname: {{ mail_domain }}
+    command:
+      - /scripts/wait-for-cert.sh
 {% if homelab_build %}
-    command:
-      - /bin/sh
-      - -c
-      - |
-        [ ! -f "/etc/letsencrypt/live/{{ mail_domain }}" ] && sleep 60 # Sleep until certificate requested from traefik
-        supervisord -c /etc/supervisor/supervisord.conf
     healthcheck:
       disable: true
+{% else %}
+    healthcheck:
+      test: ["CMD-SHELL", "ss --listening --tcp | grep -P :smtp"]
+      interval: 3s
+      timeout: 2s
+      retries: 3
 {% endif %}
+    ports:
+      - '25:25'
+      - '587:587'
+      - '465:465'
+      - '143:143'
+      - '993:993'
+      - '4190:4190'
+      - '110:110'
+      - '995:995'
+    stop_grace_period: 30s
     deploy:
       mode: replicated
       replicas: 1
-      labels:
-        - traefik.enable=true
-        - traefik.swarm.network=proxy
-        # ManageSieve
-        - traefik.tcp.routers.sieve.tls.passthrough=true
-        - traefik.tcp.routers.sieve.rule=HostSNI(`*`)
-        - traefik.tcp.routers.sieve.entrypoints=sieve
-        - traefik.tcp.routers.sieve.service=sieve
-        - traefik.tcp.services.sieve.loadbalancer.server.port=4190
-        # IMAP
-        - traefik.tcp.routers.imap.tls.passthrough=true
-        - traefik.tcp.routers.imap.rule=HostSNI(`*`)
-        - traefik.tcp.routers.imap.entrypoints=imap
-        - traefik.tcp.routers.imap.service=imap
-        - traefik.tcp.services.imap.loadbalancer.server.port=993
-        # SMTPS
-        - traefik.tcp.routers.smtps.tls.passthrough=true
-        - traefik.tcp.routers.smtps.rule=HostSNI(`*`)
-        - traefik.tcp.routers.smtps.entrypoints=smtps
-        - traefik.tcp.routers.smtps.service=smtps
-        - traefik.tcp.services.smtps.loadbalancer.server.port=465
-        # SMTP (StartTLS)
-        - traefik.tcp.routers.smtptls.tls.passthrough=true
-        - traefik.tcp.routers.smtptls.rule=HostSNI(`*`)
-        - traefik.tcp.routers.smtptls.entrypoints=smtptls
-        - traefik.tcp.routers.smtptls.service=smtptls
-        - traefik.tcp.services.smtptls.loadbalancer.server.port=587
-        # SMTP ("ye' old")
-        - traefik.tcp.routers.smtp.tls.passthrough=true
-        - traefik.tcp.routers.smtp.rule=HostSNI(`*`)
-        - traefik.tcp.routers.smtp.entrypoints=smtp
-        - traefik.tcp.routers.smtp.service=smtp
-        - traefik.tcp.services.smtp.loadbalancer.server.port=25
+      update_config:
+        parallelism: 1
+        failure_action: rollback
+        # order: start-first
+        # We need to stop the old container first because it holds a lock on the
+        # Postfix mail queue. I don't believe there is a feasible way to solve
+        # this without either a tiny bit of downtime waiting for the lock to clear,
+        # or lost mail since we'd have to ignore the lock and thus two competing
+        # mailservers are accepting mail.
+        # One of these is more acceptable than the other haha.
+        # See stuff in scripts/ for the last attempt if interested.
+        order: stop-first
     volumes:
-      - {{ mail_base }}/volumes/data/dms/vmail:/var/mail/
-      - {{ mail_base }}/volumes/data/dms/mail-state:/var/mail-state/
-      - {{ mail_base }}/volumes/data/dms/mail-logs:/var/log/mail/
-      - {{ mail_base }}/volumes/data/dms/config:/tmp/docker-mailserver/
+      - {{ mail_base }}/volumes/scripts/:/scripts/
+      - {{ mail_base }}/volumes/data/dms/vmail/:/var/mail/
+      - {{ mail_base }}/volumes/data/dms/mail-state/:/var/mail-state/
+      - {{ mail_base }}/volumes/data/dms/mail-logs/:/var/log/mail/
+      - {{ mail_base }}/volumes/data/dms/config/:/tmp/docker-mailserver/
       - {{ mail_base }}/volumes/data/dms/config/dovecot-ldap.conf:/etc/dovecot/dovecot-ldap.conf.ext
       - {{ letsencrypt_certs }}:/certs/:ro
       - /etc/localtime:/etc/localtime:ro
     environment:
+      - DEPLOYMENT_TIME={{ now() }}
       - SSL_TYPE=manual
       - SSL_CERT_PATH=/certs/{{ mail_domain }}.pem
       - SSL_KEY_PATH=/certs/{{ mail_domain }}.key
       - ENABLE_CLAMAV=0
       - ENABLE_AMAVIS=0
-      - ENABLE_FAIL2BAN=1
       - ENABLE_SASLAUTHD=1
       - ENABLE_MANAGESIEVE=1
-      - ENABLE_POSTGREY=0
+      - ENABLE_POSTGREY=1
       - SPOOF_PROTECTION=1
       - ACCOUNT_PROVISIONER=LDAP
 
@@ -121,12 +120,7 @@ services:
       - RELAY_USER={{ relay_user }}
       - RELAY_PASSWORD={{ relay_password }}
 
-    networks:
-      - mailserver
-      - proxy
-
 networks:
-  mailserver:
   roundcube:
   proxy:
     external: true
diff --git a/playbooks/roles/mail/templates/volumes/data/dms/config/dovecot.cf b/playbooks/roles/mail/templates/volumes/data/dms/config/dovecot.cf
new file mode 100644
index 0000000..62d0550
--- /dev/null
+++ b/playbooks/roles/mail/templates/volumes/data/dms/config/dovecot.cf
@@ -0,0 +1,27 @@
+haproxy_trusted_networks = {{ homelab_network }}
+
+service imap-login {
+  inet_listener imap {
+    haproxy = yes
+  }
+
+  inet_listener imaps {
+    haproxy = yes
+  }
+}
+
+service pop3-login {
+  inet_listener pop3 {
+    haproxy = yes
+  }
+
+  inet_listener pop3s {
+    haproxy = yes
+  }
+}
+
+service managesieve-login {
+  inet_listener sieve {
+    haproxy = yes
+  }
+}
diff --git a/playbooks/roles/mail/templates/volumes/data/dms/config/postfix-master.cf b/playbooks/roles/mail/templates/volumes/data/dms/config/postfix-master.cf
new file mode 100644
index 0000000..1885f4d
--- /dev/null
+++ b/playbooks/roles/mail/templates/volumes/data/dms/config/postfix-master.cf
@@ -0,0 +1,3 @@
+smtp/inet/postscreen_upstream_proxy_protocol=haproxy
+submission/inet/smtpd_upstream_proxy_protocol=haproxy
+submissions/inet/smtpd_upstream_proxy_protocol=haproxy
diff --git a/playbooks/roles/mail/templates/volumes/data/dms/config/user-patches.sh b/playbooks/roles/mail/templates/volumes/data/dms/config/user-patches.sh
index c62753f..1749499 100755
--- a/playbooks/roles/mail/templates/volumes/data/dms/config/user-patches.sh
+++ b/playbooks/roles/mail/templates/volumes/data/dms/config/user-patches.sh
@@ -3,7 +3,13 @@
 postconf -e 'smtpd_sasl_type = dovecot'
 postconf -e 'smtpd_sasl_path = /dev/shm/sasl-auth.sock'
 postconf -e 'smtpd_sasl_auth_enable = yes'
-postconf -e 'broken_sasl_auth_clients = yes'
+postconf -e 'broken_sasl_auth_clients = no'
+postconf -e 'smtpd_tls_auth_only = yes'
+postconf -e 'smtpd_tls_security_level = encrypt'
+
+postconf -e 'postscreen_bare_newline_enable = no'
+postconf -e 'postscreen_non_smtp_command_enable = no'
+postconf -e 'postscreen_pipelining_enable = no'
 
 postconf -e 'smtp_tls_wrappermode = yes'
 
 # for relay
@@ -34,8 +40,3 @@ userdb {
   args = username_format=%u uid=docker gid=docker home=/var/mail/%d/%u
   default_fields = uid=docker gid=docker home=/var/mail/%d/%u
 }" > /etc/dovecot/conf.d/auth-ldap.conf.ext
-
-#userdb {
-#  driver = static
-#  args = home=/var/mail/%u
-#}"
diff --git a/playbooks/roles/mail/templates/volumes/scripts/check-postfix-health.unused.sh b/playbooks/roles/mail/templates/volumes/scripts/check-postfix-health.unused.sh
new file mode 100644
index 0000000..198221a
--- /dev/null
+++ b/playbooks/roles/mail/templates/volumes/scripts/check-postfix-health.unused.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+LOCKFILE="/var/mail-state/lib-postfix/master.lock"
+
+function log() {
+  echo "[health] $@"
+}
+
+if [ -f "$LOCKFILE" ]; then
+  PID=$(cat "$LOCKFILE")
+  log "pid $PID"
+  if kill -0 "$PID" 2>/dev/null; then
+    if ss --listening --tcp | grep -P 'LISTEN.+:smtp' > /dev/null; then
+      log "successfully listening to smtp"
+      exit 0
+    fi
+  else
+    # Not our postfix lock.
+    exit 0
+  fi
+fi
+
+log "bad health state"
+exit 1
diff --git a/playbooks/roles/mail/templates/volumes/scripts/wait-for-cert.sh b/playbooks/roles/mail/templates/volumes/scripts/wait-for-cert.sh
new file mode 100644
index 0000000..0f8018c
--- /dev/null
+++ b/playbooks/roles/mail/templates/volumes/scripts/wait-for-cert.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+set -e
+
+function log() {
+  echo "[cert] $@"
+}
+
+CERT="/certs/{{ mail_domain }}.pem"
+MAX_TRIES=30
+TRY=0
+
+while [ ! -f "$CERT" ]; do
+  if [ "$TRY" -eq "$MAX_TRIES" ]; then
+    log "[$TRY/$MAX_TRIES] Max tries, failing."
+    exit 1
+  fi
+  log "[$TRY/$MAX_TRIES] Certificate nonexistent. Waiting..."
+  sleep 2
+  TRY=$((TRY + 1))
+done
+
+log "Cert check done. Starting DMS."
+supervisord -c /etc/supervisor/supervisord.conf
diff --git a/playbooks/roles/mail/templates/volumes/scripts/wait-for-postfix.unused.sh b/playbooks/roles/mail/templates/volumes/scripts/wait-for-postfix.unused.sh
new file mode 100644
index 0000000..3e8a8c5
--- /dev/null
+++ b/playbooks/roles/mail/templates/volumes/scripts/wait-for-postfix.unused.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+# This was an attempt to keep rolling updates with very little downtime.
+# I don't think it's worth it, and the nature of update_config provides
+# little flexibility to use here.
+
+set -e
+
+function log() {
+  echo "[startup] $@"
+}
+
+LOCKFILE="/var/mail-state/lib-postfix/master.lock"
+MAX_TRIES=30
+TRY=0
+
+while [ -f "$LOCKFILE" ]; do
+  if [ "$TRY" -eq "$MAX_TRIES" ]; then
+    log "[$TRY/$MAX_TRIES] Max tries, failing."
+    exit 1
+  fi
+  log "[$TRY/$MAX_TRIES] Lockfile exists, waiting for it to be cleaned up by previous container..."
+  sleep 2
+  TRY=$((TRY + 1))
+done
+
+log "Lock check done. Starting DMS."
+supervisord -c /etc/supervisor/supervisord.conf
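[Editor's note] The PROXY-protocol chain introduced above (edge nginx → dovecot/postscreen via the new dovecot.cf and postfix-master.cf) is easy to break with a single port or haproxy setting. A minimal post-deploy smoke test, sketched as an Ansible task — not part of this commit; it only reuses the existing mail_domain group var and checks TCP reachability:

- name: Smoke-test the proxied mail ports from the control node (illustrative)
  ansible.builtin.wait_for:
    host: "{{ mail_domain }}"
    port: "{{ item }}"
    timeout: 10
  loop: [25, 465, 587, 993, 4190]
  delegate_to: localhost
  become: false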
diff --git a/playbooks/roles/nginx-proxy/tasks/main.yml b/playbooks/roles/nginx-proxy/tasks/main.yml
index 50958e7..aa7f922 100644
--- a/playbooks/roles/nginx-proxy/tasks/main.yml
+++ b/playbooks/roles/nginx-proxy/tasks/main.yml
@@ -1,17 +1,13 @@
 ---
-- name: Build nginx-proxy compose dirs
-  ansible.builtin.file:
-    state: directory
-    dest: '{{ nginx_proxy_base }}/{{ item.path }}'
-  with_filetree: '../templates'
-  when: item.state == 'directory'
-
-- name: Build nginx-proxy compose files
-  ansible.builtin.template:
-    src: '{{ item.src }}'
-    dest: '{{ nginx_proxy_base }}/{{ item.path }}'
-  with_filetree: '../templates'
-  when: item.state == 'file'
-  notify:
-    - (Re)start nginx-proxy
+- name: Deploy nginx-proxy
+  ansible.builtin.import_tasks: manage-docker-compose-service.yml
+  vars:
+    service_name: nginx-proxy
+    template_render_dir: "../templates"
+    service_destination_dir: "{{ nginx_proxy_base }}"
+    state: restarted
+# can't rollout the nginx-proxy without a parent reverse proxy. which
+# would need a reverse proxy to rollout. which would need a... yeah you
+# get the idea.
+# rollout_services:
diff --git a/playbooks/roles/nginx-proxy/templates/docker-compose.yml b/playbooks/roles/nginx-proxy/templates/docker-compose.yml
index e0f56c4..c97f858 100644
--- a/playbooks/roles/nginx-proxy/templates/docker-compose.yml
+++ b/playbooks/roles/nginx-proxy/templates/docker-compose.yml
@@ -5,11 +5,24 @@ services:
     image: nginxproxy/nginx-proxy
     container_name: nginx-proxy
     ports:
+      # http
       - "80:80"
       - "443:443"
+      # smtp
+      - "25:25"
+      - "465:465"
+      - "587:587"
+      # imap
+      - "993:993"
+      # sieve
+      - "4190:4190"
     volumes:
       - /var/run/docker.sock:/tmp/docker.sock:ro
       - {{ nginx_proxy_base }}/certs:/etc/nginx/certs
+      - {{ nginx_proxy_base }}/toplevel.conf.d:/etc/nginx/toplevel.conf.d
+    environment:
+      - TZ={{ timezone }}
+      - DEPLOYMENT_TIME={{ now() }}
     networks:
       - proxy
     labels:
@@ -24,8 +37,10 @@ services:
       - acme:/etc/acme.sh
       - {{ nginx_proxy_base }}/certs:/etc/nginx/certs
     environment:
-      - "DEFAULT_EMAIL={{ certs_email }}"
-      - "ACME_CHALLENGE=DNS-01"
+      - TZ={{ timezone }}
+      - DEPLOYMENT_TIME={{ now() }}
+      - DEFAULT_EMAIL={{ certs_email }}
+      - ACME_CHALLENGE=DNS-01
       - "ACMESH_DNS_API_CONFIG={'DNS_API': 'dns_cf', 'CF_Key': '{{ cloudflare_token }}', 'CF_Email': '{{ cloudflare_email }}'}"
     networks:
       - proxy
diff --git a/playbooks/roles/nginx-proxy/templates/toplevel.conf.d/stream.conf b/playbooks/roles/nginx-proxy/templates/toplevel.conf.d/stream.conf
new file mode 100644
index 0000000..7e3b39d
--- /dev/null
+++ b/playbooks/roles/nginx-proxy/templates/toplevel.conf.d/stream.conf
@@ -0,0 +1,46 @@
+stream {
+    log_format basic '$proxy_protocol_addr - [$time_local] '
+                     '$protocol $status $bytes_sent $bytes_received '
+                     '$session_time';
+    upstream imaps {
+        server {{ vpn_proxy_filter_container_name }}:993;
+    }
+    upstream smtps {
+        server {{ vpn_proxy_filter_container_name }}:465;
+    }
+    upstream smtptls {
+        server {{ vpn_proxy_filter_container_name }}:587;
+    }
+    upstream smtp {
+        server {{ vpn_proxy_filter_container_name }}:25;
+    }
+    upstream managesieve {
+        server {{ vpn_proxy_filter_container_name }}:4190;
+    }
+
+    server {
+        listen 993;
+        proxy_pass imaps;
+        proxy_protocol on;
+    }
+    server {
+        listen 25;
+        proxy_pass smtp;
+        proxy_protocol on;
+    }
+    server {
+        listen 587;
+        proxy_pass smtptls;
+        proxy_protocol on;
+    }
+    server {
+        listen 465;
+        proxy_pass smtps;
+        proxy_protocol on;
+    }
+    server {
+        listen 4190;
+        proxy_pass managesieve;
+        proxy_protocol on;
+    }
+}
diff --git a/playbooks/roles/outbound/tasks/main.yml b/playbooks/roles/outbound/tasks/main.yml
index 107e71a..45540b4 100644
--- a/playbooks/roles/outbound/tasks/main.yml
+++ b/playbooks/roles/outbound/tasks/main.yml
@@ -1,119 +1,83 @@
 ---
-# Headscale setup
-- name: Build headscale compose dirs and files
-  ansible.builtin.file:
-    state: directory
-    dest: '/etc/docker/compose/headscale/{{ item.path }}'
-  with_filetree: '../templates/headscale'
-  when: item.state == 'directory'
-
-- name: Build headscale compose templates
-  ansible.builtin.template:
-    src: '{{ item.src }}'
-    dest: '/etc/docker/compose/headscale/{{ item.path }}'
-  with_filetree: '../templates/headscale'
-  when: item.state == 'file'
-
-- name: Daemon-reload and enable headscale
-  ansible.builtin.systemd_service:
+- name: Deploy Headscale
+  ansible.builtin.import_tasks: manage-docker-compose-service.yml
+  vars:
+    service_name: headscale
+    template_render_dir: "../templates/headscale"
+    service_destination_dir: "{{ headscale_base }}"
     state: started
-    enabled: true
-    daemon_reload: true
-    name: docker-compose@headscale
-
-- name: Perform rollout for headscale
-  ansible.builtin.shell:
-    cmd: "/usr/local/bin/docker-rollout rollout -f docker-compose.yml headscale"
-    chdir: "/etc/docker/compose/headscale"
-
-# User API Key
-- name: Generate API key if homelab build
-  ansible.builtin.shell:
-    cmd: docker compose exec -it headscale headscale apikeys create --expiration "{{ api_key_expiration }}"
-    chdir: /etc/docker/compose/headscale
-  register: api_key_result
-  when: generate_api_key
+    rollout_services:
+      - name: headscale
 
-- name: Store and display API key
-  when: generate_api_key
+- name: Generate Headscale API key (if requested)
+  when: generate_api_key | default(false)
   block:
-    - name: Define API Key Variable
-      set_fact:
-        headscale_api_key: "{{ api_key_result.stdout }}"
+    - name: Execute API key generation command
+      ansible.builtin.command:
+        cmd: "docker compose exec headscale headscale apikeys create --expiration {{ api_key_expiration }}"
+        chdir: /etc/docker/compose/headscale
+      register: api_key_result
+      changed_when: true
+
+    - name: Store and display newly generated API key
+      block:
+        - name: Store API Key in fact
+          ansible.builtin.set_fact:
+            headscale_api_key: "{{ api_key_result.stdout }}"
 
-    - name: Echo new key
-      ansible.builtin.debug:
-        msg: "Please store this API Key! {{ headscale_api_key }}"
+        - name: Display API Key (Requires User Action)
+          ansible.builtin.debug:
+            msg: "IMPORTANT: Please store this newly generated Headscale API Key! {{ headscale_api_key }}"
 
-    - name: Pause until user confirms
-      ansible.builtin.pause:
-        prompt: "Press return when ready!"
+        - name: Pause for user confirmation (API Key)
+          ansible.builtin.pause:
+            prompt: "API Key displayed. Press return to continue..."
+      when: api_key_result.rc == 0  # Only proceed if key generation succeeded
 
-# System user auth key
-- name: Create system key user and auth key if homelab build
-  when: generate_auth_key
+- name: Create Headscale system user and auth key (if requested)
+  when: generate_auth_key | default(false)  # Default to false if var is undefined
   block:
+    # Note: These steps might not be fully idempotent. Re-running will attempt creation again.
+    - name: Create system key user '{{ auth_key_user }}'
+      ansible.builtin.command:  # Using command module is safer
+        cmd: "docker compose exec headscale headscale users create {{ auth_key_user }}"
-    - name: Create system key user
-      ansible.builtin.shell:
-        cmd: docker compose exec -it headscale headscale users create "{{ auth_key_user }}"
         chdir: /etc/docker/compose/headscale
+      register: user_create_result
+      changed_when: "'User created' in user_create_result.stdout"
+      failed_when: user_create_result.rc != 0 and 'Cannot create user' not in user_create_result.stderr
 
-    - name: Create auth key preauthkey
-      ansible.builtin.shell:
-        cmd: docker compose exec -it headscale headscale preauthkeys create --reusable --expiration "{{ auth_key_expiration }}" --user "{{ auth_key_user }}"
+    - name: Create auth key for user '{{ auth_key_user }}'
+      ansible.builtin.command:  # Using command module is safer
+        cmd: "docker compose exec headscale headscale preauthkeys create --reusable --expiration {{ auth_key_expiration }} --user {{ auth_key_user }}"
         chdir: /etc/docker/compose/headscale
       register: auth_key_result
+      changed_when: true
 
-    - name: Store and display Auth Key
+    - name: Store and display newly generated Auth Key
       block:
-        - name: Define Auth Key Variable
-          set_fact:
+        # This stores the *newly generated* key. Be aware of Ansible variable precedence
+        # if 'headscale_user_auth_key' is also defined elsewhere (like vaults).
+        # This fact is primarily for immediate display and user interaction below.
+        - name: Store Auth Key in fact
+          ansible.builtin.set_fact:
             headscale_user_auth_key: "{{ auth_key_result.stdout }}"
 
-        - name: Echo new auth key
+        - name: Display Auth Key (Requires User Action)
           ansible.builtin.debug:
-            msg: "Please store this Auth Key for user {{ auth_key_user }}! {{ headscale_user_auth_key }}"
+            msg: "IMPORTANT: Please store this newly generated Auth Key for user '{{ auth_key_user }}'! {{ headscale_user_auth_key }}"
 
-        - name: Pause until user confirms
+        - name: Pause for user confirmation (Auth Key)
           ansible.builtin.pause:
-            prompt: "Press return when ready!"
-
-# Proxy setup (AFTER API key generation)
-- name: Build proxy compose dirs and files
-  ansible.builtin.file:
-    state: directory
-    dest: '/etc/docker/compose/proxy/{{ item.path }}'
-  with_filetree: '../templates/proxy'
-  when: item.state == 'directory'
-
-- name: Build proxy compose templates
-  ansible.builtin.template:
-    src: '{{ item.src }}'
-    dest: '/etc/docker/compose/proxy/{{ item.path }}'
-  with_filetree: '../templates/proxy'
-  when: item.state == 'file'
-
-- name: Allow mail ports
-  with_items:
-    - "25"
-    - "587"
-    - "465"
-    - "993"
-    - "4190"
-  community.general.ufw:
-    rule: allow
-    port: "{{ item }}"
-    state: "enabled"
-
-- name: Daemon-reload and enable proxy
-  ansible.builtin.systemd_service:
+            prompt: "Auth Key displayed. Press return to continue..."
+      when: auth_key_result.rc == 0
+
+- name: Deploy Open Internet -> Headnet Proxy
+  ansible.builtin.import_tasks: manage-docker-compose-service.yml
+  vars:
+    service_name: proxy
+    template_render_dir: "../templates/proxy"
+    service_destination_dir: "{{ proxy_base }}"
     state: started
-    enabled: true
-    daemon_reload: true
-    name: docker-compose@proxy
-
-- name: Perform rollout for proxy
-  ansible.builtin.shell:
-    cmd: "/usr/local/bin/docker-rollout rollout -f docker-compose.yml proxy"
-    chdir: "/etc/docker/compose/proxy"
+    rollout_services:
+      - name: "{{ vpn_proxy_filter_container_name }}"
diff --git a/playbooks/roles/outbound/templates/headscale/config/config.yaml b/playbooks/roles/outbound/templates/headscale/config/config.yaml
index 6bfbfb9..2586848 100644
--- a/playbooks/roles/outbound/templates/headscale/config/config.yaml
+++ b/playbooks/roles/outbound/templates/headscale/config/config.yaml
@@ -135,11 +135,11 @@ unix_socket_permission: "0770"
 oidc:
   only_start_if_oidc_is_available: false
-  issuer: "https://{{ idm_domain }}"
+  issuer: "https://{{ idm_domain }}/oauth2/openid/headscale"
   client_id: "headscale"
   client_secret: "{{ headscale_oidc_secret }}"
 
-  scope: ["openid", "profile", "email"]
+  scope: ["openid", "profile", "email", "groups"]
   pkce:
     # Enable or disable PKCE support (default: false)
     enabled: true
@@ -150,7 +150,8 @@ oidc:
   allowed_domains:
     - {{ domain }}
 
-  allowed_users: {{ headscale_allowed_users }}
+  allowed_groups:
+    - vpn@{{ idm_domain }}
 
   strip_email_domain: true
 
 # Logtail configuration
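[Editor's note] The two deploy tasks above are the first callers of the shared manage-docker-compose-service.yml entry point added at the bottom of this diff. For orientation, a hypothetical third compose service would opt in the same way; "example" and its paths are made up, and state defaults to restarted when omitted:

- name: Deploy example compose service (illustrative)
  ansible.builtin.import_tasks: manage-docker-compose-service.yml
  vars:
    service_name: example
    template_render_dir: "../templates/example"
    service_destination_dir: "/etc/docker/compose/example"
    state: started
    rollout_services:
      - name: example  # rollout_extra_args may also be set; it is passed through to docker-rollout verbatim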
diff --git a/playbooks/roles/outbound/templates/headscale/docker-compose.yml b/playbooks/roles/outbound/templates/headscale/docker-compose.yml
index ee140fb..04b3d9f 100644
--- a/playbooks/roles/outbound/templates/headscale/docker-compose.yml
+++ b/playbooks/roles/outbound/templates/headscale/docker-compose.yml
@@ -12,6 +12,7 @@ services:
     networks:
       - proxy
     environment:
+      - DEPLOYMENT_TIME={{ now() }}
       - VIRTUAL_HOST={{ headscale_host }}
       - VIRTUAL_PORT={{ headscale_port }}
       - LETSENCRYPT_HOST={{ headscale_host }}
@@ -28,6 +29,7 @@ services:
     networks:
       - proxy
     environment:
+      - DEPLOYMENT_TIME={{ now() }}
      - VIRTUAL_HOST={{ headscale_host }}
       - VIRTUAL_PORT={{ headscale_port }}
       - LETSENCRYPT_HOST={{ headscale_host }}
diff --git a/playbooks/roles/outbound/templates/proxy/docker-compose.yml b/playbooks/roles/outbound/templates/proxy/docker-compose.yml
index 9642d6a..c5aa3ac 100644
--- a/playbooks/roles/outbound/templates/proxy/docker-compose.yml
+++ b/playbooks/roles/outbound/templates/proxy/docker-compose.yml
@@ -1,36 +1,69 @@
+---
+
 services:
   headscale-client:
     image: tailscale/tailscale:latest
     environment:
+      - DEPLOYMENT_TIME={{ now() }}
       - TS_AUTHKEY={{ headscale_user_auth_key }}
       - TS_EXTRA_ARGS=--login-server=https://{{ headscale_host }} --accept-routes --accept-dns --stateful-filtering=false
       - TS_STATE_DIR=/var/lib/tailscale
       - TS_USERSPACE=false
       - TZ={{ timezone }}
-
-      - VIRTUAL_HOST=*.{{ domain }},{{ domain }}
-      - VIRTUAL_PORT=80
-      - LETSENCRYPT_HOST=*.{{ domain }},{{ domain }}
-
-    hostname: headscale-outbound
     restart: unless-stopped
+    healthcheck:
+      test: ["CMD-SHELL", "tailscale status"]
+      interval: 1s
+      timeout: 5s
+      retries: 10
+    hostname: headscale-outbound
     cap_add:
       - NET_ADMIN
       - SYS_ADMIN
     volumes:
       - ./data:/var/lib/tailscale
+      - ./headscale/headscale-client.sh:/headscale-client.sh
       - /dev/net/tun:/dev/net/tun
     networks:
       - proxy
+      - internal_subnet_router # magic.
+    sysctls:
+      - net.ipv4.ip_forward=1
+    entrypoint: ["/bin/sh"]
+    command: /headscale-client.sh
 
-  proxy:
+  {{ vpn_proxy_filter_container_name }}:
     image: nginx:latest
-    network_mode: service:headscale-client
+    entrypoint: ["/bin/sh"]
+    command: /wait-for-bridge.sh
+    pre_stop:
+      - command: /dont-die-until-conn-closed.sh
+    cap_add:
+      - NET_ADMIN # to modify the routing table
+    environment:
+      - DEPLOYMENT_TIME={{ now() }}
+      - VIRTUAL_HOST=*.{{ domain }},{{ domain }}
+      - VIRTUAL_PORT=80
+      - LETSENCRYPT_HOST=*.{{ domain }},{{ domain }}
+    healthcheck:
+      test: ["CMD", "curl", "http://localhost/health"]
+      interval: 10s
+      timeout: 5s
+      retries: 3
     depends_on:
       - headscale-client
     volumes:
-      - ./sites-enabled:/etc/nginx/conf.d
-      - ./toplevel.conf.d:/etc/nginx/toplevel.conf.d
+      - ./nginx/nginx.conf:/etc/nginx/nginx.conf
+      - ./nginx/conf.d:/etc/nginx/conf.d
+      - ./nginx/toplevel.conf.d:/etc/nginx/toplevel.conf.d
+      - ./nginx/wait-for-bridge.sh:/wait-for-bridge.sh
+      - ./nginx/dont-die-until-conn-closed.sh:/dont-die-until-conn-closed.sh
+    networks:
+      - proxy
+      - internal_subnet_router
 
 networks:
+  internal_subnet_router:
+    driver: bridge
   proxy:
     external: true
diff --git a/playbooks/roles/outbound/templates/proxy/headscale/headscale-client.sh b/playbooks/roles/outbound/templates/proxy/headscale/headscale-client.sh
new file mode 100755
index 0000000..1ce0acb
--- /dev/null
+++ b/playbooks/roles/outbound/templates/proxy/headscale/headscale-client.sh
@@ -0,0 +1,14 @@
+#!/bin/sh
+
+/usr/local/bin/containerboot &
+
+while ! tailscale status > /dev/null 2>&1; do
+  sleep 1
+  echo '[+] Waiting for tailscale status to be up...'
+done
+
+echo '[+] Tailscale is up. Enabling NAT...'
+iptables -t nat -A POSTROUTING -o tailscale0 -j MASQUERADE
+echo '[+] Done.'
+
+tail -f /dev/null
diff --git a/playbooks/roles/outbound/templates/proxy/sites-enabled/bin.conf b/playbooks/roles/outbound/templates/proxy/nginx/conf.d/bin.conf
index 3c5682d..3c5682d 100644
--- a/playbooks/roles/outbound/templates/proxy/sites-enabled/bin.conf
+++ b/playbooks/roles/outbound/templates/proxy/nginx/conf.d/bin.conf
diff --git a/playbooks/roles/outbound/templates/proxy/sites-enabled/default.conf b/playbooks/roles/outbound/templates/proxy/nginx/conf.d/default.conf
index d127cc5..f4a8007 100644
--- a/playbooks/roles/outbound/templates/proxy/sites-enabled/default.conf
+++ b/playbooks/roles/outbound/templates/proxy/nginx/conf.d/default.conf
@@ -4,4 +4,8 @@ server {
   location / {
     return 404;
   }
+
+  location /health {
+    return 200;
+  }
 }
diff --git a/playbooks/roles/outbound/templates/proxy/sites-enabled/idm.conf b/playbooks/roles/outbound/templates/proxy/nginx/conf.d/idm.conf
index c85ebcf..c85ebcf 100644
--- a/playbooks/roles/outbound/templates/proxy/sites-enabled/idm.conf
+++ b/playbooks/roles/outbound/templates/proxy/nginx/conf.d/idm.conf
diff --git a/playbooks/roles/outbound/templates/proxy/sites-enabled/kanban.conf b/playbooks/roles/outbound/templates/proxy/nginx/conf.d/kanban.conf
index b668310..b668310 100644
--- a/playbooks/roles/outbound/templates/proxy/sites-enabled/kanban.conf
+++ b/playbooks/roles/outbound/templates/proxy/nginx/conf.d/kanban.conf
diff --git a/playbooks/roles/outbound/templates/proxy/sites-enabled/mail.conf b/playbooks/roles/outbound/templates/proxy/nginx/conf.d/mail.conf
index c810f5a..c810f5a 100644
--- a/playbooks/roles/outbound/templates/proxy/sites-enabled/mail.conf
+++ b/playbooks/roles/outbound/templates/proxy/nginx/conf.d/mail.conf
diff --git a/playbooks/roles/outbound/templates/proxy/sites-enabled/notes.conf b/playbooks/roles/outbound/templates/proxy/nginx/conf.d/notes.conf
index f7937dd..f7937dd 100644
--- a/playbooks/roles/outbound/templates/proxy/sites-enabled/notes.conf
+++ b/playbooks/roles/outbound/templates/proxy/nginx/conf.d/notes.conf
diff --git a/playbooks/roles/outbound/templates/proxy/nginx/dont-die-until-conn-closed.sh b/playbooks/roles/outbound/templates/proxy/nginx/dont-die-until-conn-closed.sh
new file mode 100755
index 0000000..967c2c0
--- /dev/null
+++ b/playbooks/roles/outbound/templates/proxy/nginx/dont-die-until-conn-closed.sh
@@ -0,0 +1,56 @@
+#!/bin/bash
+
+echo "[/] sleeping to wait for some time for container to be marked as stop."
+# https://stackoverflow.com/a/45146086
+sleep 3
+
+pid_file="/run/nginx.pid"
+max_wait_seconds=30
+
+if [ ! -f "$pid_file" ]; then
+  echo "[!] Nginx PID file not found at $pid_file. Assuming Nginx not running or already stopped."
+  exit 0
+fi
+
+PID=$(cat "$pid_file")
+
+# Validate PID
+if [ -z "$PID" ] || ! [[ "$PID" =~ ^[0-9]+$ ]]; then
+  echo "[!] Invalid PID found in $pid_file: '$PID'"
+  exit 1
+fi
+
+# Check if the process actually exists before sending quit
+# kill -0 PID checks if a signal can be sent.
+if ! kill -0 "$PID" 2>/dev/null; then
+  echo "[!] Nginx process $PID not found or already stopped."
+  exit 0 # Exit successfully
+fi
+
+echo "[/] sending signal to nginx (PID: $PID) to quit"
+nginx -s quit
+
+start_time=$SECONDS
+echo "[/] Waiting for Nginx (PID: $PID) to stop (max ${max_wait_seconds}s)..."
+
+while [ -d /proc/$PID ]; do
+  current_time=$SECONDS
+  elapsed_time=$((current_time - start_time))
+
+  if [ "$elapsed_time" -ge "$max_wait_seconds" ]; then
+    echo "[!] Timeout: Nginx process $PID did not stop within ${max_wait_seconds} seconds."
+    echo "[!] Sending SIGKILL to PID $PID."
+    kill -9 "$PID" 2>/dev/null
+
+    exit 1
+  fi
+
+  sleep 0.5
+  if (( $(echo "$elapsed_time % 5" | bc) == 0 )); then
+    echo "[/] Nginx (PID: $PID) still running (waited ${elapsed_time}s)..."
+  fi
+done
+
+echo "[+] Nginx process $PID stopped gracefully."
+echo "[+] done. goodbye."
+exit 0
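[Editor's note] The pre_stop hook that invokes the script above is a relatively new Compose lifecycle feature (added around Docker Compose v2.30, if memory serves) and runs inside the still-running container before it receives SIGTERM. The pairing that matters is a stop_grace_period longer than the script's worst-case wait, or Docker escalates to SIGKILL mid-drain. A minimal sketch with a made-up service name:

services:
  example-nginx:
    image: nginx:latest
    stop_grace_period: 45s  # > the script's 3s sleep + 30s max wait
    pre_stop:
      - command: /dont-die-until-conn-closed.sh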
diff --git a/playbooks/roles/outbound/templates/proxy/nginx/nginx.conf b/playbooks/roles/outbound/templates/proxy/nginx/nginx.conf
new file mode 100644
index 0000000..32feb3a
--- /dev/null
+++ b/playbooks/roles/outbound/templates/proxy/nginx/nginx.conf
@@ -0,0 +1,30 @@
+user www-data;
+worker_processes 4;
+pid /run/nginx.pid;
+# load_module modules/ndk_http_module.so;
+# load_module modules/ngx_http_set_misc_module.so;
+
+events {
+  worker_connections 768;
+}
+
+include /etc/nginx/toplevel.conf.d/*.conf;
+
+http {
+  charset utf-8;
+  sendfile on;
+  tcp_nopush on;
+  tcp_nodelay on;
+  keepalive_timeout 65;
+  types_hash_max_size 2048;
+  include /etc/nginx/mime.types;
+  default_type application/octet-stream;
+
+  access_log /var/log/nginx/access.log;
+  error_log /var/log/nginx/error.log;
+
+  gzip on;
+  gzip_disable "msie6";
+
+  include /etc/nginx/conf.d/*.conf;
+}
diff --git a/playbooks/roles/outbound/templates/proxy/nginx/toplevel.conf.d/stream.conf b/playbooks/roles/outbound/templates/proxy/nginx/toplevel.conf.d/stream.conf
new file mode 100644
index 0000000..193e65a
--- /dev/null
+++ b/playbooks/roles/outbound/templates/proxy/nginx/toplevel.conf.d/stream.conf
@@ -0,0 +1,56 @@
+stream {
+    log_format basic '$proxy_protocol_addr - [$time_local] '
+                     '$protocol $status $bytes_sent $bytes_received '
+                     '$session_time';
+    upstream imaps {
+        server {{ loadbalancer_ip }}:993;
+    }
+    upstream smtps {
+        server {{ loadbalancer_ip }}:465;
+    }
+    upstream smtptls {
+        server {{ loadbalancer_ip }}:587;
+    }
+    upstream smtp {
+        server {{ loadbalancer_ip }}:25;
+    }
+    upstream managesieve {
+        server {{ loadbalancer_ip }}:4190;
+    }
+
+    server {
+        set_real_ip_from {{ docker_network }};
+        listen 993 proxy_protocol;
+
+        proxy_pass imaps;
+        proxy_protocol on;
+    }
+    server {
+        set_real_ip_from {{ docker_network }};
+        listen 25 proxy_protocol;
+
+        proxy_pass smtp;
+        proxy_protocol on;
+    }
+    server {
+        set_real_ip_from {{ docker_network }};
+        listen 587 proxy_protocol;
+
+        proxy_pass smtptls;
+        proxy_protocol on;
+    }
+    server {
+        set_real_ip_from {{ docker_network }};
+        listen 465 proxy_protocol;
+
+        proxy_pass smtps;
+        proxy_protocol on;
+    }
+    server {
+        set_real_ip_from {{ docker_network }};
+        listen 4190 proxy_protocol;
+
+        proxy_pass managesieve;
+        proxy_protocol on;
+    }
+}
diff --git a/playbooks/roles/outbound/templates/proxy/nginx/wait-for-bridge.sh b/playbooks/roles/outbound/templates/proxy/nginx/wait-for-bridge.sh
new file mode 100755
index 0000000..da273a9
--- /dev/null
+++ b/playbooks/roles/outbound/templates/proxy/nginx/wait-for-bridge.sh
@@ -0,0 +1,33 @@
+#!/bin/sh
+
+set -e
+
+echo "[+] Waiting for headscale-client to be resolvable..."
+
+# Loop until headscale-client IP is found or timeout
+timeout=30
+start_time=$(date +%s)
+
+HEADSCALE_IP=""
+while [ -z "$HEADSCALE_IP" ]; do
+  HEADSCALE_IP=$(getent hosts headscale-client | awk '{ print $1 }' | head -n 1)
+  current_time=$(date +%s)
+  if [ $((current_time - start_time)) -ge $timeout ]; then
+    echo "[-] Timeout waiting for headscale-client DNS resolution." >&2
+    exit 1
+  fi
+  if [ -z "$HEADSCALE_IP" ]; then
+    sleep 1
+  fi
+done
+
+echo "[+] Found headscale-client IP: $HEADSCALE_IP"
+echo "[+] Attempting to modify routing table..."
+
+apt update && apt install -y iproute2
+ip route del default || echo "[-] Warning: Failed to delete default route (maybe none existed)."
+ip route add default via $HEADSCALE_IP
+echo "[+] Default route set via $HEADSCALE_IP."
+
+echo "[+] Starting Nginx..."
+nginx -g "daemon off;"
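[Editor's note] If the route swap in wait-for-bridge.sh silently fails, the filter container leaks traffic out the normal bridge instead of the tailnet. A hedged verification task — not in this commit; it only reuses proxy_base and vpn_proxy_filter_container_name from group_vars/outbound.yml:

- name: Verify the proxy filter routes via headscale-client (illustrative)
  ansible.builtin.command:
    cmd: "docker compose exec {{ vpn_proxy_filter_container_name }} ip route show default"
    chdir: "{{ proxy_base }}"
  register: default_route
  changed_when: false
  failed_when: "'default via' not in default_route.stdout"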
diff --git a/playbooks/roles/outbound/templates/proxy/toplevel.conf.d/stream.conf b/playbooks/roles/outbound/templates/proxy/toplevel.conf.d/stream.conf
deleted file mode 100644
index 68d5445..0000000
--- a/playbooks/roles/outbound/templates/proxy/toplevel.conf.d/stream.conf
+++ /dev/null
@@ -1,38 +0,0 @@
-stream {
-    upstream imaps {
-        server {{ loadbalancer_ip }}:993;
-    }
-    upstream smtps {
-        server {{ loadbalancer_ip }}:465;
-    }
-    upstream smtptls {
-        server {{ loadbalancer_ip }}:587;
-    }
-    upstream smtp {
-        server {{ loadbalancer_ip }}:25;
-    }
-    upstream managesieve {
-        server {{ loadbalancer_ip }}:4190;
-    }
-
-    server {
-        listen 993;
-        proxy_pass imaps;
-    }
-    server {
-        listen 25;
-        proxy_pass smtp;
-    }
-    server {
-        listen 587;
-        proxy_pass smtptls;
-    }
-    server {
-        listen 465;
-        proxy_pass smtps;
-    }
-    server {
-        listen 4190;
-        proxy_pass managesieve;
-    }
-}
diff --git a/playbooks/roles/pihole/tasks/main.yml b/playbooks/roles/pihole/tasks/main.yml
index 6990623..fc526dc 100644
--- a/playbooks/roles/pihole/tasks/main.yml
+++ b/playbooks/roles/pihole/tasks/main.yml
@@ -1,19 +1,9 @@
 ---
-- name: Build pihole compose dirs
-  ansible.builtin.file:
-    state: directory
-    dest: '{{ pihole_base }}/{{ item.path }}'
-  with_filetree: '../templates'
-  when: item.state == 'directory'
+- name: Deploy pihole
+  ansible.builtin.import_tasks: manage-docker-swarm-service.yml
+  vars:
+    service_name: pihole
+    template_render_dir: "../templates"
+    service_destination_dir: "{{ pihole_base }}"
 
-- name: Build pihole compose files
-  ansible.builtin.template:
-    src: '{{ item.src }}'
-    dest: '{{ pihole_base }}/{{ item.path }}'
-  with_filetree: '../templates'
-  when: item.state == 'file'
-
-- name: Deploy Pihole stack
-  ansible.builtin.command:
-    cmd: "docker stack deploy -c {{ pihole_base }}/stacks/docker-compose.yml pihole"
diff --git a/playbooks/roles/pihole/templates/stacks/docker-compose.yml b/playbooks/roles/pihole/templates/stacks/docker-compose.yml
index a99d7db..47422f1 100644
--- a/playbooks/roles/pihole/templates/stacks/docker-compose.yml
+++ b/playbooks/roles/pihole/templates/stacks/docker-compose.yml
@@ -1,19 +1,33 @@
-version: '3.2'
-
 services:
   pihole:
     image: pihole/pihole:latest
+    ports:
+      - "53:53/udp"
+      - "53:53/tcp"
     volumes:
       - {{ pihole_base }}/volumes/pihole:/etc/pihole
       - {{ pihole_base }}/volumes/dnsmasq:/etc/dnsmasq.d
     environment:
+      - DEPLOYMENT_TIME={{ now() }}
       - TZ={{ timezone }}
       - FTLCONF_webserver_api_password={{ pihole_webpwd }}
       - FTLCONF_dns_upstreams={{ upstream_dns_servers | join(';') }}
     networks:
       - proxy
+{% if not homelab_build %}
+    healthcheck:
+      test: ["CMD-SHELL", "dig loadbalancer.{{ domain }} @127.0.0.1 | grep -q {{ loadbalancer_ip }}"]
+      retries: 3
+      timeout: 5s
+      start_period: 8s
+{% endif %}
     deploy:
       mode: replicated
+      update_config:
+        parallelism: 1
+        order: start-first
+        failure_action: rollback
+        monitor: 10s
       replicas: 1
       labels:
         - traefik.enable=true
@@ -23,15 +37,6 @@ services:
         - traefik.http.routers.piholeweb.rule=Host(`{{ pihole_domain }}`)
         - traefik.http.routers.piholeweb.entrypoints=websecure
        - traefik.http.services.piholeweb.loadbalancer.server.port=80
-        # 53/udp
-        - traefik.udp.routers.pihole-dns-udp.entrypoints=dns_udp
-        - traefik.udp.routers.pihole-dns-udp.service=pihole-dns-udp
-        - traefik.udp.services.pihole-dns-udp.loadbalancer.server.port=53
-        # 53/tcp
-        - traefik.tcp.routers.pihole-dns-tcp.rule=HostSNI(`*`)
-        - traefik.tcp.routers.pihole-dns-tcp.entrypoints=dns_tcp
-        - traefik.tcp.routers.pihole-dns-tcp.service=pihole-dns-tcp
-        - traefik.tcp.services.pihole-dns-tcp.loadbalancer.server.port=53
 
 networks:
   proxy:
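[Editor's note] The new Pi-hole healthcheck above only runs inside the container; an equivalent playbook-level assertion can catch a bad upstream push from the outside. Illustrative only — it assumes dig exists on the control node and reuses the existing domain and loadbalancer_ip vars:

- name: Verify Pi-hole answers for loadbalancer.{{ domain }} (illustrative)
  ansible.builtin.command:
    cmd: "dig +short loadbalancer.{{ domain }} @{{ loadbalancer_ip }}"
  register: pihole_dig
  changed_when: false
  failed_when: loadbalancer_ip not in pihole_dig.stdout
  delegate_to: localhost
  become: false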
diff --git a/playbooks/roles/portainer/tasks/main.yml b/playbooks/roles/portainer/tasks/main.yml
index db9146d..7f26a5f 100644
--- a/playbooks/roles/portainer/tasks/main.yml
+++ b/playbooks/roles/portainer/tasks/main.yml
@@ -1,19 +1,9 @@
 ---
-- name: Build portainer compose dirs
-  ansible.builtin.file:
-    state: directory
-    dest: '{{ portainer_base }}/{{ item.path }}'
-  with_filetree: '../templates'
-  when: item.state == 'directory'
+- name: Deploy portainer
+  ansible.builtin.import_tasks: manage-docker-swarm-service.yml
+  vars:
+    service_name: portainer
+    template_render_dir: "../templates"
+    service_destination_dir: "{{ portainer_base }}"
 
-- name: Build portainer compose files
-  ansible.builtin.template:
-    src: '{{ item.src }}'
-    dest: '{{ portainer_base }}/{{ item.path }}'
-  with_filetree: '../templates'
-  when: item.state == 'file'
-
-- name: Deploy Portainer stack
-  ansible.builtin.command:
-    cmd: "docker stack deploy -c {{ portainer_base }}/stacks/docker-compose.yml portainer"
diff --git a/playbooks/roles/portainer/templates/stacks/docker-compose.yml b/playbooks/roles/portainer/templates/stacks/docker-compose.yml
index 1a02cef..c304153 100644
--- a/playbooks/roles/portainer/templates/stacks/docker-compose.yml
+++ b/playbooks/roles/portainer/templates/stacks/docker-compose.yml
@@ -12,6 +12,9 @@ services:
       mode: global
       placement:
        constraints: [node.platform.os == linux]
+    environment:
+      - TZ={{ timezone }}
+      - DEPLOYMENT_TIME={{ now() }}
 
   portainer:
     image: portainer/portainer-ce:alpine
@@ -21,6 +24,9 @@ services:
     volumes:
       - /var/run/docker.sock:/var/run/docker.sock
       - {{ portainer_base }}/volumes/data:/data
+    environment:
+      - TZ={{ timezone }}
+      - DEPLOYMENT_TIME={{ now() }}
     networks:
       - proxy
       - agent_network
diff --git a/playbooks/roles/silverbullet/tasks/main.yml b/playbooks/roles/silverbullet/tasks/main.yml
index 2b66f61..19f531d 100644
--- a/playbooks/roles/silverbullet/tasks/main.yml
+++ b/playbooks/roles/silverbullet/tasks/main.yml
@@ -1,19 +1,9 @@
 ---
-- name: Build silverbullet compose dirs
-  ansible.builtin.file:
-    state: directory
-    dest: '{{ silverbullet_base }}/{{ item.path }}'
-  with_filetree: '../templates'
-  when: item.state == 'directory'
+- name: Deploy silverbullet
+  ansible.builtin.import_tasks: manage-docker-swarm-service.yml
+  vars:
+    service_name: silverbullet
+    template_render_dir: "../templates"
+    service_destination_dir: "{{ silverbullet_base }}"
 
-- name: Build silverbullet compose files
-  ansible.builtin.template:
-    src: '{{ item.src }}'
-    dest: '{{ silverbullet_base }}/{{ item.path }}'
-  with_filetree: '../templates'
-  when: item.state == 'file'
-
-- name: Deploy silverbullet stack
-  ansible.builtin.command:
-    cmd: "docker stack deploy -c {{ silverbullet_base }}/stacks/docker-compose.yml silverbullet"
diff --git a/playbooks/roles/silverbullet/templates/stacks/docker-compose.yml b/playbooks/roles/silverbullet/templates/stacks/docker-compose.yml
index 21fe0ff..bedbeec 100644
--- a/playbooks/roles/silverbullet/templates/stacks/docker-compose.yml
+++ b/playbooks/roles/silverbullet/templates/stacks/docker-compose.yml
@@ -1,11 +1,11 @@
-version: '3.2'
-
 services:
   silverbullet:
     image: ghcr.io/silverbulletmd/silverbullet
     restart: unless-stopped
     environment:
-      - SB_USER={{ silverbullet_password }}
+      - TZ={{ timezone }}
+      - DEPLOYMENT_TIME={{ now() }}
+      - SB_USER={{ silverbullet_password }}
     volumes:
       - {{ silverbullet_base }}/volumes/data:/space
     networks:
@@ -13,6 +13,10 @@ services:
     deploy:
       mode: replicated
       replicas: 1
+      update_config:
+        parallelism: 1
+        order: start-first
+        failure_action: rollback
       labels:
         - traefik.enable=true
         - traefik.swarm.network=proxy
diff --git a/playbooks/roles/traefik/tasks/main.yml b/playbooks/roles/traefik/tasks/main.yml
index c365f55..ad96334 100644
--- a/playbooks/roles/traefik/tasks/main.yml
+++ b/playbooks/roles/traefik/tasks/main.yml
@@ -1,19 +1,13 @@
 ---
-- name: Build traefik compose dirs
-  ansible.builtin.file:
-    state: directory
-    dest: '{{ traefik_base }}/{{ item.path }}'
-  with_filetree: '../templates'
-  when: item.state == 'directory'
+- name: Deploy traefik and tailnet -> home proxy
+  ansible.builtin.import_tasks: manage-docker-swarm-service.yml
+  vars:
+    service_name: traefik
+    template_render_dir: "../templates"
+    service_destination_dir: "{{ traefik_base }}"
 
-- name: Build traefik compose files
-  ansible.builtin.template:
-    src: '{{ item.src }}'
-    dest: '{{ traefik_base }}/{{ item.path }}'
-  with_filetree: '../templates'
-  when: item.state == 'file'
-
-- name: Deploy Traefik stack
-  ansible.builtin.command:
-    cmd: "docker stack deploy -c {{ traefik_base }}/stacks/docker-compose.yml traefik"
+- name: Pause for user confirmation (subnet router)
+  when: homelab_build
+  ansible.builtin.pause:
+    prompt: "Please accept the subnet router in headscale..."
diff --git a/playbooks/roles/traefik/templates/stacks/docker-compose.yml b/playbooks/roles/traefik/templates/stacks/docker-compose.yml
index 6b6aee3..7e9daef 100644
--- a/playbooks/roles/traefik/templates/stacks/docker-compose.yml
+++ b/playbooks/roles/traefik/templates/stacks/docker-compose.yml
@@ -4,11 +4,17 @@ services:
     hostname: headscale-traefik
     restart: unless-stopped
     environment:
+      - DEPLOYMENT_TIME={{ now() }}
+      - TZ={{ timezone }}
       - TS_AUTHKEY={{ headscale_user_auth_key }}
       - TS_EXTRA_ARGS=--login-server=https://{{ headscale_host }} --accept-dns --stateful-filtering=false --advertise-routes={{ loadbalancer_ip }}/32
       - TS_STATE_DIR=/var/lib/tailscale
       - TS_USERSPACE=false
-      - TZ={{ timezone }}
+    healthcheck:
+      test: ["CMD-SHELL", "tailscale status"]
+      interval: 1s
+      timeout: 5s
+      retries: 10
     volumes:
       - {{ traefik_base }}/volumes/headscale:/var/lib/tailscale
       - /dev/net/tun:/dev/net/tun
@@ -20,20 +26,27 @@ services:
     deploy:
       mode: replicated
       replicas: 1
-      placement:
-        constraints: [node.role == manager]
+      update_config:
+        parallelism: 1
+        order: stop-first # hostname conflicts
+        failure_action: rollback
+        monitor: 8s
 
   traefik:
     image: traefik:v3
     restart: unless-stopped
     depends_on:
       - headscale-client
     ports:
+      # http
       - 80:80
       - 443:443
-      - 53:53
-      - 53:53/udp
-      - 3636:3636
+    healthcheck:
+      test: traefik healthcheck --ping
+      interval: 10s
+      retries: 2
+      timeout: 3s
     environment:
+      - DEPLOYMENT_TIME={{ now() }}
       - TZ={{ timezone }}
       - CF_API_EMAIL={{ cloudflare_email }}
       - CF_DNS_API_TOKEN={{ cloudflare_dns_api_token }}
@@ -45,9 +58,13 @@ services:
       - proxy
       - headnet
     deploy:
-      mode: global
-      placement:
-        constraints: [node.role == manager]
+      mode: replicated
+      replicas: 2
+      update_config:
+        parallelism: 1
+        order: start-first
+        failure_action: rollback
+        monitor: 8s
       labels:
         - traefik.enable=true
         - traefik.http.routers.dashboard.rule=Host(`{{ traefik_domain }}`) && (PathPrefix(`/api`) || PathPrefix(`/dashboard`))
diff --git a/playbooks/roles/traefik/templates/stacks/traefik.yml b/playbooks/roles/traefik/templates/stacks/traefik.yml
index 976ad7a..ceeb0cb 100644
--- a/playbooks/roles/traefik/templates/stacks/traefik.yml
+++ b/playbooks/roles/traefik/templates/stacks/traefik.yml
@@ -1,11 +1,11 @@
 ping: {}
 accessLog: {}
 log:
-  level: DEBUG
+  level: INFO
 api:
   dashboard: true
   insecure: true
-  debug: false
+  debug: true
 entryPoints:
   web:
     address: ":80"
@@ -16,22 +16,6 @@ entryPoints:
         scheme: https
   websecure:
     address: ":443"
-  dns_udp:
-    address: ":53/udp"
-  dns_tcp:
-    address: ":53/tcp"
-  ldaps:
-    address: ":3636/tcp"
-  sieve:
-    address: ":4190/tcp"
-  imap:
-    address: ":993/tcp"
-  smtps:
-    address: ":465/tcp"
-  smtp:
-    address: ":25/tcp"
-  smtptls:
-    address: ":587/tcp"
 serversTransport:
   insecureSkipVerify: true
 providers:
diff --git a/playbooks/roles/traextor/tasks/main.yml b/playbooks/roles/traextor/tasks/main.yml
index 19074fe..64dfc1e 100644
--- a/playbooks/roles/traextor/tasks/main.yml
+++ b/playbooks/roles/traextor/tasks/main.yml
@@ -1,19 +1,9 @@
 ---
-- name: Build traextor compose dirs
-  ansible.builtin.file:
-    state: directory
-    dest: '{{ traextor_base }}/{{ item.path }}'
-  with_filetree: '../templates'
-  when: item.state == 'directory'
+- name: Deploy traextor
+  ansible.builtin.import_tasks: manage-docker-swarm-service.yml
+  vars:
+    service_name: traextor
+    template_render_dir: "../templates"
+    service_destination_dir: "{{ traextor_base }}"
 
-- name: Build traextor compose files
-  ansible.builtin.template:
-    src: '{{ item.src }}'
-    dest: '{{ traextor_base }}/{{ item.path }}'
-  with_filetree: '../templates'
-  when: item.state == 'file'
-
-- name: Deploy Traextor stack
-  ansible.builtin.command:
-    cmd: "docker stack deploy -c {{ traextor_base }}/stacks/docker-compose.yml traextor"
diff --git a/playbooks/roles/traextor/templates/stacks/docker-compose.yml b/playbooks/roles/traextor/templates/stacks/docker-compose.yml
index 9012365..d15358e 100644
--- a/playbooks/roles/traextor/templates/stacks/docker-compose.yml
+++ b/playbooks/roles/traextor/templates/stacks/docker-compose.yml
@@ -8,5 +8,9 @@ services:
       - /var/run/docker.sock:/var/run/docker.sock
     command: -H unix:///var/run/docker.sock
     environment:
+      DEPLOYMENT_TIME: {{ now() }}
       TZ: {{ timezone }}
       OUTPUT_DIR: /certs
+    deploy:
+      mode: replicated
+      replicas: 1
diff --git a/tasks/copy-rendered-templates-recursive.yml b/tasks/copy-rendered-templates-recursive.yml
new file mode 100644
index 0000000..e47c39c
--- /dev/null
+++ b/tasks/copy-rendered-templates-recursive.yml
@@ -0,0 +1,58 @@
+---
+
+- name: Create temporary directory on localhost
+  delegate_to: localhost
+  become: false
+  ansible.builtin.tempfile:
+    state: directory
+  register: tempdir
+
+- name: Ensure parent directories exist for rendered templates
+  delegate_to: localhost
+  become: false
+  ansible.builtin.file:
+    path: "{{ tempdir.path }}/{{ item.path | dirname }}"
+    state: directory
+    mode: "{{ mode | default('0755') }}"
+  with_filetree: "{{ render_dir }}"
+  when: item.state == "file"
+
+- name: Recursively render templates
+  delegate_to: localhost
+  become: false
+  ansible.builtin.template:
+    src: "{{ item.src }}"
+    dest: "{{ tempdir.path }}/{{ item.path }}"
+    mode: "{{ mode | default('0755') }}"
+  with_filetree: "{{ render_dir }}"
+  when: item.state == "file"
+
+- name: Sync rendered templates to remote host
+  delegate_to: localhost
+  become: false
+  ansible.posix.synchronize:
+    src: "{{ tempdir.path }}/"
+    dest: "{{ tempdir.path }}/"
+
+- name: Remove local temporary directory
+  delegate_to: localhost
+  become: false
+  ansible.builtin.file:
+    path: "{{ tempdir.path }}"
+    state: absent
+
+- name: Update remote files
+  ansible.builtin.command:
+    cmd: bash -c 'cp -r {{ tempdir.path }}/* {{ destination_dir }}/'
+
+- name: Remove local temporary directory
+  delegate_to: localhost
+  become: false
+  ansible.builtin.file:
+    path: "{{ tempdir.path }}"
+    state: absent
+
+- name: Remove remote temporary directory
+  ansible.builtin.file:
+    path: "{{ tempdir.path }}"
+    state: absent
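[Editor's note] The helper above renders a template tree to a control-node tempdir, syncs the same path onto the target, then copies it into place; render_dir, destination_dir, and an optional mode are its inputs. A hypothetical standalone use, outside the two manage-* wrappers below:

- name: Render a template tree to a host (illustrative; path is made up)
  ansible.builtin.import_tasks: copy-rendered-templates-recursive.yml
  vars:
    render_dir: "../templates"
    destination_dir: "/etc/docker/compose/example"
    mode: "0755"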
diff --git a/tasks/manage-docker-compose-service.yml b/tasks/manage-docker-compose-service.yml
new file mode 100644
index 0000000..793d2ac
--- /dev/null
+++ b/tasks/manage-docker-compose-service.yml
@@ -0,0 +1,23 @@
+---
+
+- name: "Copy rendered templates for {{ service_name }}"
+  ansible.builtin.import_tasks: copy-rendered-templates-recursive.yml
+  vars:
+    render_dir: "{{ template_render_dir }}"
+    destination_dir: "{{ service_destination_dir }}"
+
+- name: "Ensure {{ service_name }} service is enabled and started"
+  ansible.builtin.systemd_service:
+    state: "{{ state | default('restarted') }}"
+    enabled: true
+    daemon_reload: true
+    name: "docker-compose@{{ service_name }}"
+
+- name: "Perform rollout for {{ service_name }}"
+  with_items: "{{ rollout_services | default([]) }}"
+  ansible.builtin.shell:
+    cmd: "/usr/local/bin/docker-rollout rollout -f docker-compose.yml {{ item.rollout_extra_args | default('') }} {{ item.name }}"
+    chdir: "{{ service_destination_dir }}"
+  register: rollout_result
+  changed_when: "'Waiting for new containers to be healthy' in rollout_result.stdout"
+  failed_when: rollout_result.rc != 0 or 'Rolling back.' in rollout_result.stdout
diff --git a/tasks/manage-docker-swarm-service.yml b/tasks/manage-docker-swarm-service.yml
new file mode 100644
index 0000000..5332a3f
--- /dev/null
+++ b/tasks/manage-docker-swarm-service.yml
@@ -0,0 +1,16 @@
+---
+
+- name: "Copy rendered templates for {{ service_name }}"
+  ansible.builtin.import_tasks: copy-rendered-templates-recursive.yml
+  vars:
+    mode: "0777"
+    render_dir: "{{ template_render_dir }}"
+    destination_dir: "{{ service_destination_dir }}"
+
+- name: "Deploy stack for {{ service_name }}"
+  ansible.builtin.command:
+    cmd: "docker stack deploy --resolve-image changed --detach=false --compose-file {{ stack_file | default(service_destination_dir + '/stacks/docker-compose.yml') }} {{ service_name }}"
+  register: stack_result
+  changed_when: true
+  failed_when: stack_result.rc != 0
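[Editor's note] One assumption both wrappers lean on but this commit never shows: a docker-compose@.service systemd template unit must already exist on the hosts (presumably installed elsewhere, e.g. by the docker role). A minimal sketch of such a unit, for orientation only — not from this repo:

- name: Install docker-compose@ template unit (illustrative)
  ansible.builtin.copy:
    dest: /etc/systemd/system/docker-compose@.service
    mode: "0644"
    content: |
      [Unit]
      Description=%i via docker compose
      Requires=docker.service
      After=docker.service

      [Service]
      Type=oneshot
      RemainAfterExit=true
      WorkingDirectory=/etc/docker/compose/%i
      ExecStart=/usr/bin/docker compose up -d --remove-orphans
      ExecStop=/usr/bin/docker compose down

      [Install]
      WantedBy=multi-user.target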