author     Elizabeth Hunt <me@liz.coffee>  2025-03-15 00:50:34 -0700
committer  Elizabeth Hunt <me@liz.coffee>  2025-03-15 00:50:34 -0700
commit     fb7e6890d8516618fa3baec0edf84048e2b6601d (patch)
tree       a7bc5cfce71288ab69e8fa590d0f02df90c55385 /playbooks
a docker swarm
Diffstat (limited to 'playbooks')
-rw-r--r--  playbooks/deploy-common.yml                                 7
-rw-r--r--  playbooks/deploy-docker.yml                                 7
-rw-r--r--  playbooks/deploy-nginx-proxy.yml                            7
-rw-r--r--  playbooks/deploy-outbound.yml                               7
-rw-r--r--  playbooks/deploy-swarm.yml                                115
-rw-r--r--  playbooks/roles/common/files/authorized_keys                1
-rw-r--r--  playbooks/roles/common/files/sshd_config                   21
-rw-r--r--  playbooks/roles/common/handlers/main.yml                   23
-rw-r--r--  playbooks/roles/common/tasks/main.yml                      66
-rw-r--r--  playbooks/roles/docker/files/docker-compose@.service       19
-rwxr-xr-x  playbooks/roles/docker/files/docker-rollout               204
-rw-r--r--  playbooks/roles/docker/handlers/main.yml                    8
-rw-r--r--  playbooks/roles/docker/tasks/main.yml                      55
-rw-r--r--  playbooks/roles/nginx-proxy/handlers/main.yml               7
-rw-r--r--  playbooks/roles/nginx-proxy/tasks/main.yml                 17
-rw-r--r--  playbooks/roles/nginx-proxy/templates/docker-compose.yml   38
-rw-r--r--  playbooks/roles/outbound/tasks/main.yml                    28
-rw-r--r--  playbooks/roles/outbound/templates/config/config.yaml     387
-rw-r--r--  playbooks/roles/outbound/templates/data/.gitkeep            0
-rw-r--r--  playbooks/roles/outbound/templates/docker-compose.yml      39
20 files changed, 1056 insertions(+), 0 deletions(-)
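
The playbooks in this commit target the inventory groups docker, nginx-proxy, outbound, and swarm, and the common and swarm plays reference an rfc1918_cgnat_networks variable. A minimal inventory sketch under those assumptions; the hostnames, addresses, and group layout here are placeholders, not part of the commit:

---
# inventory.yml (hypothetical; run e.g. ansible-playbook -i inventory.yml playbooks/deploy-common.yml)
all:
  vars:
    rfc1918_cgnat_networks:
      - 10.0.0.0/8
      - 172.16.0.0/12
      - 192.168.0.0/16
      - 100.64.0.0/10
  children:
    docker:
      hosts:
        node1.example.internal:
        node2.example.internal:
    swarm:
      hosts:
        node1.example.internal:
        node2.example.internal:
    nginx-proxy:
      hosts:
        node1.example.internal:
    outbound:
      hosts:
        node2.example.internal: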
diff --git a/playbooks/deploy-common.yml b/playbooks/deploy-common.yml
new file mode 100644
index 0000000..49ba22a
--- /dev/null
+++ b/playbooks/deploy-common.yml
@@ -0,0 +1,7 @@
+---
+
+- name: Common setup
+ hosts: all
+ become: true
+ roles:
+ - common
diff --git a/playbooks/deploy-docker.yml b/playbooks/deploy-docker.yml
new file mode 100644
index 0000000..a355bd3
--- /dev/null
+++ b/playbooks/deploy-docker.yml
@@ -0,0 +1,7 @@
+---
+
+- name: Docker setup
+ hosts: docker
+ become: true
+ roles:
+ - docker
diff --git a/playbooks/deploy-nginx-proxy.yml b/playbooks/deploy-nginx-proxy.yml
new file mode 100644
index 0000000..329f186
--- /dev/null
+++ b/playbooks/deploy-nginx-proxy.yml
@@ -0,0 +1,7 @@
+---
+
+- name: nginx-proxy setup
+ hosts: nginx-proxy
+ become: true
+ roles:
+ - nginx-proxy
diff --git a/playbooks/deploy-outbound.yml b/playbooks/deploy-outbound.yml
new file mode 100644
index 0000000..5867313
--- /dev/null
+++ b/playbooks/deploy-outbound.yml
@@ -0,0 +1,7 @@
+---
+
+- name: outbound setup
+ hosts: outbound
+ become: true
+ roles:
+ - outbound
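
These four single-role playbooks plus deploy-swarm.yml below appear intended to run in dependency order: common first, then docker, then the swarm and service plays. A site-level wrapper is one way to encode that order; this is a sketch, not a file in this commit:

---
# site.yml (hypothetical)
- import_playbook: deploy-common.yml
- import_playbook: deploy-docker.yml
- import_playbook: deploy-swarm.yml
- import_playbook: deploy-nginx-proxy.yml
- import_playbook: deploy-outbound.yml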
diff --git a/playbooks/deploy-swarm.yml b/playbooks/deploy-swarm.yml
new file mode 100644
index 0000000..fc4da39
--- /dev/null
+++ b/playbooks/deploy-swarm.yml
@@ -0,0 +1,115 @@
+---
+
+- name: Install Ceph
+ hosts: swarm
+ become: true
+ tasks:
+ - name: Install Ceph
+ ansible.builtin.apt:
+ name:
+ - ceph-common
+ - ceph-fuse
+ state: present
+
+ # - name: Copy Ceph Secret
+ # ansible.builtin.copy:
+ # content: "{{ ceph_secret }}"
+ # dest: /etc/ceph/secret.key
+
+ # ceph config generate-minimal-conf
+ - name: Copy Ceph Configuration
+ ansible.builtin.copy:
+ content: "[global]\n fsid = {{ ceph_fsid }}\n mon_host = {{ ceph_mon_host }}\n"
+ dest: /etc/ceph/ceph.conf
+ mode: '0644'
+
+ # ceph fs authorize cephfs client.swarm / rw
+ - name: Copy Ceph Keyring
+ ansible.builtin.copy:
+ content: "[client.{{ ceph_client_name }}]\n key = {{ ceph_secret }}\n"
+ dest: "/etc/ceph/ceph.client.{{ ceph_client_name }}.keyring"
+ mode: '0600'
+
+ - name: Adjust ceph mount perms
+ ansible.builtin.file:
+ path: /mnt/ceph
+ owner: root
+ group: root
+ state: directory
+ recurse: true
+
+ - name: Mount Ceph on Boot
+ ansible.builtin.lineinfile:
+ path: /etc/fstab
+ regexp: '^none\s+/mnt/ceph\s+fuse\.ceph'
+ line: "none /mnt/ceph fuse.ceph ceph.id={{ ceph_client_name }},_netdev,defaults 0 0"
+ create: true
+ mode: "0644"
+
+ - name: Mount ceph now
+ ansible.builtin.shell:
+ cmd: "mount -a"
+
+ - name: Adjust ceph mount perms for docker
+ ansible.builtin.file:
+ path: /mnt/ceph/docker
+ owner: root
+ group: docker
+ state: directory
+ recurse: true
+
+- name: Initial docker swarm fw rules
+ hosts: swarm
+ become: true
+ tasks:
+ - name: Enable local swarm comms
+ loop: "{{ rfc1918_cgnat_networks }}"
+ community.general.ufw:
+ rule: allow
+ port: "2377"
+ from: "{{ item }}"
+ state: "enabled"
+
+- name: Initial docker swarm init
+ hosts: swarm[0]
+ become: true
+ tasks:
+ - name: Check Docker Swarm status
+ ansible.builtin.shell: docker info --format '{{ "{{.Swarm.LocalNodeState}}" }}'
+ register: docker_swarm_status
+ changed_when: false
+
+ - name: Initialize Docker Swarm
+ ansible.builtin.shell:
+ cmd: docker swarm init --advertise-addr {{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }}
+ when: "'inactive' in docker_swarm_status.stdout"
+ register: swarm_init
+ changed_when: "'Swarm initialized' in swarm_init.stdout"
+
+ - name: Retrieve Docker Swarm manager token
+ ansible.builtin.shell: docker swarm join-token manager -q
+ register: manager_token
+ changed_when: false
+
+- name: Join remaining managers to Docker Swarm
+ hosts: swarm:!swarm[0]
+ become: true
+ tasks:
+ - name: Check Docker Swarm status before attempting to join
+ ansible.builtin.shell: docker info --format '{{ "{{.Swarm.LocalNodeState}}" }}'
+ register: docker_swarm_status
+ changed_when: false
+
+ - name: Join Swarm as manager
+ ansible.builtin.shell:
+ cmd: docker swarm join --token {{ hostvars[groups['swarm'][0]]['manager_token'].stdout }} {{ hostvars[groups['swarm'][0]]['ansible_default_ipv4']['address'] }}:2377
+ when: hostvars[groups['swarm'][0]]['manager_token'].stdout is defined and docker_swarm_status.stdout != "active"
+ register: swarm_join
+ changed_when: "'This node joined a swarm as a manager' in swarm_join.stdout"
+
+ - name: Label Docker Swarm manager nodes
+ ansible.builtin.shell:
+ cmd: docker node update --label-add manager=true {{ ansible_hostname }}
+ when: swarm_join is changed
+ changed_when: false
+
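
Note that only the swarm management port 2377 is opened above; Docker overlay networking between nodes also uses 7946/tcp, 7946/udp, and 4789/udp, which this commit does not open. A quick post-deploy check that the plays converged, as a hedged sketch (not a file in this commit; it reuses the same Jinja escaping trick as the status checks above):

- name: Verify swarm membership
  hosts: swarm[0]
  become: true
  tasks:
    - name: List swarm nodes with their manager status
      ansible.builtin.command: docker node ls --format '{{ "{{.Hostname}} {{.ManagerStatus}}" }}'
      register: node_list
      changed_when: false

    - name: Show the node list
      ansible.builtin.debug:
        var: node_list.stdout_lines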
diff --git a/playbooks/roles/common/files/authorized_keys b/playbooks/roles/common/files/authorized_keys
new file mode 100644
index 0000000..6d49a82
--- /dev/null
+++ b/playbooks/roles/common/files/authorized_keys
@@ -0,0 +1 @@
+ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPnLAE5TrdYF8QWCSkvgUp15XKcwQJ9393a/CghSo8dG serve@ansible
diff --git a/playbooks/roles/common/files/sshd_config b/playbooks/roles/common/files/sshd_config
new file mode 100644
index 0000000..239a0c0
--- /dev/null
+++ b/playbooks/roles/common/files/sshd_config
@@ -0,0 +1,21 @@
+Include /etc/ssh/sshd_config.d/*.conf
+
+Port 22
+PermitRootLogin no
+PubkeyAuthentication yes
+PasswordAuthentication no
+
+KbdInteractiveAuthentication no
+
+UsePAM yes
+
+AllowAgentForwarding yes
+X11Forwarding no
+PrintMotd no
+PrintLastLog yes
+TCPKeepAlive yes
+ClientAliveInterval 300
+ClientAliveCountMax 1
+
+AcceptEnv LANG LC_*
+Subsystem sftp /usr/lib/openssh/sftp-server
diff --git a/playbooks/roles/common/handlers/main.yml b/playbooks/roles/common/handlers/main.yml
new file mode 100644
index 0000000..015db8b
--- /dev/null
+++ b/playbooks/roles/common/handlers/main.yml
@@ -0,0 +1,23 @@
+---
+
+- name: Enable systemd-timesyncd
+ ansible.builtin.service:
+ name: systemd-timesyncd
+ state: restarted
+ enabled: true
+
+- name: Restart sshd
+ ansible.builtin.service:
+ name: sshd
+ state: restarted
+ enabled: true
+
+- name: Enable ufw
+ ansible.builtin.service:
+ name: ufw
+ enabled: true
+
+- name: Reload ufw
+ ansible.builtin.service:
+ name: ufw
+ state: restarted
diff --git a/playbooks/roles/common/tasks/main.yml b/playbooks/roles/common/tasks/main.yml
new file mode 100644
index 0000000..446db35
--- /dev/null
+++ b/playbooks/roles/common/tasks/main.yml
@@ -0,0 +1,66 @@
+---
+
+### Rly base stuff
+
+- name: Apt upgrade, update
+ ansible.builtin.apt:
+ update_cache: true
+ upgrade: "dist"
+
+- name: Install dependencies
+ ansible.builtin.apt:
+ name:
+ - apt-transport-https
+ - ca-certificates
+ - curl
+ - gnupg-agent
+ - software-properties-common
+ - vim
+ - git
+ - rsync
+ state: latest
+ update_cache: true
+
+### Time
+
+- name: Timesyncd
+ ansible.builtin.apt:
+ name:
+ - systemd-timesyncd
+ notify:
+ - Enable systemd-timesyncd
+
+### SSH
+
+- name: Copy sshd_config
+ ansible.builtin.copy:
+ src: files/sshd_config
+ dest: /etc/ssh/sshd_config
+ owner: root
+ group: root
+ mode: u=rw,g=r,o=r
+ notify:
+ - Restart sshd
+
+- name: Copy authorized_keys
+ ansible.builtin.copy:
+ src: files/authorized_keys
+ dest: /home/{{ ansible_user }}/.ssh/authorized_keys
+
+### UFW
+
+- name: Install ufw
+ ansible.builtin.apt:
+ name: ufw
+ state: present
+
+- name: Allow ssh from rfc1918 networks
+ loop: "{{ rfc1918_cgnat_networks }}"
+ community.general.ufw:
+ rule: allow
+ name: "OpenSSH"
+ from: "{{ item }}"
+ state: "enabled"
+ notify:
+ - Enable ufw
+ - Reload ufw
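
Two hardening tweaks worth considering for the SSH tasks above: validating sshd_config before it lands (a syntax error there can lock out remote access), and pinning ownership and mode on authorized_keys, which the copy task currently leaves to defaults. A sketch of both, assuming the same file layout and the existing "Restart sshd" handler:

- name: Copy sshd_config with validation
  ansible.builtin.copy:
    src: files/sshd_config
    dest: /etc/ssh/sshd_config
    owner: root
    group: root
    mode: '0644'
    validate: /usr/sbin/sshd -t -f %s
  notify:
    - Restart sshd

- name: Copy authorized_keys with explicit ownership
  ansible.builtin.copy:
    src: files/authorized_keys
    dest: /home/{{ ansible_user }}/.ssh/authorized_keys
    owner: '{{ ansible_user }}'
    group: '{{ ansible_user }}'
    mode: '0600'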
diff --git a/playbooks/roles/docker/files/docker-compose@.service b/playbooks/roles/docker/files/docker-compose@.service
new file mode 100644
index 0000000..77e8892
--- /dev/null
+++ b/playbooks/roles/docker/files/docker-compose@.service
@@ -0,0 +1,19 @@
+[Unit]
+Description=%i service with docker compose
+Requires=docker.service
+After=docker.service
+
+[Service]
+RemainAfterExit=true
+WorkingDirectory=/etc/docker/compose/%i
+ExecStartPre=/bin/bash -c "/usr/bin/docker compose pull || true"
+ExecStart=/usr/bin/docker compose up
+ExecStop=/usr/bin/docker compose down
+Restart=always
+RestartSec=5
+StartLimitInterval=500
+StartLimitBurst=3
+
+[Install]
+WantedBy=multi-user.target
+
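
The %i in the unit name maps to a directory under /etc/docker/compose, so each compose project becomes one service instance. Enabling an instance looks roughly like the following (an illustration of the pattern; the outbound role later in this commit does exactly this for headscale):

- name: Enable the compose unit for the nginx-proxy project
  ansible.builtin.systemd_service:
    name: docker-compose@nginx-proxy
    state: started
    enabled: true
    daemon_reload: true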
diff --git a/playbooks/roles/docker/files/docker-rollout b/playbooks/roles/docker/files/docker-rollout
new file mode 100755
index 0000000..c15d5a8
--- /dev/null
+++ b/playbooks/roles/docker/files/docker-rollout
@@ -0,0 +1,204 @@
+#!/bin/sh
+set -e
+
+# Defaults
+HEALTHCHECK_TIMEOUT=60
+NO_HEALTHCHECK_TIMEOUT=10
+WAIT_AFTER_HEALTHY_DELAY=0
+
+# Print metadata for Docker CLI plugin
+if [ "$1" = "docker-cli-plugin-metadata" ]; then
+ cat <<EOF
+{
+ "SchemaVersion": "0.1.0",
+ "Vendor": "Karol Musur",
+ "Version": "v0.9",
+ "ShortDescription": "Rollout new Compose service version"
+}
+EOF
+ exit
+fi
+
+# Save docker arguments, i.e. arguments before "rollout"
+while [ $# -gt 0 ]; do
+ if [ "$1" = "rollout" ]; then
+ shift
+ break
+ fi
+
+ DOCKER_ARGS="$DOCKER_ARGS $1"
+ shift
+done
+
+# Check if compose v2 is available
+if docker compose >/dev/null 2>&1; then
+ # shellcheck disable=SC2086 # DOCKER_ARGS must be unquoted to allow multiple arguments
+ COMPOSE_COMMAND="docker $DOCKER_ARGS compose"
+elif docker-compose >/dev/null 2>&1; then
+ COMPOSE_COMMAND="docker-compose"
+else
+ echo "docker compose or docker-compose is required"
+ exit 1
+fi
+
+usage() {
+ cat <<EOF
+
+Usage: docker rollout [OPTIONS] SERVICE
+
+Rollout new Compose service version.
+
+Options:
+ -h, --help Print usage
+ -f, --file FILE Compose configuration files
+ -t, --timeout N Healthcheck timeout (default: $HEALTHCHECK_TIMEOUT seconds)
+ -w, --wait N When no healthcheck is defined, wait for N seconds
+ before stopping old container (default: $NO_HEALTHCHECK_TIMEOUT seconds)
+ --wait-after-healthy N When healthcheck is defined and succeeds, wait for additional N seconds
+ before stopping the old container (default: 0 seconds)
+ --env-file FILE Specify an alternate environment file
+
+EOF
+}
+
+exit_with_usage() {
+ usage
+ exit 1
+}
+
+healthcheck() {
+ # shellcheck disable=SC2086 # DOCKER_ARGS must be unquoted to allow multiple arguments
+ docker $DOCKER_ARGS inspect --format='{{json .State.Health.Status}}' "$1" | grep -v "unhealthy" | grep -q "healthy"
+}
+
+scale() {
+ # shellcheck disable=SC2086 # COMPOSE_FILES and ENV_FILES must be unquoted to allow multiple files
+ $COMPOSE_COMMAND $COMPOSE_FILES $ENV_FILES up --detach --scale "$1=$2" --no-recreate "$1"
+}
+
+main() {
+ # shellcheck disable=SC2086 # COMPOSE_FILES and ENV_FILES must be unquoted to allow multiple files
+ if [ -z "$($COMPOSE_COMMAND $COMPOSE_FILES $ENV_FILES ps --quiet "$SERVICE")" ]; then
+ echo "==> Service '$SERVICE' is not running. Starting the service."
+ $COMPOSE_COMMAND $COMPOSE_FILES $ENV_FILES up --detach --no-recreate "$SERVICE"
+ exit 0
+ fi
+
+ # shellcheck disable=SC2086 # COMPOSE_FILES and ENV_FILES must be unquoted to allow multiple files
+ OLD_CONTAINER_IDS_STRING=$($COMPOSE_COMMAND $COMPOSE_FILES $ENV_FILES ps --quiet "$SERVICE" | tr '\n' '|' | sed 's/|$//')
+ OLD_CONTAINER_IDS=$(echo "$OLD_CONTAINER_IDS_STRING" | tr '|' ' ')
+ SCALE=$(echo "$OLD_CONTAINER_IDS" | wc -w | tr -d ' ')
+ SCALE_TIMES_TWO=$((SCALE * 2))
+ echo "==> Scaling '$SERVICE' to '$SCALE_TIMES_TWO' instances"
+ scale "$SERVICE" $SCALE_TIMES_TWO
+
+ # Create a variable that contains the IDs of the new containers, but not the old ones
+ # shellcheck disable=SC2086 # COMPOSE_FILES and ENV_FILES must be unquoted to allow multiple files
+ NEW_CONTAINER_IDS=$($COMPOSE_COMMAND $COMPOSE_FILES $ENV_FILES ps --quiet "$SERVICE" | grep -Ev "$OLD_CONTAINER_IDS_STRING" | tr '\n' ' ')
+
+ # Check if first container has healthcheck
+ # shellcheck disable=SC2086 # DOCKER_ARGS must be unquoted to allow multiple arguments
+ if docker $DOCKER_ARGS inspect --format='{{json .State.Health}}' "$(echo $OLD_CONTAINER_IDS | cut -d\ -f 1)" | grep -q "Status"; then
+ echo "==> Waiting for new containers to be healthy (timeout: $HEALTHCHECK_TIMEOUT seconds)"
+ for _ in $(seq 1 "$HEALTHCHECK_TIMEOUT"); do
+ SUCCESS=0
+
+ for NEW_CONTAINER_ID in $NEW_CONTAINER_IDS; do
+ if healthcheck "$NEW_CONTAINER_ID"; then
+ SUCCESS=$((SUCCESS + 1))
+ fi
+ done
+
+ if [ "$SUCCESS" = "$SCALE" ]; then
+ break
+ fi
+
+ sleep 1
+ done
+
+ SUCCESS=0
+
+ for NEW_CONTAINER_ID in $NEW_CONTAINER_IDS; do
+ if healthcheck "$NEW_CONTAINER_ID"; then
+ SUCCESS=$((SUCCESS + 1))
+ fi
+ done
+
+ if [ "$SUCCESS" != "$SCALE" ]; then
+ echo "==> New containers are not healthy. Rolling back." >&2
+
+ docker $DOCKER_ARGS stop $NEW_CONTAINER_IDS
+ docker $DOCKER_ARGS rm $NEW_CONTAINER_IDS
+
+ exit 1
+ fi
+
+ if [ "$WAIT_AFTER_HEALTHY_DELAY" != "0" ]; then
+ echo "==> Waiting for healthy containers to settle down ($WAIT_AFTER_HEALTHY_DELAY seconds)"
+ sleep $WAIT_AFTER_HEALTHY_DELAY
+ fi
+ else
+ echo "==> Waiting for new containers to be ready ($NO_HEALTHCHECK_TIMEOUT seconds)"
+ sleep "$NO_HEALTHCHECK_TIMEOUT"
+ fi
+
+ echo "==> Stopping and removing old containers"
+
+ # shellcheck disable=SC2086 # DOCKER_ARGS and OLD_CONTAINER_IDS must be unquoted to allow multiple arguments
+ docker $DOCKER_ARGS stop $OLD_CONTAINER_IDS
+ # shellcheck disable=SC2086 # DOCKER_ARGS and OLD_CONTAINER_IDS must be unquoted to allow multiple arguments
+ docker $DOCKER_ARGS rm $OLD_CONTAINER_IDS
+}
+
+while [ $# -gt 0 ]; do
+ case "$1" in
+ -h | --help)
+ usage
+ exit 0
+ ;;
+ -f | --file)
+ COMPOSE_FILES="$COMPOSE_FILES -f $2"
+ shift 2
+ ;;
+ --env-file)
+ ENV_FILES="$ENV_FILES --env-file $2"
+ shift 2
+ ;;
+ -t | --timeout)
+ HEALTHCHECK_TIMEOUT="$2"
+ shift 2
+ ;;
+ -w | --wait)
+ NO_HEALTHCHECK_TIMEOUT="$2"
+ shift 2
+ ;;
+ --wait-after-healthy)
+ WAIT_AFTER_HEALTHY_DELAY="$2"
+ shift 2
+ ;;
+ -*)
+ echo "Unknown option: $1"
+ exit_with_usage
+ ;;
+ *)
+ if [ -n "$SERVICE" ]; then
+ echo "SERVICE is already set to '$SERVICE'"
+
+ if [ "$SERVICE" != "$1" ]; then
+ exit_with_usage
+ fi
+ fi
+
+ SERVICE="$1"
+ shift
+ ;;
+ esac
+done
+
+# Require SERVICE argument
+if [ -z "$SERVICE" ]; then
+ echo "SERVICE is missing"
+ exit_with_usage
+fi
+
+main
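
docker-rollout only gates on container health when the service defines a healthcheck; otherwise it just sleeps through the --wait window and hopes. Services rolled out this way should declare one, roughly like the sketch below (service name, port, and the assumption that curl exists in the image are placeholders; the headscale compose file later in this commit uses wget the same way):

services:
  web:
    image: example/web:latest
    healthcheck:
      test: ["CMD", "curl", "-fsS", "http://localhost:8080/health"]
      interval: 10s
      timeout: 5s
      retries: 3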
diff --git a/playbooks/roles/docker/handlers/main.yml b/playbooks/roles/docker/handlers/main.yml
new file mode 100644
index 0000000..2db0186
--- /dev/null
+++ b/playbooks/roles/docker/handlers/main.yml
@@ -0,0 +1,8 @@
+---
+
+- name: Enable docker
+ ansible.builtin.service:
+ name: docker
+ state: restarted
+ enabled: true
+
diff --git a/playbooks/roles/docker/tasks/main.yml b/playbooks/roles/docker/tasks/main.yml
new file mode 100644
index 0000000..8b91f6a
--- /dev/null
+++ b/playbooks/roles/docker/tasks/main.yml
@@ -0,0 +1,55 @@
+---
+
+- name: Install dependencies
+ ansible.builtin.apt:
+ name:
+ - apt-transport-https
+ - ca-certificates
+ - curl
+ - gnupg-agent
+ - software-properties-common
+ state: present
+ update_cache: true
+
+- name: Docker GPG key
+ become: true
+ ansible.builtin.apt_key:
+ url: >-
+ https://download.docker.com/linux/{{ ansible_distribution | lower }}/gpg
+ state: present
+
+- name: Repository docker
+ ansible.builtin.apt_repository:
+ repo: >-
+ deb https://download.docker.com/linux/{{ ansible_distribution | lower }}
+ {{ ansible_distribution_release }} stable
+ state: present
+
+- name: Install docker
+ ansible.builtin.apt:
+ name:
+ - docker-ce
+ - docker-ce-cli
+ - containerd.io
+ state: present
+ update_cache: true
+ notify:
+ - Enable docker
+
+- name: Copy docker rollout script
+ ansible.builtin.copy:
+ src: docker-rollout
+ dest: /usr/local/bin/docker-rollout
+ mode: '0755'
+
+- name: Copy docker-compose@.service
+ ansible.builtin.copy:
+ src: docker-compose@.service
+ dest: /etc/systemd/system/docker-compose@.service
+
+- name: Ensure /etc/docker/compose exists
+ ansible.builtin.file:
+ path: /etc/docker/compose
+ state: directory
+ mode: '0700'
+
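
One gap in the tasks above: copying docker-compose@.service never triggers a systemd daemon-reload, so updates to the unit file will not take effect until something else reloads systemd (the outbound role happens to pass daemon_reload: true later). A handler-based sketch of the fix; the handler name is an assumption:

# in roles/docker/handlers/main.yml
- name: Reload systemd
  ansible.builtin.systemd_service:
    daemon_reload: true

# and on the "Copy docker-compose@.service" task:
#   notify:
#     - Reload systemd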
diff --git a/playbooks/roles/nginx-proxy/handlers/main.yml b/playbooks/roles/nginx-proxy/handlers/main.yml
new file mode 100644
index 0000000..43302b5
--- /dev/null
+++ b/playbooks/roles/nginx-proxy/handlers/main.yml
@@ -0,0 +1,7 @@
+---
+
+- name: (Re)start nginx-proxy
+ ansible.builtin.service:
+ name: docker-compose@nginx-proxy
+ state: restarted
+ enabled: true
diff --git a/playbooks/roles/nginx-proxy/tasks/main.yml b/playbooks/roles/nginx-proxy/tasks/main.yml
new file mode 100644
index 0000000..9c14072
--- /dev/null
+++ b/playbooks/roles/nginx-proxy/tasks/main.yml
@@ -0,0 +1,17 @@
+---
+
+- name: Build nginx-proxy compose dirs
+ ansible.builtin.file:
+ state: directory
+ dest: '/etc/docker/compose/nginx-proxy/{{ item.path }}'
+ with_filetree: '../templates'
+ when: item.state == 'directory'
+
+- name: Build nginx-proxy compose files
+ ansible.builtin.template:
+ src: '{{ item.src }}'
+ dest: '/etc/docker/compose/nginx-proxy/{{ item.path }}'
+ with_filetree: '../templates'
+ when: item.state == 'file'
+ notify:
+ - (Re)start nginx-proxy
diff --git a/playbooks/roles/nginx-proxy/templates/docker-compose.yml b/playbooks/roles/nginx-proxy/templates/docker-compose.yml
new file mode 100644
index 0000000..fd49712
--- /dev/null
+++ b/playbooks/roles/nginx-proxy/templates/docker-compose.yml
@@ -0,0 +1,38 @@
+---
+
+services:
+ nginx-proxy:
+ image: nginxproxy/nginx-proxy
+ container_name: nginx-proxy
+ ports:
+ - "80:80"
+ - "443:443"
+ volumes:
+ - /var/run/docker.sock:/tmp/docker.sock:ro
+ - ./certs:/etc/nginx/certs
+ networks:
+ - proxy
+ labels:
+ - com.github.jrcs.letsencrypt_nginx_proxy_companion.nginx_proxy
+
+ nginx-acme-companion:
+ image: nginxproxy/acme-companion
+ depends_on:
+ - nginx-proxy
+ volumes:
+ - /var/run/docker.sock:/var/run/docker.sock:ro
+ - acme:/etc/acme.sh
+ - ./certs:/etc/nginx/certs
+ environment:
+ - "DEFAULT_EMAIL={{ certs_email }}"
+ - "ACME_CHALLENGE=DNS-01"
+ - "ACMESH_DNS_API_CONFIG={'DNS_API': 'dns_cf', 'CF_Key': '{{ cloudflare_token }}', 'CF_Email': '{{ cloudflare_email }}'}"
+ networks:
+ - proxy
+
+volumes:
+ acme:
+
+networks:
+ proxy:
+ name: proxy
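
Backends opt in to this proxy by joining the external proxy network and setting the VIRTUAL_HOST family of variables, which is exactly how the headscale compose file at the end of this commit is wired. A minimal backend sketch; the service name, port, and domain are placeholders:

services:
  app:
    image: example/app:latest
    environment:
      - VIRTUAL_HOST=app.example.com
      - VIRTUAL_PORT=3000
      - LETSENCRYPT_HOST=app.example.com
    networks:
      - proxy

networks:
  proxy:
    external: true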
diff --git a/playbooks/roles/outbound/tasks/main.yml b/playbooks/roles/outbound/tasks/main.yml
new file mode 100644
index 0000000..84070d6
--- /dev/null
+++ b/playbooks/roles/outbound/tasks/main.yml
@@ -0,0 +1,28 @@
+---
+
+- name: Build headscale compose dirs
+ ansible.builtin.file:
+ state: directory
+ dest: '/etc/docker/compose/headscale/{{ item.path }}'
+ with_filetree: '../templates'
+ when: item.state == 'directory'
+
+- name: Build headscale compose files
+ ansible.builtin.template:
+ src: '{{ item.src }}'
+ dest: '/etc/docker/compose/headscale/{{ item.path }}'
+ with_filetree: '../templates'
+ when: item.state == 'file'
+
+- name: Daemon-reload and enable headscale
+ ansible.builtin.systemd_service:
+ state: started
+ enabled: true
+ daemon_reload: true
+ name: docker-compose@headscale
+
+- name: Perform rollout in case the daemon was already started
+ ansible.builtin.shell:
+ cmd: /usr/local/bin/docker-rollout rollout -f docker-compose.yml headscale
+ chdir: /etc/docker/compose/headscale
+
diff --git a/playbooks/roles/outbound/templates/config/config.yaml b/playbooks/roles/outbound/templates/config/config.yaml
new file mode 100644
index 0000000..6d3fdae
--- /dev/null
+++ b/playbooks/roles/outbound/templates/config/config.yaml
@@ -0,0 +1,387 @@
+---
+
+server_url: '{{ headscale_url }}'
+listen_addr: '{{ headscale_listen_addr }}'
+
+# Address to listen to /metrics, you may want
+# to keep this endpoint private to your internal
+# network
+#
+metrics_listen_addr: 127.0.0.1:9090
+
+# Address to listen for gRPC.
+# gRPC is used for controlling a headscale server
+# remotely with the CLI
+# Note: Remote access _only_ works if you have
+# valid certificates.
+#
+# For production:
+# grpc_listen_addr: 0.0.0.0:50443
+grpc_listen_addr: 127.0.0.1:50443
+
+# Allow the gRPC admin interface to run in INSECURE
+# mode. This is not recommended as the traffic will
+# be unencrypted. Only enable if you know what you
+# are doing.
+grpc_allow_insecure: false
+
+# The Noise section includes specific configuration for the
+# TS2021 Noise protocol
+noise:
+ # The Noise private key is used to encrypt the
+ # traffic between headscale and Tailscale clients when
+ # using the new Noise-based protocol.
+ private_key_path: /var/lib/headscale/noise_private.key
+
+# List of IP prefixes to allocate tailaddresses from.
+# Each prefix consists of either an IPv4 or IPv6 address,
+# and the associated prefix length, delimited by a slash.
+# It must be within IP ranges supported by the Tailscale
+# client - i.e., subnets of 100.64.0.0/10 and fd7a:115c:a1e0::/48.
+# See below:
+# IPv6: https://github.com/tailscale/tailscale/blob/22ebb25e833264f58d7c3f534a8b166894a89536/net/tsaddr/tsaddr.go#LL81C52-L81C71
+# IPv4: https://github.com/tailscale/tailscale/blob/22ebb25e833264f58d7c3f534a8b166894a89536/net/tsaddr/tsaddr.go#L33
+# Any other range is NOT supported, and it will cause unexpected issues.
+prefixes:
+ v4: 100.64.0.0/10
+ v6: fd7a:115c:a1e0::/48
+
+ # Strategy used for allocation of IPs to nodes, available options:
+ # - sequential (default): assigns the next free IP from the previous given IP.
+ # - random: assigns the next free IP from a pseudo-random IP generator (crypto/rand).
+ allocation: sequential
+
+# DERP is a relay system that Tailscale uses when a direct
+# connection cannot be established.
+# https://tailscale.com/blog/how-tailscale-works/#encrypted-tcp-relays-derp
+#
+# headscale needs a list of DERP servers that can be presented
+# to the clients.
+derp:
+ server:
+ # If enabled, runs the embedded DERP server and merges it into the rest of the DERP config
+ # The Headscale server_url defined above MUST be using https, DERP requires TLS to be in place
+ enabled: false
+
+ # Region ID to use for the embedded DERP server.
+ # The local DERP prevails if the region ID collides with other region ID coming from
+ # the regular DERP config.
+ region_id: 999
+
+ # Region code and name are displayed in the Tailscale UI to identify a DERP region
+ region_code: "headscale"
+ region_name: "Headscale Embedded DERP"
+
+ # Listens over UDP at the configured address for STUN connections - to help with NAT traversal.
+ # When the embedded DERP server is enabled stun_listen_addr MUST be defined.
+ #
+ # For more details on how this works, check this great article: https://tailscale.com/blog/how-tailscale-works/
+ stun_listen_addr: "0.0.0.0:3478"
+
+ # Private key used to encrypt the traffic between headscale DERP
+ # and Tailscale clients.
+ # The private key file will be autogenerated if it's missing.
+ #
+ private_key_path: /var/lib/headscale/derp_server_private.key
+
+ # This flag can be used, so the DERP map entry for the embedded DERP server is not written automatically,
+ # it enables the creation of your very own DERP map entry using a locally available file with the parameter DERP.paths
+ # If you enable the DERP server and set this to false, it is required to add the DERP server to the DERP map using DERP.paths
+ automatically_add_embedded_derp_region: true
+
+ # For better connection stability (especially when using an Exit-Node and DNS is not working),
+ # it is possible to optionally add the public IPv4 and IPv6 address to the Derp-Map using:
+ ipv4: 1.2.3.4
+ ipv6: 2001:db8::1
+
+ # List of externally available DERP maps encoded in JSON
+ urls:
+ - https://controlplane.tailscale.com/derpmap/default
+
+ # Locally available DERP map files encoded in YAML
+ #
+ # This option is mostly interesting for people hosting
+ # their own DERP servers:
+ # https://tailscale.com/kb/1118/custom-derp-servers/
+ #
+ # paths:
+ # - /etc/headscale/derp-example.yaml
+ paths: []
+
+ # If enabled, a worker will be set up to periodically
+ # refresh the given sources and update the derpmap.
+ #
+ auto_update_enabled: true
+
+ # How often should we check for DERP updates?
+ update_frequency: 24h
+
+# Disables the automatic check for headscale updates on startup
+disable_check_updates: false
+
+# Time before an inactive ephemeral node is deleted
+ephemeral_node_inactivity_timeout: 30m
+
+database:
+ # Database type. Available options: sqlite, postgres
+ # Please note that using Postgres is highly discouraged as it is only supported for legacy reasons.
+ # All new development, testing and optimisations are done with SQLite in mind.
+ type: sqlite
+
+ # Enable debug mode. This setting requires the log.level to be set to "debug" or "trace".
+ debug: false
+
+ # GORM configuration settings.
+ gorm:
+ # Enable prepared statements.
+ prepare_stmt: true
+
+ # Enable parameterized queries.
+ parameterized_queries: true
+
+ # Skip logging "record not found" errors.
+ skip_err_record_not_found: true
+
+ # Threshold for slow queries in milliseconds.
+ slow_threshold: 1000
+
+ # SQLite config
+ sqlite:
+ path: /var/lib/headscale/db.sqlite
+
+ # Enable WAL mode for SQLite. This is recommended for production environments.
+ # https://www.sqlite.org/wal.html
+ write_ahead_log: true
+
+ # Maximum number of WAL file frames before the WAL file is automatically checkpointed.
+ # https://www.sqlite.org/c3ref/wal_autocheckpoint.html
+ # Set to 0 to disable automatic checkpointing.
+ wal_autocheckpoint: 1000
+
+ # # Postgres config
+ # Please note that using Postgres is highly discouraged as it is only supported for legacy reasons.
+ # See database.type for more information.
+ # postgres:
+ # # If using a Unix socket to connect to Postgres, set the socket path in the 'host' field and leave 'port' blank.
+ # host: localhost
+ # port: 5432
+ # name: headscale
+ # user: foo
+ # pass: bar
+ # max_open_conns: 10
+ # max_idle_conns: 10
+ # conn_max_idle_time_secs: 3600
+
+ # # If an 'sslmode' other than 'require' (true) or 'disable' (false) is required, set the 'sslmode' you need
+ # # in the 'ssl' field. Refers to https://www.postgresql.org/docs/current/libpq-ssl.html Table 34.1.
+ # ssl: false
+
+### TLS configuration
+#
+## Let's encrypt / ACME
+#
+# headscale supports automatically requesting and setting up
+# TLS for a domain with Let's Encrypt.
+#
+# URL to ACME directory
+acme_url: https://acme-v02.api.letsencrypt.org/directory
+
+# Email to register with ACME provider
+acme_email: ""
+
+# Domain name to request a TLS certificate for:
+tls_letsencrypt_hostname: ""
+
+# Path to store certificates and metadata needed by
+# letsencrypt
+# For production:
+tls_letsencrypt_cache_dir: /var/lib/headscale/cache
+
+# Type of ACME challenge to use, currently supported types:
+# HTTP-01 or TLS-ALPN-01
+# See: docs/ref/tls.md for more information
+tls_letsencrypt_challenge_type: HTTP-01
+# When HTTP-01 challenge is chosen, letsencrypt must set up a
+# verification endpoint, and it will be listening on:
+# :http = port 80
+tls_letsencrypt_listen: ":http"
+
+## Use already defined certificates:
+tls_cert_path: ""
+tls_key_path: ""
+
+log:
+ # Output formatting for logs: text or json
+ format: text
+ level: info
+
+## Policy
+# headscale supports Tailscale's ACL policies.
+# Please have a look to their KB to better
+# understand the concepts: https://tailscale.com/kb/1018/acls/
+policy:
+ # The mode can be "file" or "database" that defines
+ # where the ACL policies are stored and read from.
+ mode: file
+ # If the mode is set to "file", the path to a
+ # HuJSON file containing ACL policies.
+ path: ""
+
+## DNS
+#
+# headscale supports Tailscale's DNS configuration and MagicDNS.
+# Please have a look to their KB to better understand the concepts:
+#
+# - https://tailscale.com/kb/1054/dns/
+# - https://tailscale.com/kb/1081/magicdns/
+# - https://tailscale.com/blog/2021-09-private-dns-with-magicdns/
+#
+# Please note that for the DNS configuration to have any effect,
+# clients must have the `--accept-dns=true` option enabled; this is
+# the default for the Tailscale client, so it is normally already in
+# effect.
+#
+# Setting _any_ of the configuration and `--accept-dns=true` on the
+# clients will integrate with the DNS manager on the client or
+# overwrite /etc/resolv.conf.
+# https://tailscale.com/kb/1235/resolv-conf
+#
+# If you want to stop Headscale from managing the DNS configuration,
+# all the fields under `dns` should be set to empty values.
+dns:
+ # Whether to use [MagicDNS](https://tailscale.com/kb/1081/magicdns/).
+ magic_dns: true
+
+ # Defines the base domain to create the hostnames for MagicDNS.
+ # This domain _must_ be different from the server_url domain.
+ # `base_domain` must be a FQDN, without the trailing dot.
+ # The FQDN of the hosts will be
+ # `hostname.base_domain` (e.g., _myhost.example.com_).
+ base_domain: "{{ headscale_base_domain }}"
+
+ # List of DNS servers to expose to clients.
+ nameservers:
+ global:
+ - {{ headscale_dns_for_connected_clients_1 }}
+ - {{ headscale_dns_for_connected_clients_2 }}
+
+ # NextDNS (see https://tailscale.com/kb/1218/nextdns/).
+ # "abc123" is example NextDNS ID, replace with yours.
+ # - https://dns.nextdns.io/abc123
+
+ # Split DNS (see https://tailscale.com/kb/1054/dns/),
+ # a map of domains and which DNS server to use for each.
+ split:
+ {}
+ # foo.bar.com:
+ # - 1.1.1.1
+ # darp.headscale.net:
+ # - 1.1.1.1
+ # - 8.8.8.8
+
+ # Set custom DNS search domains. With MagicDNS enabled,
+ # your tailnet base_domain is always the first search domain.
+ search_domains: []
+
+ # Extra DNS records
+ # so far only A and AAAA records are supported (on the tailscale side)
+ # See: docs/ref/dns.md
+ extra_records: []
+ # - name: "grafana.myvpn.example.com"
+ # type: "A"
+ # value: "100.64.0.3"
+ #
+ # # you can also put it in one line
+ # - { name: "prometheus.myvpn.example.com", type: "A", value: "100.64.0.3" }
+ #
+ # Alternatively, extra DNS records can be loaded from a JSON file.
+ # Headscale processes this file on each change.
+ # extra_records_path: /var/lib/headscale/extra-records.json
+
+# Unix socket used for the CLI to connect without authentication
+# Note: for production you will want to set this to something like:
+unix_socket: /var/run/headscale/headscale.sock
+unix_socket_permission: "0770"
+#
+# headscale supports experimental OpenID connect support,
+# it is still being tested and might have some bugs, please
+# help us test it.
+# OpenID Connect
+# oidc:
+# only_start_if_oidc_is_available: true
+# issuer: "https://your-oidc.issuer.com/path"
+# client_id: "your-oidc-client-id"
+# client_secret: "your-oidc-client-secret"
+# # Alternatively, set `client_secret_path` to read the secret from the file.
+# # It resolves environment variables, making integration to systemd's
+# # `LoadCredential` straightforward:
+# client_secret_path: "${CREDENTIALS_DIRECTORY}/oidc_client_secret"
+# # client_secret and client_secret_path are mutually exclusive.
+#
+# # The amount of time from when a node is authenticated with OpenID until it
+# # expires and needs to reauthenticate.
+# # Setting the value to "0" will mean no expiry.
+# expiry: 180d
+#
+# # Use the expiry from the token received from OpenID when the user logged
+# # in, this will typically lead to frequent need to reauthenticate and should
+# # only be enabled if you know what you are doing.
+# # Note: enabling this will cause `oidc.expiry` to be ignored.
+# use_expiry_from_token: false
+#
+# # Customize the scopes used in the OIDC flow (defaults: "openid", "profile" and "email")
+# # and add custom query parameters to the Authorize Endpoint request.
+#
+# scope: ["openid", "profile", "email", "custom"]
+# extra_params:
+# domain_hint: example.com
+#
+# # List allowed principal domains and/or users. If an authenticated user's domain is not in this list, the
+# # authentication request will be rejected.
+#
+# allowed_domains:
+# - example.com
+# # Note: Groups from keycloak have a leading '/'
+# allowed_groups:
+# - /headscale
+# allowed_users:
+# - alice@example.com
+#
+# # Optional: PKCE (Proof Key for Code Exchange) configuration
+# # PKCE adds an additional layer of security to the OAuth 2.0 authorization code flow
+# # by preventing authorization code interception attacks
+# # See https://datatracker.ietf.org/doc/html/rfc7636
+# pkce:
+# # Enable or disable PKCE support (default: false)
+# enabled: false
+# # PKCE method to use:
+# # - plain: Use plain code verifier
+# # - S256: Use SHA256 hashed code verifier (default, recommended)
+# method: S256
+#
+# # Map legacy users from pre-0.24.0 versions of headscale to the new OIDC users
+# # by taking the username from the legacy user and matching it with the username
+# # provided by the OIDC. This is useful when migrating from legacy users to OIDC
+# # to force them using the unique identifier from the OIDC and to give them a
+# # proper display name and picture if available.
+# # Note that this only works if the username from the legacy user matches,
+# # and there is a possibility of account takeover should a username have
+# # changed with the provider.
+# # When this feature is disabled, it will cause all new logins to be created as new users.
+# # Note this option will be removed in the future and should be set to false
+# # on all new installations, or when all users have logged in with OIDC once.
+# map_legacy_users: false
+
+# Logtail configuration
+# Logtail is Tailscale's logging and auditing infrastructure; it allows the control panel
+# to instruct tailscale nodes to log their activity to a remote server.
+logtail:
+ # Enable logtail for this headscale instance's clients.
+ # As there is currently no support for overriding the log server in headscale, this is
+ # disabled by default. Enabling this will make your clients send logs to Tailscale Inc.
+ enabled: false
+
+# Enabling this option makes devices prefer a random port for WireGuard traffic over the
+# default static port 41641. This option is intended as a workaround for some buggy
+# firewall devices. See https://tailscale.com/kb/1181/firewalls/ for more information.
+randomize_client_port: false
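
Once the service is up, headscale still needs at least one user and, typically, a preauth key before clients can join. A post-deploy sketch using the containerized CLI; the user name is a placeholder and the exact flags vary across headscale versions, so treat this as an assumption to verify against your release:

- name: Create a headscale user
  ansible.builtin.shell:
    cmd: docker compose exec headscale headscale users create alice
    chdir: /etc/docker/compose/headscale

- name: Create a reusable preauth key for that user
  ansible.builtin.shell:
    cmd: docker compose exec headscale headscale preauthkeys create --user alice --reusable --expiration 1h
    chdir: /etc/docker/compose/headscale
  register: preauth_key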
diff --git a/playbooks/roles/outbound/templates/data/.gitkeep b/playbooks/roles/outbound/templates/data/.gitkeep
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/playbooks/roles/outbound/templates/data/.gitkeep
diff --git a/playbooks/roles/outbound/templates/docker-compose.yml b/playbooks/roles/outbound/templates/docker-compose.yml
new file mode 100644
index 0000000..c644ca4
--- /dev/null
+++ b/playbooks/roles/outbound/templates/docker-compose.yml
@@ -0,0 +1,39 @@
+---
+
+services:
+ headscale:
+ image: headscale/headscale:stable-debug # until something better comes along with wget or i make my own dockerfile...
+ pull_policy: always
+ restart: unless-stopped
+ command: serve
+ volumes:
+ - ./config:/etc/headscale
+ - ./data:/var/lib/headscale
+ networks:
+ - proxy
+ environment:
+ - VIRTUAL_HOST={{ headscale_host }}
+ - VIRTUAL_PORT={{ headscale_port }}
+ - LETSENCRYPT_HOST={{ headscale_host }}
+ healthcheck:
+ test: ["CMD", "wget", "-qO", "-", "http://localhost:8080/health"]
+ interval: 10s
+ timeout: 5s
+ retries: 3
+
+ headscale-ui:
+ image: ghcr.io/gurucomputing/headscale-ui:latest
+ pull_policy: always
+ restart: unless-stopped
+ networks:
+ - proxy
+ environment:
+ - VIRTUAL_HOST={{ headscale_host }}
+ - VIRTUAL_PORT={{ headscale_port }}
+ - LETSENCRYPT_HOST={{ headscale_host }}
+ - VIRTUAL_PATH=/web/
+ - VIRTUAL_DEST=/
+
+networks:
+ proxy:
+ external: true
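
On the client side, joining this control plane replaces the default Tailscale coordination server. Roughly, with the key variable being an assumption (headscale_url is defined for the config template above, headscale_preauth_key is not part of this commit):

- name: Join a client to headscale
  ansible.builtin.command: >
    tailscale up
    --login-server {{ headscale_url }}
    --authkey {{ headscale_preauth_key }}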