-rw-r--r--  .gitignore | 4
-rw-r--r--  .yamllint | 34
-rw-r--r--  README.md | 85
-rw-r--r--  TODO.md | 7
-rwxr-xr-x  ansible-vault-init.sh | 66
-rw-r--r--  ansible.cfg | 3
-rw-r--r--  deploy.yml | 25
-rw-r--r--  docs/INFRA_PLAYBOOK.md | 9
-rw-r--r--  docs/PEOPLE_PLAYBOOK.md | 19
-rw-r--r--  group_vars/all.yml | 14
-rw-r--r--  group_vars/certbot.yml | 6
-rw-r--r--  group_vars/host_domains.yml | 9
-rw-r--r--  group_vars/kanidm.yml | 4
-rw-r--r--  group_vars/mail.yml | 41
-rw-r--r--  group_vars/mmt.yml | 8
-rw-r--r--  group_vars/nginx.yml | 3
-rw-r--r--  group_vars/wireguard-mesh.yml | 4
-rw-r--r--  inventory | 31
-rw-r--r--  playbooks/deploy-certbot.yml | 6
-rw-r--r--  playbooks/deploy-common.yml | 6
-rw-r--r--  playbooks/deploy-docker.yml | 6
-rw-r--r--  playbooks/deploy-kanidm.yml | 6
-rw-r--r--  playbooks/deploy-mail.yml | 6
-rw-r--r--  playbooks/deploy-mmt.yml | 6
-rw-r--r--  playbooks/deploy-nginx.yml | 6
-rw-r--r--  playbooks/deploy-wireguard-endpoint.yml | 6
-rw-r--r--  playbooks/deploy-wireguard-mesh.yml | 6
-rwxr-xr-x  playbooks/roles/certbot/files/renewal_post_upgrade.sh | 3
-rw-r--r--  playbooks/roles/certbot/tasks/main.yml | 64
-rw-r--r--  playbooks/roles/certbot/templates/cloudflare-credentials.ini.j2 | 1
-rw-r--r--  playbooks/roles/common/files/authorized_keys | 2
-rw-r--r--  playbooks/roles/common/files/jail.conf | 979
-rw-r--r--  playbooks/roles/common/files/sshd_config | 21
-rw-r--r--  playbooks/roles/common/handlers/main.yml | 25
-rw-r--r--  playbooks/roles/common/tasks/main.yml | 76
-rw-r--r--  playbooks/roles/common/tasks/systemd-resolved.yml | 64
-rw-r--r--  playbooks/roles/docker/files/docker-compose@.service | 18
-rwxr-xr-x  playbooks/roles/docker/files/docker-rollout | 212
-rw-r--r--  playbooks/roles/docker/handlers/main.yml | 7
-rw-r--r--  playbooks/roles/docker/tasks/main.yml | 60
-rw-r--r--  playbooks/roles/kanidm/tasks/main.yml | 47
-rw-r--r--  playbooks/roles/kanidm/templates/docker-compose.yml.j2 | 13
-rw-r--r--  playbooks/roles/kanidm/templates/server.toml.j2 | 10
-rw-r--r--  playbooks/roles/mail/tasks/main.yml | 80
-rw-r--r--  playbooks/roles/mail/templates/docker-compose.yml.j2 | 71
-rw-r--r--  playbooks/roles/mail/templates/dovecot-ldap.conf.j2 | 10
-rw-r--r--  playbooks/roles/mail/templates/oauth2.inc.php.j2 | 19
-rw-r--r--  playbooks/roles/mail/templates/sieve.inc.php.j2 | 4
-rw-r--r--  playbooks/roles/mail/templates/user-patches.sh.j2 | 20
-rw-r--r--  playbooks/roles/mmt/tasks/main.yml | 23
-rw-r--r--  playbooks/roles/mmt/templates/docker-compose.yml.j2 | 21
-rw-r--r--  playbooks/roles/nginx/files/nginx.conf | 26
-rw-r--r--  playbooks/roles/nginx/handlers/main.yml | 12
-rw-r--r--  playbooks/roles/nginx/tasks/main.yml | 44
-rw-r--r--  playbooks/roles/nginx/templates/mail.int.mistymountainstherapy.com/http.auth.mistymountainstherapy.com.conf | 8
-rw-r--r--  playbooks/roles/nginx/templates/mail.int.mistymountainstherapy.com/http.mail.mistymountainstherapy.com.conf | 8
-rw-r--r--  playbooks/roles/nginx/templates/mail.int.mistymountainstherapy.com/https.auth.mistymountainstherapy.com.conf | 23
-rw-r--r--  playbooks/roles/nginx/templates/mail.int.mistymountainstherapy.com/https.mail.mistymountainstherapy.com.conf | 21
-rw-r--r--  playbooks/roles/nginx/templates/www.int.mistymountainstherapy.com/http.mistymountainstherapy.com.conf | 8
-rw-r--r--  playbooks/roles/nginx/templates/www.int.mistymountainstherapy.com/http.www.mistymountainstherapy.com.conf | 8
-rw-r--r--  playbooks/roles/nginx/templates/www.int.mistymountainstherapy.com/https.mistymountainstherapy.com.conf | 21
-rw-r--r--  playbooks/roles/nginx/templates/www.int.mistymountainstherapy.com/https.www.mistymountainstherapy.com.conf | 19
-rw-r--r--  playbooks/roles/wireguard-endpoint/files/.gitignore | 1
-rw-r--r--  playbooks/roles/wireguard-endpoint/tasks/main.yml | 40
-rw-r--r--  playbooks/roles/wireguard-mesh/tasks/main.yml | 80
-rw-r--r--  playbooks/roles/wireguard-mesh/templates/mmtmesh.conf.j2 | 17
-rw-r--r--  requirements.yml | 4
-rw-r--r--  secrets.txt | 12
68 files changed, 2628 insertions, 0 deletions
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..8d86597
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,4 @@
+*.enc
+*.swp
+temp_secrets.yml
+*.pwd
diff --git a/.yamllint b/.yamllint
new file mode 100644
index 0000000..8b6f6ec
--- /dev/null
+++ b/.yamllint
@@ -0,0 +1,34 @@
+---
+
+yaml-files:
+ - '*.yaml'
+ - '*.yml'
+ - '.yamllint'
+
+rules:
+ braces: enable
+ brackets: enable
+ colons: enable
+ commas: enable
+ comments:
+ level: warning
+ comments-indentation:
+ level: warning
+ document-end: disable
+ document-start:
+ level: warning
+ empty-lines: enable
+ empty-values: disable
+ float-values: disable
+ hyphens: enable
+ indentation: enable
+ key-duplicates: enable
+ key-ordering: disable
+ line-length: disable
+ new-line-at-end-of-file: enable
+ new-lines: enable
+ octal-values: disable
+ quoted-strings: disable
+ trailing-spaces: enable
+ truthy:
+ level: warning
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..d210c13
--- /dev/null
+++ b/README.md
@@ -0,0 +1,85 @@
+# hatecomputers.club infra
+
+A collection of playbooks to deploy the hatecomputers.club infra
+
+## Prerequisites
+
+- `ansible`
+- `yamllint`
+- `ansible-lint`
+- an ssh key accepted for the root user on each host in the `inventory`
+
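+One way to install the Python tooling is with pip (distro packages work just
+as well):
+
+```bash
+python3 -m pip install --user ansible ansible-lint yamllint
+```
+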
+## Setup
+
+### Vault
+
+Secrets are managed with `ansible-vault`. Initialize your vault, or add new
+secrets to it, with our custom `./ansible-vault-init.sh` script.
+
+If you only want to update a single secret, run
+`./ansible-vault-init.sh <secret_name>`.
+
+If you don't want to be prompted for your password every time you deploy
+something, put the password, as a single plain-text line, into `secrets.pwd`
+in the repository root:
+
+```bash
+echo "<your_password>" > secrets.pwd
+```
+
+Then you can add `--vault-password-file secrets.pwd` each time you run a
+deployment (or you know, use `pass` or something if you're paranoid).
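+
+For example, a full deployment using the vault and the password file looks
+like:
+
+```bash
+ansible-playbook -e @secrets.enc --vault-password-file secrets.pwd deploy.yml
+```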
+
+### Pre-commit hooks
+
+1. clone the repo
+
+ ```bash
+ git clone git@git.hatecomputers.club:hatecomputers.club/infra
+ cd infra
+ ```
+
+2. add a pre-commit hook
+
+ ```bash
+ cd .git/hooks
+ touch pre-commit
+ ```
+
+3. add the following contents to `pre-commit`:
+
+ ```bash
+ #!/bin/sh
+
+ set -e
+
+ # lint yaml files
+ echo "running yamllint..."
+ yamllint --strict .
+
+ # follow ansible best-practices
+ echo "running ansible-lint"
+ ansible-lint
+ ```
+
+4. make it executable
+ ```bash
+ chmod +x pre-commit
+ ```
+
+## Running
+
+`ansible-playbook -e @secrets.enc deploy.yml` will run every playbook listed in `deploy.yml`,
+in order, using the vault initialized in the previous steps.
+
+During development, though, you should test individual playbooks; treat `deploy.yml`
+as a record of the general order of things, or use it for a
+full deployment after testing.
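+
+For example, to test just the nginx role against the hosts in the `nginx`
+group:
+
+```bash
+ansible-playbook -e @secrets.enc --vault-password-file secrets.pwd playbooks/deploy-nginx.yml
+```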
+
+NOTE: It is highly advised to run `ansible-playbook` inside an `ssh-agent` session to avoid retyping your SSH key passphrase over and over. Something along the lines of:
+
+```bash
+ssh-agent $(echo $SHELL)
+ssh-add ~/.ssh/<private-key>
+```
+
diff --git a/TODO.md b/TODO.md
new file mode 100644
index 0000000..127fcfa
--- /dev/null
+++ b/TODO.md
@@ -0,0 +1,7 @@
+- [ ] BACKUPS BACKUPS BACKUPS
+
+- [ ] nameservers for users
+- [ ] read email for service accounts dmarc.report, postmaster email users, give access to infra users
+- [ ] allow infra users to ssh into any machine in infra, regular users into their tilde account on himmel
+- [x] allow ufw and setup wireguard on himmel
+- [x] internal vpn for infra, figure out routing
diff --git a/ansible-vault-init.sh b/ansible-vault-init.sh
new file mode 100755
index 0000000..8219ec4
--- /dev/null
+++ b/ansible-vault-init.sh
@@ -0,0 +1,66 @@
+#!/bin/bash
+
+# usage: ./ansible-vault-init.sh <? secret-name-to-update>
+
+# password input
+while true; do
+ read -s -p "Password: " VAULT_PASSWORD
+ echo
+ read -s -p "Confirm password: " confirmationpwd
+ echo
+ [ "$VAULT_PASSWORD" = "$confirmationpwd" ] && break
+ echo "Please try again"
+done
+
+###
+
+SECRETS_KEYS_FILE="secrets.txt"
+# temporary secret store
+TEMP_FILE="temp_secrets.yml"
+VAULT_FILE="secrets.enc"
+
+if [ "$#" -eq 1 ]; then
+ SINGLE_SECRET_MODE=true
+ SECRET_TO_UPDATE=$1
+else
+ SINGLE_SECRET_MODE=false
+fi
+
+
+if [ -f "$VAULT_FILE" ]; then
+  ansible-vault decrypt "$VAULT_FILE" --output="$TEMP_FILE" --vault-password-file <(echo "$VAULT_PASSWORD")
+else
+ # create the temporary file
+ > "$TEMP_FILE"
+fi
+
+IFS=$'\n' read -d '' -r -a secrets < "$SECRETS_KEYS_FILE"
+echo "Gathering secrets..."
+for secret_name in "${secrets[@]}"; do
+ if [ "$SINGLE_SECRET_MODE" = true ] && [ "$secret_name" != "$SECRET_TO_UPDATE" ]; then
+ continue
+ fi
+
+ if grep -q "^$secret_name:" "$TEMP_FILE"; then
+ if [ "$SINGLE_SECRET_MODE" = true ]; then
+ # Remove the old value of the secret
+ sed -i "/^$secret_name:/d" "$TEMP_FILE"
+ else
+ echo "Secret $secret_name already exists, skipping."
+ continue
+ fi
+ fi
+
+ echo -n "Enter value for $secret_name: "
+ read secret_value
+ echo "$secret_name: $secret_value" >> "$TEMP_FILE"
+done
+
+echo "Re-encrypting secrets..."
+
+ansible-vault encrypt "$TEMP_FILE" --output="$VAULT_FILE" --vault-password-file <(echo "$VAULT_PASSWORD")
+
+# remove the temp secrets file securely
+shred -u "$TEMP_FILE"
+
+echo "Secrets have been encrypted into secrets.enc"
diff --git a/ansible.cfg b/ansible.cfg
new file mode 100644
index 0000000..dcb0621
--- /dev/null
+++ b/ansible.cfg
@@ -0,0 +1,3 @@
+[defaults]
+inventory = inventory
+host_key_checking = False
diff --git a/deploy.yml b/deploy.yml
new file mode 100644
index 0000000..65ea464
--- /dev/null
+++ b/deploy.yml
@@ -0,0 +1,25 @@
+---
+
+- name: Wireguard Endpoint
+ ansible.builtin.import_playbook: playbooks/deploy-wireguard-endpoint.yml
+
+- name: Common configurations
+ ansible.builtin.import_playbook: playbooks/deploy-common.yml
+
+- name: Docker setup
+ ansible.builtin.import_playbook: playbooks/deploy-docker.yml
+
+- name: Certbot certificate cloudflare setup
+ ansible.builtin.import_playbook: playbooks/deploy-certbot.yml
+
+- name: Kanidm
+ ansible.builtin.import_playbook: playbooks/deploy-kanidm.yml
+
+- name: Mail
+ ansible.builtin.import_playbook: playbooks/deploy-mail.yml
+
+- name: Wireguard Mesh
+ ansible.builtin.import_playbook: playbooks/deploy-wireguard-mesh.yml
+
+- name: Website for mmt
+ ansible.builtin.import_playbook: playbooks/deploy-mmt.yml
diff --git a/docs/INFRA_PLAYBOOK.md b/docs/INFRA_PLAYBOOK.md
new file mode 100644
index 0000000..043d4dc
--- /dev/null
+++ b/docs/INFRA_PLAYBOOK.md
@@ -0,0 +1,9 @@
+Registering a new internal machine <hostname>:
+
+1. Register <hostname>.pub.infra.hatecomputers.club A record -> public ipv4
+2. Register <hostname>.int.infra.hatecomputers.club A record -> internal ipv4 in 10.155.0.0/16 subnet
+3. Put it on the internal VPN, i.e. add <hostname>.pub... to the wireguard-mesh inventory group once ssh access as root works
+4. Run the wireguard-mesh playbook (see the example commands below)
+5. Update the inventory record in wireguard-mesh to <hostname>.int...
+6. Now run the deploy-common playbook to allow ssh only internally, debugging as necessary; it should just work :))
+7. Add your new roles!
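+
+A sketch of steps 4 and 6, assuming you run from the repository root and use
+the vault password file described in the README:
+
+```sh
+# step 4: bring the new host into the mesh
+ansible-playbook -e @secrets.enc --vault-password-file secrets.pwd playbooks/deploy-wireguard-mesh.yml
+
+# step 6: re-run common so ssh is only allowed internally
+ansible-playbook -e @secrets.enc --vault-password-file secrets.pwd playbooks/deploy-common.yml
+```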
diff --git a/docs/PEOPLE_PLAYBOOK.md b/docs/PEOPLE_PLAYBOOK.md
new file mode 100644
index 0000000..2eb468b
--- /dev/null
+++ b/docs/PEOPLE_PLAYBOOK.md
@@ -0,0 +1,19 @@
+obviously, don't let people have usernames that conflict with anything internal (e.g. "email", "infra*", etc.), and keep usernames strictly alphanumeric
+
+```sh
+kanidm login --name idm_admin
+kanidm person create --name idm_admin <username> "<display name>"
+kanidm person credential create-reset-token <username> --name idm_admin
+
+# allow them to set a unix/ldap password
+kanidm person posix set --name idm_admin <username>
+kanidm person posix set --name idm_admin <username> --shell /bin/zsh
+
+# give them email access (need unix access)
+kanidm person update <username> --legalname "<display name>" --mail <username>@hatecomputers.club
+kanidm group add-members mail <username>
+```
+
+groups you'll probably want to add people to (see the example below):
++ gitea-access
++ mail
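+
+Adding someone to one of these groups mirrors the `mail` example above, e.g.:
+
+```sh
+kanidm group add-members gitea-access <username> --name idm_admin
+```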
diff --git a/group_vars/all.yml b/group_vars/all.yml
new file mode 100644
index 0000000..8e21681
--- /dev/null
+++ b/group_vars/all.yml
@@ -0,0 +1,14 @@
+---
+
+dns_servers:
+ - 1.1.1.1
+ - 1.0.0.1
+dns_domains:
+ - ["mistymountainstherapy.com"]
+dns_dnssec: true
+dns_stub_listener: false
+
+rfc1918_networks:
+ - 10.0.0.0/8
+ - 172.16.0.0/12
+ - 192.168.0.0/16
diff --git a/group_vars/certbot.yml b/group_vars/certbot.yml
new file mode 100644
index 0000000..23a10e6
--- /dev/null
+++ b/group_vars/certbot.yml
@@ -0,0 +1,6 @@
+---
+
+cloudflare_credentials_destination: /root/.cloudflare-dns-api-key.ini
+certbot_post_hook_dir: /etc/letsencrypt/renewal-hooks/post
+certbot_live_dir: /etc/letsencrypt/live
+certbot_email: infra@mistymountainstherapy.com
diff --git a/group_vars/host_domains.yml b/group_vars/host_domains.yml
new file mode 100644
index 0000000..be02f26
--- /dev/null
+++ b/group_vars/host_domains.yml
@@ -0,0 +1,9 @@
+---
+
+host_domains:
+ www.int.mistymountainstherapy.com:
+ - www.mistymountainstherapy.com
+ - mistymountainstherapy.com
+ mail.int.mistymountainstherapy.com:
+ - mail.mistymountainstherapy.com
+ - auth.mistymountainstherapy.com
diff --git a/group_vars/kanidm.yml b/group_vars/kanidm.yml
new file mode 100644
index 0000000..6d755d4
--- /dev/null
+++ b/group_vars/kanidm.yml
@@ -0,0 +1,4 @@
+---
+
+kanidm_domain: auth.mistymountainstherapy.com
+kanidm_bind_address: "{{ lookup('community.general.dig', inventory_hostname) }}"
diff --git a/group_vars/mail.yml b/group_vars/mail.yml
new file mode 100644
index 0000000..8e7591b
--- /dev/null
+++ b/group_vars/mail.yml
@@ -0,0 +1,41 @@
+---
+
+postmaster_email: postmaster@mistymountainstherapy.com
+
+domain: mistymountainstherapy.com
+mail_domain: mail.mistymountainstherapy.com
+
+ldap_server: "auth.mistymountainstherapy.com"
+ldap_server_host: "ldaps://{{ ldap_server }}:3636"
+ldap_intranet: >
+ {{ lookup('community.general.dig',
+ 'mail.int.mistymountainstherapy.com') }}
+ldap_search_base: "dc=auth,dc=mistymountainstherapy,dc=com"
+ldap_bind_dn: "dn=token"
+
+ldap_query_filter_user: "(&(objectClass=posixAccount)(mail=%s))"
+ldap_query_filter_group: "(&(objectClass=posixAccount)(|(mail=%s)(uid=%s)))"
+ldap_query_filter_alias: "(&(objectClass=posixAccount)(emailalternative=%s))"
+ldap_query_filter_domain: "(&(objectClass=posixAccount)(|(mail=%s)(uid=%s)))"
+ldap_query_filter_senders: "(&(objectClass=posixAccount)(|(mail=%s)(uid=%s)))"
+
+sasl_ldap_filter: >
+ (&(|(uid=%U)(mail=%U))(class=posixAccount)
+ (memberOf=cn=mail,dc=auth,dc=mistymountainstherapy,dc=com))
+
+dovecot_user_filter: >
+ (&(class=posixAccount)(uid=%u)
+ (memberOf=cn=mail,dc=auth,dc=mistymountainstherapy,dc=com))
+dovecot_auth_bind_userdn: "uid=%u,dc=auth,dc=mistymountainstherapy,dc=com"
+dovecot_user_attrs: "=mail=maildir:~/Maildir,uidNumber=uid,gidNumber=gid"
+
+roundcube_default_host: "ssl://mail.mistymountainstherapy.com"
+roundcube_default_port: 993
+roundcube_smtp_host: "ssl://mail.mistymountainstherapy.com"
+roundcube_smtp_port: 465
+roundcube_plugins: "archive,zipdownload,managesieve,markasjunk"
+
+roundcube_oauth2_auth_uri: "https://auth.mistymountainstherapy.com/ui/oauth2"
+roundcube_oauth2_user_uri: >
+ https://auth.mistymountainstherapy.com/oauth2/openid/roundcube/userinfo
+roundcube_oauth2_token_uri: "https://auth.mistymountainstherapy.com/oauth2/token"
diff --git a/group_vars/mmt.yml b/group_vars/mmt.yml
new file mode 100644
index 0000000..47e3829
--- /dev/null
+++ b/group_vars/mmt.yml
@@ -0,0 +1,8 @@
+---
+
+from_email: "{{ mmt_from_email }}"
+hcaptcha_secret: "{{ mmt_hcaptcha_secret }}"
+smtp_server: "{{ mmt_smtp_server }}"
+smtp_password: "{{ mmt_smtp_password }}"
+smtp_username: "{{ mmt_smtp_username }}"
+form_to_email: "{{ mmt_form_to_email }}"
diff --git a/group_vars/nginx.yml b/group_vars/nginx.yml
new file mode 100644
index 0000000..26f919f
--- /dev/null
+++ b/group_vars/nginx.yml
@@ -0,0 +1,3 @@
+---
+
+dh_params_src: https://ssl-config.mozilla.org/ffdhe2048.txt
diff --git a/group_vars/wireguard-mesh.yml b/group_vars/wireguard-mesh.yml
new file mode 100644
index 0000000..e5a7985
--- /dev/null
+++ b/group_vars/wireguard-mesh.yml
@@ -0,0 +1,4 @@
+---
+
+wireguard_listen_port: 51830
+wireguard_subnet: 10.212.0.0/16
diff --git a/inventory b/inventory
new file mode 100644
index 0000000..9c76e0f
--- /dev/null
+++ b/inventory
@@ -0,0 +1,31 @@
+[docker]
+www.int.mistymountainstherapy.com ansible_user=root ansible_connection=ssh
+mail.int.mistymountainstherapy.com ansible_user=root ansible_connection=ssh
+
+[host_domains]
+www.int.mistymountainstherapy.com ansible_user=root ansible_connection=ssh
+mail.int.mistymountainstherapy.com ansible_user=root ansible_connection=ssh
+
+[nginx]
+www.int.mistymountainstherapy.com ansible_user=root ansible_connection=ssh
+mail.int.mistymountainstherapy.com ansible_user=root ansible_connection=ssh
+
+[certbot]
+www.int.mistymountainstherapy.com ansible_user=root ansible_connection=ssh
+mail.int.mistymountainstherapy.com ansible_user=root ansible_connection=ssh
+
+[kanidm]
+mail.int.mistymountainstherapy.com ansible_user=root ansible_connection=ssh
+
+[mail]
+mail.int.mistymountainstherapy.com ansible_user=root ansible_connection=ssh
+
+[wireguard-mesh]
+www.int.mistymountainstherapy.com ansible_user=root ansible_connection=ssh
+mail.int.mistymountainstherapy.com ansible_user=root ansible_connection=ssh
+
+[wireguard-endpoint]
+www.int.mistymountainstherapy.com ansible_user=root ansible_connection=ssh
+
+[mmt]
+www.int.mistymountainstherapy.com ansible_user=root ansible_connection=ssh
diff --git a/playbooks/deploy-certbot.yml b/playbooks/deploy-certbot.yml
new file mode 100644
index 0000000..1fa6cb1
--- /dev/null
+++ b/playbooks/deploy-certbot.yml
@@ -0,0 +1,6 @@
+---
+
+- name: Certbot setup
+ hosts: certbot
+ roles:
+ - certbot
diff --git a/playbooks/deploy-common.yml b/playbooks/deploy-common.yml
new file mode 100644
index 0000000..a605873
--- /dev/null
+++ b/playbooks/deploy-common.yml
@@ -0,0 +1,6 @@
+---
+
+- name: Common host setup
+ hosts: all
+ roles:
+ - common
diff --git a/playbooks/deploy-docker.yml b/playbooks/deploy-docker.yml
new file mode 100644
index 0000000..db55b4a
--- /dev/null
+++ b/playbooks/deploy-docker.yml
@@ -0,0 +1,6 @@
+---
+
+- name: Docker setup
+ hosts: docker
+ roles:
+ - docker
diff --git a/playbooks/deploy-kanidm.yml b/playbooks/deploy-kanidm.yml
new file mode 100644
index 0000000..6476e57
--- /dev/null
+++ b/playbooks/deploy-kanidm.yml
@@ -0,0 +1,6 @@
+---
+
+- name: Kanidm setup
+ hosts: kanidm
+ roles:
+ - kanidm
diff --git a/playbooks/deploy-mail.yml b/playbooks/deploy-mail.yml
new file mode 100644
index 0000000..dc9a7b2
--- /dev/null
+++ b/playbooks/deploy-mail.yml
@@ -0,0 +1,6 @@
+---
+
+- name: Mail setup
+ hosts: mail
+ roles:
+ - mail
diff --git a/playbooks/deploy-mmt.yml b/playbooks/deploy-mmt.yml
new file mode 100644
index 0000000..a511935
--- /dev/null
+++ b/playbooks/deploy-mmt.yml
@@ -0,0 +1,6 @@
+---
+
+- name: MMT setup
+ hosts: mmt
+ roles:
+ - mmt
diff --git a/playbooks/deploy-nginx.yml b/playbooks/deploy-nginx.yml
new file mode 100644
index 0000000..95f747d
--- /dev/null
+++ b/playbooks/deploy-nginx.yml
@@ -0,0 +1,6 @@
+---
+
+- name: Nginx setup
+ hosts: nginx
+ roles:
+ - nginx
diff --git a/playbooks/deploy-wireguard-endpoint.yml b/playbooks/deploy-wireguard-endpoint.yml
new file mode 100644
index 0000000..4f87e97
--- /dev/null
+++ b/playbooks/deploy-wireguard-endpoint.yml
@@ -0,0 +1,6 @@
+---
+
+- name: Wireguard-endpoint setup
+ hosts: wireguard-endpoint
+ roles:
+ - wireguard-endpoint
diff --git a/playbooks/deploy-wireguard-mesh.yml b/playbooks/deploy-wireguard-mesh.yml
new file mode 100644
index 0000000..6e7cd85
--- /dev/null
+++ b/playbooks/deploy-wireguard-mesh.yml
@@ -0,0 +1,6 @@
+---
+
+- name: Wireguard-mesh setup
+ hosts: wireguard-mesh
+ roles:
+ - wireguard-mesh
diff --git a/playbooks/roles/certbot/files/renewal_post_upgrade.sh b/playbooks/roles/certbot/files/renewal_post_upgrade.sh
new file mode 100755
index 0000000..ab9c5b1
--- /dev/null
+++ b/playbooks/roles/certbot/files/renewal_post_upgrade.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+/usr/sbin/service nginx restart
diff --git a/playbooks/roles/certbot/tasks/main.yml b/playbooks/roles/certbot/tasks/main.yml
new file mode 100644
index 0000000..717eac0
--- /dev/null
+++ b/playbooks/roles/certbot/tasks/main.yml
@@ -0,0 +1,64 @@
+---
+
+- name: Install certbot deps
+ ansible.builtin.apt:
+ name:
+ - python3-certbot
+ - python3-certbot-dns-cloudflare
+ state: present
+
+- name: Install Cloudflare DNS credentials
+ ansible.builtin.template:
+ src: cloudflare-credentials.ini.j2
+ dest: "{{ cloudflare_credentials_destination }}"
+ mode: 0700
+
+- name: Ensure existence of {{ certbot_post_hook_dir }}
+ ansible.builtin.file:
+ path: "{{ certbot_post_hook_dir }}"
+ state: directory
+    mode: u=rwx,g=rx,o=rx
+
+- name: Add renewal_post_upgrade hook
+ ansible.builtin.copy:
+ src: renewal_post_upgrade.sh
+ dest: "{{ certbot_post_hook_dir }}/renewal_post_upgrade.sh"
+ mode: a+x
+ owner: root
+ group: root
+
+- name: Check for existence of certificates
+ ansible.builtin.stat:
+ path: "{{ certbot_live_dir }}/{{ item }}/fullchain.pem"
+ loop: "{{ host_domains[inventory_hostname] }}"
+ register: cert_check
+- name: Construct domains needing ACME requests list
+ ansible.builtin.set_fact:
+ domain_request_list: >
+ {% for domain in host_domains[inventory_hostname] %}
+ {% set domain_index = loop.index0 %}
+ {% if not cert_check.results[domain_index].stat.exists %}
+ {{ domain }}
+ {% endif %}
+ {% endfor %}
+
+- name: Request ACME DNS challenges for domains that still need certs
+  ansible.builtin.shell: >
+    certbot certonly --dns-cloudflare
+    --dns-cloudflare-credentials {{ cloudflare_credentials_destination }}
+    --non-interactive
+    --manual-public-ip-logging-ok
+    --agree-tos -m {{ certbot_email }}
+    --preferred-challenges dns --debug-challenges
+    --dns-cloudflare-propagation-seconds 40
+    -d {{ item }}
+ loop: "{{ domain_request_list.split() }}"
+ changed_when: domain_request_list | trim != ''
+
+- name: Certbot daily renewal cron job
+ ansible.builtin.cron:
+ name: "letsencrypt_daily_renewal"
+ special_time: "daily"
+ job: "certbot renew --non-interactive"
+ cron_file: "certbot_renewal"
+ user: root
diff --git a/playbooks/roles/certbot/templates/cloudflare-credentials.ini.j2 b/playbooks/roles/certbot/templates/cloudflare-credentials.ini.j2
new file mode 100644
index 0000000..4e7d9ac
--- /dev/null
+++ b/playbooks/roles/certbot/templates/cloudflare-credentials.ini.j2
@@ -0,0 +1 @@
+dns_cloudflare_api_token = {{ cloudflare_api_token }}
diff --git a/playbooks/roles/common/files/authorized_keys b/playbooks/roles/common/files/authorized_keys
new file mode 100644
index 0000000..5cc07b6
--- /dev/null
+++ b/playbooks/roles/common/files/authorized_keys
@@ -0,0 +1,2 @@
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDRHu3h9mDjQyFbojcxGKW0hPUDfgUmb2WCzd4Dv2qISM3GGt9LjD8o0IbWRNaTf5UyId5lu7wNHtygs5ZDfUVnlfxrI1CmoExuqkYFjy+R9Cu0x1J2w7+MrKPBd5akLCuKTTnXbyv79T0tLb07rCpGHojW8HH6wdDtg0siVqsPqZVTjg7WGbBYqiqlA5p8s+V9xN1q8lTOZrRI0PdgoU8W+1oIr9OHSG1ZeUBQx60izTEwMnWBxY2aA8SQolIVvsJCcMMc/EAnaz/rdJ5IkeqXGslIhUI7WCPHnPWN8CSdwMOLi5BNaOAK7Y2FkfKTUlO7I52BL87Cl3YpMxR0mTDrfSJTSp0B3ZAbUIXDA7biSh04YLwGQVI799vcyJf355A60btPaiuiBgI0am3h0WxnOACg7K6eV023EiUQ24UjlQ8pufHcJ1oDW8v6LHlp/atCWOl9KQIun9UUg8DD8/BLPprc0wzAV6Nco0ZIedouxZuUhduYYvUrLJ+ICpaZg6oPGitVJPIgyyI+WTfjRN4WTj/Z3Yhuj0RqF8b5ea4FNWuJtfF724t7SVnZsYlZGSCqL8gaEzbIATVe3THn5VwbK+S4ELD/9W6MOd6aZcTOK2yP3jlwjcjnW8sLuX+2qNwtSVVa4o5VsRZU40Da+3flzoBsyUwSE3H2PsFPH29lIQ==
+lizzy@yubikey
diff --git a/playbooks/roles/common/files/jail.conf b/playbooks/roles/common/files/jail.conf
new file mode 100644
index 0000000..2c37a26
--- /dev/null
+++ b/playbooks/roles/common/files/jail.conf
@@ -0,0 +1,979 @@
+#
+# WARNING: heavily refactored in 0.9.0 release. Please review and
+# customize settings for your setup.
+#
+# Changes: in most of the cases you should not modify this
+# file, but provide customizations in jail.local file,
+# or separate .conf files under jail.d/ directory, e.g.:
+#
+# HOW TO ACTIVATE JAILS:
+#
+# YOU SHOULD NOT MODIFY THIS FILE.
+#
+# It will probably be overwritten or improved in a distribution update.
+#
+# Provide customizations in a jail.local file or a jail.d/customisation.local.
+# For example to change the default bantime for all jails and to enable the
+# ssh-iptables jail the following (uncommented) would appear in the .local file.
+# See man 5 jail.conf for details.
+#
+# [DEFAULT]
+# bantime = 1h
+#
+#
+# See jail.conf(5) man page for more information
+
+
+
+# Comments: use '#' for comment lines and ';' (following a space) for inline comments
+
+
+[INCLUDES]
+
+#before = paths-distro.conf
+before = paths-debian.conf
+
+# The DEFAULT allows a global definition of the options. They can be overridden
+# in each jail afterwards.
+
+[DEFAULT]
+
+#
+# MISCELLANEOUS OPTIONS
+#
+
+# "bantime.increment" allows to use database for searching of previously banned ip's to increase a
+# default ban time using special formula, default it is banTime * 1, 2, 4, 8, 16, 32...
+#bantime.increment = true
+
+# "bantime.rndtime" is the max number of seconds using for mixing with random time
+# to prevent "clever" botnets calculate exact time IP can be unbanned again:
+#bantime.rndtime =
+
+# "bantime.maxtime" is the max number of seconds using the ban time can reach (doesn't grow further)
+#bantime.maxtime =
+
+# "bantime.factor" is a coefficient to calculate exponent growing of the formula or common multiplier,
+# default value of factor is 1 and with default value of formula, the ban time
+# grows by 1, 2, 4, 8, 16 ...
+#bantime.factor = 1
+
+# "bantime.formula" used by default to calculate next value of ban time, default value below,
+# the same ban time growing will be reached by multipliers 1, 2, 4, 8, 16, 32...
+#bantime.formula = ban.Time * (1<<(ban.Count if ban.Count<20 else 20)) * banFactor
+#
+# more aggressive example of formula has the same values only for factor "2.0 / 2.885385" :
+#bantime.formula = ban.Time * math.exp(float(ban.Count+1)*banFactor)/math.exp(1*banFactor)
+
+# "bantime.multipliers" used to calculate next value of ban time instead of formula, corresponding
+# previously ban count and given "bantime.factor" (for multipliers default is 1);
+# following example grows ban time by 1, 2, 4, 8, 16 ... and if last ban count greater as multipliers count,
+# always used last multiplier (64 in example), for factor '1' and original ban time 600 - 10.6 hours
+#bantime.multipliers = 1 2 4 8 16 32 64
+# following example can be used for small initial ban time (bantime=60) - it grows more aggressive at begin,
+# for bantime=60 the multipliers are minutes and equal: 1 min, 5 min, 30 min, 1 hour, 5 hour, 12 hour, 1 day, 2 day
+#bantime.multipliers = 1 5 30 60 300 720 1440 2880
+
+# "bantime.overalljails" (if true) specifies the search of IP in the database will be executed
+# cross over all jails, if false (default), only current jail of the ban IP will be searched
+#bantime.overalljails = false
+
+# --------------------
+
+# "ignoreself" specifies whether the local resp. own IP addresses should be ignored
+# (default is true). Fail2ban will not ban a host which matches such addresses.
+#ignoreself = true
+
+# "ignoreip" can be a list of IP addresses, CIDR masks or DNS hosts. Fail2ban
+# will not ban a host which matches an address in this list. Several addresses
+# can be defined using space (and/or comma) separator.
+#ignoreip = 127.0.0.1/8 ::1
+
+# External command that will take an tagged arguments to ignore, e.g. <ip>,
+# and return true if the IP is to be ignored. False otherwise.
+#
+# ignorecommand = /path/to/command <ip>
+ignorecommand =
+
+# "bantime" is the number of seconds that a host is banned.
+bantime = 10m
+
+# A host is banned if it has generated "maxretry" during the last "findtime"
+# seconds.
+findtime = 10m
+
+# "maxretry" is the number of failures before a host get banned.
+maxretry = 5
+
+# "maxmatches" is the number of matches stored in ticket (resolvable via tag <matches> in actions).
+maxmatches = %(maxretry)s
+
+# "backend" specifies the backend used to get files modification.
+# Available options are "pyinotify", "gamin", "polling", "systemd" and "auto".
+# This option can be overridden in each jail as well.
+#
+# pyinotify: requires pyinotify (a file alteration monitor) to be installed.
+# If pyinotify is not installed, Fail2ban will use auto.
+# gamin: requires Gamin (a file alteration monitor) to be installed.
+# If Gamin is not installed, Fail2ban will use auto.
+# polling: uses a polling algorithm which does not require external libraries.
+# systemd: uses systemd python library to access the systemd journal.
+# Specifying "logpath" is not valid for this backend.
+# See "journalmatch" in the jails associated filter config
+# auto: will try to use the following backends, in order:
+# pyinotify, gamin, polling.
+#
+# Note: if systemd backend is chosen as the default but you enable a jail
+# for which logs are present only in its own log files, specify some other
+# backend for that jail (e.g. polling) and provide empty value for
+# journalmatch. See https://github.com/fail2ban/fail2ban/issues/959#issuecomment-74901200
+backend = systemd
+
+# "usedns" specifies if jails should trust hostnames in logs,
+# warn when DNS lookups are performed, or ignore all hostnames in logs
+#
+# yes: if a hostname is encountered, a DNS lookup will be performed.
+# warn: if a hostname is encountered, a DNS lookup will be performed,
+# but it will be logged as a warning.
+# no: if a hostname is encountered, will not be used for banning,
+# but it will be logged as info.
+# raw: use raw value (no hostname), allow use it for no-host filters/actions (example user)
+usedns = warn
+
+# "logencoding" specifies the encoding of the log files handled by the jail
+# This is used to decode the lines from the log file.
+# Typical examples: "ascii", "utf-8"
+#
+# auto: will use the system locale setting
+logencoding = auto
+
+# "enabled" enables the jails.
+# By default all jails are disabled, and it should stay this way.
+# Enable only relevant to your setup jails in your .local or jail.d/*.conf
+#
+# true: jail will be enabled and log files will get monitored for changes
+# false: jail is not enabled
+enabled = false
+
+
+# "mode" defines the mode of the filter (see corresponding filter implementation for more info).
+mode = normal
+
+# "filter" defines the filter to use by the jail.
+# By default jails have names matching their filter name
+#
+filter = %(__name__)s[mode=%(mode)s]
+
+
+#
+# ACTIONS
+#
+
+# Some options used for actions
+
+# Destination email address used solely for the interpolations in
+# jail.{conf,local,d/*} configuration files.
+destemail = root@localhost
+
+# Sender email address used solely for some actions
+sender = root@<fq-hostname>
+
+# E-mail action. Since 0.8.1 Fail2Ban uses sendmail MTA for the
+# mailing. Change mta configuration parameter to mail if you want to
+# revert to conventional 'mail'.
+mta = sendmail
+
+# Default protocol
+protocol = tcp
+
+# Specify chain where jumps would need to be added in ban-actions expecting parameter chain
+chain = <known/chain>
+
+# Ports to be banned
+# Usually should be overridden in a particular jail
+port = 0:65535
+
+# Format of user-agent https://tools.ietf.org/html/rfc7231#section-5.5.3
+fail2ban_agent = Fail2Ban/%(fail2ban_version)s
+
+#
+# Action shortcuts. To be used to define action parameter
+
+# Default banning action (e.g. iptables, iptables-new,
+# iptables-multiport, shorewall, etc) It is used to define
+# action_* variables. Can be overridden globally or per
+# section within jail.local file
+banaction = iptables-multiport
+banaction_allports = iptables-allports
+
+# The simplest action to take: ban only
+action_ = %(banaction)s[port="%(port)s", protocol="%(protocol)s", chain="%(chain)s"]
+
+# ban & send an e-mail with whois report to the destemail.
+action_mw = %(action_)s
+ %(mta)s-whois[sender="%(sender)s", dest="%(destemail)s", protocol="%(protocol)s", chain="%(chain)s"]
+
+# ban & send an e-mail with whois report and relevant log lines
+# to the destemail.
+action_mwl = %(action_)s
+ %(mta)s-whois-lines[sender="%(sender)s", dest="%(destemail)s", logpath="%(logpath)s", chain="%(chain)s"]
+
+# See the IMPORTANT note in action.d/xarf-login-attack for when to use this action
+#
+# ban & send a xarf e-mail to abuse contact of IP address and include relevant log lines
+# to the destemail.
+action_xarf = %(action_)s
+ xarf-login-attack[service=%(__name__)s, sender="%(sender)s", logpath="%(logpath)s", port="%(port)s"]
+
+# ban & send a notification to one or more of the 50+ services supported by Apprise.
+# See https://github.com/caronc/apprise/wiki for details on what is supported.
+#
+# You may optionally over-ride the default configuration line (containing the Apprise URLs)
+# by using 'apprise[config="/alternate/path/to/apprise.cfg"]' otherwise
+# /etc/fail2ban/apprise.conf is sourced for your supported notification configuration.
+# action = %(action_)s
+# apprise
+
+# ban IP on CloudFlare & send an e-mail with whois report and relevant log lines
+# to the destemail.
+action_cf_mwl = cloudflare[cfuser="%(cfemail)s", cftoken="%(cfapikey)s"]
+ %(mta)s-whois-lines[sender="%(sender)s", dest="%(destemail)s", logpath="%(logpath)s", chain="%(chain)s"]
+
+# Report block via blocklist.de fail2ban reporting service API
+#
+# See the IMPORTANT note in action.d/blocklist_de.conf for when to use this action.
+# Specify expected parameters in file action.d/blocklist_de.local or if the interpolation
+# `action_blocklist_de` used for the action, set value of `blocklist_de_apikey`
+# in your `jail.local` globally (section [DEFAULT]) or per specific jail section (resp. in
+# corresponding jail.d/my-jail.local file).
+#
+action_blocklist_de = blocklist_de[email="%(sender)s", service="%(__name__)s", apikey="%(blocklist_de_apikey)s", agent="%(fail2ban_agent)s"]
+
+# Report ban via abuseipdb.com.
+#
+# See action.d/abuseipdb.conf for usage example and details.
+#
+action_abuseipdb = abuseipdb
+
+# Choose default action. To change, just override value of 'action' with the
+# interpolation to the chosen action shortcut (e.g. action_mw, action_mwl, etc) in jail.local
+# globally (section [DEFAULT]) or per specific section
+action = %(action_)s
+
+
+#
+# JAILS
+#
+
+#
+# SSH servers
+#
+
+[sshd]
+
+# To use more aggressive sshd modes set filter parameter "mode" in jail.local:
+# normal (default), ddos, extra or aggressive (combines all).
+# See "tests/files/logs/sshd" or "filter.d/sshd.conf" for usage example and details.
+mode = normal
+enabled = true
+port = ssh
+logpath = %(sshd_log)s
+backend = %(sshd_backend)s
+
+
+[dropbear]
+
+port = ssh
+logpath = %(dropbear_log)s
+backend = %(dropbear_backend)s
+
+
+[selinux-ssh]
+
+port = ssh
+logpath = %(auditd_log)s
+
+
+#
+# HTTP servers
+#
+
+[apache-auth]
+
+port = http,https
+logpath = %(apache_error_log)s
+
+
+[apache-badbots]
+# Ban hosts which agent identifies spammer robots crawling the web
+# for email addresses. The mail outputs are buffered.
+port = http,https
+logpath = %(apache_access_log)s
+bantime = 48h
+maxretry = 1
+
+
+[apache-noscript]
+
+port = http,https
+logpath = %(apache_error_log)s
+
+
+[apache-overflows]
+
+port = http,https
+logpath = %(apache_error_log)s
+maxretry = 2
+
+
+[apache-nohome]
+
+port = http,https
+logpath = %(apache_error_log)s
+maxretry = 2
+
+
+[apache-botsearch]
+
+port = http,https
+logpath = %(apache_error_log)s
+maxretry = 2
+
+
+[apache-fakegooglebot]
+
+port = http,https
+logpath = %(apache_access_log)s
+maxretry = 1
+ignorecommand = %(fail2ban_confpath)s/filter.d/ignorecommands/apache-fakegooglebot <ip>
+
+
+[apache-modsecurity]
+
+port = http,https
+logpath = %(apache_error_log)s
+maxretry = 2
+
+
+[apache-shellshock]
+
+port = http,https
+logpath = %(apache_error_log)s
+maxretry = 1
+
+
+[openhab-auth]
+
+filter = openhab
+banaction = %(banaction_allports)s
+logpath = /opt/openhab/logs/request.log
+
+
+# To use more aggressive http-auth modes set filter parameter "mode" in jail.local:
+# normal (default), aggressive (combines all), auth or fallback
+# See "tests/files/logs/nginx-http-auth" or "filter.d/nginx-http-auth.conf" for usage example and details.
+[nginx-http-auth]
+# mode = normal
+port = http,https
+logpath = %(nginx_error_log)s
+
+# To use 'nginx-limit-req' jail you should have `ngx_http_limit_req_module`
+# and define `limit_req` and `limit_req_zone` as described in nginx documentation
+# http://nginx.org/en/docs/http/ngx_http_limit_req_module.html
+# or for example see in 'config/filter.d/nginx-limit-req.conf'
+[nginx-limit-req]
+port = http,https
+logpath = %(nginx_error_log)s
+
+[nginx-botsearch]
+
+port = http,https
+logpath = %(nginx_error_log)s
+
+[nginx-bad-request]
+port = http,https
+logpath = %(nginx_access_log)s
+
+# Ban attackers that try to use PHP's URL-fopen() functionality
+# through GET/POST variables. - Experimental, with more than a year
+# of usage in production environments.
+
+[php-url-fopen]
+
+port = http,https
+logpath = %(nginx_access_log)s
+ %(apache_access_log)s
+
+
+[suhosin]
+
+port = http,https
+logpath = %(suhosin_log)s
+
+
+[lighttpd-auth]
+# Same as above for Apache's mod_auth
+# It catches wrong authentifications
+port = http,https
+logpath = %(lighttpd_error_log)s
+
+
+#
+# Webmail and groupware servers
+#
+
+[roundcube-auth]
+
+port = http,https
+logpath = %(roundcube_errors_log)s
+# Use following line in your jail.local if roundcube logs to journal.
+#backend = %(syslog_backend)s
+
+
+[openwebmail]
+
+port = http,https
+logpath = /var/log/openwebmail.log
+
+
+[horde]
+
+port = http,https
+logpath = /var/log/horde/horde.log
+
+
+[groupoffice]
+
+port = http,https
+logpath = /home/groupoffice/log/info.log
+
+
+[sogo-auth]
+# Monitor SOGo groupware server
+# without proxy this would be:
+# port = 20000
+port = http,https
+logpath = /var/log/sogo/sogo.log
+
+
+[tine20]
+
+logpath = /var/log/tine20/tine20.log
+port = http,https
+
+
+#
+# Web Applications
+#
+#
+
+[drupal-auth]
+
+port = http,https
+logpath = %(syslog_daemon)s
+backend = %(syslog_backend)s
+
+[guacamole]
+
+port = http,https
+logpath = /var/log/tomcat*/catalina.out
+#logpath = /var/log/guacamole.log
+
+[monit]
+#Ban clients brute-forcing the monit gui login
+port = 2812
+logpath = /var/log/monit
+ /var/log/monit.log
+
+
+[webmin-auth]
+
+port = 10000
+logpath = %(syslog_authpriv)s
+backend = %(syslog_backend)s
+
+
+[froxlor-auth]
+
+port = http,https
+logpath = %(syslog_authpriv)s
+backend = %(syslog_backend)s
+
+
+#
+# HTTP Proxy servers
+#
+#
+
+[squid]
+
+port = 80,443,3128,8080
+logpath = /var/log/squid/access.log
+
+
+[3proxy]
+
+port = 3128
+logpath = /var/log/3proxy.log
+
+
+#
+# FTP servers
+#
+
+
+[proftpd]
+
+port = ftp,ftp-data,ftps,ftps-data
+logpath = %(proftpd_log)s
+backend = %(proftpd_backend)s
+
+
+[pure-ftpd]
+
+port = ftp,ftp-data,ftps,ftps-data
+logpath = %(pureftpd_log)s
+backend = %(pureftpd_backend)s
+
+
+[gssftpd]
+
+port = ftp,ftp-data,ftps,ftps-data
+logpath = %(syslog_daemon)s
+backend = %(syslog_backend)s
+
+
+[wuftpd]
+
+port = ftp,ftp-data,ftps,ftps-data
+logpath = %(wuftpd_log)s
+backend = %(wuftpd_backend)s
+
+
+[vsftpd]
+# or overwrite it in jails.local to be
+# logpath = %(syslog_authpriv)s
+# if you want to rely on PAM failed login attempts
+# vsftpd's failregex should match both of those formats
+port = ftp,ftp-data,ftps,ftps-data
+logpath = %(vsftpd_log)s
+
+
+#
+# Mail servers
+#
+
+# ASSP SMTP Proxy Jail
+[assp]
+
+port = smtp,465,submission
+logpath = /root/path/to/assp/logs/maillog.txt
+
+
+[courier-smtp]
+
+port = smtp,465,submission
+logpath = %(syslog_mail)s
+backend = %(syslog_backend)s
+
+
+[postfix]
+# To use another modes set filter parameter "mode" in jail.local:
+mode = more
+port = smtp,465,submission
+logpath = %(postfix_log)s
+backend = %(postfix_backend)s
+
+
+[postfix-rbl]
+
+filter = postfix[mode=rbl]
+port = smtp,465,submission
+logpath = %(postfix_log)s
+backend = %(postfix_backend)s
+maxretry = 1
+
+
+[sendmail-auth]
+
+port = submission,465,smtp
+logpath = %(syslog_mail)s
+backend = %(syslog_backend)s
+
+
+[sendmail-reject]
+# To use more aggressive modes set filter parameter "mode" in jail.local:
+# normal (default), extra or aggressive
+# See "tests/files/logs/sendmail-reject" or "filter.d/sendmail-reject.conf" for usage example and details.
+#mode = normal
+port = smtp,465,submission
+logpath = %(syslog_mail)s
+backend = %(syslog_backend)s
+
+
+[qmail-rbl]
+
+filter = qmail
+port = smtp,465,submission
+logpath = /service/qmail/log/main/current
+
+
+# dovecot defaults to logging to the mail syslog facility
+# but can be set by syslog_facility in the dovecot configuration.
+[dovecot]
+
+port = pop3,pop3s,imap,imaps,submission,465,sieve
+logpath = %(dovecot_log)s
+backend = %(dovecot_backend)s
+
+
+[sieve]
+
+port = smtp,465,submission
+logpath = %(dovecot_log)s
+backend = %(dovecot_backend)s
+
+
+[solid-pop3d]
+
+port = pop3,pop3s
+logpath = %(solidpop3d_log)s
+
+
+[exim]
+# see filter.d/exim.conf for further modes supported from filter:
+#mode = normal
+port = smtp,465,submission
+logpath = %(exim_main_log)s
+
+
+[exim-spam]
+
+port = smtp,465,submission
+logpath = %(exim_main_log)s
+
+
+[kerio]
+
+port = imap,smtp,imaps,465
+logpath = /opt/kerio/mailserver/store/logs/security.log
+
+
+#
+# Mail servers authenticators: might be used for smtp,ftp,imap servers, so
+# all relevant ports get banned
+#
+
+[courier-auth]
+
+port = smtp,465,submission,imap,imaps,pop3,pop3s
+logpath = %(syslog_mail)s
+backend = %(syslog_backend)s
+
+
+[postfix-sasl]
+
+filter = postfix[mode=auth]
+port = smtp,465,submission,imap,imaps,pop3,pop3s
+# You might consider monitoring /var/log/mail.warn instead if you are
+# running postfix since it would provide the same log lines at the
+# "warn" level but overall at the smaller filesize.
+logpath = %(postfix_log)s
+backend = %(postfix_backend)s
+
+
+[perdition]
+
+port = imap,imaps,pop3,pop3s
+logpath = %(syslog_mail)s
+backend = %(syslog_backend)s
+
+
+[squirrelmail]
+
+port = smtp,465,submission,imap,imap2,imaps,pop3,pop3s,http,https,socks
+logpath = /var/lib/squirrelmail/prefs/squirrelmail_access_log
+
+
+[cyrus-imap]
+
+port = imap,imaps
+logpath = %(syslog_mail)s
+backend = %(syslog_backend)s
+
+
+[uwimap-auth]
+
+port = imap,imaps
+logpath = %(syslog_mail)s
+backend = %(syslog_backend)s
+
+
+#
+#
+# DNS servers
+#
+
+
+# !!! WARNING !!!
+# Since UDP is connection-less protocol, spoofing of IP and imitation
+# of illegal actions is way too simple. Thus enabling of this filter
+# might provide an easy way for implementing a DoS against a chosen
+# victim. See
+# http://nion.modprobe.de/blog/archives/690-fail2ban-+-dns-fail.html
+# Please DO NOT USE this jail unless you know what you are doing.
+#
+# IMPORTANT: see filter.d/named-refused for instructions to enable logging
+# This jail blocks UDP traffic for DNS requests.
+# [named-refused-udp]
+#
+# filter = named-refused
+# port = domain,953
+# protocol = udp
+# logpath = /var/log/named/security.log
+
+# IMPORTANT: see filter.d/named-refused for instructions to enable logging
+# This jail blocks TCP traffic for DNS requests.
+
+[named-refused]
+
+port = domain,953
+logpath = /var/log/named/security.log
+
+
+[nsd]
+
+port = 53
+action_ = %(default/action_)s[name=%(__name__)s-tcp, protocol="tcp"]
+ %(default/action_)s[name=%(__name__)s-udp, protocol="udp"]
+logpath = /var/log/nsd.log
+
+
+#
+# Miscellaneous
+#
+
+[asterisk]
+
+port = 5060,5061
+action_ = %(default/action_)s[name=%(__name__)s-tcp, protocol="tcp"]
+ %(default/action_)s[name=%(__name__)s-udp, protocol="udp"]
+logpath = /var/log/asterisk/messages
+maxretry = 10
+
+
+[freeswitch]
+
+port = 5060,5061
+action_ = %(default/action_)s[name=%(__name__)s-tcp, protocol="tcp"]
+ %(default/action_)s[name=%(__name__)s-udp, protocol="udp"]
+logpath = /var/log/freeswitch.log
+maxretry = 10
+
+
+# enable adminlog; it will log to a file inside znc's directory by default.
+[znc-adminlog]
+
+port = 6667
+logpath = /var/lib/znc/moddata/adminlog/znc.log
+
+
+# To log wrong MySQL access attempts add to /etc/my.cnf in [mysqld] or
+# equivalent section:
+# log-warnings = 2
+#
+# for syslog (daemon facility)
+# [mysqld_safe]
+# syslog
+#
+# for own logfile
+# [mysqld]
+# log-error=/var/log/mysqld.log
+[mysqld-auth]
+
+port = 3306
+logpath = %(mysql_log)s
+backend = %(mysql_backend)s
+
+
+[mssql-auth]
+# Default configuration for Microsoft SQL Server for Linux
+# See the 'mssql-conf' manpage how to change logpath or port
+logpath = /var/opt/mssql/log/errorlog
+port = 1433
+filter = mssql-auth
+
+
+# Log wrong MongoDB auth (for details see filter 'filter.d/mongodb-auth.conf')
+[mongodb-auth]
+# change port when running with "--shardsvr" or "--configsvr" runtime operation
+port = 27017
+logpath = /var/log/mongodb/mongodb.log
+
+
+# Jail for more extended banning of persistent abusers
+# !!! WARNINGS !!!
+# 1. Make sure that your loglevel specified in fail2ban.conf/.local
+# is not at DEBUG level -- which might then cause fail2ban to fall into
+# an infinite loop constantly feeding itself with non-informative lines
+# 2. Increase dbpurgeage defined in fail2ban.conf to e.g. 648000 (7.5 days)
+# to maintain entries for failed logins for sufficient amount of time
+[recidive]
+
+logpath = /var/log/fail2ban.log
+banaction = %(banaction_allports)s
+bantime = 1w
+findtime = 1d
+
+
+# Generic filter for PAM. Has to be used with action which bans all
+# ports such as iptables-allports, shorewall
+
+[pam-generic]
+# pam-generic filter can be customized to monitor specific subset of 'tty's
+banaction = %(banaction_allports)s
+logpath = %(syslog_authpriv)s
+backend = %(syslog_backend)s
+
+
+[xinetd-fail]
+
+banaction = iptables-multiport-log
+logpath = %(syslog_daemon)s
+backend = %(syslog_backend)s
+maxretry = 2
+
+
+# stunnel - need to set port for this
+[stunnel]
+
+logpath = /var/log/stunnel4/stunnel.log
+
+
+[ejabberd-auth]
+
+port = 5222
+logpath = /var/log/ejabberd/ejabberd.log
+
+
+[counter-strike]
+
+logpath = /opt/cstrike/logs/L[0-9]*.log
+tcpport = 27030,27031,27032,27033,27034,27035,27036,27037,27038,27039
+udpport = 1200,27000,27001,27002,27003,27004,27005,27006,27007,27008,27009,27010,27011,27012,27013,27014,27015
+action_ = %(default/action_)s[name=%(__name__)s-tcp, port="%(tcpport)s", protocol="tcp"]
+ %(default/action_)s[name=%(__name__)s-udp, port="%(udpport)s", protocol="udp"]
+
+[softethervpn]
+port = 500,4500
+protocol = udp
+logpath = /usr/local/vpnserver/security_log/*/sec.log
+
+[gitlab]
+port = http,https
+logpath = /var/log/gitlab/gitlab-rails/application.log
+
+[grafana]
+port = http,https
+logpath = /var/log/grafana/grafana.log
+
+[bitwarden]
+port = http,https
+logpath = /home/*/bwdata/logs/identity/Identity/log.txt
+
+[centreon]
+port = http,https
+logpath = /var/log/centreon/login.log
+
+# consider low maxretry and a long bantime
+# nobody except your own Nagios server should ever probe nrpe
+[nagios]
+
+logpath = %(syslog_daemon)s ; nrpe.cfg may define a different log_facility
+backend = %(syslog_backend)s
+maxretry = 1
+
+
+[oracleims]
+# see "oracleims" filter file for configuration requirement for Oracle IMS v6 and above
+logpath = /opt/sun/comms/messaging64/log/mail.log_current
+banaction = %(banaction_allports)s
+
+[directadmin]
+logpath = /var/log/directadmin/login.log
+port = 2222
+
+[portsentry]
+logpath = /var/lib/portsentry/portsentry.history
+maxretry = 1
+
+[pass2allow-ftp]
+# this pass2allow example allows FTP traffic after successful HTTP authentication
+port = ftp,ftp-data,ftps,ftps-data
+# knocking_url variable must be overridden to some secret value in jail.local
+knocking_url = /knocking/
+filter = apache-pass[knocking_url="%(knocking_url)s"]
+# access log of the website with HTTP auth
+logpath = %(apache_access_log)s
+blocktype = RETURN
+returntype = DROP
+action = %(action_)s[blocktype=%(blocktype)s, returntype=%(returntype)s,
+ actionstart_on_demand=false, actionrepair_on_unban=true]
+bantime = 1h
+maxretry = 1
+findtime = 1
+
+
+[murmur]
+# AKA mumble-server
+port = 64738
+action_ = %(default/action_)s[name=%(__name__)s-tcp, protocol="tcp"]
+ %(default/action_)s[name=%(__name__)s-udp, protocol="udp"]
+logpath = /var/log/mumble-server/mumble-server.log
+
+
+[screensharingd]
+# For Mac OS Screen Sharing Service (VNC)
+logpath = /var/log/system.log
+logencoding = utf-8
+
+[haproxy-http-auth]
+# HAProxy by default doesn't log to file you'll need to set it up to forward
+# logs to a syslog server which would then write them to disk.
+# See "haproxy-http-auth" filter for a brief cautionary note when setting
+# maxretry and findtime.
+logpath = /var/log/haproxy.log
+
+[slapd]
+port = ldap,ldaps
+logpath = /var/log/slapd.log
+
+[domino-smtp]
+port = smtp,ssmtp
+logpath = /home/domino01/data/IBM_TECHNICAL_SUPPORT/console.log
+
+[phpmyadmin-syslog]
+port = http,https
+logpath = %(syslog_authpriv)s
+backend = %(syslog_backend)s
+
+
+[zoneminder]
+# Zoneminder HTTP/HTTPS web interface auth
+# Logs auth failures to apache2 error log
+port = http,https
+logpath = %(apache_error_log)s
+
+[traefik-auth]
+# to use 'traefik-auth' filter you have to configure your Traefik instance,
+# see `filter.d/traefik-auth.conf` for details and service example.
+port = http,https
+logpath = /var/log/traefik/access.log
+
+[scanlogd]
+logpath = %(syslog_local0)s
+banaction = %(banaction_allports)s
+
+[monitorix]
+port = 8080
+logpath = /var/log/monitorix-httpd
diff --git a/playbooks/roles/common/files/sshd_config b/playbooks/roles/common/files/sshd_config
new file mode 100644
index 0000000..0a5c50e
--- /dev/null
+++ b/playbooks/roles/common/files/sshd_config
@@ -0,0 +1,21 @@
+Include /etc/ssh/sshd_config.d/*.conf
+
+Port 22
+PermitRootLogin yes
+PubkeyAuthentication yes
+PasswordAuthentication no
+
+KbdInteractiveAuthentication no
+
+UsePAM yes
+
+AllowAgentForwarding yes
+X11Forwarding no
+PrintMotd no
+PrintLastLog yes
+TCPKeepAlive yes
+ClientAliveInterval 300
+ClientAliveCountMax 1
+
+AcceptEnv LANG LC_*
+Subsystem sftp /usr/lib/openssh/sftp-server
diff --git a/playbooks/roles/common/handlers/main.yml b/playbooks/roles/common/handlers/main.yml
new file mode 100644
index 0000000..22b2bec
--- /dev/null
+++ b/playbooks/roles/common/handlers/main.yml
@@ -0,0 +1,25 @@
+---
+
+- name: Restart sshd
+ ansible.builtin.service:
+ name: sshd
+ state: restarted
+ enabled: true
+
+- name: Reload ufw
+ ansible.builtin.service:
+ name: ufw
+ state: restarted
+ enabled: true
+
+- name: Enable fail2ban
+ ansible.builtin.service:
+ name: fail2ban
+ state: restarted
+ enabled: true
+
+- name: Enable systemd-timesyncd
+ ansible.builtin.service:
+ name: systemd-timesyncd
+ state: restarted
+ enabled: true
diff --git a/playbooks/roles/common/tasks/main.yml b/playbooks/roles/common/tasks/main.yml
new file mode 100644
index 0000000..f32893a
--- /dev/null
+++ b/playbooks/roles/common/tasks/main.yml
@@ -0,0 +1,76 @@
+---
+
+- name: Apt upgrade, update
+ ansible.builtin.apt:
+ update_cache: true
+ upgrade: "dist"
+
+- name: Set a hostname specifying strategy
+ ansible.builtin.hostname:
+ name: "{{ inventory_hostname }}"
+ use: systemd
+
+- name: Install dependencies
+ ansible.builtin.apt:
+ name:
+ - apt-transport-https
+ - ca-certificates
+ - curl
+ - gnupg-agent
+ - software-properties-common
+ - systemd-timesyncd
+ - systemd-resolved
+ - vim
+ - git
+ - rsync
+ state: latest
+ update_cache: true
+ notify:
+ - Enable systemd-timesyncd
+
+## DNS
+- name: Configure systemd-resolved
+ ansible.builtin.include_tasks:
+ file: "systemd-resolved.yml"
+
+## SSH
+- name: Copy sshd_config
+ ansible.builtin.copy:
+ src: files/sshd_config
+ dest: /etc/ssh/sshd_config
+ owner: root
+ group: root
+ mode: u=rw,g=r,o=r
+ notify:
+ - Restart sshd
+
+- name: Copy authorized keys
+  ansible.builtin.copy:
+    src: files/authorized_keys
+    dest: /root/.ssh/authorized_keys
+    owner: root
+    group: root
+    mode: u=rw,g=r,o=r
+
+## FAIL2BAN
+- name: Install Fail2Ban
+ ansible.builtin.apt:
+ name: fail2ban
+ state: present
+ notify:
+ - Enable fail2ban
+
+## FIREWALL
+- name: Install ufw
+ ansible.builtin.apt:
+ name: ufw
+ state: present
+
+- name: Allow ssh from rfc1918 networks
+ loop: "{{ rfc1918_networks }}"
+ community.general.ufw:
+ rule: allow
+ name: "OpenSSH"
+ from: "{{ item }}"
+ state: "enabled"
+ notify:
+ - Reload ufw
diff --git a/playbooks/roles/common/tasks/systemd-resolved.yml b/playbooks/roles/common/tasks/systemd-resolved.yml
new file mode 100644
index 0000000..f0f7163
--- /dev/null
+++ b/playbooks/roles/common/tasks/systemd-resolved.yml
@@ -0,0 +1,64 @@
+---
+
+- name: Add dns servers
+ community.general.ini_file:
+ path: /etc/systemd/resolved.conf
+ section: Resolve
+ option: DNS
+ value: '{{ dns_servers[0] }}'
+ mode: '0644'
+ no_extra_spaces: true
+ register: conf_dns
+ when: dns_servers | length > 0
+
+- name: Add dns fallback server
+ community.general.ini_file:
+ path: /etc/systemd/resolved.conf
+ section: Resolve
+ option: FallbackDNS
+ value: '{{ dns_servers[1] }}'
+ mode: '0644'
+ no_extra_spaces: true
+ register: conf_fallbackdns
+ when: dns_servers | length > 1
+
+- name: Enable dnssec
+ community.general.ini_file:
+ path: /etc/systemd/resolved.conf
+ section: Resolve
+ option: DNSSEC
+ value: '{{ "yes" if dns_dnssec else "no" }}'
+ mode: '0644'
+ no_extra_spaces: true
+ register: conf_dnssec
+
+- name: Add search domains
+ community.general.ini_file:
+ path: /etc/systemd/resolved.conf
+ section: Resolve
+ option: Domains
+ value: '{{ dns_domains | join(" ") }}'
+ mode: '0644'
+ no_extra_spaces: true
+ register: conf_domains
+
+- name: Stub listener
+ community.general.ini_file:
+ path: /etc/systemd/resolved.conf
+ section: Resolve
+ option: DNSStubListener
+ value: '{{ "yes" if dns_stub_listener else "no" }}'
+ mode: '0644'
+ no_extra_spaces: true
+  register: conf_stub_listener
+
+- name: Reload systemd-resolved
+ ansible.builtin.service:
+ name: systemd-resolved
+ state: restarted
+ enabled: true
+ when:
+ - conf_dns is changed or
+ conf_fallbackdns is changed or
+ conf_dnssec is changed or
+      conf_domains is changed or
+      conf_stub_listener is changed
diff --git a/playbooks/roles/docker/files/docker-compose@.service b/playbooks/roles/docker/files/docker-compose@.service
new file mode 100644
index 0000000..bd8dedb
--- /dev/null
+++ b/playbooks/roles/docker/files/docker-compose@.service
@@ -0,0 +1,18 @@
+[Unit]
+Description=%i service with docker compose
+Requires=docker.service
+After=docker.service
+
+[Service]
+RemainAfterExit=true
+WorkingDirectory=/etc/docker/compose/%i
+ExecStartPre=/usr/bin/docker compose pull
+ExecStart=/usr/bin/docker compose up --detach --remove-orphans
+ExecStop=/usr/bin/docker compose down
+Restart=always
+RestartSec=5
+StartLimitInterval=500
+StartLimitBurst=3
+
+[Install]
+WantedBy=multi-user.target
diff --git a/playbooks/roles/docker/files/docker-rollout b/playbooks/roles/docker/files/docker-rollout
new file mode 100755
index 0000000..5da1986
--- /dev/null
+++ b/playbooks/roles/docker/files/docker-rollout
@@ -0,0 +1,212 @@
+#!/bin/bash
+set -e
+
+# Defaults
+HEALTHCHECK_TIMEOUT=60
+NO_HEALTHCHECK_TIMEOUT=10
+
+# Print metadata for Docker CLI plugin
+if [[ "$1" == "docker-cli-plugin-metadata" ]]; then
+ cat <<EOF
+{
+ "SchemaVersion": "0.1.0",
+ "Vendor": "Karol Musur",
+ "Version": "v0.7",
+ "ShortDescription": "Rollout new Compose service version"
+}
+EOF
+ exit
+fi
+
+# Save docker arguments, i.e. arguments before "rollout"
+while [[ $# -gt 0 ]]; do
+ if [[ "$1" == "rollout" ]]; then
+ shift
+ break
+ fi
+
+ DOCKER_ARGS="$DOCKER_ARGS $1"
+ shift
+done
+
+# Check if compose v2 is available
+if docker compose >/dev/null 2>&1; then
+ # shellcheck disable=SC2086 # DOCKER_ARGS must be unquoted to allow multiple arguments
+ COMPOSE_COMMAND="docker $DOCKER_ARGS compose"
+elif docker-compose >/dev/null 2>&1; then
+ COMPOSE_COMMAND="docker-compose"
+else
+ echo "docker compose or docker-compose is required"
+ exit 1
+fi
+
+usage() {
+ cat <<EOF
+
+Usage: docker rollout [OPTIONS] SERVICE
+
+Rollout new Compose service version.
+
+Options:
+ -h, --help Print usage
+ -f, --file FILE Compose configuration files
+ -t, --timeout N Healthcheck timeout (default: $HEALTHCHECK_TIMEOUT seconds)
+ -w, --wait N When no healthcheck is defined, wait for N seconds
+ before stopping old container (default: $NO_HEALTHCHECK_TIMEOUT seconds)
+ --env-file FILE Specify an alternate environment file
+
+EOF
+}
+
+exit_with_usage() {
+ usage
+ exit 1
+}
+
+healthcheck() {
+ local container_id="$1"
+
+ # shellcheck disable=SC2086 # DOCKER_ARGS must be unquoted to allow multiple arguments
+ if docker $DOCKER_ARGS inspect --format='{{json .State.Health.Status}}' "$container_id" | grep -v "unhealthy" | grep -q "healthy"; then
+ return 0
+ fi
+
+ return 1
+}
+
+scale() {
+ local service="$1"
+ local replicas="$2"
+
+ # shellcheck disable=SC2086 # COMPOSE_FILES and ENV_FILES must be unquoted to allow multiple files
+ $COMPOSE_COMMAND $COMPOSE_FILES $ENV_FILES up --detach --scale "$service=$replicas" --no-recreate "$service"
+}
+
+main() {
+ # shellcheck disable=SC2086 # COMPOSE_FILES and ENV_FILES must be unquoted to allow multiple files
+ if [[ "$($COMPOSE_COMMAND $COMPOSE_FILES $ENV_FILES ps --quiet "$SERVICE")" == "" ]]; then
+ echo "==> Service '$SERVICE' is not running. Starting the service."
+ $COMPOSE_COMMAND $COMPOSE_FILES $ENV_FILES up --detach --no-recreate "$SERVICE"
+ exit 0
+ fi
+
+ # shellcheck disable=SC2086 # COMPOSE_FILES and ENV_FILES must be unquoted to allow multiple files
+ OLD_CONTAINER_IDS_STRING=$($COMPOSE_COMMAND $COMPOSE_FILES $ENV_FILES ps --quiet "$SERVICE")
+ OLD_CONTAINER_IDS=()
+ for container_id in $OLD_CONTAINER_IDS_STRING; do
+ OLD_CONTAINER_IDS+=("$container_id")
+ done
+
+ SCALE=${#OLD_CONTAINER_IDS[@]}
+ SCALE_TIMES_TWO=$((SCALE * 2))
+ echo "==> Scaling '$SERVICE' to '$SCALE_TIMES_TWO' instances"
+ scale "$SERVICE" $SCALE_TIMES_TWO
+
+ # Create a variable that contains the IDs of the new containers, but not the old ones
+ # shellcheck disable=SC2086 # COMPOSE_FILES and ENV_FILES must be unquoted to allow multiple files
+ NEW_CONTAINER_IDS_STRING=$($COMPOSE_COMMAND $COMPOSE_FILES $ENV_FILES ps --quiet "$SERVICE" | grep --invert-match --file <(echo "$OLD_CONTAINER_IDS_STRING"))
+ NEW_CONTAINER_IDS=()
+ for container_id in $NEW_CONTAINER_IDS_STRING; do
+ NEW_CONTAINER_IDS+=("$container_id")
+ done
+
+ # Check if first container has healthcheck
+ # shellcheck disable=SC2086 # DOCKER_ARGS must be unquoted to allow multiple arguments
+ if docker $DOCKER_ARGS inspect --format='{{json .State.Health}}' "${OLD_CONTAINER_IDS[0]}" | grep --quiet "Status"; then
+ echo "==> Waiting for new containers to be healthy (timeout: $HEALTHCHECK_TIMEOUT seconds)"
+ for _ in $(seq 1 "$HEALTHCHECK_TIMEOUT"); do
+ SUCCESS=0
+
+ for NEW_CONTAINER_ID in "${NEW_CONTAINER_IDS[@]}"; do
+ if healthcheck "$NEW_CONTAINER_ID"; then
+ SUCCESS=$((SUCCESS + 1))
+ fi
+ done
+
+ if [[ "$SUCCESS" == "$SCALE" ]]; then
+ break
+ fi
+
+ sleep 1
+ done
+
+ SUCCESS=0
+
+ for NEW_CONTAINER_ID in "${NEW_CONTAINER_IDS[@]}"; do
+ if healthcheck "$NEW_CONTAINER_ID"; then
+ SUCCESS=$((SUCCESS + 1))
+ fi
+ done
+
+ if [[ "$SUCCESS" != "$SCALE" ]]; then
+ echo "==> New containers are not healthy. Rolling back." >&2
+
+ for NEW_CONTAINER_ID in "${NEW_CONTAINER_IDS[@]}"; do
+ # shellcheck disable=SC2086 # DOCKER_ARGS must be unquoted to allow multiple arguments
+ docker $DOCKER_ARGS stop "$NEW_CONTAINER_ID"
+ # shellcheck disable=SC2086 # DOCKER_ARGS must be unquoted to allow multiple arguments
+ docker $DOCKER_ARGS rm "$NEW_CONTAINER_ID"
+ done
+
+ exit 1
+ fi
+ else
+ echo "==> Waiting for new containers to be ready ($NO_HEALTHCHECK_TIMEOUT seconds)"
+ sleep "$NO_HEALTHCHECK_TIMEOUT"
+ fi
+
+ echo "==> Stopping old containers"
+
+ for OLD_CONTAINER_ID in "${OLD_CONTAINER_IDS[@]}"; do
+ # shellcheck disable=SC2086 # DOCKER_ARGS must be unquoted to allow multiple arguments
+ docker $DOCKER_ARGS stop "$OLD_CONTAINER_ID"
+ # shellcheck disable=SC2086 # DOCKER_ARGS must be unquoted to allow multiple arguments
+ docker $DOCKER_ARGS rm "$OLD_CONTAINER_ID"
+ done
+}
+
+while [[ $# -gt 0 ]]; do
+ case "$1" in
+ -h | --help)
+ usage
+ exit 0
+ ;;
+ -f | --file)
+ COMPOSE_FILES="$COMPOSE_FILES -f $2"
+ shift 2
+ ;;
+ --env-file)
+ ENV_FILES="$ENV_FILES --env-file $2"
+ shift 2
+ ;;
+ -t | --timeout)
+ HEALTHCHECK_TIMEOUT="$2"
+ shift 2
+ ;;
+ -w | --wait)
+ NO_HEALTHCHECK_TIMEOUT="$2"
+ shift 2
+ ;;
+ -*)
+ echo "Unknown option: $1"
+ exit_with_usage
+ ;;
+ *)
+ if [[ -n "$SERVICE" ]]; then
+ echo "SERVICE is already set to '$SERVICE'"
+ exit_with_usage
+ fi
+
+ SERVICE="$1"
+ shift
+ ;;
+ esac
+done
+
+# Require SERVICE argument
+if [[ -z "$SERVICE" ]]; then
+ echo "SERVICE is missing"
+ exit_with_usage
+fi
+
+main
diff --git a/playbooks/roles/docker/handlers/main.yml b/playbooks/roles/docker/handlers/main.yml
new file mode 100644
index 0000000..787c613
--- /dev/null
+++ b/playbooks/roles/docker/handlers/main.yml
@@ -0,0 +1,7 @@
+---
+
+- name: Enable docker
+ ansible.builtin.service:
+ name: docker
+ state: restarted
+ enabled: true
diff --git a/playbooks/roles/docker/tasks/main.yml b/playbooks/roles/docker/tasks/main.yml
new file mode 100644
index 0000000..da01958
--- /dev/null
+++ b/playbooks/roles/docker/tasks/main.yml
@@ -0,0 +1,60 @@
+---
+
+- name: Install dependencies
+ ansible.builtin.apt:
+ name:
+ - apt-transport-https
+ - ca-certificates
+ - curl
+ - gnupg-agent
+ - software-properties-common
+ state: present
+ update_cache: true
+
+- name: Docker GPG key
+ ansible.builtin.apt_key:
+ url: >
+ https://download.docker.com/linux/{{ ansible_distribution | lower }}/gpg
+ state: present
+
+- name: Repository docker
+ ansible.builtin.apt_repository:
+ repo: >
+ deb https://download.docker.com/linux/{{ ansible_distribution | lower }}
+ {{ ansible_distribution_release }} stable
+ state: present
+
+- name: Install docker
+ ansible.builtin.apt:
+ name:
+ - docker-ce
+ - docker-ce-cli
+ - containerd.io
+ state: present
+ update_cache: true
+ notify:
+ - Enable docker
+
+- name: Copy docker-compose@.service
+ ansible.builtin.copy:
+ src: docker-compose@.service
+ dest: /etc/systemd/system/docker-compose@.service
+ owner: root
+ group: root
+ mode: u=rw,g=r,o=r
+
+- name: Ensure /etc/docker/compose exists
+ ansible.builtin.file:
+ path: /etc/docker/compose
+ state: directory
+ owner: root
+ group: root
+ mode: 0700
+
+- name: Copy docker rollout script
+ ansible.builtin.copy:
+ src: docker-rollout
+ dest: /usr/local/bin/docker-rollout
+ owner: root
+ group: root
+ mode: 0755
diff --git a/playbooks/roles/kanidm/tasks/main.yml b/playbooks/roles/kanidm/tasks/main.yml
new file mode 100644
index 0000000..37cc0da
--- /dev/null
+++ b/playbooks/roles/kanidm/tasks/main.yml
@@ -0,0 +1,47 @@
+---
+
+- name: Ensure /etc/docker/compose/kanidm exists
+ ansible.builtin.file:
+ path: /etc/docker/compose/kanidm
+ state: directory
+ owner: root
+ group: root
+ mode: 0700
+
+- name: Build kanidm docker-compose.yml from template
+ ansible.builtin.template:
+ src: docker-compose.yml.j2
+ dest: /etc/docker/compose/kanidm/docker-compose.yml
+ owner: root
+ group: root
+ mode: 0700
+
+- name: Ensure /etc/docker/compose/kanidm/data exists
+ ansible.builtin.file:
+ path: /etc/docker/compose/kanidm/data
+ state: directory
+ owner: root
+ group: root
+ mode: 0700
+
+- name: Build kanidm config
+ ansible.builtin.template:
+ src: server.toml.j2
+ dest: /etc/docker/compose/kanidm/data/server.toml
+ owner: root
+ group: root
+ mode: 0755
+
+- name: Allow LDAPS from rfc1918 networks
+ loop: "{{ rfc1918_networks }}"
+ community.general.ufw:
+ rule: allow
+ proto: tcp
+ port: '3636'
+ from: "{{ item }}"
+
+- name: Enable kanidm
+ ansible.builtin.systemd_service:
+ state: restarted
+ enabled: true
+ name: docker-compose@kanidm
diff --git a/playbooks/roles/kanidm/templates/docker-compose.yml.j2 b/playbooks/roles/kanidm/templates/docker-compose.yml.j2
new file mode 100644
index 0000000..b269865
--- /dev/null
+++ b/playbooks/roles/kanidm/templates/docker-compose.yml.j2
@@ -0,0 +1,13 @@
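+# 8443 (web UI/API) is published on loopback only and proxied by nginx;
+# 3636 (LDAPS) is bound to the configured bind address for internal clients.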
+
+version: '3'
+
+services:
+ kanidm:
+ image: kanidm/server:1.1.0-rc.16
+ restart: always
+ volumes:
+ - ./data:/data
+ - /etc/letsencrypt:/certs:ro
+ ports:
+ - 127.0.0.1:8443:8443
+ - {{ kanidm_bind_address }}:3636:3636
diff --git a/playbooks/roles/kanidm/templates/server.toml.j2 b/playbooks/roles/kanidm/templates/server.toml.j2
new file mode 100644
index 0000000..ac470cc
--- /dev/null
+++ b/playbooks/roles/kanidm/templates/server.toml.j2
@@ -0,0 +1,10 @@
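+# Rendered into ./data/server.toml on the host and read by the container as
+# /data/server.toml; the TLS files come from the host's /etc/letsencrypt mount.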
+bindaddress = "[::]:8443"
+ldapbindaddress = "[::]:3636"
+trust_x_forward_for = true
+db_path = "/data/kanidm.db"
+tls_chain = "/certs/live/{{ kanidm_domain }}/fullchain.pem"
+tls_key = "/certs/live/{{ kanidm_domain }}/privkey.pem"
+log_level = "info"
+
+domain = "{{ kanidm_domain }}"
+origin = "https://{{ kanidm_domain }}"
diff --git a/playbooks/roles/mail/tasks/main.yml b/playbooks/roles/mail/tasks/main.yml
new file mode 100644
index 0000000..81ced1d
--- /dev/null
+++ b/playbooks/roles/mail/tasks/main.yml
@@ -0,0 +1,80 @@
+---
+
+- name: Ensure /etc/docker/compose/mail exists
+ ansible.builtin.file:
+ path: /etc/docker/compose/mail
+ state: directory
+ owner: root
+ group: root
+ mode: 0700
+
+- name: Ensure mail config volume exists
+ ansible.builtin.file:
+ path: /etc/docker/compose/mail/docker-data/dms/config
+ state: directory
+ owner: root
+ group: root
+ mode: 0700
+
+- name: Ensure mail data volume exists with correct permissions
+ ansible.builtin.file:
+ path: /etc/docker/compose/mail/docker-data/dms/mail-data/
+ state: directory
+ owner: 5000
+ group: 5000
+ mode: 0700
+ recurse: true
+
+- name: Build user-patches.sh override script
+ ansible.builtin.template:
+ src: user-patches.sh.j2
+ dest: /etc/docker/compose/mail/docker-data/dms/config/user-patches.sh
+ owner: root
+ group: root
+ mode: 0755
+
+- name: Build dovecot LDAP config
+ ansible.builtin.template:
+ src: dovecot-ldap.conf.j2
+ dest: /etc/docker/compose/mail/docker-data/dms/config/dovecot-ldap.conf
+ owner: root
+ group: root
+ mode: 0700
+
+- name: Ensure roundcube config volume exists
+ ansible.builtin.file:
+ path: /etc/docker/compose/mail/docker-data/roundcube/config
+ state: directory
+ owner: root
+ group: root
+ mode: 0777
+
+- name: Build roundcube oauth2 config
+ ansible.builtin.template:
+ src: oauth2.inc.php.j2
+ dest: /etc/docker/compose/mail/docker-data/roundcube/config/oauth2.inc.php
+ owner: root
+ group: root
+ mode: 0777
+
+- name: Build roundcube sieve plugin config
+ ansible.builtin.template:
+ src: sieve.inc.php.j2
+ dest: /etc/docker/compose/mail/docker-data/roundcube/config/sieve.inc.php
+ owner: root
+ group: root
+ mode: 0777
+
+- name: Build mail docker-compose.yml from template
+ ansible.builtin.template:
+ src: docker-compose.yml.j2
+ dest: /etc/docker/compose/mail/docker-compose.yml
+ owner: root
+ group: root
+ mode: 0700
+
+- name: Daemon-reload and enable mail
+ ansible.builtin.systemd_service:
+ daemon_reload: true
+ state: restarted
+ enabled: true
+ name: docker-compose@mail
diff --git a/playbooks/roles/mail/templates/docker-compose.yml.j2 b/playbooks/roles/mail/templates/docker-compose.yml.j2
new file mode 100644
index 0000000..f122185
--- /dev/null
+++ b/playbooks/roles/mail/templates/docker-compose.yml.j2
@@ -0,0 +1,71 @@
+version: '3'
+
+services:
+ roundcube:
+ image: roundcube/roundcubemail:latest
+ restart: always
+ volumes:
+ - ./docker-data/roundcube/www:/var/www/html
+ - ./docker-data/roundcube/db/sqlite:/var/roundcube/db
+ - ./docker-data/roundcube/config:/var/roundcube/config
+ ports:
+ - 127.0.0.1:9002:80
+ environment:
+ - ROUNDCUBEMAIL_DB_TYPE=sqlite
+ - ROUNDCUBEMAIL_SKIN=elastic
+ - ROUNDCUBEMAIL_PLUGINS={{ roundcube_plugins }}
+ - ROUNDCUBEMAIL_DEFAULT_HOST={{ roundcube_default_host }}
+ - ROUNDCUBEMAIL_DEFAULT_PORT={{ roundcube_default_port }}
+ - ROUNDCUBEMAIL_SMTP_SERVER={{ roundcube_smtp_host }}
+ - ROUNDCUBEMAIL_SMTP_PORT={{ roundcube_smtp_port }}
+
+ mailserver:
+ image: ghcr.io/docker-mailserver/docker-mailserver:latest
+ hostname: {{ mail_domain }}
+ restart: always
+ ports:
+ - 0.0.0.0:25:25
+ - 0.0.0.0:465:465
+ - 0.0.0.0:587:587
+ - 0.0.0.0:993:993
+ - 0.0.0.0:4190:4190
+ volumes:
+ - ./docker-data/dms/mail-data/:/var/mail/
+ - ./docker-data/dms/mail-state/:/var/mail-state/
+ - ./docker-data/dms/mail-logs/:/var/log/mail/
+ - ./docker-data/dms/config/:/tmp/docker-mailserver/
+ - ./docker-data/dms/config/dovecot-ldap.conf:/etc/dovecot/dovecot-ldap.conf.ext
+ - /etc/letsencrypt:/etc/letsencrypt:ro
+ - /etc/localtime:/etc/localtime:ro
+ environment:
+ - SSL_TYPE=letsencrypt
+ - ENABLE_CLAMAV=0
+ - ENABLE_AMAVIS=1
+ - ENABLE_FAIL2BAN=1
+ - ENABLE_MANAGESIEVE=1
+ - ENABLE_POSTGREY=0
+
+ - SPOOF_PROTECTION=1
+ - ACCOUNT_PROVISIONER=LDAP
+ - LDAP_SERVER_HOST={{ ldap_server_host }}
+ - LDAP_SEARCH_BASE={{ ldap_search_base }}
+ - LDAP_BIND_DN={{ ldap_bind_dn }}
+ - LDAP_BIND_PW={{ email_ldap_api_token }}
+
+ - LDAP_QUERY_FILTER_USER={{ ldap_query_filter_user }}
+ - LDAP_QUERY_FILTER_GROUP={{ ldap_query_filter_group }}
+ - LDAP_QUERY_FILTER_ALIAS={{ ldap_query_filter_alias }}
+ - LDAP_QUERY_FILTER_DOMAIN={{ ldap_query_filter_domain }}
+ - LDAP_QUERY_FILTER_SENDERS={{ ldap_query_filter_senders }}
+
+ - POSTMASTER_ADDRESS={{ postmaster_email }}
+
+ - ENABLE_SASLAUTHD=1
+ - SASLAUTHD_MECHANISMS=ldap
+ - SASLAUTHD_LDAP_FILTER={{ sasl_ldap_filter }}
+
+ - ENABLE_OAUTH2=1
+ - OAUTH2_INTROSPECTION_URL={{ roundcube_oauth2_user_uri }}
+ extra_hosts:
+ - {{ ldap_server }}:{{ ldap_intranet }}
diff --git a/playbooks/roles/mail/templates/dovecot-ldap.conf.j2 b/playbooks/roles/mail/templates/dovecot-ldap.conf.j2
new file mode 100644
index 0000000..92bfcbd
--- /dev/null
+++ b/playbooks/roles/mail/templates/dovecot-ldap.conf.j2
@@ -0,0 +1,10 @@
+base = {{ ldap_search_base }}
+uris = {{ ldap_server_host }}
+tls = no
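+# Assumes the LDAP server is only reachable over the internal address added via
+# extra_hosts in docker-compose.yml.j2, so the bind is left unencrypted.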
+ldap_version = 3
+default_pass_scheme = SSHA
+
+auth_bind = yes
+auth_bind_userdn = {{ dovecot_auth_bind_userdn }}
+user_filter = {{ dovecot_user_filter }}
+user_attrs = {{ dovecot_user_attrs }}
diff --git a/playbooks/roles/mail/templates/oauth2.inc.php.j2 b/playbooks/roles/mail/templates/oauth2.inc.php.j2
new file mode 100644
index 0000000..919e162
--- /dev/null
+++ b/playbooks/roles/mail/templates/oauth2.inc.php.j2
@@ -0,0 +1,19 @@
+<?php
+
+$config['oauth_provider'] = 'generic';
+$config['oauth_provider_name'] = 'Misty Mountains Therapy SSO';
+$config['oauth_client_id'] = '{{ roundcube_oauth2_client_id }}';
+$config['oauth_client_secret'] = '{{ roundcube_oauth2_client_basic_secret }}';
+$config['oauth_auth_uri'] = '{{ roundcube_oauth2_auth_uri }}';
+$config['oauth_token_uri'] = '{{ roundcube_oauth2_token_uri }}';
+$config['oauth_identity_uri'] = '{{ roundcube_oauth2_user_uri }}';
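+// These URIs are expected to point at the Kanidm OAuth2 endpoints behind the
+// auth vhost (see the kanidm role and the nginx templates).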
+
+$config['oauth_verify_peer'] = true;
+
+$config['oauth_scope'] = 'email openid profile';
+$config['oauth_identity_fields'] = ['email'];
+
+$config['oauth_login_redirect'] = false;
+
+$config['force_https'] = true;
+$config['use_https'] = true;
diff --git a/playbooks/roles/mail/templates/sieve.inc.php.j2 b/playbooks/roles/mail/templates/sieve.inc.php.j2
new file mode 100644
index 0000000..e7b08b0
--- /dev/null
+++ b/playbooks/roles/mail/templates/sieve.inc.php.j2
@@ -0,0 +1,4 @@
+<?php
+
+$config['managesieve_host'] = "tls://{{ mail_domain }}";
+$config['managesieve_auth_type'] = "PLAIN";
diff --git a/playbooks/roles/mail/templates/user-patches.sh.j2 b/playbooks/roles/mail/templates/user-patches.sh.j2
new file mode 100644
index 0000000..6845563
--- /dev/null
+++ b/playbooks/roles/mail/templates/user-patches.sh.j2
@@ -0,0 +1,20 @@
+#!/bin/bash
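+
+# docker-mailserver runs user-patches.sh late in container startup, after its
+# own setup, so these Postfix/Dovecot overrides are reapplied on every start.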
+
+postconf -e 'smtpd_sasl_type = dovecot'
+postconf -e 'smtpd_sasl_path = /dev/shm/sasl-auth.sock'
+postconf -e 'smtpd_sasl_auth_enable = yes'
+postconf -e 'broken_sasl_auth_clients = yes'
+
+echo 'auth_username_format = %Ln' >> /etc/dovecot/conf.d/10-auth.conf
+
+echo 'username_format = %Ln' >> /etc/dovecot/dovecot-oauth2.conf.ext
+
+echo "passdb {
+ driver = ldap
+ args = /etc/dovecot/dovecot-ldap.conf.ext
+}
+
+userdb {
+ driver = static
+ args = uid=5000 gid=5000 home=/var/mail/%u
+}" > /etc/dovecot/conf.d/auth-ldap.conf.ext
diff --git a/playbooks/roles/mmt/tasks/main.yml b/playbooks/roles/mmt/tasks/main.yml
new file mode 100644
index 0000000..0e84170
--- /dev/null
+++ b/playbooks/roles/mmt/tasks/main.yml
@@ -0,0 +1,23 @@
+---
+
+- name: Ensure /etc/docker/compose/mmt exists
+ ansible.builtin.file:
+ path: /etc/docker/compose/mmt
+ state: directory
+ owner: root
+ group: root
+ mode: 0700
+
+- name: Build mmt docker-compose.yml from template
+ ansible.builtin.template:
+ src: docker-compose.yml.j2
+ dest: /etc/docker/compose/mmt/docker-compose.yml
+ owner: root
+ group: root
+ mode: 0700
+
+- name: Enable mmt
+ ansible.builtin.systemd_service:
+ state: restarted
+ enabled: true
+ name: docker-compose@mmt
diff --git a/playbooks/roles/mmt/templates/docker-compose.yml.j2 b/playbooks/roles/mmt/templates/docker-compose.yml.j2
new file mode 100644
index 0000000..40036cb
--- /dev/null
+++ b/playbooks/roles/mmt/templates/docker-compose.yml.j2
@@ -0,0 +1,21 @@
+version: "3"
+
+services:
+ mmt:
+ restart: always
+ image: git.simponic.xyz/simponic/mistymountains
+ healthcheck:
+ test: ["CMD", "wget", "--spider", "http://localhost:8080/api/health"]
+ interval: 5s
+ timeout: 10s
+ retries: 5
+ ports:
+ - "127.0.0.1:8821:3000"
+ environment:
+ - HCAPTCHA_SECRET={{ hcaptcha_secret }}
+ - FROM_EMAIL={{ from_email }}
+ - SMTP_SERVER={{ smtp_server }}
+ - SMTP_PASSWORD={{ smtp_password }}
+ - SMTP_USERNAME={{ smtp_username }}
+ - FORM_TO_EMAIL={{ form_to_email }}
+ - SMTP_PORT=465
diff --git a/playbooks/roles/nginx/files/nginx.conf b/playbooks/roles/nginx/files/nginx.conf
new file mode 100644
index 0000000..6ddd8ab
--- /dev/null
+++ b/playbooks/roles/nginx/files/nginx.conf
@@ -0,0 +1,26 @@
+user www-data;
+worker_processes 4;
+pid /run/nginx.pid;
+
+events {
+ worker_connections 768;
+}
+
+http {
+ sendfile on;
+ tcp_nopush on;
+ tcp_nodelay on;
+ keepalive_timeout 65;
+ types_hash_max_size 2048;
+ include /etc/nginx/mime.types;
+ default_type application/octet-stream;
+
+ access_log /var/log/nginx/access.log;
+ error_log /var/log/nginx/error.log;
+
+ gzip on;
+ gzip_disable "msie6";
+
+ include /etc/nginx/conf.d/*.conf;
+ include /etc/nginx/sites-enabled/*;
+}
diff --git a/playbooks/roles/nginx/handlers/main.yml b/playbooks/roles/nginx/handlers/main.yml
new file mode 100644
index 0000000..2ce85ba
--- /dev/null
+++ b/playbooks/roles/nginx/handlers/main.yml
@@ -0,0 +1,12 @@
+---
+
+- name: Restart nginx
+ ansible.builtin.service:
+ name: nginx
+ state: restarted
+ enabled: true
+
+- name: Restart ufw
+ ansible.builtin.service:
+ name: ufw
+ state: restarted
diff --git a/playbooks/roles/nginx/tasks/main.yml b/playbooks/roles/nginx/tasks/main.yml
new file mode 100644
index 0000000..b4cd6ed
--- /dev/null
+++ b/playbooks/roles/nginx/tasks/main.yml
@@ -0,0 +1,44 @@
+---
+
+- name: Allow http
+ community.general.ufw:
+ rule: allow
+ port: '80'
+ proto: tcp
+
+- name: Allow https
+ community.general.ufw:
+ rule: allow
+ port: '443'
+ proto: tcp
+ notify:
+ - Restart ufw
+
+- name: Install nginx
+ ansible.builtin.apt:
+ name: nginx
+ state: present
+ notify:
+ - Restart nginx
+
+- name: Download dhparams
+ ansible.builtin.get_url:
+ url: "{{ dh_params_src }}"
+ dest: /etc/nginx/dhparams.pem
+ mode: '0755'
+
+- name: Add system nginx config
+ ansible.builtin.copy:
+ src: nginx.conf
+ dest: /etc/nginx/nginx.conf
+ mode: '0755'
+
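+# Only the vhosts found under templates/<inventory_hostname>/ are installed on
+# each host, matching the per-host template directories in this role.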
+- name: Copy nginx sites
+ ansible.builtin.template:
+ src: "{{ item }}"
+ dest: "/etc/nginx/sites-enabled/"
+ mode: '0755'
+ with_fileglob:
+ - "templates/{{ inventory_hostname }}/*.conf"
+ notify:
+ - Restart nginx
diff --git a/playbooks/roles/nginx/templates/mail.int.mistymountainstherapy.com/http.auth.mistymountainstherapy.com.conf b/playbooks/roles/nginx/templates/mail.int.mistymountainstherapy.com/http.auth.mistymountainstherapy.com.conf
new file mode 100644
index 0000000..9a767f2
--- /dev/null
+++ b/playbooks/roles/nginx/templates/mail.int.mistymountainstherapy.com/http.auth.mistymountainstherapy.com.conf
@@ -0,0 +1,8 @@
+server {
+ listen 80;
+ server_name auth.mistymountainstherapy.com;
+
+ location / {
+ rewrite ^ https://auth.mistymountainstherapy.com$request_uri? permanent;
+ }
+}
diff --git a/playbooks/roles/nginx/templates/mail.int.mistymountainstherapy.com/http.mail.mistymountainstherapy.com.conf b/playbooks/roles/nginx/templates/mail.int.mistymountainstherapy.com/http.mail.mistymountainstherapy.com.conf
new file mode 100644
index 0000000..8f6d782
--- /dev/null
+++ b/playbooks/roles/nginx/templates/mail.int.mistymountainstherapy.com/http.mail.mistymountainstherapy.com.conf
@@ -0,0 +1,8 @@
+server {
+ listen 80;
+ server_name mail.mistymountainstherapy.com;
+
+ location / {
+ rewrite ^ https://mail.mistymountainstherapy.com$request_uri? permanent;
+ }
+}
diff --git a/playbooks/roles/nginx/templates/mail.int.mistymountainstherapy.com/https.auth.mistymountainstherapy.com.conf b/playbooks/roles/nginx/templates/mail.int.mistymountainstherapy.com/https.auth.mistymountainstherapy.com.conf
new file mode 100644
index 0000000..fe39586
--- /dev/null
+++ b/playbooks/roles/nginx/templates/mail.int.mistymountainstherapy.com/https.auth.mistymountainstherapy.com.conf
@@ -0,0 +1,23 @@
+server {
+ server_name auth.mistymountainstherapy.com;
+ listen 443 ssl;
+
+ ssl_dhparam /etc/nginx/dhparams.pem;
+
+ ssl_session_timeout 1d;
+ ssl_session_tickets off;
+ ssl_protocols TLSv1.2 TLSv1.3;
+ ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-CHACHA20-POLY1305;
+ ssl_prefer_server_ciphers off;
+
+ ssl_certificate /etc/letsencrypt/live/auth.mistymountainstherapy.com/fullchain.pem;
+ ssl_certificate_key /etc/letsencrypt/live/auth.mistymountainstherapy.com/privkey.pem;
+
+ location / {
+ proxy_pass https://localhost:8443;
+ proxy_redirect off;
+ proxy_set_header X-Real-IP $remote_addr;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header X-Forwarded-Host $server_name;
+ }
+}
diff --git a/playbooks/roles/nginx/templates/mail.int.mistymountainstherapy.com/https.mail.mistymountainstherapy.com.conf b/playbooks/roles/nginx/templates/mail.int.mistymountainstherapy.com/https.mail.mistymountainstherapy.com.conf
new file mode 100644
index 0000000..2a6a7bc
--- /dev/null
+++ b/playbooks/roles/nginx/templates/mail.int.mistymountainstherapy.com/https.mail.mistymountainstherapy.com.conf
@@ -0,0 +1,21 @@
+server {
+ server_name mail.mistymountainstherapy.com;
+ listen 443 ssl;
+
+ ssl_dhparam /etc/nginx/dhparams.pem;
+
+ ssl_session_timeout 1d;
+ ssl_session_tickets off;
+ ssl_protocols TLSv1.2 TLSv1.3;
+ ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-CHACHA20-POLY1305;
+ ssl_prefer_server_ciphers off;
+
+ ssl_certificate /etc/letsencrypt/live/mail.mistymountainstherapy.com/fullchain.pem;
+ ssl_certificate_key /etc/letsencrypt/live/mail.mistymountainstherapy.com/privkey.pem;
+
+ location / {
+ proxy_pass http://127.0.0.1:9002;
+ proxy_set_header X-Real-IP $remote_addr;
+ proxy_set_header Host $host;
+ }
+}
diff --git a/playbooks/roles/nginx/templates/www.int.mistymountainstherapy.com/http.mistymountainstherapy.com.conf b/playbooks/roles/nginx/templates/www.int.mistymountainstherapy.com/http.mistymountainstherapy.com.conf
new file mode 100644
index 0000000..fc6e8f6
--- /dev/null
+++ b/playbooks/roles/nginx/templates/www.int.mistymountainstherapy.com/http.mistymountainstherapy.com.conf
@@ -0,0 +1,8 @@
+server {
+ listen 80;
+ server_name mistymountainstherapy.com;
+
+ location / {
+ rewrite ^ https://mistymountainstherapy.com$request_uri? permanent;
+ }
+}
diff --git a/playbooks/roles/nginx/templates/www.int.mistymountainstherapy.com/http.www.mistymountainstherapy.com.conf b/playbooks/roles/nginx/templates/www.int.mistymountainstherapy.com/http.www.mistymountainstherapy.com.conf
new file mode 100644
index 0000000..d165e01
--- /dev/null
+++ b/playbooks/roles/nginx/templates/www.int.mistymountainstherapy.com/http.www.mistymountainstherapy.com.conf
@@ -0,0 +1,8 @@
+server {
+ listen 80;
+ server_name www.mistymountainstherapy.com;
+
+ location / {
+ rewrite ^ https://mistymountainstherapy.com$request_uri? permanent;
+ }
+}
diff --git a/playbooks/roles/nginx/templates/www.int.mistymountainstherapy.com/https.mistymountainstherapy.com.conf b/playbooks/roles/nginx/templates/www.int.mistymountainstherapy.com/https.mistymountainstherapy.com.conf
new file mode 100644
index 0000000..6cdd63f
--- /dev/null
+++ b/playbooks/roles/nginx/templates/www.int.mistymountainstherapy.com/https.mistymountainstherapy.com.conf
@@ -0,0 +1,21 @@
+server {
+ server_name mistymountainstherapy.com;
+ listen 443 ssl;
+
+ ssl_dhparam /etc/nginx/dhparams.pem;
+
+ ssl_session_timeout 1d;
+ ssl_session_tickets off;
+ ssl_protocols TLSv1.2 TLSv1.3;
+ ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-CHACHA20-POLY1305;
+ ssl_prefer_server_ciphers off;
+
+ ssl_certificate /etc/letsencrypt/live/mistymountainstherapy.com/fullchain.pem;
+ ssl_certificate_key /etc/letsencrypt/live/mistymountainstherapy.com/privkey.pem;
+
+ location / {
+ proxy_pass http://127.0.0.1:8821;
+ proxy_set_header X-Real-IP $remote_addr;
+ proxy_set_header Host $host;
+ }
+}
diff --git a/playbooks/roles/nginx/templates/www.int.mistymountainstherapy.com/https.www.mistymountainstherapy.com.conf b/playbooks/roles/nginx/templates/www.int.mistymountainstherapy.com/https.www.mistymountainstherapy.com.conf
new file mode 100644
index 0000000..c6ae568
--- /dev/null
+++ b/playbooks/roles/nginx/templates/www.int.mistymountainstherapy.com/https.www.mistymountainstherapy.com.conf
@@ -0,0 +1,19 @@
+server {
+ server_name www.mistymountainstherapy.com;
+ listen 443 ssl;
+
+ ssl_dhparam /etc/nginx/dhparams.pem;
+
+ ssl_session_timeout 1d;
+ ssl_session_tickets off;
+ ssl_protocols TLSv1.2 TLSv1.3;
+ ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-CHACHA20-POLY1305;
+ ssl_prefer_server_ciphers off;
+
+ ssl_certificate /etc/letsencrypt/live/www.mistymountainstherapy.com/fullchain.pem;
+ ssl_certificate_key /etc/letsencrypt/live/www.mistymountainstherapy.com/privkey.pem;
+
+ location / {
+ rewrite ^ https://mistymountainstherapy.com$request_uri? permanent;
+ }
+}
diff --git a/playbooks/roles/wireguard-endpoint/files/.gitignore b/playbooks/roles/wireguard-endpoint/files/.gitignore
new file mode 100644
index 0000000..5571ff7
--- /dev/null
+++ b/playbooks/roles/wireguard-endpoint/files/.gitignore
@@ -0,0 +1 @@
+wireguard.cfg
diff --git a/playbooks/roles/wireguard-endpoint/tasks/main.yml b/playbooks/roles/wireguard-endpoint/tasks/main.yml
new file mode 100644
index 0000000..ed11411
--- /dev/null
+++ b/playbooks/roles/wireguard-endpoint/tasks/main.yml
@@ -0,0 +1,40 @@
+---
+
+- name: Install wireguard
+ ansible.builtin.apt:
+ name:
+ - wireguard
+ state: latest
+
+- name: Copy config
+ ansible.builtin.copy:
+ src: wireguard.cfg
+ dest: /etc/wireguard/simponic.conf
+ owner: root
+ group: root
+ mode: 0600
+
+- name: Enable and persist ip forwarding
+ ansible.posix.sysctl:
+ name: net.ipv4.ip_forward
+ value: "1"
+ state: present
+ sysctl_set: true
+ reload: true
+
+- name: Allow wireguard endpoint ufw
+ community.general.ufw:
+ rule: allow
+ port: '51820'
+ proto: 'udp'
+
+- name: Start wireguard and enable on boot
+ ansible.builtin.systemd:
+ name: wg-quick@simponic
+ enabled: true
+ state: started
+
+- name: Hotreload wireguard
+ ansible.builtin.shell: >
+ bash -c
+ "wg syncconf mmtmesh <(wg-quick strip mmtmesh)"
diff --git a/playbooks/roles/wireguard-mesh/tasks/main.yml b/playbooks/roles/wireguard-mesh/tasks/main.yml
new file mode 100644
index 0000000..9f9419f
--- /dev/null
+++ b/playbooks/roles/wireguard-mesh/tasks/main.yml
@@ -0,0 +1,80 @@
+---
+
+- name: Install wireguard
+ ansible.builtin.apt:
+ name:
+ - wireguard
+ - ufw
+ state: present
+
+- name: Get node IPs from DNS records
+ ansible.builtin.shell: "dig +short {{ item }} | tail -n1"
+ register: wireguard_node_ip
+ changed_when: false
+ with_items: "{{ groups['wireguard-mesh'] }}"
+
+- name: Massage node ips
+ ansible.builtin.set_fact: >
+ wireguard_node_ips={{ wireguard_node_ips|default({})
+ | combine( {item.item: item.stdout} ) }}
+ with_items: "{{ wireguard_node_ip.results }}"
+
+- name: Allow wireguard mesh ufw
+ community.general.ufw:
+ rule: allow
+ port: "{{ wireguard_listen_port }}"
+ proto: 'udp'
+
+- name: Generate Wireguard keypair
+ ansible.builtin.shell: >
+ wg genkey | tee /etc/wireguard/privatekey
+ | wg pubkey | tee /etc/wireguard/publickey
+ args:
+ creates: /etc/wireguard/privatekey
+
+- name: Register private key
+ ansible.builtin.shell: cat /etc/wireguard/privatekey
+ register: wireguard_private_key
+ changed_when: false
+
+- name: Register public key
+ ansible.builtin.shell: cat /etc/wireguard/publickey
+ register: wireguard_public_key
+ changed_when: false
+
+- name: Generate preshared keys
+ ansible.builtin.shell: "wg genpsk > /etc/wireguard/psk-{{ item }}"
+ args:
+ creates: "/etc/wireguard/psk-{{ item }}"
+ when: inventory_hostname < item
+ with_items: "{{ groups['wireguard-mesh'] }}"
+
+- name: Register preshared key
+ ansible.builtin.shell: "cat /etc/wireguard/psk-{{ item }}"
+ register: wireguard_preshared_key
+ changed_when: false
+ when: inventory_hostname < item
+ with_items: "{{ groups['wireguard-mesh'] }}"
+
+- name: Massage preshared keys
+ ansible.builtin.set_fact: >
+ wireguard_preshared_keys={{ wireguard_preshared_keys|default({})
+ | combine( {item.item: item.stdout} ) }}
+ when: item.skipped is not defined
+ with_items: "{{ wireguard_preshared_key.results }}"
+
+- name: Build config
+ ansible.builtin.template:
+ src: mmtmesh.conf.j2
+ dest: /etc/wireguard/mmtmesh.conf
+ owner: root
+ mode: 0640
+
+- name: Enable wireguard
+ ansible.builtin.systemd:
+ name: wg-quick@mmtmesh
+ enabled: true
+ state: started
+
+- name: Hotreload wireguard
+ ansible.builtin.shell: >
+ bash -c
+ "wg syncconf mmtmesh <(wg-quick strip mmtmesh)"
diff --git a/playbooks/roles/wireguard-mesh/templates/mmtmesh.conf.j2 b/playbooks/roles/wireguard-mesh/templates/mmtmesh.conf.j2
new file mode 100644
index 0000000..aa15d23
--- /dev/null
+++ b/playbooks/roles/wireguard-mesh/templates/mmtmesh.conf.j2
@@ -0,0 +1,17 @@
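+# Full-mesh config: one [Peer] block per other member of the wireguard-mesh
+# group; the preshared key is generated on the lexicographically smaller
+# hostname and looked up from the peer's hostvars otherwise.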
+[Interface]
+Address={{ wireguard_node_ips[inventory_hostname] }}/32
+SaveConfig=true
+ListenPort={{ wireguard_listen_port }}
+PrivateKey={{ wireguard_private_key.stdout }}
+
+{% for peer in groups['wireguard-mesh'] %}
+{% if peer != inventory_hostname %}
+
+[Peer]
+PublicKey={{ hostvars[peer].wireguard_public_key.stdout }}
+PresharedKey={{ wireguard_preshared_keys[peer] if inventory_hostname < peer else hostvars[peer].wireguard_preshared_keys[inventory_hostname] }}
+AllowedIPs={{ wireguard_node_ips[peer] }}/32
+Endpoint={{ peer | replace('.int.', '.pub.') }}:{{ wireguard_listen_port }}
+
+{% endif %}
+{% endfor %}
diff --git a/requirements.yml b/requirements.yml
new file mode 100644
index 0000000..f806655
--- /dev/null
+++ b/requirements.yml
@@ -0,0 +1,4 @@
+---
+collections:
+ - community.general
+ - community.docker
+ - ansible.posix
diff --git a/secrets.txt b/secrets.txt
new file mode 100644
index 0000000..d0e021d
--- /dev/null
+++ b/secrets.txt
@@ -0,0 +1,12 @@
+cloudflare_api_token
+cloudflare_zone_id
+certbot_email
+email_ldap_api_token
+roundcube_oauth2_client_id
+roundcube_oauth2_client_basic_secret
+mmt_hcaptcha_secret
+mmt_from_email
+mmt_smtp_server
+mmt_smtp_password
+mmt_smtp_username
+mmt_form_to_email