initial commit

Nikos Papadakis 2025-02-18 23:33:30 +02:00 committed by nikos
commit e563d395df
30 changed files with 1563 additions and 0 deletions

1
.gitattributes vendored Normal file
@@ -0,0 +1 @@
files/attic.tar.xz filter=lfs diff=lfs merge=lfs -text

23
caddy_services.yml Normal file
@@ -0,0 +1,23 @@
---
- name: Caddy reverse proxy
hosts: ulna
tasks:
- name: Ensure Caddy is installed
ansible.builtin.apt:
name: caddy
state: present
become: true
- name: Reverse proxies for ulna
when: inventory_hostname in groups['ulna']
ansible.builtin.template:
src: templates/Caddyfile.ulna.j2
dest: /etc/caddy/Caddyfile
become: true
- name: Restart caddy
ansible.builtin.systemd:
state: restarted
name: caddy
become: true

25
docker_mailserver.yml Normal file
@@ -0,0 +1,25 @@
---
- name: docker-mailserver
hosts: ulna
tasks:
- name: Create docker-mailserver directory
ansible.builtin.file:
path: docker-mailserver
state: directory
- name: Copy docker-compose.yml
ansible.builtin.template:
src: templates/docker-mailserver/docker-compose.yml.j2
dest: docker-mailserver/docker-compose.yml
- name: Copy mailserver.env
ansible.builtin.template:
src: templates/docker-mailserver/mailserver.env.j2
dest: docker-mailserver/mailserver.env
- name: Start container
community.docker.docker_compose:
project_src: docker-mailserver
pull: true
become: true

30
group_vars/all/vars.yml Normal file
@@ -0,0 +1,30 @@
# docker-mailserver
mail_host: "{{ vault_mail_host }}"
# Gitea
forgejo_host: "{{ vault_forgejo_host }}"
# Etebase
etebase_host: "{{ vault_etebase_host }}"
etebase_home_dir: "/home/etebase"
etebase_socket_file: "/var/run/etebase/etebase.sock"
# Woodpecker-CI
woodpecker_port: 8000
woodpecker_host: "{{ vault_woodpecker_host }}"
woodpecker_agent_secret: "{{ vault_woodpecker_agent_secret }}"
woodpecker_client_id: "{{ vault_woodpecker_client_id }}"
woodpecker_client_secret: "{{ vault_woodpecker_client_secret }}"
# Attic
attic_token: "{{ vault_attic_token }}"
s3_bucket: "prymn-cache"
s3_endpoint: "https://75178f9eca227dea51b3db4db2c15a5a.r2.cloudflarestorage.com"
s3_access_key_id: "{{ vault_s3_access_key_id }}"
s3_secret_access_key: "{{ vault_s3_secret_access_key }}"
# Wireguard
wireguard_network_mask: "24"
# protonmail-bridge-docker
protonmail_bridge_docker_image: "{{ vault_protonmail_bridge_docker_image }}"

61
group_vars/all/vault.yml Normal file
@@ -0,0 +1,61 @@
$ANSIBLE_VAULT;1.1;AES256
38383539666264636531373766316330386436363837343337333533653865353230633936646436
3737623161383635383062363331353033386465363732620a646639616531643138323239346131
31323064653338633361663131643834663438383536363262623435366663393939613236326337
3765666461613132360a613263323362313163613732633338343032383466353161633539653166
61343731393931366630366462643536623636333831306234363838666361326538633265353437
34333132643734666466393963383164376634633061666535326233326533643364643230323663
66346330366331336537333231376337353061363962316130373530323137663537353665336138
61393966313361336537623662653439303061623063626663386531636561383133346235336639
34383337363538646132323964613337373537316562373238313161616265653065303734623763
32343662383665373432393431616534366535313465613966666435336138396161313130346239
31616565626630303861336562343436323831353161666231613732623062326462393563353732
34383736653964316539656562613737356663356262383866623837323630613064393363356132
32373733363534653538666436376234373262633035333935323034313730666434646565363230
35626136343238343232346262643432323132353136356432393033363562396338663433383630
61313762323032326236316130393432383564623062306239336331643063373534373938666232
38303631363562396265633937643131373165646134366530353935393364626632393063656230
35613735313464313235323030613733326636373530313831636262346230633230383437663931
34393639613663633538336231666339313464386332333338613731373666393734303130613933
65626662346465353464353062353863336135306363663163623334343962326431653161303038
35373035303063636166326331323963623234396563306637346665353535373962386533323633
31653566353538613965633264623933346530303930653736386332323935336630626232383764
33323864386230626432666234363737626466363330306366346435363839396365346562336531
33623165346634313430366236636563616664303631653962356338326465613866393238303862
37646132393362656666613739343131376361373032663164323433646234653633346134323062
38396239323563353532353035343563663031646633613963306266373233306431373261633838
37613237346566666263356430393664346338343539346262643764613565663763626465306461
38386462336236663831393166383336333031363934646237616462393832303234343937343339
65386232373735303566663062666135643661663064623039386161393334396130376264613132
35626363613465313636623164656432303532333833366563303330653135313762373731323839
33336163386331613930386536343637616332656664613735333139663138393262623439316530
65643664373562666438626138313930623239363739313364356635376433666165393566343635
65326437343236636463393739333835333333643637393462323366363264313435346261653430
37666437346466343466386234393030376335633233386334383032653361306639663730643065
65316335316330613864353136626535333132386530323538326430623165336339393962666661
37373962623037366339386133623032623334643636343638626633373431363035336165653364
63656430353130366238353137623638646632373535393634616332303763306533313135653633
64616361343166303431366536383465373738666664363637386130656139653564393033393465
39633938333237663466306130383835643566383166306161353932306362623663663965326333
61653833643162363838616438326439323030326164643966656239623262306235393964363234
37346637643932616537353830396565656462323766323439386634613363626566326230343839
63626431616139623039663565363761653364353437626337396338326461343561323633666536
62646666623431386266323137346135396266373838336339343436663163613362366637633436
31313837373264383234333961643334623734393437623535386564336561613736383433363462
61366634633061313433363765623762383361626238613132353663393964386137323736316137
32653834303462316665613564646635653531343532333665653962353962613633323761636135
65616162613537376234313731393930313230336536633833626236646635326231316330326431
62623833383239366233376161393236623139353530643733616332313434353235373564356530
65313164643864613265663162623666366436643665383634303239353032376331383565333566
64656564613065306330613965323464313034376564353333343062326632313732313132316337
63373264663938643837653863353463366530653636376363373735343061323764323461343765
61616235656633643436383738643836343234643665333962353337353366346133313636353464
33623734396236623239656432386432396134306530333864623732656133396261623264663837
37653763333235336232613135366562626335356236626433316638376532376639353563306437
63376334316335633131376462643663623665393163613061323635623162356361383863646165
35646164336466613836326661663934643763343634383630613432336330313761356465626165
37326661356165613131323435626466323035326262366535616565666466646362376238306132
39613463333632326635656238373535613131363266643634356539353662393961366166346330
33323535383432643335653739616563383436326261306637333261363066646431626132393165
65376563363739303936393661663636356532313933336261366562393466363335363264623435
32613562663031656231

@@ -0,0 +1,2 @@
ansible_become_pass: "{{ ulna_become_pass | default(omit) }}"
wireguard_ip: "10.0.42.1"

49
install_act_runner.yml Normal file
@@ -0,0 +1,49 @@
---
- name: Install act runner for gitea
hosts: ulna
tasks:
- name: Download the binary
become: true
ansible.builtin.get_url:
url: https://gitea.com/gitea/act_runner/releases/download/v0.2.0/act_runner-0.2.0-linux-arm64
dest: /usr/local/bin/act_runner
mode: '755'
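    # The '!' password below locks password-based logins for the runner account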
- name: Create the runner user
become: true
ansible.builtin.user:
name: runner
group: docker
comment: Gitea runner
shell: /bin/bash
password: '!'
home: /home/runner
create_home: true
- name: Register the runner
ansible.builtin.command: act_runner register --no-interactive --instance https://{{ gitea_host }} --token {{ act_runner_token|quote }}
become: true
become_user: runner
args:
chdir: /home/runner
ignore_errors: true
- name: Service file
become: true
ansible.builtin.template:
src: templates/gitea/runner.service.j2
dest: /etc/systemd/system/runner.service
- name: Make sure docker is running
ansible.builtin.systemd:
state: started
name: docker
- name: Re(start) the systemd service
become: true
ansible.builtin.systemd:
state: restarted
name: runner
enabled: true
daemon_reload: true

45
install_attic.yml Normal file
@@ -0,0 +1,45 @@
---
- name: Install attic
hosts: ulna
tasks:
- name: Install nix libraries
ansible.builtin.apt:
name: nix-bin
state: absent
become: true
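    # files/attic.tar.xz (tracked via Git LFS, see .gitattributes) presumably carries the attic build that the commented-out unarchive step below would install under /usr/local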
# - name: Unarchive
# ansible.builtin.unarchive:
# src: files/attic.tar.xz
# dest: /usr/local
# become: true
- name: Install service file
ansible.builtin.template:
src: templates/attic/attic.service.j2
dest: /etc/systemd/system/attic.service
become: true
- name: Install config file
ansible.builtin.template:
src: templates/attic/attic-server.toml.j2
dest: /etc/attic-server.toml
become: true
- name: Create share directory
ansible.builtin.file:
path: /usr/local/share/attic
mode: "0700"
owner: root
group: root
state: directory
become: true
- name: Restart service
ansible.builtin.systemd:
state: stopped
name: attic
enabled: false
daemon_reload: true
become: true

29
install_docker.yml Normal file
@@ -0,0 +1,29 @@
---
- name: Install docker
hosts: ulna
tasks:
- name: Setup
ansible.builtin.command: dpkg --print-architecture
register: architecture
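    # dpkg --print-architecture is registered so the apt repository entry below can pin the host architecture (e.g. arm64)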
- name: Add Docker GPG key
become: true
ansible.builtin.shell: wget -O- https://download.docker.com/linux/debian/gpg | gpg --dearmor | sudo tee /etc/apt/trusted.gpg.d/docker.gpg
- name: Add Docker repository
become: true
ansible.builtin.apt_repository:
repo: "deb [arch=\"{{ architecture.stdout_lines[0] }}\" signed-by=/etc/apt/trusted.gpg.d/docker.gpg] https://download.docker.com/linux/debian {{ ansible_distribution_release }} stable"
state: present
- name: Install docker
become: true
ansible.builtin.apt:
pkg:
- docker-ce
- docker-ce-cli
- containerd.io
- docker-compose-plugin
state: latest
update_cache: true

105
install_etebase.yml Normal file
@@ -0,0 +1,105 @@
---
- name: Install Etebase server
hosts: ulna
tasks:
- name: Python3 and virtualenv
ansible.builtin.apt:
name: python3-virtualenv
state: present
become: true
- name: Create etebase group
ansible.builtin.group:
name: etebase
state: present
become: true
- name: Create etebase user
ansible.builtin.user:
name: etebase
group: etebase
comment: Etebase user
password: '!'
system: true
home: "{{ etebase_home_dir }}"
state: present
become: true
- name: Clone etebase repo
ansible.builtin.git:
repo: "https://github.com/etesync/server.git"
dest: "{{ etebase_home_dir }}/etebase"
single_branch: true
force: true
become: true
become_user: etebase
- name: Install etebase python requirements
ansible.builtin.pip:
requirements: "{{ etebase_home_dir }}/etebase/requirements.txt"
virtualenv: "{{ etebase_home_dir }}/venv"
state: present
become: true
become_user: etebase
- name: Install uvicorn inside venv
ansible.builtin.pip:
name: "uvicorn[standard]"
virtualenv: "{{ etebase_home_dir }}/venv"
state: present
become: true
become_user: etebase
- name: Setup configuration
ansible.builtin.template:
src: "templates/etebase-server.ini.j2"
dest: "{{ etebase_home_dir }}/etebase/etebase-server.ini"
mode: "0640"
group: etebase
owner: etebase
become: true
become_user: etebase
- name: Create directories
ansible.builtin.file:
path: "{{ item.dir }}"
mode: "{{ item.mode }}"
owner: etebase
group: "{{ item.group | default('etebase') }}"
state: directory
loop:
- { dir: "{{ etebase_home_dir }}/media", mode: "0750" }
- { dir: "{{ etebase_home_dir }}/secret", mode: "0750" }
- { dir: "{{ etebase_home_dir }}/static", mode: "0750", group: "www-data" }
- { dir: "/var/run/etebase", mode: "0750", group: "www-data" }
become: true
- name: Run manage.py migrate
ansible.builtin.command:
cmd: "{{ etebase_home_dir }}/venv/bin/python3 ./manage.py migrate"
chdir: "{{ etebase_home_dir }}/etebase"
become: true
become_user: etebase
# FIXME: Stuck?
# - name: Run manage.py collectstatic
# ansible.builtin.command:
# cmd: "{{ etebase_home_dir }}/venv/bin/python3 ./manage.py collectstatic"
# chdir: "{{ etebase_home_dir }}/etebase"
# become: true
# become_user: etebase
- name: Setup systemd service
ansible.builtin.template:
src: "templates/etebase.service.j2"
dest: "/etc/systemd/system/etebase.service"
become: true
- name: (Re)start the systemd service
ansible.builtin.systemd:
state: restarted
name: etebase
enabled: true
daemon_reload: true
become: true

72
install_forgejo.yml Normal file
@@ -0,0 +1,72 @@
---
- name: Install and deploy forgejo
hosts: ulna
tasks:
- name: Ensure git is installed
ansible.builtin.apt:
name: git
state: present
become: true
- name: Create git group
ansible.builtin.group:
name: git
state: present
become: true
- name: Create git user
ansible.builtin.user:
name: git
comment: Git user
group: git
shell: /bin/bash
password: '!'
home: /home/git
create_home: true
become: true
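    # /etc/gitea is kept root:git 0770 so Forgejo can write app.ini during initial setup; it can be tightened afterwards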
- name: Create required directories
ansible.builtin.file:
path: "{{ item.dir }}"
mode: "{{ item.mode }}"
owner: "{{ item.owner }}"
group: "{{ item.group }}"
state: directory
loop:
- { dir: "/var/lib/gitea/custom", owner: "git", group: "git", mode: "0750" }
- { dir: "/var/lib/gitea/data", owner: "git", group: "git", mode: "0750" }
- { dir: "/var/lib/gitea/log", owner: "git", group: "git", mode: "0750" }
- { dir: "/etc/gitea", owner: "root", group: "git", mode: "0770" }
become: true
- name: Download gitea
ansible.builtin.get_url:
url: https://codeberg.org/forgejo/forgejo/releases/download/v9.0.2/forgejo-9.0.2-linux-arm64
checksum: sha256:4394e17de2b792f63b4b8d465c342e1023a916def2d33be7e9bf7d0ab32d11c2
dest: /usr/local/bin/forgejo
mode: '775'
owner: git
group: git
become: true
- name: Service file
ansible.builtin.template:
src: templates/forgejo/forgejo.service.j2
dest: /etc/systemd/system/forgejo.service
become: true
- name: robots.txt
ansible.builtin.template:
src: templates/forgejo/robots.txt.j2
dest: /var/lib/gitea/custom/robots.txt
owner: git
become: true
- name: (Re)start the systemd service
ansible.builtin.systemd:
state: restarted
name: forgejo
enabled: true
daemon_reload: true
become: true

48
install_syncthing.yml Normal file
@@ -0,0 +1,48 @@
---
- name: Install and deploy syncthing
hosts: ulna
tasks:
- name: Add PGP key
ansible.builtin.get_url:
url: https://syncthing.net/release-key.gpg
dest: /usr/share/keyrings/syncthing-archive-keyring.gpg
become: true
- name: Add APT repository
ansible.builtin.apt_repository:
repo: deb [signed-by=/usr/share/keyrings/syncthing-archive-keyring.gpg] https://apt.syncthing.net/ syncthing stable
state: present
become: true
- name: Install syncthing
ansible.builtin.apt:
name: syncthing
state: present
update_cache: true
become: true
- name: Create syncthing group
ansible.builtin.group:
name: syncthing
state: present
become: true
- name: Create syncthing user
ansible.builtin.user:
name: syncthing
group: syncthing
comment: Syncthing user
shell: /bin/bash
password: '!'
home: /home/syncthing
create_home: true
become: true
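    # syncthing@syncthing runs the template unit shipped by the syncthing package as the dedicated syncthing user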
- name: (Re)start the syncthing service
ansible.builtin.systemd:
state: restarted
name: syncthing@syncthing
enabled: true
become: true

24
install_woodpecker.yml Normal file
@@ -0,0 +1,24 @@
---
- name: Install Woodpecker CI
hosts: ulna
tasks:
    - name: Ensure directory exists
become: true
ansible.builtin.file:
path: /usr/local/etc/woodpecker
state: directory
- name: Copy docker-compose.yml
become: true
ansible.builtin.template:
src: woodpecker.yml
dest: /usr/local/etc/woodpecker/docker-compose.yml
mode: "0600"
- name: Start woodpecker with docker-compose
community.docker.docker_compose_v2:
project_src: /usr/local/etc/woodpecker
pull: always
register: output
become: true

10
inventory.yml Normal file
@@ -0,0 +1,10 @@
$ANSIBLE_VAULT;1.1;AES256
63383865353331316465646437666363343664626434643739393438303730393631393432343637
3733346231383266656534303832633034633732626561390a613162336331666664343736653738
31623637366437303533373737353131333132393734376261313739613263343936633364616333
3666623632636534390a383965643035633062396265623534333733313864326566346530313532
63313566316465376561623938326139306237633131666232656634623361306333373761323266
38376437613930303163393364656435396538616334666334353231356131313537393164663838
30653966373633343231656235336639626631623337303330616232653430383132343634626239
37366231366632353162393233646233343231316561363136633062353636333363663639623333
3939

15
proton-bridge.yml Normal file
@@ -0,0 +1,15 @@
---
- hosts: ulna
tasks:
- name: Copy service file
become: true
ansible.builtin.template:
src: protonmail-bridge-docker.service.j2
dest: /etc/systemd/system/protonmail-bridge-docker.service
- name: Enable service
become: true
ansible.builtin.service:
state: started
daemon_reload: true
name: protonmail-bridge-docker

20
templates/Caddyfile.ulna.j2 Normal file
@@ -0,0 +1,20 @@
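# Serving an empty response for {{ mail_host }} still makes Caddy obtain a TLS certificate for it, which docker-mailserver mounts from Caddy's data directory (see templates/docker-mailserver/docker-compose.yml.j2)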
{{ mail_host }} {
respond ""
}
{{ forgejo_host }} {
reverse_proxy localhost:3000
}
{{ woodpecker_host }} {
reverse_proxy localhost:8000
}
{{ etebase_host }} {
route {
file_server /static/* {
root {{ etebase_home_dir }}
}
reverse_proxy unix/{{ etebase_socket_file }}
}
}

136
templates/attic/attic-server.toml.j2 Normal file
@@ -0,0 +1,136 @@
# Socket address to listen on
listen = "[::]:8080"
# Allowed `Host` headers
#
# This _must_ be configured for production use. If unconfigured or the
# list is empty, all `Host` headers are allowed.
allowed-hosts = []
# The canonical API endpoint of this server
#
# This is the endpoint exposed to clients in `cache-config` responses.
#
# This _must_ be configured for production use. If not configured, the
# API endpoint is synthesized from the client's `Host` header which may
# be insecure.
#
# The API endpoint _must_ end with a slash (e.g., `https://domain.tld/attic/`
# not `https://domain.tld/attic`).
#api-endpoint = "https://your.domain.tld/"
# Whether to soft-delete caches
#
# If this is enabled, caches are soft-deleted instead of actually
# removed from the database. Note that soft-deleted caches cannot
# have their names reused as long as the original database records
# are there.
#soft-delete-caches = false
# Whether to require fully uploading a NAR if it exists in the global cache.
#
# If set to false, simply knowing the NAR hash is enough for
# an uploader to gain access to an existing NAR in the global
# cache.
#require-proof-of-possession = true
# JWT signing token
#
# Set this to the Base64 encoding of some random data.
# You can also set it via the `ATTIC_SERVER_TOKEN_HS256_SECRET_BASE64` environment
# variable.
token-hs256-secret-base64 = "{{ attic_token }}"
# Database connection
[database]
# Connection URL
#
# For production use it's recommended to use PostgreSQL.
url = "sqlite:///usr/local/share/attic/server.db"
# Whether to enable sending on periodic heartbeat queries
#
# If enabled, a heartbeat query will be sent every minute
#heartbeat = false
# File storage configuration
[storage]
# Storage type
#
# Can be "local" or "s3".
type = "local"
## Local storage
# The directory to store all files under
path = "/usr/local/share/attic/storage"
# ## S3 Storage (set type to "s3" and uncomment below)
# The AWS region
# region = "auto"
# The name of the bucket
# bucket = "{{ s3_bucket }}"
# Custom S3 endpoint
#
# Set this if you are using an S3-compatible object storage (e.g., Minio).
# endpoint = "{{ s3_endpoint }}"
# Credentials
#
# If unset, the credentials are read from the `AWS_ACCESS_KEY_ID` and
# `AWS_SECRET_ACCESS_KEY` environment variables.
# [storage.credentials]
# access_key_id = "{{ s3_access_key_id }}"
# secret_access_key = "{{ s3_secret_access_key }}"
# Data chunking
#
# Warning: If you change any of the values here, it will be
# difficult to reuse existing chunks for newly-uploaded NARs
# since the cutpoints will be different. As a result, the
# deduplication ratio will suffer for a while after the change.
[chunking]
# The minimum NAR size to trigger chunking
#
# If 0, chunking is disabled entirely for newly-uploaded NARs.
# If 1, all NARs are chunked.
nar-size-threshold = 65536 # chunk files that are 64 KiB or larger
# The preferred minimum size of a chunk, in bytes
min-size = 16384 # 16 KiB
# The preferred average size of a chunk, in bytes
avg-size = 65536 # 64 KiB
# The preferred maximum size of a chunk, in bytes
max-size = 262144 # 256 KiB
# Compression
[compression]
# Compression type
#
# Can be "none", "brotli", "zstd", or "xz"
type = "zstd"
# Compression level
#level = 8
# Garbage collection
[garbage-collection]
# The frequency to run garbage collection at
#
# By default it's 12 hours. You can use natural language
# to specify the interval, like "1 day".
#
# If zero, automatic garbage collection is disabled, but
# it can still be run manually with `atticd --mode garbage-collector-once`.
interval = "0"
# Default retention period
#
# Zero (default) means time-based garbage-collection is
# disabled by default. You can enable it on a per-cache basis.
#default-retention-period = "6 months"

10
templates/attic/attic.service.j2 Normal file
@@ -0,0 +1,10 @@
[Unit]
Description=Attic Daemon
[Service]
Type=simple
ExecStart=/usr/local/bin/atticd -f /etc/attic-server.toml
Restart=always
[Install]
WantedBy=multi-user.target

28
templates/docker-mailserver/docker-compose.yml.j2 Normal file
@@ -0,0 +1,28 @@
services:
mailserver:
image: ghcr.io/docker-mailserver/docker-mailserver:latest
container_name: mailserver
hostname: {{ mail_host }}
env_file: mailserver.env
ports:
- "25:25" # SMTP (explicit TLS => STARTTLS)
- "143:143" # IMAP4 (explicit TLS => STARTTLS)
- "465:465" # ESMTP (implicit TLS)
- "587:587" # ESMTP (explicit TLS => STARTTLS)
- "993:993" # IMAP4 (implicit TLS)
volumes:
- ./docker-data/dms/mail-data/:/var/mail/
- ./docker-data/dms/mail-state/:/var/mail-state/
- ./docker-data/dms/mail-logs/:/var/log/mail/
- ./docker-data/dms/config/:/tmp/docker-mailserver/
- /etc/localtime:/etc/localtime:ro
- /var/lib/caddy/.local/share/caddy/certificates/acme-v02.api.letsencrypt.org-directory/{{ mail_host }}/{{ mail_host }}.crt:/etc/letsencrypt/live/{{ mail_host }}/fullchain.pem
- /var/lib/caddy/.local/share/caddy/certificates/acme-v02.api.letsencrypt.org-directory/{{ mail_host }}/{{ mail_host }}.key:/etc/letsencrypt/live/{{ mail_host }}/privkey.pem
restart: always
stop_grace_period: 1m
# cap_add:
# - NET_ADMIN
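    # Healthy when Postfix is listening on the smtp port inside the container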
healthcheck:
test: "ss --listening --tcp | grep -P 'LISTEN.+:smtp' || exit 1"
timeout: 3s
retries: 0

609
templates/docker-mailserver/mailserver.env.j2 Normal file
@@ -0,0 +1,609 @@
# -----------------------------------------------
# --- Mailserver Environment Variables ----------
# -----------------------------------------------
# DOCUMENTATION FOR THESE VARIABLES IS FOUND UNDER
# https://docker-mailserver.github.io/docker-mailserver/latest/config/environment/
# -----------------------------------------------
# --- General Section ---------------------------
# -----------------------------------------------
# empty => uses the `hostname` command to get the mail server's canonical hostname
# => Specify a fully-qualified domainname to serve mail for. This is used for many of the config features so if you can't set your hostname (e.g. you're in a container platform that doesn't let you) specify it in this environment variable.
OVERRIDE_HOSTNAME=
# REMOVED in version v11.0.0! Use LOG_LEVEL instead.
DMS_DEBUG=0
# Set the log level for DMS.
# This is mostly relevant for container startup scripts and change detection event feedback.
#
# Valid values (in order of increasing verbosity) are: `error`, `warn`, `info`, `debug` and `trace`.
# The default log level is `info`.
LOG_LEVEL=info
# critical => Only show critical messages
# error => Only show erroneous output
# **warn** => Show warnings
# info => Normal informational output
# debug => Also show debug messages
SUPERVISOR_LOGLEVEL=
# 0 => mail state in default directories
# 1 => consolidate all states into a single directory (`/var/mail-state`) to allow persistence using docker volumes
ONE_DIR=1
# **empty** => use FILE
# LDAP => use LDAP authentication
# OIDC => use OIDC authentication (not yet implemented)
# FILE => use local files (this is used as the default)
ACCOUNT_PROVISIONER=
# empty => postmaster@domain.com
# => Specify the postmaster address
POSTMASTER_ADDRESS=postmaster@papadakis.xyz
# Check for updates on container start and then once a day
# If an update is available, a mail is sent to POSTMASTER_ADDRESS
# 0 => Update check disabled
# 1 => Update check enabled
ENABLE_UPDATE_CHECK=1
# Customize the update check interval.
# Number + Suffix. Suffix must be 's' for seconds, 'm' for minutes, 'h' for hours or 'd' for days.
UPDATE_CHECK_INTERVAL=1d
# Set different options for mynetworks option (can be overwritten in postfix-main.cf)
# **WARNING**: Adding the docker network's gateway to the list of trusted hosts, e.g. using the `network` or
# `connected-networks` option, can create an open relay
# https://github.com/docker-mailserver/docker-mailserver/issues/1405#issuecomment-590106498
# The same can happen for rootless podman. To prevent this, set the value to "none" or configure slirp4netns
# https://github.com/docker-mailserver/docker-mailserver/issues/2377
#
# none => Explicitly force authentication
# container => Container IP address only
# host => Add docker container network (ipv4 only)
# network => Add all docker container networks (ipv4 only)
# connected-networks => Add all connected docker networks (ipv4 only)
PERMIT_DOCKER=none
# Set the timezone. If this variable is unset, the container runtime will try to detect the time using
# `/etc/localtime`, which you can alternatively mount into the container. The value of this variable
# must follow the pattern `AREA/ZONE`, i.e. if you want to use Germany's time zone, use `Europe/Berlin`.
# You can lookup all available timezones here: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones#List
TZ=
# In case your network interface differs from 'eth0', e.g. when you are using HostNetworking in Kubernetes,
# you can set NETWORK_INTERFACE to whatever interface you want. This interface will then be used.
# - **empty** => eth0
NETWORK_INTERFACE=
# empty => modern
# modern => Enables TLSv1.2 and modern ciphers only. (default)
# intermediate => Enables TLSv1, TLSv1.1 and TLSv1.2 and broad compatibility ciphers.
TLS_LEVEL=
# Configures the handling of creating mails with forged sender addresses.
#
# **0** => (not recommended) Mail address spoofing allowed. Any logged in user may create email messages with a forged sender address (see also https://en.wikipedia.org/wiki/Email_spoofing).
# 1 => Mail spoofing denied. Each user may only send with his own or his alias addresses. Addresses with extension delimiters(http://www.postfix.org/postconf.5.html#recipient_delimiter) are not able to send messages.
SPOOF_PROTECTION=1
# Enables the Sender Rewriting Scheme. SRS is needed if your mail server acts as forwarder. See [postsrsd](https://github.com/roehling/postsrsd/blob/master/README.md#sender-rewriting-scheme-crash-course) for further explanation.
# - **0** => Disabled
# - 1 => Enabled
ENABLE_SRS=0
# Enables the OpenDKIM service.
# **1** => Enabled
# 0 => Disabled
ENABLE_OPENDKIM=0
# Enables the OpenDMARC service.
# **1** => Enabled
# 0 => Disabled
ENABLE_OPENDMARC=1
# Enables `policyd-spf` in Postfix's configuration. You will likely want to set this
# to `0` in case you're using Rspamd (`ENABLE_RSPAMD=1`).
#
# - 0 => Disabled
# - **1** => Enabled
ENABLE_POLICYD_SPF=1
# 1 => Enables POP3 service
# empty => disables POP3
ENABLE_POP3=
# Enables ClamAV, an anti-virus scanner.
# 1 => Enabled
# **0** => Disabled
ENABLE_CLAMAV=0
# Enables Rspamd
# **0** => Disabled
# 1 => Enabled
ENABLE_RSPAMD=1
# When `ENABLE_RSPAMD=1`, an internal Redis instance is enabled implicitly.
# This setting provides an opt-out to allow using an external instance instead.
# 0 => Disabled
# 1 => Enabled
ENABLE_RSPAMD_REDIS=1
# When enabled,
#
# 1. the "[autolearning][rspamd-autolearn]" feature is turned on;
# 2. the Bayes classifier will be trained when moving mails from or to the Junk folder (with the help of Sieve scripts).
#
# **0** => disabled
# 1 => enabled
RSPAMD_LEARN=1
# Controls whether the Rspamd Greylisting module is enabled.
# This module can further assist in avoiding spam emails by greylisting
# e-mails with a certain spam score.
#
# **0** => disabled
# 1 => enabled
RSPAMD_GREYLISTING=0
# Can be used to enable or disable the Hfilter group module.
#
# - 0 => Disabled
# - **1** => Enabled
RSPAMD_HFILTER=1
# Can be used to control the score when the HFILTER_HOSTNAME_UNKNOWN symbol applies. A higher score is more punishing. Setting it to 15 is equivalent to rejecting the email when the check fails.
#
# Default: 6
RSPAMD_HFILTER_HOSTNAME_UNKNOWN_SCORE=6
# Amavis content filter (used for ClamAV & SpamAssassin)
# 0 => Disabled
# 1 => Enabled
ENABLE_AMAVIS=1
# -1/-2/-3 => Only show errors
# **0** => Show warnings
# 1/2 => Show default informational output
# 3/4/5 => log debug information (very verbose)
AMAVIS_LOGLEVEL=0
# This enables DNS block lists in Postscreen.
# Note: Emails will be rejected if they don't pass the block list checks!
# **0** => DNS block lists are disabled
# 1 => DNS block lists are enabled
ENABLE_DNSBL=0
# If you enable Fail2Ban, don't forget to add the following lines to your `compose.yaml`:
# cap_add:
# - NET_ADMIN
# Otherwise, `nftables` won't be able to ban IPs.
ENABLE_FAIL2BAN=0
# Fail2Ban blocktype
# drop => drop packet (send NO reply)
# reject => reject packet (send ICMP unreachable)
FAIL2BAN_BLOCKTYPE=drop
# 1 => Enables Managesieve on port 4190
# empty => disables Managesieve
ENABLE_MANAGESIEVE=
# **enforce** => Allow other tests to complete. Reject attempts to deliver mail with a 550 SMTP reply, and log the helo/sender/recipient information. Repeat this test the next time the client connects.
# drop => Drop the connection immediately with a 521 SMTP reply. Repeat this test the next time the client connects.
# ignore => Ignore the failure of this test. Allow other tests to complete. Repeat this test the next time the client connects. This option is useful for testing and collecting statistics without blocking mail.
POSTSCREEN_ACTION=enforce
# empty => all daemons start
# 1 => only launch postfix smtp
SMTP_ONLY=
# Please read [the SSL page in the documentation](https://docker-mailserver.github.io/docker-mailserver/latest/config/security/ssl) for more information.
#
# empty => SSL disabled
# letsencrypt => Enables Let's Encrypt certificates
# custom => Enables custom certificates
# manual => Lets you manually specify locations of your SSL certificates for non-standard cases
# self-signed => Enables self-signed certificates
SSL_TYPE=letsencrypt
# These are only supported with `SSL_TYPE=manual`.
# Provide the path to your cert and key files that you've mounted access to within the container.
SSL_CERT_PATH=
SSL_KEY_PATH=
# Optional: A 2nd certificate can be supported as fallback (dual cert support), eg ECDSA with an RSA fallback.
# Useful for additional compatibility with older MTA and MUA (eg pre-2015).
SSL_ALT_CERT_PATH=
SSL_ALT_KEY_PATH=
# Set how many days a virusmail will stay on the server before being deleted
# empty => 7 days
VIRUSMAILS_DELETE_DELAY=
# Configure Postfix `virtual_transport` to deliver mail to a different LMTP client (default is a dovecot socket).
# Provide any valid URI. Examples:
#
# empty => `lmtp:unix:/var/run/dovecot/lmtp` (default, configured in Postfix main.cf)
# `lmtp:unix:private/dovecot-lmtp` (use socket)
# `lmtps:inet:<host>:<port>` (secure lmtp with starttls)
# `lmtp:<kopano-host>:2003` (use kopano as mailstore)
POSTFIX_DAGENT=
# Set the mailbox size limit for all users. If set to zero, the size will be unlimited (default).
#
# empty => 0
POSTFIX_MAILBOX_SIZE_LIMIT=
# See https://docker-mailserver.github.io/docker-mailserver/edge/config/user-management/accounts/#notes
# 0 => Dovecot quota is disabled
# 1 => Dovecot quota is enabled
ENABLE_QUOTAS=1
# Set the message size limit for all users. If set to zero, the size will be unlimited (not recommended!)
#
# empty => 10240000 (~10 MB)
POSTFIX_MESSAGE_SIZE_LIMIT=
# Mails larger than this limit won't be scanned.
# ClamAV must be enabled (ENABLE_CLAMAV=1) for this.
#
# empty => 25M (25 MB)
CLAMAV_MESSAGE_SIZE_LIMIT=
# Enables regular pflogsumm mail reports.
# This is a new option. The old REPORT options are still supported for backwards compatibility. If this is not set and reports are enabled with the old options, logrotate will be used.
#
# not set => No report
# daily_cron => Daily report for the previous day
# logrotate => Full report based on the mail log when it is rotated
PFLOGSUMM_TRIGGER=
# Recipient address for pflogsumm reports.
#
# not set => Use REPORT_RECIPIENT or POSTMASTER_ADDRESS
# => Specify the recipient address(es)
PFLOGSUMM_RECIPIENT=
# Sender address (`FROM`) for pflogsumm reports if pflogsumm reports are enabled.
#
# not set => Use REPORT_SENDER
# => Specify the sender address
PFLOGSUMM_SENDER=
# Interval for logwatch report.
#
# none => No report is generated
# daily => Send a daily report
# weekly => Send a report every week
LOGWATCH_INTERVAL=
# Recipient address for logwatch reports if they are enabled.
#
# not set => Use REPORT_RECIPIENT or POSTMASTER_ADDRESS
# => Specify the recipient address(es)
LOGWATCH_RECIPIENT=
# Sender address (`FROM`) for logwatch reports if logwatch reports are enabled.
#
# not set => Use REPORT_SENDER
# => Specify the sender address
LOGWATCH_SENDER=
# Defines who receives reports if they are enabled.
# **empty** => ${POSTMASTER_ADDRESS}
# => Specify the recipient address
REPORT_RECIPIENT=
# Defines who sends reports if they are enabled.
# **empty** => mailserver-report@${DOMAINNAME}
# => Specify the sender address
REPORT_SENDER=
# Changes the interval in which log files are rotated
# **weekly** => Rotate log files weekly
# daily => Rotate log files daily
# monthly => Rotate log files monthly
#
# Note: This Variable actually controls logrotate inside the container
# and rotates the log files depending on this setting. The main log output is
# still available in its entirety via `docker logs mail` (Or your
# respective container name). If you want to control logrotation for
# the Docker-generated logfile see:
# https://docs.docker.com/config/containers/logging/configure/
#
# Note: This variable can also determine the interval for Postfix's log summary reports, see [`PFLOGSUMM_TRIGGER`](#pflogsumm_trigger).
LOGROTATE_INTERVAL=weekly
# If enabled, employs `reject_unknown_client_hostname` to sender restrictions in Postfix's configuration.
#
# - **0** => Disabled
# - 1 => Enabled
POSTFIX_REJECT_UNKNOWN_CLIENT_HOSTNAME=0
# Choose TCP/IP protocols for postfix to use
# **all** => All possible protocols.
# ipv4 => Use only IPv4 traffic. Most likely you want this behind Docker.
# ipv6 => Use only IPv6 traffic.
#
# Note: More details at http://www.postfix.org/postconf.5.html#inet_protocols
POSTFIX_INET_PROTOCOLS=all
# Choose TCP/IP protocols for dovecot to use
# **all** => Listen on all interfaces
# ipv4 => Listen only on IPv4 interfaces. Most likely you want this behind Docker.
# ipv6 => Listen only on IPv6 interfaces.
#
# Note: More information at https://dovecot.org/doc/dovecot-example.conf
DOVECOT_INET_PROTOCOLS=all
# -----------------------------------------------
# --- SpamAssassin Section ----------------------
# -----------------------------------------------
ENABLE_SPAMASSASSIN=0
# deliver spam messages in the inbox (eventually tagged using SA_SPAM_SUBJECT)
SPAMASSASSIN_SPAM_TO_INBOX=1
# KAM is a 3rd party SpamAssassin ruleset, provided by the McGrail Foundation.
# If SpamAssassin is enabled, KAM can be used in addition to the default ruleset.
# - **0** => KAM disabled
# - 1 => KAM enabled
#
# Note: only has an effect if `ENABLE_SPAMASSASSIN=1`
ENABLE_SPAMASSASSIN_KAM=0
# spam messages will be moved to the Junk folder (SPAMASSASSIN_SPAM_TO_INBOX=1 required)
MOVE_SPAM_TO_JUNK=1
# add spam info headers if at, or above that level:
SA_TAG=2.0
# add 'spam detected' headers at that level
SA_TAG2=6.31
# triggers spam evasive actions
SA_KILL=10.0
# add tag to subject if spam detected
SA_SPAM_SUBJECT=***SPAM*****
# -----------------------------------------------
# --- Fetchmail Section -------------------------
# -----------------------------------------------
ENABLE_FETCHMAIL=0
# The interval to fetch mail in seconds
FETCHMAIL_POLL=300
# Enable or disable `getmail`.
#
# - **0** => Disabled
# - 1 => Enabled
ENABLE_GETMAIL=0
# The number of minutes for the interval. Min: 1; Max: 30.
GETMAIL_POLL=5
# -----------------------------------------------
# --- LDAP Section ------------------------------
# -----------------------------------------------
# A second container for the ldap service is necessary (i.e. https://github.com/osixia/docker-openldap)
# with the :edge tag, use ACCOUNT_PROVISIONER=LDAP
# empty => LDAP authentication is disabled
# 1 => LDAP authentication is enabled
ENABLE_LDAP=
# empty => no
# yes => LDAP over TLS enabled for Postfix
LDAP_START_TLS=
# If you are going to use the mailserver in combination with Docker Compose you can set the service name here
# empty => mail.domain.com
# Specify the dns-name/ip-address where the ldap-server
LDAP_SERVER_HOST=
# empty => ou=people,dc=domain,dc=com
# => e.g. LDAP_SEARCH_BASE=dc=mydomain,dc=local
LDAP_SEARCH_BASE=
# empty => cn=admin,dc=domain,dc=com
# => take a look at examples of SASL_LDAP_BIND_DN
LDAP_BIND_DN=
# **empty** => admin
# => Specify the password to bind against ldap
LDAP_BIND_PW=
# e.g. `"(&(mail=%s)(mailEnabled=TRUE))"`
# => Specify how ldap should be asked for users
LDAP_QUERY_FILTER_USER=
# e.g. `"(&(mailGroupMember=%s)(mailEnabled=TRUE))"`
# => Specify how ldap should be asked for groups
LDAP_QUERY_FILTER_GROUP=
# e.g. `"(&(mailAlias=%s)(mailEnabled=TRUE))"`
# => Specify how ldap should be asked for aliases
LDAP_QUERY_FILTER_ALIAS=
# e.g. `"(&(|(mail=*@%s)(mailalias=*@%s)(mailGroupMember=*@%s))(mailEnabled=TRUE))"`
# => Specify how ldap should be asked for domains
LDAP_QUERY_FILTER_DOMAIN=
# -----------------------------------------------
# --- Dovecot Section ---------------------------
# -----------------------------------------------
# empty => no
# yes => LDAP over TLS enabled for Dovecot
DOVECOT_TLS=
# e.g. `"(&(objectClass=PostfixBookMailAccount)(uniqueIdentifier=%n))"`
DOVECOT_USER_FILTER=
# e.g. `"(&(objectClass=PostfixBookMailAccount)(uniqueIdentifier=%n))"`
DOVECOT_PASS_FILTER=
# Define the mailbox format to be used
# default is maildir, supported values are: sdbox, mdbox, maildir
DOVECOT_MAILBOX_FORMAT=maildir
# empty => no
# yes => Allow bind authentication for LDAP
# https://wiki.dovecot.org/AuthDatabase/LDAP/AuthBinds
DOVECOT_AUTH_BIND=
# -----------------------------------------------
# --- Postgrey Section --------------------------
# -----------------------------------------------
ENABLE_POSTGREY=0
# greylist for N seconds
POSTGREY_DELAY=300
# delete entries older than N days since the last time that they have been seen
POSTGREY_MAX_AGE=35
# response when a mail is greylisted
POSTGREY_TEXT="Delayed by Postgrey"
# whitelist host after N successful deliveries (N=0 to disable whitelisting)
POSTGREY_AUTO_WHITELIST_CLIENTS=5
# -----------------------------------------------
# --- SASL Section ------------------------------
# -----------------------------------------------
ENABLE_SASLAUTHD=0
# empty => pam
# `ldap` => authenticate against ldap server
# `shadow` => authenticate against local user db
# `mysql` => authenticate against mysql db
# `rimap` => authenticate against imap server
# Note: can be a list of mechanisms like pam ldap shadow
SASLAUTHD_MECHANISMS=
# empty => None
# e.g. with SASLAUTHD_MECHANISMS rimap you need to specify the ip-address/servername of the imap server ==> xxx.xxx.xxx.xxx
SASLAUTHD_MECH_OPTIONS=
# empty => Use value of LDAP_SERVER_HOST
# Note: since version 10.0.0, you can specify a protocol here (like ldaps://); this deprecates SASLAUTHD_LDAP_SSL.
SASLAUTHD_LDAP_SERVER=
# empty => Use value of LDAP_BIND_DN
# specify an object with privileges to search the directory tree
# e.g. active directory: SASLAUTHD_LDAP_BIND_DN=cn=Administrator,cn=Users,dc=mydomain,dc=net
# e.g. openldap: SASLAUTHD_LDAP_BIND_DN=cn=admin,dc=mydomain,dc=net
SASLAUTHD_LDAP_BIND_DN=
# empty => Use value of LDAP_BIND_PW
SASLAUTHD_LDAP_PASSWORD=
# empty => Use value of LDAP_SEARCH_BASE
# specify the search base
SASLAUTHD_LDAP_SEARCH_BASE=
# empty => default filter `(&(uniqueIdentifier=%u)(mailEnabled=TRUE))`
# e.g. for active directory: `(&(sAMAccountName=%U)(objectClass=person))`
# e.g. for openldap: `(&(uid=%U)(objectClass=person))`
SASLAUTHD_LDAP_FILTER=
# empty => no
# yes => LDAP over TLS enabled for SASL
# If set to yes, the protocol in SASLAUTHD_LDAP_SERVER must be ldap:// or missing.
SASLAUTHD_LDAP_START_TLS=
# empty => no
# yes => Require and verify server certificate
# If yes you must/could specify SASLAUTHD_LDAP_TLS_CACERT_FILE or SASLAUTHD_LDAP_TLS_CACERT_DIR.
SASLAUTHD_LDAP_TLS_CHECK_PEER=
# File containing CA (Certificate Authority) certificate(s).
# empty => Nothing is added to the configuration
# Any value => Fills the `ldap_tls_cacert_file` option
SASLAUTHD_LDAP_TLS_CACERT_FILE=
# Path to directory with CA (Certificate Authority) certificates.
# empty => Nothing is added to the configuration
# Any value => Fills the `ldap_tls_cacert_dir` option
SASLAUTHD_LDAP_TLS_CACERT_DIR=
# Specify what password attribute to use for password verification.
# empty => Nothing is added to the configuration but the documentation says it is `userPassword` by default.
# Any value => Fills the `ldap_password_attr` option
SASLAUTHD_LDAP_PASSWORD_ATTR=
# empty => `bind` will be used as a default value
# `fastbind` => The fastbind method is used
# `custom` => The custom method uses userPassword attribute to verify the password
SASLAUTHD_LDAP_AUTH_METHOD=
# Specify the authentication mechanism for SASL bind
# empty => Nothing is added to the configuration
# Any value => Fills the `ldap_mech` option
SASLAUTHD_LDAP_MECH=
# -----------------------------------------------
# --- SRS Section -------------------------------
# -----------------------------------------------
# envelope_sender => Rewrite only envelope sender address (default)
# header_sender => Rewrite only header sender (not recommended)
# envelope_sender,header_sender => Rewrite both senders
# An email has an "envelope" sender (indicating the sending server) and a
# "header" sender (indicating who sent it). More strict SPF policies may require
# you to replace both instead of just the envelope sender.
SRS_SENDER_CLASSES=envelope_sender
# empty => Envelope sender will be rewritten for all domains
# provide comma separated list of domains to exclude from rewriting
SRS_EXCLUDE_DOMAINS=
# empty => generated when the image is built
# provide a secret to use in base64
# you may specify multiple keys, comma separated. the first one is used for
# signing and the remaining will be used for verification. this is how you
# rotate and expire keys
SRS_SECRET=
# -----------------------------------------------
# --- Default Relay Host Section ----------------
# -----------------------------------------------
# Setup relaying all mail through a default relay host
#
# empty => don't configure default relay host
# default host and optional port to relay all mail through
DEFAULT_RELAY_HOST=
# -----------------------------------------------
# --- Multi-Domain Relay Section ----------------
# -----------------------------------------------
# Setup relaying for multiple domains based on the domain name of the sender
# optionally uses usernames and passwords in postfix-sasl-password.cf and relay host mappings in postfix-relaymap.cf
#
# empty => don't configure relay host
# default host to relay mail through
RELAY_HOST=
# empty => 25
# default port to relay mail
RELAY_PORT=25
# empty => no default
# default relay username (if no specific entry exists in postfix-sasl-password.cf)
RELAY_USER=
# empty => no default
# password for default relay user
RELAY_PASSWORD=

36
templates/etebase-server.ini.j2 Normal file
@@ -0,0 +1,36 @@
[global]
secret_file = {{ etebase_home_dir }}/secret/secret.txt
debug = false
;Set the paths where data will be stored at
static_root = {{ etebase_home_dir }}/static
media_root = {{ etebase_home_dir }}/media
;Advanced options, only uncomment if you know what you're doing:
;static_url = /static/
;media_url = /user-media/
;language_code = en-us
;time_zone = UTC
;redis_uri = redis://localhost:6379
[allowed_hosts]
allowed_host1 = {{ etebase_host }}
[database]
engine = django.db.backends.sqlite3
name = {{ etebase_home_dir }}/secret/db.sqlite3
[database-options]
; Add engine-specific options here, such as postgresql parameter key words
;[ldap]
;server = <The URL to your LDAP server>
;search_base = <Your search base>
;filter = <Your LDAP filter query. '%%s' will be substituted for the username>
; In case a cache TTL of 1 hour is too short for you, set `cache_ttl` to the preferred
; amount of hours a cache entry should be viewed as valid:
;cache_ttl = 5
;bind_dn = <Your LDAP "user" to bind as. Must be a bind user>
; Either specify the password directly, or provide a password file
;bind_pw = <The password to authenticate as your bind user>
;bind_pw_file = /path/to/the/file.txt

13
templates/etebase.service.j2 Normal file
@@ -0,0 +1,13 @@
[Unit]
Description=Etebase Server
[Service]
Type=simple
WorkingDirectory={{ etebase_home_dir }}/etebase
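# uvicorn exposes the ASGI app on the unix socket that Caddy reverse-proxies to (see templates/Caddyfile.ulna.j2)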
ExecStart={{ etebase_home_dir }}/venv/bin/uvicorn etebase_server.asgi:application --uds {{ etebase_socket_file }}
User=etebase
Group=etebase
Restart=always
[Install]
WantedBy=multi-user.target

19
templates/forgejo/forgejo.service.j2 Normal file
@@ -0,0 +1,19 @@
[Unit]
Description=Forgejo
After=syslog.target
After=network.target
[Service]
# Uncomment the next line if you have repos with lots of files and get a HTTP 500 error because of that
# LimitNOFILE=524288:524288
RestartSec=2s
Type=notify
User=git
Group=git
WorkingDirectory=/var/lib/gitea/
ExecStart=/usr/local/bin/forgejo web --config /etc/gitea/app.ini --custom-path /usr/local/share/forgejo/custom
Restart=always
Environment=USER=git HOME=/home/git GITEA_WORK_DIR=/var/lib/gitea
[Install]
WantedBy=multi-user.target

2
templates/forgejo/robots.txt.j2 Normal file
@@ -0,0 +1,2 @@
User-agent: *
Disallow: /

14
templates/gitea/runner.service.j2 Normal file
@@ -0,0 +1,14 @@
[Unit]
Description=Act Runner (Gitea runner)
After=syslog.target
After=network.target
[Service]
Type=simple
User=runner
WorkingDirectory=/home/runner
ExecStart=/usr/local/bin/act_runner daemon
Restart=always
[Install]
WantedBy=multi-user.target

10
templates/protonmail-bridge-docker.service.j2 Normal file
@@ -0,0 +1,10 @@
[Unit]
Description=Protonmail Bridge
[Service]
Type=simple
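# The bridge's SMTP (1025) and IMAP (1143) ports are published only on the WireGuard address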
ExecStart=docker run --restart unless-stopped -v protonmail:/root -p {{ wireguard_ip }}:1025:25 -p {{ wireguard_ip }}:1143:143 {{ protonmail_bridge_docker_image }}
Restart=no
[Install]
WantedBy=multi-user.target

31
templates/wireguard/wg0.netdev.j2 Normal file
@@ -0,0 +1,31 @@
[NetDev]
Name=wg0
Kind=wireguard
Description=Wireguard tunnel wg0
[WireGuard]
ListenPort=51820
PrivateKey={{ wireguard_private_key.stdout }}
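{# Every other host in the wireguard inventory group becomes a peer, using its registered public key and its /32 WireGuard address #}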
{% for peer in groups['wireguard'] %}
{% if peer != inventory_hostname %}
[WireGuardPeer]
PublicKey={{ hostvars[peer].wireguard_public_key.stdout }}
AllowedIPs={{ hostvars[peer].wireguard_ip }}/32
PersistentKeepalive=25
{% endif %}
{% endfor %}
# ouroboros
[WireGuardPeer]
PublicKey={{ ouroboros_wireguard_public_key }}
AllowedIPs={{ ouroboros_wireguard_ip }}/32
PersistentKeepalive=25
# mobile
[WireGuardPeer]
PublicKey={{ mobile_wireguard_public_key }}
AllowedIPs={{ mobile_wireguard_ip }}/32
PersistentKeepalive=25

5
templates/wireguard/wg0.network.j2 Normal file
@@ -0,0 +1,5 @@
[Match]
Name=wg0
[Network]
Address={{ wireguard_ip }}/{{ wireguard_network_mask }}

32
templates/woodpecker.yml Normal file
@@ -0,0 +1,32 @@
version: '3'
services:
woodpecker-server:
image: woodpeckerci/woodpecker-server:next
ports:
- "{{ woodpecker_port }}:8000"
volumes:
- woodpecker-server-data:/var/lib/woodpecker/
environment:
- WOODPECKER_OPEN=false
- WOODPECKER_HOST=https://{{ woodpecker_host }}
- WOODPECKER_AGENT_SECRET={{ woodpecker_agent_secret }}
- WOODPECKER_FORGEJO=true
- WOODPECKER_FORGEJO_URL=https://{{ forgejo_host }}
- WOODPECKER_FORGEJO_CLIENT={{ woodpecker_client_id }}
- WOODPECKER_FORGEJO_SECRET={{ woodpecker_client_secret }}
- WOODPECKER_ADMIN=nikos
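  # The agent reaches the server over gRPC on port 9000, which stays internal to the compose network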
woodpecker-agent:
image: woodpeckerci/woodpecker-agent:next
command: agent
restart: always
depends_on:
- woodpecker-server
volumes:
- /var/run/docker.sock:/var/run/docker.sock
environment:
- WOODPECKER_SERVER=woodpecker-server:9000
- WOODPECKER_AGENT_SECRET={{ woodpecker_agent_secret }}
volumes:
woodpecker-server-data:

59
wireguard.yml Normal file
@@ -0,0 +1,59 @@
---
- hosts: [wireguard]
tasks:
- name: Install wireguard packages
become: true
ansible.builtin.apt:
name: wireguard
state: present
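    # umask 0077 keeps the generated private key readable by root only; the creates guard avoids regenerating existing keys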
- name: Generate keys
become: true
ansible.builtin.shell: |
umask 0077
wg genkey > /etc/wireguard/privatekey
wg pubkey < /etc/wireguard/privatekey > /etc/wireguard/publickey
args:
          # `creates` takes a single path; checking the public key (written last) keeps the task idempotent
          creates: /etc/wireguard/publickey
- name: Register public key
become: true
ansible.builtin.shell: cat /etc/wireguard/publickey
register: wireguard_public_key
changed_when: false
- name: Register private key
become: true
ansible.builtin.shell: cat /etc/wireguard/privatekey
register: wireguard_private_key
changed_when: false
- name: Setup network device
become: yes
notify: systemd network restart
ansible.builtin.template:
src: ./templates/wireguard/wg0.netdev.j2
dest: /etc/systemd/network/wg0.netdev
owner: root
group: systemd-network
mode: 0640
- name: Setup network
become: yes
notify: systemd network restart
ansible.builtin.template:
src: ./templates/wireguard/wg0.network.j2
dest: /etc/systemd/network/wg0.network
owner: root
group: systemd-network
mode: 0640
handlers:
- name: systemd network restart
become: true
ansible.builtin.service:
name: systemd-networkd
state: restarted
enabled: true