1  coreos-config/plays/services/ba-gitlab-runner/.env  Normal file
@@ -0,0 +1 @@
COMPOSE_PROJECT_NAME=gitlab-ba
@@ -0,0 +1,39 @@
---
version: "3.4"

services:
  dind:
    image: docker:dind
    restart: unless-stopped
    privileged: true
    volumes:
      - /lib/modules:/lib/modules:ro
    environment:
      DOCKER_TLS_CERTDIR: ""
    networks:
      - backend
      - default

  runner:
    image: gitlab/gitlab-runner:alpine
    restart: unless-stopped
    depends_on:
      - dind
    networks:
      - default
      - backend
    volumes:
      - runner_cfg:/etc/gitlab-runner:z
    environment:
      - DOCKER_HOST=tcp://dind:2375
      - CI_SERVER_URL={{ ba_gitlab_runner.server }}
      - REGISTRATION_TOKEN={{ ba_gitlab_runner.token }}

volumes:
  runner_cfg:

networks:
  backend:
    internal: true

...
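The runner above reaches the privileged dind sidecar over tcp://dind:2375 on the internal backend network and pulls its registration settings from the templated environment. A minimal sketch of an Ansible task that could deploy such a compose project (the module choice and project path are assumptions, not part of this commit):

# Hypothetical deploy task; assumes the community.docker collection is available
# and the rendered project has been copied to /srv/ba-gitlab-runner on the host.
- name: Bring up the ba-gitlab-runner compose project
  community.docker.docker_compose_v2:
    project_src: /srv/ba-gitlab-runner
    state: present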
1  coreos-config/plays/services/blog/.env  Normal file
@@ -0,0 +1 @@
COMPOSE_PROJECT_NAME=blog
19  coreos-config/plays/services/blog/docker-compose.yaml  Normal file
@@ -0,0 +1,19 @@
---
version: "3.4"

services:
  tobiasmanske.de:
    image: registry.tobiasmanske.de/tobiasmanske.de:latest
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.tobiasmanskede.rule=(Host(`tobiasmanske.de`) || Host(`www.tobiasmanske.de`)) && !PathPrefix(`/{path:(_matrix|_synapse|.well-known/matrix|.well-known/openpgpkey)}/`)"
      - "traefik.http.routers.tobiasmanskede.entryPoints=websecure"
      - "traefik.http.services.tobiasmanskede.loadbalancer.server.port=80"
    restart: always
    networks:
      - gateway

networks:
  gateway:
    external: true
...
1  coreos-config/plays/services/caddy/.env  Normal file
@@ -0,0 +1 @@
COMPOSE_PROJECT_NAME=caddy
14  coreos-config/plays/services/caddy/Caddyfile  Normal file
@@ -0,0 +1,14 @@
{
    auto_https off
}

{% for rule in redirect.hosts %}
http://{{ rule.from }} {
{% if rule.keepUri %}
    redir https://{{ rule.to }}{uri} permanent
{% else %}
    redir https://{{ rule.to }} permanent
{% endif %}
}

{% endfor %}
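The template loops over redirect.hosts and emits one plain-HTTP site block per entry, with keepUri deciding whether the request URI is carried over. A sketch of the variable shape it expects (hostnames are placeholders):

# Illustrative group_vars entry; real values live elsewhere in the repo.
redirect:
  hosts:
    - from: old.example.org
      to: new.example.org
      keepUri: true    # rendered as: redir https://new.example.org{uri} permanent
    - from: legacy.example.org
      to: www.example.org
      keepUri: false   # rendered as: redir https://www.example.org permanent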
21  coreos-config/plays/services/caddy/docker-compose.yaml  Normal file
@@ -0,0 +1,21 @@
---
version: "3.4"

services:
  redirect:
    image: caddy:2
    volumes:
      - ./Caddyfile:/etc/caddy/Caddyfile:ro,z
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.caddyredir.rule={{ redirect.hosts | map(attribute='from') | map('regex_replace', '^(.*)$', 'Host(`\\1`)') | join(' || ') }}"
      - "traefik.http.routers.caddyredir.entryPoints=websecure"
      - "traefik.http.services.caddyredir.loadbalancer.server.port=80"
    restart: always
    networks:
      - gateway

networks:
  gateway:
    external: true
...
1  coreos-config/plays/services/diun/.env  Normal file
@@ -0,0 +1 @@
COMPOSE_PROJECT_NAME=diun
19  coreos-config/plays/services/diun/diun.yml  Normal file
@@ -0,0 +1,19 @@
watch:
  workers: 20
  schedule: "0 */6 * * *"
  firstCheckNotif: false

notif:
  matrix:
    homeserverURL: http://pantalaimon:8008
    user: "{{ diun.matrix.user }}"
    password: "{{ diun.matrix.password }}"
    roomID: "{{ diun.matrix.roomID }}"
    msgType: notice
    templateBody: |
      {% raw %}Docker tag {{ if .Entry.Image.HubLink }}[**{{ .Entry.Image }}**]({{ .Entry.Image.HubLink }}){{ else }}**{{ .Entry.Image }}**{{ end }} which you subscribed to through {{ .Entry.Provider }} provider {{ if (eq .Entry.Status "new") }}is available{{ else }}has been updated{{ end }} on {{ .Entry.Image.Domain }} registry.
      {{ if and (eq .Entry.Status "new") (eq .Entry.Image "docker.io/jitsi/web") }}See https://github.com/jitsi/docker-jitsi-meet/releases/tag/{{ .Entry.Image.Tag }}{{ end }}{% endraw %}

providers:
  file:
    filename: /watch.yml
29  coreos-config/plays/services/diun/docker-compose.yaml  Normal file
@@ -0,0 +1,29 @@
---
version: "3.4"

services:
  diun:
    image: crazymax/diun:latest
    container_name: diun
    command: serve
    volumes:
      - "data:/data"
      - "./diun.yml:/diun.yml:ro,Z"
      - "./watch.yml:/watch.yml:ro,Z"
    environment:
      - "TZ=Europe/Berlin"
      - "LOG_LEVEL=info"
      - "LOG_JSON=false"
    restart: always
    networks:
      - default
      - pantalaimon

volumes:
  data:

networks:
  pantalaimon:
    external: true

...
6  coreos-config/plays/services/diun/watch.yml  Normal file
@@ -0,0 +1,6 @@
- name: docker.io/jitsi/web
  watch_repo: true
  notify_on:
    - new
  include_tags:
    - ^stable-\d+
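watch.yml feeds Diun's file provider, one list item per image to watch. An additional entry would follow the same schema (the image name and tag filter below are only an example, not part of this commit):

# Hypothetical extra entry, mirroring the jitsi/web item above.
- name: docker.io/library/postgres
  watch_repo: true
  notify_on:
    - new
  include_tags:
    - ^15\.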
1  coreos-config/plays/services/gitea-runner/.env  Normal file
@@ -0,0 +1 @@
COMPOSE_PROJECT_NAME=gitea-runner
@@ -0,0 +1,41 @@
---
version: '3.9'

services:
  dind:
    image: docker:dind
    restart: unless-stopped
    privileged: true
    volumes:
      - /lib/modules:/lib/modules:ro
    environment:
      DOCKER_TLS_CERTDIR: ""
    networks:
      - backend
      - default

  drone_runner:
    image: drone/drone-runner-docker:1.8
    restart: always
    environment:
      - "DOCKER_HOST=tcp://dind:2375"
      - "DRONE_RPC_SECRET={{ gitea.drone.rpc_secret }}"
      - "DRONE_RPC_HOST=drone.tobiasmanske.de"
      - "DRONE_RPC_PROTO=https"
      - "DRONE_RUNNER_CAPACITY={{ gitea.drone.runner_capacity }}"
      - "DRONE_RUNNER_NAME={{ gitea.drone.runner_name }}"
      {% if gitea.drone.runner_labels is defined %}
      - "DRONE_RUNNER_LABELS={{ gitea.drone.runner_labels | join(',') }}"
      {% endif %}
      - "DRONE_RUNNER_CLONE_IMAGE=drone/git:linux-amd64"
      - "DRONE_RUNNER_VOLUMES=/etc/hosts:/etc/hosts"
    depends_on:
      - dind
    networks:
      - backend
      - default

networks:
  backend:
    internal: true
...
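The drone_runner service is configured entirely through templated environment variables, and the {% if %} block only emits DRONE_RUNNER_LABELS when labels are defined. A sketch of the gitea.drone values this template consumes (all values are placeholders):

# Illustrative vars; the real secrets are kept in the repo's vault.
gitea:
  drone:
    rpc_secret: "changeme"
    runner_capacity: 2
    runner_name: runner-01
    runner_labels:     # optional; omit to skip DRONE_RUNNER_LABELS entirely
      - amd64
      - docker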
1  coreos-config/plays/services/gitea/.env  Normal file
@@ -0,0 +1 @@
COMPOSE_PROJECT_NAME=gitea
92  coreos-config/plays/services/gitea/docker-compose.yaml  Normal file
@@ -0,0 +1,92 @@
---
version: '3.9'

services:
  gitea:
    image: gitea/gitea:1
    container_name: gitea
    environment:
      - "USER_UID=1000"
      - "USER_GID=1000"
      - "GITEA__database__DB_TYPE=postgres"
      - "GITEA__database__HOST=db:5432"
      - "GITEA__database__NAME={{ gitea.db.name }}"
      - "GITEA__database__USER={{ gitea.db.user }}"
      - "GITEA__database__PASSWD={{ gitea.db.password }}"
      - "GITEA__webhook__ALLOWED_HOST_LIST=*.tobiasmanske.de"
      - "GITEA__oauth2_client__ENABLE_AUTO_REGISTRATION=true"
      - "GITEA__service__DISABLE_REGISTRATION=true"
    restart: always
    networks:
      - backend
      - gateway
    volumes:
      - gitea_data:/data
      - /etc/timezone:/etc/timezone:ro
      - /etc/localtime:/etc/localtime:ro
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.gitea.rule=Host(`git.tobiasmanske.de`)"
      - "traefik.http.routers.gitea.entryPoints=websecure"
      - "traefik.http.services.gitea.loadbalancer.server.port=3000"
    ports:
      - "7779:22"
    depends_on:
      db:
        condition: service_healthy

  db:
    image: postgres:14
    restart: always
    environment:
      - "POSTGRES_USER={{ gitea.db.user }}"
      - "POSTGRES_PASSWORD={{ gitea.db.password }}"
      - "POSTGRES_DB={{ gitea.db.name }}"
    networks:
      - backend
    volumes:
      - pg_data:/var/lib/postgresql/data
    healthcheck:
      test: ["CMD-SHELL", "pg_isready"]
      interval: 10s
      timeout: 5s
      retries: 5

  drone:
    image: drone/drone:2
    restart: always
    environment:
      - "DRONE_GITEA_SERVER=https://git.tobiasmanske.de"
      - "DRONE_COOKIE_SECRET={{ gitea.drone.cookie_secret }}"
      - "DRONE_GITEA_CLIENT_ID={{ gitea.drone.client_id }}"
      - "DRONE_GIT_ALWAYS_AUTH=true"
      - "DRONE_GITEA_CLIENT_SECRET={{ gitea.drone.client_secret }}"
      - "DRONE_RPC_SECRET={{ gitea.drone.rpc_secret }}"
      - "DRONE_SERVER_HOST=drone.tobiasmanske.de"
      - "DRONE_SERVER_PROTO=https"
      - "DRONE_IMAGE_CLONE=openjdk:17-bullseye"
      - "DRONE_USER_CREATE=username:tobias,admin:true"
    networks:
      - backend
      - gateway
    volumes:
      - drone_data:/data
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.drone.rule=Host(`drone.tobiasmanske.de`)"
      - "traefik.http.routers.drone.entryPoints=websecure"
      - "traefik.http.services.drone.loadbalancer.server.port=80"
    depends_on:
      - gitea

networks:
  backend:
    internal: true
  gateway:
    external: true

volumes:
  gitea_data:
  drone_data:
  pg_data:
...
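Beyond the runner settings, the Gitea/Drone stack expects database credentials and the Drone OAuth pairing under the same gitea variable tree. A sketch of that shape, matching the {{ gitea.* }} references above (placeholders only):

# Illustrative structure; actual values are vaulted.
gitea:
  db:
    name: gitea
    user: gitea
    password: "changeme"
  drone:
    cookie_secret: "changeme"
    client_id: "oauth-app-id-from-gitea"
    client_secret: "changeme"
    rpc_secret: "changeme"    # must match the runner's DRONE_RPC_SECRET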
1  coreos-config/plays/services/hedgedoc/.env  Normal file
@@ -0,0 +1 @@
COMPOSE_PROJECT_NAME=hedgedoc
79  coreos-config/plays/services/hedgedoc/docker-compose.yaml  Normal file
@@ -0,0 +1,79 @@
|
||||
---
|
||||
version: '3'
|
||||
services:
|
||||
database:
|
||||
image: postgres:13-alpine
|
||||
environment:
|
||||
- POSTGRES_USER={{ hedgedoc.db.user }}
|
||||
- POSTGRES_PASSWORD={{ hedgedoc.db.password }}
|
||||
- POSTGRES_DB={{ hedgedoc.db.name }}
|
||||
volumes:
|
||||
- database:/var/lib/postgresql/data
|
||||
restart: always
|
||||
networks:
|
||||
- backend
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "pg_isready"]
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 5
|
||||
|
||||
app:
|
||||
# Make sure to use the latest release from https://hedgedoc.org/latest-release
|
||||
image: quay.io/hedgedoc/hedgedoc:1.9.3
|
||||
environment:
|
||||
- CMD_DB_URL=postgres://{{ hedgedoc.db.user }}:{{ hedgedoc.db.password }}@database:5432/{{ hedgedoc.db.name }}
|
||||
- CMD_DOMAIN=doc.tobiasmanske.de
|
||||
- CMD_ALLOW_ORIGIN=doc.tobiasmanske.de,localhost
|
||||
- CMD_CSP_ENABLE=true
|
||||
- CMD_PROTOCOL_USESSL=true
|
||||
- CMD_PROTOCOL_USE_SSL=true
|
||||
- CMD_ALLOW_EMAIL_REGISTER=false
|
||||
- CMD_ALLOW_ANONYMOUS=false
|
||||
- CMD_ALLOW_ANONYMOUS_EDITS=true
|
||||
- CMD_ALLOW_FREEURL=true
|
||||
- CMD_DEFAULT_PERMISSION=private
|
||||
- CMD_SESSION_SECRET={{ hedgedoc.cmd.session_secret }}
|
||||
- CMD_OAUTH2_CLIENT_ID={{ hedgedoc.cmd.client_id }}
|
||||
- CMD_OAUTH2_CLIENT_SECRET={{ hedgedoc.cmd.client_secret }}
|
||||
- CMD_OAUTH2_AUTHORIZATION_URL={{ hedgedoc.cmd.authorization_url }}
|
||||
- CMD_OAUTH2_SCOPE=openid email profile
|
||||
- CMD_OAUTH2_TOKEN_URL={{ hedgedoc.cmd.token_url }}
|
||||
- CMD_OAUTH2_USER_PROFILE_URL={{ hedgedoc.cmd.user_profile_url }}
|
||||
- CMD_OAUTH2_USER_PROFILE_USERNAME_ATTR=preferred_username
|
||||
- CMD_OAUTH2_USER_PROFILE_DISPLAY_NAME_ATTR=name
|
||||
- CMD_OAUTH2_USER_PROFILE_EMAIL_ATTR=email
|
||||
- CMD_OAUTH2_PROVIDERNAME=Keycloak
|
||||
- CMD_IMAGE_UPLOAD_TYPE=minio
|
||||
- CMD_MINIO_ACCESS_KEY={{ hedgedoc.cmd.s3.access_key }}
|
||||
- CMD_MINIO_SECRET_KEY={{ hedgedoc.cmd.s3.secret_key }}
|
||||
- CMD_MINIO_ENDPOINT={{ hedgedoc.cmd.s3.endpoint }}
|
||||
- CMD_MINIO_PORT={{ hedgedoc.cmd.s3.port }}
|
||||
- CMD_MINIO_SECURE={{ hedgedoc.cmd.s3.secure }}
|
||||
- CMD_S3_BUCKET=hedgedoc
|
||||
- CMD_S3_FOLDER=uploads
|
||||
restart: always
|
||||
labels:
|
||||
- "traefik.enable=true"
|
||||
- "traefik.http.routers.hedgedoc.rule=Host(`doc.tobiasmanske.de`)"
|
||||
- "traefik.http.routers.hedgedoc.middlewares=deny-metrics@file"
|
||||
- "traefik.http.routers.hedgedoc.entryPoints=websecure"
|
||||
- "traefik.http.services.hedgedoc.loadbalancer.server.port=3000"
|
||||
- "prometheus-scrape.enabled=true"
|
||||
- "prometheus-scrape.port=3000"
|
||||
depends_on:
|
||||
database:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- backend
|
||||
- gateway
|
||||
|
||||
volumes:
|
||||
database:
|
||||
|
||||
networks:
|
||||
gateway:
|
||||
external: true
|
||||
backend:
|
||||
internal: true
|
||||
...
|
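The HedgeDoc service is likewise driven purely by templated CMD_* variables: the database URL, the Keycloak OAuth2 endpoints, and a minio/S3 target for uploads. A sketch of the hedgedoc variable tree those references imply (URLs and secrets are placeholders, not taken from this commit):

# Illustrative vars for the {{ hedgedoc.* }} references above.
hedgedoc:
  db:
    user: hedgedoc
    password: "changeme"
    name: hedgedoc
  cmd:
    session_secret: "changeme"
    client_id: hedgedoc
    client_secret: "changeme"
    authorization_url: https://auth.example.org/realms/example/protocol/openid-connect/auth
    token_url: https://auth.example.org/realms/example/protocol/openid-connect/token
    user_profile_url: https://auth.example.org/realms/example/protocol/openid-connect/userinfo
    s3:
      access_key: "changeme"
      secret_key: "changeme"
      endpoint: s3.example.org
      port: 443
      secure: "true"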
1  coreos-config/plays/services/keycloak/.env  Normal file
@@ -0,0 +1 @@
COMPOSE_PROJECT_NAME=keycloak
58  coreos-config/plays/services/keycloak/docker-compose.yaml  Normal file
@@ -0,0 +1,58 @@
|
||||
---
|
||||
version: '3.9'
|
||||
|
||||
services:
|
||||
postgres:
|
||||
image: postgres:14
|
||||
restart: always
|
||||
environment:
|
||||
- "POSTGRES_DB={{ auth.db.name }}"
|
||||
- "POSTGRES_USER={{ auth.db.user }}"
|
||||
- "POSTGRES_PASSWORD={{ auth.db.password }}"
|
||||
labels:
|
||||
- "com.centurylinklabs.watchtower.scope=update"
|
||||
volumes:
|
||||
- pg_data:/var/lib/postgresql/data
|
||||
networks:
|
||||
- backend
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "pg_isready"]
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 5
|
||||
|
||||
keycloak:
|
||||
image: registry.tobiasmanske.de/keycloak:main
|
||||
command: start
|
||||
depends_on:
|
||||
postgres:
|
||||
condition: service_healthy
|
||||
environment:
|
||||
- "KC_DB=postgres"
|
||||
- "KC_DB_URL_HOST=postgres"
|
||||
- "KC_DB_URL_DATABASE={{ auth.db.name }}"
|
||||
- "KC_DB_USERNAME={{ auth.db.user }}"
|
||||
- "KC_DB_PASSWORD={{ auth.db.password }}"
|
||||
- "KEYCLOAK_ADMIN={{ auth.keycloak.user }}"
|
||||
- "KEYCLOAK_ADMIN_PASSWORD={{ auth.keycloak.password }}"
|
||||
- "KC_PROXY=edge"
|
||||
- "KC_HOSTNAME=auth.tobiasmanske.de"
|
||||
labels:
|
||||
- "traefik.enable=true"
|
||||
- "traefik.http.routers.keycloak.rule=Host(`auth.tobiasmanske.de`)"
|
||||
- "traefik.http.routers.keycloak.entryPoints=websecure"
|
||||
- "traefik.http.services.keycloak.loadbalancer.server.port=8080"
|
||||
restart: always
|
||||
networks:
|
||||
- backend
|
||||
- gateway
|
||||
|
||||
networks:
|
||||
backend:
|
||||
internal: true
|
||||
gateway:
|
||||
external: true
|
||||
|
||||
volumes:
|
||||
pg_data:
|
||||
...
|
1  coreos-config/plays/services/kuma/.env  Normal file
@@ -0,0 +1 @@
COMPOSE_PROJECT_NAME=kuma-{{ service_name|default("kuma") }}
28  coreos-config/plays/services/kuma/docker-compose.yaml  Normal file
@@ -0,0 +1,28 @@
{% set _name = service_name|default("kuma") %}
{% set _urls = urls|default(kuma.urls)|mandatory %}
---
services:
  kuma:
    image: louislam/uptime-kuma:latest
    restart: unless-stopped
    volumes:
      - data:/app/data
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.kuma-{{ _name }}.rule={{ _urls | map('regex_replace', '^(.*)$', 'Host(`\\1`)') | join(' || ') }}"
      - "traefik.http.routers.kuma-{{ _name }}.entryPoints=websecure"
      - "traefik.http.services.kuma-{{ _name }}.loadbalancer.server.port=3001"
    networks:
      - default
      - gateway
      - pantalaimon

volumes:
  data:

networks:
  gateway:
    external: true
  pantalaimon:
    external: true
...
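The two {% set %} lines at the top make this one template reusable for several Kuma instances: service_name scopes the compose project and the Traefik router/service names, while urls builds the host rule. Illustrative per-instance vars (hostnames are placeholders):

# Hypothetical values a play could pass when rendering this service.
service_name: status
urls:
  - status.example.org
  - uptime.example.org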
1  coreos-config/plays/services/linktree/.env  Normal file
@@ -0,0 +1 @@
COMPOSE_PROJECT_NAME=linktree
19  coreos-config/plays/services/linktree/docker-compose.yaml  Normal file
@@ -0,0 +1,19 @@
---
version: "3.4"

services:
  unruhig.eu:
    image: registry.tobiasmanske.de/unruhig.eu:latest
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.unruhigeu.rule=(Host(`unruhig.eu`) || Host(`www.unruhig.eu`))"
      - "traefik.http.routers.unruhigeu.entryPoints=websecure"
      - "traefik.http.services.unruhigeu.loadbalancer.server.port=80"
    restart: always
    networks:
      - gateway

networks:
  gateway:
    external: true
...
1  coreos-config/plays/services/matrix/.env  Normal file
@@ -0,0 +1 @@
COMPOSE_PROJECT_NAME=matrix
15  coreos-config/plays/services/matrix/Caddyfile  Normal file
@@ -0,0 +1,15 @@
{
    auto_https off
}

http://{{ matrix.baseurl }} {
    header {
        Content-Type application/json
        Access-Control-Allow-Origin *
    }
    respond /.well-known/matrix/client "{\"m.homeserver\": {\"base_url\": \"https://synapse.{{ matrix.baseurl }}\"} }" 200
    respond /.well-known/matrix/server "{\"m.server\": \"synapse.{{ matrix.baseurl }}:443\"}" 200
    respond /.well-known/matrix/support "{\"admins\":[{\"matrix_id\":\"@tobi:{{ matrix.baseurl }}\",\"email_address\":\"matrix@{{ matrix.baseurl }}\",\"role\":\"admin\"}]}" 200

    respond 404
}
12  coreos-config/plays/services/matrix/cinny-config.json  Normal file
@@ -0,0 +1,12 @@
{
  "defaultHomeserver": 0,
  "homeserverList": [
    "unruhig.eu",
    "entropia.de",
    "matrix.org",
    "archlinux.org",
    "kit.edu",
    "mozilla.org"
  ],
  "allowCustomHomeservers": true
}
231  coreos-config/plays/services/matrix/docker-compose.yaml  Normal file
@@ -0,0 +1,231 @@
|
||||
---
|
||||
version: '3.9'
|
||||
|
||||
services:
|
||||
|
||||
synapse:
|
||||
image: registry.tobiasmanske.de/matrixdotorg/synapse:latest
|
||||
user: "1000:1000"
|
||||
# Since synapse does not retry to connect to the database, restart upon
|
||||
# failure
|
||||
restart: unless-stopped
|
||||
# See the readme for a full documentation of the environment settings
|
||||
# NOTE: You must edit homeserver.yaml to use Postgres; it defaults to SQLite.
|
||||
environment:
|
||||
- SYNAPSE_CONFIG_DIR=/config
|
||||
- SYNAPSE_CONFIG_PATH=/config/homeserver.yaml
|
||||
- TZ=Europe/Berlin
|
||||
ulimits:
|
||||
nofile:
|
||||
soft: 10000
|
||||
hard: 40000
|
||||
volumes:
|
||||
- synapse_data:/data
|
||||
- ./synapse-config:/config:ro,Z
|
||||
- ./mautrix-telegram/registration.yaml:/data/reg-mautrix-tg.yaml:ro,Z
|
||||
- ./mautrix-slack/registration.yaml:/data/reg-mautrix-slack.yaml:ro,Z
|
||||
- ./mautrix-signal/registration.yaml:/data/reg-mautrix-signal.yaml:ro,Z
|
||||
depends_on:
|
||||
- db
|
||||
- redis
|
||||
networks:
|
||||
- default
|
||||
- gateway
|
||||
- backend
|
||||
labels:
|
||||
- "traefik.enable=true"
|
||||
- "traefik.http.routers.http-synapse.rule=Host(`synapse.{{ matrix.baseurl }}`)"
|
||||
- "traefik.http.routers.http-synapse.entryPoints=websecure"
|
||||
- "traefik.http.routers.http-synapse.service=matrix-synapse"
|
||||
- "traefik.http.routers.matrix-synapse.rule=Host(`{{ matrix.baseurl }}`) && PathPrefix(`/_{path:(matrix|synapse)}/`)"
|
||||
- "traefik.http.routers.matrix-synapse.entryPoints=websecure"
|
||||
- "traefik.http.routers.matrix-synapse.service=matrix-synapse"
|
||||
- "traefik.http.services.matrix-synapse.loadbalancer.server.port=8008"
|
||||
- "prometheus-scrape.enabled=true"
|
||||
- "prometheus-scrape.port=9091"
|
||||
- "prometheus-scrape.metrics_path=/_synapse/metrics"
|
||||
|
||||
db:
|
||||
image: postgres:15
|
||||
restart: always
|
||||
environment:
|
||||
- POSTGRES_USER={{ matrix.db.user }}
|
||||
- POSTGRES_DB={{ matrix.db.database }}
|
||||
- POSTGRES_PASSWORD={{ matrix.db.password }}
|
||||
- POSTGRES_INITDB_ARGS=--encoding=UTF-8 --lc-collate=C --lc-ctype=C
|
||||
networks:
|
||||
- backend
|
||||
volumes:
|
||||
- db_data:/var/lib/postgresql/data
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "pg_isready"]
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 5
|
||||
|
||||
caddy:
|
||||
image: caddy:2
|
||||
volumes:
|
||||
- ./Caddyfile:/etc/caddy/Caddyfile:ro,z
|
||||
restart: unless-stopped
|
||||
labels:
|
||||
- "traefik.enable=true"
|
||||
- "traefik.http.routers.matrix-well-known.rule=Host(`{{ matrix.baseurl }}`) && PathPrefix(`/.well-known/matrix/`)"
|
||||
- "traefik.http.routers.matrix-well-known.entrypoints=websecure"
|
||||
- "traefik.http.services.matrix-well-known.loadbalancer.server.port=80"
|
||||
networks:
|
||||
- gateway
|
||||
|
||||
cinny:
|
||||
image: ghcr.io/cinnyapp/cinny:latest
|
||||
restart: unless-stopped
|
||||
labels:
|
||||
- "traefik.enable=true"
|
||||
- "traefik.http.routers.matrix-cinny.rule=Host(`cinny.{{ matrix.baseurl }}`)"
|
||||
- "traefik.http.routers.matrix-cinny.entryPoints=websecure"
|
||||
- "traefik.http.services.matrix-cinny.loadbalancer.server.port=80"
|
||||
volumes:
|
||||
- ./cinny-config.json:/app/config.json:ro,Z
|
||||
networks:
|
||||
- gateway
|
||||
- default
|
||||
|
||||
redis:
|
||||
image: redis:latest
|
||||
restart: unless-stopped
|
||||
networks:
|
||||
- backend
|
||||
|
||||
### BRIDGES
|
||||
|
||||
#### Telegram
|
||||
|
||||
mautrix-telegram:
|
||||
image: dock.mau.dev/mautrix/telegram:latest
|
||||
user: "1000:1000"
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
- "MAUTRIX_DIRECT_STARTUP=1"
|
||||
volumes:
|
||||
- bridge_tg_data:/data
|
||||
- ./mautrix-telegram/config.yaml:/data/config.yaml:ro,Z
|
||||
- ./mautrix-telegram/registration.yaml:/data/registration.yaml:ro,Z
|
||||
networks:
|
||||
- backend
|
||||
- default # Needs to contact UFOs in the sky
|
||||
depends_on:
|
||||
- db-bridge-tg
|
||||
- synapse
|
||||
|
||||
db-bridge-tg:
|
||||
image: postgres:15
|
||||
restart: always
|
||||
environment:
|
||||
- POSTGRES_USER={{ matrix.bridge.tg.dbuser }}
|
||||
- POSTGRES_DB={{ matrix.bridge.tg.dbname }}
|
||||
- POSTGRES_PASSWORD={{ matrix.bridge.tg.dbpass }}
|
||||
- POSTGRES_INITDB_ARGS=--encoding=UTF-8 --lc-collate=C --lc-ctype=C
|
||||
networks:
|
||||
- backend
|
||||
volumes:
|
||||
- bridge_tg_db:/var/lib/postgresql/data
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "pg_isready"]
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 5
|
||||
|
||||
#### SLACK
|
||||
|
||||
mautrix-slack:
|
||||
image: dock.mau.dev/mautrix/slack:latest
|
||||
environment:
|
||||
- "UID=1000"
|
||||
- "GID=1000"
|
||||
restart: unless-stopped
|
||||
volumes:
|
||||
- bridge_slack_data:/data
|
||||
- ./mautrix-slack/config.yaml:/data/config.yaml:ro,Z
|
||||
- ./mautrix-slack/registration.yaml:/data/registration.yaml:ro,Z
|
||||
networks:
|
||||
- backend
|
||||
- default # Needs to contact UFOs in the sky
|
||||
depends_on:
|
||||
- db-bridge-slack
|
||||
- synapse
|
||||
|
||||
db-bridge-slack:
|
||||
image: postgres:15
|
||||
restart: always
|
||||
environment:
|
||||
- POSTGRES_USER={{ matrix.bridge.slack.dbuser }}
|
||||
- POSTGRES_DB={{ matrix.bridge.slack.dbname }}
|
||||
- POSTGRES_PASSWORD={{ matrix.bridge.slack.dbpass }}
|
||||
- POSTGRES_INITDB_ARGS=--encoding=UTF-8 --lc-collate=C --lc-ctype=C
|
||||
networks:
|
||||
- backend
|
||||
volumes:
|
||||
- bridge_slack_db:/var/lib/postgresql/data
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "pg_isready"]
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 5
|
||||
|
||||
#### SIGNAL
|
||||
mautrix-signal:
|
||||
image: dock.mau.dev/mautrix/signal:latest
|
||||
user: "1000:1000"
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
- "MAUTRIX_DIRECT_STARTUP=1"
|
||||
networks:
|
||||
- default
|
||||
- backend
|
||||
volumes:
|
||||
- bridge_signal_data:/data
|
||||
- signald_data:/signald
|
||||
- ./mautrix-signal/config.yaml:/data/config.yaml:ro,Z
|
||||
- ./mautrix-signal/registration.yaml:/data/registration.yaml:ro,Z
|
||||
depends_on:
|
||||
- signald
|
||||
- db-bridge-signal
|
||||
|
||||
signald:
|
||||
image: docker.io/signald/signald:latest
|
||||
restart: unless-stopped
|
||||
networks:
|
||||
- default
|
||||
- backend
|
||||
volumes:
|
||||
- signald_data:/signald
|
||||
|
||||
db-bridge-signal:
|
||||
image: postgres:15
|
||||
restart: unless-stopped
|
||||
networks:
|
||||
- backend
|
||||
environment:
|
||||
- POSTGRES_USER={{ matrix.bridge.signal.dbuser }}
|
||||
- POSTGRES_DB={{ matrix.bridge.signal.dbname }}
|
||||
- POSTGRES_PASSWORD={{ matrix.bridge.signal.dbpass }}
|
||||
volumes:
|
||||
- bridge_signal_db:/var/lib/postgresql/data
|
||||
|
||||
networks:
|
||||
backend:
|
||||
internal: true
|
||||
gateway:
|
||||
external: true
|
||||
|
||||
volumes:
|
||||
synapse_data:
|
||||
bridge_tg_data:
|
||||
bridge_tg_db:
|
||||
bridge_slack_data:
|
||||
bridge_slack_db:
|
||||
bridge_signal_data:
|
||||
bridge_signal_db:
|
||||
signald_data:
|
||||
db_data:
|
||||
...
|
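The Synapse stack and its three mautrix bridges all pull credentials from the matrix variable tree. A sketch of the parts this compose file references (structure inferred from the template expressions above; every value is a placeholder):

# Illustrative structure; real values are vaulted.
matrix:
  baseurl: example.org
  db:
    user: synapse
    database: synapse
    password: "changeme"
  bridge:
    tg:
      dbuser: telegram
      dbname: telegram
      dbpass: "changeme"
    slack:
      dbuser: slack
      dbname: slack
      dbpass: "changeme"
    signal:
      dbuser: signal
      dbname: signal
      dbpass: "changeme"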
334  coreos-config/plays/services/matrix/mautrix-signal/config.yaml  Normal file
@@ -0,0 +1,334 @@
|
||||
# Homeserver details
|
||||
# {% set config = matrix.bridge.signal %}
|
||||
|
||||
homeserver:
|
||||
# The address that this appservice can use to connect to the homeserver.
|
||||
address: https://synapse.{{ matrix.baseurl }}
|
||||
# The domain of the homeserver (also known as server_name, used for MXIDs, etc).
|
||||
domain: {{ matrix.baseurl }}
|
||||
# Whether or not to verify the SSL certificate of the homeserver.
|
||||
# Only applies if address starts with https://
|
||||
verify_ssl: true
|
||||
# What software is the homeserver running?
|
||||
# Standard Matrix homeservers like Synapse, Dendrite and Conduit should just use "standard" here.
|
||||
software: standard
|
||||
# Number of retries for all HTTP requests if the homeserver isn't reachable.
|
||||
http_retry_count: 4
|
||||
# The URL to push real-time bridge status to.
|
||||
# If set, the bridge will make POST requests to this URL whenever a user's Signal connection state changes.
|
||||
# The bridge will use the appservice as_token to authorize requests.
|
||||
status_endpoint:
|
||||
# Endpoint for reporting per-message status.
|
||||
message_send_checkpoint_endpoint:
|
||||
# Maximum number of simultaneous HTTP connections to the homeserver.
|
||||
connection_limit: 100
|
||||
# Whether asynchronous uploads via MSC2246 should be enabled for media.
|
||||
# Requires a media repo that supports MSC2246.
|
||||
async_media: false
|
||||
|
||||
# Application service host/registration related details
|
||||
# Changing these values requires regeneration of the registration.
|
||||
appservice:
|
||||
# The address that the homeserver can use to connect to this appservice.
|
||||
address: http://mautrix-signal:29328
|
||||
# When using https:// the TLS certificate and key files for the address.
|
||||
tls_cert: false
|
||||
tls_key: false
|
||||
|
||||
# The hostname and port where this appservice should listen.
|
||||
hostname: 0.0.0.0
|
||||
port: 29328
|
||||
# The maximum body size of appservice API requests (from the homeserver) in mebibytes
|
||||
# Usually 1 is enough, but on high-traffic bridges you might need to increase this to avoid 413s
|
||||
max_body_size: 1
|
||||
|
||||
# The full URI to the database. SQLite and Postgres are supported.
|
||||
# Format examples:
|
||||
# SQLite: sqlite:///filename.db
|
||||
# Postgres: postgres://username:password@hostname/dbname
|
||||
database: postgres://{{ config.dbuser }}:{{ config.dbpass }}@db-bridge-signal/{{ config.dbname }}?sslmode=disable
|
||||
# Additional arguments for asyncpg.create_pool() or sqlite3.connect()
|
||||
# https://magicstack.github.io/asyncpg/current/api/index.html#asyncpg.pool.create_pool
|
||||
# https://docs.python.org/3/library/sqlite3.html#sqlite3.connect
|
||||
# For sqlite, min_size is used as the connection thread pool size and max_size is ignored.
|
||||
# Additionally, SQLite supports init_commands as an array of SQL queries to run on connect (e.g. to set PRAGMAs).
|
||||
database_opts:
|
||||
min_size: 1
|
||||
max_size: 10
|
||||
id: signal
|
||||
# Username of the appservice bot.
|
||||
bot_username: signalbot
|
||||
# Display name and avatar for bot. Set to "remove" to remove display name/avatar, leave empty
|
||||
# to leave display name/avatar as-is.
|
||||
bot_displayname: Signal bridge bot
|
||||
bot_avatar: mxc://maunium.net/wPJgTQbZOtpBFmDNkiNEMDUp
|
||||
|
||||
# Whether or not to receive ephemeral events via appservice transactions.
|
||||
# Requires MSC2409 support (i.e. Synapse 1.22+).
|
||||
# You should disable bridge -> sync_with_custom_puppets when this is enabled.
|
||||
ephemeral_events: true
|
||||
|
||||
# Authentication tokens for AS <-> HS communication. Autogenerated; do not modify.
|
||||
as_token: "{{ config.as_token }}"
|
||||
hs_token: "{{ config.hs_token }}"
|
||||
|
||||
# Prometheus telemetry config. Requires prometheus-client to be installed.
|
||||
metrics:
|
||||
enabled: false
|
||||
listen_port: 8000
|
||||
|
||||
# Manhole config.
|
||||
manhole:
|
||||
# Whether or not opening the manhole is allowed.
|
||||
enabled: false
|
||||
# The path for the unix socket.
|
||||
path: /var/tmp/mautrix-signal.manhole
|
||||
# The list of UIDs who can be added to the whitelist.
|
||||
# If empty, any UIDs can be specified in the open-manhole command.
|
||||
whitelist:
|
||||
- 0
|
||||
signal:
|
||||
# Path to signald unix socket
|
||||
socket_path: /signald/signald.sock
|
||||
# Directory for temp files when sending files to Signal. This should be an
|
||||
# absolute path that signald can read. For attachments in the other direction,
|
||||
# make sure signald is configured to use an absolute path as the data directory.
|
||||
outgoing_attachment_dir: /signald/attachments
|
||||
# Directory where signald stores avatars for groups.
|
||||
avatar_dir: /signald/avatars
|
||||
# Directory where signald stores auth data. Used to delete data when logging out.
|
||||
data_dir: /signald/data
|
||||
# Whether or not unknown signald accounts should be deleted when the bridge is started.
|
||||
# When this is enabled, any UserInUse errors should be resolved by restarting the bridge.
|
||||
delete_unknown_accounts_on_start: false
|
||||
# Whether or not message attachments should be removed from disk after they're bridged.
|
||||
remove_file_after_handling: true
|
||||
# Whether or not users can register a primary device
|
||||
registration_enabled: true
|
||||
# Whether or not to enable disappearing messages in groups. If enabled, then the expiration
|
||||
# time of the messages will be determined by the first users to read the message, rather
|
||||
# than individually. If the bridge has a single user, this can be turned on safely.
|
||||
enable_disappearing_messages_in_groups: false
|
||||
|
||||
# Bridge config
|
||||
bridge:
|
||||
# {% raw %}
|
||||
# Localpart template of MXIDs for Signal users.
|
||||
# {userid} is replaced with the UUID of the Signal user.
|
||||
username_template: signal_{userid}
|
||||
# Displayname template for Signal users.
|
||||
# {displayname} is replaced with the displayname of the Signal user, which is the first
|
||||
# available variable in displayname_preference. The variables in displayname_preference
|
||||
# can also be used here directly.
|
||||
displayname_template: '{displayname} (Signal)'
|
||||
# {% endraw %}
|
||||
# Whether or not contact list displaynames should be used.
|
||||
# Possible values: disallow, allow, prefer
|
||||
#
|
||||
# Multi-user instances are recommended to disallow contact list names, as otherwise there can
|
||||
# be conflicts between names from different users' contact lists.
|
||||
contact_list_names: disallow
|
||||
# Available variables: full_name, first_name, last_name, phone, uuid
|
||||
displayname_preference:
|
||||
- full_name
|
||||
- phone
|
||||
autocreate_group_portal: true
|
||||
# Whether or not to create portals for all contacts on login/connect.
|
||||
autocreate_contact_portal: false
|
||||
# Whether or not to make portals of Signal groups in which joining via invite link does
|
||||
# not need to be approved by an administrator publicly joinable on Matrix.
|
||||
public_portals: false
|
||||
# Whether or not to use /sync to get read receipts and typing notifications
|
||||
# when double puppeting is enabled
|
||||
sync_with_custom_puppets: false
|
||||
# Whether or not to update the m.direct account data event when double puppeting is enabled.
|
||||
# Note that updating the m.direct event is not atomic (except with mautrix-asmux)
|
||||
# and is therefore prone to race conditions.
|
||||
sync_direct_chat_list: false
|
||||
# Allow using double puppeting from any server with a valid client .well-known file.
|
||||
double_puppet_allow_discovery: false
|
||||
# Servers to allow double puppeting from, even if double_puppet_allow_discovery is false.
|
||||
double_puppet_server_map:
|
||||
{{ matrix.baseurl }}: https://{{ matrix.baseurl }}
|
||||
login_shared_secret_map:
|
||||
{{ matrix.baseurl }}: {{ matrix.authenticator.shared_secret }}
|
||||
federate_rooms: false
|
||||
# End-to-bridge encryption support options.
|
||||
#
|
||||
# See https://docs.mau.fi/bridges/general/end-to-bridge-encryption.html for more info.
|
||||
encryption:
|
||||
# Allow encryption, work in group chat rooms with e2ee enabled
|
||||
allow: true
|
||||
# Default to encryption, force-enable encryption in all portals the bridge creates
|
||||
# This will cause the bridge bot to be in private chats for the encryption to work properly.
|
||||
default: true
|
||||
# Whether to use MSC2409/MSC3202 instead of /sync long polling for receiving encryption-related data.
|
||||
appservice: false
|
||||
# Require encryption, drop any unencrypted messages.
|
||||
require: true
|
||||
# Enable key sharing? If enabled, key requests for rooms where users are in will be fulfilled.
|
||||
# You must use a client that supports requesting keys from other users to use this feature.
|
||||
allow_key_sharing: false
|
||||
# What level of device verification should be required from users?
|
||||
#
|
||||
# Valid levels:
|
||||
# unverified - Send keys to all devices in the room.
|
||||
# cross-signed-untrusted - Require valid cross-signing, but trust all cross-signing keys.
|
||||
# cross-signed-tofu - Require valid cross-signing, trust cross-signing keys on first use (and reject changes).
|
||||
# cross-signed-verified - Require valid cross-signing, plus a valid user signature from the bridge bot.
|
||||
# Note that creating user signatures from the bridge bot is not currently possible.
|
||||
# verified - Require manual per-device verification
|
||||
# (currently only possible by modifying the `trust` column in the `crypto_device` database table).
|
||||
verification_levels:
|
||||
# Minimum level for which the bridge should send keys to when bridging messages from Telegram to Matrix.
|
||||
receive: unverified
|
||||
# Minimum level that the bridge should accept for incoming Matrix messages.
|
||||
send: unverified
|
||||
# Minimum level that the bridge should require for accepting key requests.
|
||||
share: cross-signed-tofu
|
||||
# Options for Megolm room key rotation. These options allow you to
|
||||
# configure the m.room.encryption event content. See:
|
||||
# https://spec.matrix.org/v1.3/client-server-api/#mroomencryption for
|
||||
# more information about that event.
|
||||
rotation:
|
||||
# Enable custom Megolm room key rotation settings. Note that these
|
||||
# settings will only apply to rooms created after this option is
|
||||
# set.
|
||||
enable_custom: false
|
||||
# The maximum number of milliseconds a session should be used
|
||||
# before changing it. The Matrix spec recommends 604800000 (a week)
|
||||
# as the default.
|
||||
milliseconds: 604800000
|
||||
# The maximum number of messages that should be sent with a given
|
||||
# session before changing it. The Matrix spec recommends 100 as the
|
||||
# default.
|
||||
messages: 100
|
||||
|
||||
# Whether or not to explicitly set the avatar and room name for private
|
||||
# chat portal rooms. This will be implicitly enabled if encryption.default is true.
|
||||
private_chat_portal_meta: true
|
||||
# Whether or not the bridge should send a read receipt from the bridge bot when a message has
|
||||
# been sent to Signal. This lets you check manually whether the bridge is receiving your
|
||||
# messages.
|
||||
# Note that this is not related to Signal delivery receipts.
|
||||
delivery_receipts: true
|
||||
# Whether or not delivery errors should be reported as messages in the Matrix room.
|
||||
delivery_error_reports: true
|
||||
# Whether the bridge should send the message status as a custom com.beeper.message_send_status event.
|
||||
message_status_events: false
|
||||
# Set this to true to tell the bridge to re-send m.bridge events to all rooms on the next run.
|
||||
# This field will automatically be changed back to false after it,
|
||||
# except if the config file is not writable.
|
||||
resend_bridge_info: false
|
||||
# Interval at which to resync contacts (in seconds).
|
||||
periodic_sync: 0
|
||||
# Should leaving the room on Matrix make the user leave on Signal?
|
||||
bridge_matrix_leave: false
|
||||
# Should the bridge auto-create a group chat on Signal when a ghost is invited to a room?
|
||||
# Requires the user to have sufficient power level and double puppeting enabled.
|
||||
create_group_on_invite: true
|
||||
hacky_contact_name_mixup_detection: false
|
||||
|
||||
# Provisioning API part of the web server for automated portal creation and fetching information.
|
||||
# Used by things like mautrix-manager (https://github.com/tulir/mautrix-manager).
|
||||
provisioning:
|
||||
# Whether or not the provisioning API should be enabled.
|
||||
enabled: false
|
||||
# The prefix to use in the provisioning API endpoints.
|
||||
prefix: /_matrix/provision
|
||||
# The shared secret to authorize users of the API.
|
||||
# Set to "generate" to generate and save a new token.
|
||||
shared_secret: disabled
|
||||
# Segment API key to enable analytics tracking for web server
|
||||
# endpoints. Set to null to disable.
|
||||
# Currently the only events are login start, QR code scan, and login
|
||||
# success/failure.
|
||||
segment_key:
|
||||
# Optional user_id to use when sending Segment events. If null, defaults to using mxID.
|
||||
segment_user_id:
|
||||
|
||||
# The prefix for commands. Only required in non-management rooms.
|
||||
command_prefix: '!signal'
|
||||
|
||||
# Messages sent upon joining a management room.
|
||||
# Markdown is supported. The defaults are listed below.
|
||||
management_room_text:
|
||||
# Sent when joining a room.
|
||||
welcome: Hello, I'm a Signal bridge bot.
|
||||
# Sent when joining a management room and the user is already logged in.
|
||||
welcome_connected: Use `help` for help.
|
||||
# Sent when joining a management room and the user is not logged in.
|
||||
welcome_unconnected: Use `help` for help or `link` to log in.
|
||||
# Optional extra text sent when joining a management room.
|
||||
additional_help: ''
|
||||
|
||||
# Send each message separately (for readability in some clients)
|
||||
management_room_multiple_messages: false
|
||||
|
||||
# Permissions for using the bridge.
|
||||
# Permitted values:
|
||||
# relay - Allowed to be relayed through the bridge, no access to commands.
|
||||
# user - Use the bridge with puppeting.
|
||||
# admin - Use and administrate the bridge.
|
||||
# Permitted keys:
|
||||
# * - All Matrix users
|
||||
# domain - All users on that homeserver
|
||||
# mxid - Specific user
|
||||
permissions:
|
||||
'*': relay
|
||||
{{ matrix.baseurl }}: user
|
||||
'@tobi:{{ matrix.baseurl }}': admin
|
||||
relay:
|
||||
# Whether relay mode should be allowed. If allowed, `!signal set-relay` can be used to turn any
|
||||
# authenticated user into a relaybot for that chat.
|
||||
enabled: false
|
||||
# The formats to use when sending messages to Signal via a relay user.
|
||||
#
|
||||
# Available variables:
|
||||
# $sender_displayname - The display name of the sender (e.g. Example User)
|
||||
# $sender_username - The username (Matrix ID localpart) of the sender (e.g. exampleuser)
|
||||
# $sender_mxid - The Matrix ID of the sender (e.g. @exampleuser:example.com)
|
||||
# $message - The message content
|
||||
message_formats:
|
||||
m.text: '$sender_displayname: $message'
|
||||
m.notice: '$sender_displayname: $message'
|
||||
m.emote: '* $sender_displayname $message'
|
||||
m.file: $sender_displayname sent a file
|
||||
m.image: $sender_displayname sent an image
|
||||
m.audio: $sender_displayname sent an audio file
|
||||
m.video: $sender_displayname sent a video
|
||||
m.location: $sender_displayname sent a location
|
||||
relaybot: '@relaybot:example.com'
|
||||
# Whether or not invites from non-logged-in users should be relayed
|
||||
invite: true
|
||||
|
||||
# Format for generating URLs from location messages for sending to Signal
|
||||
# Google Maps: 'https://www.google.com/maps/place/{lat},{long}'
|
||||
# OpenStreetMap: 'https://www.openstreetmap.org/?mlat={lat}&mlon={long}'
|
||||
location_format: https://www.google.com/maps/place/{lat},{long}
|
||||
|
||||
# Python logging configuration.
|
||||
#
|
||||
# See section 16.7.2 of the Python documentation for more info:
|
||||
# https://docs.python.org/3.6/library/logging.config.html#configuration-dictionary-schema
|
||||
logging:
|
||||
version: 1
|
||||
formatters:
|
||||
colored:
|
||||
(): mautrix_signal.util.ColorFormatter
|
||||
format: '[%(asctime)s] [%(levelname)s@%(name)s] %(message)s'
|
||||
normal:
|
||||
format: '[%(asctime)s] [%(levelname)s@%(name)s] %(message)s'
|
||||
handlers:
|
||||
console:
|
||||
class: logging.StreamHandler
|
||||
formatter: colored
|
||||
loggers:
|
||||
mau:
|
||||
level: DEBUG
|
||||
aiohttp:
|
||||
level: INFO
|
||||
root:
|
||||
level: DEBUG
|
||||
handlers: [console]
|
||||
|
@@ -0,0 +1,31 @@
|
||||
$ANSIBLE_VAULT;1.2;AES256;secrets
|
||||
31353638336331613430353931626330366132643736326566343536343666643965333163313831
|
||||
3062336363343836666163393763326332623730623930620a333666373365306536636264613732
|
||||
64373937373062303332306166393833656239333862343836626364613639633762376138383964
|
||||
3033623639636530320a613233643736383637396131636434306435346637353966393639363239
|
||||
30336461616464303031386164393433373831353435333466323166643436626234623262633237
|
||||
30373830366430636230633962643439363666363031633936313934616332306437623138373535
|
||||
65343062336461663861376664383138636333353338666231623436666366303431363438323632
|
||||
31313739376439323665386130323338363930366361646361383831643337653963353639353738
|
||||
36383866313262616135633231623964663266643030343561363735323039376338373165356366
|
||||
30643738313331333733343739366435383936373135666433666663353039316331366463623362
|
||||
38343430663432396332623662633533396433366564656263393735663839666566376139656261
|
||||
65323664616463626430653734393433626231386230633664653264373034633731633239363135
|
||||
35333366333039623764386330613130373263316436316266303461626463373939336134363039
|
||||
62653363613064373731616137333663333334636336623363343034383263656631653864336439
|
||||
65623762666538383766393939303832373566623666383761623234636638303566336438616136
|
||||
33333939323061333431656435383731326633323135313839343761613231623537356333636336
|
||||
65323063653239623166313938386133366565313336643161323564386338363839393434616535
|
||||
63373038383334633238303336386261343639393537333735383439346164633962343033633533
|
||||
64353138373161323639613434653939326265336239366364336630666634356439303564653833
|
||||
31333765303030376330396261376161636563306133363137313435376133373363653031356333
|
||||
62663737646165626366363230663262346563633236366238646339303763383161663033356232
|
||||
34343434363833386330636535663333356364633332616431613431386534336133386638333034
|
||||
35633363333366306435656137303866636232323765313164363636636366653364326332613233
|
||||
32643866663032313431663463666364326633376332323335336131376131663865616232653065
|
||||
34633338333237636336333062646561376331363138346132386430633462666634646462656431
|
||||
65373562323539636165313038643839623132643539346539343338346366366362323230653935
|
||||
34323834393961376234343564383635623865303765663439316535396263363265626265613761
|
||||
33343034343666663834363133663734343838623132666561393862623136613035656434626233
|
||||
31666434656535393536623461393630346262643331336364353932326337376132333631616635
|
||||
3963306630613238323633666264316462393063383639656333
|
231  coreos-config/plays/services/matrix/mautrix-slack/config.yaml  Normal file
@@ -0,0 +1,231 @@
|
||||
# Homeserver details.
|
||||
homeserver:
|
||||
# The address that this appservice can use to connect to the homeserver.
|
||||
address: https://synapse.{{ matrix.baseurl }}
|
||||
# The domain of the homeserver (for MXIDs, etc).
|
||||
domain: {{ matrix.baseurl }}
|
||||
# What software is the homeserver running?
|
||||
# Standard Matrix homeservers like Synapse, Dendrite and Conduit should just use "standard" here.
|
||||
software: standard
|
||||
# The URL to push real-time bridge status to.
|
||||
# If set, the bridge will make POST requests to this URL whenever a user's slack connection state changes.
|
||||
# The bridge will use the appservice as_token to authorize requests.
|
||||
status_endpoint: null
|
||||
# Endpoint for reporting per-message status.
|
||||
message_send_checkpoint_endpoint: null
|
||||
# Does the homeserver support https://github.com/matrix-org/matrix-spec-proposals/pull/2246?
|
||||
async_media: false
|
||||
|
||||
# Application service host/registration related details.
|
||||
# Changing these values requires regeneration of the registration.
|
||||
appservice:
|
||||
# The address that the homeserver can use to connect to this appservice.
|
||||
address: http://mautrix-slack:29335
|
||||
# The hostname and port where this appservice should listen.
|
||||
hostname: 0.0.0.0
|
||||
port: 29335
|
||||
|
||||
# Database config.
|
||||
database:
|
||||
# The database type. "sqlite3" and "postgres" are supported.
|
||||
type: postgres
|
||||
# The database URI.
|
||||
# SQLite: File name is enough. https://github.com/mattn/go-sqlite3#connection-string
|
||||
# Postgres: Connection string. For example, postgres://user:password@host/database?sslmode=disable
|
||||
# To connect via Unix socket, use something like postgres:///dbname?host=/var/run/postgresql
|
||||
uri: postgres://{{ matrix.bridge.slack.dbuser }}:{{ matrix.bridge.slack.dbpass }}@db-bridge-slack/{{ matrix.bridge.slack.dbname }}?sslmode=disable
|
||||
# Maximum number of connections. Mostly relevant for Postgres.
|
||||
max_open_conns: 20
|
||||
max_idle_conns: 2
|
||||
# Maximum connection idle time and lifetime before they're closed. Disabled if null.
|
||||
# Parsed with https://pkg.go.dev/time#ParseDuration
|
||||
max_conn_idle_time: null
|
||||
max_conn_lifetime: null
|
||||
|
||||
# The unique ID of this appservice.
|
||||
id: slack
|
||||
# Appservice bot details.
|
||||
bot:
|
||||
# Username of the appservice bot.
|
||||
username: slackbot
|
||||
# Display name and avatar for bot. Set to "remove" to remove display name/avatar, leave empty
|
||||
# to leave display name/avatar as-is.
|
||||
displayname: Slack bridge bot
|
||||
avatar: mxc://maunium.net/pVtzLmChZejGxLqmXtQjFxem
|
||||
# Whether or not to receive ephemeral events via appservice transactions.
|
||||
# Requires MSC2409 support (i.e. Synapse 1.22+).
|
||||
# You should disable bridge -> sync_with_custom_puppets when this is enabled.
|
||||
ephemeral_events: true
|
||||
|
||||
# Authentication tokens for AS <-> HS communication. Autogenerated; do not modify.
|
||||
as_token: "{{ matrix.bridge.slack.as_token }}"
|
||||
hs_token: "{{ matrix.bridge.slack.hs_token }}"
|
||||
|
||||
# Bridge config
|
||||
bridge:
|
||||
{% raw %}
|
||||
# Localpart template of MXIDs for Slack users.
|
||||
# {{.}} is replaced with the internal ID of the Slack user.
|
||||
username_template: slack_{{.}}
|
||||
# Displayname template for Slack users.
|
||||
# TODO: document variables
|
||||
displayname_template: '{{.DisplayName}} (Slack)'
|
||||
bot_displayname_template: '{{.Name}} (bot)'
|
||||
channel_name_template: '#{{.Name}}'
|
||||
{% endraw %}
|
||||
portal_message_buffer: 128
|
||||
# Should the bridge send a read receipt from the bridge bot when a message has been sent to Slack?
|
||||
delivery_receipts: true
|
||||
# Whether the bridge should send the message status as a custom com.beeper.message_send_status event.
|
||||
message_status_events: false
|
||||
# Whether the bridge should send error notices via m.notice events when a message fails to bridge.
|
||||
message_error_notices: true
|
||||
# Should the bridge sync with double puppeting to receive EDUs that aren't normally sent to appservices.
|
||||
sync_with_custom_puppets: false
|
||||
# Should the bridge update the m.direct account data event when double puppeting is enabled.
|
||||
# Note that updating the m.direct event is not atomic (except with mautrix-asmux)
|
||||
# and is therefore prone to race conditions.
|
||||
sync_direct_chat_list: false
|
||||
# Servers to always allow double puppeting from
|
||||
double_puppet_server_map:
|
||||
{{ matrix.baseurl }}: https://{{ matrix.baseurl }}
|
||||
# Allow using double puppeting from any server with a valid client .well-known file.
|
||||
double_puppet_allow_discovery: false
|
||||
# Shared secrets for https://github.com/devture/matrix-synapse-shared-secret-auth
|
||||
#
|
||||
# If set, double puppeting will be enabled automatically for local users
|
||||
# instead of users having to find an access token and run `login-matrix`
|
||||
# manually.
|
||||
login_shared_secret_map:
|
||||
{{ matrix.baseurl }}: "{{ matrix.authenticator.shared_secret }}"
|
||||
message_handling_timeout:
|
||||
# Send an error message after this timeout, but keep waiting for the response until the deadline.
|
||||
# This is counted from the origin_server_ts, so the warning time is consistent regardless of the source of delay.
|
||||
# If the message is older than this when it reaches the bridge, the message won't be handled at all.
|
||||
error_after: 10s
|
||||
# Drop messages after this timeout. They may still go through if the message got sent to the servers.
|
||||
# This is counted from the time the bridge starts handling the message.
|
||||
deadline: 60s
|
||||
|
||||
# The prefix for commands. Only required in non-management rooms.
|
||||
command_prefix: '!slack'
|
||||
|
||||
# Messages sent upon joining a management room.
|
||||
# Markdown is supported. The defaults are listed below.
|
||||
management_room_text:
|
||||
# Sent when joining a room.
|
||||
welcome: "Hello, I'm a Slack bridge bot."
|
||||
# Sent when joining a management room and the user is already logged in.
|
||||
welcome_connected: "Use `help` for help."
|
||||
# Sent when joining a management room and the user is not logged in.
|
||||
welcome_unconnected: "Use `help` for help, or `login-token` or `login-password` to log in."
|
||||
# Optional extra text sent when joining a management room.
|
||||
additional_help: ""
|
||||
backfill:
|
||||
# Allow backfilling at all? Requires MSC2716 support on homeserver.
|
||||
enable: true
|
||||
# If a backfilled chat is older than this number of hours, mark it as read even if it's unread on Slack.
|
||||
# Set to -1 to let any chat be unread.
|
||||
unread_hours_threshold: 720
|
||||
# Number of messages to immediately backfill when creating a portal.
|
||||
immediate_messages: 10
|
||||
# Settings for incremental backfill of history.
|
||||
incremental:
|
||||
# Maximum number of messages to backfill per batch.
|
||||
messages_per_batch: 100
|
||||
# The number of seconds to wait after backfilling the batch of messages.
|
||||
post_batch_delay: 20
|
||||
# The maximum number of messages to backfill per portal, split by the chat type.
|
||||
# If set to -1, all messages in the chat will eventually be backfilled.
|
||||
max_messages:
|
||||
# Channels
|
||||
channel: -1
|
||||
# Group direct messages
|
||||
group_dm: -1
|
||||
# 1:1 direct messages
|
||||
dm: -1
|
||||
|
||||
# End-to-bridge encryption support options.
|
||||
#
|
||||
# See https://docs.mau.fi/bridges/general/end-to-bridge-encryption.html for more info.
|
||||
encryption:
|
||||
# Allow encryption, work in group chat rooms with e2ee enabled
|
||||
allow: true
|
||||
# Default to encryption, force-enable encryption in all portals the bridge creates
|
||||
# This will cause the bridge bot to be in private chats for the encryption to work properly.
|
||||
default: true
|
||||
# Whether to use MSC2409/MSC3202 instead of /sync long polling for receiving encryption-related data.
|
||||
appservice: false
|
||||
# Require encryption, drop any unencrypted messages.
|
||||
require: false
|
||||
# Enable key sharing? If enabled, key requests for rooms where users are in will be fulfilled.
|
||||
# You must use a client that supports requesting keys from other users to use this feature.
|
||||
allow_key_sharing: true
|
||||
# What level of device verification should be required from users?
|
||||
#
|
||||
# Valid levels:
|
||||
# unverified - Send keys to all devices in the room.
|
||||
# cross-signed-untrusted - Require valid cross-signing, but trust all cross-signing keys.
|
||||
# cross-signed-tofu - Require valid cross-signing, trust cross-signing keys on first use (and reject changes).
|
||||
# cross-signed-verified - Require valid cross-signing, plus a valid user signature from the bridge bot.
|
||||
# Note that creating user signatures from the bridge bot is not currently possible.
|
||||
# verified - Require manual per-device verification
|
||||
# (currently only possible by modifying the `trust` column in the `crypto_device` database table).
|
||||
verification_levels:
|
||||
# Minimum level for which the bridge should send keys when bridging messages from Slack to Matrix.
|
||||
receive: unverified
|
||||
# Minimum level that the bridge should accept for incoming Matrix messages.
|
||||
send: unverified
|
||||
# Minimum level that the bridge should require for accepting key requests.
|
||||
share: cross-signed-tofu
|
||||
# Options for Megolm room key rotation. These options allow you to
|
||||
# configure the m.room.encryption event content. See:
|
||||
# https://spec.matrix.org/v1.3/client-server-api/#mroomencryption for
|
||||
# more information about that event.
|
||||
rotation:
|
||||
# Enable custom Megolm room key rotation settings. Note that these
|
||||
# settings will only apply to rooms created after this option is
|
||||
# set.
|
||||
enable_custom: false
|
||||
# The maximum number of milliseconds a session should be used
|
||||
# before changing it. The Matrix spec recommends 604800000 (a week)
|
||||
# as the default.
|
||||
milliseconds: 604800000
|
||||
# The maximum number of messages that should be sent with a given
|
||||
# session before changing it. The Matrix spec recommends 100 as the
|
||||
# default.
|
||||
messages: 100
|
||||
|
||||
# Settings for provisioning API
|
||||
provisioning:
|
||||
# Prefix for the provisioning API paths.
|
||||
prefix: /_matrix/provision
|
||||
# Shared secret for authentication. If set to "generate", a random secret will be generated,
|
||||
# or if set to "disable", the provisioning API will be disabled.
|
||||
shared_secret: disable
|
||||
|
||||
# Permissions for using the bridge.
|
||||
# Permitted values:
|
||||
# relay - Talk through the relaybot (if enabled), no access otherwise
|
||||
# user - Access to use the bridge to chat with a Slack account.
|
||||
# admin - User level and some additional administration tools
|
||||
# Permitted keys:
|
||||
# * - All Matrix users
|
||||
# domain - All users on that homeserver
|
||||
# mxid - Specific user
|
||||
permissions:
|
||||
"*": relay
|
||||
"{{ matrix.baseurl }}": user
|
||||
"@tobi:{{ matrix.baseurl }}": admin
|
||||
|
||||
{% raw %}
|
||||
logging:
|
||||
directory: ./logs
|
||||
file_name_format: '{{.Date}}-{{.Index}}.log'
|
||||
file_date_format: "2006-01-02"
|
||||
file_mode: 384
|
||||
timestamp_format: Jan _2, 2006 15:04:05
|
||||
print_level: debug
|
||||
print_json: false
|
||||
file_json: false
|
||||
{% endraw %}
|
@ -0,0 +1,26 @@
|
||||
$ANSIBLE_VAULT;1.2;AES256;secrets
|
||||
63643764313434366534636536373233613163353932353332353034386638623463323265356366
|
||||
3033666637643563393537636263366338643736303663620a376138656235653238386131623864
|
||||
33356331386265613436626337356436373439376434633135626339373931346166313834323938
|
||||
3833636339306137360a383230386236333632613037363139356230663563333266353030616133
|
||||
39343037343234386465646433613465646363343237346432373934623431336163303233323263
|
||||
65356133373264323664663238306266336332353632643533373038653938623939353931613964
|
||||
33383638653061313961363033343435316130666337393034356664653933626466623734643239
|
||||
63663864316464343631313533653931376561303830366665333635613666346139623937373663
|
||||
65393234326533623364626666353763396437386330386563333432306566316161626561363836
|
||||
62613630623864323163616639396233393031373734373332383064626562623563363266383065
|
||||
61613738323034313431333333656530346566333165363430333962373930363736396265636663
|
||||
65646632356265633665633930343231636138366364653038336563333234326139333437643063
|
||||
39653437303565343739306237653832616265323138643234313731343339353161333363366538
|
||||
35373864666436306438303037363766373532633533666335303137346337633265613630653637
|
||||
39356237663665333533363030653735333535653861353866363362343830366562383661666137
|
||||
37623436336531363230356233656235666238663537616437353636353732643639386534616561
|
||||
30656264316535636437653032343634643036363838626234303837393935393430323537643231
|
||||
64363534313033396362326530663430373661613362346364356262386433663731313866363438
|
||||
30653966343436656430326434646337386230333432383861333635326431346332663332313437
|
||||
35636162323834616437383563353932333137653639616532363162663365393437386333613439
|
||||
35343937333034303934623962653132323837643430303230383163393833316233636233643736
|
||||
33666530653033613762313364653734633765326432613032386535333335633834633430356165
|
||||
64396132386133326464376163326236373131316266343634306163313235616236383239366639
|
||||
38373235643763616236356266663534356230643131653130323338393262616337346635633835
|
||||
39386236643562653738383037376334303138623966316637386464386139613431
|
593
coreos-config/plays/services/matrix/mautrix-telegram/config.yaml
Normal file
593
coreos-config/plays/services/matrix/mautrix-telegram/config.yaml
Normal file
@ -0,0 +1,593 @@
|
||||
# Homeserver details
|
||||
homeserver:
|
||||
# The address that this appservice can use to connect to the homeserver.
|
||||
address: https://synapse.{{ matrix.baseurl }}
|
||||
# The domain of the homeserver (for MXIDs, etc).
|
||||
domain: {{ matrix.baseurl }}
|
||||
# Whether or not to verify the SSL certificate of the homeserver.
|
||||
# Only applies if address starts with https://
|
||||
verify_ssl: true
|
||||
# What software is the homeserver running?
|
||||
# Standard Matrix homeservers like Synapse, Dendrite and Conduit should just use "standard" here.
|
||||
software: standard
|
||||
# Number of retries for all HTTP requests if the homeserver isn't reachable.
|
||||
http_retry_count: 4
|
||||
# The URL to push real-time bridge status to.
|
||||
# If set, the bridge will make POST requests to this URL whenever a user's Telegram connection state changes.
|
||||
# The bridge will use the appservice as_token to authorize requests.
|
||||
status_endpoint: null
|
||||
# Endpoint for reporting per-message status.
|
||||
message_send_checkpoint_endpoint: null
|
||||
# Whether asynchronous uploads via MSC2246 should be enabled for media.
|
||||
# Requires a media repo that supports MSC2246.
|
||||
async_media: false
|
||||
# Application service host/registration related details
|
||||
# Changing these values requires regeneration of the registration.
|
||||
appservice:
|
||||
# The address that the homeserver can use to connect to this appservice.
|
||||
address: http://mautrix-telegram:29317
|
||||
# When using https:// the TLS certificate and key files for the address.
|
||||
tls_cert: false
|
||||
tls_key: false
|
||||
# The hostname and port where this appservice should listen.
|
||||
hostname: 0.0.0.0
|
||||
port: 29317
|
||||
# The maximum body size of appservice API requests (from the homeserver) in mebibytes
|
||||
# Usually 1 is enough, but on high-traffic bridges you might need to increase this to avoid 413s
|
||||
max_body_size: 1
|
||||
# The full URI to the database. SQLite and Postgres are supported.
|
||||
# Format examples:
|
||||
# SQLite: sqlite:///filename.db
|
||||
# Postgres: postgres://username:password@hostname/dbname
|
||||
database: postgres://{{ matrix.bridge.tg.dbuser }}:{{ matrix.bridge.tg.dbpass }}@db-bridge-tg/{{ matrix.bridge.tg.dbname }}
|
||||
# Additional arguments for asyncpg.create_pool() or sqlite3.connect()
|
||||
# https://magicstack.github.io/asyncpg/current/api/index.html#asyncpg.pool.create_pool
|
||||
# https://docs.python.org/3/library/sqlite3.html#sqlite3.connect
|
||||
# For sqlite, min_size is used as the connection thread pool size and max_size is ignored.
|
||||
# Additionally, SQLite supports init_commands as an array of SQL queries to run on connect (e.g. to set PRAGMAs).
|
||||
database_opts:
|
||||
min_size: 1
|
||||
max_size: 10
|
||||
# Public part of web server for out-of-Matrix interaction with the bridge.
|
||||
# Used for things like login if the user wants to make sure the 2FA password isn't stored in
|
||||
# the HS database.
|
||||
public:
|
||||
# Whether or not the public-facing endpoints should be enabled.
|
||||
enabled: false
|
||||
# The prefix to use in the public-facing endpoints.
|
||||
prefix: /public
|
||||
# The base URL where the public-facing endpoints are available. The prefix is not added
|
||||
# implicitly.
|
||||
external: https://example.com/public
|
||||
# Provisioning API part of the web server for automated portal creation and fetching information.
|
||||
# Used by things like mautrix-manager (https://github.com/tulir/mautrix-manager).
|
||||
provisioning:
|
||||
# Whether or not the provisioning API should be enabled.
|
||||
enabled: false
|
||||
# The prefix to use in the provisioning API endpoints.
|
||||
prefix: /_matrix/provision
|
||||
# The shared secret to authorize users of the API.
|
||||
# Set to "generate" to generate and save a new token.
|
||||
shared_secret: generate
|
||||
# The unique ID of this appservice.
|
||||
id: telegram
|
||||
# Username of the appservice bot.
|
||||
bot_username: telegrambot
|
||||
# Display name and avatar for bot. Set to "remove" to remove display name/avatar, leave empty
|
||||
# to leave display name/avatar as-is.
|
||||
bot_displayname: Telegram bridge bot
|
||||
bot_avatar: mxc://maunium.net/tJCRmUyJDsgRNgqhOgoiHWbX
|
||||
# Whether or not to receive ephemeral events via appservice transactions.
|
||||
# Requires MSC2409 support (i.e. Synapse 1.22+).
|
||||
# You should disable bridge -> sync_with_custom_puppets when this is enabled.
|
||||
ephemeral_events: true
|
||||
# Authentication tokens for AS <-> HS communication. Autogenerated; do not modify.
|
||||
as_token: "{{ matrix.bridge.tg.as_token }}"
|
||||
hs_token: "{{ matrix.bridge.tg.hs_token }}"
|
||||
# Prometheus telemetry config. Requires prometheus-client to be installed.
|
||||
metrics:
|
||||
enabled: false
|
||||
listen_port: 8000
|
||||
# Manhole config.
|
||||
manhole:
|
||||
# Whether or not opening the manhole is allowed.
|
||||
enabled: false
|
||||
# The path for the unix socket.
|
||||
path: /var/tmp/mautrix-telegram.manhole
|
||||
# The list of UIDs who can be added to the whitelist.
|
||||
# If empty, any UIDs can be specified in the open-manhole command.
|
||||
whitelist:
|
||||
- 0
|
||||
# Bridge config
|
||||
bridge:
|
||||
# Localpart template of MXIDs for Telegram users.
|
||||
# {userid} is replaced with the user ID of the Telegram user.
|
||||
username_template: "telegram_{userid}"
|
||||
# Localpart template of room aliases for Telegram portal rooms.
|
||||
# {groupname} is replaced with the name part of the public channel/group invite link ( https://t.me/{} )
|
||||
alias_template: "telegram_{groupname}"
|
||||
# Displayname template for Telegram users.
|
||||
# {displayname} is replaced with the display name of the Telegram user.
|
||||
displayname_template: "{displayname} (Telegram)"
|
||||
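# Illustrative example of the two localpart templates above (the ID and group
# name are placeholders, not taken from this deployment): Telegram user
# 12345678 is puppeted as @telegram_12345678:{{ matrix.baseurl }}, and a
# public group reachable at https://t.me/examplegroup gets the room alias
# #telegram_examplegroup:{{ matrix.baseurl }}.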
# Set the preferred order of user identifiers which to use in the Matrix puppet display name.
|
||||
# In the (hopefully unlikely) scenario that none of the given keys are found, the numeric user
|
||||
# ID is used.
|
||||
#
|
||||
# If the bridge is working properly, a phone number or a username should always be known, but
|
||||
# the other one can very well be empty.
|
||||
#
|
||||
# Valid keys:
|
||||
# "full name" (First and/or last name)
|
||||
# "full name reversed" (Last and/or first name)
|
||||
# "first name"
|
||||
# "last name"
|
||||
# "username"
|
||||
# "phone number"
|
||||
displayname_preference:
|
||||
- full name
|
||||
- username
|
||||
- phone number
|
||||
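# Illustrative fallback behaviour (names are placeholders): with the
# preference above, a contact with full name "Example User" becomes
# "Example User (Telegram)"; if only the username "exampleuser" is known, the
# result is "exampleuser (Telegram)", and with neither, the numeric user ID
# is used.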
# Maximum length of displayname
|
||||
displayname_max_length: 100
|
||||
# Remove avatars from Telegram ghost users when removed on Telegram. This is disabled by default
|
||||
# as there's no way to determine whether an avatar is removed or just hidden from some users. If
|
||||
# you're on a single-user instance, this should be safe to enable.
|
||||
allow_avatar_remove: false
|
||||
# Maximum number of members to sync per portal when starting up. Other members will be
|
||||
# synced when they send messages. The maximum is 10000, after which the Telegram server
|
||||
# will not send any more members.
|
||||
# -1 means no limit (which means it's limited to 10000 by the server)
|
||||
max_initial_member_sync: 100
|
||||
# Maximum number of participants in chats to bridge. Only applies when the portal is being created.
|
||||
# If there are more members when trying to create a room, the room creation will be cancelled.
|
||||
# -1 means no limit (which means all chats can be bridged)
|
||||
max_member_count: -1
|
||||
# Whether or not to sync the member list in channels.
|
||||
# If no channel admins have logged into the bridge, the bridge won't be able to sync the member
|
||||
# list regardless of this setting.
|
||||
sync_channel_members: true
|
||||
# Whether or not to skip deleted members when syncing members.
|
||||
skip_deleted_members: true
|
||||
# Whether or not to automatically synchronize contacts and chats of Matrix users logged into
|
||||
# their Telegram account at startup.
|
||||
startup_sync: true
|
||||
# Number of most recently active dialogs to check when syncing chats.
|
||||
# Set to 0 to remove limit.
|
||||
sync_update_limit: 0
|
||||
# Number of most recently active dialogs to create portals for when syncing chats.
|
||||
# Set to 0 to remove limit.
|
||||
sync_create_limit: 15
|
||||
# Should all chats be scheduled to be created later?
|
||||
# This is best used in combination with MSC2716 infinite backfill.
|
||||
sync_deferred_create_all: false
|
||||
# Whether or not to sync and create portals for direct chats at startup.
|
||||
sync_direct_chats: true
|
||||
# The maximum number of simultaneous Telegram deletions to handle.
|
||||
# A large number of simultaneous redactions could put strain on your homeserver.
|
||||
max_telegram_delete: 10
|
||||
# Whether or not to automatically sync the Matrix room state (mostly unpuppeted displaynames)
|
||||
# at startup and when creating a bridge.
|
||||
sync_matrix_state: true
|
||||
# Allow logging in within Matrix. If false, users can only log in using login-qr or the
|
||||
# out-of-Matrix login website (see appservice.public config section)
|
||||
allow_matrix_login: true
|
||||
# Whether or not to make portals of publicly joinable channels/supergroups publicly joinable on Matrix.
|
||||
public_portals: false
|
||||
# Whether or not to use /sync to get presence, read receipts and typing notifications
|
||||
# when double puppeting is enabled
|
||||
sync_with_custom_puppets: false
|
||||
# Whether or not to update the m.direct account data event when double puppeting is enabled.
|
||||
# Note that updating the m.direct event is not atomic (except with mautrix-asmux)
|
||||
# and is therefore prone to race conditions.
|
||||
sync_direct_chat_list: false
|
||||
# Servers to always allow double puppeting from
|
||||
double_puppet_server_map:
|
||||
{{ matrix.baseurl }}: https://{{ matrix.baseurl }}
|
||||
# Allow using double puppeting from any server with a valid client .well-known file.
|
||||
double_puppet_allow_discovery: false
|
||||
# Shared secrets for https://github.com/devture/matrix-synapse-shared-secret-auth
|
||||
#
|
||||
# If set, custom puppets will be enabled automatically for local users
|
||||
# instead of users having to find an access token and run `login-matrix`
|
||||
# manually.
|
||||
# If using this for other servers than the bridge's server,
|
||||
# you must also set the URL in the double_puppet_server_map.
|
||||
login_shared_secret_map:
|
||||
{{ matrix.baseurl }}: {{ matrix.authenticator.shared_secret }}
|
||||
# Set to false to disable link previews in messages sent to Telegram.
|
||||
telegram_link_preview: true
|
||||
# Whether or not the !tg join command should do an HTTP request
|
||||
# to resolve redirects in invite links.
|
||||
invite_link_resolve: false
|
||||
# Send captions in the same message as images. This will send data compatible with both MSC2530 and MSC3552.
|
||||
# This is currently not supported in most clients.
|
||||
caption_in_message: false
|
||||
# Maximum size of image in megabytes before sending to Telegram as a document.
|
||||
image_as_file_size: 10
|
||||
# Maximum number of pixels in an image before sending to Telegram as a document. Defaults to 4096x4096 = 16777216.
|
||||
image_as_file_pixels: 16777216
|
||||
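# Worked example of the pixel threshold above (made-up photo sizes): a
# 4000x4000 photo (16,000,000 px) stays under 16777216 and is sent as an
# image, while a 5000x4000 photo (20,000,000 px) exceeds the limit and is
# sent to Telegram as a document.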
# Enable experimental parallel file transfer, which makes uploads/downloads much faster by
|
||||
# streaming from/to Matrix and using many connections for Telegram.
|
||||
# Note that generating HQ thumbnails for videos is not possible with streamed transfers.
|
||||
# This option uses internal Telethon implementation details and may break with minor updates.
|
||||
parallel_file_transfer: false
|
||||
# Whether or not created rooms should have federation enabled.
|
||||
# If false, created portal rooms will never be federated.
|
||||
federate_rooms: false
|
||||
# Should the bridge send all unicode reactions as custom emoji reactions to Telegram?
|
||||
# By default, the bridge only uses custom emojis for unicode emojis that aren't allowed in reactions.
|
||||
always_custom_emoji_reaction: true
|
||||
# Settings for converting animated stickers.
|
||||
animated_sticker:
|
||||
# Format to which animated stickers should be converted.
|
||||
# disable - No conversion, send as-is (gzipped lottie)
|
||||
# png - converts to non-animated png (fastest),
|
||||
# gif - converts to animated gif
|
||||
# webm - converts to webm video, requires ffmpeg executable with vp9 codec and webm container support
|
||||
# webp - converts to animated webp, requires ffmpeg executable with webp codec/container support
|
||||
target: gif
|
||||
# Should video stickers be converted to the specified format as well?
|
||||
convert_from_webm: false
|
||||
# Arguments for converter. All converters take width and height.
|
||||
args:
|
||||
width: 256
|
||||
height: 256
|
||||
fps: 25 # only for webm, webp and gif (2, 5, 10, 20 or 25 recommended)
|
||||
# Settings for converting animated emoji.
|
||||
# Same as animated_sticker, but webm is not supported as the target
|
||||
# (because inline images can only contain images, not videos).
|
||||
animated_emoji:
|
||||
target: webp
|
||||
args:
|
||||
width: 64
|
||||
height: 64
|
||||
fps: 25
|
||||
# End-to-bridge encryption support options.
|
||||
#
|
||||
# See https://docs.mau.fi/bridges/general/end-to-bridge-encryption.html for more info.
|
||||
encryption:
|
||||
# Allow encryption, work in group chat rooms with e2ee enabled
|
||||
allow: true
|
||||
# Default to encryption, force-enable encryption in all portals the bridge creates
|
||||
# This will cause the bridge bot to be in private chats for the encryption to work properly.
|
||||
default: true
|
||||
# Whether to use MSC2409/MSC3202 instead of /sync long polling for receiving encryption-related data.
|
||||
appservice: false
|
||||
# Require encryption, drop any unencrypted messages.
|
||||
require: false
|
||||
# Enable key sharing? If enabled, key requests for rooms the user is in will be fulfilled.
|
||||
# You must use a client that supports requesting keys from other users to use this feature.
|
||||
allow_key_sharing: true
|
||||
# What level of device verification should be required from users?
|
||||
#
|
||||
# Valid levels:
|
||||
# unverified - Send keys to all devices in the room.
|
||||
# cross-signed-untrusted - Require valid cross-signing, but trust all cross-signing keys.
|
||||
# cross-signed-tofu - Require valid cross-signing, trust cross-signing keys on first use (and reject changes).
|
||||
# cross-signed-verified - Require valid cross-signing, plus a valid user signature from the bridge bot.
|
||||
# Note that creating user signatures from the bridge bot is not currently possible.
|
||||
# verified - Require manual per-device verification
|
||||
# (currently only possible by modifying the `trust` column in the `crypto_device` database table).
|
||||
verification_levels:
|
||||
# Minimum level the bridge should send keys to when bridging messages from Telegram to Matrix.
|
||||
receive: unverified
|
||||
# Minimum level that the bridge should accept for incoming Matrix messages.
|
||||
send: unverified
|
||||
# Minimum level that the bridge should require for accepting key requests.
|
||||
share: cross-signed-tofu
|
||||
# Options for Megolm room key rotation. These options allow you to
|
||||
# configure the m.room.encryption event content. See:
|
||||
# https://spec.matrix.org/v1.3/client-server-api/#mroomencryption for
|
||||
# more information about that event.
|
||||
rotation:
|
||||
# Enable custom Megolm room key rotation settings. Note that these
|
||||
# settings will only apply to rooms created after this option is
|
||||
# set.
|
||||
enable_custom: false
|
||||
# The maximum number of milliseconds a session should be used
|
||||
# before changing it. The Matrix spec recommends 604800000 (a week)
|
||||
# as the default.
|
||||
milliseconds: 604800000
|
||||
# The maximum number of messages that should be sent with a given
|
||||
# session before changing it. The Matrix spec recommends 100 as the
|
||||
# default.
|
||||
messages: 100
|
||||
# Whether or not to explicitly set the avatar and room name for private
|
||||
# chat portal rooms. This will be implicitly enabled if encryption.default is true.
|
||||
private_chat_portal_meta: false
|
||||
# Whether or not the bridge should send a read receipt from the bridge bot when a message has
|
||||
# been sent to Telegram.
|
||||
delivery_receipts: false
|
||||
# Whether or not delivery errors should be reported as messages in the Matrix room.
|
||||
delivery_error_reports: true
|
||||
# Whether the bridge should send the message status as a custom com.beeper.message_send_status event.
|
||||
message_status_events: false
|
||||
# Set this to true to tell the bridge to re-send m.bridge events to all rooms on the next run.
|
||||
# This field will automatically be changed back to false after the resend,
|
||||
# except if the config file is not writable.
|
||||
resend_bridge_info: false
|
||||
# When using double puppeting, should muted chats be muted in Matrix?
|
||||
mute_bridging: false
|
||||
# When using double puppeting, should pinned chats be moved to a specific tag in Matrix?
|
||||
# The favorites tag is `m.favourite`.
|
||||
pinned_tag: "m.favourite"
|
||||
# Same as above for archived chats, the low priority tag is `m.lowpriority`.
|
||||
archive_tag: "m.lowpriority"
|
||||
# Whether or not mute status and tags should only be bridged when the portal room is created.
|
||||
tag_only_on_create: true
|
||||
# Should leaving the room on Matrix make the user leave on Telegram?
|
||||
bridge_matrix_leave: true
|
||||
# Should the user be kicked out of all portals when logging out of the bridge?
|
||||
kick_on_logout: true
|
||||
# Should the "* user joined Telegram" notice always be marked as read automatically?
|
||||
always_read_joined_telegram_notice: true
|
||||
# Should the bridge auto-create a group chat on Telegram when a ghost is invited to a room?
|
||||
# Requires the user to have sufficient power level and double puppeting enabled.
|
||||
create_group_on_invite: true
|
||||
# Settings for backfilling messages from Telegram.
|
||||
backfill:
|
||||
# Allow backfilling at all?
|
||||
enable: true
|
||||
# Use MSC2716 for backfilling?
|
||||
#
|
||||
# This requires a server with MSC2716 support, which is currently an experimental feature in Synapse.
|
||||
# It can be enabled by setting experimental_features -> msc2716_enabled to true in homeserver.yaml.
|
||||
msc2716: false
|
||||
# Use double puppets for backfilling?
|
||||
#
|
||||
# If using MSC2716, the double puppets must be in the appservice's user ID namespace
|
||||
# (because the bridge can't use the double puppet access token with batch sending).
|
||||
#
|
||||
# Even without MSC2716, bridging old messages with correct timestamps requires the double
|
||||
# puppets to be in an appservice namespace, or the server to be modified to allow
|
||||
# overriding timestamps anyway.
|
||||
double_puppet_backfill: false
|
||||
# Whether or not to enable backfilling in normal groups.
|
||||
# Normal groups have numerous technical problems in Telegram, and backfilling normal groups
|
||||
# will likely cause problems if there are multiple Matrix users in the group.
|
||||
normal_groups: false
|
||||
# If a backfilled chat is older than this number of hours, mark it as read even if it's unread on Telegram.
|
||||
# Set to -1 to never mark backfilled chats as read automatically.
|
||||
unread_hours_threshold: 720
|
||||
# Forward backfilling limits. These apply to both MSC2716 and legacy backfill.
|
||||
#
|
||||
# Using a negative initial limit is not recommended, as it would try to backfill everything in a single batch.
|
||||
# MSC2716 and the incremental settings are meant for backfilling everything incrementally rather than at once.
|
||||
forward:
|
||||
# Number of messages to backfill immediately after creating a portal.
|
||||
initial_limit: 10
|
||||
# Number of messages to backfill when syncing chats.
|
||||
sync_limit: 100
|
||||
# Settings for incremental backfill of history. These only apply when using MSC2716.
|
||||
incremental:
|
||||
# Maximum number of messages to backfill per batch.
|
||||
messages_per_batch: 100
|
||||
# The number of seconds to wait after backfilling the batch of messages.
|
||||
post_batch_delay: 20
|
||||
# The maximum number of batches to backfill per portal, split by the chat type.
|
||||
# If set to -1, all messages in the chat will eventually be backfilled.
|
||||
max_batches:
|
||||
# Direct chats
|
||||
user: -1
|
||||
# Normal groups. Note that the normal_groups option above must be enabled
|
||||
# for these to be backfilled.
|
||||
normal_group: -1
|
||||
# Supergroups
|
||||
supergroup: 10
|
||||
# Broadcast channels
|
||||
channel: -1
|
||||
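# Rough upper bound implied by the settings above (a sketch, only relevant
# when MSC2716 backfill is actually enabled): with messages_per_batch: 100
# and max_batches.supergroup: 10, a supergroup is capped at about
# 10 * 100 = 1000 backfilled messages, while chats set to -1 keep
# backfilling until the history is exhausted.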
# Overrides for base power levels.
|
||||
initial_power_level_overrides:
|
||||
user: {}
|
||||
group: {}
|
||||
# Whether to bridge Telegram bot messages as m.notices or m.texts.
|
||||
bot_messages_as_notices: true
|
||||
bridge_notices:
|
||||
# Whether or not Matrix bot messages (type m.notice) should be bridged.
|
||||
default: false
|
||||
# List of user IDs for whom the previous flag is flipped.
|
||||
# e.g. if bridge_notices.default is false, notices from other users will not be bridged, but
|
||||
# notices from users listed here will be bridged.
|
||||
exceptions: []
|
||||
# An array of possible values for the $distinguisher variable in message formats.
|
||||
# Each user gets one of the values here, based on a hash of their user ID.
|
||||
# If the array is empty, the $distinguisher variable will also be empty.
|
||||
relay_user_distinguishers: ["\U0001F7E6", "\U0001F7E3", "\U0001F7E9", "⭕️", "\U0001F536", "⬛️", "\U0001F535", "\U0001F7E2"]
|
||||
# The formats to use when sending messages to Telegram via the relay bot.
|
||||
# Text msgtypes (m.text, m.notice and m.emote) support HTML, media msgtypes don't.
|
||||
#
|
||||
# Available variables:
|
||||
# $sender_displayname - The display name of the sender (e.g. Example User)
|
||||
# $sender_username - The username (Matrix ID localpart) of the sender (e.g. exampleuser)
|
||||
# $sender_mxid - The Matrix ID of the sender (e.g. @exampleuser:example.com)
|
||||
# $distinguisher - A random string from the options in the relay_user_distinguishers array.
|
||||
# $message - The message content
|
||||
message_formats:
|
||||
m.text: "$distinguisher <b>$sender_displayname</b>: $message"
|
||||
m.notice: "$distinguisher <b>$sender_displayname</b>: $message"
|
||||
m.emote: "* $distinguisher <b>$sender_displayname</b> $message"
|
||||
m.file: "$distinguisher <b>$sender_displayname</b> sent a file: $message"
|
||||
m.image: "$distinguisher <b>$sender_displayname</b> sent an image: $message"
|
||||
m.audio: "$distinguisher <b>$sender_displayname</b> sent an audio file: $message"
|
||||
m.video: "$distinguisher <b>$sender_displayname</b> sent a video: $message"
|
||||
m.location: "$distinguisher <b>$sender_displayname</b> sent a location: $message"
|
||||
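# Illustrative rendering of the formats above (assumed sender and message,
# not part of this config): an m.text "Hello" from Matrix user
# "Example User", who was assigned the 🟦 distinguisher, would reach Telegram
# roughly as:
#   🟦 Example User: Hello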
# Telegram doesn't have built-in emotes; this field specifies how m.emote messages from authenticated
|
||||
# users are sent to Telegram. All fields in message_formats are supported. Additionally, the
|
||||
# Telegram user info is available in the following variables:
|
||||
# $displayname - Telegram displayname
|
||||
# $username - Telegram username (may not exist)
|
||||
# $mention - Telegram @username or displayname mention (depending on which exists)
|
||||
emote_format: "* $mention $formatted_body"
|
||||
# The formats to use when sending state events to Telegram via the relay bot.
|
||||
#
|
||||
# Variables from `message_formats` that have the `sender_` prefix are available without the prefix.
|
||||
# In name_change events, `$prev_displayname` is the previous displayname.
|
||||
#
|
||||
# Set format to an empty string to disable the messages for that event.
|
||||
state_event_formats:
|
||||
join: "$distinguisher <b>$displayname</b> joined the room."
|
||||
leave: "$distinguisher <b>$displayname</b> left the room."
|
||||
name_change: "$distinguisher <b>$prev_displayname</b> changed their name to $distinguisher <b>$displayname</b>"
|
||||
# Filter rooms that can/can't be bridged. Can also be managed using the `filter` and
|
||||
# `filter-mode` management commands.
|
||||
#
|
||||
# Filters do not affect direct chats.
|
||||
# An empty blacklist will essentially disable the filter.
|
||||
filter:
|
||||
# Filter mode to use. Either "blacklist" or "whitelist".
|
||||
# If the mode is "blacklist", the listed chats will never be bridged.
|
||||
# If the mode is "whitelist", only the listed chats can be bridged.
|
||||
mode: blacklist
|
||||
# The list of group/channel IDs to filter.
|
||||
list: []
|
||||
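# Illustrative example (placeholder ID, not a real chat): to blacklist a
# single group you would put its Telegram chat ID in the list, e.g.
#   list: [123456789]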
# The prefix for commands. Only required in non-management rooms.
|
||||
command_prefix: "!tg"
|
||||
# Messages sent upon joining a management room.
|
||||
# Markdown is supported. The defaults are listed below.
|
||||
management_room_text:
|
||||
# Sent when joining a room.
|
||||
welcome: "Hello, I'm a Telegram bridge bot."
|
||||
# Sent when joining a management room and the user is already logged in.
|
||||
welcome_connected: "Use `help` for help."
|
||||
# Sent when joining a management room and the user is not logged in.
|
||||
welcome_unconnected: "Use `help` for help or `login` to log in."
|
||||
# Optional extra text sent when joining a management room.
|
||||
additional_help: ""
|
||||
# Send each message separately (for readability in some clients)
|
||||
management_room_multiple_messages: false
|
||||
# Permissions for using the bridge.
|
||||
# Permitted values:
|
||||
# relaybot - Only use the bridge via the relaybot, no access to commands.
|
||||
# user - Relaybot level + access to commands to create bridges.
|
||||
# puppeting - User level + logging in with a Telegram account.
|
||||
# full - Full access to use the bridge, i.e. previous levels + Matrix login.
|
||||
# admin - Full access to use the bridge and some extra administration commands.
|
||||
# Permitted keys:
|
||||
# * - All Matrix users
|
||||
# domain - All users on that homeserver
|
||||
# mxid - Specific user
|
||||
permissions:
|
||||
"*": "relaybot"
|
||||
"{{ matrix.baseurl }}": "full"
|
||||
"@tobi:{{ matrix.baseurl }}": "admin"
|
||||
# Options related to the message relay Telegram bot.
|
||||
relaybot:
|
||||
private_chat:
|
||||
# List of users to invite to the portal when someone starts a private chat with the bot.
|
||||
# If empty, private chats with the bot won't create a portal.
|
||||
invite: []
|
||||
# Whether or not to bridge state change messages in relaybot private chats.
|
||||
state_changes: true
|
||||
# When private_chat_invite is empty, this message is sent to users who /start the
|
||||
# relaybot. Telegram's "markdown" is supported.
|
||||
message: This is a Matrix bridge relaybot and does not support direct chats
|
||||
# List of users to invite to all group chat portals created by the bridge.
|
||||
group_chat_invite: []
|
||||
# Whether or not the relaybot should not bridge events in unbridged group chats.
|
||||
# If false, portals will be created when the relaybot receives messages, just like normal
|
||||
# users. This behavior is usually not desirable, as it interferes with manually bridging
|
||||
# the chat to another room.
|
||||
ignore_unbridged_group_chat: true
|
||||
# Whether or not to allow creating portals from Telegram.
|
||||
authless_portals: true
|
||||
# Whether or not to allow Telegram group admins to use the bot commands.
|
||||
whitelist_group_admins: true
|
||||
# Whether or not to ignore incoming events sent by the relay bot.
|
||||
ignore_own_incoming_events: true
|
||||
# List of usernames/user IDs who are also allowed to use the bot commands.
|
||||
whitelist:
|
||||
- myusername
|
||||
- 12345678
|
||||
# Telegram config
|
||||
telegram:
|
||||
# Get your own API keys at https://my.telegram.org/apps
|
||||
api_id: {{ matrix.bridge.tg.api_id }}
|
||||
api_hash: {{ matrix.bridge.tg.api_hash }}
|
||||
# (Optional) Create your own bot at https://t.me/BotFather
|
||||
bot_token: disabled
|
||||
# Should the bridge request missed updates from Telegram when restarting?
|
||||
catch_up: true
|
||||
# Should incoming updates be handled sequentially to make sure order is preserved on Matrix?
|
||||
sequential_updates: true
|
||||
exit_on_update_error: false
|
||||
# Telethon connection options.
|
||||
connection:
|
||||
# The timeout in seconds to be used when connecting.
|
||||
timeout: 120
|
||||
# How many times the reconnection should retry, either on the initial connection or when
|
||||
# Telegram disconnects us. May be set to a negative or null value for infinite retries, but
|
||||
# this is not recommended, since the program can get stuck in an infinite loop.
|
||||
retries: 5
|
||||
# The delay in seconds to sleep between automatic reconnections.
|
||||
retry_delay: 1
|
||||
# The threshold below which the library should automatically sleep on flood wait errors
|
||||
# (inclusive). For instance, if a FloodWaitError for 17s occurs and flood_sleep_threshold
|
||||
# is 20s, the library will sleep automatically. If the error was for 21s, it would raise
|
||||
# the error instead. Values larger than a day (86400) will be changed to a day.
|
||||
flood_sleep_threshold: 60
|
||||
# How many times a request should be retried. Requests are retried when Telegram is having
|
||||
# internal issues, when there is a FloodWaitError less than flood_sleep_threshold, or when
|
||||
# there's a migrate error. May take a negative or null value for infinite retries, but this
|
||||
# is not recommended, since some requests can always trigger a call to fail (such as searching
|
||||
# for messages).
|
||||
request_retries: 5
|
||||
# Device info sent to Telegram.
|
||||
device_info:
|
||||
# "auto" = OS name+version.
|
||||
device_model: mautrix-telegram
|
||||
# "auto" = Telethon version.
|
||||
system_version: auto
|
||||
# "auto" = mautrix-telegram version.
|
||||
app_version: auto
|
||||
lang_code: en
|
||||
system_lang_code: en
|
||||
# Custom server to connect to.
|
||||
server:
|
||||
# Set to true to use these server settings. If false, will automatically
|
||||
# use production server assigned by Telegram. Set to false in production.
|
||||
enabled: false
|
||||
# The DC ID to connect to.
|
||||
dc: 2
|
||||
# The IP to connect to.
|
||||
ip: 149.154.167.40
|
||||
# The port to connect to. 443 may not work; 80 is better, and both are equally secure.
|
||||
port: 80
|
||||
# Telethon proxy configuration.
|
||||
# You must install PySocks from pip for proxies to work.
|
||||
proxy:
|
||||
# Allowed types: disabled, socks4, socks5, http, mtproxy
|
||||
type: disabled
|
||||
# Proxy IP address and port.
|
||||
address: 127.0.0.1
|
||||
port: 1080
|
||||
# Whether or not to perform DNS resolving remotely. Only for socks/http proxies.
|
||||
rdns: true
|
||||
# Proxy authentication (optional). Put MTProxy secret in password field.
|
||||
username: ""
|
||||
password: ""
|
||||
# Python logging configuration.
|
||||
#
|
||||
# See section 16.7.2 of the Python documentation for more info:
|
||||
# https://docs.python.org/3.6/library/logging.config.html#configuration-dictionary-schema
|
||||
logging:
|
||||
version: 1
|
||||
formatters:
|
||||
colored:
|
||||
(): mautrix_telegram.util.ColorFormatter
|
||||
format: "[%(asctime)s] [%(levelname)s@%(name)s] %(message)s"
|
||||
normal:
|
||||
format: "[%(asctime)s] [%(levelname)s@%(name)s] %(message)s"
|
||||
handlers:
|
||||
console:
|
||||
class: logging.StreamHandler
|
||||
formatter: colored
|
||||
loggers:
|
||||
mau:
|
||||
level: DEBUG
|
||||
telethon:
|
||||
level: INFO
|
||||
aiohttp:
|
||||
level: INFO
|
||||
root:
|
||||
level: DEBUG
|
||||
handlers: [console]
|
@ -0,0 +1,31 @@
|
||||
$ANSIBLE_VAULT;1.2;AES256;secrets
|
||||
31303639303562306630323132376333316332636534613834326662396237396634313233646364
|
||||
6335353833616135373439633136356339333737363437660a316634366334376339656466646437
|
||||
39323131363163393931356331306434613035626239356631303032646664303838386635613930
|
||||
6232663031663765370a653936623761313937383233313739313166353335346465363265613762
|
||||
35643335646637343534373966626632336363646231353732643831346563356464386133393166
|
||||
32613134656431656561316335656463653462656166373433386633666338633132663032633461
|
||||
66376265633233323662313930323737316166613262383434626264353462386236636139383835
|
||||
33613830316361373434623435376162653930616631323764653539306235363530326165353037
|
||||
32303432356630376363613839313831363537363735613833306163616130336631386337366234
|
||||
33373633306161653163333635366637313266346634656633376237346566663461353962376239
|
||||
34386237373565313362383532363931333337366336316363663734343333386663653466396139
|
||||
36633735356561346531376337346635383666376635346361333162376339333839306632666562
|
||||
63363761623136643031653030666437306361396232383738366533396561373932323563363566
|
||||
38306333393662333634613139643930626664666139363039333735363538396339373634356365
|
||||
66633637316432323762353964313237396338613834336532636164333564363839353061336636
|
||||
63316163626334353231386463313535313866336431613234353533636533343662653933393132
|
||||
37353065333431366662363530333863646131313737336538396332396238656239366531366337
|
||||
63633563636531616664313930626266323266613466656636636361653731623666636333666164
|
||||
39356535363939653232326633383837666262643834326137646363393935613132366663396364
|
||||
30666266366163316563613665356535633766626335343762333765643837373034646633336432
|
||||
64373366313962333563336535346436346536386633343366336535363236306338343832373763
|
||||
36663663353533383939323234333535316162303033313833616533373237613335303662393032
|
||||
66316163343938383330663133613333346535393264636264366533343938653730316163366363
|
||||
66373866316264656361613935383334323133636164366630333264343931663461333138656131
|
||||
31353631393336323166663765613461356437306234653263393030316564363431353566316531
|
||||
35336665633133386134656361323063303531336263643764353666636364343537363136666632
|
||||
66333033373766336230393131343434666536653061353032663264636565636361336138653931
|
||||
34303233613637633165303431626361623132363530666238386336383463656136383965343563
|
||||
63616131376239356163353464333864363164363666646435353038323565386536326639366565
|
||||
3134646366666134646665366533396466366233343666613761
|
@ -0,0 +1,122 @@
|
||||
# Configuration file for Synapse.
|
||||
#
|
||||
# This is a YAML file: see [1] for a quick introduction. Note in particular
|
||||
# that *indentation is important*: all the elements of a list or dictionary
|
||||
# should have the same indentation.
|
||||
#
|
||||
# [1] https://docs.ansible.com/ansible/latest/reference_appendices/YAMLSyntax.html
|
||||
#
|
||||
# For more information on how to configure Synapse, including a complete accounting of
|
||||
# each option, go to docs/usage/configuration/config_documentation.md or
|
||||
# https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html
|
||||
server_name: "{{ matrix.baseurl }}"
|
||||
pid_file: /data/homeserver.pid
|
||||
enable_metrics: true
|
||||
listeners:
|
||||
- port: 8008
|
||||
tls: false
|
||||
type: http
|
||||
x_forwarded: true
|
||||
resources:
|
||||
- names: [client, federation]
|
||||
compress: false
|
||||
- port: 9091
|
||||
tls: false
|
||||
type: metrics
|
||||
database:
|
||||
name: psycopg2
|
||||
args:
|
||||
user: {{ matrix.db.user }}
|
||||
password: {{ matrix.db.password }}
|
||||
database: {{ matrix.db.database }}
|
||||
host: db
|
||||
cp_min: 5
|
||||
cp_max: 10
|
||||
log_config: "/config/tobiasmanske.de.log.config"
|
||||
media_store_path: /data/media_store
|
||||
report_stats: true
|
||||
macaroon_secret_key: "{{ matrix.secrets.macaroon }}"
|
||||
form_secret: "{{ matrix.secrets.form }}"
|
||||
signing_key_path: "/config/tobiasmanske.de.signing.key"
|
||||
trusted_key_servers:
|
||||
- server_name: "matrix.org"
|
||||
oidc_providers:
|
||||
- idp_id: keycloak
|
||||
idp_name: "KeyCloak"
|
||||
issuer: "{{ matrix.oidc.issuer }}"
|
||||
client_id: "{{ matrix.oidc.client_id }}"
|
||||
client_secret: "{{ matrix.oidc.client_secret }}"
|
||||
scopes: ["openid", "profile"]
|
||||
user_mapping_provider:
|
||||
config:
|
||||
{% raw %}
|
||||
localpart_template: "{{ user.mx_localpart }}"
|
||||
display_name_template: "{{ user.name }}"
|
||||
{% endraw %}
|
||||
backchannel_logout_enabled: true # Optional
|
||||
|
||||
enable_registration: true
|
||||
registration_requires_token: true
|
||||
registration_shared_secret: "{{ matrix.secrets.registration }}"
|
||||
password_config:
|
||||
enabled: true
|
||||
|
||||
redis:
|
||||
enabled: true
|
||||
host: redis
|
||||
port: 6379
|
||||
|
||||
app_service_config_files:
|
||||
- /data/reg-mautrix-tg.yaml
|
||||
- /data/reg-mautrix-slack.yaml
|
||||
- /data/reg-mautrix-signal.yaml
|
||||
|
||||
rc_message:
|
||||
per_second: 100
|
||||
burst_count: 100
|
||||
rc_joins:
|
||||
local:
|
||||
per_second: 100
|
||||
burst_count: 100
|
||||
server_notices:
|
||||
system_mxid_localpart: "server"
|
||||
system_mxid_display_name: "Server Notices"
|
||||
system_mxid_avatar_url: "mxc://unruhig.eu/khyOCChmyYSOsIFIbUWGGEWq"
|
||||
room_name: "Server Notices"
|
||||
|
||||
modules:
|
||||
- module: shared_secret_authenticator.SharedSecretAuthProvider
|
||||
config:
|
||||
shared_secret: "{{ matrix.authenticator.shared_secret }}"
|
||||
|
||||
# By default, only login requests of type `com.devture.shared_secret_auth` are supported.
|
||||
# Below, we explicitly enable support for the old `m.login.password` login type,
|
||||
# which was used in v1 of matrix-synapse-shared-secret-auth and still widely supported by external software.
|
||||
# If you don't need such legacy support, consider setting this to `false` or omitting it entirely.
|
||||
m_login_password_support_enabled: true
|
||||
|
||||
# By default, only login requests of type `com.devture.shared_secret_auth` are supported.
|
||||
# Advertising support for such an authentication type causes a problem with Element, however.
|
||||
# See: https://github.com/vector-im/element-web/issues/19605
|
||||
#
|
||||
# Uncomment the line below to disable `com.devture.shared_secret_auth` support.
|
||||
# You will then need to:
|
||||
# - have `m_login_password_support_enabled: true` to enable the `m.login.password` login type
|
||||
# - authenticate using `m.login.password` requests, instead of `com.devture.shared_secret_auth` requests
|
||||
# com_devture_shared_secret_auth_support_enabled: false
|
||||
|
||||
media_storage_providers:
|
||||
- module: s3_storage_provider.S3StorageProviderBackend
|
||||
store_local: True
|
||||
store_remote: True
|
||||
store_synchronous: True
|
||||
config:
|
||||
bucket: "{{ matrix.storage.s3.bucket }}"
|
||||
# All of the below options are optional, for use with non-AWS S3-like
|
||||
# services, or to specify access tokens here instead of some external method.
|
||||
endpoint_url: "{{ matrix.storage.s3.endpoint_url }}"
|
||||
access_key_id: "{{ matrix.storage.s3.access_key_id }}"
|
||||
secret_access_key: "{{ matrix.storage.s3.secret_access_key }}"
|
||||
|
||||
|
||||
# vim:ft=yaml
|
@ -0,0 +1,32 @@
|
||||
version: 1
|
||||
|
||||
formatters:
|
||||
precise:
|
||||
|
||||
format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s'
|
||||
|
||||
|
||||
handlers:
|
||||
|
||||
|
||||
console:
|
||||
class: logging.StreamHandler
|
||||
formatter: precise
|
||||
|
||||
|
||||
|
||||
loggers:
|
||||
synapse.storage.SQL:
|
||||
# beware: increasing this to DEBUG will make synapse log sensitive
|
||||
# information such as access tokens.
|
||||
level: INFO
|
||||
|
||||
|
||||
root:
|
||||
level: WARNING
|
||||
|
||||
|
||||
handlers: [console]
|
||||
|
||||
|
||||
disable_existing_loggers: false
|
@ -0,0 +1,8 @@
|
||||
$ANSIBLE_VAULT;1.2;AES256;secrets
|
||||
64326434386632376335333966336365333663393130323464333266383639383264616662623333
|
||||
6437306539633766376336663263393038306162333234340a383237386331636366616266316265
|
||||
39626638623562623835633035643231656263653437346266333264643830323062353930356462
|
||||
3936633165633434320a656463656536383539346138383630343137383861613538323735393131
|
||||
61383237626533316433633866396434663230633239396661333831653531363732646561656164
|
||||
35353264613364613832653536333632356132666434616134316339383934616264323261366366
|
||||
633838383264646531663039343639383036
|
1
coreos-config/plays/services/maubot/.env
Normal file
1
coreos-config/plays/services/maubot/.env
Normal file
@ -0,0 +1 @@
|
||||
COMPOSE_PROJECT_NAME=maubot
|
11
coreos-config/plays/services/maubot/docker-compose.yaml
Normal file
11
coreos-config/plays/services/maubot/docker-compose.yaml
Normal file
@ -0,0 +1,11 @@
|
||||
services:
|
||||
maubot:
|
||||
image: dock.mau.dev/maubot/maubot:latest
|
||||
restart: unless-stopped
|
||||
ports:
|
||||
- "{{ maubot.port }}:29316"
|
||||
volumes:
|
||||
- data:/data:z
|
||||
|
||||
volumes:
|
||||
data:
|
1
coreos-config/plays/services/miniflux/.env
Normal file
1
coreos-config/plays/services/miniflux/.env
Normal file
@ -0,0 +1 @@
|
||||
COMPOSE_PROJECT_NAME=miniflux
|
67
coreos-config/plays/services/miniflux/docker-compose.yaml
Normal file
67
coreos-config/plays/services/miniflux/docker-compose.yaml
Normal file
@ -0,0 +1,67 @@
|
||||
---
|
||||
version: '3'
|
||||
services:
|
||||
miniflux:
|
||||
image: miniflux/miniflux:latest
|
||||
restart: unless-stopped
|
||||
depends_on:
|
||||
db:
|
||||
condition: service_healthy
|
||||
environment:
|
||||
- FETCH_YOUTUBE_WATCH_TIME=1
|
||||
- DATABASE_URL=postgres://{{ miniflux.db.user }}:{{ miniflux.db.password }}@db/{{ miniflux.db.name }}?sslmode=disable
|
||||
- RUN_MIGRATIONS=1
|
||||
- CREATE_ADMIN=1
|
||||
- ADMIN_USERNAME={{ miniflux.admin.user }}
|
||||
- ADMIN_PASSWORD={{ miniflux.admin.password }}
|
||||
- BASE_URL=https://rss.tobiasmanske.de
|
||||
- CLEANUP_ARCHIVE_READ_DAYS=-1
|
||||
- OAUTH2_CLIENT_ID={{ miniflux.oauth.client_id }}
|
||||
- OAUTH2_CLIENT_SECRET={{ miniflux.oauth.client_secret }}
|
||||
- OAUTH2_OIDC_DISCOVERY_ENDPOINT={{ miniflux.oauth.discovery_endpoint }}
|
||||
- OAUTH2_PROVIDER=oidc
|
||||
- OAUTH2_REDIRECT_URL={{ miniflux.oauth.redirect_url }}
|
||||
- METRICS_COLLECTOR=1
|
||||
- METRICS_ALLOWED_NETWORKS=0.0.0.0/0
|
||||
labels:
|
||||
- "traefik.enable=true"
|
||||
- "traefik.http.routers.miniflux.rule=Host(`rss.tobiasmanske.de`)"
|
||||
- "traefik.http.routers.miniflux.entryPoints=websecure"
|
||||
- "traefik.http.routers.miniflux.middlewares=deny-metrics@file"
|
||||
- "traefik.http.services.miniflux.loadbalancer.server.port=8080"
|
||||
- "prometheus-scrape.enabled=true"
|
||||
- "prometheus-scrape.port=8080"
|
||||
networks:
|
||||
- backend
|
||||
- gateway
|
||||
- pantalaimon
|
||||
|
||||
db:
|
||||
image: postgres:13
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
- POSTGRES_USER={{ miniflux.db.user }}
|
||||
- POSTGRES_PASSWORD={{ miniflux.db.password }}
|
||||
labels:
|
||||
- "com.centurylinklabs.watchtower.scope=update"
|
||||
volumes:
|
||||
- database:/var/lib/postgresql/data
|
||||
healthcheck:
|
||||
test: ["CMD", "pg_isready", "-U", "miniflux"]
|
||||
interval: 10s
|
||||
start_period: 30s
|
||||
networks:
|
||||
- backend
|
||||
|
||||
volumes:
|
||||
database:
|
||||
|
||||
networks:
|
||||
backend:
|
||||
internal: true
|
||||
gateway:
|
||||
external: true
|
||||
name: gateway
|
||||
pantalaimon:
|
||||
external: true
|
||||
...
|
3
coreos-config/plays/services/minio/.env
Normal file
3
coreos-config/plays/services/minio/.env
Normal file
@ -0,0 +1,3 @@
|
||||
COMPOSE_PROJECT_NAME=minio
|
||||
MINIO_URL=s3.tobiasmanske.de
|
||||
DASHBOARD_URL=minio.tobiasmanske.de
|
42
coreos-config/plays/services/minio/docker-compose.yaml
Normal file
42
coreos-config/plays/services/minio/docker-compose.yaml
Normal file
@ -0,0 +1,42 @@
|
||||
---
|
||||
version: "3.9"
|
||||
|
||||
services:
|
||||
minio:
|
||||
image: minio/minio:latest
|
||||
restart: always
|
||||
networks:
|
||||
- gateway
|
||||
environment:
|
||||
- "MINIO_ROOT_USER={{ minio.user | mandatory }}"
|
||||
- "MINIO_ROOT_PASSWORD={{ minio.password | mandatory }}"
|
||||
- "MINIO_SERVER_URL=https://${MINIO_URL}"
|
||||
- "MINIO_BROWSER_REDIRECT_URL=https://${DASHBOARD_URL}"
|
||||
volumes:
|
||||
- data:/data
|
||||
labels:
|
||||
- "traefik.enable=true"
|
||||
- "traefik.http.routers.minio.rule=Host(`${MINIO_URL}`)||Host(`s3.unruhig.eu`)"
|
||||
- "traefik.http.routers.minio.entryPoints=websecure"
|
||||
- "traefik.http.services.minio.loadbalancer.server.port=9000"
|
||||
- "traefik.http.routers.minio.service=minio"
|
||||
- "traefik.http.routers.minio-dashboard.rule=Host(`${DASHBOARD_URL}`)"
|
||||
- "traefik.http.routers.minio-dashboard.entryPoints=websecure"
|
||||
- "traefik.http.services.minio-dashboard.loadbalancer.server.port=9001"
|
||||
- "traefik.http.routers.minio-dashboard.service=minio-dashboard"
|
||||
command: "server /data --console-address ':9001' --anonymous"
|
||||
healthcheck:
|
||||
test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
|
||||
interval: 30s
|
||||
timeout: 20s
|
||||
retries: 3
|
||||
|
||||
|
||||
networks:
|
||||
gateway:
|
||||
external: true
|
||||
|
||||
volumes:
|
||||
data:
|
||||
|
||||
...
|
1
coreos-config/plays/services/nextcloud/.env
Normal file
1
coreos-config/plays/services/nextcloud/.env
Normal file
@ -0,0 +1 @@
|
||||
COMPOSE_PROJECT_NAME=nextcloud
|
114
coreos-config/plays/services/nextcloud/config.php
Normal file
114
coreos-config/plays/services/nextcloud/config.php
Normal file
@ -0,0 +1,114 @@
|
||||
$ANSIBLE_VAULT;1.2;AES256;secrets
|
||||
33303039656232646334643237623337356630636666303664373362346436393332663238356636
|
||||
3861636233323335653863383236623739376363303263390a396232343333313631353832616633
|
||||
36356233383664356533383733396330303462633762623039363361343738336238386230376532
|
||||
3465343039663938630a613262333330393166353930326430316461653765623935633361383036
|
||||
61336631633365396532383662303532613265633664313533623239346438373664353734306433
|
||||
64303239303465383165636634313239623766303136393535363236663762373438303939396561
|
||||
37663335303666363066623936326531333463363730356534393066323333346665656363323338
|
||||
64326662303263353236396461663636303037633835303366353063393763666135623135366562
|
||||
61653136333238393864353436396430303965626439396430356536303331623134343361363839
|
||||
38366533653939663734303039356666663730363237633966363338656134313364343637666338
|
||||
32323264613935636336376339393064366666353562313133326435306235636633353665393237
|
||||
34366565646565656638313739633866333430326531623166373865306362343535626130343039
|
||||
65653661396637353031343466376135636165363962636230376666303262393235363964336639
|
||||
62303963343364643633623366643265653430323135626366653436663261303838306434663064
|
||||
66663262633166396137306131383135356439366364363030626135623162393665333134663565
|
||||
34356135623835333330613631663936383765643330643065626636623432356664313166363366
|
||||
63373731393032316331626632306666396264373038663238363461363366383738303137333534
|
||||
38326664353932613462653661383064366162613466666338663264333633643231343830333337
|
||||
63353035383332393639663662663139643436323561356161313032643438633462643339626465
|
||||
35376165396261336132383034393362643239633433626133323531353535306463383830343838
|
||||
63323731666165646665613132333563386438323639633333653637613035636536313334386366
|
||||
65656266376161386161313761333861386438343833326333666262386663623736326636616537
|
||||
63613764373666383237656134313061646432313236306665656537323666636630626439383162
|
||||
32383534346538323530343063313838326664653233623231653937336230383135613465653439
|
||||
38373463646138356464616638303839633337356430316632303030626365623032353761326538
|
||||
31643238356430386662353236303030313034653531343636353066306464363934656438663031
|
||||
34656330633664343063666562376662303836613066623935386533376633643164323262366138
|
||||
39633235353163316463343433653738343033333263383164353234353661653038373231353263
|
||||
39356363386338323737663139373961393439346239643166653866323830323462656130336563
|
||||
31643433343336653864653030343033666162306662393133303066346235663330343265353336
|
||||
35313664303164623461326631303430353136616138663766616237663632373233316535333231
|
||||
66636238313833336261386663653563346538636361626261653737633366373439613834346561
|
||||
34656437316565363939393761313962653138326130653163383032643465633463656433356133
|
||||
31373434663363663637393035613466643865623030633466326666613061316136663761326439
|
||||
30313632666362663938663233666539353731373232393263653062383036323336633061383030
|
||||
66393062386265303338306132313233636265316538333465303764373932633332613666303933
|
||||
63333064626238616363333866626631313764313532646436356562613834633138336539306664
|
||||
32316434373634623962323830643961316665343734323563316366656536353131653363383038
|
||||
64333430663635323136666231363637346532363930303033626433396333353335363865316665
|
||||
34323938316461383339396263393566356338646537623338333363353734316664323865663864
|
||||
32373930653963623733666261616366386165373039643665663664316662663761316536376231
|
||||
62323662343933326337663566643664353861646361663438323734643130323362666630313131
|
||||
64383865336130313136346332656462633732343466343631346233356461656137303561353932
|
||||
30333065653934333665323838323032376366376632666438306265356363656134366139333463
|
||||
62363135363065633030393032303035663934383536613230646563636236336166363532633139
|
||||
31313531303766356565323537333534633763663638303232656235316566633436643739663061
|
||||
31646638636335333431373633656563363533373838306163326664613663626432613739376561
|
||||
33303636346639313636616464396231383166303761323535616338626530363861656331303738
|
||||
31346438346431343166653231373538623064326430663863663039643536356333623263623630
|
||||
37323139396366613831623830313336303162643935623436643637353561383163633661303237
|
||||
61363436323262316633643036646634303537613939353564326530373138373330396365336530
|
||||
32626133663032383765386432626465306338373031373238653966333033393431393561386335
|
||||
66613163343366323238666636373532326261616462333139653163613636313662623835623434
|
||||
35383936306438333461313563346634393630396237613432316230353731313834323133376239
|
||||
30643965613031366530383564623361646331323235666339623530333962643830366563313463
|
||||
37396638346266666362393038373034313932353130633764343430366164656136383032363565
|
||||
33316632613031346663386636376561383237366261336436653463626363306663663231613035
|
||||
38653939643735316131393636373339383338363461663134613831396438623764623239636365
|
||||
33616466393365373636643135613763633035396666376166616433643062363463633436396364
|
||||
35633864633939303264343365316361653832393462613761323361656563343839613338653336
|
||||
66333664643036343361666162633035373435343931333666353866633263646530656163626234
|
||||
61336464663232323030613564383335323131656333636633633735306263303637643662376330
|
||||
39373733616264656661653836326238323461306133393565303930333662383335393563353965
|
||||
33643431623862313436663939616437313266343665356132393066373335656561643338633465
|
||||
63373630643361633730363066383731366665393235396130326130653837656633316636653864
|
||||
30613237393836383262613263393066663331393736356264663732623964336266656633333633
|
||||
32663737663866383235356661343839666230636338633366393935663537363331333366396331
|
||||
33663834646436303033396536353036326637383538396562613566353738303332373763333633
|
||||
63663432366631323431666230373035383837626333386331356530626533363434306661663038
|
||||
34376163343939386463313335623364653630316636623235653066313939303935613765303961
|
||||
36646631316334633762643037353034373338326631666130366462636461393830663238393437
|
||||
37623161613863656637333065343631333638663639633130633239326562656535303031636662
|
||||
39353134326265616665376432396633396566663138326334386530396534343965656334356265
|
||||
66383437313630306338306433316435623262653464383533313032646465616466623035353562
|
||||
32343935616266626466343837326434313536393365663864663765373530643037666237633166
|
||||
30303135343862386636313562373162626638363130306165346335646335363063326563353337
|
||||
62373339383130623837666634643665396566333566653463303866633035346136623465363665
|
||||
37323437623132316439333530373037636431636233326333656163643737663865333562386665
|
||||
38656338643436366135316566306438653231383839613732336139393562633065366237313634
|
||||
62646230323533623533396566303864323139633730303765326134393139303261326635386661
|
||||
30383933366438663130623632383639346164393461323963313933386535306364333133613035
|
||||
36626666326238633331653062636137363238613863633965373337343031383464663130643861
|
||||
38376532316464363630393065376438396631376139313332616430623561373362353962383530
|
||||
62653239653631316634613637663132396335636334353363363962346466326565366261653564
|
||||
37633139353731303064636438616138646463373261373438646464306365326633633437356532
|
||||
34626435336235373562663733376262663530343136633632623534356335363531356533613464
|
||||
65303361326430643233636466333061393434643630616435306430356532333739633864376139
|
||||
37666465623063646231393535313338363034626465393165333232313536313834616463626536
|
||||
34353462313139343233663662336138393636336338326132623665623734623132346164353164
|
||||
37383562613931363535356535353837333661336238323337306239613965623463613963613762
|
||||
61363634663233626263616333643462616263383932343933386437336665346237313131623562
|
||||
34303035336666383064653235316636313464343130356430666162393663396466393236666330
|
||||
36623432393662373330306431623638343565306433643263316333316233383938383230323832
|
||||
36336536393435636138373063306637393737346133343330636230666262363466333266656163
|
||||
34663430613335656162613037346539316131323135323834623337363862373065623939343461
|
||||
64623933633637313961353633653337366434386361623331336131613565633765616232346161
|
||||
31386663353130653332366463316166666261613238343962363533343564316663383664356237
|
||||
64663033633464633961656566303737383264663736313439336234376431653737333639663961
|
||||
62303665323531356431646338373164653766326166376534663436323962343533343131326565
|
||||
32653532356462646133666665326533393439386630646230383339396133643034653030353062
|
||||
34363738383463313061383231336364373366643530313231633164613732663165643439336366
|
||||
32653962326666356530346165356163336261633138626138636363646261646239323265383666
|
||||
39353165633965343764396431653165393363376265613063653039666663326338346330393263
|
||||
62653063613665633934326236653663393839653537623334366431616331613461616136303962
|
||||
35363936616635613739323161396331333330303165373936323330653532393561323232623837
|
||||
33626635383064386363363834653937383235366630343664386433373135643634643635306431
|
||||
38613630623331336463646338643832666363663732633238393636613466636637346135353335
|
||||
34663037623332306434666663613361313632356264306338313234643938386265376538646630
|
||||
33303962333631353935393632333531306432396338623065346162663461323035353333666536
|
||||
62626231636131353763633962326361303036343736663566383662383435303533613536346230
|
||||
62653562616630623065313137623735613162316465643666666534613137363135393230633138
|
||||
64303161346236373861353261393735663736643334393864303366373435376239353332356634
|
||||
64346362383438623166613565643661353039633834313132636162666638363733
|
60
coreos-config/plays/services/nextcloud/docker-compose.yaml
Normal file
@ -0,0 +1,60 @@
---
version: '2'

services:
  db:
    image: mariadb:latest
    restart: always
    command: --transaction-isolation=READ-COMMITTED --binlog-format=ROW --innodb-file-per-table=1 --skip-innodb-read-only-compressed
    volumes:
      - database:/var/lib/mysql
    environment:
      - MYSQL_ROOT_PASSWORD={{ nextcloud.db.rootpassword }}
      - MYSQL_PASSWORD={{ nextcloud.db.password }}
      - MYSQL_DATABASE={{ nextcloud.db.name }}
      - MYSQL_USER={{ nextcloud.db.user }}
    networks:
      - backend

  app:
    image: registry.tobiasmanske.de/nextcloud:main
    restart: always
    depends_on:
      - db
    volumes:
      - ./config.php:/config.php:ro,Z
      - nc_custom_apps:/var/www/html/custom_apps
      - nc_data:/var/www/data
    environment:
      - MYSQL_PASSWORD={{ nextcloud.db.password }}
      - MYSQL_DATABASE={{ nextcloud.db.name }}
      - MYSQL_USER={{ nextcloud.db.user }}
      - MYSQL_HOST=db
    networks:
      - backend
      - gateway
      - default
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.nextcloud.rule=Host(`wolke.chaoswg.org`)"
      - "traefik.http.routers.nextcloud.entryPoints=websecure"
      - "traefik.http.services.nextcloud.loadbalancer.server.port=80"
      - "traefik.http.middlewares.nextcloud.headers.customrequestheaders.Front-End-Https=on"
      - "traefik.http.middlewares.nextcloud.headers.customresponseheaders.Strict-Transport-Security=max-age=15768000; includeSubDomains;"
      - "traefik.http.middlewares.nextcloud-dav.replacepathregex.regex=^/.well-known/ca(l|rd)dav"
      - "traefik.http.middlewares.nextcloud-dav.replacepathregex.replacement=/remote.php/dav/"
      - "traefik.http.middlewares.compression.compress=true"
      - "traefik.http.routers.nextcloud.middlewares=nextcloud,nextcloud-dav,compression"

volumes:
  nc_custom_apps:
  nc_data:
  database:

networks:
  gateway:
    external: true
    name: gateway
  backend:
    internal: true
...
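The nextcloud-dav middleware above rewrites the CalDAV/CardDAV well-known URLs onto Nextcloud's DAV endpoint. A minimal Python sketch of the same rewrite, assuming Traefik's replacepathregex semantics (regex match on the request path, fixed replacement):

import re

# Same pattern and replacement as in the traefik.http.middlewares.nextcloud-dav labels above.
DAV_RE = re.compile(r"^/.well-known/ca(l|rd)dav")

def rewrite_path(path: str) -> str:
    """Map /.well-known/caldav and /.well-known/carddav to /remote.php/dav/."""
    return DAV_RE.sub("/remote.php/dav/", path)

assert rewrite_path("/.well-known/caldav") == "/remote.php/dav/"
assert rewrite_path("/.well-known/carddav") == "/remote.php/dav/"
assert rewrite_path("/index.php") == "/index.php"  # everything else passes through untouched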
1
coreos-config/plays/services/pantalaimon/.env
Normal file
@ -0,0 +1 @@
COMPOSE_PROJECT_NAME=pantalaimon
23
coreos-config/plays/services/pantalaimon/docker-compose.yaml
Normal file
@ -0,0 +1,23 @@
---
services:
  pantalaimon:
    image: matrixdotorg/pantalaimon:latest
    container_name: pantalaimon
    security_opt:
      - no-new-privileges:true
    cap_drop:
      - ALL
    volumes:
      - pantalaimon_data:/data
      - ./pantalaimon.conf:/data/pantalaimon.conf:ro,Z
    restart: always
    networks:
      - pantalaimon

volumes:
  pantalaimon_data:
networks:
  pantalaimon:
    name: pantalaimon
    internal: false
...
7
coreos-config/plays/services/pantalaimon/pantalaimon.conf
Normal file
@ -0,0 +1,7 @@
[unruhigeu]
Homeserver = https://unruhig.eu
ListenAddress = 0.0.0.0
ListenPort = 8008
SSL = True
UseKeyring = False
IgnoreVerification = True
1
coreos-config/plays/services/prometheus/.env
Normal file
@ -0,0 +1 @@
COMPOSE_PROJECT_NAME=prometheus
50
coreos-config/plays/services/prometheus/alertmanager.yml
Normal file
@ -0,0 +1,50 @@
global:
  resolve_timeout: 5m

route:
  group_by: ['alertname']
  group_wait: 5s
  group_interval: 5m
  repeat_interval: 1h
  receiver: 'matrix-monitoring'
  routes:
    - receiver: 'hcio'
      repeat_interval: 1h
      matchers:
        - alertname="PrometheusAlertmanagerE2eDeadManSwitch"
    - receiver: 'email'
      group_interval: 1m
      matchers:
        - job="matrix_synapse_1"
    - receiver: 'matrix-monitoring'
      group_wait: 30s
      group_interval: 1h
      matchers:
        - alertname="PrometheusAllTargetsMissing"
    - receiver: 'matrix-monitoring'
      group_wait: 30s
      group_interval: 1h
      matchers:
        - alertname="PrometheusTargetMissing"


receivers:
  - name: 'email'
    email_configs:
      - to: '{{ prometheus.alertmanager.smtp.target }}'
        from: '"Alertmanager" <{{ prometheus.alertmanager.smtp.username }}>'
        smarthost: 'mxe8cf.netcup.net:587'
        auth_username: '{{ prometheus.alertmanager.smtp.username }}'
        auth_identity: '{{ prometheus.alertmanager.smtp.username }}'
        auth_password: '{{ prometheus.alertmanager.smtp.password }}'
  - name: 'hcio'
    email_configs:
      - to: '{{ prometheus.alertmanager.hcio.mail }}'
        from: '"Alertmanager" <{{ prometheus.alertmanager.smtp.username }}>'
        smarthost: 'mxe8cf.netcup.net:587'
        auth_username: '{{ prometheus.alertmanager.smtp.username }}'
        auth_identity: '{{ prometheus.alertmanager.smtp.username }}'
        auth_password: '{{ prometheus.alertmanager.smtp.password }}'
  - name: 'matrix-monitoring'
    webhook_configs:
      - url: 'http://alertmanager-matrix:3000/alerts?secret={{ prometheus.alertmanager.matrix.alertmanager_token }}'
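The matrix-monitoring receiver posts alerts to the alertmanager-matrix bridge defined in the compose file below, and the ?secret= query parameter is how that bridge authenticates Alertmanager. A rough Python sketch of such a webhook endpoint, purely illustrative (this is not the jaywink/matrix-alertmanager code; the field names follow Alertmanager's standard webhook payload):

import json
from http.server import BaseHTTPRequestHandler, HTTPServer
from urllib.parse import urlparse, parse_qs

SECRET = "example-token"  # hypothetical stand-in for the templated alertmanager_token

class AlertHandler(BaseHTTPRequestHandler):
    def do_POST(self):
        # Reject callers that do not present the shared secret.
        query = parse_qs(urlparse(self.path).query)
        if query.get("secret", [""])[0] != SECRET:
            self.send_response(401); self.end_headers(); return
        body = json.loads(self.rfile.read(int(self.headers["Content-Length"])))
        # Standard Alertmanager webhook payload: overall status plus a list of alerts.
        for alert in body.get("alerts", []):
            print(alert["status"], alert["labels"].get("alertname"),
                  alert["annotations"].get("summary", ""))
        self.send_response(200); self.end_headers()

if __name__ == "__main__":
    HTTPServer(("0.0.0.0", 3000), AlertHandler).serve_forever()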
161
coreos-config/plays/services/prometheus/docker-compose.yaml
Normal file
@ -0,0 +1,161 @@
version: "3.4"
services:
  prometheus:
    image: prom/prometheus:latest
    restart: unless-stopped
    command:
      - '--config.file=/etc/prometheus/prometheus.yml'
      - '--web.external-url=https://prometheus.tobiasmanske.de'
    volumes:
      - ./prometheus.yml:/etc/prometheus/prometheus.yml:ro,Z
      - prom_data:/prometheus
      - label_discovery:/label_discovery:ro
      - ./rules:/rules:ro,Z
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.prometheus.rule=Host(`prometheus.tobiasmanske.de`)"
      - "traefik.http.routers.prometheus.entryPoints=websecure"
      - "traefik.http.services.prometheus.loadbalancer.server.port=9090"
      - "traefik.http.routers.prometheus.middlewares=oauth@file"
    depends_on:
      - prometheus-docker-sd
      - cadvisor
      - node-exporter
    networks:
      - gateway
      - backend
      - alertmanager

  prometheus-docker-sd:
    image: registry.tobiasmanske.de/prometheus-docker-sd:latest
    restart: unless-stopped
    privileged: true
    networks:
      - backend
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock:ro,Z
      - label_discovery:/prometheus-docker-sd:rw
    logging: # this service generates a HUGE amount of logs.
      driver: "none"

  alertmanager:
    image: prom/alertmanager:latest
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.alertmanager.rule=Host(`alertmanager.tobiasmanske.de`)"
      - "traefik.http.routers.alertmanager.entryPoints=websecure"
      - "traefik.http.services.alertmanager.loadbalancer.server.port=9093"
      - "traefik.http.routers.alertmanager.middlewares=oauth@file"
    volumes:
      - ./alertmanager.yml:/etc/alertmanager/config.yml:ro,Z
      - alertmanager_data:/data
    networks:
      - alertmanager
      - gateway
    restart: unless-stopped
    command:
      - '--config.file=/etc/alertmanager/config.yml'
      - '--web.external-url=https://alertmanager.tobiasmanske.de'
      - '--storage.path=/data'

  alertmanager-matrix:
    image: jaywink/matrix-alertmanager:latest
    restart: unless-stopped
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.alertmanager-matrix.rule=Host(`alertmanager.tobiasmanske.de`) && PathPrefix(`/matrix/`)"
      - "traefik.http.routers.alertmanager-matrix.middlewares=matrix-strip"
      - "traefik.http.middlewares.matrix-strip.stripprefix.prefixes=/matrix"
      - "traefik.http.middlewares.matrix-strip.stripprefix.forceslash=false"
      - "traefik.http.routers.alertmanager-matrix.entryPoints=websecure"
      - "traefik.http.services.alertmanager-matrix.loadbalancer.server.port=3000"
    environment:
      - APP_PORT=3000
      - APP_ALERTMANAGER_SECRET={{ prometheus.alertmanager.matrix.alertmanager_token }}
      - MATRIX_HOMESERVER_URL=http://pantalaimon:8008
      - MATRIX_ROOMS={{ prometheus.alertmanager.matrix.rooms | join('|') }}
      - MATRIX_TOKEN={{ prometheus.alertmanager.matrix.matrix_token }}
      - MATRIX_USER=@alertmanager:{{ matrix.baseurl }}
      - MENTION_ROOM=1
    networks:
      - alertmanager
      - pantalaimon
      - gateway


  grafana:
    image: grafana/grafana:latest
    restart: unless-stopped
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.grafana.rule=Host(`grafana.tobiasmanske.de`)"
      - "traefik.http.routers.grafana.entryPoints=websecure"
      - "traefik.http.services.grafana.loadbalancer.server.port=3000"
      - "traefik.http.routers.grafana.middlewares=oauth@file"
    networks:
      - gateway
      - backend
    environment:
      - "GF_SECURITY_ADMIN_USER={{ grafana.admin.user }}"
      - "GF_SECURITY_ADMIN_PASSWORD={{ grafana.admin.password }}"
    volumes:
      - grafana_data:/var/lib/grafana
      - ./grafana-ds.yml:/etc/grafana/provisioning/datasources/datasource.yml:ro,Z

  node-exporter:
    image: quay.io/prometheus/node-exporter:latest
    container_name: host-nc-chaoswg-org-node-exporter
    privileged: true
    labels:
      - "prometheus-scrape.enabled=true"
      - "prometheus-scrape.port=9100"
    volumes:
      - /proc:/host/proc:ro
      - /sys:/host/sys:ro
      - /:/rootfs:ro
      - /:/host:ro,rslave
      - /run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:ro
    command:
      - '--path.rootfs=/host'
      - '--path.procfs=/host/proc'
      - '--path.sysfs=/host/sys'
      - '--collector.filesystem.ignored-mount-points'
      - "^/(sys|proc|dev|host|etc|rootfs/var/lib/docker/containers|rootfs/var/lib/docker/overlay2|rootfs/run/docker/netns|rootfs/var/lib/docker/aufs)($$|/)"
      - '--collector.systemd'
    networks:
      - backend
    restart: unless-stopped

  cadvisor:
    image: gcr.io/cadvisor/cadvisor:latest
    privileged: true
    labels:
      - "prometheus-scrape.enabled=true"
      - "prometheus-scrape.port=8080"
    command:
      - "-docker_only=true"
      - "-housekeeping_interval=10s"
    volumes:
      - /:/rootfs:ro
      - /var/run:/var/run:rw
      - /sys:/sys:ro
      - /var/lib/docker/:/var/lib/docker:ro
    networks:
      - backend
    restart: unless-stopped


volumes:
  prom_data:
  grafana_data:
  label_discovery:
  alertmanager_data:
networks:
  gateway:
    external: true
  pantalaimon:
    external: true
  backend:
    internal: true
  alertmanager:
    internal: true
9
coreos-config/plays/services/prometheus/grafana-ds.yml
Normal file
@ -0,0 +1,9 @@
apiVersion: 1

datasources:
  - name: Prometheus
    type: prometheus
    url: http://prometheus:9090
    isDefault: true
    access: proxy
    editable: true
54
coreos-config/plays/services/prometheus/prometheus.yml
Normal file
@ -0,0 +1,54 @@
global:
  scrape_interval: 15s
  scrape_timeout: 10s
  evaluation_interval: 15s
alerting:
  alertmanagers:
    - scheme: http
      static_configs:
        - targets: [ 'alertmanager:9093' ]
    - static_configs:
        - targets: []
      scheme: http
      timeout: 10s
      api_version: v1
rule_files:
  - "/rules/*.yaml"
scrape_configs:
  - job_name: prometheus
    honor_timestamps: true
    scrape_interval: 15s
    scrape_timeout: 10s
    metrics_path: /metrics
    scheme: http
    static_configs:
      - targets:
          - localhost:9090
  - job_name: 'service_discovery'
    metric_relabel_configs:
      - source_labels:
          - "container_name"
        target_label: "instance"
        action: replace
    file_sd_configs:
      - files:
          - /label_discovery/docker-targets.json
  - job_name: minio-job
    bearer_token: "{{ prometheus.scrape.s3.bearer_token }}"
    metrics_path: /minio/v2/metrics/cluster
    scheme: https
    static_configs:
      - targets: [s3.tobiasmanske.de]
  - job_name: drone-job
    bearer_token: "{{ prometheus.scrape.drone.bearer_token }}"
    scheme: https
    static_configs:
      - targets: [drone.tobiasmanske.de]
  - job_name: 'uptime-kuma-job'
    scrape_interval: 30s
    scheme: https
    static_configs:
      - targets: [status.tobiasmanske.de]
    basic_auth:
      username: "{{ prometheus.scrape.kuma.user }}"
      password: "{{ prometheus.scrape.kuma.password }}"
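The service_discovery job reads its targets from /label_discovery/docker-targets.json, which prometheus-docker-sd maintains from the prometheus-scrape.* container labels. A small Python sketch of what such a file_sd target file could look like; the exact labels emitted by prometheus-docker-sd are an assumption here, only the container_name label is inferred from the metric_relabel_configs above:

import json

# Hypothetical entries in Prometheus file_sd format:
# a JSON list of {"targets": [...], "labels": {...}} objects.
targets = [
    {
        "targets": ["node-exporter:9100"],
        "labels": {"container_name": "host-nc-chaoswg-org-node-exporter"},
    },
    {
        "targets": ["cadvisor:8080"],
        "labels": {"container_name": "cadvisor"},
    },
]

with open("docker-targets.json", "w") as fh:
    json.dump(targets, fh, indent=2)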
54
coreos-config/plays/services/prometheus/rules/cadvisor.yaml
Normal file
@ -0,0 +1,54 @@
# {% raw %}

groups:
  - name: GoogleCadvisor
    rules:
      # - alert: ContainerKilled
      #   expr: 'time() - container_last_seen > 60'
      #   for: 0m
      #   labels:
      #     severity: warning
      #   annotations:
      #     summary: Container killed (instance {{ $labels.instance }})
      #     description: "A container has disappeared\n  VALUE = {{ $value }}\n  LABELS = {{ $labels }}"
      # - alert: ContainerAbsent
      #   expr: 'absent(container_last_seen)'
      #   for: 5m
      #   labels:
      #     severity: warning
      #   annotations:
      #     summary: Container absent (instance {{ $labels.instance }})
      #     description: "A container is absent for 5 min\n  VALUE = {{ $value }}\n  LABELS = {{ $labels }}"
      - alert: ContainerCpuUsage
        expr: '(sum(rate(container_cpu_usage_seconds_total{name!=""}[3m])) BY (instance, name) * 100) > 80'
        for: 2m
        labels:
          severity: warning
        annotations:
          summary: Container CPU usage (instance {{ $labels.instance }})
          description: "Container CPU usage is above 80%\n  VALUE = {{ $value }}\n  LABELS = {{ $labels }}"
      - alert: ContainerMemoryUsage
        expr: '(sum(container_memory_working_set_bytes{name!=""}) BY (instance, name) / sum(container_spec_memory_limit_bytes > 0) BY (instance, name) * 100) > 80'
        for: 2m
        labels:
          severity: warning
        annotations:
          summary: Container Memory usage (instance {{ $labels.instance }})
          description: "Container Memory usage is above 80%\n  VALUE = {{ $value }}\n  LABELS = {{ $labels }}"
      # - alert: ContainerVolumeUsage
      #   expr: '(1 - (sum(container_fs_inodes_free{name!=""}) BY (instance) / sum(container_fs_inodes_total) BY (instance))) * 100 > 80'
      #   for: 2m
      #   labels:
      #     severity: warning
      #   annotations:
      #     summary: Container Volume usage (instance {{ $labels.instance }})
      #     description: "Container Volume usage is above 80%\n  VALUE = {{ $value }}\n  LABELS = {{ $labels }}"
      - alert: ContainerHighThrottleRate
        expr: 'rate(container_cpu_cfs_throttled_seconds_total[3m]) > 1'
        for: 2m
        labels:
          severity: warning
        annotations:
          summary: Container high throttle rate (instance {{ $labels.instance }})
          description: "Container is being throttled\n  VALUE = {{ $value }}\n  LABELS = {{ $labels }}"
# {% endraw %}
303
coreos-config/plays/services/prometheus/rules/node.yaml
Normal file
303
coreos-config/plays/services/prometheus/rules/node.yaml
Normal file
@ -0,0 +1,303 @@
|
||||
# {% raw %}
|
||||
|
||||
groups:
|
||||
- name: NodeExporter
|
||||
rules:
|
||||
- alert: HostOutOfMemory
|
||||
expr: 'node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes * 100 < 10'
|
||||
for: 2m
|
||||
labels:
|
||||
severity: warning
|
||||
annotations:
|
||||
summary: Host out of memory (instance {{ $labels.instance }})
|
||||
description: "Node memory is filling up (< 10% left)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
|
||||
- alert: HostMemoryUnderMemoryPressure
|
||||
expr: 'rate(node_vmstat_pgmajfault[1m]) > 1000'
|
||||
for: 2m
|
||||
labels:
|
||||
severity: warning
|
||||
annotations:
|
||||
summary: Host memory under memory pressure (instance {{ $labels.instance }})
|
||||
description: "The node is under heavy memory pressure. High rate of major page faults\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
|
||||
# - alert: HostMemoryIsUnderUtilized
|
||||
# expr: '100 - (rate(node_memory_MemAvailable_bytes[30m]) / node_memory_MemTotal_bytes * 100) < 20'
|
||||
# for: 1w
|
||||
# labels:
|
||||
# severity: info
|
||||
# annotations:
|
||||
# summary: Host Memory is under utilized (instance {{ $labels.instance }})
|
||||
# description: "Node memory is < 20% for 1 week. Consider reducing memory space.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
|
||||
- alert: HostUnusualNetworkThroughputIn
|
||||
expr: 'sum by (instance) (rate(node_network_receive_bytes_total[2m])) / 1024 / 1024 > 100'
|
||||
for: 5m
|
||||
labels:
|
||||
severity: warning
|
||||
annotations:
|
||||
summary: Host unusual network throughput in (instance {{ $labels.instance }})
|
||||
description: "Host network interfaces are probably receiving too much data (> 100 MB/s)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
|
||||
- alert: HostUnusualNetworkThroughputOut
|
||||
expr: 'sum by (instance) (rate(node_network_transmit_bytes_total[2m])) / 1024 / 1024 > 100'
|
||||
for: 5m
|
||||
labels:
|
||||
severity: warning
|
||||
annotations:
|
||||
summary: Host unusual network throughput out (instance {{ $labels.instance }})
|
||||
description: "Host network interfaces are probably sending too much data (> 100 MB/s)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
|
||||
- alert: HostUnusualDiskReadRate
|
||||
expr: 'sum by (instance) (rate(node_disk_read_bytes_total[2m])) / 1024 / 1024 > 50'
|
||||
for: 5m
|
||||
labels:
|
||||
severity: warning
|
||||
annotations:
|
||||
summary: Host unusual disk read rate (instance {{ $labels.instance }})
|
||||
description: "Disk is probably reading too much data (> 50 MB/s)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
|
||||
- alert: HostUnusualDiskWriteRate
|
||||
expr: 'sum by (instance) (rate(node_disk_written_bytes_total[2m])) / 1024 / 1024 > 50'
|
||||
for: 2m
|
||||
labels:
|
||||
severity: warning
|
||||
annotations:
|
||||
summary: Host unusual disk write rate (instance {{ $labels.instance }})
|
||||
description: "Disk is probably writing too much data (> 50 MB/s)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
|
||||
- alert: HostOutOfDiskSpace
|
||||
expr: '(node_filesystem_avail_bytes * 100) / node_filesystem_size_bytes < 10 and ON (instance, device, mountpoint) node_filesystem_readonly == 0'
|
||||
for: 2m
|
||||
labels:
|
||||
severity: warning
|
||||
annotations:
|
||||
summary: Host out of disk space (instance {{ $labels.instance }})
|
||||
description: "Disk is almost full (< 10% left)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
|
||||
- alert: HostDiskWillFillIn24Hours
|
||||
expr: '(node_filesystem_avail_bytes * 100) / node_filesystem_size_bytes < 10 and ON (instance, device, mountpoint) predict_linear(node_filesystem_avail_bytes{fstype!~"tmpfs"}[1h], 24 * 3600) < 0 and ON (instance, device, mountpoint) node_filesystem_readonly == 0'
|
||||
for: 2m
|
||||
labels:
|
||||
severity: warning
|
||||
annotations:
|
||||
summary: Host disk will fill in 24 hours (instance {{ $labels.instance }})
|
||||
description: "Filesystem is predicted to run out of space within the next 24 hours at current write rate\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
|
||||
- alert: HostOutOfInodes
|
||||
expr: 'node_filesystem_files_free / node_filesystem_files * 100 < 10 and ON (instance, device, mountpoint) node_filesystem_readonly == 0'
|
||||
for: 2m
|
||||
labels:
|
||||
severity: warning
|
||||
annotations:
|
||||
summary: Host out of inodes (instance {{ $labels.instance }})
|
||||
description: "Disk is almost running out of available inodes (< 10% left)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
|
||||
- alert: HostInodesWillFillIn24Hours
|
||||
expr: 'node_filesystem_files_free / node_filesystem_files * 100 < 10 and predict_linear(node_filesystem_files_free[1h], 24 * 3600) < 0 and ON (instance, device, mountpoint) node_filesystem_readonly == 0'
|
||||
for: 2m
|
||||
labels:
|
||||
severity: warning
|
||||
annotations:
|
||||
summary: Host inodes will fill in 24 hours (instance {{ $labels.instance }})
|
||||
description: "Filesystem is predicted to run out of inodes within the next 24 hours at current write rate\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
|
||||
- alert: HostUnusualDiskReadLatency
|
||||
expr: 'rate(node_disk_read_time_seconds_total[1m]) / rate(node_disk_reads_completed_total[1m]) > 0.1 and rate(node_disk_reads_completed_total[1m]) > 0'
|
||||
for: 2m
|
||||
labels:
|
||||
severity: warning
|
||||
annotations:
|
||||
summary: Host unusual disk read latency (instance {{ $labels.instance }})
|
||||
description: "Disk latency is growing (read operations > 100ms)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
|
||||
- alert: HostUnusualDiskWriteLatency
|
||||
expr: 'rate(node_disk_write_time_seconds_total[1m]) / rate(node_disk_writes_completed_total[1m]) > 0.1 and rate(node_disk_writes_completed_total[1m]) > 0'
|
||||
for: 2m
|
||||
labels:
|
||||
severity: warning
|
||||
annotations:
|
||||
summary: Host unusual disk write latency (instance {{ $labels.instance }})
|
||||
description: "Disk latency is growing (write operations > 100ms)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
|
||||
- alert: HostHighCpuLoad
|
||||
expr: '(100 - (avg by(instance) (rate(node_cpu_seconds_total{mode="idle"}[2m])) * 100)) > 80'
|
||||
for: 2m
|
||||
labels:
|
||||
severity: warning
|
||||
annotations:
|
||||
summary: Host high CPU load (instance {{ $labels.instance }})
|
||||
description: "CPU load is > 80%\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
|
||||
# - alert: HostCpuIsUnderUtilized
|
||||
# expr: '100 - (rate(node_cpu_seconds_total{mode="idle"}[30m]) * 100) < 20'
|
||||
# for: 1w
|
||||
# labels:
|
||||
# severity: info
|
||||
# annotations:
|
||||
# summary: Host CPU is under utilized (instance {{ $labels.instance }})
|
||||
# description: "CPU load is < 20% for 1 week. Consider reducing the number of CPUs.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
|
||||
- alert: HostCpuStealNoisyNeighbor
|
||||
expr: 'avg by(instance) (rate(node_cpu_seconds_total{mode="steal"}[5m])) * 100 > 10'
|
||||
for: 0m
|
||||
labels:
|
||||
severity: warning
|
||||
annotations:
|
||||
summary: Host CPU steal noisy neighbor (instance {{ $labels.instance }})
|
||||
description: "CPU steal is > 10%. A noisy neighbor is killing VM performances or a spot instance may be out of credit.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
|
||||
- alert: HostCpuHighIowait
|
||||
expr: 'avg by (instance) (rate(node_cpu_seconds_total{mode="iowait"}[5m])) * 100 > 15'
|
||||
for: 0m
|
||||
labels:
|
||||
severity: warning
|
||||
annotations:
|
||||
summary: Host CPU high iowait (instance {{ $labels.instance }})
|
||||
description: "CPU iowait > 15%. A high iowait means that you are disk or network bound.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
|
||||
- alert: HostUnusualDiskIo
|
||||
expr: 'rate(node_disk_io_time_seconds_total[1m]) > 0.5'
|
||||
for: 5m
|
||||
labels:
|
||||
severity: warning
|
||||
annotations:
|
||||
summary: Host unusual disk IO (instance {{ $labels.instance }})
|
||||
description: "Time spent in IO is too high on {{ $labels.instance }}. Check storage for issues.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
|
||||
# - alert: HostContextSwitching
|
||||
# expr: '(rate(node_context_switches_total[5m])) / (count without(cpu, mode) (node_cpu_seconds_total{mode="idle"})) > 1000'
|
||||
# for: 0m
|
||||
# labels:
|
||||
# severity: warning
|
||||
# annotations:
|
||||
# summary: Host context switching (instance {{ $labels.instance }})
|
||||
# description: "Context switching is growing on node (> 1000 / s)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
|
||||
- alert: HostSwapIsFillingUp
|
||||
expr: '(1 - (node_memory_SwapFree_bytes / node_memory_SwapTotal_bytes)) * 100 > 80'
|
||||
for: 2m
|
||||
labels:
|
||||
severity: warning
|
||||
annotations:
|
||||
summary: Host swap is filling up (instance {{ $labels.instance }})
|
||||
description: "Swap is filling up (>80%)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
|
||||
- alert: HostSystemdServiceCrashed
|
||||
expr: 'node_systemd_unit_state{state="failed"} == 1'
|
||||
for: 0m
|
||||
labels:
|
||||
severity: warning
|
||||
annotations:
|
||||
summary: Host systemd service crashed (instance {{ $labels.instance }})
|
||||
description: "systemd service crashed\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
|
||||
- alert: HostPhysicalComponentTooHot
|
||||
expr: 'node_hwmon_temp_celsius * ignoring(label) group_left(instance, job, node, sensor) node_hwmon_sensor_label{label!="tctl"} > 75'
|
||||
for: 5m
|
||||
labels:
|
||||
severity: warning
|
||||
annotations:
|
||||
summary: Host physical component too hot (instance {{ $labels.instance }})
|
||||
description: "Physical hardware component too hot\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
|
||||
- alert: HostNodeOvertemperatureAlarm
|
||||
expr: 'node_hwmon_temp_crit_alarm_celsius == 1'
|
||||
for: 0m
|
||||
labels:
|
||||
severity: critical
|
||||
annotations:
|
||||
summary: Host node overtemperature alarm (instance {{ $labels.instance }})
|
||||
description: "Physical node temperature alarm triggered\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
|
||||
- alert: HostRaidArrayGotInactive
|
||||
expr: 'node_md_state{state="inactive"} > 0'
|
||||
for: 0m
|
||||
labels:
|
||||
severity: critical
|
||||
annotations:
|
||||
summary: Host RAID array got inactive (instance {{ $labels.instance }})
|
||||
description: "RAID array {{ $labels.device }} is in degraded state due to one or more disks failures. Number of spare drives is insufficient to fix issue automatically.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
|
||||
- alert: HostRaidDiskFailure
|
||||
expr: 'node_md_disks{state="failed"} > 0'
|
||||
for: 2m
|
||||
labels:
|
||||
severity: warning
|
||||
annotations:
|
||||
summary: Host RAID disk failure (instance {{ $labels.instance }})
|
||||
description: "At least one device in RAID array on {{ $labels.instance }} failed. Array {{ $labels.md_device }} needs attention and possibly a disk swap\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
|
||||
- alert: HostKernelVersionDeviations
|
||||
expr: 'count(sum(label_replace(node_uname_info, "kernel", "$1", "release", "([0-9]+.[0-9]+.[0-9]+).*")) by (kernel)) > 1'
|
||||
for: 6h
|
||||
labels:
|
||||
severity: warning
|
||||
annotations:
|
||||
summary: Host kernel version deviations (instance {{ $labels.instance }})
|
||||
description: "Different kernel versions are running\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
|
||||
- alert: HostOomKillDetected
|
||||
expr: 'increase(node_vmstat_oom_kill[1m]) > 0'
|
||||
for: 0m
|
||||
labels:
|
||||
severity: warning
|
||||
annotations:
|
||||
summary: Host OOM kill detected (instance {{ $labels.instance }})
|
||||
description: "OOM kill detected\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
|
||||
- alert: HostEdacCorrectableErrorsDetected
|
||||
expr: 'increase(node_edac_correctable_errors_total[1m]) > 0'
|
||||
for: 0m
|
||||
labels:
|
||||
severity: info
|
||||
annotations:
|
||||
summary: Host EDAC Correctable Errors detected (instance {{ $labels.instance }})
|
||||
description: "Host {{ $labels.instance }} has had {{ printf \"%.0f\" $value }} correctable memory errors reported by EDAC in the last 5 minutes.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
|
||||
- alert: HostEdacUncorrectableErrorsDetected
|
||||
expr: 'node_edac_uncorrectable_errors_total > 0'
|
||||
for: 0m
|
||||
labels:
|
||||
severity: warning
|
||||
annotations:
|
||||
summary: Host EDAC Uncorrectable Errors detected (instance {{ $labels.instance }})
|
||||
description: "Host {{ $labels.instance }} has had {{ printf \"%.0f\" $value }} uncorrectable memory errors reported by EDAC in the last 5 minutes.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
|
||||
- alert: HostNetworkReceiveErrors
|
||||
expr: 'rate(node_network_receive_errs_total[2m]) / rate(node_network_receive_packets_total[2m]) > 0.01'
|
||||
for: 2m
|
||||
labels:
|
||||
severity: warning
|
||||
annotations:
|
||||
summary: Host Network Receive Errors (instance {{ $labels.instance }})
|
||||
description: "Host {{ $labels.instance }} interface {{ $labels.device }} has encountered {{ printf \"%.0f\" $value }} receive errors in the last two minutes.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
|
||||
- alert: HostNetworkTransmitErrors
|
||||
expr: 'rate(node_network_transmit_errs_total[2m]) / rate(node_network_transmit_packets_total[2m]) > 0.01'
|
||||
for: 2m
|
||||
labels:
|
||||
severity: warning
|
||||
annotations:
|
||||
summary: Host Network Transmit Errors (instance {{ $labels.instance }})
|
||||
description: "Host {{ $labels.instance }} interface {{ $labels.device }} has encountered {{ printf \"%.0f\" $value }} transmit errors in the last two minutes.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
|
||||
- alert: HostNetworkInterfaceSaturated
|
||||
expr: '(rate(node_network_receive_bytes_total{device!~"^tap.*|^vnet.*|^veth.*|^tun.*"}[1m]) + rate(node_network_transmit_bytes_total{device!~"^tap.*|^vnet.*|^veth.*|^tun.*"}[1m])) / node_network_speed_bytes{device!~"^tap.*|^vnet.*|^veth.*|^tun.*"} > 0.8 < 10000'
|
||||
for: 1m
|
||||
labels:
|
||||
severity: warning
|
||||
annotations:
|
||||
summary: Host Network Interface Saturated (instance {{ $labels.instance }})
|
||||
description: "The network interface \"{{ $labels.device }}\" on \"{{ $labels.instance }}\" is getting overloaded.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
|
||||
- alert: HostNetworkBondDegraded
|
||||
expr: '(node_bonding_active - node_bonding_slaves) != 0'
|
||||
for: 2m
|
||||
labels:
|
||||
severity: warning
|
||||
annotations:
|
||||
summary: Host Network Bond Degraded (instance {{ $labels.instance }})
|
||||
description: "Bond \"{{ $labels.device }}\" degraded on \"{{ $labels.instance }}\".\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
|
||||
- alert: HostConntrackLimit
|
||||
expr: 'node_nf_conntrack_entries / node_nf_conntrack_entries_limit > 0.8'
|
||||
for: 5m
|
||||
labels:
|
||||
severity: warning
|
||||
annotations:
|
||||
summary: Host conntrack limit (instance {{ $labels.instance }})
|
||||
description: "The number of conntrack is approaching limit\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
|
||||
- alert: HostClockSkew
|
||||
expr: '(node_timex_offset_seconds > 0.05 and deriv(node_timex_offset_seconds[5m]) >= 0) or (node_timex_offset_seconds < -0.05 and deriv(node_timex_offset_seconds[5m]) <= 0)'
|
||||
for: 2m
|
||||
labels:
|
||||
severity: warning
|
||||
annotations:
|
||||
summary: Host clock skew (instance {{ $labels.instance }})
|
||||
description: "Clock skew detected. Clock is out of sync. Ensure NTP is configured correctly on this host.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
|
||||
- alert: HostClockNotSynchronising
|
||||
expr: 'min_over_time(node_timex_sync_status[1m]) == 0 and node_timex_maxerror_seconds >= 16'
|
||||
for: 2m
|
||||
labels:
|
||||
severity: warning
|
||||
annotations:
|
||||
summary: Host clock not synchronising (instance {{ $labels.instance }})
|
||||
description: "Clock not synchronising. Ensure NTP is configured on this host.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
|
||||
- alert: HostRequiresReboot
|
||||
expr: 'node_reboot_required > 0'
|
||||
for: 4h
|
||||
labels:
|
||||
severity: info
|
||||
annotations:
|
||||
summary: Host requires reboot (instance {{ $labels.instance }})
|
||||
description: "{{ $labels.instance }} requires a reboot.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
|
||||
|
||||
# {% endraw %}
|
231
coreos-config/plays/services/prometheus/rules/prometheus.yaml
Normal file
231
coreos-config/plays/services/prometheus/rules/prometheus.yaml
Normal file
@ -0,0 +1,231 @@
|
||||
# {% raw %}
|
||||
|
||||
groups:
|
||||
- name: EmbeddedExporter
|
||||
rules:
|
||||
- alert: PrometheusJobMissing
|
||||
expr: 'absent(up{job="prometheus"})'
|
||||
for: 0m
|
||||
labels:
|
||||
severity: warning
|
||||
annotations:
|
||||
summary: Prometheus job missing (instance {{ $labels.instance }})
|
||||
description: "A Prometheus job has disappeared\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
|
||||
- alert: PrometheusTargetMissing
|
||||
expr: 'up == 0'
|
||||
for: 0m
|
||||
labels:
|
||||
severity: critical
|
||||
annotations:
|
||||
summary: Prometheus target missing (instance {{ $labels.instance }})
|
||||
description: "A Prometheus target has disappeared. An exporter might be crashed.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
|
||||
- alert: PrometheusAllTargetsMissing
|
||||
expr: 'sum by (job) (up) == 0'
|
||||
for: 0m
|
||||
labels:
|
||||
severity: critical
|
||||
annotations:
|
||||
summary: Prometheus all targets missing (instance {{ $labels.instance }})
|
||||
description: "A Prometheus job does not have living target anymore.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
|
||||
- alert: PrometheusTargetMissingWithWarmupTime
|
||||
expr: 'sum by (instance, job) ((up == 0) * on (instance) group_right(job) (node_time_seconds - node_boot_time_seconds > 600))'
|
||||
for: 0m
|
||||
labels:
|
||||
severity: critical
|
||||
annotations:
|
||||
summary: Prometheus target missing with warmup time (instance {{ $labels.instance }})
|
||||
description: "Allow a job time to start up (10 minutes) before alerting that it's down.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
|
||||
- alert: PrometheusConfigurationReloadFailure
|
||||
expr: 'prometheus_config_last_reload_successful != 1'
|
||||
for: 0m
|
||||
labels:
|
||||
severity: warning
|
||||
annotations:
|
||||
summary: Prometheus configuration reload failure (instance {{ $labels.instance }})
|
||||
description: "Prometheus configuration reload error\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
|
||||
- alert: PrometheusTooManyRestarts
|
||||
expr: 'changes(process_start_time_seconds{job=~"prometheus|pushgateway|alertmanager"}[15m]) > 2'
|
||||
for: 0m
|
||||
labels:
|
||||
severity: warning
|
||||
annotations:
|
||||
summary: Prometheus too many restarts (instance {{ $labels.instance }})
|
||||
description: "Prometheus has restarted more than twice in the last 15 minutes. It might be crashlooping.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
|
||||
# - alert: PrometheusAlertmanagerJobMissing
|
||||
# expr: 'absent(up{job="alertmanager"})'
|
||||
# for: 0m
|
||||
# labels:
|
||||
# severity: warning
|
||||
# annotations:
|
||||
# summary: Prometheus AlertManager job missing (instance {{ $labels.instance }})
|
||||
# description: "A Prometheus AlertManager job has disappeared\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
|
||||
- alert: PrometheusAlertmanagerConfigurationReloadFailure
|
||||
expr: 'alertmanager_config_last_reload_successful != 1'
|
||||
for: 0m
|
||||
labels:
|
||||
severity: warning
|
||||
annotations:
|
||||
summary: Prometheus AlertManager configuration reload failure (instance {{ $labels.instance }})
|
||||
description: "AlertManager configuration reload error\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
|
||||
- alert: PrometheusAlertmanagerConfigNotSynced
|
||||
expr: 'count(count_values("config_hash", alertmanager_config_hash)) > 1'
|
||||
for: 0m
|
||||
labels:
|
||||
severity: warning
|
||||
annotations:
|
||||
summary: Prometheus AlertManager config not synced (instance {{ $labels.instance }})
|
||||
description: "Configurations of AlertManager cluster instances are out of sync\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
|
||||
- alert: PrometheusAlertmanagerE2eDeadManSwitch
|
||||
expr: 'vector(1)'
|
||||
for: 0m
|
||||
labels:
|
||||
severity: critical
|
||||
annotations:
|
||||
summary: Prometheus AlertManager E2E dead man switch (instance {{ $labels.instance }})
|
||||
description: "Prometheus DeadManSwitch is an always-firing alert. It's used as an end-to-end test of Prometheus through the Alertmanager.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
|
||||
- alert: PrometheusNotConnectedToAlertmanager
|
||||
expr: 'prometheus_notifications_alertmanagers_discovered < 1'
|
||||
for: 0m
|
||||
labels:
|
||||
severity: critical
|
||||
annotations:
|
||||
summary: Prometheus not connected to alertmanager (instance {{ $labels.instance }})
|
||||
description: "Prometheus cannot connect the alertmanager\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
|
||||
- alert: PrometheusRuleEvaluationFailures
|
||||
expr: 'increase(prometheus_rule_evaluation_failures_total[3m]) > 0'
|
||||
for: 0m
|
||||
labels:
|
||||
severity: critical
|
||||
annotations:
|
||||
summary: Prometheus rule evaluation failures (instance {{ $labels.instance }})
|
||||
description: "Prometheus encountered {{ $value }} rule evaluation failures, leading to potentially ignored alerts.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
|
||||
- alert: PrometheusTemplateTextExpansionFailures
|
||||
expr: 'increase(prometheus_template_text_expansion_failures_total[3m]) > 0'
|
||||
for: 0m
|
||||
labels:
|
||||
severity: critical
|
||||
annotations:
|
||||
summary: Prometheus template text expansion failures (instance {{ $labels.instance }})
|
||||
description: "Prometheus encountered {{ $value }} template text expansion failures\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
|
||||
- alert: PrometheusRuleEvaluationSlow
|
||||
expr: 'prometheus_rule_group_last_duration_seconds > prometheus_rule_group_interval_seconds'
|
||||
for: 5m
|
||||
labels:
|
||||
severity: warning
|
||||
annotations:
|
||||
summary: Prometheus rule evaluation slow (instance {{ $labels.instance }})
|
||||
description: "Prometheus rule evaluation took more time than the scheduled interval. It indicates a slower storage backend access or too complex query.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
|
||||
- alert: PrometheusNotificationsBacklog
|
||||
expr: 'min_over_time(prometheus_notifications_queue_length[10m]) > 0'
|
||||
for: 0m
|
||||
labels:
|
||||
severity: warning
|
||||
annotations:
|
||||
summary: Prometheus notifications backlog (instance {{ $labels.instance }})
|
||||
description: "The Prometheus notification queue has not been empty for 10 minutes\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
|
||||
- alert: PrometheusAlertmanagerNotificationFailing
|
||||
expr: 'rate(alertmanager_notifications_failed_total[1m]) > 0'
|
||||
for: 0m
|
||||
labels:
|
||||
severity: critical
|
||||
annotations:
|
||||
summary: Prometheus AlertManager notification failing (instance {{ $labels.instance }})
|
||||
description: "Alertmanager is failing sending notifications\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
|
||||
# - alert: PrometheusTargetEmpty
|
||||
# expr: 'prometheus_sd_discovered_targets == 0'
|
||||
# for: 0m
|
||||
# labels:
|
||||
# severity: critical
|
||||
# annotations:
|
||||
# summary: Prometheus target empty (instance {{ $labels.instance }})
|
||||
# description: "Prometheus has no target in service discovery\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
|
||||
- alert: PrometheusTargetScrapingSlow
|
||||
expr: 'prometheus_target_interval_length_seconds{quantile="0.9"} / on (interval, instance, job) prometheus_target_interval_length_seconds{quantile="0.5"} > 1.05'
|
||||
for: 5m
|
||||
labels:
|
||||
severity: warning
|
||||
annotations:
|
||||
summary: Prometheus target scraping slow (instance {{ $labels.instance }})
|
||||
description: "Prometheus is scraping exporters slowly since it exceeded the requested interval time. Your Prometheus server is under-provisioned.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
|
||||
- alert: PrometheusLargeScrape
|
||||
expr: 'increase(prometheus_target_scrapes_exceeded_sample_limit_total[10m]) > 10'
|
||||
for: 5m
|
||||
labels:
|
||||
severity: warning
|
||||
annotations:
|
||||
summary: Prometheus large scrape (instance {{ $labels.instance }})
|
||||
description: "Prometheus has many scrapes that exceed the sample limit\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
|
||||
- alert: PrometheusTargetScrapeDuplicate
|
||||
expr: 'increase(prometheus_target_scrapes_sample_duplicate_timestamp_total[5m]) > 0'
|
||||
for: 0m
|
||||
labels:
|
||||
severity: warning
|
||||
annotations:
|
||||
summary: Prometheus target scrape duplicate (instance {{ $labels.instance }})
|
||||
description: "Prometheus has many samples rejected due to duplicate timestamps but different values\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
|
||||
- alert: PrometheusTsdbCheckpointCreationFailures
|
||||
expr: 'increase(prometheus_tsdb_checkpoint_creations_failed_total[1m]) > 0'
|
||||
for: 0m
|
||||
labels:
|
||||
severity: critical
|
||||
annotations:
|
||||
summary: Prometheus TSDB checkpoint creation failures (instance {{ $labels.instance }})
|
||||
description: "Prometheus encountered {{ $value }} checkpoint creation failures\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
|
||||
- alert: PrometheusTsdbCheckpointDeletionFailures
|
||||
expr: 'increase(prometheus_tsdb_checkpoint_deletions_failed_total[1m]) > 0'
|
||||
for: 0m
|
||||
labels:
|
||||
severity: critical
|
||||
annotations:
|
||||
summary: Prometheus TSDB checkpoint deletion failures (instance {{ $labels.instance }})
|
||||
description: "Prometheus encountered {{ $value }} checkpoint deletion failures\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
|
||||
- alert: PrometheusTsdbCompactionsFailed
|
||||
expr: 'increase(prometheus_tsdb_compactions_failed_total[1m]) > 0'
|
||||
for: 0m
|
||||
labels:
|
||||
severity: critical
|
||||
annotations:
|
||||
summary: Prometheus TSDB compactions failed (instance {{ $labels.instance }})
|
||||
description: "Prometheus encountered {{ $value }} TSDB compactions failures\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
|
||||
- alert: PrometheusTsdbHeadTruncationsFailed
|
||||
expr: 'increase(prometheus_tsdb_head_truncations_failed_total[1m]) > 0'
|
||||
for: 0m
|
||||
labels:
|
||||
severity: critical
|
||||
annotations:
|
||||
summary: Prometheus TSDB head truncations failed (instance {{ $labels.instance }})
|
||||
description: "Prometheus encountered {{ $value }} TSDB head truncation failures\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
|
||||
- alert: PrometheusTsdbReloadFailures
|
||||
expr: 'increase(prometheus_tsdb_reloads_failures_total[1m]) > 0'
|
||||
for: 0m
|
||||
labels:
|
||||
severity: critical
|
||||
annotations:
|
||||
summary: Prometheus TSDB reload failures (instance {{ $labels.instance }})
|
||||
description: "Prometheus encountered {{ $value }} TSDB reload failures\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
|
||||
- alert: PrometheusTsdbWalCorruptions
|
||||
expr: 'increase(prometheus_tsdb_wal_corruptions_total[1m]) > 0'
|
||||
for: 0m
|
||||
labels:
|
||||
severity: critical
|
||||
annotations:
|
||||
summary: Prometheus TSDB WAL corruptions (instance {{ $labels.instance }})
|
||||
description: "Prometheus encountered {{ $value }} TSDB WAL corruptions\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
|
||||
- alert: PrometheusTsdbWalTruncationsFailed
|
||||
expr: 'increase(prometheus_tsdb_wal_truncations_failed_total[1m]) > 0'
|
||||
for: 0m
|
||||
labels:
|
||||
severity: critical
|
||||
annotations:
|
||||
summary: Prometheus TSDB WAL truncations failed (instance {{ $labels.instance }})
|
||||
description: "Prometheus encountered {{ $value }} TSDB WAL truncation failures\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
|
||||
- alert: PrometheusTimeserieCardinality
|
||||
expr: 'label_replace(count by(__name__) ({__name__=~".+"}), "name", "$1", "__name__", "(.+)") > 10000'
|
||||
for: 0m
|
||||
labels:
|
||||
severity: warning
|
||||
annotations:
|
||||
summary: Prometheus timeserie cardinality (instance {{ $labels.instance }})
|
||||
description: "The \"{{ $labels.name }}\" timeserie cardinality is getting very high: {{ $value }}\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
|
||||
|
||||
# {% endraw %}
|
1
coreos-config/plays/services/radicale/.env
Normal file
@ -0,0 +1 @@
COMPOSE_PROJECT_NAME=radicale
122
coreos-config/plays/services/radicale/config
Normal file
@ -0,0 +1,122 @@
# -*- mode: conf -*-
# vim:ft=cfg

# Config file for Radicale - A simple calendar server
#
# Place it into /etc/radicale/config (global)
# or ~/.config/radicale/config (user)
#
# The current values are the default ones


[server]

# CalDAV server hostnames separated by a comma
# IPv4 syntax: address:port
# IPv6 syntax: [address]:port
# For example: 0.0.0.0:9999, [::]:9999
#hosts = localhost:5232
hosts = 0.0.0.0:5232

# Max parallel connections
#max_connections = 8

# Max size of request body (bytes)
#max_content_length = 100000000

# Socket timeout (seconds)
#timeout = 30

# SSL flag, enable HTTPS protocol
#ssl = False

# SSL certificate path
#certificate = /etc/ssl/radicale.cert.pem

# SSL private key
#key = /etc/ssl/radicale.key.pem

# CA certificate for validating clients. This can be used to secure
# TCP traffic between Radicale and a reverse proxy
#certificate_authority =


[encoding]

# Encoding for responding requests
#request = utf-8

# Encoding for storing local collections
#stock = utf-8


[auth]

# Authentication method
# Value: none | htpasswd | remote_user | http_x_remote_user
type = htpasswd

# Htpasswd filename
htpasswd_filename = /config/users

# Htpasswd encryption method
# Value: plain | bcrypt | md5
# bcrypt requires the installation of radicale[bcrypt].
htpasswd_encryption = bcrypt

# Incorrect authentication delay (seconds)
#delay = 1

# Message displayed in the client when a password is needed
realm = Radicale - Password Required


[rights]

# Rights backend
# Value: none | authenticated | owner_only | owner_write | from_file
type = owner_only

# File for rights management from_file
#file = /etc/radicale/rights


[storage]

# Storage backend
# Value: multifilesystem | multifilesystem_nolock
#type = multifilesystem

# Folder for storing local collections, created if not present
#filesystem_folder = /var/lib/radicale/collections
filesystem_folder = /data/collections

# Delete sync token that are older (seconds)
#max_sync_token_age = 2592000

# Command that is run after changes to storage
# Example: ([ -d .git ] || git init) && git add -A && (git diff --cached --quiet || git commit -m "Changes by "%(user)s)
hook = ([ -d .git ] || git init) && git add -A && (git diff --cached --quiet || git commit -m "Changes by "%(user)s)


[web]

# Web interface backend
# Value: none | internal
#type = internal


[logging]

# Threshold for the logger
# Value: debug | info | warning | error | critical
#level = warning

# Don't include passwords in logs
#mask_passwords = True


[headers]

# Additional HTTP headers
#Access-Control-Allow-Origin = *
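With htpasswd_encryption = bcrypt, each line of the users file mounted at /config/users is a user:bcrypt-hash pair. A minimal Python sketch for generating such an entry, assuming the bcrypt package is available (username and password below are placeholders):

import bcrypt

def htpasswd_line(user: str, password: str) -> str:
    # bcrypt.hashpw returns bytes like b"$2b$12$..."; Radicale accepts this hash format.
    digest = bcrypt.hashpw(password.encode("utf-8"), bcrypt.gensalt()).decode("ascii")
    return f"{user}:{digest}"

print(htpasswd_line("alice", "correct horse battery staple"))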
42
coreos-config/plays/services/radicale/docker-compose.yaml
Normal file
@ -0,0 +1,42 @@
---
version: "3.4"

services:
  radicale:
    image: registry.tobiasmanske.de/radicale:latest
    init: true
    read_only: true
    security_opt:
      - no-new-privileges:true
    cap_drop:
      - ALL
    cap_add:
      - SETUID
      - SETGID
      - KILL
    healthcheck:
      test: curl -f http://127.0.0.1:5232 || exit 1
      interval: 30s
      retries: 3
    volumes:
      - ./config:/config/config:ro,Z
      - ./users:/config/users:ro,Z
      - data:/data
    environment:
      - TAKE_FILE_OWNERSHIP=false
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.radicale.rule=Host(`calendar.tobiasmanske.de`)"
      - "traefik.http.routers.radicale.entryPoints=websecure"
      - "traefik.http.services.radicale.loadbalancer.server.port=5232"
    restart: always
    networks:
      - gateway

networks:
  gateway:
    external: true

volumes:
  data:
...
9
coreos-config/plays/services/radicale/users
Normal file
@ -0,0 +1,9 @@
$ANSIBLE_VAULT;1.2;AES256;secrets
62313133646234613734343031616261396636356563363934653635373435613237623833643733
6233383934636436323037393533326335366434623764320a653531306439306337363839356535
63646637396437333335343666653463616437316338313933333236373537623036376266333564
3334323432656261340a393336323737653333306136313337323064653033656533356262636461
39663138623639373965353862363836626266633139656132636233353334613939303764306539
36393534663466653863383037393534666138316666326264353165643136333635363761316135
38383062343062653963666639343137633466623232386264636437386136366338353538306139
39623065616461373237
1
coreos-config/plays/services/registry/.env
Normal file
@ -0,0 +1 @@
COMPOSE_PROJECT_NAME=registry
30
coreos-config/plays/services/registry/config.yaml
Normal file
@ -0,0 +1,30 @@
version: 0.1
log:
  fields:
    service: registry
storage:
  cache:
    blobdescriptor: inmemory
  filesystem:
    rootdirectory: /var/lib/registry
  delete:
    enabled: true

auth:
  htpasswd:
    realm: Registry Realm
    path: /auth/htpasswd
http:
  addr: :5000
  headers:
    Access-Control-Expose-Headers: ['Docker-Content-Digest']
    Access-Control-Allow-Methods: ['HEAD', 'GET', 'OPTIONS', 'DELETE']
    Access-Control-Allow-Origin: ['https://registry-ui.tobiasmanske.de']
    Access-Control-Allow-Credentials: [true]
    Access-Control-Allow-Headers: ['Authorization', 'Accept']
    X-Content-Type-Options: [nosniff]
health:
  storagedriver:
    enabled: true
    interval: 10s
    threshold: 3
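The registry exposes the standard Docker Registry HTTP API v2 behind the htpasswd auth configured above; the UI container defined below reaches it through NGINX_PROXY_PASS_URL. A quick Python sketch of listing repositories and their tags against that API, assuming the requests package (the credentials are placeholders for entries in the htpasswd file):

import requests

BASE = "https://registry.tobiasmanske.de"
AUTH = ("user", "password")  # placeholder basic-auth credentials

# /v2/_catalog and /v2/<name>/tags/list are part of the Registry HTTP API v2.
repos = requests.get(f"{BASE}/v2/_catalog", auth=AUTH, timeout=10).json()["repositories"]
for repo in repos:
    tags = requests.get(f"{BASE}/v2/{repo}/tags/list", auth=AUTH, timeout=10).json().get("tags") or []
    print(repo, tags)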
48
coreos-config/plays/services/registry/docker-compose.yaml
Normal file
@ -0,0 +1,48 @@
---
services:
  registry:
    container_name: registry
    restart: always
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.registry.rule=Host(`registry.tobiasmanske.de`)"
      - "traefik.http.routers.registry.entryPoints=websecure"
      - "traefik.http.services.registry.loadbalancer.server.port=5000"
    image: 'registry:2'
    networks:
      - gateway
      - backend
    volumes:
      - registry_data:/var/lib/registry
      - ./htpasswd:/auth/htpasswd:ro,z
      - ./config.yaml:/etc/docker/registry/config.yml:ro,z

  frontend:
    image: joxit/docker-registry-ui:latest
    restart: unless-stopped
    depends_on:
      - registry
    environment:
      - DELETE_IMAGES=true
      - REGISTRY_TITLE=My Private Docker Registry
      - NGINX_PROXY_PASS_URL=http://registry:5000
      - SINGLE_REGISTRY=true
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.registryui.rule=Host(`registry-ui.tobiasmanske.de`)"
      - "traefik.http.routers.registryui.entryPoints=websecure"
      - "traefik.http.services.registryui.loadbalancer.server.port=80"
    networks:
      - gateway
      - backend


volumes:
  registry_data:

networks:
  gateway:
    external: true
  backend:
    internal: true
...
3
coreos-config/plays/services/registry/htpasswd
Normal file
@ -0,0 +1,3 @@
{% for line in registry.HTPASSWD %}
{{ line }}
{% endfor %}
1
coreos-config/plays/services/repo_proxy/.env
Normal file
@ -0,0 +1 @@
COMPOSE_PROJECT_NAME=repo_proxy
21
coreos-config/plays/services/repo_proxy/Caddyfile
Normal file
@ -0,0 +1,21 @@
{
    auto_https off
}
http://repo.tobiasmanske.de {
    @uncompressed {
        path *.db
        path *.files
        path *.db.sig
        path *.files.sig
    }

    uri @uncompressed replace db db.tar.xz
    uri @uncompressed replace files files.tar.xz

    uri /os/* replace /os/ /repo/
    reverse_proxy /repo/* https://s3.tobiasmanske.de {
        header_up Host s3.tobiasmanske.de
    }
    root * /var/www
    file_server
}
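The Caddyfile above serves what looks like a pacman-style package repo out of S3: requests for the uncompressed .db/.files metadata (and their .sig files) are rewritten to the .tar.xz artifacts, the public /os/ prefix maps to /repo/ in the bucket, and anything outside /repo/ falls through to the local file server. A Python sketch of the same path rewriting, approximating Caddy's uri replace (a substring substitution applied to matched paths):

def rewrite(path: str) -> str:
    # .db/.files metadata maps onto the .tar.xz artifacts stored in S3.
    if path.endswith((".db", ".files", ".db.sig", ".files.sig")):
        path = path.replace("db", "db.tar.xz").replace("files", "files.tar.xz")
    # The public /os/ prefix is stored under /repo/ in the bucket.
    if path.startswith("/os/"):
        path = path.replace("/os/", "/repo/", 1)
    return path

assert rewrite("/os/x86_64/custom.db") == "/repo/x86_64/custom.db.tar.xz"
assert rewrite("/os/x86_64/custom.db.sig") == "/repo/x86_64/custom.db.tar.xz.sig"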
22
coreos-config/plays/services/repo_proxy/docker-compose.yaml
Normal file
@ -0,0 +1,22 @@
---
version: "3.4"

services:
  redirect:
    image: caddy:2
    volumes:
      - ./Caddyfile:/etc/caddy/Caddyfile:ro,z
      - ./www:/var/www:ro,Z
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.repoproxy.rule=Host(`repo.tobiasmanske.de`)"
      - "traefik.http.routers.repoproxy.entryPoints=websecure"
      - "traefik.http.services.repoproxy.loadbalancer.server.port=80"
    restart: always
    networks:
      - gateway

networks:
  gateway:
    external: true
...
1
coreos-config/plays/services/repo_proxy/www/index.html
Normal file
1
coreos-config/plays/services/repo_proxy/www/index.html
Normal file
@ -0,0 +1 @@
|
||||
Hello World
|
1
coreos-config/plays/services/search/.env
Normal file
1
coreos-config/plays/services/search/.env
Normal file
@ -0,0 +1 @@
|
||||
COMPOSE_PROJECT_NAME=searxng
|
45
coreos-config/plays/services/search/docker-compose.yaml
Normal file
45
coreos-config/plays/services/search/docker-compose.yaml
Normal file
@ -0,0 +1,45 @@
|
||||
---
|
||||
version: '3.9'
|
||||
|
||||
services:
|
||||
searxng:
|
||||
image: searxng/searxng:latest # >.<
|
||||
container_name: searxng
|
||||
restart: always
|
||||
networks:
|
||||
- gateway
|
||||
- default
|
||||
- backend
|
||||
volumes:
|
||||
- ./settings.yml:/etc/searxng/settings.yml:ro,z
|
||||
- ./uwsgi.ini:/etc/searxng/uwsgi.ini:ro,z
|
||||
labels:
|
||||
- "traefik.enable=true"
|
||||
- "traefik.http.routers.searxng.rule=Host(`search.tobiasmanske.de`)"
|
||||
- "traefik.http.routers.searxng.entryPoints=websecure"
|
||||
- "traefik.http.services.searxng.loadbalancer.server.port=8080"
|
||||
- "traefik.http.middlewares.compression.compress=true"
|
||||
- "traefik.http.routers.searxng.middlewares=compression"
|
||||
|
||||
redis:
|
||||
container_name: redis
|
||||
image: "redis:alpine"
|
||||
restart: always
|
||||
command: redis-server --save "" --appendonly "no"
|
||||
networks:
|
||||
- backend
|
||||
tmpfs:
|
||||
- /var/lib/redis
|
||||
cap_drop:
|
||||
- ALL
|
||||
cap_add:
|
||||
- SETGID
|
||||
- SETUID
|
||||
- DAC_OVERRIDE
|
||||
|
||||
networks:
|
||||
gateway:
|
||||
external: true
|
||||
backend:
|
||||
internal: true
|
||||
...
|
1320
coreos-config/plays/services/search/settings.yml
Normal file
1320
coreos-config/plays/services/search/settings.yml
Normal file
File diff suppressed because it is too large
49
coreos-config/plays/services/search/uwsgi.ini
Normal file
49
coreos-config/plays/services/search/uwsgi.ini
Normal file
@ -0,0 +1,49 @@
|
||||
[uwsgi]
|
||||
# Who will run the code
|
||||
uid = searxng
|
||||
gid = searxng
|
||||
|
||||
# Number of workers (usually CPU count)
|
||||
workers = 4
|
||||
threads = 4
|
||||
|
||||
# Permissions granted on the created socket
|
||||
chmod-socket = 666
|
||||
|
||||
# Plugin to use and interpreter config
|
||||
single-interpreter = true
|
||||
master = true
|
||||
plugin = python3
|
||||
lazy-apps = true
|
||||
enable-threads = true
|
||||
|
||||
# Module to import
|
||||
module = searx.webapp
|
||||
|
||||
# Virtualenv and python path
|
||||
pythonpath = /usr/local/searxng/
|
||||
chdir = /usr/local/searxng/searx/
|
||||
|
||||
# automatically set process names to something meaningful
|
||||
auto-procname = true
|
||||
|
||||
# Disable request logging for privacy
|
||||
disable-logging = true
|
||||
log-5xx = true
|
||||
|
||||
# Set the max size of a request (request-body excluded)
|
||||
buffer-size = 8192
|
||||
|
||||
# No keep alive
|
||||
# See https://github.com/searx/searx-docker/issues/24
|
||||
add-header = Connection: close
|
||||
|
||||
# uwsgi serves the static files
|
||||
# expires set to one year since the static filenames contain content hashes
|
||||
static-map = /static=/usr/local/searxng/searx/static
|
||||
static-expires = /* 31557600
|
||||
static-gzip-all = True
|
||||
offload-threads = %k
|
||||
|
||||
# Cache
|
||||
cache2 = name=searxngcache,items=2000,blocks=2000,blocksize=4096,bitmap=1
|
1
coreos-config/plays/services/syncthing/.env
Normal file
1
coreos-config/plays/services/syncthing/.env
Normal file
@ -0,0 +1 @@
|
||||
COMPOSE_PROJECT_NAME=syncthing
|
45
coreos-config/plays/services/syncthing/config/cert.pem
Normal file
45
coreos-config/plays/services/syncthing/config/cert.pem
Normal file
@ -0,0 +1,45 @@
|
||||
$ANSIBLE_VAULT;1.2;AES256;secrets
|
||||
33356661663637323832343435656464323766303832363037333663393064336439663165313637
|
||||
6461393832613137623663353337333232616135663938340a666566366335326565613738613930
|
||||
38373066313664616665633337616138303530343637636162346535633339626236303237393561
|
||||
3731366337323033310a386562326238663133653666396434663465313936313738313363613762
|
||||
65643662396237316663306163366636646338663365666632373830363930623534376338396336
|
||||
30313135623038386564373131613835316166326662313066303061393536613136353065363163
|
||||
33343438613562336235626437666661346363363933616132353764336633343739616230333438
|
||||
30333736383434346465666437346137343437366164323964353437643733333066626531383439
|
||||
35353233376164303336626661366662383535613430363439633131643838333535353833396131
|
||||
38383530666232643461623565363865323438666333626338313139656563616465626262636639
|
||||
33653965663166323630373938643638373362313832363331356339353265626262303761316664
|
||||
30303264386630363530643363346162613539623839653934646330373438396530666436376462
|
||||
34653365353961663065323730383935396666336163626337633561613139343139316665333839
|
||||
65656665353436313538636137376633633032383438366665373133396337306138393931653362
|
||||
66386465336363643035343530376437323064643861363535623135353265393034653936323437
|
||||
63353537343538383930366334316237376364613730396534356565313135633331663265323030
|
||||
35646563303031366130383131316637623131396531663862333561396162663361666536306132
|
||||
32393564383330623739393730386261333038343362626438336462616638616132343035393265
|
||||
62336338643639333366613163393332336462613230346366616333396234366231636361623838
|
||||
39373565646131636333393663356130646537356432316261616664356531373063393430653539
|
||||
30346464656535323164613236303361653033613738333235346230343238623166663462353965
|
||||
66313632646431346565633339663864613333383432616536386533313863616232323235306165
|
||||
66376238306537383966373464333532313166393735313535393365616337383034396538343463
|
||||
38386235326233343132306164376432373361383939633161656232613033363538636435393061
|
||||
37343837396362653736303436356666666138353735653134336563373430643636356665373236
|
||||
64373035626461386132666339646466396563623266326637333435633165343034646566326430
|
||||
66356666626539373462653536313636366530313463313530363538623862626537366639636266
|
||||
32653137636331616536343934356662656262623762333930653238663563646239643637386165
|
||||
63613662363235656432313666656331346461343432316530633163303331366239333666633534
|
||||
64393865366139343135303062366333363332376565646437356164306232643130356261656534
|
||||
63303339633765373136323665376137376139363265633162653563356262353162643164306430
|
||||
30306133663565386237373131353232623936623237373739623837643564386131373132316331
|
||||
31663062363133636335303966356562333438376333356330373166366531393461303037363936
|
||||
63346630333130303566383264303436393462313431623237373063393033346438633966643062
|
||||
38396163356265363363396164623664343530383937653663663864626566353365646462666535
|
||||
36303936653933363964663131376236333965313431653937626332383834373833343462333036
|
||||
30343537366462376562343865633162383263313365633332396366336238613132313630653763
|
||||
64343035366561313339316463323134643664616565663331643036306639383163373831376265
|
||||
66646336316632323034653866653532323934636639383634316163336566383830376238376433
|
||||
37653137363939643461373538623032613731383731353639313534376466303930363765303637
|
||||
37626532313430653239663835323633326263656536353330636437376237376339663336666234
|
||||
39396138323836616432396265626236333134323462623138656534333966613338636132393665
|
||||
38343662646238363735666564643336383633393963633662333262663131616163643765353232
|
||||
63613764346437663666
|
1104
coreos-config/plays/services/syncthing/config/config.xml
Normal file
1104
coreos-config/plays/services/syncthing/config/config.xml
Normal file
File diff suppressed because it is too large
20
coreos-config/plays/services/syncthing/config/key.pem
Normal file
20
coreos-config/plays/services/syncthing/config/key.pem
Normal file
@ -0,0 +1,20 @@
|
||||
$ANSIBLE_VAULT;1.2;AES256;secrets
|
||||
30383836353130323131653139643365346335633063343362656530663538393435663335613132
|
||||
3533343531333434376632333630343364313061633066330a373263393966336430616530386564
|
||||
32313530333939333035393863623932666261316664666132366331626561306230306238323733
|
||||
3230396463303764300a643831633734393964363534343932303039323161373239363766643037
|
||||
35333362666332346133363063363466343338653963353333376162373331316433326436353738
|
||||
63323339346134626131316136663736313737653866393135356262363832613262316139613963
|
||||
32646532343639303732643432306239656135663363643263393832383936373837626665616662
|
||||
37373865303364373035363832633932323233396336363863336338613237623637323665386536
|
||||
37313164646266663135336432363963643630366434356230626663343534376330343461366664
|
||||
33623261313662663636346361386332343630396164616137303364643661643736653462323062
|
||||
32613335653732373365646566393930666561383465376665383531393037343638633763633563
|
||||
38613164373332306237373737623839386336613166333139656264336131326631343438373235
|
||||
31343739363366396464623662373539613433346438626335303765623738393930666430303139
|
||||
66386264623666393234623032386135323231653965393466656634353532616530306265306666
|
||||
62663039333231316130353061313233666639633563346635623464363135383863666532323432
|
||||
63376664343630633631393232636364633036636230663864323437636439623961613634613638
|
||||
38626161646166336138373931393464633030323338663763383135316665346137646139333530
|
||||
61386233373538636433363832363537313766363566623961346336353532303761313664383032
|
||||
646138346562616433343462303065306236
|
24
coreos-config/plays/services/syncthing/docker-compose.yaml
Normal file
24
coreos-config/plays/services/syncthing/docker-compose.yaml
Normal file
@ -0,0 +1,24 @@
|
||||
---
|
||||
version: '3.9'
|
||||
|
||||
services:
|
||||
syncthing:
|
||||
image: syncthing/syncthing:1
|
||||
restart: unless-stopped
|
||||
ports:
|
||||
- 22000:22000/tcp
|
||||
- 22000:22000/udp
|
||||
volumes:
|
||||
- syncthing-data:/var/syncthing
|
||||
- ./config/cert.pem:/var/syncthing/config/cert.pem:ro,z
|
||||
- ./config/key.pem:/var/syncthing/config/key.pem:ro,z
|
||||
- ./config/config.xml:/var/syncthing/config/config.xml:ro,z
|
||||
hostname: "Netcup"
|
||||
environment:
|
||||
- PUID=0
|
||||
- PGID=0
|
||||
- TZ=Europe/Berlin
|
||||
|
||||
volumes:
|
||||
syncthing-data:
|
||||
...
|
1
coreos-config/plays/services/thelounge/.env
Normal file
1
coreos-config/plays/services/thelounge/.env
Normal file
@ -0,0 +1 @@
|
||||
COMPOSE_PROJECT_NAME=thelounge
|
24
coreos-config/plays/services/thelounge/docker-compose.yaml
Normal file
24
coreos-config/plays/services/thelounge/docker-compose.yaml
Normal file
@ -0,0 +1,24 @@
|
||||
---
|
||||
version: "3.4"
|
||||
|
||||
services:
|
||||
lounge:
|
||||
image: thelounge/thelounge:4
|
||||
volumes:
|
||||
- data:/var/opt/thelounge
|
||||
labels:
|
||||
- "traefik.enable=true"
|
||||
- "traefik.http.routers.thelounge.rule=Host(`lounge.tobiasmanske.de`)"
|
||||
- "traefik.http.routers.thelounge.entryPoints=websecure"
|
||||
- "traefik.http.services.thelounge.loadbalancer.server.port=9000"
|
||||
restart: always
|
||||
networks:
|
||||
- gateway
|
||||
|
||||
networks:
|
||||
gateway:
|
||||
external: true
|
||||
|
||||
volumes:
|
||||
data:
|
||||
...
|
1
coreos-config/plays/services/traefik/.env
Normal file
1
coreos-config/plays/services/traefik/.env
Normal file
@ -0,0 +1 @@
|
||||
COMPOSE_PROJECT_NAME=traefik
|
86
coreos-config/plays/services/traefik/docker-compose.yaml
Normal file
86
coreos-config/plays/services/traefik/docker-compose.yaml
Normal file
@ -0,0 +1,86 @@
|
||||
{% set deploy_traefik_fa = with_fa|default(false) %}
|
||||
---
|
||||
version: '3.9'
|
||||
services:
|
||||
traefik:
|
||||
image: traefik:v2.7
|
||||
container_name: traefik
|
||||
ulimits:
|
||||
nofile:
|
||||
soft: 4000
|
||||
hard: 15000
|
||||
restart: always
|
||||
ports:
|
||||
- "443:443"
|
||||
- "80:80"
|
||||
privileged: true
|
||||
volumes:
|
||||
- "/var/run/docker.sock:/var/run/docker.sock:z"
|
||||
- "./traefik.yaml:/etc/traefik/traefik.yaml:Z,ro"
|
||||
- "./dynamic.yaml:/etc/traefik/dynamic.yaml:Z,ro"
|
||||
- "acme:/acme"
|
||||
labels:
|
||||
- "prometheus-scrape.enabled=true"
|
||||
- "prometheus-scrape.port=9091"
|
||||
networks:
|
||||
- gateway
|
||||
- default
|
||||
|
||||
{% if deploy_traefik_fa %}
|
||||
traefik-fa:
|
||||
image: quay.io/oauth2-proxy/oauth2-proxy:latest
|
||||
restart: unless-stopped
|
||||
networks:
|
||||
- gateway
|
||||
depends_on:
|
||||
- traefik
|
||||
labels:
|
||||
traefik.enable: 'true'
|
||||
traefik.http.routers.oauth.entrypoints: websecure
|
||||
traefik.http.routers.oauth.rule: Host(`traefik-fa.tobiasmanske.de`) || PathPrefix(`/oauth2`)
|
||||
traefik.http.services.oauth.loadbalancer.server.port: '4180'
|
||||
prometheus-scrape.enabled: "true"
|
||||
prometheus-scrape.port: "9091"
|
||||
environment:
|
||||
OAUTH2_PROXY_PROVIDER: 'keycloak-oidc'
|
||||
OAUTH2_PROXY_CLIENT_ID: '{{ traefik.oidc.client_id }}'
|
||||
OAUTH2_PROXY_CLIENT_SECRET: '{{ traefik.oidc.client_secret }}'
|
||||
OAUTH2_PROXY_OIDC_ISSUER_URL: '{{ traefik.oidc.issuer_url }}'
|
||||
OAUTH2_PROXY_COOKIE_DOMAIN: '.tobiasmanske.de'
|
||||
OAUTH2_PROXY_COOKIE_REFRESH: '1h'
|
||||
OAUTH2_PROXY_COOKIE_SECURE: 'true'
|
||||
OAUTH2_PROXY_COOKIE_SECRET: '{{ traefik.oidc.cookie_secret }}'
|
||||
OAUTH2_PROXY_EMAIL_DOMAINS: '*'
|
||||
OAUTH2_PROXY_FOOTER: '-'
|
||||
OAUTH2_PROXY_HTTP_ADDRESS: '0.0.0.0:4180'
|
||||
OAUTH2_PROXY_METRICS_ADDRESS: "0.0.0.0:9091"
|
||||
OAUTH2_PROXY_PASS_BASIC_AUTH: 'false'
|
||||
OAUTH2_PROXY_PASS_USER_HEADERS: 'true'
|
||||
OAUTH2_PROXY_REVERSE_PROXY: 'true'
|
||||
OAUTH2_PROXY_SET_AUTHORIZATION_HEADER: 'true'
|
||||
OAUTH2_PROXY_SET_XAUTHREQUEST: 'true'
|
||||
OAUTH2_PROXY_WHITELIST_DOMAIN: '.tobiasmanske.de'
|
||||
|
||||
whoami:
|
||||
image: containous/whoami
|
||||
networks:
|
||||
- gateway
|
||||
labels:
|
||||
- "traefik.enable=true"
|
||||
- "traefik.http.services.whoami.loadbalancer.server.port=80"
|
||||
- "traefik.http.routers.whoami.rule=Host(`test.tobiasmanske.de`)"
|
||||
- "traefik.http.routers.whoami.entryPoints=websecure"
|
||||
- "traefik.http.routers.whoami.middlewares=oauth@file"
|
||||
|
||||
{% endif %}
|
||||
|
||||
volumes:
|
||||
acme:
|
||||
|
||||
networks:
|
||||
gateway:
|
||||
name: gateway
|
||||
internal: false
|
||||
default:
|
||||
driver: bridge
|
||||
...
|
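The oauth2-proxy sidecar above pulls its OIDC settings from the traefik.oidc variables. A hypothetical shape for those vars (all values are placeholders; oauth2-proxy expects the cookie secret to decode to 16, 24, or 32 bytes):

traefik:
  oidc:
    client_id: "oauth2-proxy"                               # hypothetical Keycloak client
    client_secret: "<client secret from Keycloak>"
    issuer_url: "https://<keycloak host>/realms/<realm>"    # hypothetical realm URL
    cookie_secret: "<32-byte random value, base64-encoded>"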
32
coreos-config/plays/services/traefik/dynamic.yaml
Normal file
32
coreos-config/plays/services/traefik/dynamic.yaml
Normal file
@ -0,0 +1,32 @@
|
||||
http:
|
||||
middlewares:
|
||||
auth-headers:
|
||||
headers:
|
||||
sslRedirect: true
|
||||
stsSeconds: 315360000
|
||||
browserXssFilter: true
|
||||
contentTypeNosniff: true
|
||||
forceSTSHeader: true
|
||||
sslHost: tobiasmanske.de
|
||||
stsIncludeSubdomains: true
|
||||
stsPreload: true
|
||||
frameDeny: true
|
||||
oauth-auth:
|
||||
forwardAuth:
|
||||
address: https://traefik-fa.tobiasmanske.de/oauth2/auth
|
||||
trustForwardHeader: true
|
||||
oauth-errors:
|
||||
errors:
|
||||
status:
|
||||
- "401-403"
|
||||
service: oauth@docker
|
||||
query: "/oauth2/sign_in"
|
||||
oauth:
|
||||
chain:
|
||||
middlewares:
|
||||
- oauth-errors
|
||||
- oauth-auth
|
||||
deny-metrics:
|
||||
replacePathRegex:
|
||||
regex: "^/metrics$"
|
||||
replacement: "/"
|
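Middlewares declared here live in Traefik's file provider, so compose services attach them with the @file suffix in their router labels. A minimal hypothetical label set for a service behind the oauth chain, with /metrics masked by deny-metrics (service name and host are illustrative):

labels:
  - "traefik.enable=true"
  - "traefik.http.routers.someapp.rule=Host(`someapp.tobiasmanske.de`)"   # hypothetical router
  - "traefik.http.routers.someapp.entryPoints=websecure"
  - "traefik.http.routers.someapp.middlewares=oauth@file,deny-metrics@file"
  - "traefik.http.services.someapp.loadbalancer.server.port=8080"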
38
coreos-config/plays/services/traefik/traefik.yaml
Normal file
38
coreos-config/plays/services/traefik/traefik.yaml
Normal file
@ -0,0 +1,38 @@
|
||||
log:
|
||||
level: ERROR
|
||||
metrics:
|
||||
prometheus:
|
||||
addEntryPointsLabels: true
|
||||
addServicesLabels: true
|
||||
addRoutersLabels: true
|
||||
entryPoint: metrics
|
||||
providers:
|
||||
docker:
|
||||
network: gateway
|
||||
exposedbydefault: false
|
||||
file:
|
||||
filename: /etc/traefik/dynamic.yaml
|
||||
entryPoints:
|
||||
web:
|
||||
address: ":80"
|
||||
http:
|
||||
redirections:
|
||||
entryPoint:
|
||||
to: websecure
|
||||
scheme: https
|
||||
permanent: true
|
||||
metrics:
|
||||
address: ":9091"
|
||||
websecure:
|
||||
address: ":443"
|
||||
http:
|
||||
tls:
|
||||
certResolver: letsencrypt
|
||||
|
||||
certificatesResolvers:
|
||||
letsencrypt:
|
||||
acme:
|
||||
email: webmaster@tobiasmanske.de
|
||||
storage: /acme/acme.json
|
||||
# caServer: "https://acme-staging-v02.api.letsencrypt.org/directory"
|
||||
tlsChallenge: true
|
1
coreos-config/plays/services/tubearchivist/.env
Normal file
1
coreos-config/plays/services/tubearchivist/.env
Normal file
@ -0,0 +1 @@
|
||||
COMPOSE_PROJECT_NAME=tubearchivist
|
@ -0,0 +1,82 @@
|
||||
---
|
||||
version: "3.4"
|
||||
|
||||
services:
|
||||
tubearchivist:
|
||||
restart: unless-stopped
|
||||
image: bbilly1/tubearchivist:latest
|
||||
labels:
|
||||
- "traefik.enable=true"
|
||||
- "traefik.http.routers.tubearchivist.middlewares=sso@file"
|
||||
- "traefik.http.routers.tubearchivist.rule=Host(`tubearchivist.tobiasmanske.de`)"
|
||||
- "traefik.http.routers.tubearchivist.entryPoints=websecure"
|
||||
- "traefik.http.services.tubearchivist.loadbalancer.server.port=8000"
|
||||
volumes:
|
||||
- media:/youtube
|
||||
- cache:/cache
|
||||
environment:
|
||||
- ES_URL=http://archivist-es:9200
|
||||
- REDIS_HOST=archivist-redis
|
||||
- HOST_UID=1000
|
||||
- HOST_GID=1000
|
||||
- TA_HOST=tubearchivist.tobiasmanske.de
|
||||
- TA_USERNAME={{ tubearchivist.username }}
|
||||
- TA_PASSWORD={{ tubearchivist.password }}
|
||||
- ELASTIC_PASSWORD={{ tubearchivist.elastic_password }}
|
||||
- TZ=Europe/Berlin # set your time zone
|
||||
depends_on:
|
||||
- archivist-es
|
||||
- archivist-redis
|
||||
networks:
|
||||
- backend
|
||||
- gateway
|
||||
- default
|
||||
|
||||
archivist-redis:
|
||||
image: redislabs/rejson:latest # for arm64 use bbilly1/rejson
|
||||
container_name: archivist-redis
|
||||
restart: unless-stopped
|
||||
volumes:
|
||||
- redis:/data
|
||||
depends_on:
|
||||
- archivist-es
|
||||
networks:
|
||||
- backend
|
||||
|
||||
archivist-es:
|
||||
image: bbilly1/tubearchivist-es:latest # only for amd64, or use official es 8.3.3
|
||||
container_name: archivist-es
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
- "xpack.security.enabled=true"
|
||||
- "ELASTIC_PASSWORD={{ tubearchivist.elastic_password }}" # matching Elasticsearch password
|
||||
- "discovery.type=single-node"
|
||||
- "ES_JAVA_OPTS=-Xms512m -Xmx512m"
|
||||
ulimits:
|
||||
memlock:
|
||||
soft: -1
|
||||
hard: -1
|
||||
volumes:
|
||||
- es:/usr/share/elasticsearch/data # check for permission error when using bind mount, see readme
|
||||
networks:
|
||||
- backend
|
||||
|
||||
volumes:
|
||||
media:
|
||||
driver: local
|
||||
name: nobackup_ta_media
|
||||
driver_opts: {{ docker.cifs.media }}
|
||||
es:
|
||||
driver: local
|
||||
name: nobackup_ta_es
|
||||
driver_opts: {{ docker.cifs.es }}
|
||||
cache:
|
||||
redis:
|
||||
|
||||
networks:
|
||||
gateway:
|
||||
external: true
|
||||
backend:
|
||||
internal: true
|
||||
|
||||
...
|
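The media and es volumes are backed by CIFS shares through the templated driver_opts. A hypothetical vars block matching Docker's local-driver CIFS options (host, share names, and credentials are placeholders):

docker:
  cifs:
    media:
      type: cifs
      o: "addr=nas.example.org,username=tubearchivist,password=<secret>,uid=1000,gid=1000,vers=3.0"
      device: "//nas.example.org/ta-media"
    es:
      type: cifs
      o: "addr=nas.example.org,username=tubearchivist,password=<secret>,uid=1000,gid=1000,vers=3.0"
      device: "//nas.example.org/ta-es"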
1
coreos-config/plays/services/watchtower/.env
Normal file
1
coreos-config/plays/services/watchtower/.env
Normal file
@ -0,0 +1 @@
|
||||
COMPOSE_PROJECT_NAME=watchtower
|
50
coreos-config/plays/services/watchtower/docker-compose.yaml
Normal file
50
coreos-config/plays/services/watchtower/docker-compose.yaml
Normal file
@ -0,0 +1,50 @@
|
||||
---
|
||||
services:
|
||||
update:
|
||||
image: containrrr/watchtower:latest
|
||||
container_name: Watchtower
|
||||
privileged: true
|
||||
environment:
|
||||
DOCKER_CONFIG: /config
|
||||
WATCHTOWER_SCHEDULE: 0 42 * * * *
|
||||
WATCHTOWER_CLEANUP: 1
|
||||
WATCHTOWER_ROLLING_RESTART: 1
|
||||
WATCHTOWER_NOTIFICATION_URL: "matrix://{{ watchtower.matrix.user }}:{{ watchtower.matrix.password }}@pantalaimon:8008/?disableTLS=yes{% if watchtower.matrix.options is defined %}&{{ watchtower.matrix.options | join('&') }}{% endif%}"
|
||||
WATCHTOWER_NOTIFICATION_REPORT: "true"
|
||||
WATCHTOWER_NOTIFICATION_TEMPLATE: |
|
||||
{% raw %}
|
||||
{{- if .Report -}}
|
||||
{{- with .Report -}}
|
||||
{{- if ( or .Updated .Failed .Skipped ) -}}
|
||||
{% endraw %}
|
||||
Updates on {{ inventory_hostname }}{% raw %}
|
||||
{{len .Scanned}} Scanned, {{len .Updated}} Updated, {{len .Failed}} Failed
|
||||
{{- range .Updated}}
|
||||
- {{.Name}} ({{.ImageName}}): {{.CurrentImageID.ShortID}} updated to {{.LatestImageID.ShortID}}
|
||||
{{- end -}}
|
||||
{{- range .Skipped}}
|
||||
- {{.Name}} ({{.ImageName}}): {{.State}}: {{.Error}}
|
||||
{{- end -}}
|
||||
{{- range .Failed}}
|
||||
- {{.Name}} ({{.ImageName}}): {{.State}}: {{.Error}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
{{- else -}}
|
||||
{% endraw %}
|
||||
Updates on {{ inventory_hostname }}{% raw %}
|
||||
{{range .Entries -}}{{.Message}}{{"\n"}}{{- end -}}
|
||||
{{- end -}}{% endraw %}
|
||||
|
||||
volumes:
|
||||
- /var/run/docker.sock:/var/run/docker.sock
|
||||
- /home/core/.docker:/config
|
||||
restart: always
|
||||
networks:
|
||||
- default
|
||||
- pantalaimon
|
||||
|
||||
networks:
|
||||
pantalaimon:
|
||||
external: true
|
||||
...
|
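The notification URL is assembled from the watchtower.matrix variables and routed through the pantalaimon proxy using shoutrrr's matrix scheme, with any extra options appended as query parameters. A hypothetical vars sketch (user, password, and room are placeholders):

watchtower:
  matrix:
    user: "watchtower-bot"
    password: "<bot password>"
    options:
      - "rooms=!abcdef:example.org"   # hypothetical target room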
1
coreos-config/plays/services/wireguard/.env
Normal file
1
coreos-config/plays/services/wireguard/.env
Normal file
@ -0,0 +1 @@
|
||||
COMPOSE_PROJECT_NAME=wireguard
|
32
coreos-config/plays/services/wireguard/docker-compose.yaml
Normal file
32
coreos-config/plays/services/wireguard/docker-compose.yaml
Normal file
@ -0,0 +1,32 @@
|
||||
---
|
||||
version: '3.9'
|
||||
|
||||
services:
|
||||
wireguard:
|
||||
image: registry.tobiasmanske.de/wireguard:latest
|
||||
container_name: wireguard
|
||||
cap_add:
|
||||
- NET_ADMIN
|
||||
security_opt:
|
||||
- label:disable
|
||||
environment:
|
||||
- PUID=1000
|
||||
- PGID=1000
|
||||
- TZ=Europe/Berlin
|
||||
volumes:
|
||||
- ./wg0.conf:/etc/wireguard/wg0.conf:ro,z
|
||||
ports:
|
||||
- 51820:51820/udp
|
||||
sysctls:
|
||||
- net.ipv4.conf.all.src_valid_mark=1
|
||||
- net.ipv6.conf.all.disable_ipv6=0
|
||||
restart: unless-stopped
|
||||
networks:
|
||||
- default
|
||||
- internal_services
|
||||
|
||||
networks:
|
||||
internal_services:
|
||||
name: internal_services
|
||||
internal: true
|
||||
...
|
24
coreos-config/plays/services/wireguard/wg0.conf
Normal file
24
coreos-config/plays/services/wireguard/wg0.conf
Normal file
@ -0,0 +1,24 @@
|
||||
[Interface]
|
||||
{% for addr in wireguard.cidr %}
|
||||
Address = {{ addr }}
|
||||
{% endfor %}
|
||||
MTU = 1420
|
||||
SaveConfig = true
|
||||
PostUp = iptables -A FORWARD -i wg0 -j ACCEPT; iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE; ip6tables -A FORWARD -i wg0 -j ACCEPT; ip6tables -t nat -A POSTROUTING -o eth0 -j MASQUERADE
|
||||
PostDown = iptables -D FORWARD -i wg0 -j ACCEPT; iptables -t nat -D POSTROUTING -o eth0 -j MASQUERADE; ip6tables -D FORWARD -i wg0 -j ACCEPT; ip6tables -t nat -D POSTROUTING -o eth0 -j MASQUERADE
|
||||
ListenPort = {{ wireguard.port }}
|
||||
PrivateKey = {{ wireguard.private_key }}
|
||||
|
||||
{% for peer in wireguard_peers %}
|
||||
# Peer: {{ peer.name }}
|
||||
[Peer]
|
||||
PublicKey = {{ peer.pubkey }}
|
||||
AllowedIPs = {{ peer.allowedips | join(", ") }}
|
||||
{% if peer.endpoint is defined %}
|
||||
Endpoint = {{ peer.endpoint }}
|
||||
{% endif %}
|
||||
{% if peer.keepalive is defined %}
|
||||
PersistentKeepalive = {{ peer.keepalive }}
|
||||
{% endif %}
|
||||
|
||||
{% endfor %}
|
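The template expects a server definition plus a list of peers. A hypothetical vars sketch with placeholder keys and addresses:

wireguard:
  port: 51820
  private_key: "<server private key>"
  cidr:
    - "10.13.13.1/24"
    - "fd42:42:42::1/64"
wireguard_peers:
  - name: laptop                      # hypothetical peer
    pubkey: "<peer public key>"
    allowedips:
      - "10.13.13.2/32"
      - "fd42:42:42::2/128"
    keepalive: 25
  - name: phone
    pubkey: "<peer public key>"
    allowedips:
      - "10.13.13.3/32"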
1
coreos-config/plays/services/wkd/.env
Normal file
1
coreos-config/plays/services/wkd/.env
Normal file
@ -0,0 +1 @@
|
||||
COMPOSE_PROJECT_NAME=wkd
|
15
coreos-config/plays/services/wkd/Caddyfile
Normal file
15
coreos-config/plays/services/wkd/Caddyfile
Normal file
@ -0,0 +1,15 @@
|
||||
{
|
||||
auto_https off
|
||||
}
|
||||
|
||||
http://tobiasmanske.de {
|
||||
header {
|
||||
Access-Control-Allow-Origin *
|
||||
}
|
||||
|
||||
respond /.well-known/openpgpkey/policy "" 200
|
||||
|
||||
uri strip_prefix /.well-known/openpgpkey/hu
|
||||
root * /data/tobiasmanske.de/
|
||||
file_server
|
||||
}
|
Binary file not shown.
Binary file not shown.
23
coreos-config/plays/services/wkd/docker-compose.yaml
Normal file
23
coreos-config/plays/services/wkd/docker-compose.yaml
Normal file
@ -0,0 +1,23 @@
|
||||
---
|
||||
version: "3.4"
|
||||
|
||||
services:
|
||||
wkd:
|
||||
image: caddy:2
|
||||
labels:
|
||||
- "traefik.enable=true"
|
||||
- "traefik.http.routers.wkd.rule=(Host(`tobiasmanske.de`) && PathPrefix(`/{path:.well-known/openpgpkey}/`))"
|
||||
- "traefik.http.routers.wkd.entryPoints=websecure"
|
||||
- "traefik.http.routers.wkd.priority=100"
|
||||
- "traefik.http.services.wkd.loadbalancer.server.port=80"
|
||||
restart: always
|
||||
volumes:
|
||||
- ./Caddyfile:/etc/caddy/Caddyfile:ro,z
|
||||
- ./data:/data:ro,Z
|
||||
networks:
|
||||
- gateway
|
||||
|
||||
networks:
|
||||
gateway:
|
||||
external: true
|
||||
...
|
2
coreos-config/plays/services/youtrack/.env
Normal file
2
coreos-config/plays/services/youtrack/.env
Normal file
@ -0,0 +1,2 @@
|
||||
COMPOSE_PROJECT_NAME=youtrack
|
||||
VERSION=2022.2.59587
|
30
coreos-config/plays/services/youtrack/docker-compose.yaml
Normal file
30
coreos-config/plays/services/youtrack/docker-compose.yaml
Normal file
@ -0,0 +1,30 @@
|
||||
---
|
||||
version: "3.4"
|
||||
|
||||
services:
|
||||
youtrack:
|
||||
image: jetbrains/youtrack:${VERSION}
|
||||
volumes:
|
||||
- data:/opt/youtrack/data
|
||||
- conf:/opt/youtrack/conf
|
||||
- logs:/opt/youtrack/logs
|
||||
- backups:/opt/youtrack/backups
|
||||
labels:
|
||||
- "traefik.enable=true"
|
||||
- "traefik.http.routers.youtrack.rule=Host(`youtrack.tobiasmanske.de`)"
|
||||
- "traefik.http.routers.youtrack.entryPoints=websecure"
|
||||
- "traefik.http.services.youtrack.loadbalancer.server.port=8080"
|
||||
restart: always
|
||||
networks:
|
||||
- gateway
|
||||
|
||||
networks:
|
||||
gateway:
|
||||
external: true
|
||||
|
||||
volumes:
|
||||
data:
|
||||
conf:
|
||||
logs:
|
||||
backups:
|
||||
...
|
@ -21,13 +21,14 @@
|
||||
hosts: host.nc.chaoswg.org
|
||||
vars:
|
||||
state: present
|
||||
base_domain: "tobiasmanske.de"
|
||||
roles:
|
||||
- {role: compose_project, service: traefik, with_fa: true}
|
||||
- {role: compose_project, service: keycloak}
|
||||
- {role: compose_project, service: prometheus}
|
||||
- {role: compose_project, service: minio}
|
||||
- {role: compose_project, service: repo_proxy}
|
||||
- {role: compose_project, service: registry}
|
||||
- {role: compose_project, service: registry, htpasswd: registry.HTPASSWD }
|
||||
- {role: compose_project, service: pantalaimon}
|
||||
- {role: compose_project, service: gitea}
|
||||
- {role: compose_project, service: gitea-runner}
|
||||
@ -45,3 +46,11 @@
|
||||
- {role: compose_project, service: diun}
|
||||
- {role: compose_project, service: watchtower}
|
||||
- {role: compose_project, service: wkd}
|
||||
|
||||
- name: Setup KITCTF registry
|
||||
hosts: host.nc.chaoswg.org
|
||||
vars:
|
||||
state: present
|
||||
base_domain: "ctf.kitctf.de"
|
||||
roles:
|
||||
- {role: compose_project, service: registry, htpasswd: registry.kitctf.HTPASSWD }
|
||||
|