# infrastructure/ansible/plays/common.yaml
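#
# Baseline configuration shared by all managed hosts: SSH access, network and DNS
# settings, Borg backup and restore, Docker daemon and registry credentials,
# internal Docker networks, Uptime Kuma push monitoring, and the WireGuard import.
#
# Example invocation (the inventory path is illustrative, adjust to your layout):
#   ansible-playbook -i inventory plays/common.yaml
# Plays tagged `never` only run when their tag is requested explicitly, e.g.
#   ansible-playbook -i inventory plays/common.yaml --tags setup

# The first play installs per-user authorized_keys files under
# /etc/ssh/authorized_keys/<user>, adds the controller's own public key (derived
# from the connection's private key with ssh-keygen -y), and drops the rendered
# sshd_config.j2 into /etc/ssh/sshd_config.d/ as an override.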

- name: Setup SSH Config
  hosts: all
  become: true
  become_user: root
  tags:
    - setup_ssh
    - setup
  tasks:
    - name: Authorized_keys dir present
      ansible.builtin.file:
        state: directory
        path: /etc/ssh/authorized_keys
        owner: root
        group: root
        mode: '0755'
    - name: Obtain Machine Pubkey
      delegate_to: localhost
      become: false
      changed_when: false
      register: pubkey
      ansible.builtin.command:
        cmd: "ssh-keygen -y -f {{ ansible_ssh_private_key_file }}"
    - name: Deploy SSH-Keys
      vars:
        machine_key: "{{ pubkey.stdout }}"
      ansible.builtin.template:
        src: "authorized_keys.j2"
        dest: "/etc/ssh/authorized_keys/{{ ansible_user }}"
        owner: root
        group: root
        mode: '0644'
    - name: Ensure authorized_keys ownership
      ansible.builtin.file:
        state: directory
        path: /etc/ssh/authorized_keys
        owner: root
        group: root
        mode: "u=rwX,g=rX,o=rX"
        recurse: true
    - name: Configure sshd
      ansible.builtin.template:
        src: 'sshd_config.j2'
        dest: '/etc/ssh/sshd_config.d/99-override.conf'
        owner: root
        group: root
        mode: '0600'
    - name: Remove Keys Config
      ansible.builtin.file:
        state: absent
        path: /etc/ssh/ssh_config.d/40-ssh-key-dir.conf
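
# Writes a static NetworkManager profile for the wired interface and points
# systemd-resolved at the configured upstream/fallback DNS servers. Only hosts
# in the network_config group are affected.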
- name: Setup Networks
  hosts: network_config
  become: true
  become_user: root
  tasks:
    - name: Setup wired interface
      ansible.builtin.template:
        src: "connection.nmconnection.j2"
        dest: "/etc/NetworkManager/system-connections/Wired Connection 1.nmconnection"
        owner: root
        group: root
        mode: '0600'
      notify: Restart Network
    - name: Setup DNS
      ansible.builtin.lineinfile:
        path: /etc/systemd/resolved.conf
        regexp: "{{ item.regexp }}"
        line: "{{ item.line }}"
      notify: Restart systemd-resolved
      loop:
        - regexp: "^DNS="
          line: "DNS=1.1.1.1"
        - regexp: "^FallbackDNS="
          line: "FallbackDNS=8.8.8.8"
  handlers:
    - name: Restart Network
      ansible.builtin.systemd:
        name: NetworkManager.service
        state: restarted
    - name: Restart systemd-resolved
      ansible.builtin.systemd:
        name: systemd-resolved.service
        state: restarted
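
# Borg backup setup: installs backup.sh, creates a dedicated ed25519 SSH key, and
# registers that key on the backup server, restricted to this host's repository
# via "borg serve --restrict-to-repository".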
- name: Backup
  hosts: backup
  become: true
  become_user: root
  vars:
    repo_path: "/var/home/backup/storagebox/{{ inventory_hostname }}"
    password: "{{ backup.password }}"
    pushkey: "{{ backup.pushkey }}"
  tasks:
    - name: Install backup script
      vars:
        repo: "ssh://{{ common.backup.user }}@{{ common.backup.url }}{{ repo_path }}"
      ansible.builtin.template:
        src: backup.sh.j2
        dest: /root/backup.sh
        mode: '0700'
        owner: root
    - name: Generate SSH directory
      ansible.builtin.file:
        path: /root/.ssh
        owner: root
        state: directory
        mode: '0700'
    - name: Generate SSH Key
      community.crypto.openssh_keypair:
        path: /root/.ssh/borgbackup
        type: ed25519
        owner: root
        mode: '0600'
      register: keypair
    - name: Register SSH Key with backup server
      become: true
      become_user: root
      delegate_to: backup.unruhig.eu
      ansible.builtin.lineinfile:
        path: /etc/ssh/authorized_keys/backup
        state: present
        search_string: "{{ keypair.public_key }}"
        line: 'command="borg serve --restrict-to-repository {{ repo_path }}",restrict {{ keypair.public_key }}'
    - name: Add Known Hosts entries
      ansible.builtin.known_hosts:
        path: "/root/.ssh/known_hosts"
        name: "backup.unruhig.eu"
        key: "{{ item }}"
      loop: "{{ hostvars['backup.unruhig.eu']['known_hosts'] }}"
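
# One-time restore for freshly provisioned hosts: if /etc/setup_complete does not
# exist yet, the backup service is masked, restore.sh replays the Borg archive,
# and the marker file is created so the play becomes a no-op on later runs.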
- name: Restore from Backup
  hosts: backup
  become: true
  become_user: root
  gather_facts: true
  vars:
    repo_path: "/var/home/backup/storagebox/{{ inventory_hostname }}"
    password: "{{ backup.password }}"
    pushkey: "{{ backup.pushkey }}"
  tasks:
    - name: Check if restore is needed
      ansible.builtin.stat:
        path: "/etc/setup_complete"
      register: setup_complete
    - name: Restore host state unless setup is already complete
      block:
        - name: Install restore script
          vars:
            repo: "ssh://{{ common.backup.user }}@{{ common.backup.url }}{{ repo_path }}"
          ansible.builtin.template:
            src: restore.sh.j2
            dest: /root/restore.sh
            mode: '0700'
            owner: root
        - name: Stop and mask backup service
          become: true
          become_user: root
          ansible.builtin.systemd:
            name: "borgbackup.service"
            state: stopped
            masked: true
        - name: Restore from Borg
          become: true
          become_user: root
          ansible.builtin.command:
            chdir: /
            cmd: bash /root/restore.sh
        - name: Remove script from host
          ansible.builtin.file:
            path: /root/restore.sh
            state: absent
        - name: Mark setup as complete
          ansible.builtin.file:
            path: "/etc/setup_complete"
            state: touch
            owner: root
            group: root
            mode: '0600'
        - name: Unmask backup service
          become: true
          become_user: root
          ansible.builtin.systemd:
            name: "borgbackup.service"
            state: stopped
            masked: false
      when: not setup_complete.stat.exists
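
# Registry login for the core user: renders ~/.docker/config.json from a template.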
- name: Setup Registry credentials
  hosts: all
  tasks:
    - name: Create Docker client config directory
      ansible.builtin.file:
        path: /home/core/.docker
        owner: core
        state: directory
        mode: '0700'
    - name: Deploy registry credentials
      ansible.builtin.template:
        src: docker-config.json.j2
        dest: /home/core/.docker/config.json
        mode: '0600'
        owner: core
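
# Docker daemon settings live in daemon.json; the ulimit and log-driver flags are
# stripped from /etc/sysconfig/docker so they cannot conflict with daemon.json.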
- name: Setup Docker Config
  hosts: all
  become: true
  become_user: root
  tasks:
    - name: Create Docker config directory
      ansible.builtin.file:
        path: /etc/docker
        owner: root
        state: directory
        mode: '0700'
    - name: Template Config
      ansible.builtin.template:
        src: "docker-daemon.json.j2"
        dest: /etc/docker/daemon.json
        owner: root
        group: root
        mode: '0600'
      notify: Restart Docker
    - name: Check if sysconfig exists
      ansible.builtin.stat:
        path: /etc/sysconfig/docker
      register: sysconfig
    - name: Remove ulimits from sysconfig
      ansible.builtin.lineinfile:
        path: /etc/sysconfig/docker
        search_string: '--default-ulimit nofile='
        state: absent
      when: sysconfig.stat.exists
      notify: Restart Docker
    - name: Remove log-driver from sysconfig
      ansible.builtin.lineinfile:
        path: /etc/sysconfig/docker
        search_string: '--log-driver='
        state: absent
      when: sysconfig.stat.exists
      notify: Restart Docker
    - name: Restart Docker if necessary
      ansible.builtin.meta: flush_handlers
  handlers:
    - name: Restart Docker
      ansible.builtin.systemd:
        state: restarted
        name: docker.service
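
# Creates the internal-only Docker networks listed in docker.internal_networks.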
- name: Setup internal networks
  hosts: all
  tasks:
    - name: Setup network
      community.docker.docker_network:
        name: "{{ item }}"
        internal: true
      loop: "{{ docker.internal_networks | default([]) }}"
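
# Push monitoring: creates (or updates) a push monitor in Uptime Kuma, then
# installs a user-level heartbeat.service/heartbeat.timer pair that calls the
# monitor's push URL. Tagged `never`, so it only runs when a matching tag is
# requested (--tags setup_monitoring or --tags setup).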
- name: Setup Push Monitoring
  hosts: all
  tags:
    - never
    - setup_monitoring
    - setup
  tasks:
    - name: Login to Kuma
      delegate_to: localhost
      check_mode: false
      lucasheld.uptime_kuma.login:
        api_url: "{{ kuma.api_url }}"
        api_username: "{{ kuma.api_username }}"
        api_password: "{{ kuma.api_password }}"
      register: kumalogin
    - name: Create Kuma Monitor
      delegate_to: localhost
      check_mode: false
      lucasheld.uptime_kuma.monitor:
        api_url: "{{ kuma.api_url }}"
        api_token: "{{ kumalogin.token }}"
        name: "{{ inventory_hostname }}"
        description: "Managed by Ansible"
        type: push
        interval: "{{ heartbeat_timer_interval | mandatory + 30 }}"
        maxretries: 2
        notification_names:
          - "Kuma Statusmonitor"
        state: present
    - name: Obtain Kuma Push Token
      delegate_to: localhost
      check_mode: false
      lucasheld.uptime_kuma.monitor_info:
        api_url: "{{ kuma.api_url }}"
        api_token: "{{ kumalogin.token }}"
        name: "{{ inventory_hostname }}"
      register: monitor
    - name: Check if user is lingering
      ansible.builtin.stat:
        path: "/var/lib/systemd/linger/{{ ansible_user }}"
      register: user_lingering
    - name: Enable lingering for user if needed
      ansible.builtin.command: "loginctl enable-linger {{ ansible_user }}"
      when:
        - not user_lingering.stat.exists
    - name: Create systemd config dir
      ansible.builtin.file:
        state: directory
        path: "/home/{{ ansible_user }}/.config/systemd/user"
        owner: "{{ ansible_user }}"
        group: "{{ ansible_user }}"
        mode: '0755'
    - name: Copy Push Monitor Service and Timer
      ansible.builtin.template:
        src: "{{ item }}.j2"
        dest: "/home/{{ ansible_user }}/.config/systemd/user/{{ item }}"
        mode: '0600'
        owner: "{{ ansible_user }}"
      vars:
        monitor_url: "{{ kuma.api_url }}/api/push/{{ monitor.monitors[0].pushToken }}?status=up&msg=OK"
      loop:
        - heartbeat.service
        - heartbeat.timer
    - name: Enable timer
      ansible.builtin.systemd:
        scope: user
        name: heartbeat.timer
        state: started
        enabled: true
        masked: false
        daemon_reload: true
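
# WireGuard setup lives in its own playbook (vpn.yaml) and is imported here so it
# shares the common setup tags.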
- name: Setup Infrastructure Wireguard
  tags:
    - never
    - setup
    - setup_wireguard
    - setup_vpn
  ansible.builtin.import_playbook: vpn.yaml
# vim: ft=yaml.ansible