---
# infrastructure/ansible/plays/common.yaml
# Common host setup: SSH keys & sshd, networking, backup/restore, Docker,
# registry credentials, push monitoring, and the Wireguard import.
# Manage SSH access: deploy the machine deploy key plus shared admin keys into
# a root-owned /etc/ssh/authorized_keys dir, and install an sshd override.
- name: Setup SSH Config
  hosts: all
  become: true
  become_user: root
  tags:
    - setup_ssh
    - setup
  tasks:
    - name: Authorized_keys dir present
      ansible.builtin.file:
        state: directory
        path: /etc/ssh/authorized_keys
        owner: root
        group: root
        mode: '0755'
    # Derive the public key locally from the controller's private key; this is
    # read-only, so never report "changed".
    - name: Obtain Machine Pubkey
      delegate_to: localhost
      become: false
      changed_when: false
      register: pubkey
      community.crypto.openssl_publickey:
        format: OpenSSH
        path: "/tmp/{{ inventory_hostname }}.pub"
        privatekey_path: "{{ ansible_ssh_private_key_file }}"
        return_content: true
    - name: Deploy Machine SSH-Key
      ansible.posix.authorized_key:
        user: "{{ ansible_user }}"
        state: present
        # Dir is root-owned and managed by the tasks above/below, so the
        # module must not try to chown/chmod it itself.
        manage_dir: false
        path: "/etc/ssh/authorized_keys/{{ ansible_user }}"
        key: "{{ pubkey.publickey }} drone-machine-deploy"
    - name: Deploy Common SSH-Keys
      ansible.posix.authorized_key:
        user: "{{ ansible_user }}"
        state: present
        manage_dir: false
        path: "/etc/ssh/authorized_keys/{{ ansible_user }}"
        key: "{{ item }}"
      loop: "{{ common.ssh.authorized_keys }}"
    # Re-assert root ownership recursively in case authorized_key created the
    # per-user key file with different ownership.
    - name: Ensure authorized_keys ownership
      ansible.builtin.file:
        state: directory
        path: /etc/ssh/authorized_keys
        owner: root
        group: root
        mode: "u=rwX,g=rX,o=rX"
        recurse: true
    - name: Configure sshd
      ansible.builtin.template:
        src: 'sshd_config.j2'
        dest: '/etc/ssh/sshd_config.d/99-override.conf'
        owner: root
        group: root
        mode: '0600'
      # FIX: previously no handler was notified, so sshd kept running with the
      # old configuration until a manual restart or reboot.
      notify: Reload sshd
    - name: Remove Keys Config
      ansible.builtin.file:
        state: absent
        path: /etc/ssh/ssh_config.d/40-ssh-key-dir.conf
  handlers:
    # Reload (not restart) keeps existing SSH sessions alive while applying
    # the new configuration.
    - name: Reload sshd
      ansible.builtin.systemd:
        name: sshd.service
        state: reloaded
# Network configuration for hosts in the network_config group: a templated
# NetworkManager wired profile and fixed upstream DNS via systemd-resolved.
- name: Setup Networks
  hosts: network_config
  become: true
  become_user: root
  tasks:
    - name: Setup wired interface
      ansible.builtin.template:
        src: connection.nmconnection.j2
        dest: "/etc/NetworkManager/system-connections/Wired Connection 1.nmconnection"
        owner: root
        group: root
        mode: '0600'
      notify: Restart Network
    # Pin primary and fallback resolvers in resolved.conf; each entry is
    # matched by its regexp so reruns stay idempotent.
    - name: Setup DNS
      ansible.builtin.lineinfile:
        path: /etc/systemd/resolved.conf
        regexp: "{{ item.regexp }}"
        line: "{{ item.line }}"
      notify: Restart systemd-resolved
      loop:
        - regexp: '^DNS='
          line: 'DNS=1.1.1.1'
        - regexp: '^FallbackDNS='
          line: 'FallbackDNS=8.8.8.8'
  handlers:
    - name: Restart Network
      ansible.builtin.systemd:
        name: NetworkManager.service
        state: restarted
    - name: Restart systemd-resolved
      ansible.builtin.systemd:
        name: systemd-resolved.service
        state: restarted
# Install the borg backup tooling: the backup script, the storagebox SSH key,
# and a pinned known_hosts entry for the backup target.
- name: Backup
  hosts: backup
  become: true
  become_user: root
  tasks:
    - name: Install backup script
      ansible.builtin.template:
        src: backup.sh.j2
        dest: /root/backup.sh
        mode: '0700'
        owner: root
    # FIX: this task previously had no name (ansible-lint name[missing]).
    - name: Ensure root .ssh directory
      ansible.builtin.file:
        path: /root/.ssh
        owner: root
        state: directory
        mode: '0700'
    - name: Install SSH Keys
      ansible.builtin.template:
        src: storagebox.j2
        dest: /root/.ssh/storagebox
        mode: '0600'
        owner: root
    # Pre-seed the host key so the backup script never hits an interactive
    # host-key prompt.
    - name: Add Known Hosts entries
      ansible.builtin.known_hosts:
        path: /root/.ssh/known_hosts
        name: "{{ backup.known_hosts.name }}"
        key: "{{ backup.known_hosts.key }}"
# One-shot restore: if /etc/setup_complete is absent, pull the latest borg
# backup onto the host, then drop a marker file so this never runs again.
- name: Restore from Backup
  hosts: backup
  become: true
  become_user: root
  gather_facts: true
  tasks:
    - name: Check if restore is needed
      ansible.builtin.stat:
        path: /etc/setup_complete
      register: setup_complete
    # FIX: the block previously had no name (ansible-lint name[missing]).
    - name: Restore host state on first run
      when: not setup_complete.stat.exists
      block:
        - name: Install restore script
          ansible.builtin.template:
            src: restore.sh.j2
            dest: /root/restore.sh
            mode: '0700'
            owner: root
        - name: Setup ssh directory
          ansible.builtin.file:
            path: /root/.ssh
            owner: root
            state: directory
            mode: '0700'
        - name: Install SSH Keys
          ansible.builtin.template:
            src: storagebox.j2
            dest: /root/.ssh/storagebox
            mode: '0600'
            owner: root
        - name: Add Known Hosts entries
          ansible.builtin.known_hosts:
            path: /root/.ssh/known_hosts
            name: "{{ backup.known_hosts.name }}"
            key: "{{ backup.known_hosts.key }}"
        # FIX: dropped redundant become/become_user here — already set at
        # play level.
        - name: Restore from Borg
          ansible.builtin.command:
            chdir: /
            cmd: bash /root/restore.sh
        - name: Remove script from host
          ansible.builtin.file:
            path: /root/restore.sh
            state: absent
        - name: Mark setup as complete
          ansible.builtin.file:
            path: /etc/setup_complete
            state: touch
            owner: root
            group: root
            # FIX: quoted — unquoted 0600 is a YAML octal integer and is
            # flagged by ansible-lint (risky-octal).
            mode: '0600'
# Install container-registry credentials for the core user. Runs as the
# connecting user (no become) since it only touches core's own home.
- name: Setup Registry credentials
  hosts: all
  tasks:
    # FIX: both tasks previously had no name (ansible-lint name[missing]).
    - name: Ensure docker config directory
      ansible.builtin.file:
        path: /home/core/.docker
        owner: core
        state: directory
        mode: '0700'
    - name: Install registry credentials
      ansible.builtin.template:
        src: docker-config.json.j2
        dest: /home/core/.docker/config.json
        mode: '0600'
        owner: core
# Docker daemon configuration: templated daemon.json plus sysconfig tweaks,
# with a flushed restart handler so later plays see the new daemon settings.
- name: Setup Docker Config
  hosts: all
  become: true
  become_user: root
  tasks:
    - name: Template Config
      ansible.builtin.template:
        src: docker-daemon.json.j2
        dest: /etc/docker/daemon.json
        owner: root
        group: root
        mode: '0600'
      notify: Restart Docker
    # Leading space and trailing backslash keep the line valid inside the
    # multi-line OPTIONS continuation in /etc/sysconfig/docker.
    - name: Setup default ulimts
      ansible.builtin.lineinfile:
        path: /etc/sysconfig/docker
        search_string: '--default-ulimit nofile='
        line: ' --default-ulimit nofile=4096:4096 \'
      notify: Restart Docker
    # The log driver is set in daemon.json instead; remove the conflicting
    # CLI flag.
    - name: Remove log-driver from sysconfig
      ansible.builtin.lineinfile:
        path: /etc/sysconfig/docker
        search_string: '--log-driver='
        state: absent
      notify: Restart Docker
    # Apply the restart now rather than at end of play, so subsequent plays
    # talk to a daemon running the new config.
    - name: Restart Docker if necessary
      meta: flush_handlers
  handlers:
    - name: Restart Docker
      ansible.builtin.systemd:
        name: docker.service
        state: restarted
# Create the host's internal-only Docker networks; an absent or empty
# docker.internal_networks variable makes this a no-op.
- name: Setup internal networks
  hosts: all
  tasks:
    - name: Setup network
      community.docker.docker_network:
        name: "{{ item }}"
        internal: true
      loop: "{{ docker.internal_networks | default([]) }}"
# Register a push monitor in Uptime Kuma for each host and install a per-user
# systemd timer that sends the heartbeat. Tagged "never" so it only runs when
# explicitly requested.
- name: Setup Push Monitoring
  hosts: all
  tags:
    - never
    - setup_monitoring
    - setup
  tasks:
    # Kuma API calls are delegated to the controller; check_mode: false keeps
    # them working during --check runs since later tasks depend on them.
    - name: Login to Kuma
      delegate_to: localhost
      check_mode: false
      lucasheld.uptime_kuma.login:
        api_url: "{{ kuma.api_url }}"
        api_username: "{{ kuma.api_username }}"
        api_password: "{{ kuma.api_password }}"
      register: kumalogin
    - name: Create Kuma Monitor
      delegate_to: localhost
      check_mode: false
      lucasheld.uptime_kuma.monitor:
        api_url: "{{ kuma.api_url }}"
        api_token: "{{ kumalogin.token }}"
        name: "{{ inventory_hostname }}"
        description: "Managed by Ansible"
        type: push
        # Timer pushes less often than this window, so one missed beat is
        # tolerated before alerting.
        interval: 330
        maxretries: 2
        notification_names:
          - "Kuma Statusmonitor"
        state: present
    - name: Obtain Kuma Push Token
      delegate_to: localhost
      check_mode: false
      lucasheld.uptime_kuma.monitor_info:
        api_url: "{{ kuma.api_url }}"
        api_token: "{{ kumalogin.token }}"
        name: "{{ inventory_hostname }}"
      register: monitor
    # FIX: stat/command/file below used short module names; switched to FQCN
    # for consistency with the rest of this file.
    - name: Check if user is lingering
      ansible.builtin.stat:
        path: "/var/lib/systemd/linger/{{ ansible_user }}"
      register: user_lingering
    # Lingering lets the user's timer run without an active login session.
    - name: Enable lingering for user if needed
      ansible.builtin.command: "loginctl enable-linger {{ ansible_user }}"
      when:
        - not user_lingering.stat.exists
    - name: Create systemd config dir
      ansible.builtin.file:
        state: directory
        path: "/home/{{ ansible_user }}/.config/systemd/user"
        owner: "{{ ansible_user }}"
        group: "{{ ansible_user }}"
        mode: '0755'
    - name: Copy Push Monitor Service and Timer
      ansible.builtin.template:
        src: "{{ item }}.j2"
        dest: "/home/{{ ansible_user }}/.config/systemd/user/{{ item }}"
        mode: '0600'
        owner: "{{ ansible_user }}"
      vars:
        monitor_url: "{{ kuma.api_url }}/api/push/{{ monitor.monitors[0].pushToken }}?status=up&msg=OK"
      loop:
        - heartbeat.service
        - heartbeat.timer
    - name: Enable timer
      ansible.builtin.systemd:
        scope: user
        name: heartbeat.timer
        state: started
        enabled: true
        masked: false
        daemon_reload: true
# Pull in the Wireguard VPN playbook; tags let it run standalone.
- name: Setup Infrastructure Wireguard
  ansible.builtin.import_playbook: vpn.yaml
  tags:
    - setup
    - setup_wireguard
    - setup_vpn
# vim: ft=yaml.ansible