# Source file: yt-dlp-dags/ansible/playbook-full-install.yml (405 lines, 16 KiB, YAML)
---
# This playbook provides a complete installation for fresh nodes.
# It can install either master or worker roles, or both on the same machine.
#
# Usage examples:
# # Install everything on all nodes
# ansible-playbook ansible/playbook-full-install.yml
#
# # Install only on workers
# ansible-playbook ansible/playbook-full-install.yml --limit workers
#
# # Install only on master
# ansible-playbook ansible/playbook-full-install.yml --limit master
#
# # Install both roles on a single machine
# ansible-playbook ansible/playbook-full-install.yml --limit specific-host -e "install_master=true install_worker=true"
# -------------------------------------------------------------------------------------------------
# PHASE 1: Base System Configuration
# Ensures all nodes have the necessary base packages, user configurations, and Docker installed.
# -------------------------------------------------------------------------------------------------
# Imported playbook handles base packages, users, and Docker installation.
- name: "PHASE 1: Import base system setup playbook"
  import_playbook: playbook-base-system.yml
# -------------------------------------------------------------------------------------------------
# PHASE 2: Generate Environment Configuration
# Creates .env files needed by all subsequent steps
# -------------------------------------------------------------------------------------------------
# Generates the .env files consumed by later phases.
- name: "PHASE 2: Generate .env configuration"
  import_playbook: playbook-stress-generate-env.yml
# -------------------------------------------------------------------------------------------------
# PHASE 3: Docker Network Setup
# Ensures the shared Docker network exists before building containers
# -------------------------------------------------------------------------------------------------
- name: "PHASE 3: Ensure Docker network exists"
  hosts: all
  gather_facts: false
  vars_files:
    - "group_vars/all/vault.yml"
  pre_tasks:
    # Derive the environment name from the inventory filename
    # (e.g. "inventory.prod.yml" -> "prod").
    - name: Set inventory_env fact
      ansible.builtin.set_fact:
        inventory_env: "{{ inventory_file | basename | splitext | first | replace('inventory.', '') }}"

    # fileglob is a no-op if the generated vars file does not exist yet.
    - name: Load environment-specific variables
      ansible.builtin.include_vars: "{{ item }}"
      with_fileglob:
        - "group_vars/all/generated_vars{{ '.' + inventory_env if inventory_env else '' }}.yml"

  tasks:
    # Shared bridge network must exist before any compose project references it.
    - name: Create shared Docker network
      community.docker.docker_network:
        name: "{{ docker_network_name }}"
        driver: bridge
      become: true
# -------------------------------------------------------------------------------------------------
# PHASE 4: Build yt-dlp Docker Image
# Builds the yt-dlp container from bin/ directory
# -------------------------------------------------------------------------------------------------
- name: "PHASE 4: Build yt-dlp Docker image"
  hosts: all
  gather_facts: false
  vars_files:
    - "group_vars/all/vault.yml"
  pre_tasks:
    # Derive the environment name from the inventory filename.
    - name: Set inventory_env fact
      ansible.builtin.set_fact:
        inventory_env: "{{ inventory_file | basename | splitext | first | replace('inventory.', '') }}"

    - name: Load environment-specific variables
      ansible.builtin.include_vars: "{{ item }}"
      with_fileglob:
        - "group_vars/all/generated_vars{{ '.' + inventory_env if inventory_env else '' }}.yml"

  tasks:
    # Master-only hosts use the master dir; anything running the worker role
    # (including dual-role hosts with install_worker=true) uses the worker dir.
    - name: Define base directory for node
      ansible.builtin.set_fact:
        base_dir: "{{ airflow_master_dir if (inventory_hostname in groups['master'] and not (install_worker | default(false) | bool)) else airflow_worker_dir }}"

    - name: Ensure bin directory exists
      ansible.builtin.file:
        path: "{{ base_dir }}/bin"
        state: directory
        owner: "{{ ansible_user }}"
        group: "{{ deploy_group }}"
        mode: '0755'
      become: true

    # The Dockerfile is delivered by the code-sync phase; skip the build
    # gracefully when it has not been synced yet.
    - name: Check if Dockerfile exists in bin directory
      ansible.builtin.stat:
        path: "{{ base_dir }}/bin/Dockerfile"
      register: dockerfile_stat

    - name: Build yt-dlp Docker image if Dockerfile exists
      community.docker.docker_image:
        name: yt-dlp-custom
        tag: latest
        source: build
        build:
          path: "{{ base_dir }}/bin"
          pull: true
        state: present
        # Rebuild on every run so code changes in bin/ are picked up.
        force_source: true
      become: true
      when: dockerfile_stat.stat.exists

    - name: Display message if Dockerfile not found
      ansible.builtin.debug:
        msg: "Dockerfile not found at {{ base_dir }}/bin/Dockerfile - skipping yt-dlp image build"
      when: not dockerfile_stat.stat.exists
# -------------------------------------------------------------------------------------------------
# PHASE 5: Sync Code and Install Dependencies
# Copies application code and installs Python dependencies
# -------------------------------------------------------------------------------------------------
- name: "PHASE 5.1: Sync application code"
  import_playbook: playbook-stress-sync-code.yml

- name: "PHASE 5.2: Install Python dependencies"
  import_playbook: playbook-stress-install-deps.yml
# -------------------------------------------------------------------------------------------------
# PHASE 6: Deploy Shadowsocks Proxies
# Configures and starts proxy services
# -------------------------------------------------------------------------------------------------
- name: "PHASE 6: Deploy proxy services"
  import_playbook: playbook-proxies.yml
# -------------------------------------------------------------------------------------------------
# PHASE 7: Install bgutils
# Note: Currently bgutils is deployed on master via docker-compose
# -------------------------------------------------------------------------------------------------
- name: "PHASE 7: Install bgutils"
  import_playbook: playbook-install-bgutils.yml
# -------------------------------------------------------------------------------------------------
# PHASE 8: Master-Specific Services Setup
# Starts Redis, MinIO, and other master-only services
# -------------------------------------------------------------------------------------------------
- name: "PHASE 8: Master Node Services Setup"
  hosts: master
  gather_facts: false
  vars:
    # Environment name derived from the inventory filename
    # (e.g. "inventory.prod.yml" -> "prod").
    inventory_env: "{{ inventory_file | basename | splitext | first | replace('inventory.', '') }}"
  vars_files:
    - "group_vars/all/vault.yml"
  pre_tasks:
    - name: Load environment-specific variables
      ansible.builtin.include_vars: "{{ item }}"
      with_fileglob:
        - "group_vars/all/generated_vars{{ '.' + inventory_env if inventory_env else '' }}.yml"

  tasks:
    - name: Configure system performance and kernel settings
      ansible.builtin.copy:
        src: "configs/etc/sysctl.d/99-system-limits.conf"
        dest: "/etc/sysctl.d/99-system-limits.conf"
        owner: root
        group: root
        mode: '0644'
      become: true
      register: sysctl_config_copy

    # Only reload kernel parameters when the config file actually changed.
    - name: Apply sysctl settings
      ansible.builtin.command: sysctl --system
      become: true
      when: sysctl_config_copy.changed

    - name: Ensure MinIO data directory exists
      ansible.builtin.file:
        path: "{{ airflow_master_dir }}/minio-data"
        state: directory
        owner: "{{ ansible_user }}"
        group: "{{ deploy_group }}"
        mode: '0755'
      become: true

    - name: Template Docker Compose file for master services
      ansible.builtin.template:
        src: templates/docker-compose.stress-master.j2
        dest: "{{ airflow_master_dir }}/docker-compose.stress.yml"
        owner: "{{ ansible_user }}"
        group: "{{ deploy_group }}"
        mode: '0644'
      become: true

    # Force-remove stale containers so the compose project starts cleanly.
    # "-q" prints bare container IDs, avoiding the Jinja-vs-Go-template brace
    # escaping problem of --format "{{.ID}}" (the original "{{{{.ID}}}}" is a
    # Jinja2 template error inside Ansible's shell module).
    - name: Stop and remove existing containers before starting services
      ansible.builtin.shell:
        cmd: |
          docker ps -aq --filter "name=bgutil-provider" | xargs -r docker rm -f
          docker ps -aq --filter "name=redis-stress" | xargs -r docker rm -f
          docker ps -aq --filter "name=minio-stress" | xargs -r docker rm -f
      become: true
      changed_when: false
      ignore_errors: true

    - name: Start master services (Redis, MinIO)
      community.docker.docker_compose_v2:
        project_src: "{{ airflow_master_dir }}"
        files:
          - docker-compose.stress.yml
        state: present
        remove_orphans: true
      become: true

    # Probe the MinIO API port from the control node before continuing.
    - name: Wait for MinIO service to be ready
      ansible.builtin.wait_for:
        host: "{{ hostvars[inventory_hostname].ansible_host }}"
        port: 9000
        delay: 5
        timeout: 60
      delegate_to: localhost

    # get_url is idempotent: with force disabled it only downloads when the
    # destination is missing (same effect as the previous wget + creates).
    - name: Download MinIO Client (mc) if not present
      ansible.builtin.get_url:
        url: "https://dl.min.io/client/mc/release/linux-amd64/mc"
        dest: /usr/local/bin/mc
        mode: '0755'
        force: false
      become: true

    - name: Ensure MinIO Client (mc) is executable
      ansible.builtin.file:
        path: /usr/local/bin/mc
        mode: '0755'
      become: true

    - name: Configure mc alias for local MinIO
      ansible.builtin.command: >
        mc alias set local http://localhost:9000
        {{ vault_s3_access_key_id }} {{ vault_s3_secret_access_key }}
      become: true
      become_user: "{{ ansible_user }}"
      changed_when: false
      # Vault credentials appear on the command line; keep them out of logs.
      no_log: true
      environment:
        HOME: "/home/{{ ansible_user }}"

    # "mc mb" exits non-zero when the bucket already exists; treat that as
    # success/unchanged rather than a failure.
    - name: Ensure S3 buckets exist in MinIO using mc
      ansible.builtin.command: >
        mc mb local/{{ item }}
      loop:
        - "stress-inputs"
        - "stress-jsons"
      become: true
      become_user: "{{ ansible_user }}"
      register: mc_mb_result
      failed_when: >
        mc_mb_result.rc != 0 and
        "already exists" not in mc_mb_result.stderr
      changed_when: mc_mb_result.rc == 0
      environment:
        HOME: "/home/{{ ansible_user }}"
# -------------------------------------------------------------------------------------------------
# PHASE 9: Worker-Specific Services Setup
# Starts worker-only services if needed
# -------------------------------------------------------------------------------------------------
- name: "PHASE 9: Worker Node Services Setup"
  hosts: workers
  gather_facts: false
  vars:
    # Environment name derived from the inventory filename.
    inventory_env: "{{ inventory_file | basename | splitext | first | replace('inventory.', '') }}"
  vars_files:
    - "group_vars/all/vault.yml"
  pre_tasks:
    - name: Load environment-specific variables
      ansible.builtin.include_vars: "{{ item }}"
      with_fileglob:
        - "group_vars/all/generated_vars{{ '.' + inventory_env if inventory_env else '' }}.yml"

  tasks:
    # NOTE(review): this renders the *master* compose template onto workers —
    # confirm a worker-specific template was not intended here.
    - name: Template Docker Compose file for worker services
      ansible.builtin.template:
        src: templates/docker-compose.stress-master.j2
        dest: "{{ airflow_worker_dir }}/docker-compose.stress.yml"
        owner: "{{ ansible_user }}"
        group: "{{ deploy_group }}"
        mode: '0644'
      become: true

    # "-q" prints bare container IDs, avoiding the Jinja-vs-Go-template brace
    # escaping problem of --format "{{.ID}}" (the original "{{{{.ID}}}}" is a
    # Jinja2 template error inside Ansible's shell module).
    - name: Stop and remove existing containers before starting services
      ansible.builtin.shell:
        cmd: |
          docker ps -aq --filter "name=bgutil-provider" | xargs -r docker rm -f
          docker ps -aq --filter "name=redis-stress" | xargs -r docker rm -f
          docker ps -aq --filter "name=minio-stress" | xargs -r docker rm -f
      become: true
      changed_when: false
      ignore_errors: true

    - name: Start worker services
      community.docker.docker_compose_v2:
        project_src: "{{ airflow_worker_dir }}"
        files:
          - docker-compose.stress.yml
        state: present
        remove_orphans: true
      become: true
# -------------------------------------------------------------------------------------------------
# PHASE 10: Shared Storage Setup (s3fs)
# Mounts S3 buckets on all nodes
# -------------------------------------------------------------------------------------------------
- name: "PHASE 10: Shared Storage Setup (s3fs)"
  hosts: master:workers
  gather_facts: false
  vars:
    # Environment name derived from the inventory filename.
    inventory_env: "{{ inventory_file | basename | splitext | first | replace('inventory.', '') }}"
  vars_files:
    - "group_vars/all/vault.yml"
  pre_tasks:
    - name: Load environment-specific variables
      ansible.builtin.include_vars: "{{ item }}"
      with_fileglob:
        - "group_vars/all/generated_vars{{ '.' + inventory_env if inventory_env else '' }}.yml"

  tasks:
    - name: Define base directory for node
      ansible.builtin.set_fact:
        base_dir: "{{ airflow_master_dir if inventory_hostname in groups['master'] else airflow_worker_dir }}"

    - name: Mount S3 buckets via s3fs
      block:
        - name: Install s3fs for mounting S3 buckets
          ansible.builtin.apt:
            name: s3fs
            state: present
          become: true

        - name: Configure s3fs credentials
          ansible.builtin.copy:
            content: "{{ vault_s3_access_key_id }}:{{ vault_s3_secret_access_key }}"
            dest: "/home/{{ ansible_user }}/.passwd-s3fs"
            owner: "{{ ansible_user }}"
            group: "{{ deploy_group }}"
            mode: '0600'
          become: true
          # File content is a vault secret; keep it out of logs/diff output.
          no_log: true

        # rc == 0 means the path is already mounted; failed_when is disabled
        # so an unmounted path (grep rc 1) does not fail the play.
        - name: Check if mount points are already mounted
          ansible.builtin.shell:
            cmd: "mount | grep -q '{{ item.path }}'"
          loop:
            - { bucket: 'stress-inputs', path: '{{ base_dir }}/inputfiles' }
            - { bucket: 'stress-jsons', path: '{{ base_dir }}/run/docker_mount/fetched_info_jsons' }
          register: mount_check
          changed_when: false
          failed_when: false

        # Creating the directory while a fuse mount is active would shadow it,
        # so only create mount points that are not currently mounted.
        - name: Ensure mount point directories exist (only if not mounted)
          ansible.builtin.file:
            path: "{{ item.item.path }}"
            state: directory
            owner: "{{ ansible_user }}"
            group: "{{ deploy_group }}"
            mode: '0755'
          loop: "{{ mount_check.results }}"
          when: item.rc != 0
          become: true

        # Mounts target the MinIO endpoint on the master node; also persists
        # the entries via fstab (state: mounted).
        - name: Mount S3 buckets for stress testing
          ansible.posix.mount:
            src: "s3fs#{{ item.bucket }}"
            path: "{{ item.path }}"
            fstype: fuse
            opts: "_netdev,allow_other,use_path_request_style,nonempty,url=http://{{ hostvars[groups['master'][0]].ansible_host }}:9000,passwd_file=/home/{{ ansible_user }}/.passwd-s3fs"
            state: mounted
          loop:
            - { bucket: 'stress-inputs', path: '{{ base_dir }}/inputfiles' }
            - { bucket: 'stress-jsons', path: '{{ base_dir }}/run/docker_mount/fetched_info_jsons' }
          become: true
# -------------------------------------------------------------------------------------------------
# PHASE 11: Initialize Redis (Master Only)
# Sets up profiles and policies in Redis
# -------------------------------------------------------------------------------------------------
- name: "PHASE 11: Initialize Redis profiles"
  import_playbook: playbook-stress-init-redis.yml
# -------------------------------------------------------------------------------------------------
# PHASE 12: Final Status and Next Steps
# -------------------------------------------------------------------------------------------------
- name: "PHASE 12: Installation Complete"
  hosts: localhost
  gather_facts: false
  tasks:
    # Purely informational: print follow-up commands for the operator.
    - name: Display installation completion message
      ansible.builtin.debug:
        msg: |
          ========================================
          Full installation complete!
          ========================================
          Next steps:
          1. Start monitoring and enforcer (on master):
             ansible-playbook ansible/playbook-stress-manage-processes.yml \
               -e "start_monitor=true start_enforcer=true"
          2. Start auth generator (on master):
             ansible-playbook ansible/playbook-stress-auth-generator.yml \
               -e "start_generator=true dummy_batch=true auth_min_seconds=2 auth_max_seconds=3"
          3. Start download simulation (on workers):
             ansible-playbook ansible/playbook-stress-download-simulation.yml \
               -e "start_download=true profile_prefix=user1 download_min_seconds=2 download_max_seconds=5" \
               --limit workers
          4. Check status:
             ansible-playbook ansible/playbook-stress-control.yml -e "action=status"
          5. Monitor profiles:
             ansible-playbook ansible/playbook-stress-control.yml -e "action=profile-status"