From 0103d13c3fd871d1d0cbffcd1bd891eb226dd0e0 Mon Sep 17 00:00:00 2001 From: aperez Date: Thu, 25 Dec 2025 14:32:53 +0300 Subject: [PATCH] Current changes on ytops docker direct invocation in batch mode --- VERSION.client | 1 + airflow/Dockerfile | 9 +- .../configs/docker-compose-ytdlp-ops.yaml.j2 | 6 +- airflow/dags/QUEUE.md | 76 + airflow/dags/ytdlp_mgmt_proxy_account.py | 698 ++-- airflow/dags/ytdlp_mgmt_queues.py | 26 +- airflow/dags/ytdlp_ops_account_maintenance.py | 247 +- airflow/dags/ytdlp_ops_v01_orchestrator.py | 157 +- airflow/dags/ytdlp_ops_v01_worker_per_url.py | 763 +++-- airflow/dags/ytdlp_ops_v02_dispatcher_dl.py | 34 +- .../dags/ytdlp_ops_v02_orchestrator_auth.py | 133 +- airflow/dags/ytdlp_ops_v02_orchestrator_dl.py | 26 +- .../dags/ytdlp_ops_v02_worker_per_url_auth.py | 1235 ++----- .../dags/ytdlp_ops_v02_worker_per_url_dl.py | 599 ++-- airflow/dags/ytdlp_s3_uploader.py | 20 +- ansible/README.md | 7 + ansible/playbook-install-local.yml | 44 + ansible/playbook-sync-local.yml | 64 +- ansible/playbook-worker.yml | 114 + bin/build-yt-dlp-image | 35 + bin/ytops-client | 24 +- cli.auth.config | 17 + cli.config | 54 +- cli.download.config | 17 + .../10_direct_docker_auth_simulation.yaml | 119 + .../11_direct_docker_download_simulation.yaml | 104 + policies/1_fetch_only_policies.yaml | 155 - policies/2_download_only_policies.yaml | 58 - policies/3_full_stack_policies.yaml | 158 - policies/4_custom_scenarios.yaml | 126 - policies/5_ban_test_policies.yaml | 84 - policies/6_profile_setup_policy.yaml | 27 + policies/8_unified_simulation_enforcer.yaml | 162 + policies/README.md | 28 - setup.py | 58 +- tools/generate-inventory.py | 57 +- ytops_client/check_expiry_tool.py | 132 + ytops_client/check_log_pattern_tool.py | 72 + ytops_client/cli.py | 76 + ytops_client/config_tool.py | 317 ++ ytops_client/download_emulator_tool.py | 0 ytops_client/download_native_py_tool.py | 634 ++-- ytops_client/download_tool.py | 26 +- ytops_client/downloader.py | 67 + ytops_client/get_info_tool.py | 185 +- ytops_client/go_ytdlp_cli/go-ytdlp | Bin 0 -> 10700800 bytes ytops_client/go_ytdlp_cli/go.mod | 22 + ytops_client/go_ytdlp_cli/go.sum | 27 + ytops_client/go_ytdlp_cli/main.go | 64 + ytops_client/list_formats_tool.py | 17 +- .../locking_download_emulator_tool.py | 209 ++ ytops_client/manage_tool.py | 891 +++++ ytops_client/policy_enforcer_tool.py | 1297 ++++++++ ytops_client/profile_allocator_tool.py | 147 + ytops_client/profile_manager_tool.py | 1989 +++++++++++ ytops_client/profile_setup_tool.py | 269 ++ ytops_client/request_params_help.py | 91 +- ytops_client/requirements.txt | 42 + ytops_client/simulation_tool.py | 136 + ytops_client/stress_formats_tool.py | 76 +- ytops_client/stress_policy/__init__.py | 1 + ytops_client/stress_policy/arg_parser.py | 216 ++ ytops_client/stress_policy/process_runners.py | 283 ++ ytops_client/stress_policy/state_manager.py | 794 +++++ ytops_client/stress_policy/utils.py | 507 +++ ytops_client/stress_policy/workers.py | 2615 +++++++++++++++ ytops_client/stress_policy_tool.py | 2948 +++++------------ ytops_client/task_generator_tool.py | 203 ++ ytops_client/youtube-dl/Dockerfile | 71 + ytops_client/youtube-dl/README.md | 33 + .../youtube-dl/release-versions/latest.txt | 1 + ytops_client/yt_dlp_dummy_tool.py | 157 + 72 files changed, 14585 insertions(+), 5572 deletions(-) create mode 100644 VERSION.client create mode 100644 airflow/dags/QUEUE.md create mode 100644 ansible/playbook-install-local.yml create mode 100755 bin/build-yt-dlp-image create mode 100644 
cli.auth.config create mode 100644 cli.download.config create mode 100644 policies/10_direct_docker_auth_simulation.yaml create mode 100644 policies/11_direct_docker_download_simulation.yaml delete mode 100644 policies/1_fetch_only_policies.yaml delete mode 100644 policies/2_download_only_policies.yaml delete mode 100644 policies/3_full_stack_policies.yaml delete mode 100644 policies/4_custom_scenarios.yaml delete mode 100644 policies/5_ban_test_policies.yaml create mode 100644 policies/6_profile_setup_policy.yaml create mode 100644 policies/8_unified_simulation_enforcer.yaml delete mode 100644 policies/README.md create mode 100644 ytops_client/check_expiry_tool.py create mode 100644 ytops_client/check_log_pattern_tool.py create mode 100644 ytops_client/config_tool.py create mode 100644 ytops_client/download_emulator_tool.py create mode 100644 ytops_client/downloader.py create mode 100755 ytops_client/go_ytdlp_cli/go-ytdlp create mode 100644 ytops_client/go_ytdlp_cli/go.mod create mode 100644 ytops_client/go_ytdlp_cli/go.sum create mode 100644 ytops_client/go_ytdlp_cli/main.go create mode 100644 ytops_client/locking_download_emulator_tool.py create mode 100644 ytops_client/manage_tool.py create mode 100644 ytops_client/policy_enforcer_tool.py create mode 100644 ytops_client/profile_allocator_tool.py create mode 100644 ytops_client/profile_manager_tool.py create mode 100644 ytops_client/profile_setup_tool.py create mode 100644 ytops_client/requirements.txt create mode 100644 ytops_client/simulation_tool.py create mode 100644 ytops_client/stress_policy/__init__.py create mode 100644 ytops_client/stress_policy/arg_parser.py create mode 100644 ytops_client/stress_policy/process_runners.py create mode 100644 ytops_client/stress_policy/state_manager.py create mode 100644 ytops_client/stress_policy/utils.py create mode 100644 ytops_client/stress_policy/workers.py create mode 100644 ytops_client/task_generator_tool.py create mode 100644 ytops_client/youtube-dl/Dockerfile create mode 100644 ytops_client/youtube-dl/README.md create mode 100644 ytops_client/youtube-dl/release-versions/latest.txt create mode 100644 ytops_client/yt_dlp_dummy_tool.py diff --git a/VERSION.client b/VERSION.client new file mode 100644 index 0000000..6d7de6e --- /dev/null +++ b/VERSION.client @@ -0,0 +1 @@ +1.0.2 diff --git a/airflow/Dockerfile b/airflow/Dockerfile index a48a7cb..87a13bd 100644 --- a/airflow/Dockerfile +++ b/airflow/Dockerfile @@ -132,13 +132,18 @@ COPY --chown=airflow:airflow bin/ytops-client /app/bin/ytops-client RUN chmod +x /app/bin/ytops-client ENV PATH="/app/bin:${PATH}" -# Install the package in editable mode. This runs setup.py and installs all dependencies -# listed in `install_requires`, making the `yt_ops_services` module available everywhere. +# Install dependencies for the ytops_client package, then install the package itself +# in editable mode. This makes the `yt_ops_services` and `ytops_client` modules +# available everywhere. # Bypass the pip root check again. RUN mv /usr/local/bin/pip /usr/local/bin/pip.orig && \ + python3 -m pip install --no-cache-dir -r ytops_client/requirements.txt && \ python3 -m pip install --no-cache-dir -e . && \ mv /usr/local/bin/pip.orig /usr/local/bin/pip +# Ensure all files in /app, including the generated .egg-info directory, are owned by the airflow user. 
+RUN chown -R airflow:airflow /app + # Copy token generator scripts and utils with correct permissions # COPY --chown=airflow:airflow generate_tokens_direct.mjs ./ # COPY --chown=airflow:airflow utils ./utils/ diff --git a/airflow/configs/docker-compose-ytdlp-ops.yaml.j2 b/airflow/configs/docker-compose-ytdlp-ops.yaml.j2 index ed6b329..2032582 100644 --- a/airflow/configs/docker-compose-ytdlp-ops.yaml.j2 +++ b/airflow/configs/docker-compose-ytdlp-ops.yaml.j2 @@ -127,13 +127,11 @@ services: - "${CAMOUFOX_PROXIES}" - "--camoufox-endpoints-file" - "/app/config/camoufox_endpoints.json" - - "--print-tokens" - "--stop-if-no-proxy" - "--comms-log-root-dir" - "/app/logs/yt-dlp-ops/communication_logs" - - "--bgutils-no-innertube" - - "--visitor-rotation-threshold" - - "250" + #- "--visitor-rotation-threshold" + #- "250" {% endif %} restart: unless-stopped pull_policy: always diff --git a/airflow/dags/QUEUE.md b/airflow/dags/QUEUE.md new file mode 100644 index 0000000..d34b42a --- /dev/null +++ b/airflow/dags/QUEUE.md @@ -0,0 +1,76 @@ +V2 System: Separated Auth & Download Flow + +The v2 system splits the process into two distinct stages, each with its own set of queues. The base names for these queues are queue2_auth and queue2_dl. + +───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────── +1. Authentication Stage (ytdlp_ops_v02_worker_per_url_auth) + +This stage is responsible for taking a raw YouTube URL, authenticating with the yt-ops-server to get an info.json, and creating granular download tasks. + + • Getting Data (Input): + • Queue: queue2_auth_inbox + • Redis Type: LIST + • Purpose: This is the main entry point for the entire v2 system. Raw YouTube URLs or video IDs are pushed here. The ytdlp_ops_v02_dispatcher_auth DAG pulls URLs from this list to start the process. + • Reporting Results: + • Success: + • Queue: queue2_auth_result (Redis HASH) - A success record for the authentication step is stored here. + • Queue: queue_dl_format_tasks (Redis LIST) - This is the critical handoff queue. Upon successful authentication, the auth worker resolves the desired formats (e.g., bestvideo+bestaudio) into specific format IDs (e.g., 299, 140) and pushes one JSON job payload for each format into this list. This queue + feeds the download stage. + • Failure: + • Queue: queue2_auth_fail (Redis HASH) - If the authentication fails due to a system error (like bot detection or a proxy failure), the error details are stored here. + • Skipped: + • Queue: queue2_auth_skipped (Redis HASH) - If the video is unavailable for a non-system reason (e.g., it's private, deleted, or geo-restricted), the URL is logged here. This is not considered a system failure. + • Tracking Tasks: + • Queue: queue2_auth_progress + • Redis Type: HASH + • Purpose: When an auth worker picks up a URL, it adds an entry to this hash to show that the URL is actively being processed. The entry is removed upon completion (success, failure, or skip). + +───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────── +2. 
Download Stage (ytdlp_ops_v02_worker_per_url_dl) + +This stage is responsible for executing the download and probing of a single media format, based on the job created by the auth worker. + + • Getting Data (Input): + • Queue: queue_dl_format_tasks + • Redis Type: LIST + • Purpose: The ytdlp_ops_v02_worker_per_url_dl DAG pulls granular job payloads from this list. Each payload contains everything needed to download a single format (the path to the info.json, the format ID, etc.). + • Reporting Results: + • Success: + • Queue: queue2_dl_result (Redis HASH) - A success record for the download of a specific format is stored here. + • Failure: + • Queue: queue2_dl_fail (Redis HASH) - If the download or probe fails, the error is logged here. As seen in ytdlp_mgmt_queues.py, these failed items can be requeued, which sends them back to queue2_auth_inbox to start the process over. + • Skipped: + • Queue: queue2_dl_skipped (Redis HASH) - Used for unrecoverable download errors (e.g., HTTP 403 Forbidden), similar to the auth stage. + • Tracking Tasks: + • Queue: queue2_dl_progress + • Redis Type: HASH + • Purpose: Tracks download tasks that are actively in progress. + +Summary Table (V2) + + + Queue Name Pattern Redis Type Purpose + ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + queue2_auth_inbox LIST Input for Auth: Holds raw YouTube URLs to be authenticated. + queue2_auth_progress HASH Tracks URLs currently being authenticated. + queue2_auth_result HASH Stores successful authentication results. + queue2_auth_fail HASH Stores failed authentication attempts. + queue2_auth_skipped HASH Stores URLs skipped due to content issues (private, deleted, etc.). + queue_dl_format_tasks LIST Input for Download: Holds granular download jobs (one per format) created by the auth worker. + queue2_dl_progress HASH Tracks download jobs currently in progress. + queue2_dl_result HASH Stores successful download results. + queue2_dl_fail HASH Stores failed download attempts. + queue2_dl_skipped HASH Stores downloads skipped due to unrecoverable errors (e.g., 403 Forbidden). + + +───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────── +V1 System (Monolithic) for Contrast + +For completeness, the older v1 system (ytdlp_ops_v01_worker_per_url) uses a simpler, monolithic set of queues, typically with the base name video_queue. + + • Input: video_queue_inbox (Redis LIST) + • Results: video_queue_result, video_queue_fail, video_queue_skipped (all Redis HASHes) + • In-Progress: video_queue_progress (Redis HASH) + +In this model, there is no handoff between stages; a single worker handles both authentication and download for all requested formats of a URL. + diff --git a/airflow/dags/ytdlp_mgmt_proxy_account.py b/airflow/dags/ytdlp_mgmt_proxy_account.py index b189ba0..fe8fa7d 100644 --- a/airflow/dags/ytdlp_mgmt_proxy_account.py +++ b/airflow/dags/ytdlp_mgmt_proxy_account.py @@ -3,6 +3,12 @@ DAG to manage the state of proxies and accounts used by the ytdlp-ops-server. 
""" from __future__ import annotations +# --- Add project root to path to allow for yt-ops-client imports --- +import sys +# The yt-ops-client package is installed in editable mode in /app +if '/app' not in sys.path: + sys.path.insert(0, '/app') + import logging import json import re @@ -17,6 +23,7 @@ from airflow.models.dagrun import DagRun from airflow.models.param import Param from airflow.models.taskinstance import TaskInstance from airflow.operators.python import PythonOperator +from airflow.decorators import task from airflow.utils.dates import days_ago from airflow.models.variable import Variable from airflow.providers.redis.hooks.redis import RedisHook @@ -35,12 +42,13 @@ except ImportError: except Exception as e: logger.error(f"Error applying Thrift exceptions patch: {e}") -# Thrift imports +# Thrift imports (kept for DEPRECATED proxy management) try: + from ytops_client.profile_manager_tool import ProfileManager, format_duration, format_timestamp from pangramia.yt.exceptions.ttypes import PBServiceException, PBUserException - from yt_ops_services.client_utils import get_thrift_client, format_timestamp + from yt_ops_services.client_utils import get_thrift_client except ImportError as e: - logger.critical(f"Could not import Thrift modules: {e}. Ensure yt_ops_services package is installed correctly.") + logger.critical(f"Could not import project modules: {e}. Ensure yt-ops-client and services are installed correctly.") # Fail DAG parsing if thrift modules are not available raise @@ -70,6 +78,7 @@ def _get_redis_client(redis_conn_id: str): def _list_proxy_statuses(client, server_identity): """Lists the status of proxies.""" logger.info(f"Listing proxy statuses for server: {server_identity or 'ALL'}") + logger.warning("DEPRECATED: Proxy management is now handled by the standalone policy-enforcer.") logger.info("NOTE: Proxy statuses are read from server's internal state via Thrift service") try: statuses = client.getProxyStatus(server_identity) @@ -126,88 +135,54 @@ def _list_proxy_statuses(client, server_identity): print("NOTE: To see Recent Accounts/Machines, the server's `getProxyStatus` method must be updated to return these fields.") -def _list_account_statuses(client, account_id, redis_conn_id): - """Lists the status of accounts, enriching with live data from Redis.""" - logger.info(f"Listing account statuses for account: {account_id or 'ALL'}") - logger.info("NOTE: Account statuses are read from the Thrift service and enriched with live data from Redis.") +def _list_account_statuses(pm: ProfileManager, account_id_prefix: str | None): + """Lists the status of profiles from Redis using ProfileManager.""" + logger.info(f"Listing v2 profile statuses from Redis for prefix: {account_id_prefix or 'ALL'}") - redis_client = None try: - redis_client = _get_redis_client(redis_conn_id) - logger.info("Successfully connected to Redis to fetch detailed account status.") - except Exception as e: - logger.warning(f"Could not connect to Redis to get detailed status. Will show basic status. Error: {e}") - redis_client = None - - try: - # The thrift method takes accountId (specific) or accountPrefix. - # If account_id is provided, we use it. If not, we get all by leaving both params as None. 
- statuses = client.getAccountStatus(accountId=account_id, accountPrefix=None) - if not statuses: - print("\n--- Account Statuses ---\nNo account statuses found.\n------------------------\n") + profiles = pm.list_profiles() + if not profiles: + print("\n--- V2 Profile Statuses ---\nNo profiles found.\n---------------------------\n") return from tabulate import tabulate status_list = [] + now = time.time() - for s in statuses: - status_str = s.status - # If an account is resting, get the live countdown from Redis for accuracy. - if redis_client and 'RESTING' in status_str: - try: - status_key = f"account_status:{s.accountId}" - # The server stores resting expiry time in 'resting_until'. - expiry_ts_bytes = redis_client.hget(status_key, "resting_until") - if expiry_ts_bytes: - expiry_ts = float(expiry_ts_bytes) - now = datetime.now().timestamp() - if now >= expiry_ts: - status_str = "ACTIVE (was RESTING)" - else: - remaining_seconds = int(expiry_ts - now) - if remaining_seconds > 3600: - status_str = f"RESTING (active in {remaining_seconds // 3600}h {remaining_seconds % 3600 // 60}m)" - elif remaining_seconds > 60: - status_str = f"RESTING (active in {remaining_seconds // 60}m {remaining_seconds % 60}s)" - else: - status_str = f"RESTING (active in {remaining_seconds}s)" - except Exception as e: - logger.warning(f"Could not parse resting time for {s.accountId} from Redis: {e}. Using server status.") + for p in profiles: + if account_id_prefix and not p['name'].startswith(account_id_prefix): + continue + + status = p.get('state', 'UNKNOWN') + if status == 'RESTING': + rest_until = p.get('rest_until', 0) + if rest_until > now: + status += f" ({format_duration(rest_until - now)} left)" + elif status == 'COOLDOWN': + cooldown_until = p.get('cooldown_until', 0) + if cooldown_until > now: + status += f" ({format_duration(cooldown_until - now)} left)" - # Determine the last activity timestamp for sorting - last_success = float(s.lastSuccessTimestamp) if s.lastSuccessTimestamp else 0 - last_failure = float(s.lastFailureTimestamp) if s.lastFailureTimestamp else 0 - last_activity = max(last_success, last_failure) status_item = { - "Account ID": s.accountId, - "Status": status_str, - "Success": s.successCount, - "Failures": s.failureCount, - "Last Success": format_timestamp(s.lastSuccessTimestamp), - "Last Failure": format_timestamp(s.lastFailureTimestamp), - "Last Proxy": s.lastUsedProxy or "N/A", - "Last Machine": s.lastUsedMachine or "N/A", - "_last_activity": last_activity, # Add a temporary key for sorting + "Name": p.get('name'), + "Status": status, + "Proxy": p.get('proxy', 'N/A'), + "Success": p.get('success', 0), + "Failures": p.get('failure', 0), + "Last Activity": format_timestamp(p.get('last_activity_ts', 0)), + "Owner": p.get('owner', 'None'), + "Lock Time": format_duration(now - p.get('lock_ts', 0)) if p.get('state') == 'LOCKED' else 'N/A', } status_list.append(status_item) - # Sort the list by the last activity timestamp in descending order - status_list.sort(key=lambda item: item.get('_last_activity', 0), reverse=True) + status_list.sort(key=lambda item: item.get('Name', '')) - # Remove the temporary sort key before printing - for item in status_list: - del item['_last_activity'] - - print("\n--- Account Statuses ---") - # The f-string with a newline ensures the table starts on a new line in the logs. 
+ print("\n--- V2 Profile Statuses ---") print(f"\n{tabulate(status_list, headers='keys', tablefmt='grid')}") - print("------------------------\n") - except (PBServiceException, PBUserException) as e: - logger.error(f"Failed to get account statuses: {e.message}", exc_info=True) - print(f"\nERROR: Could not retrieve account statuses. Server returned: {e.message}\n") + print("---------------------------\n") except Exception as e: - logger.error(f"An unexpected error occurred while getting account statuses: {e}", exc_info=True) + logger.error(f"An unexpected error occurred while getting v2 profile statuses: {e}", exc_info=True) print(f"\nERROR: An unexpected error occurred: {e}\n") @@ -317,6 +292,49 @@ def _list_activity_counters(redis_conn_id: str): print(f"\nERROR: An unexpected error occurred: {e}\n") +def _create_profiles_from_json(**context): + """Creates profiles by calling the yt-ops-client setup-profiles tool.""" + import subprocess + import tempfile + import yaml + + params = context['params'] + json_payload_str = params.get('create_profiles_json') + if not json_payload_str: + raise AirflowException("Parameter 'create_profiles_json' is empty.") + + try: + # We accept JSON but the setup tool uses YAML, so we parse and dump. + # This also serves as validation. + json_payload = json.loads(json_payload_str) + yaml_payload = yaml.dump(json_payload) + except (json.JSONDecodeError, yaml.YAMLError) as e: + raise AirflowException(f"Invalid JSON/YAML in 'create_profiles_json': {e}") + + with tempfile.NamedTemporaryFile(mode='w+', delete=True, suffix='.yaml', prefix='airflow-profile-setup-') as temp_policy_file: + temp_policy_file.write(yaml_payload) + temp_policy_file.flush() + logger.info(f"Created temporary policy file for profile setup: {temp_policy_file.name}") + + cmd = [ + 'ytops-client', 'setup-profiles', + '--policy', temp_policy_file.name, + ] + # Pass through Redis connection params if provided + if params.get('redis_conn_id') != DEFAULT_REDIS_CONN_ID: + logger.warning("Custom Redis connection is not supported for `create_profiles` yet. 
It will use the default from .env or localhost.") + + logger.info(f"Running command: {' '.join(cmd)}") + process = subprocess.run(cmd, capture_output=True, text=True, timeout=300) + + if process.stdout: + print(f"\n--- yt-ops-client setup-profiles STDOUT ---\n{process.stdout}\n----------------------------------------\n") + if process.stderr: + print(f"\n--- yt-ops-client setup-profiles STDERR ---\n{process.stderr}\n----------------------------------------\n") + + if process.returncode != 0: + raise AirflowException(f"Profile creation failed with exit code {process.returncode}.") + def manage_system_callable(**context): """Main callable to interact with the system management endpoints.""" # Log version for debugging @@ -327,7 +345,7 @@ def manage_system_callable(**context): action = params["action"] # For Thrift actions, use the new management host/port - if entity not in ["activity_counters"]: + if entity not in ["activity_counters", "account"]: host = params["management_host"] port = params["management_port"] else: @@ -335,12 +353,13 @@ def manage_system_callable(**context): server_identity = params.get("server_identity") proxy_url = params.get("proxy_url") - account_id = params.get("account_id") + account_id = params.get("account_id") # Used as prefix for v2 profiles + redis_env = params.get("redis_env") # --- Validate Action/Entity Combination and Parameters --- valid_actions = { "proxy": ["list_with_status", "ban", "unban", "ban_all", "unban_all", "delete_from_redis"], - "account": ["list_with_status", "ban", "unban", "unban_all", "delete_from_redis"], + "account": ["list_with_status", "create_profiles", "ban", "unban", "activate", "pause", "delete", "delete_all"], "client": ["list_with_status", "delete_from_redis"], "accounts_and_proxies": ["list_with_status", "ban", "unban", "ban_all", "unban_all", "delete_from_redis"], "activity_counters": ["list_with_status"], @@ -360,9 +379,23 @@ def manage_system_callable(**context): raise ValueError(f"A 'proxy_url' is required for proxy action '{action}'.") if entity == "account": - if action in ["ban", "unban"] and not account_id: - raise ValueError(f"An 'account_id' is required for account action '{action}'.") + if action in ["ban", "unban", "pause", "activate", "delete"] and not account_id: + raise ValueError(f"An 'account_id' (profile name) is required for account action '{action}'.") + # --- ProfileManager setup for v2 account actions --- + pm = None + if entity == "account": + try: + redis_hook = RedisHook(redis_conn_id=params["redis_conn_id"]) + if redis_env: + key_prefix = f"{redis_env}_profile_mgmt_" + else: + raise ValueError("A 'redis_env' (e.g., 'sim_auth') must be provided for v2 profile actions.") + + pm = ProfileManager(redis_hook=redis_hook, key_prefix=key_prefix) + logger.info(f"Initialized ProfileManager for env '{redis_env}' (Redis key prefix: '{key_prefix}')") + except Exception as e: + raise AirflowException(f"Failed to initialize ProfileManager: {e}") # --- Handle Activity Counter action --- if entity == "activity_counters": @@ -372,13 +405,25 @@ def manage_system_callable(**context): else: raise ValueError(f"Action '{action}' is not valid for entity 'activity_counters'. 
Only 'list_with_status' is supported.") - # Handle Thrift-based deletion actions + # Handle direct Redis deletion actions if action == "delete_from_redis": + if entity == "client": + logger.info("Deleting all client stats from Redis...") + redis_client = _get_redis_client(params["redis_conn_id"]) + result = redis_client.delete("client_stats") + if result > 0: + print(f"\nSuccessfully deleted 'client_stats' key from Redis.\n") + else: + print(f"\nKey 'client_stats' not found in Redis. Nothing to delete.\n") + return + + # All other delete actions are handled by Thrift for now. client, transport = None, None try: client, transport = get_thrift_client(host, port) if entity == "proxy": + logger.warning("DEPRECATED: Proxy management is now handled by the standalone policy-enforcer.") proxy_url = params.get("proxy_url") server_identity = params.get("server_identity") @@ -391,63 +436,12 @@ def manage_system_callable(**context): print(f"\nFailed to delete proxy '{proxy_url}' for server '{server_identity}' from Redis.\n") else: logger.info("Deleting all proxies from Redis via Thrift service...") - # If server_identity is provided, delete all proxies for that server - # If server_identity is None, delete all proxies for ALL servers result = client.deleteAllProxiesFromRedis(server_identity) if server_identity: print(f"\nSuccessfully deleted all proxies for server '{server_identity}' from Redis. Count: {result}\n") else: print(f"\nSuccessfully deleted all proxies from Redis across ALL servers. Count: {result}\n") - elif entity == "account": - account_id = params.get("account_id") - - if account_id: - logger.info(f"Deleting account '{account_id}' from Redis via Thrift service...") - result = client.deleteAccountFromRedis(account_id) - if result: - print(f"\nSuccessfully deleted account '{account_id}' from Redis.\n") - else: - print(f"\nFailed to delete account '{account_id}' from Redis.\n") - else: - logger.info("Deleting all accounts from Redis via Thrift service...") - # If account_id is provided as prefix, delete all accounts with that prefix - # If account_id is None, delete all accounts - account_prefix = params.get("account_id") - result = client.deleteAllAccountsFromRedis(account_prefix) - if account_prefix: - print(f"\nSuccessfully deleted all accounts with prefix '{account_prefix}' from Redis. Count: {result}\n") - else: - print(f"\nSuccessfully deleted all accounts from Redis. 
Count: {result}\n") - - elif entity == "accounts_and_proxies": - # Delete accounts - account_prefix = params.get("account_id") # Repurpose account_id param as an optional prefix - logger.info("Deleting accounts from Redis via Thrift service...") - account_result = client.deleteAllAccountsFromRedis(account_prefix) - if account_prefix: - print(f"\nSuccessfully deleted {account_result} account keys with prefix '{account_prefix}' from Redis.\n") - else: - print(f"\nSuccessfully deleted {account_result} account keys from Redis.\n") - - # Delete proxies - server_identity = params.get("server_identity") - logger.info("Deleting proxies from Redis via Thrift service...") - proxy_result = client.deleteAllProxiesFromRedis(server_identity) - if server_identity: - print(f"\nSuccessfully deleted {proxy_result} proxy keys for server '{server_identity}' from Redis.\n") - else: - print(f"\nSuccessfully deleted {proxy_result} proxy keys from Redis across ALL servers.\n") - - elif entity == "client": - logger.info("Deleting all client stats from Redis...") - redis_client = _get_redis_client(params["redis_conn_id"]) - result = redis_client.delete("client_stats") - if result > 0: - print(f"\nSuccessfully deleted 'client_stats' key from Redis.\n") - else: - print(f"\nKey 'client_stats' not found in Redis. Nothing to delete.\n") - except (PBServiceException, PBUserException) as e: logger.error(f"Thrift error performing delete action: {e.message}", exc_info=True) print(f"\nERROR: Thrift service error: {e.message}\n") @@ -460,16 +454,21 @@ def manage_system_callable(**context): if transport and transport.isOpen(): transport.close() logger.info("Thrift connection closed.") - return # End execution for this action + return + # --- Main Action Handler --- client, transport = None, None try: - client, transport = get_thrift_client(host, port) + # Connect to Thrift only if needed + if entity == "proxy": + client, transport = get_thrift_client(host, port) if entity == "client": if action == "list_with_status": _list_client_statuses(params["redis_conn_id"]) + elif entity == "proxy": + logger.warning("DEPRECATED: Proxy management is now handled by the standalone policy-enforcer. These actions are for legacy support.") if action == "list_with_status": _list_proxy_statuses(client, server_identity) elif action == "ban": @@ -483,300 +482,60 @@ def manage_system_callable(**context): client.unbanProxy(proxy_url, server_identity) print(f"Successfully sent request to unban proxy '{proxy_url}'.") elif action == "ban_all": - if server_identity: + if server_identity: logger.info(f"Banning all proxies for server '{server_identity}'...") client.banAllProxies(server_identity) print(f"Successfully sent request to ban all proxies for '{server_identity}'.") - else: - logger.info("No server_identity provided. Banning all proxies for ALL servers...") - all_statuses = client.getProxyStatus(None) - if not all_statuses: - print("\nNo proxy statuses found for any server. Nothing to ban.\n") - return - - all_server_identities = sorted(list(set(s.serverIdentity for s in all_statuses))) - logger.info(f"Found {len(all_server_identities)} server identities: {all_server_identities}") - print(f"Found {len(all_server_identities)} server identities. 
Sending ban request for each...") - - success_count = 0 - fail_count = 0 - for identity in all_server_identities: - try: - client.banAllProxies(identity) - logger.info(f" - Sent ban_all for '{identity}'.") - success_count += 1 - except Exception as e: - logger.error(f" - Failed to ban all proxies for '{identity}': {e}") - fail_count += 1 - - print(f"\nSuccessfully sent ban_all requests for {success_count} server identities.") - if fail_count > 0: - print(f"Failed to send ban_all requests for {fail_count} server identities. See logs for details.") + else: + raise ValueError("A 'server_identity' is required for 'ban_all' on proxies.") elif action == "unban_all": if server_identity: logger.info(f"Unbanning all proxy statuses for server '{server_identity}'...") client.resetAllProxyStatuses(server_identity) print(f"Successfully sent request to unban all proxy statuses for '{server_identity}'.") else: - logger.info("No server_identity provided. Unbanning all proxies for ALL servers...") - all_statuses = client.getProxyStatus(None) - if not all_statuses: - print("\nNo proxy statuses found for any server. Nothing to unban.\n") - return - - all_server_identities = sorted(list(set(s.serverIdentity for s in all_statuses))) - logger.info(f"Found {len(all_server_identities)} server identities: {all_server_identities}") - print(f"Found {len(all_server_identities)} server identities. Sending unban request for each...") - - success_count = 0 - fail_count = 0 - for identity in all_server_identities: - try: - client.resetAllProxyStatuses(identity) - logger.info(f" - Sent unban_all for '{identity}'.") - success_count += 1 - except Exception as e: - logger.error(f" - Failed to unban all proxies for '{identity}': {e}") - fail_count += 1 - - print(f"\nSuccessfully sent unban_all requests for {success_count} server identities.") - if fail_count > 0: - print(f"Failed to send unban_all requests for {fail_count} server identities. 
See logs for details.") + raise ValueError("A 'server_identity' is required for 'unban_all' on proxies.") elif entity == "account": if action == "list_with_status": - _list_account_statuses(client, account_id, params["redis_conn_id"]) + _list_account_statuses(pm, account_id) + elif action == "create_profiles": + # This action is handled by a separate PythonOperator + pass elif action == "ban": - if not account_id: raise ValueError("An 'account_id' is required.") - reason = f"Manual ban from Airflow mgmt DAG by {socket.gethostname()}" - logger.info(f"Banning account '{account_id}'...") - client.banAccount(accountId=account_id, reason=reason) - print(f"Successfully sent request to ban account '{account_id}'.") - elif action == "unban": - if not account_id: raise ValueError("An 'account_id' is required.") - reason = f"Manual un-ban from Airflow mgmt DAG by {socket.gethostname()}" - logger.info(f"Unbanning account '{account_id}'...") - - # Fetch status to get current success count before unbanning - statuses = client.getAccountStatus(accountId=account_id, accountPrefix=None) - if not statuses: - raise AirflowException(f"Account '{account_id}' not found.") - current_success_count = statuses[0].successCount or 0 - - client.unbanAccount(accountId=account_id, reason=reason) - print(f"Successfully sent request to unban account '{account_id}'.") - - # Set the success_count_at_activation to baseline the account - redis_client = _get_redis_client(params["redis_conn_id"]) - redis_client.hset(f"account_status:{account_id}", "success_count_at_activation", current_success_count) - logger.info(f"Set 'success_count_at_activation' for '{account_id}' to {current_success_count}.") - elif action == "unban_all": - account_prefix = account_id # Repurpose account_id param as an optional prefix - logger.info(f"Unbanning all account statuses to ACTIVE (prefix: '{account_prefix or 'ALL'}')...") + logger.info(f"Banning profile '{account_id}' in env '{redis_env}'...") + pm.update_profile_state(account_id, "BANNED", f"Manual ban from Airflow mgmt DAG") + print(f"Successfully set state of profile '{account_id}' to BANNED.") + elif action == "unban" or action == "activate": + logger.info(f"Activating profile '{account_id}' in env '{redis_env}'...") + pm.update_profile_state(account_id, "ACTIVE", f"Manual activation from Airflow mgmt DAG") + print(f"Successfully set state of profile '{account_id}' to ACTIVE.") + elif action == "pause": + logger.info(f"Pausing (resting) profile '{account_id}' in env '{redis_env}'...") + pm.update_profile_state(account_id, "RESTING", f"Manual pause from Airflow mgmt DAG") + print(f"Successfully set state of profile '{account_id}' to RESTING.") + elif action == "delete": + logger.info(f"Deleting profile '{account_id}' in env '{redis_env}'...") + pm.delete_profile(account_id) + print(f"Successfully deleted profile '{account_id}'.") + elif action == "delete_all": + logger.warning(f"DESTRUCTIVE: Deleting all profiles with prefix '{account_id}' in env '{redis_env}'...") + profiles = pm.list_profiles() + deleted_count = 0 + for p in profiles: + if not account_id or p['name'].startswith(account_id): + pm.delete_profile(p['name']) + deleted_count += 1 + print(f"Successfully deleted {deleted_count} profile(s).") - all_statuses = client.getAccountStatus(accountId=None, accountPrefix=account_prefix) - if not all_statuses: - print(f"No accounts found with prefix '{account_prefix or 'ALL'}' to unban.") - return - - accounts_to_unban = [s.accountId for s in all_statuses] - account_map = {s.accountId: s 
for s in all_statuses} - redis_client = _get_redis_client(params["redis_conn_id"]) - - logger.info(f"Found {len(accounts_to_unban)} accounts to unban.") - print(f"Found {len(accounts_to_unban)} accounts. Sending unban request for each...") - - unban_count = 0 - fail_count = 0 - for acc_id in accounts_to_unban: - try: - reason = f"Manual unban_all from Airflow mgmt DAG by {socket.gethostname()}" - client.unbanAccount(accountId=acc_id, reason=reason) - logger.info(f" - Sent unban for '{acc_id}'.") - - # Also set the success_count_at_activation to baseline the account - current_success_count = account_map[acc_id].successCount or 0 - redis_client.hset(f"account_status:{acc_id}", "success_count_at_activation", current_success_count) - logger.info(f" - Set 'success_count_at_activation' for '{acc_id}' to {current_success_count}.") - - unban_count += 1 - except Exception as e: - logger.error(f" - Failed to unban account '{acc_id}': {e}") - fail_count += 1 - - print(f"\nSuccessfully sent unban requests for {unban_count} accounts.") - if fail_count > 0: - print(f"Failed to send unban requests for {fail_count} accounts. See logs for details.") - - # Optionally, list statuses again to confirm - print("\n--- Listing statuses after unban_all ---") - _list_account_statuses(client, account_prefix, params["redis_conn_id"]) - elif entity == "accounts_and_proxies": + logger.warning("DEPRECATED: Combined 'accounts_and_proxies' actions are no longer supported in v2. Please manage accounts and proxies separately.") if action == "list_with_status": - print("\n--- Listing statuses for Proxies, Accounts, and Clients ---") + print("\n--- Listing statuses for Proxies, V2 Profiles, and Clients ---") _list_proxy_statuses(client, server_identity) - _list_account_statuses(client, account_id, params["redis_conn_id"]) + _list_account_statuses(pm, account_id) _list_client_statuses(params["redis_conn_id"]) - return # End execution for list_with_status - - print(f"\n--- Performing action '{action}' on BOTH Proxies and Accounts ---") - - # --- Proxy Action --- - try: - print("\n-- Running Proxy Action --") - if action == "list_with_status": - _list_proxy_statuses(client, server_identity) - elif action == "ban": - if not proxy_url: raise ValueError("A 'proxy_url' is required.") - logger.info(f"Banning proxy '{proxy_url}' for server '{server_identity}'...") - client.banProxy(proxy_url, server_identity) - print(f"Successfully sent request to ban proxy '{proxy_url}'.") - elif action == "unban": - if not proxy_url: raise ValueError("A 'proxy_url' is required.") - logger.info(f"Unbanning proxy '{proxy_url}' for server '{server_identity}'...") - client.unbanProxy(proxy_url, server_identity) - print(f"Successfully sent request to unban proxy '{proxy_url}'.") - elif action == "ban_all": - if server_identity: - logger.info(f"Banning all proxies for server '{server_identity}'...") - client.banAllProxies(server_identity) - print(f"Successfully sent request to ban all proxies for '{server_identity}'.") - else: - logger.info("No server_identity provided. Banning all proxies for ALL servers...") - all_statuses = client.getProxyStatus(None) - if not all_statuses: - print("\nNo proxy statuses found for any server. Nothing to ban.\n") - else: - all_server_identities = sorted(list(set(s.serverIdentity for s in all_statuses))) - logger.info(f"Found {len(all_server_identities)} server identities: {all_server_identities}") - print(f"Found {len(all_server_identities)} server identities. 
Sending ban request for each...") - - success_count = 0 - fail_count = 0 - for identity in all_server_identities: - try: - client.banAllProxies(identity) - logger.info(f" - Sent ban_all for '{identity}'.") - success_count += 1 - except Exception as e: - logger.error(f" - Failed to ban all proxies for '{identity}': {e}") - fail_count += 1 - - print(f"\nSuccessfully sent ban_all requests for {success_count} server identities.") - if fail_count > 0: - print(f"Failed to send ban_all requests for {fail_count} server identities. See logs for details.") - elif action == "unban_all": - if server_identity: - logger.info(f"Unbanning all proxy statuses for server '{server_identity}'...") - client.resetAllProxyStatuses(server_identity) - print(f"Successfully sent request to unban all proxy statuses for '{server_identity}'.") - else: - logger.info("No server_identity provided. Unbanning all proxies for ALL servers...") - all_statuses = client.getProxyStatus(None) - if not all_statuses: - print("\nNo proxy statuses found for any server. Nothing to unban.\n") - else: - all_server_identities = sorted(list(set(s.serverIdentity for s in all_statuses))) - logger.info(f"Found {len(all_server_identities)} server identities: {all_server_identities}") - print(f"Found {len(all_server_identities)} server identities. Sending unban request for each...") - - success_count = 0 - fail_count = 0 - for identity in all_server_identities: - try: - client.resetAllProxyStatuses(identity) - logger.info(f" - Sent unban_all for '{identity}'.") - success_count += 1 - except Exception as e: - logger.error(f" - Failed to unban all proxies for '{identity}': {e}") - fail_count += 1 - - print(f"\nSuccessfully sent unban_all requests for {success_count} server identities.") - if fail_count > 0: - print(f"Failed to send unban_all requests for {fail_count} server identities. See logs for details.") - except Exception as proxy_e: - logger.error(f"Error during proxy action '{action}': {proxy_e}", exc_info=True) - print(f"\nERROR during proxy action: {proxy_e}") - - # --- Account Action --- - try: - print("\n-- Running Account Action --") - if action == "list_with_status": - _list_account_statuses(client, account_id, params["redis_conn_id"]) - elif action == "ban": - if not account_id: raise ValueError("An 'account_id' is required.") - reason = f"Manual ban from Airflow mgmt DAG by {socket.gethostname()}" - logger.info(f"Banning account '{account_id}'...") - client.banAccount(accountId=account_id, reason=reason) - print(f"Successfully sent request to ban account '{account_id}'.") - elif action == "unban": - if not account_id: raise ValueError("An 'account_id' is required.") - reason = f"Manual un-ban from Airflow mgmt DAG by {socket.gethostname()}" - logger.info(f"Unbanning account '{account_id}'...") - - # Fetch status to get current success count before unbanning - statuses = client.getAccountStatus(accountId=account_id, accountPrefix=None) - if not statuses: - logger.warning(f"Account '{account_id}' not found. 
Skipping account unban.") - else: - current_success_count = statuses[0].successCount or 0 - client.unbanAccount(accountId=account_id, reason=reason) - print(f"Successfully sent request to unban account '{account_id}'.") - - # Set the success_count_at_activation to baseline the account - redis_client = _get_redis_client(params["redis_conn_id"]) - redis_client.hset(f"account_status:{account_id}", "success_count_at_activation", current_success_count) - logger.info(f"Set 'success_count_at_activation' for '{account_id}' to {current_success_count}.") - elif action == "unban_all": - account_prefix = account_id # Repurpose account_id param as an optional prefix - logger.info(f"Unbanning all account statuses to ACTIVE (prefix: '{account_prefix or 'ALL'}')...") - - all_statuses = client.getAccountStatus(accountId=None, accountPrefix=account_prefix) - if not all_statuses: - print(f"No accounts found with prefix '{account_prefix or 'ALL'}' to unban.") - else: - accounts_to_unban = [s.accountId for s in all_statuses] - account_map = {s.accountId: s for s in all_statuses} - redis_client = _get_redis_client(params["redis_conn_id"]) - - logger.info(f"Found {len(accounts_to_unban)} accounts to unban.") - print(f"Found {len(accounts_to_unban)} accounts. Sending unban request for each...") - - unban_count = 0 - fail_count = 0 - for acc_id in accounts_to_unban: - try: - reason = f"Manual unban_all from Airflow mgmt DAG by {socket.gethostname()}" - client.unbanAccount(accountId=acc_id, reason=reason) - logger.info(f" - Sent unban for '{acc_id}'.") - - # Also set the success_count_at_activation to baseline the account - current_success_count = account_map[acc_id].successCount or 0 - redis_client.hset(f"account_status:{acc_id}", "success_count_at_activation", current_success_count) - logger.info(f" - Set 'success_count_at_activation' for '{acc_id}' to {current_success_count}.") - - unban_count += 1 - except Exception as e: - logger.error(f" - Failed to unban account '{acc_id}': {e}") - fail_count += 1 - - print(f"\nSuccessfully sent unban requests for {unban_count} accounts.") - if fail_count > 0: - print(f"Failed to send unban requests for {fail_count} accounts. See logs for details.") - - # Optionally, list statuses again to confirm - print("\n--- Listing statuses after unban_all ---") - _list_account_statuses(client, account_prefix, params["redis_conn_id"]) - except Exception as account_e: - logger.error(f"Error during account action '{action}': {account_e}", exc_info=True) - print(f"\nERROR during account action: {account_e}") - - elif entity == "all": - if action == "list_with_status": - print("\nListing all entities...") - _list_proxy_statuses(client, server_identity) - _list_account_statuses(client, account_id, params["redis_conn_id"]) + return except (PBServiceException, PBUserException) as e: logger.error(f"Thrift error performing action '{action}': {e.message}", exc_info=True) @@ -800,91 +559,120 @@ with DAG( catchup=False, tags=["ytdlp", "mgmt", "master"], doc_md=""" - ### YT-DLP Proxy and Account Manager DAG - This DAG provides tools to manage the state of proxies and accounts used by the `ytdlp-ops-server`. + ### YT-DLP v2 Profile and System Manager + + This DAG provides tools to manage the state of **v2 profiles** (formerly accounts) and other system components. Select an `entity` and an `action` to perform. - - **IMPORTANT NOTE ABOUT DATA SOURCES:** - - **Proxy Statuses**: Read from the server's internal state via Thrift service calls. 
- - **Account Statuses**: Read from the Thrift service, and then enriched with live cooldown data directly from Redis. - - **IMPORTANT NOTE ABOUT PROXY MANAGEMENT:** - - Proxies are managed by the server's internal state through Thrift methods - - There is NO direct Redis manipulation for proxies - they are managed entirely by the server - - To properly manage proxies, use the Thrift service methods (ban, unban, etc.) + + **V2 Profile Management (`entity: account`):** + - All account/profile actions are now performed directly on Redis using the `ProfileManager`. + - A `redis_env` (e.g., `sim_auth` or `sim_download`) is **required** to target the correct set of profiles. + - Available actions are `list_with_status`, `create_profiles`, `ban`, `unban`/`activate`, `pause`, `delete`, and `delete_all`. + + **Legacy Proxy Management (`entity: proxy`):** + - **DEPRECATED**: Proxy state is now managed automatically by the standalone `policy-enforcer` service. + - These actions are provided for legacy support and interact with the old Thrift service. They may be removed in the future. """, params={ - "management_host": Param(DEFAULT_MANAGEMENT_SERVICE_IP, type="string", title="Management Service Host", description="The hostname or IP of the management service. Can be a Docker container name (e.g., 'envoy-thrift-lb') if on the same network."), - "management_port": Param(DEFAULT_MANAGEMENT_SERVICE_PORT, type="integer", title="Management Service Port", description="The port of the dedicated management service."), + "management_host": Param(DEFAULT_MANAGEMENT_SERVICE_IP, type="string", title="Management Service Host (DEPRECATED)", description="The hostname or IP of the management service. Used only for legacy proxy actions."), + "management_port": Param(DEFAULT_MANAGEMENT_SERVICE_PORT, type="integer", title="Management Service Port (DEPRECATED)", description="The port of the dedicated management service."), "entity": Param( - "accounts_and_proxies", + "account", type="string", - enum=["account", "proxy", "client", "accounts_and_proxies", "activity_counters"], + enum=["account", "proxy", "client", "activity_counters", "accounts_and_proxies"], description="The type of entity to manage.", ), "action": Param( "list_with_status", type="string", - enum=["list_with_status", "ban", "unban", "ban_all", "unban_all", "delete_from_redis"], + enum=["list_with_status", "create_profiles", "ban", "unban", "activate", "pause", "delete", "delete_all", "ban_all", "unban_all", "delete_from_redis"], description="""The management action to perform. --- - #### Actions for `entity: proxy` - - `list_with_status`: View status of all proxies, optionally filtered by `server_identity`. - - `ban`: Ban a specific proxy for a given `server_identity`. Requires `proxy_url`. - - `unban`: Un-ban a specific proxy. Requires `proxy_url`. - - `ban_all`: Sets the status of all proxies for a given `server_identity` (or all servers) to `BANNED`. - - `unban_all`: Resets the status of all proxies for a given `server_identity` (or all servers) to `ACTIVE`. - - `delete_from_redis`: **(Destructive)** Deletes proxy status from Redis via Thrift service. This permanently removes the proxy from being tracked by the system. If `proxy_url` and `server_identity` are provided, it deletes a single proxy. If only `server_identity` is provided, it deletes all proxies for that server. If neither is provided, it deletes ALL proxies across all servers. + #### Actions for `entity: account` (V2 Profiles) + - `list_with_status`: View status of all profiles, optionally filtered by `account_id` as a prefix.
+ - `create_profiles`: Creates new profiles from a JSON payload. See `create_profiles_json` param. + - `ban`: Sets a profile's state to BANNED. Requires `account_id`. + - `unban`/`activate`: Sets a profile's state to ACTIVE. Requires `account_id`. + - `pause`: Sets a profile's state to RESTING. Requires `account_id`. + - `delete`: Deletes a single profile. Requires `account_id`. + - `delete_all`: **(Destructive)** Deletes all profiles, or those matching the `account_id` as a prefix. - #### Actions for `entity: account` - - `list_with_status`: View status of all accounts, optionally filtered by `account_id` (as a prefix). - - `ban`: Ban a specific account. Requires `account_id`. - - `unban`: Un-ban a specific account. Requires `account_id`. - - `unban_all`: Sets the status of all accounts (or those matching a prefix in `account_id`) to `ACTIVE`. - - `delete_from_redis`: **(Destructive)** Deletes account status from Redis via Thrift service. This permanently removes the account from being tracked by the system. If `account_id` is provided, it deletes that specific account. If `account_id` is provided as a prefix, it deletes all accounts matching that prefix. If `account_id` is empty, it deletes ALL accounts. + #### Actions for `entity: proxy` (DEPRECATED) + - `list_with_status`, `ban`, `unban`, `ban_all`, `unban_all`, `delete_from_redis`. #### Actions for `entity: client` - `list_with_status`: View success/failure statistics for each client type. - `delete_from_redis`: **(Destructive)** Deletes all client stats from Redis. #### Actions for `entity: activity_counters` - - `list_with_status`: View current activity rates (ops/min, ops/hr) for proxies and accounts. - - #### Actions for `entity: accounts_and_proxies` - - This entity performs the selected action on **both** proxies and accounts where applicable. - - `list_with_status`: View statuses for both proxies and accounts. - - `ban`: Ban a specific proxy AND a specific account. Requires `proxy_url`, `server_identity`, and `account_id`. - - `unban`: Un-ban a specific proxy AND a specific account. Requires `proxy_url`, `server_identity`, and `account_id`. - - `ban_all`: Ban all proxies for a `server_identity` (or all servers). Does not affect accounts. - - `unban_all`: Un-ban all proxies for a `server_identity` (or all servers) AND all accounts (optionally filtered by `account_id` as a prefix). - - `delete_from_redis`: Deletes both account and proxy status from Redis via Thrift service. For accounts, if `account_id` is provided as a prefix, it deletes all accounts matching that prefix. If `account_id` is empty, it deletes ALL accounts. For proxies, if `server_identity` is provided, it deletes all proxies for that server. If `server_identity` is empty, it deletes ALL proxies across all servers. - + - `list_with_status`: View current activity rates for proxies and accounts. """, ), - "server_identity": Param( - None, - type=["null", "string"], - description="The identity of the server instance (for proxy management). Leave blank to list all or delete all proxies.", - ), - "proxy_url": Param( - None, - type=["null", "string"], - description="The proxy URL to act upon (e.g., 'socks5://host:port').", + "redis_env": Param( + "sim_auth", + type="string", + enum=["sim_auth", "sim_download"], + title="[V2 Profiles] Redis Environment", + description="The environment for v2 profile management (e.g., 'sim_auth'). Determines the Redis key prefix.", ), "account_id": Param( None, type=["null", "string"], - description="The account ID to act upon. 
For `unban_all` or `delete_from_redis` on accounts, this can be an optional prefix. Leave blank to delete all accounts.", + description="For v2 profiles: The profile name (e.g., 'auth_user_0') or a prefix for `list` and `delete_all`.", + ), + "create_profiles_json": Param( + """{ + "auth_profile_setup": { + "env": "sim_auth", + "cleanup_before_run": false, + "pools": [ + { + "prefix": "auth_user", + "proxy": "sslocal-rust-1090:1090", + "count": 2 + } + ] + } +}""", + type="string", + title="[V2 Profiles] Create Profiles JSON", + description="For action `create_profiles`. A JSON payload defining the profiles to create. This is passed to `yt-ops-client setup-profiles`.", + **{'ui_widget': 'json', 'multi_line': True} + ), + "server_identity": Param( + None, + type=["null", "string"], + description="[DEPRECATED] The server identity for proxy management.", + ), + "proxy_url": Param( + None, + type=["null", "string"], + description="[DEPRECATED] The proxy URL to act upon.", ), "redis_conn_id": Param( DEFAULT_REDIS_CONN_ID, type="string", title="Redis Connection ID", - description="The Airflow connection ID for the Redis server (used for 'delete_from_redis' and for fetching detailed account status).", + description="The Airflow connection ID for the Redis server.", ), }, ) as dag: + + @task.branch(task_id="branch_on_action") + def branch_on_action(**context): + action = context["params"]["action"] + if action == "create_profiles": + return "create_profiles_task" + return "system_management_task" + + create_profiles_task = PythonOperator( + task_id="create_profiles_task", + python_callable=_create_profiles_from_json, + ) + system_management_task = PythonOperator( task_id="system_management_task", python_callable=manage_system_callable, ) + + branch_on_action() >> [create_profiles_task, system_management_task] diff --git a/airflow/dags/ytdlp_mgmt_queues.py b/airflow/dags/ytdlp_mgmt_queues.py index eaedfdb..5c979c5 100644 --- a/airflow/dags/ytdlp_mgmt_queues.py +++ b/airflow/dags/ytdlp_mgmt_queues.py @@ -322,7 +322,14 @@ def clear_queue_callable(**context): dump_redis_data_to_csv(redis_client, dump_dir, dump_patterns) all_suffixes = ['_inbox', '_fail', '_result', '_progress', '_skipped'] + special_queues = ['queue_dl_format_tasks'] keys_to_delete = set() + + # Handle special queues first + for q in special_queues: + if q in queues_to_clear_options: + keys_to_delete.add(q) + for queue_base_name in queue_base_names_to_clear: if '_all' in queues_to_clear_options: logger.info(f"'_all' option selected. 
Clearing all standard queues for base '{queue_base_name}'.") @@ -446,6 +453,7 @@ def check_status_callable(**context): raise ValueError(f"Invalid queue_system: {queue_system}") queue_suffixes = ['_inbox', '_progress', '_result', '_fail', '_skipped'] + special_queues = ['queue_dl_format_tasks'] logger.info(f"--- Checking Status for Queue System: '{queue_system}' ---") @@ -468,6 +476,18 @@ def check_status_callable(**context): else: logger.info(f" - Queue '{queue_to_check}': Does not exist.") + logger.info(f"--- Special Queues ---") + for queue_name in special_queues: + key_type = redis_client.type(queue_name).decode('utf-8') + size = 0 + if key_type == 'list': + size = redis_client.llen(queue_name) + + if key_type != 'none': + logger.info(f" - Queue '{queue_name}': Type='{key_type.upper()}', Size={size}") + else: + logger.info(f" - Queue '{queue_name}': Does not exist.") + logger.info(f"--- End of Status Check ---") except Exception as e: @@ -794,10 +814,10 @@ with DAG( None, type=["null", "array"], title="[clear_queue] Queues to Clear", - description="Select which standard queues to clear. '_all' clears all four. If left empty, it defaults to '_all'.", + description="Select which standard queues to clear. '_all' clears all standard queues. 'queue_dl_format_tasks' is the new granular download task queue.", items={ "type": "string", - "enum": ["_inbox", "_fail", "_result", "_progress", "_skipped", "_all"], + "enum": ["_inbox", "_fail", "_result", "_progress", "_skipped", "_all", "queue_dl_format_tasks"], } ), "confirm_clear": Param( @@ -826,7 +846,7 @@ with DAG( ), # --- Params for 'list_contents' --- "queue_to_list": Param( - 'video_queue_inbox,queue2_auth_inbox,queue2_dl_inbox,queue2_dl_result', + 'queue2_auth_inbox,queue_dl_format_tasks,queue2_dl_inbox', type="string", title="[list_contents] Queues to List", description="Comma-separated list of exact Redis key names to list.", diff --git a/airflow/dags/ytdlp_ops_account_maintenance.py b/airflow/dags/ytdlp_ops_account_maintenance.py index 0ae7b52..bf3054b 100644 --- a/airflow/dags/ytdlp_ops_account_maintenance.py +++ b/airflow/dags/ytdlp_ops_account_maintenance.py @@ -4,255 +4,44 @@ # # Distributed under terms of the MIT license. +# -*- coding: utf-8 -*- +# +# Copyright © 2024 rl +# +# Distributed under terms of the MIT license. + """ -Maintenance DAG for managing the lifecycle of ytdlp-ops accounts. -This DAG is responsible for: -- Un-banning accounts whose ban duration has expired. -- Transitioning accounts from RESTING to ACTIVE after their cooldown period. -- Transitioning accounts from ACTIVE to RESTING after their active duration. -This logic was previously handled inside the ytdlp-ops-server and has been -moved here to give the orchestrator full control over account state. +DEPRECATED: Maintenance DAG for managing the lifecycle of ytdlp-ops accounts. 
""" from __future__ import annotations -import logging -import time -from datetime import datetime, timedelta - -from airflow.decorators import task -from airflow.models import Variable from airflow.models.dag import DAG -from airflow.models.param import Param from airflow.utils.dates import days_ago -# Import utility functions and Thrift modules -from utils.redis_utils import _get_redis_client -from pangramia.yt.management import YTManagementService -from thrift.protocol import TBinaryProtocol -from thrift.transport import TSocket, TTransport - -# Configure logging -logger = logging.getLogger(__name__) - -# Default settings from Airflow Variables or hardcoded fallbacks -DEFAULT_REDIS_CONN_ID = 'redis_default' -DEFAULT_MANAGEMENT_SERVICE_IP = Variable.get("MANAGEMENT_SERVICE_HOST", default_var="172.17.0.1") -DEFAULT_MANAGEMENT_SERVICE_PORT = Variable.get("MANAGEMENT_SERVICE_PORT", default_var=9080) - DEFAULT_ARGS = { 'owner': 'airflow', - 'retries': 1, - 'retry_delay': 30, + 'retries': 0, 'queue': 'queue-mgmt', } - -# --- Helper Functions --- - -def _get_thrift_client(host, port, timeout=60): - """Helper to create and connect a Thrift client.""" - transport = TSocket.TSocket(host, port) - transport.setTimeout(timeout * 1000) - transport = TTransport.TFramedTransport(transport) - protocol = TBinaryProtocol.TBinaryProtocol(transport) - client = YTManagementService.Client(protocol) - transport.open() - logger.info(f"Connected to Thrift server at {host}:{port}") - return client, transport - - -@task -def manage_account_states(**context): - """ - Fetches all account statuses and performs necessary state transitions - based on time durations configured in the DAG parameters. - """ - params = context['params'] - requests_limit = params['account_requests_limit'] - cooldown_duration_s = params['account_cooldown_duration_min'] * 60 - ban_duration_s = params['account_ban_duration_hours'] * 3600 - - host = DEFAULT_MANAGEMENT_SERVICE_IP - port = int(DEFAULT_MANAGEMENT_SERVICE_PORT) - redis_conn_id = DEFAULT_REDIS_CONN_ID - logger.info(f"Starting account maintenance. Service: {host}:{port}, Redis: {redis_conn_id}") - logger.info(f"Using limits: Requests={requests_limit}, Cooldown={params['account_cooldown_duration_min']}m, Ban={params['account_ban_duration_hours']}h") - - client, transport = None, None - try: - client, transport = _get_thrift_client(host, port) - redis_client = _get_redis_client(redis_conn_id) - - logger.info(f"--- Step 1: Fetching all account statuses from the ytdlp-ops-server at {host}:{port}... ---") - all_accounts = client.getAccountStatus(accountId=None, accountPrefix=None) - logger.info(f"Found {len(all_accounts)} total accounts to process.") - - accounts_to_unban = [] - accounts_to_activate = [] - accounts_to_rest = [] - - now_ts = int(time.time()) - - for acc in all_accounts: - # Thrift can return 0 for unset integer fields. - # The AccountStatus thrift object is missing status_changed_timestamp and active_since_timestamp. - # We use available timestamps as proxies. - last_failure_ts = int(acc.lastFailureTimestamp or 0) - last_success_ts = int(acc.lastSuccessTimestamp or 0) - last_usage_ts = max(last_failure_ts, last_success_ts) - - if acc.status == "BANNED" and last_failure_ts > 0: - time_since_ban = now_ts - last_failure_ts - if time_since_ban >= ban_duration_s: - accounts_to_unban.append(acc.accountId) - else: - remaining_s = ban_duration_s - time_since_ban - logger.info(f"Account {acc.accountId} is BANNED. 
Time until unban: {timedelta(seconds=remaining_s)}") - elif acc.status == "RESTING" and last_usage_ts > 0: - time_since_rest = now_ts - last_usage_ts - if time_since_rest >= cooldown_duration_s: - accounts_to_activate.append(acc.accountId) - else: - remaining_s = cooldown_duration_s - time_since_rest - logger.info(f"Account {acc.accountId} is RESTING. Time until active: {timedelta(seconds=remaining_s)}") - elif acc.status == "ACTIVE": - # For ACTIVE -> RESTING, check how many requests have been made since activation. - count_at_activation_raw = redis_client.hget(f"account_status:{acc.accountId}", "success_count_at_activation") - - if count_at_activation_raw is not None: - count_at_activation = int(count_at_activation_raw) - current_success_count = acc.successCount or 0 - requests_made = current_success_count - count_at_activation - - if requests_made >= requests_limit: - logger.info(f"Account {acc.accountId} reached request limit ({requests_made}/{requests_limit}). Moving to RESTING.") - accounts_to_rest.append(acc.accountId) - else: - requests_remaining = requests_limit - requests_made - logger.info(f"Account {acc.accountId} is ACTIVE. Requests until rest: {requests_remaining}/{requests_limit}") - else: - # This is a fallback for accounts that were activated before this logic was deployed. - # We can activate them "fresh" by setting their baseline count now. - logger.info(f"Account {acc.accountId} is ACTIVE but has no 'success_count_at_activation'. Setting it now.") - redis_client.hset(f"account_status:{acc.accountId}", "success_count_at_activation", acc.successCount or 0) - - logger.info("--- Step 2: Analyzing accounts for state transitions ---") - logger.info(f"Found {len(accounts_to_unban)} accounts with expired bans to un-ban.") - logger.info(f"Found {len(accounts_to_activate)} accounts with expired rest periods to activate.") - logger.info(f"Found {len(accounts_to_rest)} accounts with expired active periods to put to rest.") - - # --- Perform State Transitions --- - - # 1. Un-ban accounts via Thrift call - logger.info("--- Step 3: Processing un-bans ---") - if accounts_to_unban: - logger.info(f"Un-banning {len(accounts_to_unban)} accounts: {accounts_to_unban}") - account_map = {acc.accountId: acc for acc in all_accounts} - for acc_id in accounts_to_unban: - try: - client.unbanAccount(acc_id, "Automatic un-ban by Airflow maintenance DAG.") - logger.info(f"Successfully un-banned account '{acc_id}'.") - - # Set the activation count to baseline the account immediately after un-banning. - key = f"account_status:{acc_id}" - current_success_count = account_map[acc_id].successCount or 0 - redis_client.hset(key, "success_count_at_activation", current_success_count) - logger.info(f"Set 'success_count_at_activation' for un-banned account '{acc_id}' to {current_success_count}.") - except Exception as e: - logger.error(f"Failed to un-ban account '{acc_id}': {e}") - else: - logger.info("No accounts to un-ban.") - - # 2. 
Activate resting accounts via direct Redis write - logger.info("--- Step 4: Processing activations ---") - if accounts_to_activate: - logger.info(f"Activating {len(accounts_to_activate)} accounts: {accounts_to_activate}") - now_ts = int(time.time()) - account_map = {acc.accountId: acc for acc in all_accounts} - with redis_client.pipeline() as pipe: - for acc_id in accounts_to_activate: - key = f"account_status:{acc_id}" - current_success_count = account_map[acc_id].successCount or 0 - pipe.hset(key, "status", "ACTIVE") - pipe.hset(key, "active_since_timestamp", now_ts) - pipe.hset(key, "status_changed_timestamp", now_ts) - pipe.hset(key, "success_count_at_activation", current_success_count) - pipe.execute() - logger.info("Finished activating accounts.") - else: - logger.info("No accounts to activate.") - - # 3. Rest active accounts via direct Redis write - logger.info("--- Step 5: Processing rests ---") - if accounts_to_rest: - logger.info(f"Putting {len(accounts_to_rest)} accounts to rest: {accounts_to_rest}") - now_ts = int(time.time()) - with redis_client.pipeline() as pipe: - for acc_id in accounts_to_rest: - key = f"account_status:{acc_id}" - pipe.hset(key, "status", "RESTING") - pipe.hset(key, "status_changed_timestamp", now_ts) - pipe.hdel(key, "success_count_at_activation") - pipe.execute() - logger.info("Finished putting accounts to rest.") - else: - logger.info("No accounts to put to rest.") - - logger.info("--- Account maintenance run complete. ---") - - finally: - if transport and transport.isOpen(): - transport.close() - - with DAG( dag_id='ytdlp_ops_account_maintenance', default_args=DEFAULT_ARGS, - schedule='*/5 * * * *', # Run every 5 minutes + schedule=None, # Disabled start_date=days_ago(1), catchup=False, - tags=['ytdlp', 'maintenance'], + is_paused_upon_creation=True, + tags=['ytdlp', 'maintenance', 'deprecated'], doc_md=""" - ### YT-DLP Account Maintenance: Time-Based State Transitions + ### DEPRECATED: YT-DLP Account Maintenance - This DAG is the central authority for automated, **time-based** state management for ytdlp-ops accounts. - It runs periodically to fetch the status of all accounts and applies its own logic to determine if an account's state should change based on configurable time durations. + This DAG is **DEPRECATED** and should not be used. Its functionality has been replaced + by a standalone, continuously running `policy-enforcer` service. - The thresholds are defined as DAG parameters and can be configured via the Airflow UI: - - **Requests Limit**: How many successful requests an account can perform before it needs to rest. - - **Cooldown Duration**: How long an account must rest before it can be used again. - - **Ban Duration**: How long a ban lasts before the account is automatically un-banned. + To run the new enforcer, use the following command on a management node: + `bin/ytops-client policy-enforcer --policy policies/8_unified_simulation_enforcer.yaml --live` - --- - - #### Separation of Concerns: Time vs. Errors - - It is critical to understand that this DAG primarily handles time-based state changes. Error-based banning may be handled by worker DAGs during URL processing. This separation ensures that maintenance is predictable and based on timers, while acute, error-driven actions are handled immediately by the workers that encounter them. - - --- - - #### State Transitions Performed by This DAG: - - On each run, this DAG fetches the raw status and timestamps for all accounts and performs the following checks: - - 1. 
**Un-banning (`BANNED` -> `ACTIVE`)**: - - **Condition**: An account has been in the `BANNED` state for longer than the configured `account_ban_duration_hours`. - - **Action**: The DAG calls the `unbanAccount` service endpoint to lift the ban. - - 2. **Activation (`RESTING` -> `ACTIVE`)**: - - **Condition**: An account has been in the `RESTING` state for longer than the configured `account_cooldown_duration_min`. - - **Action**: The DAG updates the account's status to `ACTIVE` directly in Redis. - - 3. **Resting (`ACTIVE` -> `RESTING`)**: - - **Condition**: An account has performed more successful requests than the configured `account_requests_limit` since it was last activated. - - **Action**: The DAG updates the account's status to `RESTING` directly in Redis. - - This process gives full control over time-based account lifecycle management to the Airflow orchestrator. + This DAG is paused by default and will be removed in a future version. """, - params={ - 'account_requests_limit': Param(250, type="integer", description="Number of successful requests an account can make before it is rested. Default is 250."), - 'account_cooldown_duration_min': Param(60, type="integer", description="Duration in minutes an account must rest ('pause') before being activated again. Default is 60 minutes (1 hour)."), - 'account_ban_duration_hours': Param(24, type="integer", description="Duration in hours an account stays banned before it can be un-banned."), - } ) as dag: - manage_account_states() + pass diff --git a/airflow/dags/ytdlp_ops_v01_orchestrator.py b/airflow/dags/ytdlp_ops_v01_orchestrator.py index 6d0dc64..9602d85 100644 --- a/airflow/dags/ytdlp_ops_v01_orchestrator.py +++ b/airflow/dags/ytdlp_ops_v01_orchestrator.py @@ -48,6 +48,65 @@ DEFAULT_BUNCH_DELAY_S = 1 DEFAULT_YT_AUTH_SERVICE_IP = Variable.get("YT_AUTH_SERVICE_IP", default_var="172.17.0.1") DEFAULT_YT_AUTH_SERVICE_PORT = Variable.get("YT_AUTH_SERVICE_PORT", default_var=9080) +# Default ytdlp.json content for the unified config parameter +DEFAULT_YTDLP_CONFIG = { + "ytops": { + "force_renew": [], + "session_params": { + # "visitor_rotation_threshold": 250 + } + }, + "ytdlp_params": { + "debug_printtraffic": True, + "write_pages": True, + "verbose": True, + "no_color": True, + "ignoreerrors": True, + "noresizebuffer": True, + "buffersize": "4M", + "concurrent_fragments": 8, + "socket_timeout": 60, + "outtmpl": { + "default": "%(id)s.f%(format_id)s.%(ext)s" + }, + "restrictfilenames": True, + "updatetime": False, + "noplaylist": True, + "match_filter": "!is_live", + "writeinfojson": True, + "skip_download": True, + "allow_playlist_files": False, + "clean_infojson": True, + "getcomments": False, + "writesubtitles": False, + "writethumbnail": False, + "sleep_interval_requests": 0.75, + "parse_metadata": [ + ":(?P)" + ], + "extractor_args": { + "youtube": { + "player_client": ["tv_simply"], + "formats": ["duplicate"], + "jsc_trace": ["true"], + "pot_trace": ["true"], + "skip": ["translated_subs", "hls"] + }, + "youtubepot-bgutilhttp": { + "base_url": ["http://172.17.0.1:4416"] + } + }, + "noprogress": True, + "format_sort": [ + "res", + "ext:mp4:m4a" + ], + "remuxvideo": "mp4", + "nooverwrites": True, + "continuedl": True + } +} + # --- Helper Functions --- def _check_application_queue(redis_client, queue_base_name: str) -> int: @@ -159,26 +218,43 @@ def orchestrate_workers_ignition_callable(**context): # --- Generate a consistent timestamped prefix for this orchestrator run --- # This ensures all workers spawned from this run use the same set 
of accounts. final_account_pool_prefix = params['account_pool'] + + # --- Unified JSON Config Handling --- + # Start with the JSON config from params, then merge legacy params into it. + try: + ytdlp_config = json.loads(params.get('ytdlp_config_json', '{}')) + except json.JSONDecodeError as e: + logger.error(f"Invalid ytdlp_config_json parameter. Must be valid JSON. Error: {e}") + raise AirflowException("Invalid ytdlp_config_json parameter.") + if params.get('prepend_client_to_account') and params.get('account_pool_size') is not None: - clients_str = params.get('clients', '') + try: + clients_str = ','.join(ytdlp_config['ytdlp_params']['extractor_args']['youtube']['player_client']) + except KeyError: + clients_str = '' + primary_client = clients_str.split(',')[0].strip() if clients_str else 'unknown' - # Use a timestamp from the orchestrator's run for consistency timestamp = datetime.now().strftime('%Y%m%d%H%M%S') final_account_pool_prefix = f"{params['account_pool']}_{timestamp}_{primary_client}" logger.info(f"Generated consistent account prefix for this run: '{final_account_pool_prefix}'") + final_ytdlp_config_str = json.dumps(ytdlp_config) + # --- End of JSON Config Handling --- + for i, bunch in enumerate(bunches): logger.info(f"--- Triggering Bunch {i+1}/{len(bunches)} (contains {len(bunch)} dispatcher(s)) ---") - for j, _ in enumerate(bunch): + for j, worker_index in enumerate(bunch): # Create a unique run_id for each dispatcher run run_id = f"dispatched_{dag_run_id}_{total_triggered}" # Pass all orchestrator params to the dispatcher, which will then pass them to the worker. conf_to_pass = {p: params[p] for p in params} - # Override account_pool with the generated prefix + # Override account_pool with the generated prefix and set the unified JSON config conf_to_pass['account_pool'] = final_account_pool_prefix + conf_to_pass['worker_index'] = worker_index + conf_to_pass['ytdlp_config_json'] = final_ytdlp_config_str - logger.info(f"Triggering dispatcher {j+1}/{len(bunch)} in bunch {i+1} (run {total_triggered + 1}/{total_workers}) (Run ID: {run_id})") + logger.info(f"Triggering dispatcher {j+1}/{len(bunch)} in bunch {i+1} (run {total_triggered + 1}/{total_workers}, worker_index: {worker_index}) (Run ID: {run_id})") logger.debug(f"Full conf for dispatcher run {run_id}: {conf_to_pass}") trigger_dag( @@ -299,73 +375,22 @@ with DAG( 'delay_between_bunches_s': Param(DEFAULT_BUNCH_DELAY_S, type="integer", description="Delay in seconds between starting each bunch."), 'skip_if_queue_empty': Param(False, type="boolean", title="[Ignition Control] Skip if Queue Empty", description="If True, the orchestrator will not start any dispatchers if the application's work queue is empty."), + # --- Unified Worker Configuration --- + 'ytdlp_config_json': Param( + json.dumps(DEFAULT_YTDLP_CONFIG, indent=2), + type="string", + title="[Worker Param] Unified yt-dlp JSON Config", + description="A JSON string containing all parameters for both yt-ops-server and the yt-dlp downloaders. This is the primary way to configure workers.", + **{'ui_widget': 'json', 'multi_line': True} + ), + # --- Worker Passthrough Parameters --- - 'on_auth_failure': Param( - 'proceed_loop_under_manual_inspection', - type="string", - enum=['stop_loop', 'retry_with_new_account', 'retry_without_ban', 'proceed_loop_under_manual_inspection'], - title="[Worker Param] On Authentication Failure Policy", - description="Policy for a worker when a bannable authentication error occurs. 
" - "'stop_loop': Ban the account, mark URL as failed, and stop the worker's loop. " - "'retry_with_new_account': (Default) Ban the failed account, retry ONCE with a new account. If retry fails, ban the second account and stop." - "'retry_without_ban': If a connection error (e.g. SOCKS timeout) occurs, retry with a new account but do NOT ban the first account/proxy. If retry fails, stop the loop without banning." - "'proceed_loop_under_manual_inspection': **BEWARE: MANUAL SUPERVISION REQUIRED.** Marks the URL as failed but continues the processing loop. Use this only when you can manually intervene." - ), - 'on_download_failure': Param( - 'proceed_loop', - type="string", - enum=['stop_loop', 'proceed_loop', 'retry_with_new_token'], - title="[Worker Param] On Download Failure Policy", - description="Policy for a worker when a download or probe error occurs. " - "'stop_loop': Mark URL as failed and stop the worker's loop. " - "'proceed_loop': (Default) Mark URL as failed but continue the processing loop with a new URL. " - "'retry_with_new_token': Attempt to get a new token with a new account and retry the download once. If it fails again, proceed loop." - ), - 'request_params_json': Param('{}', type="string", title="[Worker Param] Request Params JSON", description="JSON string with per-request parameters to override server defaults. Can be a full JSON object or comma-separated key=value pairs (e.g., 'session_params.location=DE,ytdlp_params.skip_cache=true')."), - 'language_code': Param('en-US', type="string", title="[Worker Param] Language Code", description="The language code (e.g., 'en-US', 'de-DE') to use for the YouTube request headers."), + # These are used by the orchestrator itself and are also passed to workers. 'queue_name': Param(DEFAULT_QUEUE_NAME, type="string", description="[Worker Param] Base name for Redis queues."), 'redis_conn_id': Param(DEFAULT_REDIS_CONN_ID, type="string", description="[Worker Param] Airflow Redis connection ID."), - 'clients': Param( - 'tv_simply', - type="string", - title="[Worker Param] Clients", - description="[Worker Param] Comma-separated list of clients for token generation. Full list: web, web_safari, web_embedded, web_music, web_creator, mweb, web_camoufox, web_safari_camoufox, web_embedded_camoufox, web_music_camoufox, web_creator_camoufox, mweb_camoufox, android, android_music, android_creator, android_vr, ios, ios_music, ios_creator, tv, tv_simply, tv_embedded. See DAG documentation for details." - ), 'account_pool': Param('ytdlp_account', type="string", description="[Worker Param] Account pool prefix or comma-separated list."), 'account_pool_size': Param(10, type=["integer", "null"], description="[Worker Param] If using a prefix for 'account_pool', this specifies the number of accounts to generate (e.g., 10 for 'prefix_01' through 'prefix_10'). Required when using a prefix."), 'prepend_client_to_account': Param(True, type="boolean", title="[Worker Param] Prepend Client to Account", description="If True, prepends client and timestamp to account names in prefix mode. Format: prefix_YYYYMMDDHHMMSS_client_XX."), - 'service_ip': Param(DEFAULT_YT_AUTH_SERVICE_IP, type="string", description="[Worker Param] IP of the ytdlp-ops-server. Default is from Airflow variable YT_AUTH_SERVICE_IP or hardcoded."), - 'service_port': Param(DEFAULT_YT_AUTH_SERVICE_PORT, type="integer", description="[Worker Param] Port of the Envoy load balancer. 
Default is from Airflow variable YT_AUTH_SERVICE_PORT or hardcoded."), - 'machine_id': Param("ytdlp-ops-airflow-service", type="string", description="[Worker Param] Identifier for the client machine."), - 'assigned_proxy_url': Param(None, type=["string", "null"], title="[Worker Param] Assigned Proxy URL", description="A specific proxy URL to use for the request, overriding the server's proxy pool logic."), - 'auto_create_new_accounts_on_exhaustion': Param(True, type="boolean", description="[Worker Param] If True and all accounts in a prefix-based pool are exhausted, create a new one automatically."), - # --- Download Control Parameters --- - 'delay_between_formats_s': Param(15, type="integer", title="[Worker Param] Delay Between Formats (s)", description="Delay in seconds between downloading each format when multiple formats are specified. A 22s wait may be effective for batch downloads, while 6-12s may suffice if cookies are refreshed regularly."), - 'yt_dlp_test_mode': Param(False, type="boolean", title="[Worker Param] yt-dlp Test Mode", description="If True, runs yt-dlp with --test flag (dry run without downloading)."), - 'skip_probe': Param(True, type="boolean", title="[Worker Param] Skip Probe", description="If True, skips the ffmpeg probe of downloaded files."), - 'yt_dlp_cleanup_mode': Param(False, type="boolean", title="[Worker Param] yt-dlp Cleanup Mode", description="If True, creates a .empty file and deletes the original media file after successful download and probe."), - 'socket_timeout': Param(15, type="integer", title="[Worker Param] Socket Timeout", description="Timeout in seconds for socket operations."), - 'download_format': Param( - 'bestvideo[ext=mp4]+bestaudio[ext=m4a]/best[ext=mp4]/best', - type="string", - title="[Worker Param] Download Format", - description="Custom yt-dlp format string. Common presets: [1] 'bestvideo[ext=mp4]+bestaudio[ext=m4a]/best[ext=mp4]/best' (Default, best quality MP4). [2] '18-dashy/18,140-dashy/140,133-dashy/134-dashy/136-dashy/137-dashy/250-dashy/298-dashy/299-dashy' (Legacy formats). [3] '299-dashy/298-dashy/250-dashy/137-dashy/136-dashy/135-dashy/134-dashy/133-dashy' (High-framerate formats)." - ), - 'downloader': Param( - 'cli', - type="string", - enum=['py', 'aria-rpc', 'cli'], - title="[Worker Param] Download Tool", - description="Choose the download tool to use: 'py' (native python, recommended), 'aria-rpc' (send to aria2c daemon), 'cli' (legacy yt-dlp wrapper)." - ), - 'aria_host': Param('172.17.0.1', type="string", title="[Worker Param] Aria2c Host", description="For 'aria-rpc' downloader: Host of the aria2c RPC server. Can be set via Airflow Variable 'YTDLP_ARIA_HOST'."), - 'aria_port': Param(6800, type="integer", title="[Worker Param] Aria2c Port", description="For 'aria-rpc' downloader: Port of the aria2c RPC server. Can be set via Airflow Variable 'YTDLP_ARIA_PORT'."), - 'aria_secret': Param('SQGCQPLVFQIASMPNPOJYLVGJYLMIDIXDXAIXOTX', type="string", title="[Worker Param] Aria2c Secret", description="For 'aria-rpc' downloader: Secret token. 
Can be set via Airflow Variable 'YTDLP_ARIA_SECRET'."), - 'yt_dlp_extra_args': Param( - '', - type=["string", "null"], - title="[Worker Param] Extra yt-dlp arguments", - ), } ) as dag: diff --git a/airflow/dags/ytdlp_ops_v01_worker_per_url.py b/airflow/dags/ytdlp_ops_v01_worker_per_url.py index c42b38e..815b340 100644 --- a/airflow/dags/ytdlp_ops_v01_worker_per_url.py +++ b/airflow/dags/ytdlp_ops_v01_worker_per_url.py @@ -215,6 +215,15 @@ def _get_account_pool(params: dict) -> list: # TASK DEFINITIONS (TaskFlow API) # ============================================================================= +def _get_worker_params(params: dict) -> dict: + """Loads and returns the worker_params dict from the unified JSON config.""" + try: + ytdlp_config = json.loads(params.get('ytdlp_config_json', '{}')) + return ytdlp_config.get('ytops', {}).get('worker_params', {}) + except json.JSONDecodeError: + logger.error("Could not parse ytdlp_config_json. Using empty worker_params.") + return {} + @task def get_url_and_assign_account(**context): """ @@ -223,6 +232,15 @@ def get_url_and_assign_account(**context): """ params = context['params'] ti = context['task_instance'] + worker_params = _get_worker_params(params) + + # Log the active policies + auth_policy = worker_params.get('on_auth_failure', 'not_set') + download_policy = worker_params.get('on_download_failure', 'not_set') + logger.info(f"--- Worker Policies ---") + logger.info(f" Auth Failure Policy: {auth_policy}") + logger.info(f" Download Failure Policy: {download_policy}") + logger.info(f"-----------------------") # --- Worker Pinning Verification --- # This is a safeguard against a known Airflow issue where clearing a task @@ -293,9 +311,20 @@ def get_url_and_assign_account(**context): except Exception as e: logger.error(f"Could not mark URL as in-progress in Redis: {e}", exc_info=True) - # Account assignment logic is the same as before. - account_id = random.choice(_get_account_pool(params)) - logger.info(f"Selected account '{account_id}' for this run.") + # Account assignment logic + account_id = params.get('account_id') + if account_id: + logger.info(f"Using sticky account '{account_id}' passed from previous run.") + else: + account_pool = _get_account_pool(params) + worker_index = params.get('worker_index') + if worker_index is not None: + account_id = account_pool[worker_index % len(account_pool)] + logger.info(f"Selected account '{account_id}' deterministically using worker_index {worker_index}.") + else: + # Fallback to random choice if no worker_index is provided (e.g., for manual runs) + account_id = random.choice(account_pool) + logger.warning(f"No worker_index provided. 
Selected account '{account_id}' randomly as a fallback.") return { 'url_to_process': url_to_process, @@ -305,10 +334,7 @@ def get_url_and_assign_account(**context): @task def get_token(initial_data: dict, **context): - """Makes a single attempt to get a token by calling the ytops-client get-info tool.""" - import subprocess - import shlex - + """Makes a single attempt to get a token by calling the Thrift service directly.""" ti = context['task_instance'] params = context['params'] @@ -318,26 +344,13 @@ def get_token(initial_data: dict, **context): host, port = params['service_ip'], int(params['service_port']) machine_id = params.get('machine_id') or socket.gethostname() - clients = params.get('clients') - request_params_json = params.get('request_params_json') - language_code = params.get('language_code') + + # For sticky proxy assigned_proxy_url = params.get('assigned_proxy_url') - - if language_code: - try: - params_dict = json.loads(request_params_json) - logger.info(f"Setting language for request: {language_code}") - if 'session_params' not in params_dict: - params_dict['session_params'] = {} - params_dict['session_params']['lang'] = language_code - request_params_json = json.dumps(params_dict) - except (json.JSONDecodeError, TypeError): - logger.warning("Could not parse request_params_json as JSON. Treating as key=value pairs and appending language code.") - lang_kv = f"session_params.lang={language_code}" - if request_params_json: - request_params_json += f",{lang_kv}" - else: - request_params_json = lang_kv + + # The unified JSON config is now the primary source of parameters. + request_params_json = params.get('ytdlp_config_json', '{}') + clients = None # This will be read from the JSON config on the server side. video_id = _extract_video_id(url) timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") @@ -346,75 +359,111 @@ def get_token(initial_data: dict, **context): os.makedirs(job_dir_path, exist_ok=True) info_json_path = os.path.join(job_dir_path, f"info_{video_id or 'unknown'}_{account_id}_{timestamp}.json") - cmd = [ - 'ytops-client', 'get-info', - '--host', host, - '--port', str(port), - '--profile', account_id, - '--output', info_json_path, - '--print-proxy', - '--verbose', - '--log-return', - ] + # Save the received JSON config to the job directory for the download tool. + ytdlp_config_path = os.path.join(job_dir_path, 'ytdlp.json') + try: + with open(ytdlp_config_path, 'w', encoding='utf-8') as f: + # Pretty-print the JSON for readability + config_data = json.loads(request_params_json) + json.dump(config_data, f, indent=2) + logger.info(f"Saved ytdlp config to {ytdlp_config_path}") + except (IOError, json.JSONDecodeError) as e: + logger.error(f"Failed to save ytdlp.json config: {e}") + # Continue anyway, but download may fail. 
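+ # A None path is handled downstream: download_and_probe only loads and passes --config when the saved ytdlp.json actually exists, so the download can still proceed without it.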
+ ytdlp_config_path = None - if clients: - cmd.extend(['--client', clients]) - if machine_id: - cmd.extend(['--machine-id', machine_id]) - if request_params_json and request_params_json != '{}': - cmd.extend(['--request-params-json', request_params_json]) - if assigned_proxy_url: - cmd.extend(['--assigned-proxy-url', assigned_proxy_url]) - - cmd.append(url) + client, transport = None, None + try: + timeout = int(params.get('timeout', DEFAULT_TIMEOUT)) + client, transport = _get_thrift_client(host, port, timeout) - logger.info(f"--- Attempting to get token for URL '{url}' with account '{account_id}' (Clients: {clients}) ---") - copy_paste_cmd = ' '.join(shlex.quote(arg) for arg in cmd) - logger.info(f"Executing command: {copy_paste_cmd}") + airflow_log_context = AirflowLogContext( + taskId=ti.task_id, + runId=ti.run_id, + tryNumber=ti.try_number + ) - process = subprocess.run(cmd, capture_output=True, text=True, timeout=int(params.get('timeout', DEFAULT_TIMEOUT))) - - if process.stdout: - logger.info(f"ytops-client STDOUT:\n{process.stdout}") - if process.stderr: - logger.info(f"ytops-client STDERR:\n{process.stderr}") - - if process.returncode != 0: - error_message = "ytops-client failed. See logs for details." - # Try to find a more specific error message from the Thrift client's output - thrift_error_match = re.search(r'A Thrift error occurred: (.*)', process.stderr) - if thrift_error_match: - error_message = thrift_error_match.group(1).strip() - else: # Fallback to old line-by-line parsing - for line in reversed(process.stderr.strip().split('\n')): - if 'ERROR' in line or 'Thrift error' in line or 'Connection to server failed' in line: - error_message = line.strip() - break + logger.info(f"--- Attempting to get token for URL '{url}' with account '{account_id}' (Clients: {clients}, Proxy: {assigned_proxy_url or 'any'}) ---") - # Determine error code for branching logic - error_code = 'GET_INFO_CLIENT_FAIL' - stderr_lower = process.stderr.lower() + token_data = client.getOrRefreshToken( + accountId=account_id, + updateType=TokenUpdateMode.AUTO, + url=url, + clients=clients, + machineId=machine_id, + airflowLogContext=airflow_log_context, + requestParamsJson=request_params_json, + assignedProxyUrl=assigned_proxy_url + ) + + # --- Log server-side details for debugging --- + if hasattr(token_data, 'serverVersionInfo') and token_data.serverVersionInfo: + logger.info(f"--- Server Version Info ---\n{token_data.serverVersionInfo}") - # These patterns should match the error codes from PBUserException and others - error_patterns = { - "BOT_DETECTED": ["bot_detected"], - "BOT_DETECTION_SIGN_IN_REQUIRED": ["bot_detection_sign_in_required"], - "TRANSPORT_ERROR": ["connection to server failed"], - "PRIVATE_VIDEO": ["private video"], - "COPYRIGHT_REMOVAL": ["copyright"], - "GEO_RESTRICTED": ["in your country"], - "VIDEO_REMOVED": ["video has been removed"], - "VIDEO_UNAVAILABLE": ["video unavailable"], - "MEMBERS_ONLY": ["members-only"], - "AGE_GATED_SIGN_IN": ["sign in to confirm your age"], - "VIDEO_PROCESSING": ["processing this video"], + if hasattr(token_data, 'requestSummary') and token_data.requestSummary: + try: + summary_data = json.loads(token_data.requestSummary) + summary_text = summary_data.get('summary', 'Not available.') + prefetch_log = summary_data.get('prefetch_log', 'Not available.') + nodejs_log = summary_data.get('nodejs_log', 'Not available.') + ytdlp_log = summary_data.get('ytdlp_log', 'Not available.') + + logger.info(f"--- Request Summary ---\n{summary_text}") + 
logger.info(f"--- Prefetch Log ---\n{prefetch_log}") + logger.info(f"--- Node.js Log ---\n{nodejs_log}") + logger.info(f"--- yt-dlp Log ---\n{ytdlp_log}") + except (json.JSONDecodeError, AttributeError): + logger.info(f"--- Raw Request Summary (could not parse JSON) ---\n{token_data.requestSummary}") + + if hasattr(token_data, 'communicationLogPaths') and token_data.communicationLogPaths: + logger.info("--- Communication Log Paths on Server ---") + for log_path in token_data.communicationLogPaths: + logger.info(f" - {log_path}") + # --- End of server-side logging --- + + if not token_data or not token_data.infoJson: + raise AirflowException("Thrift service did not return valid info.json data.") + + # Save info.json to file + with open(info_json_path, 'w', encoding='utf-8') as f: + f.write(token_data.infoJson) + + proxy = token_data.socks + + # Rename file with proxy + final_info_json_path = info_json_path + if proxy: + sanitized_proxy = proxy.replace('://', '---') + new_filename = f"info_{video_id or 'unknown'}_{account_id}_{timestamp}_proxy_{sanitized_proxy}.json" + new_path = os.path.join(job_dir_path, new_filename) + try: + os.rename(info_json_path, new_path) + final_info_json_path = new_path + logger.info(f"Renamed info.json to include proxy: {new_path}") + except OSError as e: + logger.error(f"Failed to rename info.json to include proxy: {e}. Using original path.") + + return { + 'info_json_path': final_info_json_path, + 'job_dir_path': job_dir_path, + 'socks_proxy': proxy, + 'ytdlp_command': None, + 'successful_account_id': account_id, + 'original_url': url, + 'ytdlp_config_path': ytdlp_config_path, } - for code, patterns in error_patterns.items(): - if any(p in stderr_lower for p in patterns): - error_code = code - break # Found a match, stop searching + except (PBServiceException, PBUserException) as e: + error_message = e.message or "Unknown Thrift error" + error_code = getattr(e, 'errorCode', 'THRIFT_ERROR') + # If a "Video unavailable" error mentions rate-limiting, it's a form of bot detection. + if error_code == 'VIDEO_UNAVAILABLE' and 'rate-limited' in error_message.lower(): + logger.warning("Re-classifying rate-limit-related 'VIDEO_UNAVAILABLE' error as 'BOT_DETECTED'.") + error_code = 'BOT_DETECTED' + + logger.error(f"Thrift error getting token: {error_code} - {error_message}") + error_details = { 'error_message': error_message, 'error_code': error_code, @@ -422,35 +471,18 @@ def get_token(initial_data: dict, **context): } ti.xcom_push(key='error_details', value=error_details) raise AirflowException(f"ytops-client get-info failed: {error_message}") - - proxy = None - proxy_match = re.search(r"Proxy used: (.*)", process.stderr) - if proxy_match: - proxy = proxy_match.group(1).strip() - - # Rename the info.json to include the proxy for the download worker - final_info_json_path = info_json_path - if proxy: - # Sanitize for filename: replace '://' which is invalid in paths. Colons are usually fine. - sanitized_proxy = proxy.replace('://', '---') - - new_filename = f"info_{video_id or 'unknown'}_{account_id}_{timestamp}_proxy_{sanitized_proxy}.json" - new_path = os.path.join(job_dir_path, new_filename) - try: - os.rename(info_json_path, new_path) - final_info_json_path = new_path - logger.info(f"Renamed info.json to include proxy: {new_path}") - except OSError as e: - logger.error(f"Failed to rename info.json to include proxy: {e}. 
Using original path.") - - return { - 'info_json_path': final_info_json_path, - 'job_dir_path': job_dir_path, - 'socks_proxy': proxy, - 'ytdlp_command': None, - 'successful_account_id': account_id, - 'original_url': url, - } + except TTransportException as e: + logger.error(f"Thrift transport error: {e}", exc_info=True) + error_details = { + 'error_message': f"Thrift transport error: {e}", + 'error_code': 'TRANSPORT_ERROR', + 'proxy_url': None + } + ti.xcom_push(key='error_details', value=error_details) + raise AirflowException(f"Thrift transport error: {e}") + finally: + if transport and transport.isOpen(): + transport.close() @task.branch def handle_bannable_error_branch(task_id_to_check: str, **context): @@ -460,7 +492,31 @@ def handle_bannable_error_branch(task_id_to_check: str, **context): """ ti = context['task_instance'] params = context['params'] - error_details = ti.xcom_pull(task_ids=task_id_to_check, key='error_details') + + # Try to get error details from the specified task + error_details = None + try: + error_details = ti.xcom_pull(task_ids=task_id_to_check, key='error_details') + except Exception as e: + logger.warning(f"Could not pull error details from task '{task_id_to_check}': {e}") + + # If not found, try to get from any task in the DAG run + if not error_details: + # Look for error details in any task that may have pushed them + # This is a fallback mechanism + dag_run = ti.get_dagrun() + task_instances = dag_run.get_task_instances() + for task_instance in task_instances: + if task_instance.task_id != ti.task_id: + try: + details = task_instance.xcom_pull(key='error_details') + if details: + error_details = details + logger.info(f"Found error details in task '{task_instance.task_id}'") + break + except Exception: + pass + if not error_details: logger.error(f"Task {task_id_to_check} failed without error details. Marking as fatal.") return 'handle_fatal_error' @@ -577,7 +633,7 @@ def ban_and_retry_logic(initial_data: dict): @task(task_id='ban_account_task') def ban_account_task(data: dict, **context): """Wrapper task to call the main ban_account function.""" - ban_account(initial_data=data, reason="Banned by Airflow worker after sliding window check", **context) + _ban_account(initial_data=data, reason="Banned by Airflow worker after sliding window check", context=context) @task(task_id='skip_ban_task') def skip_ban_task(): @@ -591,8 +647,7 @@ def ban_and_retry_logic(initial_data: dict): check_task >> [ban_task_in_group, skip_task] -@task -def ban_account(initial_data: dict, reason: str, **context): +def _ban_account(initial_data: dict, reason: str, context: dict): """Bans a single account via the Thrift service.""" params = context['params'] account_id = initial_data['account_id'] @@ -602,7 +657,8 @@ def ban_account(initial_data: dict, reason: str, **context): client, transport = _get_thrift_client(host, port, timeout) logger.warning(f"Banning account '{account_id}'. 
Reason: {reason}") client.banAccount(accountId=account_id, reason=reason) - except Exception as e: + except BaseException as e: + # Catch BaseException to include SystemExit, which may be raised by the Thrift client logger.error(f"Failed to issue ban for account '{account_id}': {e}", exc_info=True) finally: if transport and transport.isOpen(): @@ -650,13 +706,29 @@ def assign_new_account_after_ban_check(initial_data: dict, **context): 'accounts_tried': accounts_tried, } -@task -def ban_and_report_immediately(initial_data: dict, reason: str, **context): - """Bans an account and prepares for failure reporting and continuing the loop.""" - ban_account(initial_data, reason, **context) - logger.info(f"Account '{initial_data.get('account_id')}' banned. Proceeding to report failure.") - # This task is a leaf in its path and is followed by the failure reporting task. - return initial_data # Pass data along if needed by reporting +@task(retries=0) +def ban_and_report_immediately(**context): + """Bans an account and prepares for failure reporting and stopping the loop.""" + ti = context['task_instance'] + # Manually pull initial_data. This is more robust if the upstream task was skipped. + initial_data = ti.xcom_pull(task_ids='get_url_and_assign_account') + if not initial_data: + logger.error("Could not retrieve initial_data to ban account.") + # Return a default dict to allow downstream reporting to proceed. + return {'account_id': 'unknown', 'url_to_process': context['params'].get('url_to_process', 'unknown')} + + try: + reason = "Banned by Airflow worker (policy is stop_loop)" + _ban_account(initial_data, reason, context) + logger.info(f"Account '{initial_data.get('account_id')}' banned. Proceeding to report failure.") + except BaseException as e: + # Catch BaseException to include SystemExit, which may be raised by the Thrift client + logger.error(f"Error during ban_and_report_immediately: {e}", exc_info=True) + # Swallow the exception to ensure this task succeeds. The loop will be stopped by downstream tasks. 
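+ # The worker's 'stop_loop' policy depends on the failure still being reported, so a ban error is logged and swallowed rather than failing this task.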
+ + # Always return the initial data, even if banning failed + # Make a copy to ensure we're not returning a reference that might be modified elsewhere + return dict(initial_data) if initial_data else {} @task def list_available_formats(token_data: dict, **context): @@ -787,6 +859,14 @@ def download_and_probe(token_data: dict, available_formats: list[str], **context params = context['params'] info_json_path = token_data.get('info_json_path') original_url = token_data.get('original_url') + ytdlp_config_path = token_data.get('ytdlp_config_path') + ytdlp_config = {} + if ytdlp_config_path and os.path.exists(ytdlp_config_path): + try: + with open(ytdlp_config_path, 'r', encoding='utf-8') as f: + ytdlp_config = json.load(f) + except (IOError, json.JSONDecodeError) as e: + logger.warning(f"Could not load ytdlp config from {ytdlp_config_path}: {e}") # Extract proxy from filename, with fallback to token_data for backward compatibility proxy = None @@ -839,6 +919,10 @@ def download_and_probe(token_data: dict, available_formats: list[str], **context downloader = params.get('downloader', 'py') cmd = ['ytops-client', 'download', downloader, '--load-info-json', info_json_path, '-f', format_selector] + # Pass the unified config file to the download tool + if ytdlp_config_path: + cmd.extend(['--config', ytdlp_config_path]) + if downloader == 'py': if proxy: cmd.extend(['--proxy', proxy]) @@ -846,15 +930,13 @@ def download_and_probe(token_data: dict, available_formats: list[str], **context # The 'py' tool maps many yt-dlp flags via --extra-ytdlp-args # The 'py' tool maps many yt-dlp flags via --extra-ytdlp-args - py_extra_args = ['--output', output_template, '--no-resize-buffer', '--buffer-size', '4M'] - if params.get('fragment_retries'): - py_extra_args.extend(['--fragment-retries', str(params['fragment_retries'])]) - if params.get('socket_timeout'): - py_extra_args.extend(['--socket-timeout', str(params['socket_timeout'])]) + py_extra_args = ['--output', output_template] if params.get('yt_dlp_test_mode'): py_extra_args.append('--test') - existing_extra = shlex.split(params.get('yt_dlp_extra_args') or '') + # Get extra args from the config file now + existing_extra_str = ytdlp_config.get('ytops', {}).get('worker_params', {}).get('yt_dlp_extra_args', '') + existing_extra = shlex.split(existing_extra_str or '') final_extra_args_list = existing_extra + py_extra_args if final_extra_args_list: final_extra_args_str = shlex.join(final_extra_args_list) @@ -877,10 +959,13 @@ def download_and_probe(token_data: dict, available_formats: list[str], **context # The remote-dir is the path relative to aria2c's working directory on the host. # The output-dir is the container's local path to the same shared volume. remote_dir = os.path.relpath(download_dir, '/opt/airflow/downloadfiles/videos') + + # Get aria params from config file + worker_params = ytdlp_config.get('ytops', {}).get('worker_params', {}) cmd.extend([ - '--aria-host', params.get('aria_host', '172.17.0.1'), - '--aria-port', str(params.get('aria_port', 6800)), - '--aria-secret', params.get('aria_secret'), + '--aria-host', worker_params.get('aria_host', '172.17.0.1'), + '--aria-port', str(worker_params.get('aria_port', 6800)), + '--aria-secret', worker_params.get('aria_secret'), '--wait', '--output-dir', download_dir, '--remote-dir', remote_dir, @@ -900,11 +985,7 @@ def download_and_probe(token_data: dict, available_formats: list[str], **context cmd.extend(['--proxy', proxy]) # The 'cli' tool is the old yt-dlp wrapper, so it takes similar arguments. 
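+ # Buffer, retry and socket-timeout tuning is no longer injected here; those knobs are expected to come from the unified ytdlp.json config (ytdlp_params) rather than individual DAG params.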
- cli_extra_args = ['--output', full_output_path, '--no-resize-buffer', '--buffer-size', '4M'] - if params.get('fragment_retries'): - cli_extra_args.extend(['--fragment-retries', str(params['fragment_retries'])]) - if params.get('socket_timeout'): - cli_extra_args.extend(['--socket-timeout', str(params['socket_timeout'])]) + cli_extra_args = ['--output', full_output_path, '--verbose'] if params.get('yt_dlp_test_mode'): cli_extra_args.append('--test') @@ -1030,71 +1111,84 @@ def download_and_probe(token_data: dict, available_formats: list[str], **context with open(info_json_path, 'r', encoding='utf-8') as f: info = json.load(f) - # Split the format string by commas to get a list of individual format selectors. - # This enables parallel downloads of different formats or format groups. - # For example, '18,140,299/298' becomes ['18', '140', '299/298'], - # and each item will be downloaded in a separate yt-dlp process. - if download_format and isinstance(download_format, str): - formats_to_download_initial = [selector.strip() for selector in download_format.split(',') if selector.strip()] - else: - # Fallback for safety, though download_format should always be a string. - formats_to_download_initial = [] + ytdlp_params = ytdlp_config.get('ytdlp_params', {}) + download_format = ytdlp_params.get('format') - if not formats_to_download_initial: - raise AirflowException("No valid download format selectors were found after parsing.") - - # --- Filter and resolve requested formats --- final_formats_to_download = [] - if not available_formats: - logger.warning("List of available formats is empty. Cannot validate numeric selectors, but will attempt to resolve generic selectors.") + downloader = params.get('downloader', 'cli') + pass_without_splitting = params.get('pass_without_formats_splitting', False) - for selector in formats_to_download_initial: - # A selector is considered generic if it contains keywords like 'best' or filter brackets '[]'. - is_generic = bool(re.search(r'(best|\[|\])', selector)) - - if is_generic: - resolved_selector = _resolve_generic_selector(selector, info_json_path, logger) - if resolved_selector: - # The resolver returns a list for '+' selectors, or a string for others. - resolved_formats = resolved_selector if isinstance(resolved_selector, list) else [resolved_selector] - - for res_format in resolved_formats: - # Prefer -dashy version if available and the format is a simple numeric ID - if res_format.isdigit() and f"{res_format}-dashy" in available_formats: - final_format = f"{res_format}-dashy" - logger.info(f"Resolved format '{res_format}' from selector '{selector}'. Preferred '-dashy' version: '{final_format}'.") - else: - final_format = res_format - - # Validate the chosen format against available formats - if available_formats: - individual_ids = re.split(r'[/+]', final_format) - is_available = any(fid in available_formats for fid in individual_ids) - - if is_available: - final_formats_to_download.append(final_format) - else: - logger.warning(f"Resolved format '{final_format}' (from '{selector}') contains no available formats. Skipping.") - else: - # Cannot validate, so we trust the resolver's output. - final_formats_to_download.append(final_format) - else: - logger.warning(f"Could not resolve generic selector '{selector}' using yt-dlp. Skipping.") + if pass_without_splitting and downloader != 'aria-rpc': + logger.info("'pass_without_formats_splitting' is True. 
Passing download format string directly to the download tool.") + final_formats_to_download = download_format + else: + if pass_without_splitting and downloader == 'aria-rpc': + logger.warning("'pass_without_formats_splitting' is True but is not compatible with 'aria-rpc' downloader. Splitting formats as normal.") + + # Split the format string by commas to get a list of individual format selectors. + # This enables parallel downloads of different formats or format groups. + # For example, '18,140,299/298' becomes ['18', '140', '299/298'], + # and each item will be downloaded in a separate yt-dlp process. + if download_format and isinstance(download_format, str): + formats_to_download_initial = [selector.strip() for selector in download_format.split(',') if selector.strip()] else: - # This is a numeric-based selector (e.g., '140' or '299/298' or '140-dashy'). - # Validate it against the available formats. - if not available_formats: - logger.warning(f"Cannot validate numeric selector '{selector}' because available formats list is empty. Assuming it's valid.") - final_formats_to_download.append(selector) - continue + # Fallback for safety, though download_format should always be a string. + formats_to_download_initial = [] - individual_ids = re.split(r'[/+]', selector) - is_available = any(fid in available_formats for fid in individual_ids) - - if is_available: - final_formats_to_download.append(selector) + if not formats_to_download_initial: + raise AirflowException("No valid download format selectors were found after parsing.") + + # --- Filter and resolve requested formats --- + if not available_formats: + logger.warning("List of available formats is empty. Cannot validate numeric selectors, but will attempt to resolve generic selectors.") + + for selector in formats_to_download_initial: + # A selector is considered generic if it contains keywords like 'best' or filter brackets '[]'. + is_generic = bool(re.search(r'(best|\[|\])', selector)) + + if is_generic: + resolved_selector = _resolve_generic_selector(selector, info_json_path, logger) + if resolved_selector: + # The resolver returns a list for '+' selectors, or a string for others. + resolved_formats = resolved_selector if isinstance(resolved_selector, list) else [resolved_selector] + + for res_format in resolved_formats: + # Prefer -dashy version if available and the format is a simple numeric ID + if res_format.isdigit() and f"{res_format}-dashy" in available_formats: + final_format = f"{res_format}-dashy" + logger.info(f"Resolved format '{res_format}' from selector '{selector}'. Preferred '-dashy' version: '{final_format}'.") + else: + final_format = res_format + + # Validate the chosen format against available formats + if available_formats: + individual_ids = re.split(r'[/+]', final_format) + is_available = any(fid in available_formats for fid in individual_ids) + + if is_available: + final_formats_to_download.append(final_format) + else: + logger.warning(f"Resolved format '{final_format}' (from '{selector}') contains no available formats. Skipping.") + else: + # Cannot validate, so we trust the resolver's output. + final_formats_to_download.append(final_format) + else: + logger.warning(f"Could not resolve generic selector '{selector}' using yt-dlp. Skipping.") else: - logger.warning(f"Requested numeric format selector '{selector}' contains no available formats. Skipping.") + # This is a numeric-based selector (e.g., '140' or '299/298' or '140-dashy'). + # Validate it against the available formats. 
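+ # A compound selector such as '299/298' is kept as long as any of its alternatives appears in available_formats; it is dropped only when none of them do.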
+ if not available_formats: + logger.warning(f"Cannot validate numeric selector '{selector}' because available formats list is empty. Assuming it's valid.") + final_formats_to_download.append(selector) + continue + + individual_ids = re.split(r'[/+]', selector) + is_available = any(fid in available_formats for fid in individual_ids) + + if is_available: + final_formats_to_download.append(selector) + else: + logger.warning(f"Requested numeric format selector '{selector}' contains no available formats. Skipping.") if not final_formats_to_download: raise AirflowException("None of the requested formats are available for this video.") @@ -1323,6 +1417,8 @@ def mark_url_as_success(initial_data: dict, downloaded_file_paths: list, token_d logger.info(f"Stored success result for URL '{url}' and removed from progress queue.") + return token_data + @task(trigger_rule='one_failed') def report_failure_and_stop(**context): """ @@ -1331,7 +1427,12 @@ def report_failure_and_stop(**context): """ params = context['params'] ti = context['task_instance'] - url = params.get('url_to_process', 'unknown') + url = params.get('url_to_process') + + # Ensure we have a valid URL string for Redis keys + if not url or url == 'None': + url = f"unknown_url_{context['dag_run'].run_id}" + logger.warning(f"No valid URL found in params. Using generated key: {url}") # Collect error details from XCom error_details = {} @@ -1379,12 +1480,15 @@ def report_failure_and_stop(**context): with client.pipeline() as pipe: pipe.hset(result_queue, url, json.dumps(result_data)) pipe.hset(fail_queue, url, json.dumps(result_data)) - pipe.hdel(progress_queue, url) + # Only try to remove from progress queue if we have a real URL + if url != f"unknown_url_{context['dag_run'].run_id}": + pipe.hdel(progress_queue, url) pipe.execute() logger.info(f"Stored failure result for URL '{url}' in '{result_queue}' and '{fail_queue}' and removed from progress queue.") except Exception as e: logger.error(f"Could not report failure to Redis: {e}", exc_info=True) + return None @task(trigger_rule='one_failed') @@ -1395,7 +1499,12 @@ def report_failure_and_continue(**context): """ params = context['params'] ti = context['task_instance'] - url = params.get('url_to_process', 'unknown') + url = params.get('url_to_process') + + # Ensure we have a valid URL string for Redis keys + if not url or url == 'None': + url = f"unknown_url_{context['dag_run'].run_id}" + logger.warning(f"No valid URL found in params. Using generated key: {url}") # Collect error details from XCom error_details = {} @@ -1446,7 +1555,9 @@ def report_failure_and_continue(**context): with client.pipeline() as pipe: pipe.hset(result_queue, url, json.dumps(result_data)) pipe.hset(fail_queue, url, json.dumps(result_data)) - pipe.hdel(progress_queue, url) + # Only try to remove from progress queue if we have a real URL + if url != f"unknown_url_{context['dag_run'].run_id}": + pipe.hdel(progress_queue, url) pipe.execute() logger.info(f"Stored failure result for URL '{url}' in '{result_queue}' and '{fail_queue}' and removed from progress queue.") @@ -1463,7 +1574,12 @@ def handle_fatal_error(**context): """ params = context['params'] ti = context['task_instance'] - url = params.get('url_to_process', 'unknown') + url = params.get('url_to_process') + + # Ensure we have a valid URL string for Redis keys + if not url or url == 'None': + url = f"unknown_url_{context['dag_run'].run_id}" + logger.warning(f"No valid URL found in params. 
Using generated key: {url}") # Collect error details error_details = {} @@ -1509,25 +1625,36 @@ def handle_fatal_error(**context): with client.pipeline() as pipe: pipe.hset(result_queue, url, json.dumps(result_data)) pipe.hset(fail_queue, url, json.dumps(result_data)) - pipe.hdel(progress_queue, url) + # Only try to remove from progress queue if we have a real URL + if url != f"unknown_url_{context['dag_run'].run_id}": + pipe.hdel(progress_queue, url) pipe.execute() logger.info(f"Stored fatal error result for URL '{url}' in '{result_queue}' and '{fail_queue}' for later reprocessing.") except Exception as e: logger.error(f"Could not report fatal error to Redis: {e}", exc_info=True) - # Fail the DAG run to prevent automatic continuation of the processing loop - raise AirflowException("Failing DAG due to fatal error. The dispatcher loop will stop.") + # Instead of raising an exception, log a clear message and return a result + # This allows the task to complete successfully while still indicating the error + logger.error("FATAL ERROR: The dispatcher loop will stop due to a non-retryable error.") + return {'status': 'fatal_error', 'url': url} @task(trigger_rule='one_success') -def continue_processing_loop(**context): +def continue_processing_loop(token_data: dict | None = None, **context): """ - After a successful run, triggers a new dispatcher to continue the processing loop, - effectively asking for the next URL to be processed. + After a run, triggers a new dispatcher to continue the processing loop, + passing along the account/proxy to make them sticky if available. """ params = context['params'] dag_run = context['dag_run'] + ti = context['task_instance'] + + # Check if we're coming from a fatal error path + fatal_error_result = ti.xcom_pull(task_ids='handle_fatal_error') + if fatal_error_result and isinstance(fatal_error_result, dict) and fatal_error_result.get('status') == 'fatal_error': + logger.error("Not continuing processing loop due to fatal error in previous task.") + return # Do not continue the loop for manual runs of the worker DAG. # A worker DAG triggered by the dispatcher will have a run_id starting with 'worker_run_'. @@ -1542,18 +1669,29 @@ def continue_processing_loop(**context): return # Create a new unique run_id for the dispatcher. - # Using a timestamp and UUID ensures the ID is unique and does not grow in length over time, - # preventing database errors. new_dispatcher_run_id = f"retriggered_by_worker_{datetime.now().strftime('%Y%m%d_%H%M%S')}_{str(uuid.uuid4())[:8]}" # Pass all original parameters from the orchestrator through to the new dispatcher run. conf_to_pass = {k: v for k, v in params.items() if v is not None} + conf_to_pass['worker_index'] = params.get('worker_index') - # The new dispatcher will pull its own URL and determine its own queue, so we don't pass these. + if token_data: + # On success path, make the account and proxy "sticky" for the next run. + conf_to_pass['account_id'] = token_data.get('successful_account_id') + conf_to_pass['assigned_proxy_url'] = token_data.get('socks_proxy') + logger.info(f"Worker finished successfully. Triggering a new dispatcher ('{new_dispatcher_run_id}') to continue the loop with sticky account/proxy.") + logger.info(f" - Sticky Account: {conf_to_pass.get('account_id')}") + logger.info(f" - Sticky Proxy: {conf_to_pass.get('assigned_proxy_url')}") + else: + # On failure/skip paths, no token_data is passed. Clear sticky params to allow re-selection. 
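+ # With these keys removed, the next worker run falls back to picking an account from the pool (deterministically via worker_index, or randomly for manual runs) and lets the server choose a proxy.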
+ conf_to_pass.pop('account_id', None) + conf_to_pass.pop('assigned_proxy_url', None) + logger.info(f"Worker finished on a non-success path. Triggering a new dispatcher ('{new_dispatcher_run_id}') to continue the loop without sticky account/proxy.") + + # The new dispatcher will pull its own URL and determine its own queue. conf_to_pass.pop('url_to_process', None) conf_to_pass.pop('worker_queue', None) - logger.info(f"Worker finished successfully. Triggering a new dispatcher ('{new_dispatcher_run_id}') to continue the loop.") trigger_dag( dag_id=dispatcher_dag_id, run_id=new_dispatcher_run_id, @@ -1606,10 +1744,21 @@ def handle_retry_failure_branch(task_id_to_check: str, **context): @task -def ban_and_report_after_retry(retry_data: dict, reason: str, **context): +def ban_and_report_after_retry(**context): """Bans the account used in a failed retry and prepares for failure reporting.""" + ti = context['task_instance'] + reason = "Banned by Airflow worker after failed retry" + + # Manually pull XCom because trigger rules can make XComArgs resolve to None. + retry_data = ti.xcom_pull(task_ids='retry_logic.coalesce_retry_data') + if not retry_data: + # This can happen if the upstream task that generates the data was skipped. + logger.error("Could not retrieve retry data to ban account. This may be due to an unexpected task flow.") + # Instead of failing, return a default dict with enough info to continue + return {'account_id': 'unknown', 'url_to_process': context['params'].get('url_to_process', 'unknown')} + # The account to ban is the one from the retry attempt. - ban_account(retry_data, reason, **context) + _ban_account(retry_data, reason, context) logger.info(f"Account '{retry_data.get('account_id')}' banned after retry failed. Proceeding to report failure.") return retry_data @@ -1624,24 +1773,31 @@ def handle_download_failure_branch(**context): policy = params.get('on_download_failure', 'proceed_loop') ti = context['task_instance'] - # The full task_id for download_and_probe is 'download_processing.download_and_probe' download_error_details = ti.xcom_pull(task_ids='download_processing.download_and_probe', key='download_error_details') + # First, check for specific error codes that override the general policy. if download_error_details: error_code = download_error_details.get('error_code') + + # Unrecoverable video errors always go to the 'skipped' handler. unrecoverable_video_errors = [ "AGE_GATED_SIGN_IN", "MEMBERS_ONLY", "VIDEO_PROCESSING", "COPYRIGHT_REMOVAL", - "GEO_RESTRICTED", "VIDEO_UNAVAILABLE", "PRIVATE_VIDEO", "VIDEO_REMOVED", - "HTTP_403_FORBIDDEN" + "GEO_RESTRICTED", "VIDEO_UNAVAILABLE", "PRIVATE_VIDEO", "VIDEO_REMOVED" ] if error_code in unrecoverable_video_errors: logger.warning(f"Unrecoverable video error '{error_code}' during download. Skipping.") return 'handle_unrecoverable_video_error' + # A 403 Forbidden error is not retryable, regardless of policy. + if error_code == 'HTTP_403_FORBIDDEN': + logger.error("Download failed with HTTP 403 Forbidden. This is not retryable. Reporting failure and continuing loop.") + return 'report_failure_and_continue' + + # Now, apply the general policy for other download failures. if policy == 'retry_with_new_token': logger.info("Download failed. Policy is to retry with a new token. Branching to retry logic.") return 'retry_logic_for_download' - + if policy == 'stop_loop': logger.error(f"Download or probe failed with policy '{policy}'. 
Stopping loop by routing to fatal error handler.") return 'handle_fatal_error' @@ -1667,15 +1823,46 @@ def coalesce_token_data(get_token_result=None, retry_get_token_result=None): raise AirflowException("Could not find a successful token result from any attempt.") -@task +# FIX: Use 'all_done' trigger rule so this task runs even when upstream tasks fail. +# The branch operator will skip other branches, but this task needs to run +# when the branch points to it, regardless of the failed get_token task. +@task(trigger_rule='all_done') def handle_unrecoverable_video_error(**context): """ Handles errors for videos that are unavailable (private, removed, etc.). These are not system failures, so the URL is logged to a 'skipped' queue and the processing loop continues without marking the run as failed. """ - params = context['params'] ti = context['task_instance'] + + # Check if this task was actually selected by the branch operator. + # If it was skipped by the branch, we should not execute the logic. + # We can check if the branch task's result points to us. + dag_run = ti.get_dagrun() + + # Check multiple possible branch tasks that could route here + branch_task_ids = [ + 'initial_attempt.handle_bannable_error_branch', + 'retry_logic.handle_retry_failure_branch', + 'download_processing.handle_download_failure_branch' + ] + + was_selected_by_branch = False + for branch_task_id in branch_task_ids: + try: + branch_result = ti.xcom_pull(task_ids=branch_task_id) + if branch_result == 'handle_unrecoverable_video_error': + was_selected_by_branch = True + logger.info(f"Task was selected by branch '{branch_task_id}'") + break + except Exception: + pass + + if not was_selected_by_branch: + logger.info("Task was not selected by any branch operator. Skipping execution.") + raise AirflowSkipException("Not selected by branch operator") + + params = context['params'] url = params.get('url_to_process', 'unknown') # Collect error details from the failed task @@ -1717,22 +1904,50 @@ def handle_unrecoverable_video_error(**context): logger.info(f"Stored skipped result for URL '{url}' in '{skipped_queue}' and removed from progress queue.") except Exception as e: logger.error(f"Could not report skipped video to Redis: {e}", exc_info=True) + + # Return a marker so downstream tasks know this path was taken + return {'status': 'skipped', 'url': url} -@task +# FIX: Use 'all_done' trigger rule for the same reason as handle_unrecoverable_video_error +@task(trigger_rule='all_done') def report_bannable_and_continue(**context): """ Handles a bannable error by reporting it, but continues the loop as per the 'proceed_loop_under_manual_inspection' policy. """ - params = context['params'] ti = context['task_instance'] + + # Check if this task was actually selected by the branch operator. + dag_run = ti.get_dagrun() + + branch_task_ids = [ + 'initial_attempt.handle_bannable_error_branch', + 'retry_logic.handle_retry_failure_branch' + ] + + was_selected_by_branch = False + for branch_task_id in branch_task_ids: + try: + branch_result = ti.xcom_pull(task_ids=branch_task_id) + if branch_result == 'report_bannable_and_continue': + was_selected_by_branch = True + logger.info(f"Task was selected by branch '{branch_task_id}'") + break + except Exception: + pass + + if not was_selected_by_branch: + logger.info("Task was not selected by any branch operator. 
Skipping execution.") + raise AirflowSkipException("Not selected by branch operator") + + params = context['params'] url = params.get('url_to_process', 'unknown') # Collect error details error_details = {} - first_token_task_id = 'get_token' - retry_token_task_id = 'retry_get_token' + first_token_task_id = 'initial_attempt.get_token' + retry_token_task_id = 'retry_logic.retry_get_token' first_token_error = ti.xcom_pull(task_ids=first_token_task_id, key='error_details') retry_token_error = ti.xcom_pull(task_ids=retry_token_task_id, key='error_details') @@ -1779,6 +1994,11 @@ def report_bannable_and_continue(**context): logger.info(f"Stored bannable error for URL '{url}' in '{result_queue}' and '{fail_queue}'.") except Exception as e: logger.error(f"Could not report bannable error to Redis: {e}", exc_info=True) + + # Return a marker so downstream tasks know this path was taken + return {'status': 'bannable_reported', 'url': url} + + # ============================================================================= @@ -1802,8 +2022,7 @@ with DAG( 'account_pool_size': Param(None, type=["integer", "null"]), 'prepend_client_to_account': Param(True, type="boolean", title="[Worker Param] Prepend Client to Account", description="If True, prepends client and timestamp to account names in prefix mode."), 'machine_id': Param(None, type=["string", "null"]), - 'assigned_proxy_url': Param(None, type=["string", "null"], title="[Worker Param] Assigned Proxy URL", description="A specific proxy URL to use for the request, overriding the server's proxy pool logic."), - 'clients': Param('tv_simply', type="string", description="Comma-separated list of clients for token generation. e.g. mweb,tv,web_camoufox"), + 'assigned_proxy_url': Param(None, type=["string", "null"], title="[Manual/Worker Param] Assigned Proxy URL", description="For manual runs or sticky loops: a specific proxy URL to use, overriding the server's proxy pool logic."), 'timeout': Param(DEFAULT_TIMEOUT, type="integer"), 'output_path_template': Param("%(id)s.f%(format_id)s.%(ext)s", type="string", title="[Worker Param] Output Path Template", description="Output filename template for yt-dlp. It is highly recommended to include `%(format_id)s` to prevent filename collisions when downloading multiple formats."), 'on_auth_failure': Param( @@ -1820,21 +2039,23 @@ with DAG( title="[Worker Param] On Download Failure Policy", description="Policy for handling download or probe failures." ), - 'request_params_json': Param('{}', type="string", title="[Worker Param] Request Params JSON", description="JSON string with request parameters for the token service."), - 'language_code': Param('en-US', type="string", title="[Worker Param] Language Code", description="The language code (e.g., 'en-US', 'de-DE') to use for the YouTube request headers."), 'retry_on_probe_failure': Param(False, type="boolean"), 'skip_probe': Param(False, type="boolean", title="[Worker Param] Skip Probe", description="If True, skips the ffmpeg probe of downloaded files."), 'yt_dlp_cleanup_mode': Param(False, type="boolean", title="[Worker Param] yt-dlp Cleanup Mode", description="If True, creates a .empty file and deletes the original media file after successful download and probe."), 'auto_create_new_accounts_on_exhaustion': Param(True, type="boolean"), - 'fragment_retries': Param(2, type="integer", title="[Worker Param] Fragment Retries", description="Number of retries for a fragment before giving up. 
Default is 2 to fail fast on expired tokens."), 'delay_between_formats_s': Param(15, type="integer", title="[Worker Param] Delay Between Formats (s)", description="Delay in seconds between downloading each format when multiple formats are specified. A 22s wait may be effective for batch downloads, while 6-12s may suffice if cookies are refreshed regularly."), 'yt_dlp_test_mode': Param(False, type="boolean", title="[Worker Param] yt-dlp Test Mode", description="If True, runs yt-dlp with --test flag (dry run without downloading)."), - 'socket_timeout': Param(15, type="integer", title="[Worker Param] Socket Timeout", description="Timeout in seconds for socket operations."), 'download_format': Param( 'bestvideo[ext=mp4]+bestaudio[ext=m4a]/best[ext=mp4]/best', type="string", title="[Worker Param] Download Format", - description="Custom yt-dlp format string. Common presets: [1] 'bestvideo[ext=mp4]+bestaudio[ext=m4a]/best[ext=mp4]/best' (Default, best quality MP4). [2] '18-dashy/18,140-dashy/140,133-dashy/134-dashy/136-dashy/137-dashy/250-dashy/298-dashy/299-dashy' (Legacy formats). [3] '299-dashy/298-dashy/250-dashy/137-dashy/136-dashy/135-dashy/134-dashy/133-dashy' (High-framerate formats)." + description="Custom yt-dlp format string. Common presets: [1] 'bestvideo[ext=mp4]+bestaudio[ext=m4a]/best[ext=mp4]/best' (Default, best quality MP4). [2] '18,140-dashy/140,133-dashy/134-dashy/136-dashy/137-dashy/298-dashy/299-dashy' (Legacy formats). [3] '299-dashy/298-dashy/137-dashy/136-dashy/135-dashy/134-dashy/133-dashy' (High-framerate formats)." + ), + 'pass_without_formats_splitting': Param( + False, + type="boolean", + title="[Worker Param] Pass format string without splitting", + description="If True, passes the entire 'download_format' string to the download tool as-is. This is for complex selectors. Not compatible with 'aria-rpc' downloader." ), 'downloader': Param( 'cli', @@ -1846,15 +2067,14 @@ with DAG( 'aria_host': Param('172.17.0.1', type="string", title="Aria2c Host", description="For 'aria-rpc' downloader: Host of the aria2c RPC server."), 'aria_port': Param(6800, type="integer", title="Aria2c Port", description="For 'aria-rpc' downloader: Port of the aria2c RPC server."), 'aria_secret': Param('SQGCQPLVFQIASMPNPOJYLVGJYLMIDIXDXAIXOTX', type="string", title="Aria2c Secret", description="For 'aria-rpc' downloader: Secret token."), - 'yt_dlp_extra_args': Param( - '', - type=["string", "null"], - title="Extra yt-dlp arguments", - ), + # --- Unified JSON Config (passed from orchestrator) --- + 'ytdlp_config_json': Param('{}', type="string", title="[Internal] Unified JSON config from orchestrator."), # --- Manual Run / Internal Parameters --- 'manual_url_to_process': Param('iPwdia3gAnk', type=["string", "null"], title="[Manual Run] URL to Process", description="For manual runs, provide a single YouTube URL, or the special value 'PULL_FROM_QUEUE' to pull one URL from the Redis inbox. 
This is ignored if triggered by the dispatcher."), 'url_to_process': Param(None, type=["string", "null"], title="[Internal] URL from Dispatcher", description="This parameter is set by the dispatcher DAG and should not be used for manual runs."), 'worker_queue': Param(None, type=["string", "null"], title="[Internal] Worker Queue", description="This parameter is set by the dispatcher DAG and should not be used for manual runs."), + 'worker_index': Param(None, type=["integer", "null"], title="[Internal] Worker Index", description="A unique index for each parallel worker loop, assigned by the orchestrator."), + 'account_id': Param(None, type=["string", "null"], title="[Internal] Assigned Account ID", description="A specific account_id to use, making the account 'sticky' for a worker loop."), } ) as dag: initial_data = get_url_and_assign_account() @@ -1862,10 +2082,11 @@ with DAG( # --- Task Instantiation with TaskGroups --- # Main success/failure handlers (outside groups for clear end points) - fatal_error_task = handle_fatal_error() - report_failure_and_stop_task = report_failure_and_stop() - report_failure_task = report_failure_and_continue() - continue_loop_task = continue_processing_loop() + # These tasks are targets of branch operators that run after failures. + # They need trigger_rule='all_done' to run when the branch points to them. + fatal_error_task = handle_fatal_error.override(trigger_rule='all_done')() + report_failure_and_stop_task = report_failure_and_stop.override(trigger_rule='all_done')() + report_failure_task = report_failure_and_continue.override(trigger_rule='all_done')() unrecoverable_video_error_task = handle_unrecoverable_video_error() report_bannable_and_continue_task = report_bannable_and_continue() @@ -1877,10 +2098,7 @@ with DAG( ) # Tasks for the "stop_loop" policy on initial attempt - ban_and_report_immediately_task = ban_and_report_immediately.override(task_id='ban_and_report_immediately')( - initial_data=initial_data, - reason="Banned by Airflow worker (policy is stop_loop)" - ) + ban_and_report_immediately_task = ban_and_report_immediately.override(task_id='ban_and_report_immediately')() first_token_attempt >> initial_branch_task initial_branch_task >> [fatal_error_task, ban_and_report_immediately_task, unrecoverable_video_error_task, report_bannable_and_continue_task] @@ -1922,10 +2140,7 @@ with DAG( retry_branch_task = handle_retry_failure_branch.override(trigger_rule='one_failed')( task_id_to_check=retry_token_task.operator.task_id ) - ban_after_retry_report_task = ban_and_report_after_retry.override(task_id='ban_and_report_after_retry')( - retry_data=coalesced_retry_data, - reason="Banned by Airflow worker after failed retry" - ) + ban_after_retry_report_task = ban_and_report_after_retry.override(task_id='ban_and_report_after_retry', trigger_rule='all_done')() # Internal dependencies within retry group ban_and_retry_group >> after_ban_account_task @@ -2032,25 +2247,49 @@ with DAG( download_retry_token_task_result=new_token_data ) - # Final success task, fed by coalesced results - final_success_task = mark_url_as_success.override(task_id='final_success_report')( + # Instantiate final success task + final_success_task = mark_url_as_success( initial_data=initial_data, downloaded_file_paths=final_files, token_data=final_token ) - final_success_task >> continue_loop_task + + # Coalesce all paths that lead to the continuation of the loop. 
+ @task(trigger_rule='one_success') + def coalesce_all_continue_paths(success_result=None, unrecoverable_result=None, bannable_result=None, failure_result=None, fatal_error_result=None): + """ + Gathers results from all possible paths that can continue the processing loop. + Only the success path provides data; others provide None. + """ + if fatal_error_result and isinstance(fatal_error_result, dict) and fatal_error_result.get('status') == 'fatal_error': + logger.error("Fatal error detected in coalesce_all_continue_paths. Will not continue processing loop.") + return {'status': 'fatal_error'} + + if success_result: + return success_result + return None + + final_data_for_loop = coalesce_all_continue_paths( + success_result=final_success_task, + unrecoverable_result=unrecoverable_video_error_task, + bannable_result=report_bannable_and_continue_task, + failure_result=report_failure_task, + fatal_error_result=fatal_error_task, + ) + + # Final task to trigger the next DAG run + continue_processing_loop(token_data=final_data_for_loop) + + # Final success task, fed by coalesced results + final_files >> final_success_task + final_token >> final_success_task # --- DAG Dependencies between TaskGroups --- # Initial attempt can lead to retry logic or direct failure initial_branch_task >> [retry_logic_group, fatal_error_task, ban_and_report_immediately_task, unrecoverable_video_error_task, report_bannable_and_continue_task] - + # Ban and report immediately leads to failure reporting ban_and_report_immediately_task >> report_failure_and_stop_task - - # Unrecoverable/bannable errors that don't stop the loop should continue processing - unrecoverable_video_error_task >> continue_loop_task - report_bannable_and_continue_task >> continue_loop_task - report_failure_task >> continue_loop_task # Connect download failure branch to the new retry group download_branch_task >> [retry_logic_for_download_group, report_failure_task, fatal_error_task, unrecoverable_video_error_task] @@ -2058,7 +2297,7 @@ with DAG( # Connect success paths to the coalescing tasks download_task >> final_files retry_download_task >> final_files - + # The token from the initial auth path is one input to the final token coalesce coalesce_token_data(get_token_result=first_token_attempt, retry_get_token_result=retry_token_task) >> final_token # The token from the download retry path is the other input diff --git a/airflow/dags/ytdlp_ops_v02_dispatcher_dl.py b/airflow/dags/ytdlp_ops_v02_dispatcher_dl.py index c1cdd12..78614ef 100644 --- a/airflow/dags/ytdlp_ops_v02_dispatcher_dl.py +++ b/airflow/dags/ytdlp_ops_v02_dispatcher_dl.py @@ -27,34 +27,22 @@ DEFAULT_REDIS_CONN_ID = 'redis_default' @task(queue='queue-dl') def dispatch_job_to_dl_worker(**context): """ - Pulls one job payload from Redis, determines the current worker's dedicated queue, - and triggers the download worker DAG to process the job on that specific queue. + Triggers a v2 download worker for the 'profile-first' model. + The worker itself is responsible for locking a profile and finding a suitable task. + This dispatcher simply starts a worker process. 
""" ti = context['task_instance'] logger.info(f"Download Dispatcher task '{ti.task_id}' running on queue '{ti.queue}'.") - params = context['params'] - redis_conn_id = params['redis_conn_id'] - queue_name = params['queue_name'] - inbox_queue = f"{queue_name}_inbox" - - logger.info(f"Attempting to pull one job from Redis queue '{inbox_queue}'...") - client = _get_redis_client(redis_conn_id) - job_bytes = client.lpop(inbox_queue) - - if not job_bytes: - logger.info("Redis download inbox queue is empty. No work to dispatch. Skipping task.") - raise AirflowSkipException("Redis download inbox queue is empty. No work to dispatch.") - - job_data_str = job_bytes.decode('utf-8') - logger.info(f"Pulled job from the queue.") # Determine the worker-specific queue for affinity hostname = socket.gethostname() worker_queue = f"queue-dl-{hostname}" - logger.info(f"Running on worker '{hostname}'. Dispatching job to its dedicated queue '{worker_queue}'.") + logger.info(f"Running on worker '{hostname}'. Dispatching a new profile-first worker instance to its dedicated queue '{worker_queue}'.") - conf_to_pass = {**params, 'job_data': job_data_str, 'worker_queue': worker_queue} + # Pass all orchestrator params, but remove job_data as the worker finds its own job. + conf_to_pass = {**params, 'worker_queue': worker_queue} + conf_to_pass.pop('job_data', None) run_id = f"worker_run_dl_{context['dag_run'].run_id}_{context['ts_nodash']}_q_{worker_queue}" @@ -75,10 +63,12 @@ with DAG( tags=['ytdlp', 'worker', 'dispatcher', 'download'], is_paused_upon_creation=True, doc_md=""" - ### YT-DLP Download Job Dispatcher + ### YT-DLP v2 Download Worker Dispatcher (Profile-First) - This DAG dispatches a single download job to a download worker with a pinned queue. - It pulls a JSON payload from the `queue2_dl_inbox` Redis queue and triggers the `ytdlp_ops_v02_worker_per_url_dl` DAG. + This DAG dispatches a single "profile-first" download worker. + It does **not** pull a job from a queue. Instead, it triggers the `ytdlp_ops_v02_worker_per_url_dl` DAG, + which is responsible for locking an available download profile and then finding a matching task + from the `queue_dl_format_tasks` Redis list. 
""", render_template_as_native_obj=True, params={ diff --git a/airflow/dags/ytdlp_ops_v02_orchestrator_auth.py b/airflow/dags/ytdlp_ops_v02_orchestrator_auth.py index 7039cd2..02eb52e 100644 --- a/airflow/dags/ytdlp_ops_v02_orchestrator_auth.py +++ b/airflow/dags/ytdlp_ops_v02_orchestrator_auth.py @@ -24,6 +24,12 @@ import random import time import json +# --- Add project root to path to allow for yt-ops-client imports --- +import sys +# The yt-ops-client package is installed in editable mode in /app +if '/app' not in sys.path: + sys.path.insert(0, '/app') + # Import utility functions from utils.redis_utils import _get_redis_client @@ -45,6 +51,66 @@ DEFAULT_BUNCH_DELAY_S = 1 DEFAULT_YT_AUTH_SERVICE_IP = Variable.get("YT_AUTH_SERVICE_IP", default_var="172.17.0.1") DEFAULT_YT_AUTH_SERVICE_PORT = Variable.get("YT_AUTH_SERVICE_PORT", default_var=9080) +# Default ytdlp.json content for the unified config parameter +DEFAULT_YTDLP_CONFIG = { + "ytops": { + "force_renew": [], + "session_params": { + # "visitor_rotation_threshold": 250 + } + }, + "ytdlp_params": { + "debug_printtraffic": True, + "write_pages": True, + "verbose": True, + "no_color": True, + "ignoreerrors": True, + "noresizebuffer": True, + "buffersize": "4M", + "concurrent_fragments": 8, + "socket_timeout": 60, + "outtmpl": { + "default": "%(id)s.f%(format_id)s.%(ext)s" + }, + "restrictfilenames": True, + "updatetime": False, + "noplaylist": True, + "match_filter": "!is_live", + "writeinfojson": True, + "skip_download": True, + "allow_playlist_files": False, + "clean_infojson": True, + "getcomments": False, + "writesubtitles": False, + "writethumbnail": False, + "sleep_interval_requests": 0.75, + "parse_metadata": [ + ":(?P)" + ], + "extractor_args": { + "youtube": { + "player_client": ["tv_simply"], + "formats": ["duplicate"], + "jsc_trace": ["true"], + "pot_trace": ["true"], + "skip": ["translated_subs", "hls"] + }, + "youtubepot-bgutilhttp": { + "base_url": ["http://172.17.0.1:4416"] + } + }, + "noprogress": True, + "format_sort": [ + "res", + "ext:mp4:m4a" + ], + "remuxvideo": "mp4", + "nooverwrites": True, + "continuedl": True + } +} + + # --- Helper Functions --- def _check_application_queue(redis_client, queue_base_name: str) -> int: @@ -153,27 +219,21 @@ def orchestrate_workers_ignition_callable(**context): dag_run_id = context['dag_run'].run_id total_triggered = 0 - # --- Generate a consistent timestamped prefix for this orchestrator run --- - # This ensures all workers spawned from this run use the same set of accounts. - final_account_pool_prefix = params['account_pool'] - if params.get('prepend_client_to_account') and params.get('account_pool_size') is not None: - clients_str = params.get('clients', '') - primary_client = clients_str.split(',')[0].strip() if clients_str else 'unknown' - # Use a timestamp from the orchestrator's run for consistency - timestamp = datetime.now().strftime('%Y%m%d%H%M%S') - final_account_pool_prefix = f"{params['account_pool']}_{timestamp}_{primary_client}" - logger.info(f"Generated consistent account prefix for this run: '{final_account_pool_prefix}'") + # --- End of Inspection --- + + logger.info(f"Plan: Triggering {total_workers} total dispatcher runs in {len(bunches)} bunches. 
Each run will attempt to process one URL.") + + dag_run_id = context['dag_run'].run_id + total_triggered = 0 for i, bunch in enumerate(bunches): logger.info(f"--- Triggering Bunch {i+1}/{len(bunches)} (contains {len(bunch)} dispatcher(s)) ---") - for j, _ in enumerate(bunch): + for j, worker_index in enumerate(bunch): # Create a unique run_id for each dispatcher run run_id = f"dispatched_{dag_run_id}_{total_triggered}" # Pass all orchestrator params to the dispatcher, which will then pass them to the worker. conf_to_pass = {p: params[p] for p in params} - # Override account_pool with the generated prefix - conf_to_pass['account_pool'] = final_account_pool_prefix logger.info(f"Triggering dispatcher {j+1}/{len(bunch)} in bunch {i+1} (run {total_triggered + 1}/{total_workers}) (Run ID: {run_id})") logger.debug(f"Full conf for dispatcher run {run_id}: {conf_to_pass}") @@ -259,36 +319,33 @@ with DAG( 'delay_between_bunches_s': Param(DEFAULT_BUNCH_DELAY_S, type="integer", description="Delay in seconds between starting each bunch."), 'skip_if_queue_empty': Param(False, type="boolean", title="[Ignition Control] Skip if Queue Empty", description="If True, the orchestrator will not start any dispatchers if the application's work queue is empty."), + # --- Unified Worker Configuration --- + 'ytdlp_config_json': Param( + json.dumps(DEFAULT_YTDLP_CONFIG, indent=2), + type="string", + title="[Worker Param] Unified yt-dlp JSON Config", + description="A JSON string containing all parameters for both yt-ops-server and the yt-dlp downloaders. This is the primary way to configure workers.", + **{'ui_widget': 'json', 'multi_line': True} + ), + # --- Worker Passthrough Parameters --- - 'on_bannable_failure': Param( - 'proceed_loop_under_manual_inspection', - type="string", - enum=['stop_loop', 'retry_with_new_account', 'retry_without_ban', 'retry_and_ban_account_only', 'retry_on_connection_error', 'proceed_loop_under_manual_inspection', 'stop_loop_on_auth_proceed_on_download_error'], - title="[Worker Param] On Bannable Failure Policy", - description="Policy for a worker when a bannable error occurs. " - "'stop_loop': Ban the account, mark URL as failed, and stop the worker's loop on any failure (auth or download). " - "'retry_with_new_account': Ban the failed account, retry ONCE with a new account. If retry fails, ban the second account and proxy, then stop." - "'retry_on_connection_error': If a connection error (e.g. SOCKS timeout) occurs, retry with a new account but do NOT ban the first account/proxy. If retry fails, stop the loop without banning." - "'proceed_loop_under_manual_inspection': **BEWARE: MANUAL SUPERVISION REQUIRED.** Marks the URL as failed but continues the processing loop. Use this only when you can manually intervene by pausing the dispatcher DAG or creating a lock file (`/opt/airflow/inputfiles/AIRFLOW.PREVENT_URL_PULL.lockfile`) to prevent a runaway failure loop." - "'stop_loop_on_auth_proceed_on_download_error': **(Default)** Stops the loop on an authentication/token error (like 'stop_loop'), but continues the loop on a download/probe error (like 'proceed...')." - ), - 'request_params_json': Param('{}', type="string", title="[Worker Param] Request Params JSON", description="JSON string with per-request parameters to override server defaults. 
Can be a full JSON object or comma-separated key=value pairs (e.g., 'session_params.location=DE,ytdlp_params.skip_cache=true')."), - 'language_code': Param('en-US', type="string", title="[Worker Param] Language Code", description="The language code (e.g., 'en-US', 'de-DE') to use for the YouTube request headers."), + # --- V2 Profile Management Parameters --- + 'redis_env': Param("sim_auth", type="string", title="[V2 Profiles] Redis Environment", description="The environment for v2 profile management (e.g., 'sim_auth'). Determines the Redis key prefix."), + 'profile_prefix': Param("auth_user", type="string", title="[V2 Profiles] Profile Prefix", description="The prefix for auth profiles that workers should attempt to lock."), + + # --- Worker Passthrough Parameters --- + 'on_bannable_failure': Param('proceed_loop_under_manual_inspection', type="string", title="DEPRECATED: Worker handles failures internally."), 'redis_conn_id': Param(DEFAULT_REDIS_CONN_ID, type="string", description="[Worker Param] Airflow Redis connection ID."), - 'clients': Param( - 'tv_simply', - type="string", - title="[Worker Param] Clients", - description="[Worker Param] Comma-separated list of clients for token generation. Full list: web, web_safari, web_embedded, web_music, web_creator, mweb, web_camoufox, web_safari_camoufox, web_embedded_camoufox, web_music_camoufox, web_creator_camoufox, mweb_camoufox, android, android_music, android_creator, android_vr, ios, ios_music, ios_creator, tv, tv_simply, tv_embedded. See DAG documentation for details." - ), - 'account_pool': Param('ytdlp_account', type="string", description="[Worker Param] Account pool prefix or comma-separated list."), - 'account_pool_size': Param(10, type=["integer", "null"], description="[Worker Param] If using a prefix for 'account_pool', this specifies the number of accounts to generate (e.g., 10 for 'prefix_01' through 'prefix_10'). Required when using a prefix."), - 'prepend_client_to_account': Param(True, type="boolean", title="[Worker Param] Prepend Client to Account", description="If True, prepends client and timestamp to account names in prefix mode. Format: prefix_YYYYMMDDHHMMSS_client_XX."), 'service_ip': Param(DEFAULT_YT_AUTH_SERVICE_IP, type="string", description="[Worker Param] IP of the ytdlp-ops-server. Default is from Airflow variable YT_AUTH_SERVICE_IP or hardcoded."), 'service_port': Param(DEFAULT_YT_AUTH_SERVICE_PORT, type="integer", description="[Worker Param] Port of the Envoy load balancer. 
Default is from Airflow variable YT_AUTH_SERVICE_PORT or hardcoded."), 'machine_id': Param("ytdlp-ops-airflow-service", type="string", description="[Worker Param] Identifier for the client machine."), - 'assigned_proxy_url': Param(None, type=["string", "null"], title="[Worker Param] Assigned Proxy URL", description="If provided, forces the token service to use this specific proxy for the request."), - 'auto_create_new_accounts_on_exhaustion': Param(True, type="boolean", description="[Worker Param] If True and all accounts in a prefix-based pool are exhausted, create a new one automatically."), + + # --- DEPRECATED PARAMS --- + 'account_pool': Param('ytdlp_account', type="string", description="DEPRECATED: Use profile_prefix instead."), + 'account_pool_size': Param(10, type=["integer", "null"], description="DEPRECATED: Pool size is managed in Redis."), + 'prepend_client_to_account': Param(True, type="boolean", description="DEPRECATED"), + 'assigned_proxy_url': Param(None, type=["string", "null"], description="DEPRECATED: Proxy is determined by the locked profile."), + 'auto_create_new_accounts_on_exhaustion': Param(True, type="boolean", description="DEPRECATED"), } ) as dag: diff --git a/airflow/dags/ytdlp_ops_v02_orchestrator_dl.py b/airflow/dags/ytdlp_ops_v02_orchestrator_dl.py index 54c7499..e478bbc 100644 --- a/airflow/dags/ytdlp_ops_v02_orchestrator_dl.py +++ b/airflow/dags/ytdlp_ops_v02_orchestrator_dl.py @@ -24,6 +24,12 @@ import random import time import json +# --- Add project root to path to allow for yt-ops-client imports --- +import sys +# The yt-ops-client package is installed in editable mode in /app +if '/app' not in sys.path: + sys.path.insert(0, '/app') + # Import utility functions from utils.redis_utils import _get_redis_client @@ -242,6 +248,11 @@ with DAG( 'delay_between_workers_s': Param(DEFAULT_WORKER_DELAY_S, type="integer", description="Delay in seconds between starting each dispatcher within a bunch."), 'delay_between_bunches_s': Param(DEFAULT_BUNCH_DELAY_S, type="integer", description="Delay in seconds between starting each bunch."), 'skip_if_queue_empty': Param(False, type="boolean", title="[Ignition Control] Skip if Queue Empty", description="If True, the orchestrator will not start any dispatchers if the application's work queue is empty."), + + # --- V2 Profile Management Parameters --- + 'redis_env': Param("sim_download", type="string", title="[V2 Profiles] Redis Environment", description="The environment for v2 profile management (e.g., 'sim_download'). Determines the Redis key prefix."), + 'profile_prefix': Param("download_user", type="string", title="[V2 Profiles] Profile Prefix", description="The prefix for download profiles that workers should attempt to lock."), + 'redis_conn_id': Param(DEFAULT_REDIS_CONN_ID, type="string", description="[Worker Param] Airflow Redis connection ID."), 'clients': Param('mweb,web_camoufox,tv', type="string", title="[Worker Param] Clients", description="Comma-separated list of clients for token generation. e.g. 
mweb,tv,web_camoufox"), @@ -250,16 +261,17 @@ with DAG( 'yt_dlp_test_mode': Param(False, type="boolean", title="[Worker Param] yt-dlp Test Mode", description="If True, runs yt-dlp with --test flag (dry run without downloading)."), 'skip_probe': Param(True, type="boolean", title="[Worker Param] Skip Probe", description="If True, skips the ffmpeg probe of downloaded files."), 'yt_dlp_cleanup_mode': Param(False, type="boolean", title="[Worker Param] yt-dlp Cleanup Mode", description="If True, creates a .empty file and deletes the original media file after successful download and probe."), - 'fragment_retries': Param(2, type="integer", title="[Worker Param] Fragment Retries", description="Number of retries for a fragment before giving up."), - 'limit_rate': Param('5M', type=["string", "null"], title="[Worker Param] Limit Rate", description="Download speed limit (e.g., 50K, 4.2M)."), - 'socket_timeout': Param(15, type="integer", title="[Worker Param] Socket Timeout", description="Timeout in seconds for socket operations."), - 'min_sleep_interval': Param(5, type="integer", title="[Worker Param] Min Sleep Interval", description="Minimum time to sleep between downloads (seconds)."), - 'max_sleep_interval': Param(10, type="integer", title="[Worker Param] Max Sleep Interval", description="Maximum time to sleep between downloads (seconds)."), 'download_format': Param( 'bestvideo[ext=mp4]+bestaudio[ext=m4a]/best[ext=mp4]/best', type="string", title="[Worker Param] Download Format", - description="Custom yt-dlp format string. Common presets: [1] 'bestvideo[ext=mp4]+bestaudio[ext=m4a]/best[ext=mp4]/best' (Default, best quality MP4). [2] '18-dashy/18,140-dashy/140,133-dashy/134-dashy/136-dashy/137-dashy/250-dashy/298-dashy/299-dashy' (Legacy formats). [3] '299-dashy/298-dashy/250-dashy/137-dashy/136-dashy/135-dashy/134-dashy/133-dashy' (High-framerate formats)." + description="Custom yt-dlp format string. Common presets: [1] 'bestvideo[ext=mp4]+bestaudio[ext=m4a]/best[ext=mp4]/best' (Default, best quality MP4). [2] '18,140-dashy/140,133-dashy/134-dashy/136-dashy/137-dashy/298-dashy/299-dashy' (Legacy formats). [3] '299-dashy/298-dashy/137-dashy/136-dashy/135-dashy/134-dashy/133-dashy' (High-framerate formats)." + ), + 'pass_without_formats_splitting': Param( + False, + type="boolean", + title="[Worker Param] Pass format string without splitting", + description="If True, passes the entire 'download_format' string to the download tool as-is. This is for complex selectors. Not compatible with 'aria-rpc' downloader." ), 'downloader': Param( 'cli', @@ -272,7 +284,7 @@ with DAG( 'aria_port': Param(6800, type="integer", title="[Worker Param] Aria2c Port", description="For 'aria-rpc' downloader: Port of the aria2c RPC server. Can be set via Airflow Variable 'YTDLP_ARIA_PORT'."), 'aria_secret': Param('SQGCQPLVFQIASMPNPOJYLVGJYLMIDIXDXAIXOTX', type="string", title="[Worker Param] Aria2c Secret", description="For 'aria-rpc' downloader: Secret token. Can be set via Airflow Variable 'YTDLP_ARIA_SECRET'."), 'yt_dlp_extra_args': Param( - '--no-part --restrict-filenames', + '--verbose --no-resize-buffer --buffer-size 4M --fragment-retries 2 --concurrent-fragments 8 --socket-timeout 15 --sleep-interval 5 --max-sleep-interval 10 --no-part --restrict-filenames', type=["string", "null"], title="[Worker Param] Extra yt-dlp arguments", description="Extra command-line arguments for yt-dlp during download." 
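The download orchestrator above pairs a comma-separated 'download_format' preset with a 'pass_without_formats_splitting' switch. The parameter descriptions imply that, by default, the format string is split on commas into independent selectors that are downloaded one at a time (keeping '/' fallbacks and '+' merges inside each selector), and that the switch forwards the whole string untouched for complex selectors. A minimal sketch of that splitting rule under those assumptions follows; the helper name is hypothetical and not part of this patch:

    def split_format_selectors(download_format: str, pass_without_splitting: bool) -> list[str]:
        """Split a comma-separated yt-dlp format string into independent selectors.

        Assumption (not part of this patch): top-level commas separate selectors that
        are downloaded one at a time, while '/' (fallback) and '+' (merge) stay inside
        a single selector. No special handling is attempted for complex selectors;
        those are the case 'pass_without_formats_splitting' exists for.
        """
        if pass_without_splitting:
            return [download_format]
        return [part.strip() for part in download_format.split(',') if part.strip()]

    # With preset [2] from the DAG params:
    # split_format_selectors('18,140-dashy/140,133-dashy/134-dashy', False)
    # -> ['18', '140-dashy/140', '133-dashy/134-dashy']

Under this reading, each returned selector would be handed to the downloader separately, which is what the per-format delay parameters in the worker DAG suggest.
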
diff --git a/airflow/dags/ytdlp_ops_v02_worker_per_url_auth.py b/airflow/dags/ytdlp_ops_v02_worker_per_url_auth.py index daa81c0..4b6a9f6 100644 --- a/airflow/dags/ytdlp_ops_v02_worker_per_url_auth.py +++ b/airflow/dags/ytdlp_ops_v02_worker_per_url_auth.py @@ -24,7 +24,6 @@ from airflow.operators.dummy import DummyOperator from airflow.utils.dates import days_ago from airflow.utils.task_group import TaskGroup from airflow.api.common.trigger_dag import trigger_dag -import copy from datetime import datetime, timedelta import concurrent.futures import json @@ -50,63 +49,12 @@ from thrift.transport.TTransport import TTransportException # Configure logging logger = logging.getLogger(__name__) - -# --- Client Stats Helper --- - -def _update_client_stats(redis_client, clients_str: str, status: str, url: str, machine_id: str, dag_run_id: str): - """Updates success/failure statistics for a client type in Redis.""" - if not clients_str: - logger.warning("Cannot update client stats: 'clients' string is empty.") - return - - # Assumption: The service tries clients in the order provided. - # We attribute the result to the first client in the list. - primary_client = clients_str.split(',')[0].strip() - if not primary_client: - logger.warning("Cannot update client stats: could not determine primary client.") - return - - stats_key = "client_stats" - - try: - # Using a pipeline with WATCH for safe concurrent updates. - with redis_client.pipeline() as pipe: - pipe.watch(stats_key) - - current_stats_json = redis_client.hget(stats_key, primary_client) - stats = {} - if current_stats_json: - try: - stats = json.loads(current_stats_json) - except json.JSONDecodeError: - logger.warning(f"Could not parse existing stats for client '{primary_client}'. Resetting stats.") - stats = {} - - stats.setdefault('success_count', 0) - stats.setdefault('failure_count', 0) - - details = { - 'timestamp': time.time(), 'url': url, - 'machine_id': machine_id, 'dag_run_id': dag_run_id, - } - - if status == 'success': - stats['success_count'] += 1 - stats['latest_success'] = details - elif status == 'failure': - stats['failure_count'] += 1 - stats['latest_failure'] = details - - pipe.multi() - pipe.hset(stats_key, primary_client, json.dumps(stats)) - pipe.execute() - - logger.info(f"Successfully updated '{status}' stats for client '{primary_client}'.") - - except redis.exceptions.WatchError: - logger.warning(f"WatchError updating stats for client '{primary_client}'. Another process updated it. Skipping this update.") - except Exception as e: - logger.error(f"Failed to update client stats for '{primary_client}': {e}", exc_info=True) +# ytops_client imports for v2 profile management +try: + from ytops_client.profile_manager_tool import ProfileManager, format_duration, format_timestamp +except ImportError as e: + logger.critical(f"Could not import ytops_client modules: {e}. Ensure yt-ops-client package is installed correctly in Airflow's environment.") + raise # Default settings from Airflow Variables or hardcoded fallbacks @@ -192,60 +140,6 @@ def _extract_video_id(url): return match.group(1) return None -def _get_account_pool(params: dict) -> list: - """ - Gets the list of accounts to use for processing, filtering out banned/resting accounts. - Supports explicit list, prefix-based generation, and single account modes. 
- """ - account_pool_str = params.get('account_pool', 'default_account') - accounts = [] - is_prefix_mode = False - - if ',' in account_pool_str: - accounts = [acc.strip() for acc in account_pool_str.split(',') if acc.strip()] - else: - prefix = account_pool_str - pool_size_param = params.get('account_pool_size') - if pool_size_param is not None: - is_prefix_mode = True - pool_size = int(pool_size_param) - - # The orchestrator now generates the full prefix if prepend_client_to_account is True. - # The worker just appends the numbers. - accounts = [f"{prefix}_{i:02d}" for i in range(1, pool_size + 1)] - else: - accounts = [prefix] - - if not accounts: - raise AirflowException("Initial account pool is empty.") - - redis_conn_id = params.get('redis_conn_id', DEFAULT_REDIS_CONN_ID) - try: - redis_client = _get_redis_client(redis_conn_id) - active_accounts = [] - for account in accounts: - status_bytes = redis_client.hget(f"account_status:{account}", "status") - status = status_bytes.decode('utf-8') if status_bytes else "ACTIVE" - if status not in ['BANNED'] and 'RESTING' not in status: - active_accounts.append(account) - - if not active_accounts and accounts: - auto_create = params.get('auto_create_new_accounts_on_exhaustion', False) - if auto_create and is_prefix_mode: - new_account_id = f"{account_pool_str}-auto-{str(uuid.uuid4())[:8]}" - logger.warning(f"Account pool exhausted. Auto-creating new account: '{new_account_id}'") - active_accounts.append(new_account_id) - else: - raise AirflowException("All accounts in the configured pool are currently exhausted.") - accounts = active_accounts - except Exception as e: - logger.error(f"Could not filter accounts from Redis. Using unfiltered pool. Error: {e}", exc_info=True) - - if not accounts: - raise AirflowException("Account pool is empty after filtering.") - - logger.info(f"Final active account pool with {len(accounts)} accounts.") - return accounts @task def list_available_formats(token_data: dict, **context): @@ -306,15 +200,61 @@ def list_available_formats(token_data: dict, **context): # TASK DEFINITIONS (TaskFlow API) # ============================================================================= +def _resolve_formats(info_json_path: str, format_selector: str, logger) -> list[str]: + """Uses yt-dlp to resolve a format selector into a list of specific format IDs.""" + import subprocess + import shlex + + if not format_selector: + return [] + + try: + cmd = [ + 'yt-dlp', '--print', 'format_id', + '-f', format_selector, + '--load-info-json', info_json_path, + ] + + copy_paste_cmd = ' '.join(shlex.quote(arg) for arg in cmd) + logger.info(f"Resolving format selector '{format_selector}' with command: {copy_paste_cmd}") + + process = subprocess.run(cmd, capture_output=True, text=True, timeout=60) + + if process.stderr: + logger.info(f"yt-dlp format resolver STDERR:\n{process.stderr}") + + if process.returncode != 0: + logger.error(f"yt-dlp format resolver failed with exit code {process.returncode}") + return [] + + output_ids = [fid for fid in process.stdout.strip().split('\n') if fid] + final_ids = [] + for fid in output_ids: + final_ids.extend(fid.split('+')) + + logger.info(f"Resolved selector '{format_selector}' to {len(final_ids)} format(s): {final_ids}") + return final_ids + + except Exception as e: + logger.error(f"An error occurred while resolving format selector: {e}", exc_info=True) + return [] + + @task -def get_url_and_assign_account(**context): +def get_url_and_lock_profile(**context): """ - Gets the URL to process from the DAG run 
configuration and assigns an active account. + Gets the URL to process, then locks an available auth profile from the Redis pool. This is the first task in the pinned-worker DAG. """ params = context['params'] ti = context['task_instance'] + # Log the active policies + auth_policy = params.get('on_bannable_failure', 'not_set') + logger.info(f"--- Worker Policies ---") + logger.info(f" Auth Failure Policy: {auth_policy}") + logger.info(f"-----------------------") + # --- Worker Pinning Verification --- # This is a safeguard against a known Airflow issue where clearing a task # can cause the task_instance_mutation_hook to be skipped, breaking pinning. @@ -384,603 +324,285 @@ def get_url_and_assign_account(**context): except Exception as e: logger.error(f"Could not mark URL as in-progress in Redis: {e}", exc_info=True) - # Account assignment logic is the same as before. - account_id = random.choice(_get_account_pool(params)) - logger.info(f"Selected account '{account_id}' for this run.") + # V2 Profile Locking + redis_conn_id = params['redis_conn_id'] + redis_env = params['redis_env'] + profile_prefix = params['profile_prefix'] + + try: + redis_hook = _get_redis_client(redis_conn_id, return_hook=True) + key_prefix = f"{redis_env}_profile_mgmt_" + pm = ProfileManager(redis_hook=redis_hook, key_prefix=key_prefix) + logger.info(f"Initialized ProfileManager for env '{redis_env}' (Redis key prefix: '{key_prefix}')") + except Exception as e: + raise AirflowException(f"Failed to initialize ProfileManager: {e}") + + owner_id = f"airflow_auth_worker_{context['dag_run'].run_id}" + locked_profile = None + logger.info(f"Attempting to lock a profile with owner '{owner_id}' and prefix '{profile_prefix}'...") + + lock_attempts = 0 + while not locked_profile: + locked_profile = pm.lock_profile(owner=owner_id, profile_prefix=profile_prefix) + if not locked_profile: + logger.info("No auth profiles available to lock. 
Waiting for 15 seconds...") + time.sleep(15) + lock_attempts += 1 + if lock_attempts > 20: # 5 minutes timeout + raise AirflowException("Timed out waiting to lock an auth profile.") + + logger.info(f"Successfully locked profile: {locked_profile['name']}") return { 'url_to_process': url_to_process, - 'account_id': account_id, - 'accounts_tried': [account_id], + 'locked_profile': locked_profile, } @task def get_token(initial_data: dict, **context): - """Makes a single attempt to get a token by calling the ytops-client get-info tool.""" - import subprocess - import shlex - + """Makes a single attempt to get a token by calling the Thrift service directly.""" ti = context['task_instance'] params = context['params'] - account_id = initial_data['account_id'] + locked_profile = initial_data['locked_profile'] + account_id = locked_profile['name'] + assigned_proxy_url = locked_profile['proxy'] url = initial_data['url_to_process'] info_json_dir = os.path.join(Variable.get('DOWNLOADS_TEMP', '/opt/airflow/downloadfiles'), 'videos', 'in-progress') host, port = params['service_ip'], int(params['service_port']) machine_id = params.get('machine_id') or socket.gethostname() - clients = params.get('clients') - request_params_json = params.get('request_params_json') - language_code = params.get('language_code') - assigned_proxy_url = params.get('assigned_proxy_url') - if language_code: - try: - params_dict = json.loads(request_params_json) - if not params_dict: - params_dict = copy.deepcopy(DEFAULT_REQUEST_PARAMS) - - logger.info(f"Setting language for request: {language_code}") - if 'session_params' not in params_dict: - params_dict['session_params'] = {} - params_dict['session_params']['lang'] = language_code - request_params_json = json.dumps(params_dict) - except (json.JSONDecodeError, TypeError): - logger.warning("Could not parse request_params_json as JSON. Treating as key=value pairs and appending language code.") - lang_kv = f"session_params.lang={language_code}" - if request_params_json: - request_params_json += f",{lang_kv}" - else: - request_params_json = lang_kv + # The unified JSON config is now the primary source of parameters. + request_params_json = params.get('ytdlp_config_json', '{}') + clients = None # This will be read from the JSON config on the server side. video_id = _extract_video_id(url) timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") job_dir_name = f"{timestamp}-{video_id or 'unknown'}" job_dir_path = os.path.join(info_json_dir, job_dir_name) os.makedirs(job_dir_path, exist_ok=True) - info_json_filename = f"info_{video_id or 'unknown'}_{account_id}_{timestamp}.json" - info_json_path = os.path.join(job_dir_path, info_json_filename) + info_json_path = os.path.join(job_dir_path, f"info_{video_id or 'unknown'}_{account_id}_{timestamp}.json") - cmd = [ - 'ytops-client', 'get-info', - '--host', host, - '--port', str(port), - '--profile', account_id, - '--output', info_json_path, - '--print-proxy', - '--verbose', - '--log-return', - ] + # Save the received JSON config to the job directory for the download worker. + ytdlp_config_path = os.path.join(job_dir_path, 'ytdlp.json') + try: + with open(ytdlp_config_path, 'w', encoding='utf-8') as f: + # Pretty-print the JSON for readability + config_data = json.loads(request_params_json) + json.dump(config_data, f, indent=2) + logger.info(f"Saved ytdlp config to {ytdlp_config_path}") + except (IOError, json.JSONDecodeError) as e: + logger.error(f"Failed to save ytdlp.json config: {e}") + # Continue anyway, but download worker may fail. 
+ ytdlp_config_path = None - if clients: - cmd.extend(['--client', clients]) - if machine_id: - cmd.extend(['--machine-id', machine_id]) - if request_params_json and request_params_json != '{}': - cmd.extend(['--request-params-json', request_params_json]) - if assigned_proxy_url: - cmd.extend(['--assigned-proxy-url', assigned_proxy_url]) - - cmd.append(url) - logger.info(f"--- Attempting to get token for URL '{url}' with account '{account_id}' (Clients: {clients}) ---") - copy_paste_cmd = ' '.join(shlex.quote(arg) for arg in cmd) - logger.info(f"Executing command: {copy_paste_cmd}") + client, transport = None, None + try: + timeout = int(params.get('timeout', DEFAULT_TIMEOUT)) + client, transport = _get_thrift_client(host, port, timeout) - process = subprocess.run(cmd, capture_output=True, text=True, timeout=int(params.get('timeout', DEFAULT_TIMEOUT))) + airflow_log_context = AirflowLogContext( + taskId=ti.task_id, + runId=ti.run_id, + tryNumber=ti.try_number + ) - if process.stdout: - logger.info(f"ytops-client STDOUT:\n{process.stdout}") - if process.stderr: - logger.info(f"ytops-client STDERR:\n{process.stderr}") - - if process.returncode != 0: - error_message = "ytops-client failed. See logs for details." - # Try to find a more specific error message from the Thrift client's output - thrift_error_match = re.search(r'A Thrift error occurred: (.*)', process.stderr) - if thrift_error_match: - error_message = thrift_error_match.group(1).strip() - else: # Fallback to old line-by-line parsing - for line in reversed(process.stderr.strip().split('\n')): - if 'ERROR' in line or 'Thrift error' in line or 'Connection to server failed' in line: - error_message = line.strip() - break + logger.info(f"--- Attempting to get token for URL '{url}' with account '{account_id}' (Clients: {clients}, Proxy: {assigned_proxy_url or 'any'}) ---") - # Determine error code for branching logic - error_code = 'GET_INFO_CLIENT_FAIL' - stderr_lower = process.stderr.lower() + token_data = client.getOrRefreshToken( + accountId=account_id, + updateType=TokenUpdateMode.AUTO, + url=url, + clients=clients, + machineId=machine_id, + airflowLogContext=airflow_log_context, + requestParamsJson=request_params_json, + assignedProxyUrl=assigned_proxy_url + ) - # These patterns should match the error codes from PBUserException and others - error_patterns = { - "BOT_DETECTED": ["bot_detected"], - "BOT_DETECTION_SIGN_IN_REQUIRED": ["bot_detection_sign_in_required"], - "TRANSPORT_ERROR": ["connection to server failed"], - "PRIVATE_VIDEO": ["private video"], - "COPYRIGHT_REMOVAL": ["copyright"], - "GEO_RESTRICTED": ["in your country"], - "VIDEO_REMOVED": ["video has been removed"], - "VIDEO_UNAVAILABLE": ["video unavailable"], - "MEMBERS_ONLY": ["members-only"], - "AGE_GATED_SIGN_IN": ["sign in to confirm your age"], - "VIDEO_PROCESSING": ["processing this video"], + # --- Log server-side details for debugging --- + if hasattr(token_data, 'serverVersionInfo') and token_data.serverVersionInfo: + logger.info(f"--- Server Version Info ---\n{token_data.serverVersionInfo}") + + if hasattr(token_data, 'requestSummary') and token_data.requestSummary: + try: + summary_data = json.loads(token_data.requestSummary) + summary_text = summary_data.get('summary', 'Not available.') + prefetch_log = summary_data.get('prefetch_log', 'Not available.') + nodejs_log = summary_data.get('nodejs_log', 'Not available.') + ytdlp_log = summary_data.get('ytdlp_log', 'Not available.') + + logger.info(f"--- Request Summary ---\n{summary_text}") + logger.info(f"--- 
Prefetch Log ---\n{prefetch_log}") + logger.info(f"--- Node.js Log ---\n{nodejs_log}") + logger.info(f"--- yt-dlp Log ---\n{ytdlp_log}") + except (json.JSONDecodeError, AttributeError): + logger.info(f"--- Raw Request Summary (could not parse JSON) ---\n{token_data.requestSummary}") + + if hasattr(token_data, 'communicationLogPaths') and token_data.communicationLogPaths: + logger.info("--- Communication Log Paths on Server ---") + for log_path in token_data.communicationLogPaths: + logger.info(f" - {log_path}") + # --- End of server-side logging --- + + if not token_data or not token_data.infoJson: + raise AirflowException("Thrift service did not return valid info.json data.") + + # Save info.json to file + with open(info_json_path, 'w', encoding='utf-8') as f: + f.write(token_data.infoJson) + + proxy = token_data.socks + + # Rename file with proxy + final_info_json_path = info_json_path + if proxy: + sanitized_proxy = proxy.replace('://', '---') + new_filename = f"info_{video_id or 'unknown'}_{account_id}_{timestamp}_proxy_{sanitized_proxy}.json" + new_path = os.path.join(job_dir_path, new_filename) + try: + os.rename(info_json_path, new_path) + final_info_json_path = new_path + logger.info(f"Renamed info.json to include proxy: {new_path}") + except OSError as e: + logger.error(f"Failed to rename info.json to include proxy: {e}. Using original path.") + + return { + 'info_json_path': final_info_json_path, + 'job_dir_path': job_dir_path, + 'socks_proxy': proxy, + 'ytdlp_command': None, + 'successful_account_id': account_id, + 'original_url': url, + 'ytdlp_config_path': ytdlp_config_path, + 'ytdlp_config_json': request_params_json, + # Pass locked profile through for unlock/activity tasks + 'locked_profile': locked_profile, } - for code, patterns in error_patterns.items(): - if any(p in stderr_lower for p in patterns): - error_code = code - break # Found a match, stop searching + except (PBServiceException, PBUserException) as e: + error_message = e.message or "Unknown Thrift error" + error_code = getattr(e, 'errorCode', 'THRIFT_ERROR') + # If a "Video unavailable" error mentions rate-limiting, it's a form of bot detection. + if error_code == 'VIDEO_UNAVAILABLE' and 'rate-limited' in error_message.lower(): + logger.warning("Re-classifying rate-limit-related 'VIDEO_UNAVAILABLE' error as 'BOT_DETECTED'.") + error_code = 'BOT_DETECTED' + + unrecoverable_video_errors = [ + "AGE_GATED_SIGN_IN", "MEMBERS_ONLY", "VIDEO_PROCESSING", "COPYRIGHT_REMOVAL", + "GEO_RESTRICTED", "VIDEO_UNAVAILABLE", "PRIVATE_VIDEO", "VIDEO_REMOVED" + ] + + if error_code in unrecoverable_video_errors: + error_details = { + 'error_message': error_message, + 'error_code': error_code, + 'proxy_url': None + } + ti.xcom_push(key='error_details', value=error_details) + logger.warning(f"Unrecoverable video error '{error_code}' - {error_message}. 
Marking for skip without failing the task.") + return {'status': 'unrecoverable_video_error'} + else: + logger.error(f"Thrift error getting token: {error_code} - {error_message}") + + error_details = { + 'error_message': error_message, + 'error_code': error_code, + 'proxy_url': None + } + ti.xcom_push(key='error_details', value=error_details) + raise AirflowException(f"ytops-client get-info failed: {error_message}") + except TTransportException as e: + logger.error(f"Thrift transport error: {e}", exc_info=True) error_details = { - 'error_message': error_message, - 'error_code': error_code, + 'error_message': f"Thrift transport error: {e}", + 'error_code': 'TRANSPORT_ERROR', 'proxy_url': None } ti.xcom_push(key='error_details', value=error_details) - raise AirflowException(f"ytops-client get-info failed: {error_message}") - - proxy = None - proxy_match = re.search(r"Proxy used: (.*)", process.stderr) - if proxy_match: - proxy = proxy_match.group(1).strip() - - # Rename the info.json to include the proxy for the download worker - final_info_json_path = info_json_path - if proxy: - # Sanitize for filename: replace '://' which is invalid in paths. Colons are usually fine. - sanitized_proxy = proxy.replace('://', '---') - - new_filename = f"info_{video_id or 'unknown'}_{account_id}_{timestamp}_proxy_{sanitized_proxy}.json" - new_path = os.path.join(job_dir_path, new_filename) - try: - os.rename(info_json_path, new_path) - final_info_json_path = new_path - logger.info(f"Renamed info.json to include proxy: {new_path}") - except OSError as e: - logger.error(f"Failed to rename info.json to include proxy: {e}. Using original path.") - - return { - 'info_json_path': final_info_json_path, - 'job_dir_path': job_dir_path, - 'socks_proxy': proxy, - 'ytdlp_command': None, - 'successful_account_id': account_id, - 'original_url': url, - 'clients': clients, - } - -@task.branch -def handle_bannable_error_branch(task_id_to_check: str, **context): - """ - Inspects a failed task and routes to retry logic if the error is retryable. - Routes to a fatal error handler for non-retryable infrastructure issues. - """ - ti = context['task_instance'] - params = context['params'] - error_details = ti.xcom_pull(task_ids=task_id_to_check, key='error_details') - if not error_details: - logger.error(f"Task {task_id_to_check} failed without error details. Marking as fatal.") - return 'handle_fatal_error' - - error_message = error_details.get('error_message', '').strip() - error_code = error_details.get('error_code', '').strip() - policy = params.get('on_bannable_failure', 'retry_with_new_account') - - # Unrecoverable video errors that should not be retried or treated as system failures. - unrecoverable_video_errors = [ - "AGE_GATED_SIGN_IN", "MEMBERS_ONLY", "VIDEO_PROCESSING", "COPYRIGHT_REMOVAL", - "GEO_RESTRICTED", "VIDEO_UNAVAILABLE", "PRIVATE_VIDEO", "VIDEO_REMOVED" - ] - - if error_code in unrecoverable_video_errors: - logger.warning(f"Unrecoverable video error '{error_code}' detected for '{task_id_to_check}'. This is a content issue, not a system failure.") - return 'handle_unrecoverable_video_error' - - # Fatal Thrift connection errors that should stop all processing. - if error_code == 'TRANSPORT_ERROR': - logger.error(f"Fatal Thrift connection error from '{task_id_to_check}'. Stopping processing.") - return 'handle_fatal_error' - - # Service-side connection errors that are potentially retryable. 
- connection_errors = ['SOCKS5_CONNECTION_FAILED', 'SOCKET_TIMEOUT', 'CAMOUFOX_TIMEOUT'] - if error_code in connection_errors: - logger.info(f"Handling connection error '{error_code}' from '{task_id_to_check}'. Policy: '{policy}'") - if policy == 'stop_loop': - logger.warning(f"Connection error with 'stop_loop' policy. Marking as fatal.") - return 'handle_fatal_error' - else: - logger.info("Retrying with a new account without banning.") - return 'assign_new_account_for_direct_retry' - - # Bannable errors (e.g., bot detection) that can be retried with a new account. - is_bannable = error_code in ["BOT_DETECTED", "BOT_DETECTION_SIGN_IN_REQUIRED"] - logger.info(f"Handling failure from '{task_id_to_check}'. Error code: '{error_code}', Policy: '{policy}'") - if is_bannable: - if policy in ['retry_with_new_account', 'retry_and_ban_account_only']: - return 'ban_account_and_prepare_for_retry' - if policy in ['retry_on_connection_error', 'retry_without_ban']: - return 'assign_new_account_for_direct_retry' - if policy in ['stop_loop', 'stop_loop_on_auth_proceed_on_download_error']: - return 'ban_and_report_immediately' - if policy == 'proceed_loop_under_manual_inspection': - logger.warning(f"Bannable error with 'proceed_loop_under_manual_inspection' policy. Reporting failure and continuing loop. MANUAL INTERVENTION IS LIKELY REQUIRED.") - return 'report_bannable_and_continue' - - # Any other error is considered fatal for this run. - logger.error(f"Unhandled or non-retryable error '{error_code}' from '{task_id_to_check}'. Marking as fatal.") - return 'handle_fatal_error' - -@task_group(group_id='ban_and_retry_logic') -def ban_and_retry_logic(initial_data: dict): - """ - Task group that checks for sliding window failures before banning an account. - If the account meets ban criteria, it's banned. Otherwise, the ban is skipped - but the retry proceeds. - """ - - @task.branch - def check_sliding_window_for_ban(data: dict, **context): - """ - Checks Redis for recent failures. If thresholds are met, proceeds to ban. - Otherwise, proceeds to a dummy task to allow retry without ban. - """ - params = context['params'] - account_id = data['account_id'] - redis_conn_id = params.get('redis_conn_id', DEFAULT_REDIS_CONN_ID) - - # These thresholds should ideally be Airflow Variables to be configurable - failure_window_seconds = 3600 # 1 hour - failure_threshold_count = 5 - failure_threshold_unique_proxies = 3 - - try: - redis_client = _get_redis_client(redis_conn_id) - failure_key = f"account_failures:{account_id}" - now = time.time() - window_start = now - failure_window_seconds - - # 1. Remove old failures and get recent ones - redis_client.zremrangebyscore(failure_key, '-inf', window_start) - recent_failures = redis_client.zrange(failure_key, 0, -1) - - if len(recent_failures) >= failure_threshold_count: - # Decode from bytes to string for processing - recent_failures_str = [f.decode('utf-8') for f in recent_failures] - # Failure format is "context:job_id:timestamp" - unique_proxies = {f.split(':')[0] for f in recent_failures_str} - - if len(unique_proxies) >= failure_threshold_unique_proxies: - logger.warning( - f"Account {account_id} has failed {len(recent_failures)} times " - f"with {len(unique_proxies)} unique contexts in the last hour. Proceeding to ban." - ) - return 'ban_account_task' - else: - logger.info( - f"Account {account_id} has {len(recent_failures)} failures, but only " - f"from {len(unique_proxies)} unique contexts (threshold is {failure_threshold_unique_proxies}). Skipping ban." 
- ) - else: - logger.info(f"Account {account_id} has {len(recent_failures)} failures (threshold is {failure_threshold_count}). Skipping ban.") - - except Exception as e: - logger.error(f"Error during sliding window check for account {account_id}: {e}. Skipping ban as a precaution.", exc_info=True) - - return 'skip_ban_task' - - @task(task_id='ban_account_task') - def ban_account_task(data: dict, **context): - """Wrapper task to call the main ban_account function.""" - ban_account(initial_data=data, reason="Banned by Airflow worker after sliding window check", **context) - - @task(task_id='skip_ban_task') - def skip_ban_task(): - """Dummy task to represent the 'skip ban' path.""" - pass - - check_task = check_sliding_window_for_ban(data=initial_data) - ban_task_in_group = ban_account_task(data=initial_data) - skip_task = skip_ban_task() - - check_task >> [ban_task_in_group, skip_task] - - -@task -def ban_account(initial_data: dict, reason: str, **context): - """Bans a single account via the Thrift service.""" - params = context['params'] - account_id = initial_data['account_id'] - client, transport = None, None - try: - host, port, timeout = params['service_ip'], int(params['service_port']), int(params.get('timeout', DEFAULT_TIMEOUT)) - client, transport = _get_thrift_client(host, port, timeout) - logger.warning(f"Banning account '{account_id}'. Reason: {reason}") - client.banAccount(accountId=account_id, reason=reason) - except Exception as e: - logger.error(f"Failed to issue ban for account '{account_id}': {e}", exc_info=True) + raise AirflowException(f"Thrift transport error: {e}") finally: if transport and transport.isOpen(): transport.close() -@task -def assign_new_account_for_direct_retry(initial_data: dict, **context): - """Selects a new, unused account for a direct retry (e.g., after connection error).""" - params = context['params'] - accounts_tried = initial_data['accounts_tried'] - account_pool = _get_account_pool(params) - available_for_retry = [acc for acc in account_pool if acc not in accounts_tried] - if not available_for_retry: - raise AirflowException("No other accounts available in the pool for a retry.") - - new_account_id = random.choice(available_for_retry) - accounts_tried.append(new_account_id) - logger.info(f"Selected new account for retry: '{new_account_id}'") - - # Return updated initial_data with new account - return { - 'url_to_process': initial_data['url_to_process'], - 'account_id': new_account_id, - 'accounts_tried': accounts_tried, - } @task -def assign_new_account_after_ban_check(initial_data: dict, **context): - """Selects a new, unused account for the retry attempt after a ban check.""" - params = context['params'] - accounts_tried = initial_data['accounts_tried'] - account_pool = _get_account_pool(params) - available_for_retry = [acc for acc in account_pool if acc not in accounts_tried] - if not available_for_retry: - raise AirflowException("No other accounts available in the pool for a retry.") - - new_account_id = random.choice(available_for_retry) - accounts_tried.append(new_account_id) - logger.info(f"Selected new account for retry: '{new_account_id}'") - - # Return updated initial_data with new account - return { - 'url_to_process': initial_data['url_to_process'], - 'account_id': new_account_id, - 'accounts_tried': accounts_tried, - } - -@task -def ban_and_report_immediately(initial_data: dict, reason: str, **context): - """Bans an account and prepares for failure reporting and continuing the loop.""" - ban_account(initial_data, reason, **context) - 
logger.info(f"Account '{initial_data.get('account_id')}' banned. Proceeding to report failure.") - # This task is a leaf in its path and is followed by the failure reporting task. - return initial_data # Pass data along if needed by reporting - -@task -def push_auth_success_to_redis(initial_data: dict, token_data: dict, **context): +def generate_and_push_download_tasks(token_data: dict, **context): """ - On successful token acquisition, pushes the complete token data to the - Redis queue for the download worker and records the auth success. + On success, resolves the format selector into individual format IDs and pushes + granular download tasks to the `queue_dl_format_tasks` Redis list. + Also records the successful auth activity for the profile. """ params = context['params'] - url = initial_data['url_to_process'] + url = token_data['original_url'] + info_json_path = token_data['info_json_path'] + locked_profile = token_data['locked_profile'] + + # Resolve format selector from the JSON config + try: + ytdlp_config = json.loads(token_data.get('ytdlp_config_json', '{}')) + download_format_selector = ytdlp_config.get('download_format', 'bestvideo[ext=mp4]+bestaudio[ext=m4a]/best[ext=mp4]/best') + # This profile prefix is for the *download* worker that will pick up the task + download_profile_prefix = ytdlp_config.get('download_profile_prefix', 'download_user') + except (json.JSONDecodeError, KeyError): + logger.error("Could not parse download_format from ytdlp_config_json. Falling back to default.") + download_format_selector = 'bestvideo[ext=mp4]+bestaudio[ext=m4a]/best[ext=mp4]/best' + download_profile_prefix = 'download_user' + + resolved_formats = _resolve_formats(info_json_path, download_format_selector, logger) + if not resolved_formats: + raise AirflowException(f"Format selector '{download_format_selector}' resolved to no formats for {url}.") + + tasks = [] + for format_id in resolved_formats: + task_payload = { + "info_json_path": info_json_path, + "format_id": format_id, + "profile_prefix": download_profile_prefix, + "original_url": url, + "dag_run_id": context['dag_run'].run_id, + } + tasks.append(json.dumps(task_payload)) - # The download inbox queue is derived from the auth queue name. - dl_inbox_queue = f"{params['queue_name'].replace('_auth', '_dl')}_inbox" + dl_task_queue = "queue_dl_format_tasks" auth_result_queue = f"{params['queue_name']}_result" progress_queue = f"{params['queue_name']}_progress" - - client = _get_redis_client(params['redis_conn_id']) - - payload = { - 'timestamp': time.time(), - 'dag_run_id': context['dag_run'].run_id, - **token_data - } result_data = { 'status': 'success', 'end_time': time.time(), 'url': url, 'dag_run_id': context['dag_run'].run_id, - 'token_data': token_data + 'token_data': {k: v for k, v in token_data.items() if k != 'locked_profile'} # Don't store profile in result } - with client.pipeline() as pipe: - pipe.lpush(dl_inbox_queue, json.dumps(payload)) - pipe.hset(auth_result_queue, url, json.dumps(result_data)) - pipe.hdel(progress_queue, url) - pipe.execute() - - logger.info(f"Pushed successful auth data for URL '{url}' to '{dl_inbox_queue}'.") - logger.info(f"Stored success result for auth on URL '{url}' in '{auth_result_queue}'.") - -@task -def handle_unrecoverable_video_error(**context): - """ - Handles errors for videos that are unavailable (private, removed, etc.). - These are not system failures, so the URL is logged to a 'skipped' queue - and the processing loop continues without marking the run as failed. 
- """ - params = context['params'] - ti = context['task_instance'] - url = params.get('url_to_process', 'unknown') - - # Collect error details from the failed get_token task - error_details = {} - first_token_error = ti.xcom_pull(task_ids='initial_attempt.get_token', key='error_details') - retry_token_error = ti.xcom_pull(task_ids='retry_logic.retry_get_token', key='error_details') - - if retry_token_error: - error_details = retry_token_error - elif first_token_error: - error_details = first_token_error - - error_code = error_details.get('error_code', 'UNKNOWN_VIDEO_ERROR') - error_message = error_details.get('error_message', 'Video is unavailable for an unknown reason.') - - logger.warning(f"Skipping URL '{url}' due to unrecoverable video error: {error_code} - {error_message}") - - result_data = { - 'status': 'skipped', - 'end_time': time.time(), - 'url': url, - 'dag_run_id': context['dag_run'].run_id, - 'reason': error_code, - 'details': error_message, - 'error_details': error_details - } - try: - client = _get_redis_client(params['redis_conn_id']) - - # New queue for skipped videos - skipped_queue = f"{params['queue_name']}_skipped" - progress_queue = f"{params['queue_name']}_progress" - - with client.pipeline() as pipe: - pipe.hset(skipped_queue, url, json.dumps(result_data)) + redis_client = _get_redis_client(params['redis_conn_id']) + with redis_client.pipeline() as pipe: + pipe.rpush(dl_task_queue, *tasks) + pipe.hset(auth_result_queue, url, json.dumps(result_data)) pipe.hdel(progress_queue, url) pipe.execute() - - logger.info(f"Stored skipped result for URL '{url}' in '{skipped_queue}' and removed from progress queue.") - except Exception as e: - logger.error(f"Could not report skipped video to Redis: {e}", exc_info=True) - - -@task(trigger_rule='one_failed') -def report_failure_and_continue(**context): - """ - Handles a failed URL processing attempt by recording a detailed error report to Redis. - This is a common endpoint for various failure paths that should not stop the overall dispatcher loop. - """ - params = context['params'] - ti = context['task_instance'] - url = params.get('url_to_process', 'unknown') - - # Collect error details from XCom - error_details = {} - - # Check for error details from get_token tasks - first_token_task_id = 'initial_attempt.get_token' - retry_token_task_id = 'retry_logic.retry_get_token' - - first_token_error = ti.xcom_pull(task_ids=first_token_task_id, key='error_details') - retry_token_error = ti.xcom_pull(task_ids=retry_token_task_id, key='error_details') - - # Use the most recent error details - if retry_token_error: - error_details = retry_token_error - elif first_token_error: - error_details = first_token_error - else: - # Check for other possible error sources - # This is a simplified approach - in a real implementation you might want to - # check more task IDs or use a more sophisticated error collection mechanism - pass - - logger.error(f"A failure occurred while processing URL '{url}'. 
Reporting to Redis.") - - result_data = { - 'status': 'failed', - 'end_time': time.time(), - 'url': url, - 'dag_run_id': context['dag_run'].run_id, - 'error_details': error_details - } - - try: - client = _get_redis_client(params['redis_conn_id']) - - # Update client-specific stats - try: - machine_id = params.get('machine_id') or socket.gethostname() - _update_client_stats(client, params.get('clients', ''), 'failure', url, machine_id, context['dag_run'].run_id) - except Exception as e: - logger.error(f"Could not update client stats on failure: {e}", exc_info=True) - - result_queue = f"{params['queue_name']}_result" - fail_queue = f"{params['queue_name']}_fail" - progress_queue = f"{params['queue_name']}_progress" - - with client.pipeline() as pipe: - pipe.hset(result_queue, url, json.dumps(result_data)) - pipe.hset(fail_queue, url, json.dumps(result_data)) - pipe.hdel(progress_queue, url) - pipe.execute() - - logger.info(f"Stored failure result for URL '{url}' in '{result_queue}' and '{fail_queue}' and removed from progress queue.") + logger.info(f"Pushed {len(tasks)} granular download task(s) for URL '{url}' to '{dl_task_queue}'.") + logger.info(f"Stored success result for auth on URL '{url}' in '{auth_result_queue}'.") except Exception as e: - logger.error(f"Could not report failure to Redis: {e}", exc_info=True) + logger.error(f"Failed to push download tasks to Redis: {e}", exc_info=True) + raise AirflowException("Failed to push tasks to Redis.") + # Return the original token_data (including locked_profile) for the unlock task + return token_data -@task(trigger_rule='one_failed') -def handle_fatal_error(**context): - """ - Handles fatal, non-retryable errors (e.g., infrastructure issues). - This task reports the failure to Redis before failing the DAG run to ensure - failed URLs are queued for later reprocessing, then stops the processing loop. - """ - params = context['params'] - ti = context['task_instance'] - url = params.get('url_to_process', 'unknown') - - # Collect error details - error_details = {} - first_token_task_id = 'initial_attempt.get_token' - retry_token_task_id = 'retry_logic.retry_get_token' - - first_token_error = ti.xcom_pull(task_ids=first_token_task_id, key='error_details') - retry_token_error = ti.xcom_pull(task_ids=retry_token_task_id, key='error_details') - - # Use the most recent error details - if retry_token_error: - error_details = retry_token_error - elif first_token_error: - error_details = first_token_error - - logger.error(f"A fatal, non-retryable error occurred for URL '{url}'. 
See previous task logs for details.") - - # Report failure to Redis so the URL can be reprocessed later - try: - client = _get_redis_client(params['redis_conn_id']) - - # Update client-specific stats - try: - machine_id = params.get('machine_id') or socket.gethostname() - _update_client_stats(client, params.get('clients', ''), 'failure', url, machine_id, context['dag_run'].run_id) - except Exception as e: - logger.error(f"Could not update client stats on fatal error: {e}", exc_info=True) - - result_data = { - 'status': 'failed', - 'end_time': time.time(), - 'url': url, - 'dag_run_id': context['dag_run'].run_id, - 'error': 'fatal_error', - 'error_message': 'Fatal non-retryable error occurred', - 'error_details': error_details - } - result_queue = f"{params['queue_name']}_result" - fail_queue = f"{params['queue_name']}_fail" - - progress_queue = f"{params['queue_name']}_progress" - - with client.pipeline() as pipe: - pipe.hset(result_queue, url, json.dumps(result_data)) - pipe.hset(fail_queue, url, json.dumps(result_data)) - pipe.hdel(progress_queue, url) - pipe.execute() - - logger.info(f"Stored fatal error result for URL '{url}' in '{result_queue}' and '{fail_queue}' for later reprocessing.") - except Exception as e: - logger.error(f"Could not report fatal error to Redis: {e}", exc_info=True) - - # Fail the DAG run to prevent automatic continuation of the processing loop - raise AirflowException("Failing DAG due to fatal error. The dispatcher loop will stop.") @task(trigger_rule='one_success') -def continue_processing_loop(**context): +def continue_processing_loop(token_data: dict | None = None, **context): """ - After a successful run, triggers a new dispatcher to continue the processing loop, - effectively asking for the next URL to be processed. + After a run, triggers a new dispatcher to continue the processing loop, + passing along the account/proxy to make them sticky if available. """ params = context['params'] dag_run = context['dag_run'] @@ -998,18 +620,29 @@ def continue_processing_loop(**context): return # Create a new unique run_id for the dispatcher. - # Using a timestamp and UUID ensures the ID is unique and does not grow in length over time, - # preventing database errors. new_dispatcher_run_id = f"retriggered_by_worker_{datetime.now().strftime('%Y%m%d_%H%M%S')}_{str(uuid.uuid4())[:8]}" # Pass all original parameters from the orchestrator through to the new dispatcher run. conf_to_pass = {k: v for k, v in params.items() if v is not None} + conf_to_pass['worker_index'] = params.get('worker_index') - # The new dispatcher will pull its own URL and determine its own queue, so we don't pass these. + if token_data: + # On success path, make the account and proxy "sticky" for the next run. + conf_to_pass['account_id'] = token_data.get('successful_account_id') + conf_to_pass['assigned_proxy_url'] = token_data.get('socks_proxy') + logger.info(f"Worker finished successfully. Triggering a new dispatcher ('{new_dispatcher_run_id}') to continue the loop with sticky account/proxy.") + logger.info(f" - Sticky Account: {conf_to_pass.get('account_id')}") + logger.info(f" - Sticky Proxy: {conf_to_pass.get('assigned_proxy_url')}") + else: + # On failure/skip paths, no token_data is passed. Clear sticky params to allow re-selection. + conf_to_pass.pop('account_id', None) + conf_to_pass.pop('assigned_proxy_url', None) + logger.info(f"Worker finished on a non-success path. 
Triggering a new dispatcher ('{new_dispatcher_run_id}') to continue the loop without sticky account/proxy.") + + # The new dispatcher will pull its own URL and determine its own queue. conf_to_pass.pop('url_to_process', None) conf_to_pass.pop('worker_queue', None) - logger.info(f"Worker finished successfully. Triggering a new dispatcher ('{new_dispatcher_run_id}') to continue the loop.") trigger_dag( dag_id=dispatcher_dag_id, run_id=new_dispatcher_run_id, @@ -1018,136 +651,53 @@ def continue_processing_loop(**context): ) -@task.branch(trigger_rule='one_failed') -def handle_retry_failure_branch(task_id_to_check: str, **context): + + + + +# ============================================================================= +# DAG Definition with TaskGroups +# ============================================================================= +@task(trigger_rule='all_done') +def unlock_profile(**context): """ - Inspects a failed retry attempt and decides on the final action. - On retry, most errors are considered fatal for the URL, but not for the system. - """ - ti = context['task_instance'] - params = context['params'] - error_details = ti.xcom_pull(task_ids=task_id_to_check, key='error_details') - if not error_details: - return 'handle_fatal_error' - - error_message = error_details.get('error_message', '').strip() - error_code = error_details.get('error_code', '').strip() - - # Unrecoverable video errors that should not be retried or treated as system failures. - unrecoverable_video_errors = [ - "AGE_GATED_SIGN_IN", "MEMBERS_ONLY", "VIDEO_PROCESSING", "COPYRIGHT_REMOVAL", - "GEO_RESTRICTED", "VIDEO_UNAVAILABLE", "PRIVATE_VIDEO", "VIDEO_REMOVED" - ] - - if error_code in unrecoverable_video_errors: - logger.warning(f"Unrecoverable video error '{error_code}' detected on retry for '{task_id_to_check}'.") - return 'handle_unrecoverable_video_error' - - if error_code == 'TRANSPORT_ERROR': - logger.error(f"Fatal Thrift connection error on retry from '{task_id_to_check}'.") - return 'handle_fatal_error' - - is_bannable = error_code in ["BOT_DETECTED", "BOT_DETECTION_SIGN_IN_REQUIRED"] - if is_bannable: - policy = params.get('on_bannable_failure', 'retry_with_new_account') - if policy == 'proceed_loop_under_manual_inspection': - logger.warning(f"Bannable error '{error_code}' on retry with 'proceed_loop_under_manual_inspection' policy. Reporting failure and continuing loop. MANUAL INTERVENTION IS LIKELY REQUIRED.") - return 'report_bannable_and_continue' - - logger.warning(f"Bannable error '{error_code}' on retry. Banning account and reporting failure.") - return 'ban_and_report_after_retry' - - logger.error(f"URL failed on retry with code '{error_code}'. Reporting failure and continuing loop.") - return 'report_failure_and_continue' - - -@task -def ban_and_report_after_retry(retry_data: dict, reason: str, **context): - """Bans the account used in a failed retry and prepares for failure reporting.""" - # The account to ban is the one from the retry attempt. - ban_account(retry_data, reason, **context) - logger.info(f"Account '{retry_data.get('account_id')}' banned after retry failed. Proceeding to report failure.") - return retry_data - - - - -@task(trigger_rule='one_success') -def coalesce_token_data(get_token_result=None, retry_get_token_result=None): - """ - Selects the successful token data from either the first attempt or the retry. - The task that did not run or failed will have a result of None. 
- """ - if retry_get_token_result: - logger.info("Using token data from retry attempt.") - return retry_get_token_result - if get_token_result: - logger.info("Using token data from initial attempt.") - return get_token_result - # This should not be reached if trigger_rule='one_success' is working correctly. - raise AirflowException("Could not find a successful token result from any attempt.") - - -@task -def report_bannable_and_continue(**context): - """ - Handles a bannable error by reporting it, but continues the loop - as per the 'proceed_loop_under_manual_inspection' policy. + Unlocks the profile and records activity (success or failure). + This task runs regardless of upstream success or failure. """ params = context['params'] + dag_run = context['dag_run'] + + failed_tasks = [ti for ti in dag_run.get_task_instances() if ti.state == 'failed'] + is_success = not failed_tasks + activity_type = 'auth' if is_success else 'auth_error' + ti = context['task_instance'] - url = params.get('url_to_process', 'unknown') + initial_data = ti.xcom_pull(task_ids='get_url_and_lock_profile') - # Collect error details - error_details = {} - first_token_task_id = 'initial_attempt.get_token' - retry_token_task_id = 'retry_logic.retry_get_token' - - first_token_error = ti.xcom_pull(task_ids=first_token_task_id, key='error_details') - retry_token_error = ti.xcom_pull(task_ids=retry_token_task_id, key='error_details') - - # Use the most recent error details - if retry_token_error: - error_details = retry_token_error - elif first_token_error: - error_details = first_token_error - - logger.error(f"Bannable error for URL '{url}'. Policy is to continue loop under manual supervision.") - - # Report failure to Redis + locked_profile = initial_data.get('locked_profile') if initial_data else None + + if not locked_profile: + logger.warning("No locked_profile data found. 
Cannot unlock or record activity.") + return + + profile_name = locked_profile.get('name') + owner_id = f"airflow_auth_worker_{dag_run.run_id}" + try: - client = _get_redis_client(params['redis_conn_id']) + redis_conn_id = params['redis_conn_id'] + redis_env = params['redis_env'] + redis_hook = _get_redis_client(redis_conn_id, return_hook=True) + key_prefix = f"{redis_env}_profile_mgmt_" + pm = ProfileManager(redis_hook=redis_hook, key_prefix=key_prefix) + + logger.info(f"Recording activity '{activity_type}' for profile '{profile_name}'.") + pm.record_activity(profile_name, activity_type) - # Update client-specific stats - try: - machine_id = params.get('machine_id') or socket.gethostname() - _update_client_stats(client, params.get('clients', ''), 'failure', url, machine_id, context['dag_run'].run_id) - except Exception as e: - logger.error(f"Could not update client stats on bannable error: {e}", exc_info=True) + logger.info(f"Unlocking profile '{profile_name}' with owner '{owner_id}'.") + pm.unlock_profile(profile_name, owner=owner_id) - result_data = { - 'status': 'failed', - 'end_time': time.time(), - 'url': url, - 'dag_run_id': context['dag_run'].run_id, - 'error': 'bannable_error_manual_override', - 'error_message': 'Bannable error occurred, but policy is set to continue loop under manual supervision.', - 'error_details': error_details - } - result_queue = f"{params['queue_name']}_result" - fail_queue = f"{params['queue_name']}_fail" - - progress_queue = f"{params['queue_name']}_progress" - - with client.pipeline() as pipe: - pipe.hset(result_queue, url, json.dumps(result_data)) - pipe.hset(fail_queue, url, json.dumps(result_data)) - pipe.hdel(progress_queue, url) - pipe.execute() - - logger.info(f"Stored bannable error for URL '{url}' in '{result_queue}' and '{fail_queue}'.") except Exception as e: - logger.error(f"Could not report bannable error to Redis: {e}", exc_info=True) + logger.error(f"Failed to unlock profile or record activity for '{profile_name}': {e}", exc_info=True) # ============================================================================= @@ -1159,141 +709,62 @@ with DAG( schedule=None, start_date=days_ago(1), catchup=False, - tags=['ytdlp', 'worker'], + tags=['ytdlp', 'worker', 'v2'], doc_md=__doc__, render_template_as_native_obj=True, is_paused_upon_creation=True, params={ + # V2 Profile Params + 'redis_env': Param("sim_auth", type="string", title="[V2 Profiles] Redis Environment", description="The environment for v2 profile management (e.g., 'sim_auth'). 
Determines the Redis key prefix."), + 'profile_prefix': Param("auth_user", type="string", title="[V2 Profiles] Profile Prefix", description="The prefix for auth profiles that workers should attempt to lock."), + 'queue_name': Param(DEFAULT_QUEUE_NAME, type="string"), 'redis_conn_id': Param(DEFAULT_REDIS_CONN_ID, type="string"), 'service_ip': Param(DEFAULT_YT_AUTH_SERVICE_IP, type="string"), 'service_port': Param(DEFAULT_YT_AUTH_SERVICE_PORT, type="integer"), - 'account_pool': Param('default_account', type="string"), - 'account_pool_size': Param(None, type=["integer", "null"]), - 'prepend_client_to_account': Param(True, type="boolean", title="[Worker Param] Prepend Client to Account", description="If True, prepends client and timestamp to account names in prefix mode."), + # DEPRECATED PARAMS (kept for reference, but no longer used) + 'account_pool': Param('default_account', type="string", description="DEPRECATED: Use profile_prefix instead."), + 'account_pool_size': Param(None, type=["integer", "null"], description="DEPRECATED: Pool size is managed in Redis."), + 'prepend_client_to_account': Param(True, type="boolean", description="DEPRECATED"), + 'assigned_proxy_url': Param(None, type=["string", "null"], description="DEPRECATED: Proxy is now determined by the locked profile."), + 'account_id': Param(None, type=["string", "null"], description="DEPRECATED: Profile is locked dynamically."), + 'worker_index': Param(None, type=["integer", "null"], description="DEPRECATED"), + 'auto_create_new_accounts_on_exhaustion': Param(True, type="boolean", description="DEPRECATED"), + 'machine_id': Param(None, type=["string", "null"]), - 'assigned_proxy_url': Param(None, type=["string", "null"], title="[Worker Param] Assigned Proxy URL", description="If provided, forces the token service to use this specific proxy for the request."), - 'clients': Param('tv_simply', type="string", description="Comma-separated list of clients for token generation. e.g. mweb,tv,web_camoufox"), + 'clients': Param('tv_simply', type="string", description="DEPRECATED: This is now read from the ytdlp_config_json."), 'timeout': Param(DEFAULT_TIMEOUT, type="integer"), 'on_bannable_failure': Param('stop_loop_on_auth_proceed_on_download_error', type="string", enum=['stop_loop', 'retry_with_new_account', 'retry_without_ban', 'retry_and_ban_account_only', 'retry_on_connection_error', 'proceed_loop_under_manual_inspection', 'stop_loop_on_auth_proceed_on_download_error']), - 'request_params_json': Param(json.dumps(DEFAULT_REQUEST_PARAMS), type="string", title="[Worker Param] Request Params JSON", description="JSON string with request parameters for the token service."), - 'language_code': Param('en-US', type="string", title="[Worker Param] Language Code", description="The language code (e.g., 'en-US', 'de-DE') to use for the YouTube request headers."), - 'auto_create_new_accounts_on_exhaustion': Param(True, type="boolean"), + # --- Unified JSON Config (passed from orchestrator) --- + 'ytdlp_config_json': Param('{}', type="string", title="[Internal] Unified JSON config from orchestrator."), # --- Manual Run / Internal Parameters --- 'manual_url_to_process': Param('iPwdia3gAnk', type=["string", "null"], title="[Manual Run] URL to Process", description="For manual runs, provide a single YouTube URL, or the special value 'PULL_FROM_QUEUE' to pull one URL from the Redis inbox. 
This is ignored if triggered by the dispatcher."), 'url_to_process': Param(None, type=["string", "null"], title="[Internal] URL from Dispatcher", description="This parameter is set by the dispatcher DAG and should not be used for manual runs."), 'worker_queue': Param(None, type=["string", "null"], title="[Internal] Worker Queue", description="This parameter is set by the dispatcher DAG and should not be used for manual runs."), } ) as dag: - initial_data = get_url_and_assign_account() + initial_data = get_url_and_lock_profile() + unlock_profile_task = unlock_profile() # --- Task Instantiation with TaskGroups --- - # Main success/failure handlers (outside groups for clear end points) - fatal_error_task = handle_fatal_error() - report_failure_task = report_failure_and_continue() - continue_loop_task = continue_processing_loop() - unrecoverable_video_error_task = handle_unrecoverable_video_error() - report_bannable_and_continue_task = report_bannable_and_continue() - + # This is simplified. The auth worker does not retry with different accounts anymore, + # as the policy enforcer is responsible for managing profile health. If get_token fails, + # the profile is unlocked with a failure, and the loop continues to the next URL. + # --- Task Group 1: Initial Attempt --- - with TaskGroup("initial_attempt", tooltip="Initial token acquisition attempt") as initial_attempt_group: - first_token_attempt = get_token(initial_data) - initial_branch_task = handle_bannable_error_branch.override(trigger_rule='one_failed')( - task_id_to_check=first_token_attempt.operator.task_id - ) - - # Tasks for the "stop_loop" policy on initial attempt - ban_and_report_immediately_task = ban_and_report_immediately.override(task_id='ban_and_report_immediately')( - initial_data=initial_data, - reason="Banned by Airflow worker (policy is stop_loop)" - ) - - first_token_attempt >> initial_branch_task - initial_branch_task >> [fatal_error_task, ban_and_report_immediately_task, unrecoverable_video_error_task, report_bannable_and_continue_task] - - # --- Task Group 2: Retry Logic --- - with TaskGroup("retry_logic", tooltip="Retry logic with account management") as retry_logic_group: - # Retry path tasks - ban_and_retry_group = ban_and_retry_logic.override(group_id='ban_account_and_prepare_for_retry')( - initial_data=initial_data - ) - # This task is for retries after a ban check - after_ban_account_task = assign_new_account_after_ban_check.override(task_id='assign_new_account_after_ban_check')( - initial_data=initial_data - ) - # This task is for direct retries (e.g., on connection error) - direct_retry_account_task = assign_new_account_for_direct_retry.override(task_id='assign_new_account_for_direct_retry')( - initial_data=initial_data - ) - - @task(trigger_rule='one_success') - def coalesce_retry_data(direct_retry_data=None, after_ban_data=None): - """Coalesces account data from one of the two mutually exclusive retry paths.""" - if direct_retry_data: - return direct_retry_data - if after_ban_data: - return after_ban_data - raise AirflowException("Could not find valid account data for retry.") - - coalesced_retry_data = coalesce_retry_data( - direct_retry_data=direct_retry_account_task, - after_ban_data=after_ban_account_task - ) - - retry_token_task = get_token.override(task_id='retry_get_token')( - initial_data=coalesced_retry_data - ) - - # Retry failure branch and its tasks - retry_branch_task = handle_retry_failure_branch.override(trigger_rule='one_failed')( - task_id_to_check=retry_token_task.operator.task_id - ) - 
ban_after_retry_report_task = ban_and_report_after_retry.override(task_id='ban_and_report_after_retry')( - retry_data=coalesced_retry_data, - reason="Banned by Airflow worker after failed retry" - ) - - # Internal dependencies within retry group - ban_and_retry_group >> after_ban_account_task - after_ban_account_task >> coalesced_retry_data - direct_retry_account_task >> coalesced_retry_data - coalesced_retry_data >> retry_token_task - retry_token_task >> retry_branch_task - retry_branch_task >> [fatal_error_task, report_failure_task, ban_after_retry_report_task, unrecoverable_video_error_task, report_bannable_and_continue_task] - ban_after_retry_report_task >> report_failure_task - - # --- Task Group 3: Success/Continuation Logic --- - with TaskGroup("success_and_continuation", tooltip="Push to DL queue and continue loop") as success_group: - token_data = coalesce_token_data( - get_token_result=first_token_attempt, - retry_get_token_result=retry_token_task - ) + with TaskGroup("auth_attempt", tooltip="Token acquisition attempt") as auth_attempt_group: + token_data = get_token(initial_data) list_formats_task = list_available_formats(token_data=token_data) - success_task = push_auth_success_to_redis( - initial_data=initial_data, - token_data=token_data - ) + generate_tasks = generate_and_push_download_tasks(token_data=token_data) - first_token_attempt >> token_data - retry_token_task >> token_data - token_data >> list_formats_task >> success_task - success_task >> continue_loop_task + token_data >> list_formats_task >> generate_tasks - # --- DAG Dependencies between TaskGroups --- - # Initial attempt can lead to retry logic or direct failure - initial_branch_task >> [retry_logic_group, fatal_error_task, ban_and_report_immediately_task, unrecoverable_video_error_task, report_bannable_and_continue_task] + # --- Failure Handling --- + # `unlock_profile` is the terminal task, running after all upstream tasks are done. + # It determines success/failure and records activity. - # A successful initial attempt bypasses retry and goes straight to the success group - initial_attempt_group >> success_group - - # Retry logic leads to success/continuation on success or failure reporting on failure - retry_branch_task >> [report_failure_task] # Handled within the group - retry_logic_group >> success_group - - # Ban and report immediately leads to failure reporting - ban_and_report_immediately_task >> report_failure_task - - # Unrecoverable/bannable errors that don't stop the loop should continue processing - unrecoverable_video_error_task >> continue_loop_task - report_bannable_and_continue_task >> continue_loop_task + # --- DAG Dependencies --- + initial_data >> auth_attempt_group + auth_attempt_group >> unlock_profile_task + unlock_profile_task >> continue_processing_loop(token_data=None) # Continue loop regardless of outcome diff --git a/airflow/dags/ytdlp_ops_v02_worker_per_url_dl.py b/airflow/dags/ytdlp_ops_v02_worker_per_url_dl.py index 29d12f5..d878e34 100644 --- a/airflow/dags/ytdlp_ops_v02_worker_per_url_dl.py +++ b/airflow/dags/ytdlp_ops_v02_worker_per_url_dl.py @@ -11,9 +11,14 @@ This is the "Download Worker" part of a separated Auth/Download pattern. It receives a job payload with all necessary token info and handles only the downloading and probing of media files. 
""" - from __future__ import annotations +# --- Add project root to path to allow for yt-ops-client imports --- +import sys +# The yt-ops-client package is installed in editable mode in /app +if '/app' not in sys.path: + sys.path.insert(0, '/app') + from airflow.decorators import task, task_group from airflow.exceptions import AirflowException, AirflowSkipException from airflow.models import Variable @@ -30,6 +35,9 @@ import json import logging import os import random + +# Configure logging +logger = logging.getLogger(__name__) import re import redis import socket @@ -47,8 +55,12 @@ from thrift.protocol import TBinaryProtocol from thrift.transport import TSocket, TTransport from thrift.transport.TTransport import TTransportException -# Configure logging -logger = logging.getLogger(__name__) +# ytops_client imports for v2 profile management +try: + from ytops_client.profile_manager_tool import ProfileManager, format_duration, format_timestamp +except ImportError as e: + logger.critical(f"Could not import ytops_client modules: {e}. Ensure yt-ops-client package is installed correctly in Airflow's environment.") + raise # --- Client Stats Helper --- @@ -149,79 +161,86 @@ def _extract_video_id(url): # ============================================================================= @task -def get_download_job_from_conf(**context): +def lock_profile_and_find_task(**context): """ - Gets the download job details (which includes token data) from the DAG run conf. - This is the first task in the download worker DAG. + Profile-first worker logic: + 1. Locks an available download profile from the Redis pool. + 2. Scans the granular download task queue for a job matching the profile's prefix. + 3. Returns both the locked profile and the claimed job data. """ params = context['params'] ti = context['task_instance'] - - # --- Worker Pinning Verification --- - # This is a safeguard against a known Airflow issue where clearing a task - # can cause the task_instance_mutation_hook to be skipped, breaking pinning. - # See: https://github.com/apache/airflow/issues/20143 - expected_queue = None - if ti.run_id and '_q_' in ti.run_id: - expected_queue = ti.run_id.split('_q_')[-1] + dag_run = context['dag_run'] - if not expected_queue: - # Fallback to conf if run_id parsing fails for some reason - expected_queue = params.get('worker_queue') - - if expected_queue and ti.queue != expected_queue: - error_msg = ( - f"WORKER PINNING FAILURE: Task is running on queue '{ti.queue}' but was expected on '{expected_queue}'. " - "This usually happens after manually clearing a task, which is not the recommended recovery method for this DAG. " - "To recover a failed URL, let the DAG run fail, use the 'ytdlp_mgmt_queues' DAG to requeue the URL, " - "and use the 'ytdlp_ops_orchestrator' to start a new worker loop if needed." - ) - logger.error(error_msg) - raise AirflowException(error_msg) - elif expected_queue: - logger.info(f"Worker pinning verified. Task is correctly running on queue '{ti.queue}'.") - # --- End Verification --- + redis_conn_id = params['redis_conn_id'] + redis_env = params['redis_env'] + profile_prefix = params['profile_prefix'] - # The job data is passed by the dispatcher DAG via 'job_data'. 
- job_data = params.get('job_data') - if not job_data: - raise AirflowException("No job_data provided in DAG run configuration.") - - # If job_data is a string, parse it as JSON - if isinstance(job_data, str): - try: - job_data = json.loads(job_data) - except json.JSONDecodeError: - raise AirflowException(f"Could not decode job_data JSON: {job_data}") - - url_to_process = job_data.get('original_url') - if not url_to_process: - raise AirflowException("'original_url' not found in job_data.") - - logger.info(f"Received job for URL '{url_to_process}'.") - - # Mark the URL as in-progress in Redis + # Initialize ProfileManager try: - redis_conn_id = params.get('redis_conn_id', DEFAULT_REDIS_CONN_ID) - queue_name = params.get('queue_name', DEFAULT_QUEUE_NAME) - progress_queue = f"{queue_name}_progress" - client = _get_redis_client(redis_conn_id) - - progress_data = { - 'status': 'in_progress', - 'start_time': time.time(), - 'dag_run_id': context['dag_run'].run_id, - 'hostname': socket.gethostname(), - } - client.hset(progress_queue, url_to_process, json.dumps(progress_data)) - logger.info(f"Marked URL '{url_to_process}' as in-progress.") + redis_hook = _get_redis_client(redis_conn_id, return_hook=True) + key_prefix = f"{redis_env}_profile_mgmt_" + pm = ProfileManager(redis_hook=redis_hook, key_prefix=key_prefix) + logger.info(f"Initialized ProfileManager for env '{redis_env}' (Redis key prefix: '{key_prefix}')") except Exception as e: - logger.error(f"Could not mark URL as in-progress in Redis: {e}", exc_info=True) + raise AirflowException(f"Failed to initialize ProfileManager: {e}") - return job_data + # Step 1: Lock a profile + owner_id = f"airflow_dl_worker_{dag_run.run_id}" + locked_profile = None + logger.info(f"Attempting to lock a profile with owner '{owner_id}' and prefix '{profile_prefix}'...") + + # This is a blocking loop until a profile is found or the task times out. + while not locked_profile: + locked_profile = pm.lock_profile(owner=owner_id, profile_prefix=profile_prefix) + if not locked_profile: + logger.info("No download profiles available to lock. Waiting for 15 seconds...") + time.sleep(15) + + logger.info(f"Successfully locked profile: {locked_profile['name']}") + + # Step 2: Find a matching task + task_queue = "queue_dl_format_tasks" + job_data = None + logger.info(f"Scanning Redis list '{task_queue}' for a matching task...") + + # This is a simple, non-atomic 'claim' logic suitable for Airflow's concurrency model. + # It's not perfectly race-proof but is a reasonable starting point. + redis_client = pm.redis + max_scan_attempts = 100 # To prevent infinite loops on a busy queue + + for i in range(max_scan_attempts): + task_json = redis_client.lpop(task_queue) + if not task_json: + logger.info("Task queue is empty. Waiting for 10 seconds...") + time.sleep(10) + continue + + try: + task_data = json.loads(task_json) + if task_data.get('profile_prefix') == profile_prefix: + job_data = task_data + logger.info(f"Claimed task for profile prefix '{profile_prefix}': {job_data}") + break + else: + # Not a match, push it back to the end of the queue and try again. + redis_client.rpush(task_queue, task_json) + except (json.JSONDecodeError, TypeError): + logger.error(f"Could not parse task from queue. Discarding item: {task_json}") + + if not job_data: + # If no task is found, unlock the profile and fail gracefully. 
+ pm.unlock_profile(locked_profile['name'], owner=owner_id) + raise AirflowSkipException(f"Could not find a matching task in '{task_queue}' for prefix '{profile_prefix}' after {max_scan_attempts} attempts.") + + # Combine profile and job data to pass to the next task + return { + 'locked_profile': locked_profile, + 'job_data': job_data, + } @task -def list_available_formats(token_data: dict, **context): +def list_available_formats(worker_data: dict, **context): """ Lists available formats for the given video using the info.json. This is for debugging and informational purposes. @@ -229,7 +248,7 @@ def list_available_formats(token_data: dict, **context): import subprocess import shlex - info_json_path = token_data.get('info_json_path') + info_json_path = worker_data['job_data'].get('info_json_path') if not (info_json_path and os.path.exists(info_json_path)): logger.warning(f"Cannot list formats: info.json path is missing or file does not exist ({info_json_path}).") return [] @@ -334,12 +353,55 @@ def _resolve_generic_selector(selector: str, info_json_path: str, logger) -> str return None -@task -def download_and_probe(token_data: dict, available_formats: list[str], **context): +def _check_format_expiry(info_json_path: str, formats_to_check: list[str], logger) -> bool: """ - Uses retrieved token data to download and probe media files. - Supports parallel downloading of specific, comma-separated format IDs. - If probing fails, retries downloading only the failed files. + Checks if any of the specified format URLs have expired using yt-ops-client. + Returns True if any format is expired, False otherwise. + """ + import subprocess + import shlex + + if not formats_to_check: + return False + + logger.info(f"Checking for URL expiry for formats: {formats_to_check}") + + # We can check all formats at once. The tool will report if any of them are expired. + try: + cmd = [ + 'ytops-client', 'check-expiry', + '--load-info-json', info_json_path, + '-f', ','.join(formats_to_check), + ] + + copy_paste_cmd = ' '.join(shlex.quote(arg) for arg in cmd) + logger.info(f"Executing expiry check for all selected formats: {copy_paste_cmd}") + + process = subprocess.run(cmd, capture_output=True, text=True, timeout=60) + + if process.stdout: + logger.info(f"ytops-client check-expiry STDOUT:\n{process.stdout}") + if process.stderr: + logger.info(f"ytops-client check-expiry STDERR:\n{process.stderr}") + + # The tool exits with a non-zero code if a URL is expired. + if process.returncode != 0: + logger.error("Expiry check failed. One or more URLs are likely expired.") + return True # An expiry was found + + except Exception as e: + logger.error(f"An error occurred during expiry check: {e}", exc_info=True) + # To be safe, treat this as a potential expiry to trigger re-authentication. + return True + + logger.info("No expired URLs found for the selected formats.") + return False + + +@task +def download_and_probe(worker_data: dict, **context): + """ + Uses profile and job data to download and probe a single media format. 
""" try: import subprocess @@ -347,23 +409,11 @@ def download_and_probe(token_data: dict, available_formats: list[str], **context import concurrent.futures params = context['params'] - info_json_path = token_data.get('info_json_path') - original_url = token_data.get('original_url') - - # Extract proxy from filename, with fallback to token_data for backward compatibility - proxy = None - if info_json_path: - filename = os.path.basename(info_json_path) - proxy_match = re.search(r'_proxy_(.+)\.json$', filename) - if proxy_match: - sanitized_proxy = proxy_match.group(1) - # Reverse sanitization from auth worker (replace '---' with '://') - proxy = sanitized_proxy.replace('---', '://') - logger.info(f"Extracted proxy '{proxy}' from filename.") + job_data = worker_data['job_data'] + locked_profile = worker_data['locked_profile'] - if not proxy: - logger.warning("Proxy not found in filename. Falling back to 'socks_proxy' from token_data.") - proxy = token_data.get('socks_proxy') + info_json_path = job_data.get('info_json_path') + proxy = locked_profile.get('proxy') if not (info_json_path and os.path.exists(info_json_path)): raise AirflowException(f"Error: info.json path is missing or file does not exist ({info_json_path}).") @@ -383,14 +433,11 @@ def download_and_probe(token_data: dict, available_formats: list[str], **context except Exception as e: logger.warning(f"Could not process/remove 'js_runtimes' from info.json: {e}", exc_info=True) - download_dir = token_data.get('job_dir_path') - if not download_dir: - # Fallback for older runs or if job_dir_path is missing - download_dir = os.path.dirname(info_json_path) + download_dir = os.path.dirname(info_json_path) - download_format = params.get('download_format') + download_format = job_data.get('format_id') if not download_format: - raise AirflowException("The 'download_format' parameter is missing or empty.") + raise AirflowException("The 'format_id' is missing from the job data.") output_template = params.get('output_path_template', "%(id)s.f%(format_id)s.%(ext)s") full_output_path = os.path.join(download_dir, output_template) @@ -408,17 +455,7 @@ def download_and_probe(token_data: dict, available_formats: list[str], **context # The 'py' tool maps many yt-dlp flags via --extra-ytdlp-args # The 'py' tool maps many yt-dlp flags via --extra-ytdlp-args - py_extra_args = ['--output', output_template, '--no-resize-buffer', '--buffer-size', '4M'] - if params.get('fragment_retries'): - py_extra_args.extend(['--fragment-retries', str(params['fragment_retries'])]) - if params.get('limit_rate'): - py_extra_args.extend(['--limit-rate', params['limit_rate']]) - if params.get('socket_timeout'): - py_extra_args.extend(['--socket-timeout', str(params['socket_timeout'])]) - if params.get('min_sleep_interval'): - py_extra_args.extend(['--sleep-interval', str(params['min_sleep_interval'])]) - if params.get('max_sleep_interval'): - py_extra_args.extend(['--max-sleep-interval', str(params['max_sleep_interval'])]) + py_extra_args = ['--output', output_template] if params.get('yt_dlp_test_mode'): py_extra_args.append('--test') @@ -468,17 +505,7 @@ def download_and_probe(token_data: dict, available_formats: list[str], **context cmd.extend(['--proxy', proxy]) # The 'cli' tool is the old yt-dlp wrapper, so it takes similar arguments. 
- cli_extra_args = ['--output', full_output_path, '--no-resize-buffer', '--buffer-size', '4M'] - if params.get('fragment_retries'): - cli_extra_args.extend(['--fragment-retries', str(params['fragment_retries'])]) - if params.get('limit_rate'): - cli_extra_args.extend(['--limit-rate', params['limit_rate']]) - if params.get('socket_timeout'): - cli_extra_args.extend(['--socket-timeout', str(params['socket_timeout'])]) - if params.get('min_sleep_interval'): - cli_extra_args.extend(['--sleep-interval', str(params['min_sleep_interval'])]) - if params.get('max_sleep_interval'): - cli_extra_args.extend(['--max-sleep-interval', str(params['max_sleep_interval'])]) + cli_extra_args = ['--output', full_output_path, '--verbose'] if params.get('yt_dlp_test_mode'): cli_extra_args.append('--test') @@ -600,79 +627,19 @@ def download_and_probe(token_data: dict, available_formats: list[str], **context return successful_probes, failed_probes # --- Main Execution Logic --- - with open(info_json_path, 'r', encoding='utf-8') as f: - info = json.load(f) - - # Split the format string by commas to get a list of individual format selectors. - # This enables parallel downloads of different formats or format groups. - # For example, '18,140,299/298' becomes ['18', '140', '299/298'], - # and each item will be downloaded in a separate yt-dlp process. - if download_format and isinstance(download_format, str): - formats_to_download_initial = [selector.strip() for selector in download_format.split(',') if selector.strip()] - else: - # Fallback for safety, though download_format should always be a string. - formats_to_download_initial = [] - - if not formats_to_download_initial: - raise AirflowException("No valid download format selectors were found after parsing.") - - # --- Filter and resolve requested formats --- - final_formats_to_download = [] - if not available_formats: - logger.warning("List of available formats is empty. Cannot validate numeric selectors, but will attempt to resolve generic selectors.") - - for selector in formats_to_download_initial: - # A selector is considered generic if it contains keywords like 'best' or filter brackets '[]'. - is_generic = bool(re.search(r'(best|\[|\])', selector)) - - if is_generic: - resolved_selector = _resolve_generic_selector(selector, info_json_path, logger) - if resolved_selector: - # The resolver returns a list for '+' selectors, or a string for others. - resolved_formats = resolved_selector if isinstance(resolved_selector, list) else [resolved_selector] - - for res_format in resolved_formats: - # Prefer -dashy version if available and the format is a simple numeric ID - if res_format.isdigit() and f"{res_format}-dashy" in available_formats: - final_format = f"{res_format}-dashy" - logger.info(f"Resolved format '{res_format}' from selector '{selector}'. Preferred '-dashy' version: '{final_format}'.") - else: - final_format = res_format - - # Validate the chosen format against available formats - if available_formats: - individual_ids = re.split(r'[/+]', final_format) - is_available = any(fid in available_formats for fid in individual_ids) - - if is_available: - final_formats_to_download.append(final_format) - else: - logger.warning(f"Resolved format '{final_format}' (from '{selector}') contains no available formats. Skipping.") - else: - # Cannot validate, so we trust the resolver's output. - final_formats_to_download.append(final_format) - else: - logger.warning(f"Could not resolve generic selector '{selector}' using yt-dlp. 
Skipping.") - else: - # This is a numeric-based selector (e.g., '140' or '299/298' or '140-dashy'). - # Validate it against the available formats. - if not available_formats: - logger.warning(f"Cannot validate numeric selector '{selector}' because available formats list is empty. Assuming it's valid.") - final_formats_to_download.append(selector) - continue - - individual_ids = re.split(r'[/+]', selector) - is_available = any(fid in available_formats for fid in individual_ids) - - if is_available: - final_formats_to_download.append(selector) - else: - logger.warning(f"Requested numeric format selector '{selector}' contains no available formats. Skipping.") + final_formats_to_download = download_format if not final_formats_to_download: - raise AirflowException("None of the requested formats are available for this video.") + raise AirflowException("The format_id for this job is empty.") + + # --- Check for expired URLs before attempting download --- + if _check_format_expiry(info_json_path, [final_formats_to_download], logger): + # If URL is expired, we need to fail the task so it can be re-queued for auth. + # We also need to record a failure for the profile. + raise AirflowException("Format URL has expired. The job must be re-authenticated.") # --- Initial Download and Probe --- + # The worker now handles one format at a time. successful_files, failed_files = _download_and_probe_formats(final_formats_to_download) if params.get('yt_dlp_test_mode'): @@ -690,9 +657,10 @@ def download_and_probe(token_data: dict, available_formats: list[str], **context logger.warning(f"Probe failed for {len(failed_files)} file(s). Attempting one re-download for failed files...") delay_between_formats = params.get('delay_between_formats_s', 0) - if delay_between_formats > 0: - logger.info(f"Waiting {delay_between_formats}s before re-download attempt...") - time.sleep(delay_between_formats) + # This delay is no longer needed in the profile-first model. + # if delay_between_formats > 0: + # logger.info(f"Waiting {delay_between_formats}s before re-download attempt...") + # time.sleep(delay_between_formats) format_ids_to_retry = [] # Since each download is now for a specific selector and the output template @@ -744,79 +712,9 @@ def download_and_probe(token_data: dict, available_formats: list[str], **context logger.error(f"Error during cleanup for file {f}: {e}", exc_info=True) # Do not fail the task for a cleanup error, just log it. - # --- Move completed job directory to final destination --- - try: - video_id = _extract_video_id(original_url) - if not video_id: - logger.error(f"Could not extract video_id from URL '{original_url}' for final move. Skipping.") - else: - # --- Rename info.json to a simple format before moving --- - path_to_info_json_for_move = info_json_path # Default to original path - try: - # info_json_path is the full path to the original info.json - if info_json_path and os.path.exists(info_json_path): - new_info_json_name = f"info_{video_id}.json" - new_info_json_path = os.path.join(os.path.dirname(info_json_path), new_info_json_name) - - if info_json_path != new_info_json_path: - logger.info(f"Renaming '{info_json_path}' to '{new_info_json_path}' for final delivery.") - os.rename(info_json_path, new_info_json_path) - path_to_info_json_for_move = new_info_json_path - else: - logger.info("info.json already has the simple name. 
No rename needed.") - else: - logger.warning("Could not find info.json to rename before moving.") - except Exception as rename_e: - logger.error(f"Failed to rename info.json before move: {rename_e}", exc_info=True) - # --- End of rename logic --- - - source_dir = download_dir # This is the job_dir_path - - # Group downloads into 10-minute batch folders based on completion time. - now = datetime.now() - rounded_minute = (now.minute // 10) * 10 - timestamp_str = now.strftime('%Y%m%dT%H') + f"{rounded_minute:02d}" - - final_dir_base = os.path.join(Variable.get('DOWNLOADS_TEMP', '/opt/airflow/downloadfiles'), 'videos', 'ready', timestamp_str) - final_dir_path = os.path.join(final_dir_base, video_id) - - os.makedirs(final_dir_base, exist_ok=True) - - logger.info(f"Moving completed job from '{source_dir}' to final destination '{final_dir_path}'") - if os.path.exists(final_dir_path): - logger.warning(f"Destination '{final_dir_path}' already exists. It will be removed and replaced.") - shutil.rmtree(final_dir_path) - - # Create the destination directory and move only the essential files, then clean up the source. - # This ensures no temporary or junk files are carried over. - os.makedirs(final_dir_path) - - # 1. Move the info.json file - if path_to_info_json_for_move and os.path.exists(path_to_info_json_for_move): - shutil.move(path_to_info_json_for_move, final_dir_path) - logger.info(f"Moved '{os.path.basename(path_to_info_json_for_move)}' to destination.") - - # 2. Move the media files (or their .empty placeholders) - files_to_move = [] - if params.get('yt_dlp_cleanup_mode', False): - files_to_move = [f"{f}.empty" for f in final_success_list] - else: - files_to_move = final_success_list - - for f in files_to_move: - if os.path.exists(f): - shutil.move(f, final_dir_path) - logger.info(f"Moved '{os.path.basename(f)}' to destination.") - else: - logger.warning(f"File '{f}' expected but not found for moving.") - - # 3. Clean up the original source directory - logger.info(f"Cleaning up original source directory '{source_dir}'") - shutil.rmtree(source_dir) - logger.info(f"Successfully moved job to '{final_dir_path}' and cleaned up source.") - except Exception as e: - logger.error(f"Failed to move completed job directory: {e}", exc_info=True) - # Do not fail the task for a move error, just log it. + # The logic for moving files to a final destination is now handled by the `ytops-client download py` tool + # when `output_to_airflow_ready_dir` is used. This worker no longer needs to perform the move. + # It just needs to return the list of successfully downloaded files. 
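+    # (Reference sketch, not authoritative: based on the removed move logic above and on the
+    #  `output_to_airflow_ready_dir` / `airflow_ready_dir_base_path` settings in
+    #  policies/11_direct_docker_download_simulation.yaml, the tool is expected to place each
+    #  completed job under a timestamped batch folder per video, roughly:
+    #      <airflow_ready_dir_base_path>/<YYYYMMDDTHHMM>/<video_id>/info_<video_id>.json
+    #      <airflow_ready_dir_base_path>/<YYYYMMDDTHHMM>/<video_id>/<media files>
+    #  so that the existing S3 uploader DAG can pick the batch directories up unchanged.)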
return final_success_list except Exception as e: @@ -834,7 +732,8 @@ def download_and_probe(token_data: dict, available_formats: list[str], **context "PRIVATE_VIDEO": ['private video'], "VIDEO_REMOVED": ['video has been removed'], "VIDEO_UNAVAILABLE": ['video unavailable'], - "HTTP_403_FORBIDDEN": ['http error 403: forbidden'] + "HTTP_403_FORBIDDEN": ['http error 403: forbidden'], + "URL_EXPIRED": ['urls have expired'] } for code, patterns in unrecoverable_patterns.items(): @@ -846,54 +745,56 @@ def download_and_probe(token_data: dict, available_formats: list[str], **context ti.xcom_push(key='download_error_details', value=error_details) raise e -@task -def mark_url_as_success(job_data: dict, downloaded_file_paths: list, **context): - """Records the successful download result in Redis.""" +@task(trigger_rule='all_done') +def unlock_profile(worker_data: dict, **context): + """ + Unlocks the profile and records activity (success or failure). + This task runs regardless of upstream success or failure. + """ params = context['params'] - url = job_data['original_url'] - result_data = { - 'status': 'success', 'end_time': time.time(), 'url': url, - 'downloaded_file_paths': downloaded_file_paths, **job_data, - 'dag_run_id': context['dag_run'].run_id, - } - client = _get_redis_client(params['redis_conn_id']) - - # Update activity counters - try: - proxy_url = job_data.get('socks_proxy') - account_id = job_data.get('successful_account_id') - now = time.time() - # Use a unique member to prevent collisions, e.g., dag_run_id - member = context['dag_run'].run_id - - if proxy_url: - proxy_key = f"activity:per_proxy:{proxy_url}" - client.zadd(proxy_key, {member: now}) - client.expire(proxy_key, 3600 * 2) # Expire after 2 hours - if account_id: - account_key = f"activity:per_account:{account_id}" - client.zadd(account_key, {member: now}) - client.expire(account_key, 3600 * 2) # Expire after 2 hours - except Exception as e: - logger.error(f"Could not update activity counters: {e}", exc_info=True) + dag_run = context['dag_run'] - # Update client-specific stats - try: - machine_id = params.get('machine_id') or socket.gethostname() - clients_str = job_data.get('clients', params.get('clients', '')) # Prefer clients from job, fallback to params - _update_client_stats(client, clients_str, 'success', url, machine_id, context['dag_run'].run_id) - except Exception as e: - logger.error(f"Could not update client stats on success: {e}", exc_info=True) - - progress_queue = f"{params['queue_name']}_progress" - result_queue = f"{params['queue_name']}_result" + # Check if the DAG run failed + failed_tasks = [ti for ti in dag_run.get_task_instances() if ti.state == 'failed'] + is_success = not failed_tasks + activity_type = 'download' if is_success else 'download_error' - with client.pipeline() as pipe: - pipe.hset(result_queue, url, json.dumps(result_data)) - pipe.hdel(progress_queue, url) - pipe.execute() + # Use XCom pull to get the data from the initial task, which is more robust + # in case of upstream failures where the data is not passed directly. + ti = context['task_instance'] + worker_data_pulled = ti.xcom_pull(task_ids='lock_profile_and_find_task') + + locked_profile = worker_data_pulled.get('locked_profile') if worker_data_pulled else None + + if not locked_profile: + logger.warning("No locked_profile data found from 'lock_profile_and_find_task'. 
Cannot unlock or record activity.") + return + + profile_name = locked_profile.get('name') + owner_id = f"airflow_dl_worker_{dag_run.run_id}" + + try: + redis_conn_id = params['redis_conn_id'] + redis_env = params['redis_env'] + redis_hook = _get_redis_client(redis_conn_id, return_hook=True) + key_prefix = f"{redis_env}_profile_mgmt_" + pm = ProfileManager(redis_hook=redis_hook, key_prefix=key_prefix) - logger.info(f"Stored success result for URL '{url}' and removed from progress queue.") + logger.info(f"Recording activity '{activity_type}' for profile '{profile_name}'.") + pm.record_activity(profile_name, activity_type) + + logger.info(f"Unlocking profile '{profile_name}' with owner '{owner_id}'.") + # Read cooldown from config if available + cooldown_str = pm.get_config('unlock_cooldown_seconds') + cooldown = int(cooldown_str) if cooldown_str and cooldown_str.isdigit() else None + + pm.unlock_profile(profile_name, owner=owner_id, rest_for_seconds=cooldown) + if cooldown: + logger.info(f"Profile '{profile_name}' was put into COOLDOWN for {cooldown} seconds.") + + except Exception as e: + logger.error(f"Failed to unlock profile or record activity for '{profile_name}': {e}", exc_info=True) + # Do not fail the task, as this is a cleanup step. @task(trigger_rule='one_failed') def report_failure_and_continue(**context): @@ -1121,8 +1022,7 @@ def handle_download_failure_branch(**context): error_code = download_error_details.get('error_code') unrecoverable_video_errors = [ "AGE_GATED_SIGN_IN", "MEMBERS_ONLY", "VIDEO_PROCESSING", "COPYRIGHT_REMOVAL", - "GEO_RESTRICTED", "VIDEO_UNAVAILABLE", "PRIVATE_VIDEO", "VIDEO_REMOVED", - "HTTP_403_FORBIDDEN" + "GEO_RESTRICTED", "VIDEO_UNAVAILABLE", "PRIVATE_VIDEO", "VIDEO_REMOVED" ] if error_code in unrecoverable_video_errors: logger.warning(f"Unrecoverable video error '{error_code}' during download. Skipping.") @@ -1143,34 +1043,28 @@ with DAG( schedule=None, start_date=days_ago(1), catchup=False, - tags=['ytdlp', 'worker'], + tags=['ytdlp', 'worker', 'v2'], doc_md=__doc__, render_template_as_native_obj=True, is_paused_upon_creation=True, params={ - 'queue_name': Param(DEFAULT_QUEUE_NAME, type="string"), + # --- V2 Profile Management Parameters --- + 'redis_env': Param("sim_download", type="string", title="[V2 Profiles] Redis Environment", description="The environment for v2 profile management (e.g., 'sim_download'). Determines the Redis key prefix."), + 'profile_prefix': Param("download_user", type="string", title="[V2 Profiles] Profile Prefix", description="The prefix for download profiles that workers should attempt to lock."), + 'redis_conn_id': Param(DEFAULT_REDIS_CONN_ID, type="string"), 'machine_id': Param(None, type=["string", "null"]), - 'clients': Param('mweb,web_camoufox,tv', type="string", description="Comma-separated list of clients for token generation. e.g. mweb,tv,web_camoufox"), + 'clients': Param('tv_simply', type="string", description="Comma-separated list of clients for token generation. e.g. mweb,tv,web_camoufox"), 'output_path_template': Param("%(id)s.f%(format_id)s.%(ext)s", type="string", title="[Worker Param] Output Path Template", description="Output filename template for yt-dlp. 
It is highly recommended to include `%(format_id)s` to prevent filename collisions when downloading multiple formats."), 'retry_on_probe_failure': Param(False, type="boolean"), - 'skip_probe': Param(False, type="boolean", title="[Worker Param] Skip Probe", description="If True, skips the ffmpeg probe of downloaded files."), + 'skip_probe': Param(True, type="boolean", title="[Worker Param] Skip Probe", description="If True, skips the ffmpeg probe of downloaded files."), 'yt_dlp_cleanup_mode': Param(False, type="boolean", title="[Worker Param] yt-dlp Cleanup Mode", description="If True, creates a .empty file and deletes the original media file after successful download and probe."), - 'delay_between_formats_s': Param(15, type="integer", title="[Worker Param] Delay Between Formats (s)", description="Delay in seconds between downloading each format when multiple formats are specified. A 22s wait may be effective for batch downloads, while 6-12s may suffice if cookies are refreshed regularly."), + 'delay_between_formats_s': Param(0, type="integer", title="[Worker Param] Delay Between Formats (s)", description="No longer used in profile-first model, as each format is a separate task."), 'yt_dlp_test_mode': Param(False, type="boolean", title="[Worker Param] yt-dlp Test Mode", description="If True, runs yt-dlp with --test flag (dry run without downloading)."), - 'fragment_retries': Param(2, type="integer", title="[Worker Param] Fragment Retries", description="Number of retries for a fragment before giving up. Default is 2 to fail fast on expired tokens."), - 'limit_rate': Param('5M', type=["string", "null"], title="[Worker Param] Limit Rate", description="Download speed limit (e.g., 50K, 4.2M)."), - 'socket_timeout': Param(15, type="integer", title="[Worker Param] Socket Timeout", description="Timeout in seconds for socket operations."), - 'min_sleep_interval': Param(5, type="integer", title="[Worker Param] Min Sleep Interval", description="Minimum time to sleep between downloads (seconds)."), - 'max_sleep_interval': Param(10, type="integer", title="[Worker Param] Max Sleep Interval", description="Maximum time to sleep between downloads (seconds)."), - 'download_format': Param( - 'bestvideo[ext=mp4]+bestaudio[ext=m4a]/best[ext=mp4]/best', - type="string", - title="[Worker Param] Download Format", - description="Custom yt-dlp format string. Common presets: [1] 'bestvideo[ext=mp4]+bestaudio[ext=m4a]/best[ext=mp4]/best' (Default, best quality MP4). [2] '18-dashy/18,140-dashy/140,133-dashy/134-dashy/136-dashy/137-dashy/250-dashy/298-dashy/299-dashy' (Legacy formats). [3] '299-dashy/298-dashy/250-dashy/137-dashy/136-dashy/135-dashy/134-dashy/133-dashy' (High-framerate formats)." 
- ), + 'download_format': Param(None, type=["string", "null"], title="[DEPRECATED] Download Format", description="This is now specified in the granular task generated by the auth worker."), + 'pass_without_formats_splitting': Param(False, type="boolean", title="[DEPRECATED] Pass format string without splitting"), 'downloader': Param( - 'cli', + 'py', type="string", enum=['py', 'aria-rpc', 'cli'], title="Download Tool", @@ -1180,52 +1074,37 @@ with DAG( 'aria_port': Param(6800, type="integer", title="Aria2c Port", description="For 'aria-rpc' downloader: Port of the aria2c RPC server."), 'aria_secret': Param('SQGCQPLVFQIASMPNPOJYLVGJYLMIDIXDXAIXOTX', type="string", title="Aria2c Secret", description="For 'aria-rpc' downloader: Secret token."), 'yt_dlp_extra_args': Param( - '--no-part --restrict-filenames', + '--verbose --no-resize-buffer --buffer-size 4M --fragment-retries 2 --concurrent-fragments 8 --socket-timeout 15 --sleep-interval 5 --max-sleep-interval 10 --no-part --restrict-filenames', type=["string", "null"], title="Extra yt-dlp arguments", description="Extra command-line arguments for yt-dlp during download." ), # --- Manual Run / Internal Parameters --- - 'job_data': Param(None, type=["object", "string", "null"], title="[Internal] Job Data from Dispatcher", description="This parameter is set by the dispatcher DAG and should not be used for manual runs."), - 'worker_queue': Param(None, type=["string", "null"], title="[Internal] Worker Queue", description="This parameter is set by the dispatcher DAG and should not be used for manual runs."), + 'manual_job_input': Param(None, type=["string", "null"], title="[DEPRECATED] Job Input"), + 'job_data': Param(None, type=["object", "string", "null"], title="[Internal] Job Data from Dispatcher", description="This is no longer used. The worker finds its own job."), + 'worker_queue': Param(None, type=["string", "null"], title="[Internal] Worker Queue", description="This parameter is set by the dispatcher DAG."), } ) as dag: - job_data = get_download_job_from_conf() - - # --- Task Instantiation --- + # --- Task Instantiation for Profile-First Model --- - # Main success/failure handlers - fatal_error_task = handle_fatal_error() - report_failure_task = report_failure_and_continue() - continue_loop_task = continue_processing_loop() - unrecoverable_video_error_task = handle_unrecoverable_video_error() + # 1. Start by locking a profile and finding a task. + worker_data = lock_profile_and_find_task() - # --- Download and Processing Group --- + # 2. Define the download processing group. with TaskGroup("download_processing", tooltip="Download and media processing") as download_processing_group: - list_formats_task = list_available_formats(token_data=job_data) - download_task = download_and_probe( - token_data=job_data, - available_formats=list_formats_task, - ) - download_branch_task = handle_download_failure_branch.override(trigger_rule='one_failed')() - success_task = mark_url_as_success( - job_data=job_data, - downloaded_file_paths=download_task, - ) - + list_formats_task = list_available_formats(worker_data=worker_data) + download_task = download_and_probe(worker_data=worker_data) list_formats_task >> download_task - download_task >> download_branch_task - download_branch_task >> [report_failure_task, unrecoverable_video_error_task] - download_task >> success_task - success_task >> continue_loop_task - # If the initial job setup succeeds, proceed to the download group. - # If it fails, trigger the fatal error handler. 
This prevents fatal_error_task - # from being an "island" task that gets triggered by any other failure in the DAG. - job_data.operator >> download_processing_group - job_data.operator >> fatal_error_task + # 3. Define the final cleanup and loop continuation tasks. + unlock_profile_task = unlock_profile(worker_data=worker_data) + continue_loop_task = continue_processing_loop() - # Any failure or skip path should continue the loop to process the next URL. - report_failure_task >> continue_loop_task - fatal_error_task >> continue_loop_task - unrecoverable_video_error_task >> continue_loop_task + # --- DAG Dependencies --- + # Start -> Download Group -> Unlock -> Continue Loop + worker_data >> download_processing_group + download_processing_group >> unlock_profile_task + + # The loop continues regardless of whether the download succeeded or failed. + # The unlock_profile task (with trigger_rule='all_done') ensures it always runs. + unlock_profile_task >> continue_loop_task diff --git a/airflow/dags/ytdlp_s3_uploader.py b/airflow/dags/ytdlp_s3_uploader.py index ebbc637..8c3fb63 100644 --- a/airflow/dags/ytdlp_s3_uploader.py +++ b/airflow/dags/ytdlp_s3_uploader.py @@ -278,8 +278,9 @@ def run_s3_upload_batch(**context): try: for batch_dir_path in processed_batch_dirs: try: - # Use rsync with an empty source to efficiently delete the contents of the batch directory - # The trailing slash on both source and destination is important. + # Use rsync with an empty source to efficiently delete the contents of the batch directory. + # This is a performant alternative to `shutil.rmtree`, which can be slow with many small files. + # The trailing slash on both source and destination is important for rsync's behavior. rsync_cmd = [ 'rsync', '-a', '--delete', @@ -287,14 +288,21 @@ def run_s3_upload_batch(**context): f'{batch_dir_path}/' ] subprocess.run(rsync_cmd, check=True, capture_output=True, text=True) - + # After the contents are deleted, remove the now-empty directory os.rmdir(batch_dir_path) logger.info(f"Successfully removed {batch_dir_path}") except Exception as cleanup_e: - logger.error(f"Failed to remove directory {batch_dir_path}: {cleanup_e}", exc_info=True) - if isinstance(cleanup_e, subprocess.CalledProcessError): - logger.error(f"rsync STDERR: {cleanup_e.stderr}") + if isinstance(cleanup_e, OSError) and "Directory not empty" in str(cleanup_e): + # This can happen in a race condition where a download worker adds a new video + # to the batch directory after rsync has emptied it but before rmdir runs. + # We log it as a warning; the directory will be re-processed in the next cycle + # because this task rescans all directories on each run. + logger.warning(f"Could not remove directory {batch_dir_path}, it was not empty: {cleanup_e}. It will be re-processed on the next run.") + else: + logger.error(f"Failed to remove directory {batch_dir_path}: {cleanup_e}", exc_info=True) + if isinstance(cleanup_e, subprocess.CalledProcessError): + logger.error(f"rsync STDERR: {cleanup_e.stderr}") finally: # Clean up the temporary empty directory shutil.rmtree(empty_dir_for_rsync) diff --git a/ansible/README.md b/ansible/README.md index 343c56c..8a00cae 100644 --- a/ansible/README.md +++ b/ansible/README.md @@ -37,3 +37,10 @@ These playbooks are used for more specific tasks or are called by the main playb - `playbook-dl.yml`: Older worker deployment logic. Superseded by `playbook-worker.yml`. - `playbook-depricated.dl.yml`: Older worker deployment logic. Superseded by `playbook-worker.yml`. 
+## Current Goal: Disable Camoufox & Enable Aria2 + +The current objective is to modify the worker deployment (`playbook-worker.yml` and its role `roles/ytdlp-worker/tasks/main.yml`) to: +1. **Disable Camoufox**: Prevent the build, configuration generation, and startup of all `camoufox` services. +2. **Enable Aria2**: Ensure the `aria2-pro` service is built and started correctly on worker nodes. + +The `playbook-worker.yml` has already been updated to build the `aria2-pro` image. The next steps will involve modifying `roles/ytdlp-worker/tasks/main.yml` to remove the Camoufox-related tasks. diff --git a/ansible/playbook-install-local.yml b/ansible/playbook-install-local.yml new file mode 100644 index 0000000..87b4b62 --- /dev/null +++ b/ansible/playbook-install-local.yml @@ -0,0 +1,44 @@ +--- +- name: Install Local Development Packages + hosts: airflow_workers, airflow_master + gather_facts: no + vars_files: + - "{{ inventory_dir }}/group_vars/all/generated_vars.yml" + + tasks: + - name: Ensure python3-pip is installed + ansible.builtin.apt: + name: python3-pip + state: present + update_cache: yes + become: yes + + - name: Upgrade pip to the latest version (for systems without PEP 668) + ansible.builtin.command: python3 -m pip install --upgrade pip + register: pip_upgrade_old_systems + changed_when: "'Requirement already satisfied' not in pip_upgrade_old_systems.stdout" + failed_when: false # This task will fail on newer systems, which is expected. + become: yes + become_user: "{{ ansible_user }}" + + - name: Upgrade pip to the latest version (for systems with PEP 668) + ansible.builtin.command: python3 -m pip install --upgrade pip --break-system-packages + when: pip_upgrade_old_systems.rc != 0 and 'externally-managed-environment' in pip_upgrade_old_systems.stderr + changed_when: "'Requirement already satisfied' not in pip_upgrade_new_systems.stdout" + register: pip_upgrade_new_systems + become: yes + become_user: "{{ ansible_user }}" + + - name: Install or upgrade yt-dlp to the latest nightly version + ansible.builtin.command: python3 -m pip install -U --pre "yt-dlp[default]" --break-system-packages + register: ytdlp_install + changed_when: "'Requirement already satisfied' not in ytdlp_install.stdout" + become: yes + become_user: "{{ ansible_user }}" + + - name: Install requests library + ansible.builtin.command: python3 -m pip install requests==2.31.0 --break-system-packages + register: requests_install + changed_when: "'Requirement already satisfied' not in requests_install.stdout" + become: yes + become_user: "{{ ansible_user }}" diff --git a/ansible/playbook-sync-local.yml b/ansible/playbook-sync-local.yml index 472a4d2..7b15bfe 100644 --- a/ansible/playbook-sync-local.yml +++ b/ansible/playbook-sync-local.yml @@ -1,41 +1,22 @@ --- -- name: Sync Local Development Files to Workers - hosts: airflow_workers +- name: Sync Local Development Files to Workers and Master + hosts: airflow_workers, airflow_master gather_facts: no vars_files: - "{{ inventory_dir }}/group_vars/all/generated_vars.yml" + vars: + sync_dir: "{{ airflow_worker_dir if 'airflow_workers' in group_names else airflow_master_dir }}" pre_tasks: - name: Announce local sync debug: - msg: "Syncing local dev files to {{ inventory_hostname }} at {{ airflow_worker_dir }}" + msg: "Syncing local dev files to {{ inventory_hostname }} at {{ sync_dir }}" tasks: - - name: Ensure python3-pip is installed - ansible.builtin.apt: - name: python3-pip - state: present - update_cache: yes - become: yes - - - name: Check if yt-dlp is installed - 
ansible.builtin.command: which yt-dlp - register: ytdlp_check - changed_when: false - failed_when: false - become: yes - become_user: "{{ ansible_user }}" - - - name: Install yt-dlp if not found - ansible.builtin.command: python3 -m pip install -U "yt-dlp[default]" --break-system-packages - when: ytdlp_check.rc != 0 - become: yes - become_user: "{{ ansible_user }}" - - - name: Sync thrift_model directory to workers + - name: Sync thrift_model directory ansible.posix.synchronize: src: ../thrift_model/ - dest: "{{ airflow_worker_dir }}/thrift_model/" + dest: "{{ sync_dir }}/thrift_model/" rsync_opts: - "--delete" - "--exclude=.DS_Store" @@ -46,10 +27,10 @@ become: yes become_user: "{{ ansible_user }}" - - name: Sync pangramia package to workers + - name: Sync pangramia package ansible.posix.synchronize: src: ../pangramia/ - dest: "{{ airflow_worker_dir }}/pangramia/" + dest: "{{ sync_dir }}/pangramia/" rsync_opts: - "--delete" - "--exclude=.DS_Store" @@ -60,10 +41,10 @@ become: yes become_user: "{{ ansible_user }}" - - name: Sync ytops_client directory to workers + - name: Sync ytops_client directory ansible.posix.synchronize: src: ../ytops_client/ - dest: "{{ airflow_worker_dir }}/ytops_client/" + dest: "{{ sync_dir }}/ytops_client/" rsync_opts: - "--delete" - "--exclude=.DS_Store" @@ -74,10 +55,10 @@ become: yes become_user: "{{ ansible_user }}" - - name: Sync policies directory to workers + - name: Sync policies directory ansible.posix.synchronize: src: ../policies/ - dest: "{{ airflow_worker_dir }}/policies/" + dest: "{{ sync_dir }}/policies/" rsync_opts: - "--delete" - "--exclude=.DS_Store" @@ -88,22 +69,33 @@ become: yes become_user: "{{ ansible_user }}" - - name: Ensure bin directory exists on workers for client utilities + - name: Sync ytdlp.json + ansible.posix.synchronize: + src: ../ytdlp.json + dest: "{{ sync_dir }}/ytdlp.json" + perms: yes + become: yes + become_user: "{{ ansible_user }}" + + - name: Ensure bin directory exists for client utilities ansible.builtin.file: - path: "{{ airflow_worker_dir }}/bin" + path: "{{ sync_dir }}/bin" state: directory mode: '0755' become: yes become_user: "{{ ansible_user }}" - - name: Sync client utility scripts to workers + - name: Sync client utility scripts ansible.posix.synchronize: src: "../{{ item }}" - dest: "{{ airflow_worker_dir }}/{{ item }}" + dest: "{{ sync_dir }}/{{ item }}" perms: yes loop: - "cli.config" - "package_client.py" + - "setup.py" - "bin/ytops-client" + - "bin/build-yt-dlp-image" + - "VERSION.client" become: yes become_user: "{{ ansible_user }}" diff --git a/ansible/playbook-worker.yml b/ansible/playbook-worker.yml index c9cc841..cb26110 100644 --- a/ansible/playbook-worker.yml +++ b/ansible/playbook-worker.yml @@ -282,6 +282,120 @@ become: yes become_user: "{{ ansible_user }}" + - name: Install base system packages for tools + ansible.builtin.apt: + name: + - unzip + - wget + - xz-utils + state: present + update_cache: yes + become: yes + + - name: Install required Python packages + ansible.builtin.pip: + name: + - python-dotenv + - aria2p + - tabulate + - redis + - PyYAML + - aiothrift + - PySocks + state: present + extra_args: --break-system-packages + become: yes + + - name: Install pinned Python packages + ansible.builtin.pip: + name: + - brotli==1.1.0 + - certifi==2025.10.05 + - curl-cffi==0.13.0 + - mutagen==1.47.0 + - pycryptodomex==3.23.0 + - secretstorage==3.4.0 + - urllib3==2.5.0 + - websockets==15.0.1 + state: present + extra_args: --break-system-packages + become: yes + + - name: Upgrade yt-dlp and bgutil 
provider + ansible.builtin.shell: | + set -e + python3 -m pip install -U --pre "yt-dlp[default,curl-cffi]" --break-system-packages + python3 -m pip install --no-cache-dir -U bgutil-ytdlp-pot-provider --break-system-packages + args: + warn: false + become: yes + changed_when: true + + - name: Check for FFmpeg + stat: + path: /usr/local/bin/ffmpeg + register: ffmpeg_binary + become: yes + + - name: Install FFmpeg + when: not ffmpeg_binary.stat.exists + become: yes + block: + - name: Create ffmpeg directory + ansible.builtin.file: + path: /opt/ffmpeg + state: directory + mode: '0755' + + - name: Download and unarchive FFmpeg + ansible.builtin.unarchive: + src: "https://github.com/yt-dlp/FFmpeg-Builds/releases/download/latest/ffmpeg-master-latest-linux64-gpl.tar.xz" + dest: /opt/ffmpeg + remote_src: yes + extra_opts: [--strip-components=1] + + - name: Symlink ffmpeg and ffprobe + ansible.builtin.file: + src: "/opt/ffmpeg/bin/{{ item }}" + dest: "/usr/local/bin/{{ item }}" + state: link + force: yes + loop: + - ffmpeg + - ffprobe + + - name: Check for Deno + stat: + path: /usr/local/bin/deno + register: deno_binary + become: yes + + - name: Install Deno + when: not deno_binary.stat.exists + become: yes + block: + - name: Download and unarchive Deno + ansible.builtin.unarchive: + src: https://github.com/denoland/deno/releases/latest/download/deno-x86_64-unknown-linux-gnu.zip + dest: /usr/local/bin/ + remote_src: yes + mode: '0755' + + - name: Check if ytops_client requirements.txt exists + stat: + path: "{{ airflow_worker_dir }}/ytops_client/requirements.txt" + register: ytops_client_reqs + become: yes + become_user: "{{ ansible_user }}" + + - name: Install dependencies from ytops_client/requirements.txt + ansible.builtin.pip: + requirements: "{{ airflow_worker_dir }}/ytops_client/requirements.txt" + state: present + extra_args: --break-system-packages + when: ytops_client_reqs.stat.exists + become: yes + # Include Docker health check - name: Include Docker health check tasks include_tasks: tasks/docker_health_check.yml diff --git a/bin/build-yt-dlp-image b/bin/build-yt-dlp-image new file mode 100755 index 0000000..f3f6858 --- /dev/null +++ b/bin/build-yt-dlp-image @@ -0,0 +1,35 @@ +#!/usr/bin/env bash +# Script to build and tag the yt-dlp Docker image. + +set -e + +SCRIPT_DIR=$(dirname "$(realpath "$0")") +PROJECT_ROOT=$(realpath "$SCRIPT_DIR/..") +DOCKERFILE_DIR="$PROJECT_ROOT/ytops_client/youtube-dl" +IMAGE_NAME=${1:-"ytops/yt-dlp"} + +# The default version is 'latest'. If a release version file exists, use that for tagging. +VERSION="latest" +VERSION_FILE="$DOCKERFILE_DIR/release-versions/latest.txt" + +if [ -f "$VERSION_FILE" ]; then + VERSION=$(cat "$VERSION_FILE") + echo "Found version: $VERSION from $VERSION_FILE" +fi + +echo "Building Docker image: $IMAGE_NAME:$VERSION" +echo "Dockerfile location: $DOCKERFILE_DIR" + +docker build -t "$IMAGE_NAME:$VERSION" "$DOCKERFILE_DIR" + +if [ "$VERSION" != "latest" ]; then + echo "Also tagging as: $IMAGE_NAME:latest" + docker tag "$IMAGE_NAME:$VERSION" "$IMAGE_NAME:latest" +fi + +echo "Build complete." +echo "Image tags created:" +echo " - $IMAGE_NAME:$VERSION" +if [ "$VERSION" != "latest" ]; then + echo " - $IMAGE_NAME:latest" +fi diff --git a/bin/ytops-client b/bin/ytops-client index 46138aa..513fd14 100755 --- a/bin/ytops-client +++ b/bin/ytops-client @@ -1,10 +1,14 @@ -#!/bin/sh -set -e -# Find the directory where this script is located. -SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" -# Go up one level to the project root. 
-PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)" -# Set PYTHONPATH to include the project root, so we can import 'ytops_client' -export PYTHONPATH="$PROJECT_ROOT${PYTHONPATH:+:$PYTHONPATH}" -# Execute the Python CLI script as a module to handle relative imports -exec python3 -m ytops_client.cli "$@" +#!/usr/bin/env python3 +import os +import sys + +# Ensure the project root is in the Python path +SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) +PROJECT_ROOT = os.path.abspath(os.path.join(SCRIPT_DIR, '..')) +if PROJECT_ROOT not in sys.path: + sys.path.insert(0, PROJECT_ROOT) + +from ytops_client.cli import main + +if __name__ == "__main__": + sys.exit(main()) diff --git a/cli.auth.config b/cli.auth.config new file mode 100644 index 0000000..7bb7f64 --- /dev/null +++ b/cli.auth.config @@ -0,0 +1,17 @@ +# This is a yt-dlp configuration file. +# It contains one command-line option per line. + +#--no-progress +--format-sort "res,ext:mp4:m4a" +--recode-video mp4 +--no-playlist +--no-overwrites +--continue +--output "%(extractor)s - %(title)s.%(ext)s" +--no-mtime +--verbose +#--simulate +# Performance options +#--no-resize-buffer +#--buffer-size 4M +#--concurrent-fragments 8 diff --git a/cli.config b/cli.config index 2703daa..7bb7f64 100644 --- a/cli.config +++ b/cli.config @@ -1,41 +1,17 @@ -# yt-dlp configuration for format_download.py +# This is a yt-dlp configuration file. +# It contains one command-line option per line. -# Continue on broken downloads -#--continue - -# Do not simulate ---no-simulate - -# Do not write info.json file (we already have it) ---no-write-info-json - -# Continue on download errors ---ignore-errors - -# Do not download playlist +#--no-progress +--format-sort "res,ext:mp4:m4a" +--recode-video mp4 --no-playlist - -# Retry fragments 10 times ---fragment-retries 10 - -# Use a fixed buffer size to stabilize throughput and avoid traffic shaping ---no-resize-buffer ---buffer-size 4M - -# Socket timeout ---socket-timeout 15 - -# Sleep interval ---min-sleep-interval 5 ---max-sleep-interval 10 - -# Progress ---progress - -# Merge to mp4 by default ---merge-output-format mp4 - -# Don't use "NA" in filenames if metadata is missing ---output-na-placeholder "" - ---no-part +--no-overwrites +--continue +--output "%(extractor)s - %(title)s.%(ext)s" +--no-mtime +--verbose +#--simulate +# Performance options +#--no-resize-buffer +#--buffer-size 4M +#--concurrent-fragments 8 diff --git a/cli.download.config b/cli.download.config new file mode 100644 index 0000000..7bb7f64 --- /dev/null +++ b/cli.download.config @@ -0,0 +1,17 @@ +# This is a yt-dlp configuration file. +# It contains one command-line option per line. + +#--no-progress +--format-sort "res,ext:mp4:m4a" +--recode-video mp4 +--no-playlist +--no-overwrites +--continue +--output "%(extractor)s - %(title)s.%(ext)s" +--no-mtime +--verbose +#--simulate +# Performance options +#--no-resize-buffer +#--buffer-size 4M +#--concurrent-fragments 8 diff --git a/policies/10_direct_docker_auth_simulation.yaml b/policies/10_direct_docker_auth_simulation.yaml new file mode 100644 index 0000000..f0dac53 --- /dev/null +++ b/policies/10_direct_docker_auth_simulation.yaml @@ -0,0 +1,119 @@ +# Policy: Continuous Authentication Simulation via Direct Docker Exec +# +# This policy simulates a continuous stream of info.json fetch requests using +# the 'direct_docker_cli' mode. It calls a yt-dlp command inside a running +# Docker container, passing in a batch file and configuration. 
+# +# It uses a pool of managed profiles, locking one for each BATCH of requests. +# The host orchestrator prepares files, and docker exec runs yt-dlp. The container +# itself does not need to be Redis-aware. +# +name: direct_docker_auth_simulation + +settings: + mode: fetch_only + orchestration_mode: direct_docker_cli + profile_mode: from_pool_with_lock + urls_file: "inputfiles/urls.sky3.txt" + # The save directory MUST be inside the docker_host_mount_path for the download + # simulation to be able to find the files. + save_info_json_dir: "run/docker_mount/fetched_info_jsons/direct_docker_simulation" + +execution_control: + workers: 1 + # How long a worker should pause if it cannot find an available profile to lock. + worker_polling_interval_seconds: 1 + # No sleep between tasks; throughput is controlled by yt-dlp performance and profile availability. + +info_json_generation_policy: + profile_prefix: "user1" + +direct_docker_cli_policy: + # Which simulation environment's profiles to use for locking. + use_profile_env: "auth" + + # If true, a worker will try to lock a different profile than the one it just used. + avoid_immediate_profile_reuse: true + # How long the worker should wait for a different profile before re-using the same one. + avoid_reuse_max_wait_seconds: 5 + + # NOTE on Rate Limits: With the default yt-dlp settings, the rate limit for guest + # sessions is ~300 videos/hour (~1000 webpage/player requests per hour). + # For accounts, it is ~2000 videos/hour (~4000 webpage/player requests per hour). + # The enforcer policy (e.g., 8_unified_simulation_enforcer.yaml) should be + # configured to respect these limits via rotation and rest periods. + + # If true, extract the visitor_id from yt-dlp logs, save it per-profile, + # and inject it into subsequent requests for that profile. + #track_visitor_id: true + + # --- Docker Execution Settings --- + docker_image_name: "ytops/yt-dlp" # Image to use for `docker run` + docker_network_name: "airflow_proxynet" + # IMPORTANT: This path on the HOST will be mounted into the container at `docker_container_mount_path`. + docker_host_mount_path: "run/docker_mount" + docker_container_mount_path: "/config" # The mount point inside the container + + # Host path for persisting cache data (e.g., cookies, sigfuncs) between runs. + docker_host_cache_path: ".cache/direct_docker_simulation" + # Path inside the container where the cache is mounted. Should match HOME/.cache + docker_container_cache_path: "/config/.cache" + + # If true, create and use a persistent cookie jar per profile inside the cache dir. + # use_cookies: true + + # --- User-Agent Generation --- + # Template for generating User-Agent strings for new profiles. + # The '{major_version}' will be replaced by a version string. + user_agent_template: "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/{major_version}.0.0.0 Safari/537.36" + # Range of Chrome major versions to use for the template. A range suitable for TV devices. + user_agent_version_range: [110, 120] + + batch_size: 25 + + # A base config file can be used, with overrides applied from the policy. + # The orchestrator will inject 'proxy', 'batch-file', and 'output' keys into the overrides. 
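+  # Hypothetical example of how the merge is assumed to work: override keys map onto the
+  # yt-dlp long options of the same name, so the overrides below would contribute roughly
+  #   --skip-download --write-info-json --no-color --ignore-errors --use-extractors youtube
+  # on top of the base config file, plus whatever the orchestrator injects for
+  # proxy / batch-file / output.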
+ ytdlp_config_file: "cli.auth.config" + ytdlp_config_overrides: + skip-download: true + write-info-json: true + no-write-subs: true + no-color: true + ignore-errors: true + use-extractors: ["youtube"] + + ytdlp_raw_args: + - '--extractor-args "youtube:formats=duplicate;jsc_trace=true;player_client=tv_simply;pot_trace=true;skip=translated_subs,hls"' + - '--extractor-args "youtubepot-bgutilhttp:base_url=http://172.17.0.1:4416"' + - '--sleep-requests 0.75' + # --retry-sleep linear=1::2' + + # --- Live Error Parsing Rules --- + # These regex patterns are checked against yt-dlp's stderr in real-time. + # If a fatal error is detected, immediately ban the profile to stop the container + # and prevent further errors in the same batch. + ban_on_fatal_error_in_batch: true + fatal_error_patterns: + - "Sign in to confirm you’re not a bot" + - "rate-limited by YouTube" + - "This content isn't available, try again later" + - "HTTP Error 502" + + tolerated_error_patterns: + - "HTTP Error 429" + - "The uploader has not made this video available in your country" + - "This video has been removed by the uploader" + - "Private video" + - "This is a private video" + - "Video is private" + - "Video unavailable" + - "account associated with this video has been terminated" + - "members-only content" + - "Sign in to confirm your age" + + # Template for renaming the final info.json. + rename_file_template: "{video_id}-{profile_name}-{proxy}.info.json" + +simulation_parameters: + auth_env: "sim_auth" + download_env: "sim_download" diff --git a/policies/11_direct_docker_download_simulation.yaml b/policies/11_direct_docker_download_simulation.yaml new file mode 100644 index 0000000..811b684 --- /dev/null +++ b/policies/11_direct_docker_download_simulation.yaml @@ -0,0 +1,104 @@ +# Policy: Continuous Download Simulation via Direct Docker Exec +# +# This policy simulates a continuous stream of downloads using the +# 'direct_docker_cli' mode with `mode: download_only`. It finds task files +# (info.jsons) in a directory and invokes a yt-dlp command inside a running +# Docker container to perform the download. +# +name: direct_docker_download_simulation + +settings: + mode: download_only + orchestration_mode: direct_docker_cli + profile_mode: from_pool_with_lock + # This directory should contain info.json files generated by an auth simulation, + # like `10_direct_docker_auth_simulation`. + # It MUST be inside the docker_host_mount_path. + info_json_dir: "run/docker_mount/fetched_info_jsons/direct_docker_simulation" + # Regex to extract the profile name from a task filename. The first capture + # group is used. This is crucial for the task-first locking strategy. + # It looks for a component that starts with 'user' between two hyphens. + profile_extraction_regex: '^.+?-(user[^-]+)-' + +execution_control: + workers: 1 + # How long a worker should pause if it cannot find an available profile or task. + worker_polling_interval_seconds: 1 + +download_policy: + profile_prefix: "user1" + # Default cooldown in seconds if not specified by the enforcer in Redis. + # The value from Redis (set via `unlock_cooldown_seconds` in the enforcer policy) + # will always take precedence. This is a fallback. + # Can be an integer (e.g., 1) or a range (e.g., [1, 3]). + default_unlock_cooldown_seconds: 1 + # If true, check if the download URL in the info.json is expired before + # attempting to download. This is enabled by default. 
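+  # (Assumption about the mechanism, for context: expiry is typically inferred from the
+  #  `expire` unix-timestamp query parameter carried by the googlevideo format URLs inside
+  #  the info.json; tasks whose timestamp is already in the past are skipped so the URL can
+  #  be re-authenticated instead of failing mid-download.)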
+ check_url_expiration: true + # --- Airflow Integration --- + # If true, move downloaded media and info.json to a timestamped, video-id-based + # directory structure that the Airflow DAGs can process. + output_to_airflow_ready_dir: true + airflow_ready_dir_base_path: "downloadfiles/videos/ready" + +simulation_parameters: + download_env: "sim_download" + +direct_docker_cli_policy: + # Which simulation environment's profiles to use for locking. + use_profile_env: "download" + + # If true, a worker will try to lock a different profile than the one it just used. + # This is disabled for downloads, as the cooldown mechanism is sufficient. + avoid_immediate_profile_reuse: false + # How long the worker should wait for a different profile before re-using the same one. + avoid_reuse_max_wait_seconds: 5 + + # NOTE on Rate Limits: With the default yt-dlp settings, the rate limit for guest + # sessions is ~300 videos/hour (~1000 webpage/player requests per hour). + # For accounts, it is ~2000 videos/hour (~4000 webpage/player requests per hour). + # This enforcer policy should be configured to respect these limits via + # rotation and rest periods. + + # --- Docker Execution Settings --- + docker_image_name: "ytops/yt-dlp" + docker_network_name: "airflow_proxynet" + # Host path mounted into the container for task files (info.json, config). + # IMPORTANT: This must be the SAME host path used for the `info_json_dir` above, + # or a parent directory of it, so the container can see the task files. + docker_host_mount_path: "run/docker_mount" + docker_container_mount_path: "/config" + + # Path on the HOST where downloaded files will be saved. + docker_host_download_path: "downloaded_media/direct_docker_simulation" + # Path inside the CONTAINER where `docker_host_download_path` is mounted. + docker_container_download_path: "/downloads" + + # A base config file can be used, with overrides applied from the policy. + # The orchestrator will inject 'proxy', 'load-info-json', and 'output' keys into the overrides. + ytdlp_config_file: "cli.download.config" + ytdlp_config_overrides: + format: "299-dashy/298-dashy/137-dashy/136-dashy/135-dashy/134-dashy/133-dashy,140-dashy/140-dashy-0/140" + #format: "bestvideo[ext=mp4]+bestaudio[ext=m4a]/best[ext=mp4]/best" + no-resize-buffer: true + buffer-size: "4M" + concurrent-fragments: 8 + + ytdlp_raw_args: [] + + # --- Live Error Parsing Rules --- + # If a fatal error is detected, immediately ban the profile to stop the container. + ban_on_fatal_error_in_batch: true + fatal_error_patterns: + - "HTTP Error 403" + - "HTTP Error 502" + + tolerated_error_patterns: + - "timed out" + - "Timeout" + - "connection reset by peer" + - "Invalid data found when processing input" + - "Error opening input files" + +simulation_parameters: + download_env: "sim_download" diff --git a/policies/1_fetch_only_policies.yaml b/policies/1_fetch_only_policies.yaml deleted file mode 100644 index 50d3ab8..0000000 --- a/policies/1_fetch_only_policies.yaml +++ /dev/null @@ -1,155 +0,0 @@ -# This file contains policies for testing only the info.json generation step. -# No downloads are performed. - ---- -# Policy: Basic fetch-only test for a TV client. -# This policy uses a single, static profile and has a rate limit to avoid being -# too aggressive. It saves the generated info.json files to a directory. 
-name: tv_downgraded_single_profile - -settings: - mode: fetch_only - urls_file: "urls.txt" - info_json_script: "bin/ytops-client get-info" - save_info_json_dir: "fetched_info_jsons/tv_downgraded" - # Use a single, static profile for all requests. - profile_prefix: "tv_downgraded_user" - profile_mode: per_worker # With 1 worker, this is effectively a single profile. - -execution_control: - run_until: { cycles: 1 } - workers: 1 - sleep_between_tasks: { min_seconds: 5, max_seconds: 10 } - -info_json_generation_policy: - client: tv_downgraded - # Safety rate limit: 450 requests per hour (7.5 req/min) - rate_limits: - per_ip: { max_requests: 450, per_minutes: 60 } - ---- -# Policy: Fetch-only test for an Android client using a cookie file. -# This demonstrates how to pass a cookie file for authenticated requests. -# It uses a single profile and stops if it encounters too many errors. -name: android_sdkless_with_cookies - -settings: - mode: fetch_only - urls_file: "urls.txt" - info_json_script: "bin/ytops-client get-info" - save_info_json_dir: "fetched_info_jsons/android_sdkless" - profile_prefix: "android_user_with_cookies" - profile_mode: per_worker - -execution_control: - run_until: { cycles: 1 } # Run through the URL list once. - workers: 1 - sleep_between_tasks: { min_seconds: 2, max_seconds: 4 } - -info_json_generation_policy: - client: android_sdkless - # Pass per-request parameters. This is how you specify a cookie file. - request_params: - cookies_file_path: "/path/to/your/android_cookies.txt" - -stop_conditions: - # Stop if we get more than 5 errors in any 10-minute window. - on_error_rate: { max_errors: 5, per_minutes: 10 } - ---- -# Policy: TV Fetch with Profile Cooldown (Pipeline Stage 1) -# Fetches info.json files using the 'tv' client. Each profile is limited -# to a certain number of requests before it is put into a cooldown period. -# The output of this policy is intended to be used by a 'download_only' policy. -name: tv_fetch_with_cooldown - -settings: - mode: fetch_only - urls_file: "urls.txt" - info_json_script: "bin/ytops-client get-info" - # Save the generated files to this directory for the download task to find. - save_info_json_dir: "live_jsons_tv" - profile_management: - prefix: "tv_user" - initial_pool_size: 10 - auto_expand_pool: true - max_requests_per_profile: 60 - sleep_minutes_on_exhaustion: 60 - -execution_control: - run_until: { cycles: 1 } - workers: 1 - sleep_between_tasks: { min_seconds: 2, max_seconds: 5 } - -info_json_generation_policy: - client: "tv" - request_params: - context_reuse_policy: { enabled: true, max_age_seconds: 86400 } - ---- -# Policy: MWeb with client rotation and rate limits. -# This demonstrates a more complex scenario with multiple clients and strict -# rate limiting, useful for simulating sophisticated user behavior. -name: mweb_client_rotation_and_rate_limits - -settings: - mode: fetch_only - urls_file: "urls.txt" - info_json_script: "bin/ytops-client get-info" - # Use the dynamic profile pool management system. - profile_management: - prefix: "mweb_user" - initial_pool_size: 10 - max_requests_per_profile: 100 - sleep_minutes_on_exhaustion: 15 - -execution_control: - run_until: { cycles: 1 } - workers: 10 - sleep_between_tasks: { min_seconds: 2, max_seconds: 5 } - -info_json_generation_policy: - # Enforce strict rate limits for both the entire IP and each individual profile. 
- rate_limits: - per_ip: { max_requests: 120, per_minutes: 10 } - per_profile: { max_requests: 10, per_minutes: 10 } - - # Rotate between a primary client (mweb) and a refresh client (web_camoufox) - # to keep sessions fresh. - client_rotation_policy: - major_client: "mweb" - major_client_params: - context_reuse_policy: { enabled: true, max_age_seconds: 1800 } - refresh_client: "web_camoufox" - refresh_every: { requests: 20, minutes: 10 } - ---- -# Policy: TV Simply, fetch-only test with per-worker profile rotation. -# Fetches info.json using tv_simply with multiple workers. Each worker gets a -# unique profile that is retired and replaced with a new generation after a -# set number of requests. -name: tv_simply_fetch_rotation - -settings: - mode: fetch_only - urls_file: "urls.txt" - info_json_script: "bin/ytops-client get-info" - save_info_json_dir: "fetched_info_jsons/tv_simply_rotation" - # Use the modern profile management system. - profile_mode: per_worker_with_rotation - profile_management: - prefix: "tv_simply_user" - # Rotate to a new profile generation after 250 requests. - max_requests_per_profile: 250 - -execution_control: - run_until: { cycles: 1 } # Run through the URL list once. - workers: 8 # Run with 8 parallel workers. - sleep_between_tasks: { min_seconds: 2, max_seconds: 5 } - # Optional: Override the assumed time for a fetch task to improve rate estimation. - # The default is 3 seconds for fetch_only mode. - # assumptions: - # fetch_task_duration: 2.5 - -info_json_generation_policy: - client: tv_simply diff --git a/policies/2_download_only_policies.yaml b/policies/2_download_only_policies.yaml deleted file mode 100644 index a5feb54..0000000 --- a/policies/2_download_only_policies.yaml +++ /dev/null @@ -1,58 +0,0 @@ -# This file contains policies for testing only the download step from -# existing info.json files. No new info.json files are generated. - ---- -# Policy: Basic profile-aware download test. -# This policy reads info.json files from a directory, groups them by a profile -# name extracted from the filename, and downloads them using multiple workers. -# Each worker handles one or more profiles sequentially. -name: basic_profile_aware_download - -settings: - mode: download_only - info_json_dir: "prefetched_info_jsons" - # Regex to extract profile names from filenames like '...-VIDEOID-my_profile_name.json'. - profile_extraction_regex: ".*-[a-zA-Z0-9_-]{11}-(.+)\\.json" - -execution_control: - run_until: { cycles: 1 } - # 'auto' sets workers to the number of profiles, capped by auto_workers_max. - workers: auto - auto_workers_max: 8 - # This sleep applies between each file downloaded by a single profile. - sleep_between_tasks: { min_seconds: 1, max_seconds: 2 } - -download_policy: - formats: "18,140,299/298/137/136/135/134/133" - downloader: "aria2c" - downloader_args: "aria2c:-x 4 -k 1M" - extra_args: "--cleanup --output-dir /tmp/downloads" - # This sleep applies between formats of a single video. - sleep_between_formats: { min_seconds: 0, max_seconds: 0 } - ---- -# Policy: Continuous download from a folder (Pipeline Stage 2). -# This policy watches a directory for new info.json files and processes them -# as they appear. It is designed to work as the second stage of a pipeline, -# consuming files generated by a 'fetch_only' policy like 'tv_fetch_with_cooldown'. 
-name: continuous_watch_download - -settings: - mode: download_only - info_json_dir: "live_info_jsons" - directory_scan_mode: continuous - mark_processed_files: true # Rename files to *.processed to avoid re-downloading. - max_files_per_cycle: 50 # Process up to 50 new files each time it checks. - sleep_if_no_new_files_seconds: 15 - -execution_control: - # Note: For 'continuous' mode, a time-based run_until (e.g., {minutes: 120}) - # is more typical. {cycles: 1} will cause it to scan the directory once - # for new files, process them, and then exit. - run_until: { cycles: 1 } - workers: 4 # Use a few workers to process files in parallel. - sleep_between_tasks: { min_seconds: 0, max_seconds: 0 } - -download_policy: - formats: "18,140" - extra_args: "--cleanup --output-dir /tmp/downloads" diff --git a/policies/3_full_stack_policies.yaml b/policies/3_full_stack_policies.yaml deleted file mode 100644 index 5ac43a0..0000000 --- a/policies/3_full_stack_policies.yaml +++ /dev/null @@ -1,158 +0,0 @@ -# This file contains policies for full-stack tests, which include both -# info.json generation and the subsequent download step. - ---- -# Policy: TV client with profile rotation. -# This test uses multiple parallel workers. Each worker gets its own profile -# that is automatically rotated (e.g., from tv_user_0_0 to tv_user_0_1) after -# a certain number of requests to simulate user churn. -name: tv_simply_profile_rotation - -settings: - mode: full_stack - urls_file: "urls.txt" - info_json_script: "bin/ytops-client get-info" - save_info_json_dir: "fetched_info_jsons/tv_simply_rotation" - # Use the modern profile management system. - profile_mode: per_worker_with_rotation - profile_management: - prefix: "tv_simply" - # Rotate to a new profile generation after 250 requests. - max_requests_per_profile: 250 - -execution_control: - run_until: { cycles: 1 } - workers: 8 # Run with 8 parallel workers. - sleep_between_tasks: { min_seconds: 2, max_seconds: 5 } - # Optional: Override assumptions to improve rate estimation. - # assumptions: - # fetch_task_duration: 10 # Est. seconds to get info.json - # download_task_duration: 20 # Est. seconds to download all formats for one video - -info_json_generation_policy: - client: tv_simply - -download_policy: - formats: "18,140" - extra_args: "--cleanup --output-dir downloads/tv_simply_rotation" - proxy: "socks5://127.0.0.1:1087" - downloader: "aria2c" - downloader_args: "aria2c:-x 8 -k 1M" - sleep_between_formats: { min_seconds: 2, max_seconds: 2 } - -stop_conditions: - on_cumulative_403: { max_errors: 5, per_minutes: 2 } - ---- -# Policy: TV Simply, full-stack test with per-worker profile rotation. -# Generates info.json using tv_simply and immediately attempts to download. -# This combines the fetch and download steps into a single workflow. -name: tv_simply_full_stack_rotation - -settings: - mode: full_stack - urls_file: "urls.txt" - info_json_script: "bin/ytops-client get-info" - profile_mode: per_worker_with_rotation - profile_management: - prefix: "tv_simply_worker" - max_requests_per_profile: 240 - -execution_control: - workers: 10 - run_until: { cycles: 1 } - sleep_between_tasks: { min_seconds: 5, max_seconds: 5 } - -info_json_generation_policy: - client: "tv_simply" - request_params: - context_reuse_policy: { enabled: false } - -download_policy: - formats: "18,140" - extra_args: "--output-dir downloads/tv_simply_downloads" - ---- -# Policy: MWeb client with multiple profiles, each with its own cookie file. 
-# This demonstrates how to run an authenticated test with a pool of accounts. -# The orchestrator will cycle through the cookie files, assigning one to each profile. -name: mweb_multi_profile_with_cookies - -settings: - mode: full_stack - urls_file: "urls.txt" - info_json_script: "bin/ytops-client get-info" - # Use the dynamic profile pool management system. - profile_management: - prefix: "mweb_user" - initial_pool_size: 3 # Start with 3 profiles. - auto_expand_pool: true # Create new profiles if the initial 3 are all rate-limited. - max_requests_per_profile: 100 # Let each profile make 100 requests... - sleep_minutes_on_exhaustion: 15 # ...then put it to sleep for 15 minutes. - # Assign a different cookie file to each profile in the pool. - # The tool will cycle through this list. - cookie_files: - - "/path/to/your/mweb_cookies_0.txt" - - "/path/to/your/mweb_cookies_1.txt" - - "/path/to/your/mweb_cookies_2.txt" - -execution_control: - run_until: { cycles: 1 } - workers: 3 # Match workers to the number of initial profiles. - sleep_between_tasks: { min_seconds: 1, max_seconds: 3 } - -info_json_generation_policy: - client: mweb - # This client uses youtubei.js, which generates PO tokens. - -download_policy: - formats: "18,140" - extra_args: "--cleanup --output-dir /tmp/downloads" - ---- -# Policy: TV client with profile rotation and aria2c RPC download. -# This test uses multiple parallel workers. Each worker gets its own profile -# that is automatically rotated. Downloads are submitted to an aria2c daemon -# via its RPC interface. -name: tv_simply_profile_rotation_aria2c_rpc - -settings: - mode: full_stack - urls_file: "urls.txt" - info_json_script: "bin/ytops-client get-info" - save_info_json_dir: "fetched_info_jsons/tv_simply_rotation_aria" - profile_mode: per_worker_with_rotation - profile_management: - prefix: "tv_simply_aria" - max_requests_per_profile: 250 - -execution_control: - run_until: { cycles: 1 } - workers: 8 - sleep_between_tasks: { min_seconds: 2, max_seconds: 5 } - -info_json_generation_policy: - client: tv_simply - -download_policy: - formats: "18,140" - # Use the aria2c RPC downloader - downloader: "aria2c_rpc" - # RPC server connection details - aria_host: "localhost" - aria_port: 6800 - # aria_secret: "your_secret" # Uncomment and set if needed - # Set to true to wait for each download and get a success/fail result. - # This is the default and recommended for monitoring success/failure. - # Set to false for maximum submission throughput ("fire-and-forget"), - # but you will lose per-download status reporting. - aria_wait: true - # The output directory is on the aria2c host machine - output_dir: "/downloads/tv_simply_rotation_aria" - # Pass custom arguments to aria2c in yt-dlp format for better performance. - # -x: max connections per server, -k: min split size. - downloader_args: "aria2c:[-x 8, -k 1M]" - sleep_between_formats: { min_seconds: 1, max_seconds: 2 } - -stop_conditions: - on_cumulative_403: { max_errors: 5, per_minutes: 2 } diff --git a/policies/4_custom_scenarios.yaml b/policies/4_custom_scenarios.yaml deleted file mode 100644 index 8d648d2..0000000 --- a/policies/4_custom_scenarios.yaml +++ /dev/null @@ -1,126 +0,0 @@ -# This file contains custom policies for specific testing scenarios. - ---- -# Policy: Fetch info.json with visitor ID rotation. -# This policy uses a single worker to fetch info.json files for a list of URLs. -# It simulates user churn by creating a new profile (and thus a new visitor_id and POT) -# every 250 requests. 
A short sleep is used between requests. -name: fetch_with_visitor_id_rotation - -settings: - mode: fetch_only - urls_file: "urls.txt" # Placeholder, should be overridden with --set - info_json_script: "bin/ytops-client get-info" - save_info_json_dir: "fetched_info_jsons/visitor_id_rotation" - # Use the modern profile management system to rotate visitor_id. - profile_mode: per_worker_with_rotation - profile_management: - prefix: "visitor_rotator" - # Rotate to a new profile generation after 250 requests. - max_requests_per_profile: 250 - -execution_control: - run_until: { cycles: 1 } # Run through the URL list once. - workers: 1 # Run with a single worker thread. - # A short, fixed sleep between each info.json request. - sleep_between_tasks: { min_seconds: 0.75, max_seconds: 0.75 } - -info_json_generation_policy: - # Use a standard client. The server will handle token generation. - client: tv_simply - ---- -# Policy: Full-stack test with visitor ID rotation and test download. -# This policy uses a single worker to fetch info.json files for a list of URLs, -# and then immediately performs a test download (first 10KB) of specified formats. -# It simulates user churn by creating a new profile (and thus a new visitor_id and POT) -# every 250 requests. A short sleep is used between requests. -name: full_stack_with_visitor_id_rotation - -settings: - mode: full_stack - urls_file: "urls.txt" # Placeholder, should be overridden with --set - info_json_script: "bin/ytops-client get-info" - # Use the modern profile management system to rotate visitor_id. - profile_mode: per_worker_with_rotation - profile_management: - prefix: "visitor_rotator" - # Rotate to a new profile generation after 250 requests. - max_requests_per_profile: 250 - -execution_control: - run_until: { cycles: 1 } # Run through the URL list once. - workers: 1 # Run with a single worker thread. - # A short, fixed sleep between each info.json request. - sleep_between_tasks: { min_seconds: 0.75, max_seconds: 0.75 } - -info_json_generation_policy: - # Use a standard client. The server will handle token generation. - client: tv_simply - -download_policy: - formats: "299-dashy/298-dashy/137-dashy/136-dashy/135-dashy/134-dashy/133-dashy" - downloader: "native-py" - extra_args: '--test --cleanup' - output_dir: "downloads/fetch_and_test" - sleep_between_formats: { min_seconds: 6, max_seconds: 6 } - ---- -# Policy: Download-only test from a fetch folder (Batch Mode). -# This policy scans a directory of existing info.json files once, and performs -# a test download (first 10KB) for specific formats. It is designed to run as -# a batch job after a 'fetch_only' policy has completed. -name: download_only_test_from_fetch_folder - -settings: - mode: download_only - # Directory of info.json files to process. - info_json_dir: "fetched_info_jsons/visitor_id_rotation" # Assumes output from 'fetch_with_visitor_id_rotation' - -execution_control: - run_until: { cycles: 1 } # Run through the info.json directory once. - workers: 1 # Run with a single worker thread. - # A longer, randomized sleep between processing each info.json file. - sleep_between_tasks: { min_seconds: 5, max_seconds: 10 } - -download_policy: - # A specific list of video-only DASH formats to test. - formats: "299-dashy/298-dashy/137-dashy/136-dashy/135-dashy/134-dashy/133-dashy" - downloader: "native-py" - # Pass extra arguments to perform a "test" download. 
- extra_args: '--test --cleanup' - output_dir: "downloads/dash_test" - ---- -# Policy: Live download from a watch folder (Continuous Mode). -# This policy continuously watches a directory for new info.json files and -# processes them as they appear. It is designed to work as the second stage -# of a pipeline, consuming files generated by a 'fetch_only' policy. -name: live_download_from_watch_folder - -settings: - mode: download_only - info_json_dir: "live_info_json" # A different directory for the live pipeline - directory_scan_mode: continuous - mark_processed_files: true # Rename files to *.processed to avoid re-downloading. - max_files_per_cycle: 50 # Process up to 50 new files each time it checks. - sleep_if_no_new_files_seconds: 15 - -execution_control: - # For 'continuous' mode, a time-based run_until is typical. - # {cycles: 1} will scan once, process new files, and exit. - # To run for 2 hours, for example, use: run_until: { minutes: 120 } - run_until: { cycles: 1 } - workers: 4 # Use a few workers to process files in parallel. - # sleep_between_tasks controls the pause between processing different info.json files. - # To pause before each download attempt starts, use 'pause_before_download_seconds' - # in the download_policy section below. - sleep_between_tasks: { min_seconds: 0, max_seconds: 0 } - -download_policy: - formats: "299-dashy/298-dashy/137-dashy/136-dashy/135-dashy/134-dashy/133-dashy" - downloader: "native-py" - # Example: Pause for a few seconds before starting each download attempt. - # pause_before_download_seconds: 2 - extra_args: '--test --cleanup' - output_dir: "downloads/live_dash_test" diff --git a/policies/5_ban_test_policies.yaml b/policies/5_ban_test_policies.yaml deleted file mode 100644 index a901fce..0000000 --- a/policies/5_ban_test_policies.yaml +++ /dev/null @@ -1,84 +0,0 @@ -# This file contains policies for testing ban rates and profile survival -# under high request counts. - ---- -# Policy: Single Profile Ban Test (500 Requests) -# This policy uses a single worker and a single, non-rotating profile to make -# 500 consecutive info.json requests. It is designed to test if and when a -# single profile/visitor_id gets banned or rate-limited by YouTube. -# -# It explicitly disables the server's automatic visitor ID rotation to ensure -# the same identity is used for all requests. -# -# The test will stop if it encounters 3 errors within any 1-minute window, -# or a total of 8 errors within any 60-minute window. -name: single_profile_ban_test_500 - -settings: - mode: fetch_only - urls_file: "urls.txt" # Override with --set settings.urls_file=... - info_json_script: "bin/ytops-client get-info" - save_info_json_dir: "fetched_info_jsons/ban_test_single_profile" - # Use one worker with one profile that does not rotate automatically. - profile_mode: per_worker_with_rotation - profile_management: - prefix: "ban_test_user" - # Set a high request limit to prevent the orchestrator from rotating the profile. - max_requests_per_profile: 1000 - -execution_control: - run_until: { requests: 500 } # Stop after 500 total requests. - workers: 1 - sleep_between_tasks: { min_seconds: 1, max_seconds: 2 } - -info_json_generation_policy: - client: "tv_simply" # A typical client for this kind of test. - # Explicitly disable the server's visitor ID rotation mechanism. - request_params: - session_params: - visitor_rotation_threshold: 0 - -stop_conditions: - # Stop if we get 3 or more errors in any 1-minute window (rapid failure). 
- on_error_rate: { max_errors: 3, per_minutes: 1 } - # Stop if we get 8 or more 403 errors in any 60-minute window (ban detection). - on_cumulative_403: { max_errors: 8, per_minutes: 60 } - ---- -# Policy: Multi-Profile Survival Test -# This policy uses 5 parallel workers, each with its own unique profile. -# It tests whether using multiple profiles with the server's default automatic -# visitor ID rotation (every 250 requests) can sustain a high request rate -# without getting banned. -# -# The test will run until 1250 total requests have been made (250 per worker), -# which should trigger one rotation for each profile. -name: multi_profile_survival_test - -settings: - mode: fetch_only - urls_file: "urls.txt" # Override with --set settings.urls_file=... - info_json_script: "bin/ytops-client get-info" - save_info_json_dir: "fetched_info_jsons/ban_test_multi_profile" - # Use 5 workers, each getting its own rotating profile. - profile_mode: per_worker_with_rotation - profile_management: - prefix: "survival_test_user" - # Use the default rotation threshold of 250 requests per profile. - max_requests_per_profile: 250 - -execution_control: - run_until: { requests: 1250 } # 5 workers * 250 requests/rotation = 1250 total. - workers: 5 - sleep_between_tasks: { min_seconds: 1, max_seconds: 2 } - -info_json_generation_policy: - client: "tv_simply" - # No request_params are needed here; we want to use the server's default - # visitor ID rotation behavior. - -stop_conditions: - # Stop if we get 3 or more errors in any 1-minute window (rapid failure). - on_error_rate: { max_errors: 3, per_minutes: 1 } - # Stop if we get 8 or more 403 errors in any 60-minute window (ban detection). - on_cumulative_403: { max_errors: 8, per_minutes: 60 } diff --git a/policies/6_profile_setup_policy.yaml b/policies/6_profile_setup_policy.yaml new file mode 100644 index 0000000..56b47f9 --- /dev/null +++ b/policies/6_profile_setup_policy.yaml @@ -0,0 +1,27 @@ +# Configuration for setting up profiles for a simulation or test run. +# This file is used by the `bin/ytops-client setup-profiles` command. +# It contains separate blocks for authentication and download simulations. + +simulation_parameters: + # --- Common Redis settings for all tools --- + # The environment name ('env') is now specified in each setup block below. + env_file: ".env" # Optional: path to a .env file. + +# --- Profile setup for the AUTHENTICATION simulation --- +auth_profile_setup: + env: "sim_auth" + cleanup_before_run: true + pools: + - prefix: "user1" + proxy: "sslocal-rust-1092:1092" + count: 1 + +# --- Profile setup for the DOWNLOAD simulation --- +download_profile_setup: + env: "sim_download" + cleanup_before_run: true + pools: + - prefix: "user1" + proxy: "sslocal-rust-1092:1092" + count: 1 + diff --git a/policies/8_unified_simulation_enforcer.yaml b/policies/8_unified_simulation_enforcer.yaml new file mode 100644 index 0000000..2c1b9c6 --- /dev/null +++ b/policies/8_unified_simulation_enforcer.yaml @@ -0,0 +1,162 @@ +# Policy for the unified simulation enforcer. +# This file is used by `bin/ytops-client policy-enforcer --live` to manage +# both the authentication and download simulation environments from a single process. + +# Policy for the unified simulation enforcer. +# This file is used by `bin/ytops-client policy-enforcer --live` to manage +# both the authentication and download simulation environments from a single process. 
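+#
+# A typical invocation might look like the following (the --policy flag name and
+# the policy path are illustrative; check `bin/ytops-client policy-enforcer --help`
+# for the exact options accepted by this build):
+#   bin/ytops-client policy-enforcer --policy policies/8_unified_simulation_enforcer.yaml --live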
+ +simulation_parameters: + # --- Common Redis settings for all tools --- + # The enforcer will connect to two different Redis environments (key prefixes) + # based on these settings, applying the corresponding policies to each. + env_file: ".env" + auth_env: "sim_auth" + download_env: "sim_download" + + # How often the enforcer should wake up and apply all policies. + interval_seconds: 2 + +# --- Policies for the Authentication Simulation --- +auth_policy_enforcer_config: + # Ban if 2 failures occur within a 1-minute window. + #ban_on_failures: 2 + #ban_on_failures_window_minutes: 1 + + # The standard rest policy is disabled, as rotation is handled by the profile group. + profile_prefix: "user1" + + # New rate limit policy to enforce requests-per-hour limits. + # For guest sessions, the limit is ~300 videos/hour. + rate_limit_requests: 280 + rate_limit_window_minutes: 60 + rate_limit_rest_duration_minutes: 5 + + rest_after_requests: 0 + rest_duration_minutes: 10 + + # NOTE on Rate Limits: With the default yt-dlp settings, the rate limit for guest + # sessions is ~300 videos/hour (~1000 webpage/player requests per hour). + # For accounts, it is ~2000 videos/hour (~4000 webpage/player requests per hour). + # The settings below should be configured to respect these limits. + + # A group of profiles that are managed together. + # The enforcer will ensure that no more than `max_active_profiles` from this + # group are in the ACTIVE state at any time. + profile_groups: + - name: "exclusive_auth_profiles" + prefix: "user1" + # Enforce that only 1 profile from this group can be active at a time. + max_active_profiles: 1 + # After an active profile has been used for this many requests, it will be + # rotated out and put into a RESTING state. + rotate_after_requests: 25 + # How long a profile rests after being rotated out. + rest_duration_minutes_on_rotation: 1 + + # If true, no new profile in this group will be activated while another + # one is in the 'waiting_downloads' state. + defer_activation_if_any_waiting: true + + # --- New settings for download wait feature --- + # When a profile is rotated, wait for its generated downloads to finish + # before it can be used again. + wait_download_finish_per_profile: true + # Safety net: max time to wait for downloads before forcing rotation. + # Should be aligned with info.json URL validity (e.g., 4 hours = 240 mins). + max_wait_for_downloads_minutes: 240 + + # Time-based proxy rules are disabled as they are not needed for this setup. + proxy_work_minutes: 0 + proxy_rest_duration_minutes: 0 + + # Global maximum time a proxy can be active before being rested, regardless of + # other rules. Acts as a safety net. Set to 0 to disable. + max_global_proxy_active_minutes: 0 + rest_duration_on_max_active: 10 + + # Proxy-level ban on failure burst is disabled. + proxy_ban_on_failures: 0 + proxy_ban_window_minutes: 2 + + # Clean up locks held for more than 16 minutes (960s) to prevent stuck workers. + # This should be longer than the docker container timeout (15m). + unlock_stale_locks_after_seconds: 960 + + # No post-task cooldown for auth simulation profiles. When a task is finished, + # the profile is immediately returned to the ACTIVE state. 
+ unlock_cooldown_seconds: 0 + +# Cross-simulation synchronization +cross_simulation_sync: + # Link auth profiles to download profiles (by name) + # Both profiles should exist in their respective environments + profile_links: + - auth: "user1" + download: "user1" + # Which states to synchronize + #sync_states: + # - "RESTING" # Disabling to prevent deadlock when auth profile is waiting for downloads. + # The download profile must remain active to process them. + # - "BANNED" + # Whether to sync rotation (when auth is rotated due to rotate_after_requests) + #sync_rotation: true + # Whether download profile should be banned if auth is banned (even if download hasn't violated its own rules) + #enforce_auth_lead: true + # Ensures the same profile (e.g., user1_0) is active in both simulations. + # This will activate the correct download profile and rest any others in its group. + sync_active_profile: true + # When an auth profile is waiting for downloads, ensure the matching download profile is active + sync_waiting_downloads: true + +# --- Policies for the Download Simulation --- +download_policy_enforcer_config: + # Ban if 1 failure occurs within a 1-minute window. + ban_on_failures: 1 + ban_on_failures_window_minutes: 1 + + # Standard rest policy is disabled in favor of group rotation. + profile_prefix: "user1" + + # New rate limit policy to enforce requests-per-hour limits. + # For guest sessions, the limit is ~300 videos/hour. We set it slightly lower to be safe. + rate_limit_requests: 280 + rate_limit_window_minutes: 60 + rate_limit_rest_duration_minutes: 5 + # + rest_after_requests: 0 + rest_duration_minutes: 20 + + # NOTE on Rate Limits: With the default yt-dlp settings, the rate limit for guest + # sessions is ~300 videos/hour (~1000 webpage/player requests per hour). + # For accounts, it is ~2000 videos/hour (~4000 webpage/player requests per hour). + # The settings below should be configured to respect these limits. + + # A group of profiles that are mutually exclusive. Only one will be active at a time. + profile_groups: + - name: "exclusive_download_profiles" + prefix: "user1" + rotate_after_requests: 25 + rest_duration_minutes_on_rotation: 1 + max_active_profiles: 1 + + # Time-based proxy rules are disabled. + proxy_work_minutes: 50 + proxy_rest_duration_minutes: 10 + + # Global maximum time a proxy can be active before being rested, regardless of + # other rules. Acts as a safety net. Set to 0 to disable. + max_global_proxy_active_minutes: 0 + rest_duration_on_max_active: 10 + + # Proxy-level ban on failure burst is disabled. + proxy_ban_on_failures: 3 + proxy_ban_window_minutes: 1 + + # Clean up download locks held for more than 16 minutes (960s) to allow for long downloads. + # This should be longer than the docker container timeout (15m). + unlock_stale_locks_after_seconds: 960 + + # After a profile is used for a download, unlock it but put it in COOLDOWN + # state for 12-16s. This is enforced by the worker, which reads this config from Redis. + unlock_cooldown_seconds: [2, 3] diff --git a/policies/README.md b/policies/README.md deleted file mode 100644 index c590c79..0000000 --- a/policies/README.md +++ /dev/null @@ -1,28 +0,0 @@ -# Stress Test Policies - -This directory contains example policy files for the `stress_enhanced.py` orchestrator. Each file defines a specific testing strategy, organized by task type. - -## Authentication & Info.json Policies (`fetch_only` mode) - -These policies focus on testing the info.json generation service. 
- -- `info_json_rate_limit.yaml`: Tests the service with a focus on rate limits and client rotation. -- `auth_scenarios.yaml`: Contains specific scenarios for fetching info.json files, such as using a low-level command template for full control. - -## Download Policies (`download_only` mode) - -These policies focus on testing the download infrastructure using pre-existing info.json files. - -- `download_throughput.yaml`: Tests download/CDN infrastructure, focusing on throughput and error handling. -- `download_scenarios.yaml`: Contains specific scenarios for downloading, such as testing random formats from a directory of info.json files. - -## Full-Stack Policies (`full_stack` mode) - -These policies test the entire workflow from info.json generation through to downloading. - -- `regular_testing_scenarios.yaml`: Contains a collection of common, end-to-end testing scenarios, including: - - `mweb_per_request_profile`: A high-volume test that uses a new profile for every request. - - `mixed_client_profile_pool`: A complex test that alternates clients and reuses profiles from a pool. -- `tv_pipeline_scenarios.yaml`: A two-stage pipeline for fetching with the TV client and then continuously downloading. - -These files can be used as templates for creating custom test scenarios. diff --git a/setup.py b/setup.py index 9c38fe6..8fb27c9 100644 --- a/setup.py +++ b/setup.py @@ -1,42 +1,52 @@ -from setuptools import setup, find_packages import os -import xml.etree.ElementTree as ET +from setuptools import setup, find_packages -def get_version_from_pom(): - """Parse version from pom.xml""" - here = os.path.abspath(os.path.dirname(__file__)) - pom_path = os.path.join(here, 'thrift_model', 'pom.xml') - tree = ET.parse(pom_path) - root = tree.getroot() - - # XML namespaces - ns = {'mvn': 'http://maven.apache.org/POM/4.0.0'} - - version = root.find('mvn:version', ns).text - if version.endswith('-SNAPSHOT'): - version = version.replace('-SNAPSHOT', '.dev0') - return version +try: + with open(os.path.join(os.path.dirname(__file__), 'VERSION.client')) as f: + version = f.read().strip() +except IOError: + version = "0.0.1.dev0" + print(f"Warning: Could not read VERSION.client, falling back to version '{version}'") + +# find_packages() will automatically discover 'ytops_client' and 'yt_ops_services'. +# We manually add the 'pangramia' packages because they are in a separate directory structure. +pangramia_packages = [ + 'pangramia', + 'pangramia.base_service', + 'pangramia.yt', + 'pangramia.yt.common', + 'pangramia.yt.exceptions', + 'pangramia.yt.management', + 'pangramia.yt.tokens_ops', +] setup( - name='yt_ops_services', - version=get_version_from_pom(), - # find_packages() will now discover 'pangramia' via the symlink. - # 'server_fix' is excluded as it's no longer needed. - packages=find_packages(exclude=['tests*', 'server_fix']), - # package_data is not needed for pom.xml as it's only used at build time. 
- include_package_data=True, - # Add all dependencies from requirements.txt + name='ytops-client-tools', + version=version, + packages=find_packages(exclude=['thrift_model*', 'tests*']) + pangramia_packages, + package_dir={ + # This tells setuptools that the 'pangramia' package lives inside thrift_model/gen_py + 'pangramia': 'thrift_model/gen_py/pangramia', + }, + entry_points={ + 'console_scripts': [ + 'ytops-client=ytops_client.cli:main', + ], + }, install_requires=[ 'thrift>=0.16.0,<=0.20.0', 'python-dotenv>=1.0.0', 'psutil', 'flask', 'waitress', + 'yt_dlp>=2025.3.27', 'yt-dlp-get-pot==0.3.0', 'requests>=2.31.0', 'ffprobe3', 'redis', 'PySocks', + 'tabulate', + 'PyYAML', ], python_requires='>=3.9', ) diff --git a/tools/generate-inventory.py b/tools/generate-inventory.py index 6c05933..8f527e3 100755 --- a/tools/generate-inventory.py +++ b/tools/generate-inventory.py @@ -37,21 +37,10 @@ def generate_inventory(cluster_config, inventory_path): f.write(line + "\n") def generate_host_vars(cluster_config, host_vars_dir): - """Generate host-specific variables""" + """Generate host-specific variables. This function is non-destructive and will only create or overwrite files for hosts defined in the cluster config.""" # Create host_vars directory if it doesn't exist os.makedirs(host_vars_dir, exist_ok=True) - # Clear existing host_vars files to avoid stale configurations - for filename in os.listdir(host_vars_dir): - file_path = os.path.join(host_vars_dir, filename) - try: - if os.path.isfile(file_path) or os.path.islink(file_path): - os.unlink(file_path) - elif os.path.isdir(file_path): - shutil.rmtree(file_path) - except Exception as e: - print(f'Failed to delete {file_path}. Reason: {e}') - # Get master IP for Redis configuration from the new structure master_ip = list(cluster_config['master'].values())[0]['ip'] @@ -89,20 +78,15 @@ def generate_host_vars(cluster_config, host_vars_dir): for proxy in worker_proxies: f.write(f" - \"{proxy}\"\n") -def generate_group_vars(cluster_config, group_vars_dir): +def generate_group_vars(cluster_config, group_vars_path): """Generate group-level variables""" - # Create group_vars directory if it doesn't exist - os.makedirs(group_vars_dir, exist_ok=True) - - # Create group_vars/all directory if it doesn't exist - all_vars_dir = os.path.join(group_vars_dir, "all") + # Create parent directory if it doesn't exist + all_vars_dir = os.path.dirname(group_vars_path) os.makedirs(all_vars_dir, exist_ok=True) - # Define path for the generated file and remove it if it exists to avoid stale data. - # This is safer than removing the whole directory, which would delete vault.yml. - all_vars_file = os.path.join(all_vars_dir, "generated_vars.yml") - if os.path.exists(all_vars_file): - os.remove(all_vars_file) + # Remove the specific generated file if it exists to avoid stale data. 
+ if os.path.exists(group_vars_path): + os.remove(group_vars_path) global_vars = cluster_config.get('global_vars', {}) external_ips = cluster_config.get('external_access_ips', []) @@ -122,7 +106,7 @@ def generate_group_vars(cluster_config, group_vars_dir): } generated_data.update(global_vars) - with open(all_vars_file, 'w') as f: + with open(group_vars_path, 'w') as f: f.write("---\n") f.write("# This file is auto-generated by tools/generate-inventory.py\n") f.write("# Do not edit – your changes will be overwritten.\n") @@ -130,7 +114,7 @@ def generate_group_vars(cluster_config, group_vars_dir): def main(): if len(sys.argv) != 2: - print("Usage: python3 generate-inventory.py ") + print("Usage: ./tools/generate-inventory.py ") sys.exit(1) config_path = sys.argv[1] @@ -139,12 +123,28 @@ def main(): if not os.path.exists(config_path): print(f"Error: Configuration file {config_path} not found") sys.exit(1) + + # Derive environment name from config filename (e.g., cluster.stress.yml -> stress) + base_name = os.path.basename(config_path) + if base_name == 'cluster.yml': + env_name = '' + elif base_name.startswith('cluster.') and base_name.endswith('.yml'): + env_name = base_name[len('cluster.'):-len('.yml')] + else: + print(f"Warning: Unconventional config file name '{base_name}'. Using base name as environment identifier.") + env_name = os.path.splitext(base_name)[0] + + # Define output paths based on environment + inventory_suffix = f".{env_name}" if env_name else "" + inventory_path = f"ansible/inventory{inventory_suffix}.ini" + + vars_suffix = f".{env_name}" if env_name else "" + group_vars_path = f"ansible/group_vars/all/generated_vars{vars_suffix}.yml" # Load cluster configuration cluster_config = load_cluster_config(config_path) # Generate inventory file - inventory_path = "ansible/inventory.ini" generate_inventory(cluster_config, inventory_path) print(f"Generated {inventory_path}") @@ -154,9 +154,8 @@ def main(): print(f"Generated host variables in {host_vars_dir}") # Generate group variables - group_vars_dir = "ansible/group_vars" - generate_group_vars(cluster_config, group_vars_dir) - print(f"Generated group variables in {group_vars_dir}") + generate_group_vars(cluster_config, group_vars_path) + print(f"Generated group variables in {os.path.dirname(group_vars_path)}") print("Inventory generation complete!") diff --git a/ytops_client/check_expiry_tool.py b/ytops_client/check_expiry_tool.py new file mode 100644 index 0000000..54d1ae8 --- /dev/null +++ b/ytops_client/check_expiry_tool.py @@ -0,0 +1,132 @@ +#!/usr/bin/env python3 +""" +Tool to check format URLs in an info.json for expiration. +""" + +import argparse +import json +import sys +import logging +import time +from datetime import datetime, timezone +from urllib.parse import urlparse, parse_qs + +from .stress_policy import utils as sp_utils + +logger = logging.getLogger('check_expiry_tool') + + +def add_check_expiry_parser(subparsers): + """Add the parser for the 'check-expiry' command.""" + parser = subparsers.add_parser( + 'check-expiry', + description='Check format URLs in an info.json for expiration.', + formatter_class=argparse.RawTextHelpFormatter, + help='Check if format URLs in an info.json are expired.', + epilog=""" +Exit Codes: + 0: All checked URLs are valid. + 1: At least one URL is expired or will expire within the specified time-shift. + 3: No URLs with expiration info were found to check. + 4: Input error (e.g., invalid JSON). 
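+
+Example (the input file name is illustrative):
+  ytops-client check-expiry --load-info-json info.json --time-shift-minutes 30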
+""" + ) + parser.add_argument( + '--load-info-json', + type=argparse.FileType('r', encoding='utf-8'), + default=sys.stdin, + help="Path to the info.json file. Reads from stdin if not provided." + ) + parser.add_argument( + '--time-shift-minutes', + type=int, + default=0, + help='Time shift in minutes. URLs expiring within this time are also reported as expired. Default: 0.' + ) + parser.add_argument( + '--check-all-formats', + action='store_true', + help='Check all available formats. By default, only the first format with an expiry timestamp is checked.' + ) + parser.add_argument('--verbose', action='store_true', help='Enable verbose logging.') + return parser + + +def main_check_expiry(args): + """Main logic for the 'check-expiry' command.""" + if args.verbose: + logging.getLogger().setLevel(logging.DEBUG) + logging.basicConfig(level=logging.DEBUG, format='%(levelname)s: %(message)s', stream=sys.stderr) + else: + logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s', stream=sys.stderr) + + try: + info_json_content = args.load_info_json.read() + if not info_json_content.strip(): + logger.error("Input is empty.") + return 4 + + info_data = json.loads(info_json_content) + except json.JSONDecodeError: + logger.error("Invalid JSON provided. Please check the input file.") + return 4 + except Exception as e: + logger.error(f"An unexpected error occurred while reading input: {e}", exc_info=args.verbose) + return 4 + + formats = info_data.get('formats', []) + if not formats: + logger.warning("No formats found in the provided info.json.") + return 3 + + overall_status = 'valid' + checked_any = False + min_time_left = float('inf') + worst_status_format_id = None + + for f in formats: + url = f.get('url') + format_id = f.get('format_id', 'N/A') + if not url: + logger.debug(f"Format {format_id} has no URL, skipping.") + continue + + status, time_left = sp_utils.check_url_expiry(url, args.time_shift_minutes) + + if status == 'no_expiry_info': + logger.debug(f"Format {format_id} has no expiration info in URL, skipping.") + continue + + checked_any = True + + if time_left < min_time_left: + min_time_left = time_left + worst_status_format_id = format_id + + # Determine the "worst" status seen so far. Expired > Valid. + if status == 'expired': + overall_status = 'expired' + + if not args.check_all_formats and overall_status != 'valid': + # If we found a problem and we're not checking all, we can stop. + break + + if not args.check_all_formats: + # If we checked one valid format and we're not checking all, we can stop. + break + + if not checked_any: + logger.warning("No formats with expiration timestamps were found to check.") + return 3 + + if overall_status == 'expired': + expire_datetime = datetime.fromtimestamp(time.time() + min_time_left, timezone.utc) + if min_time_left <= 0: + logger.error(f"URL for format '{worst_status_format_id}' is EXPIRED. It expired at {expire_datetime.strftime('%Y-%m-%d %H:%M:%S %Z')}.") + else: + logger.warning(f"URL for format '{worst_status_format_id}' is considered EXPIRED due to time-shift. It will expire in {min_time_left / 60:.1f} minutes (at {expire_datetime.strftime('%Y-%m-%d %H:%M:%S %Z')}).") + return 1 + else: # valid + expire_datetime = datetime.fromtimestamp(time.time() + min_time_left, timezone.utc) + logger.info(f"OK. 
The soonest-expiring URL (format '{worst_status_format_id}') is valid for another {min_time_left / 60:.1f} minutes (expires at {expire_datetime.strftime('%Y-%m-%d %H:%M:%S %Z')}).") + return 0 diff --git a/ytops_client/check_log_pattern_tool.py b/ytops_client/check_log_pattern_tool.py new file mode 100644 index 0000000..4e02f18 --- /dev/null +++ b/ytops_client/check_log_pattern_tool.py @@ -0,0 +1,72 @@ +#!/usr/bin/env python3 +""" +CLI tool to check a log line against policy error patterns. +""" +import argparse +import re +import sys +import yaml +import os +from .stress_policy.utils import load_policy + +def add_check_log_pattern_parser(subparsers): + """Adds the parser for the 'check-log-pattern' command.""" + parser = subparsers.add_parser( + 'check-log-pattern', + help='Check a log line against policy error patterns.', + description='Checks a given log line against the fatal and tolerated error patterns defined in a policy file to determine how it would be classified.' + ) + parser.add_argument('--policy', required=True, help='Path to the YAML policy file.') + parser.add_argument('--policy-name', help='Name of the policy to use from a multi-policy file.') + parser.add_argument( + '--policy-section', + default='direct_docker_cli_policy', + help="The top-level key in the policy where error patterns are defined (e.g., 'direct_docker_cli_policy'). Default: direct_docker_cli_policy" + ) + parser.add_argument('log_line', help='The log line to check.') + +def main_check_log_pattern(args): + """Main logic for the 'check-log-pattern' command.""" + policy = load_policy(args.policy, args.policy_name) + if not policy: + return 1 + + policy_section = policy.get(args.policy_section, {}) + if not policy_section: + print(f"Error: Policy section '{args.policy_section}' not found in the policy.", file=sys.stderr) + return 1 + + fatal_patterns = policy_section.get('fatal_error_patterns', []) + tolerated_patterns = policy_section.get('tolerated_error_patterns', []) + + print(f"--- Checking Log Line ---") + print(f"Policy: {args.policy}" + (f" (name: {args.policy_name})" if args.policy_name else "")) + print(f"Policy Section: {args.policy_section}") + print(f"Log Line: '{args.log_line}'") + print("-" * 25) + + # 1. Check for fatal patterns. These take precedence. + for pattern in fatal_patterns: + if re.search(pattern, args.log_line, re.IGNORECASE): + print(f"Result: FATAL") + print(f"Reason: Matched fatal pattern: '{pattern}'") + return 0 + + # 2. Check for tolerated patterns. This is only relevant for lines that look like errors. + # The logic in stress_policy_tool checks for 'ERROR:' before checking tolerated patterns. + if 'ERROR:' in args.log_line: + for pattern in tolerated_patterns: + if re.search(pattern, args.log_line, re.IGNORECASE): + print(f"Result: TOLERATED") + print(f"Reason: Matched tolerated pattern: '{pattern}'") + return 0 + + # 3. If it's an ERROR line and not tolerated, it's a failure. + print(f"Result: FAILURE") + print(f"Reason: Contains 'ERROR:' but did not match any tolerated patterns.") + return 0 + + # 4. If it's not an error line and didn't match fatal, it's neutral. 
+ print(f"Result: NEUTRAL") + print(f"Reason: Does not contain 'ERROR:' and did not match any fatal patterns.") + return 0 diff --git a/ytops_client/cli.py b/ytops_client/cli.py index 3405eff..0b72f96 100644 --- a/ytops_client/cli.py +++ b/ytops_client/cli.py @@ -1,6 +1,25 @@ #!/usr/bin/env python3 import sys import argparse +import os +from datetime import datetime + +# --- Version Info --- +try: + # Get path relative to this file + script_dir = os.path.dirname(os.path.abspath(__file__)) + project_root = os.path.abspath(os.path.join(script_dir, '..')) + version_file_path = os.path.join(project_root, 'VERSION.client') + + with open(version_file_path, 'r') as f: + __version__ = f.read().strip() + + mod_time = os.path.getmtime(version_file_path) + __build_date__ = datetime.fromtimestamp(mod_time).strftime('%Y-%m-%d') +except Exception: + __version__ = "unknown" + __build_date__ = "unknown" + # Import the functions that define and execute the logic for each subcommand from .list_formats_tool import add_list_formats_parser, main_list_formats @@ -11,6 +30,19 @@ from .stress_formats_tool import add_stress_formats_parser, main_stress_formats from .cookie_tool import add_cookie_tool_parser, main_cookie_tool from .download_aria_tool import add_download_aria_parser, main_download_aria from .download_native_py_tool import add_download_native_py_parser, main_download_native_py +from .check_expiry_tool import add_check_expiry_parser, main_check_expiry +from .config_tool import add_flags_to_json_parser, main_flags_to_json, add_json_to_flags_parser, main_json_to_flags +from .manage_tool import add_manage_parser, main_manage +from .profile_manager_tool import add_profile_manager_parser, main_profile_manager +from .profile_allocator_tool import add_profile_allocator_parser, main_profile_allocator +from .policy_enforcer_tool import add_policy_enforcer_parser, main_policy_enforcer +from .profile_setup_tool import add_setup_profiles_parser, main_setup_profiles +from .simulation_tool import add_simulation_parser, main_simulation +from .locking_download_emulator_tool import add_locking_download_emulator_parser, main_locking_download_emulator +from .task_generator_tool import add_task_generator_parser, main_task_generator +from .yt_dlp_dummy_tool import add_yt_dlp_dummy_parser, main_yt_dlp_dummy +from .check_log_pattern_tool import add_check_log_pattern_parser, main_check_log_pattern + def main(): """ @@ -36,6 +68,11 @@ def main(): description="YT Ops Client Tools", formatter_class=argparse.RawTextHelpFormatter ) + parser.add_argument( + '--version', + action='version', + version=f'ytops-client version {__version__} (build date: {__build_date__})' + ) subparsers = parser.add_subparsers(dest='command', help='Available sub-commands') # Add subparsers from each tool module @@ -56,6 +93,19 @@ def main(): add_stress_policy_parser(subparsers) add_stress_formats_parser(subparsers) add_cookie_tool_parser(subparsers) + add_check_expiry_parser(subparsers) + add_flags_to_json_parser(subparsers) + add_json_to_flags_parser(subparsers) + add_manage_parser(subparsers) + add_profile_manager_parser(subparsers) + add_profile_allocator_parser(subparsers) + add_policy_enforcer_parser(subparsers) + add_setup_profiles_parser(subparsers) + add_simulation_parser(subparsers) + add_locking_download_emulator_parser(subparsers) + add_task_generator_parser(subparsers) + add_yt_dlp_dummy_parser(subparsers) + add_check_log_pattern_parser(subparsers) args = parser.parse_args() @@ -82,6 +132,32 @@ def main(): return main_stress_formats(args) 
elif args.command == 'convert-cookies': return main_cookie_tool(args) + elif args.command == 'check-expiry': + return main_check_expiry(args) + elif args.command == 'flags-to-json': + return main_flags_to_json(args) + elif args.command == 'json-to-flags': + return main_json_to_flags(args) + elif args.command == 'manage': + return main_manage(args) + elif args.command == 'profile': + return main_profile_manager(args) + elif args.command == 'profile-allocator': + return main_profile_allocator(args) + elif args.command == 'policy-enforcer': + return main_policy_enforcer(args) + elif args.command == 'setup-profiles': + return main_setup_profiles(args) + elif args.command == 'simulation': + return main_simulation(args) + elif args.command == 'download-emulator': + return main_locking_download_emulator(args) + elif args.command == 'task-generator': + return main_task_generator(args) + elif args.command == 'yt-dlp-dummy': + return main_yt_dlp_dummy(args) + elif args.command == 'check-log-pattern': + return main_check_log_pattern(args) # This path should not be reachable if a command is required or handled above. parser.print_help() diff --git a/ytops_client/config_tool.py b/ytops_client/config_tool.py new file mode 100644 index 0000000..60376f5 --- /dev/null +++ b/ytops_client/config_tool.py @@ -0,0 +1,317 @@ +#!/usr/bin/env python3 +""" +Tool to convert yt-dlp command-line flags to a JSON config using go-ytdlp. +""" + +import argparse +import json +import logging +import os +import shlex +import subprocess +import sys +from pathlib import Path +from typing import Dict, List + +logger = logging.getLogger('config_tool') + + +def get_go_ytdlp_path(user_path: str = None) -> str: + """ + Get the path to the go-ytdlp binary. + Checks in order: + 1. User-provided path + 2. 'go-ytdlp' in PATH + 3. Local binary in ytops_client/go_ytdlp_cli/go-ytdlp + 4. Binary in go-ytdlp/go-ytdlp (the library's built binary) + 5. Binary in /usr/local/bin/go-ytdlp + """ + def is_exe(fpath): + return os.path.isfile(fpath) and os.access(fpath, os.X_OK) + + if user_path: + if is_exe(user_path): + return user_path + # If user provided a path, we return it even if check fails, + # so subprocess can raise the appropriate error for that specific path. + return user_path + + # Check in PATH + import shutil + path_exe = shutil.which('go-ytdlp') + if path_exe: + return path_exe + + # Check local build directory + local_path = Path(__file__).parent / 'go_ytdlp_cli' / 'go-ytdlp' + if is_exe(str(local_path)): + return str(local_path) + + # Check the go-ytdlp library directory + project_root = Path(__file__).parent.parent + library_binary = project_root / 'go-ytdlp' / 'go-ytdlp' + if is_exe(str(library_binary)): + return str(library_binary) + + # Check /usr/local/bin + if is_exe('/usr/local/bin/go-ytdlp'): + return '/usr/local/bin/go-ytdlp' + + # Default to 'go-ytdlp' which will raise FileNotFoundError if not in PATH + return 'go-ytdlp' + +def convert_flags_to_json(flags: List[str], go_ytdlp_path: str = None) -> Dict: + """ + Converts a list of yt-dlp command-line flags to a JSON config dictionary. + + Args: + flags: A list of strings representing the command-line flags. + go_ytdlp_path: Path to the go-ytdlp executable. If None, will try to find it. + + Returns: + A dictionary representing the JSON config. + + Raises: + ValueError: If no flags are provided. + FileNotFoundError: If the go-ytdlp executable is not found. + subprocess.CalledProcessError: If go-ytdlp returns a non-zero exit code. 
+ json.JSONDecodeError: If the output from go-ytdlp is not valid JSON. + """ + if not flags: + raise ValueError("No flags provided to convert.") + + # Get the actual binary path + actual_path = get_go_ytdlp_path(go_ytdlp_path) + + # Use '--' to separate the subcommand flags from the flags to be converted. + # This prevents go-ytdlp from trying to parse the input flags as its own flags. + cmd = [actual_path, 'flags-to-json', '--'] + flags + + logger.debug(f"Executing command: {' '.join(shlex.quote(s) for s in cmd)}") + try: + process = subprocess.run(cmd, capture_output=True, check=True, encoding='utf-8') + + if process.stderr: + logger.info(f"go-ytdlp output on stderr:\n{process.stderr.strip()}") + + return json.loads(process.stdout) + except json.JSONDecodeError: + logger.error("Failed to parse JSON from go-ytdlp stdout.") + logger.error(f"Stdout was: {process.stdout.strip()}") + raise + except FileNotFoundError: + logger.error(f"Executable '{actual_path}' not found.") + logger.error("Please ensure go-ytdlp is installed and in your PATH.") + logger.error("You can run the 'bin/install-goytdlp.sh' script to install it.") + raise + except subprocess.CalledProcessError as e: + logger.error(f"go-ytdlp exited with error code {e.returncode}.") + logger.error(f"Stderr:\n{e.stderr.strip()}") + if "not supported" in e.stderr: + logger.error("NOTE: The installed version of go-ytdlp does not support converting flags to JSON.") + raise + except PermissionError: + logger.error(f"Permission denied executing '{actual_path}'.") + logger.error("Please ensure the file is executable (chmod +x).") + raise + + +def add_flags_to_json_parser(subparsers): + """Add the parser for the 'flags-to-json' command.""" + parser = subparsers.add_parser( + 'flags-to-json', + description='Convert yt-dlp command-line flags to a JSON config using go-ytdlp.', + formatter_class=argparse.RawTextHelpFormatter, + help='Convert yt-dlp flags to a JSON config.', + epilog=""" +Examples: + +# Convert flags from a string +ytops-client flags-to-json --from-string "-f best --no-playlist" + +# Convert flags from a file (like cli.config) +ytops-client flags-to-json --from-file cli.config + +# Convert flags passed directly as arguments +ytops-client flags-to-json -- --retries 5 --fragment-retries 5 + +# Combine sources (direct arguments override file/string) +ytops-client flags-to-json --from-file cli.config -- --retries 20 + +The go-ytdlp executable must be in your PATH. +You can install it by running the 'bin/install-goytdlp.sh' script. +""" + ) + source_group = parser.add_mutually_exclusive_group() + source_group.add_argument('--from-file', type=argparse.FileType('r', encoding='utf-8'), help='Read flags from a file (e.g., a yt-dlp config file).') + source_group.add_argument('--from-string', help='Read flags from a single string.') + + parser.add_argument('flags', nargs=argparse.REMAINDER, help='yt-dlp flags to convert. Use "--" to separate them from this script\'s own flags.') + parser.add_argument('--go-ytdlp-path', default='go-ytdlp', help='Path to the go-ytdlp executable. 
Defaults to "go-ytdlp" in PATH.') + parser.add_argument('--verbose', action='store_true', help='Enable verbose output for this script.') + return parser + +def main_flags_to_json(args): + """Main logic for the 'flags-to-json' command.""" + if args.verbose: + # Reconfigure root logger for verbose output to stderr + for handler in logging.root.handlers[:]: + logging.root.removeHandler(handler) + logging.basicConfig(level=logging.DEBUG, format='%(name)s - %(levelname)s - %(message)s', stream=sys.stderr) + else: + # Default to INFO level, also to stderr + for handler in logging.root.handlers[:]: + logging.root.removeHandler(handler) + logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s', stream=sys.stderr) + + flags = [] + if args.from_file: + logger.info(f"Reading flags from file: {args.from_file.name}") + content = args.from_file.read() + # A config file can have comments and one arg per line, or be a single line of args. + # shlex.split is good for single lines, but for multi-line we should split by line and filter. + lines = content.splitlines() + for line in lines: + line = line.strip() + if line and not line.startswith('#'): + # shlex.split can handle quoted arguments within the line + flags.extend(shlex.split(line)) + elif args.from_string: + logger.info("Reading flags from string.") + flags.extend(shlex.split(args.from_string)) + + if args.flags: + # The 'flags' remainder might contain '--' which we should remove if it's the first element. + remainder_flags = args.flags + if remainder_flags and remainder_flags[0] == '--': + remainder_flags = remainder_flags[1:] + + if remainder_flags: + logger.info("Appending flags from command-line arguments.") + flags.extend(remainder_flags) + + if not flags: + logger.error("No flags provided to convert.") + return 1 + + try: + json_output = convert_flags_to_json(flags, args.go_ytdlp_path) + # Print to actual stdout for piping. + print(json.dumps(json_output, indent=2)) + return 0 + except (ValueError, FileNotFoundError, subprocess.CalledProcessError, json.JSONDecodeError, PermissionError): + # Specific error is already logged by the helper function. + return 1 + except Exception as e: + logger.error(f"An unexpected error occurred: {e}", exc_info=args.verbose) + return 1 + + +def convert_json_to_flags(json_input: str, go_ytdlp_path: str = None) -> str: + """ + Converts a JSON config string to yt-dlp command-line flags. + + Args: + json_input: A string containing the JSON config. + go_ytdlp_path: Path to the go-ytdlp executable. If None, will try to find it. + + Returns: + A string of command-line flags. + + Raises: + ValueError: If the json_input is empty. + FileNotFoundError: If the go-ytdlp executable is not found. + subprocess.CalledProcessError: If go-ytdlp returns a non-zero exit code. 
+ """ + if not json_input: + raise ValueError("No JSON input provided to convert.") + + # Get the actual binary path + actual_path = get_go_ytdlp_path(go_ytdlp_path) + + cmd = [actual_path, 'json-to-flags'] + + logger.debug(f"Executing command: {' '.join(shlex.quote(s) for s in cmd)}") + try: + process = subprocess.run(cmd, input=json_input, capture_output=True, check=True, encoding='utf-8') + + if process.stderr: + logger.info(f"go-ytdlp output on stderr:\n{process.stderr.strip()}") + + return process.stdout.strip() + except FileNotFoundError: + logger.error(f"Executable '{actual_path}' not found.") + logger.error("Please ensure go-ytdlp is installed and in your PATH.") + logger.error("You can run the 'bin/install-goytdlp.sh' script to install it.") + raise + except subprocess.CalledProcessError as e: + logger.error(f"go-ytdlp exited with error code {e.returncode}.") + logger.error(f"Stderr:\n{e.stderr.strip()}") + raise + except PermissionError: + logger.error(f"Permission denied executing '{actual_path}'.") + logger.error("Please ensure the file is executable (chmod +x).") + raise + + +def add_json_to_flags_parser(subparsers): + """Add the parser for the 'json-to-flags' command.""" + parser = subparsers.add_parser( + 'json-to-flags', + description='Convert a JSON config to yt-dlp command-line flags using go-ytdlp.', + formatter_class=argparse.RawTextHelpFormatter, + help='Convert a JSON config to yt-dlp flags.', + epilog=""" +Examples: + +# Convert JSON from a string +ytops-client json-to-flags --from-string '{"postprocessor": {"ffmpeg": {"ppa": "SponsorBlock"}}}' + +# Convert JSON from a file +ytops-client json-to-flags --from-file config.json + +The go-ytdlp executable must be in your PATH. +You can install it by running the 'bin/install-goytdlp.sh' script. +""" + ) + source_group = parser.add_mutually_exclusive_group(required=True) + source_group.add_argument('--from-file', type=argparse.FileType('r', encoding='utf-8'), help='Read JSON from a file.') + source_group.add_argument('--from-string', help='Read JSON from a single string.') + + parser.add_argument('--go-ytdlp-path', default='go-ytdlp', help='Path to the go-ytdlp executable. Defaults to "go-ytdlp" in PATH.') + parser.add_argument('--verbose', action='store_true', help='Enable verbose output for this script.') + return parser + + +def main_json_to_flags(args): + """Main logic for the 'json-to-flags' command.""" + if args.verbose: + # Reconfigure root logger for verbose output to stderr + for handler in logging.root.handlers[:]: + logging.root.removeHandler(handler) + logging.basicConfig(level=logging.DEBUG, format='%(name)s - %(levelname)s - %(message)s', stream=sys.stderr) + else: + # Default to INFO level, also to stderr + for handler in logging.root.handlers[:]: + logging.root.removeHandler(handler) + logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s', stream=sys.stderr) + + json_input = "" + if args.from_file: + logger.info(f"Reading JSON from file: {args.from_file.name}") + json_input = args.from_file.read() + elif args.from_string: + logger.info("Reading JSON from string.") + json_input = args.from_string + + try: + flags_output = convert_json_to_flags(json_input, args.go_ytdlp_path) + # Print to actual stdout for piping. + print(flags_output) + return 0 + except (ValueError, FileNotFoundError, subprocess.CalledProcessError, PermissionError): + # Specific error is already logged by the helper function. 
+ return 1 + except Exception as e: + logger.error(f"An unexpected error occurred: {e}", exc_info=args.verbose) + return 1 diff --git a/ytops_client/download_emulator_tool.py b/ytops_client/download_emulator_tool.py new file mode 100644 index 0000000..e69de29 diff --git a/ytops_client/download_native_py_tool.py b/ytops_client/download_native_py_tool.py index 0622695..3c519cd 100644 --- a/ytops_client/download_native_py_tool.py +++ b/ytops_client/download_native_py_tool.py @@ -5,6 +5,7 @@ Tool to download a specified format using yt-dlp as a Python library. import argparse import contextlib +import copy import io import json import logging @@ -17,6 +18,7 @@ from datetime import datetime try: import yt_dlp + from yt_dlp.utils import match_filter_func except ImportError: print("yt-dlp is not installed. Please install it with: pip install yt-dlp", file=sys.stderr) sys.exit(1) @@ -29,11 +31,15 @@ class YTDLPLogger: self.final_filename = None self.is_403 = False self.is_timeout = False + self.has_errors = False def debug(self, msg): # yt-dlp logs the destination file path at the debug level. if msg.startswith('[download] Destination:'): self.final_filename = msg.split(':', 1)[1].strip() + elif msg.startswith('[Merger] Merging formats into "'): + # This captures the final filename after merging. + self.final_filename = msg.split('"')[1] elif msg.startswith('[download]') and 'has already been downloaded' in msg: match = re.search(r'\[download\]\s+(.*)\s+has already been downloaded', msg) if match: @@ -51,6 +57,7 @@ class YTDLPLogger: self.is_403 = True if "Read timed out" in msg: self.is_timeout = True + self.has_errors = True logger.error(msg) def ytdlp_progress_hook(d, ytdlp_logger): @@ -77,7 +84,7 @@ def add_download_native_py_parser(subparsers): parser.add_argument('--pause', type=int, default=0, help='Seconds to wait before starting the download.') parser.add_argument('--download-continue', action='store_true', help='Enable download continuation (--no-overwrites and --continue flags for yt-dlp).') parser.add_argument('--verbose', action='store_true', help='Enable verbose output for this script and yt-dlp.') - parser.add_argument('--cli-config', help='Path to a yt-dlp configuration file to load.') + parser.add_argument('--config', default=None, help='Path to a yt-dlp JSON configuration file (e.g., ytdlp.json). If not provided, searches for ytdlp.json.') parser.add_argument('--downloader', help='Name of the external downloader backend for yt-dlp to use (e.g., "aria2c", "native").') parser.add_argument('--downloader-args', help='Arguments to pass to the external downloader backend (e.g., "aria2c:-x 8").') parser.add_argument('--extra-ytdlp-args', help='A string of extra command-line arguments to pass to yt-dlp.') @@ -88,11 +95,87 @@ def add_download_native_py_parser(subparsers): parser.add_argument('--fragment-retries', type=int, help='Number of retries for each fragment (default: 10).') parser.add_argument('--socket-timeout', type=int, help='Timeout for socket operations in seconds (default: 20).') parser.add_argument('--add-header', action='append', help='Add a custom HTTP header for the download. Format: "Key: Value". 
Can be used multiple times.') + parser.add_argument('--concurrent-fragments', type=int, help='Number of fragments to download concurrently for each media.') # Arguments to pass through to yt-dlp parser.add_argument('--download-sections', help='yt-dlp --download-sections argument (e.g., "*0-10240").') parser.add_argument('--test', action='store_true', help='yt-dlp --test argument (download small part).') return parser + +def _download_single_format(format_id, info_data, base_ydl_opts, args): + """ + Download a single format ID from the given info_data. + This function filters info_data to only contain the requested format, + preventing yt-dlp from auto-merging with other streams. + + Returns a tuple: (success: bool, ytdlp_logger: YTDLPLogger) + """ + # Deep copy info_data so we can modify it without affecting other downloads + local_info_data = copy.deepcopy(info_data) + + available_formats = local_info_data.get('formats', []) + + # Find the exact format + target_format = next((f for f in available_formats if f.get('format_id') == format_id), None) + + if not target_format: + logger.error(f"Format '{format_id}' not found in info.json") + ytdlp_logger = YTDLPLogger() + ytdlp_logger.has_errors = True + return False, ytdlp_logger + + # Filter to only this format - this is the key to preventing auto-merge + local_info_data['formats'] = [target_format] + + # Clear any pre-selected format fields that might trigger merging + local_info_data.pop('requested_formats', None) + local_info_data.pop('format', None) + local_info_data.pop('format_id', None) + + logger.info(f"Filtered info_data to only contain format '{format_id}' (removed {len(available_formats) - 1} other formats)") + + # Create a fresh logger for this download + ytdlp_logger = YTDLPLogger() + + # Copy base options and update with this format's specifics + ydl_opts = dict(base_ydl_opts) + ydl_opts['format'] = format_id + ydl_opts['logger'] = ytdlp_logger + ydl_opts['progress_hooks'] = [lambda d, yl=ytdlp_logger: ytdlp_progress_hook(d, yl)] + + try: + download_buffer = None + if args.output_buffer: + download_buffer = io.BytesIO() + ctx_mgr = contextlib.redirect_stdout(download_buffer) + else: + ctx_mgr = contextlib.nullcontext() + + with ctx_mgr, yt_dlp.YoutubeDL(ydl_opts) as ydl: + ydl.process_ie_result(local_info_data) + + if ytdlp_logger.has_errors: + logger.error(f"Download of format '{format_id}' failed: yt-dlp reported an error during execution.") + return False, ytdlp_logger + + logger.info(f"Download of format '{format_id}' completed successfully.") + + if args.output_buffer and download_buffer: + sys.stdout.buffer.write(download_buffer.getvalue()) + sys.stdout.buffer.flush() + + return True, ytdlp_logger + + except yt_dlp.utils.DownloadError as e: + logger.error(f"yt-dlp DownloadError for format '{format_id}': {e}") + ytdlp_logger.has_errors = True + return False, ytdlp_logger + except Exception as e: + logger.exception(f"Unexpected error downloading format '{format_id}': {e}") + ytdlp_logger.has_errors = True + return False, ytdlp_logger + + def main_download_native_py(args): """Main logic for the 'download-native-py' command.""" # All logging should go to stderr to keep stdout clean for the final filename, or for binary data with --output-buffer. @@ -163,245 +246,422 @@ def main_download_native_py(args): logger.error("Invalid --proxy-rename format. Expected: s/pattern/replacement/") return 1 + # For library usage, ensure proxy URL has a scheme. Default to http if missing. 
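+ # e.g. a bare "sslocal-rust-1092:1092" becomes "http://sslocal-rust-1092:1092" (example value taken
+ # from the profile setup policy); socks5 proxies must therefore include their scheme explicitly.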
+ if proxy_url and '://' not in proxy_url: + original_proxy = proxy_url + proxy_url = 'http://' + proxy_url + logger.info(f"Proxy URL '{original_proxy}' has no scheme. Defaulting to '{proxy_url}'.") + # Build the yt-dlp options dictionary - # Start by parsing options from config file and extra args to establish a baseline. - base_opts_args = [] - if args.cli_config and os.path.exists(args.cli_config): - try: - with open(args.cli_config, 'r', encoding='utf-8') as f: - config_content = f.read() - base_opts_args.extend(shlex.split(config_content)) - logger.info(f"Loaded {len(base_opts_args)} arguments from config file: {args.cli_config}") - except Exception as e: - logger.error(f"Failed to read or parse config file {args.cli_config}: {e}") - return 1 - elif args.cli_config: - logger.warning(f"Config file '{args.cli_config}' not found. Ignoring.") + logger.info("--- Configuring yt-dlp options ---") - if args.extra_ytdlp_args: - extra_args_list = shlex.split(args.extra_ytdlp_args) - logger.info(f"Adding {len(extra_args_list)} extra arguments from --extra-ytdlp-args.") - base_opts_args.extend(extra_args_list) + param_sources = {} + ydl_opts = {} - ydl_opts = { - 'noresizebuffer': True, - 'buffersize': '4M', - } - if base_opts_args: - try: - logger.info(f"Parsing {len(base_opts_args)} arguments from config/extra_args...") - i = 0 - while i < len(base_opts_args): - arg = base_opts_args[i] - if not arg.startswith('--'): - logger.warning(f"Skipping non-option argument in extra args: {arg}") - i += 1 - continue + def _parse_ytdlp_args(args_list, source_name, opts_dict, sources_dict): + """Helper to parse a list of yt-dlp CLI-style args into an options dict.""" + i = 0 + while i < len(args_list): + arg = args_list[i] + if not arg.startswith('--'): + logger.warning(f"Skipping non-option argument from {source_name}: {arg}") + i += 1 + continue - key = arg.lstrip('-').replace('-', '_') - - # Handle flags (no value) - is_flag = i + 1 >= len(base_opts_args) or base_opts_args[i + 1].startswith('--') - - if key == 'resize_buffer': - ydl_opts['noresizebuffer'] = False - logger.debug(f"Parsed flag: noresizebuffer = False") - i += 1 - continue - elif key == 'no_resize_buffer': - ydl_opts['noresizebuffer'] = True - logger.debug(f"Parsed flag: noresizebuffer = True") - i += 1 - continue - - if is_flag: - if key.startswith('no_'): - # Handle --no-foo flags - ydl_opts[key[3:]] = False - else: - ydl_opts[key] = True - logger.debug(f"Parsed flag: {key} = {ydl_opts.get(key[3:] if key.startswith('no_') else key)}") - i += 1 - # Handle options with values + key_cli = arg.lstrip('-') + key_py = key_cli.replace('-', '_') + + is_flag = i + 1 >= len(args_list) or args_list[i + 1].startswith('--') + + if is_flag: + if key_py.startswith('no_'): + real_key = key_py[3:] + # Handle special cases where the Python option name is different + if real_key == 'resize_buffer': real_key = 'noresizebuffer' + opts_dict[real_key] = False + sources_dict[real_key] = source_name else: - value = base_opts_args[i + 1] - # Try to convert values to numbers, which yt-dlp expects. - # This includes parsing byte suffixes like 'K', 'M', 'G'. 
- if isinstance(value, str): - original_value = value - value_upper = value.upper() - multipliers = {'K': 1024, 'M': 1024**2, 'G': 1024**3, 'T': 1024**4} - - if value_upper and value_upper[-1] in multipliers: - try: - num = float(value[:-1]) - value = int(num * multipliers[value_upper[-1]]) - except (ValueError, TypeError): - value = original_value # fallback - else: - try: - value = int(value) - except (ValueError, TypeError): - try: - value = float(value) - except (ValueError, TypeError): - value = original_value # fallback + # Handle special cases + if key_py == 'resize_buffer': + opts_dict['noresizebuffer'] = False + sources_dict['noresizebuffer'] = source_name + else: + opts_dict[key_py] = True + sources_dict[key_py] = source_name + i += 1 + else: # Has a value + value = args_list[i + 1] + # Special key name conversions + if key_py == 'limit_rate': key_py = 'ratelimit' + elif key_py == 'buffer_size': key_py = 'buffersize' + + # Special value conversion for match_filter + if key_py == 'match_filter': + try: + value = match_filter_func(value) + except Exception as e: + logger.error(f"Failed to compile --match-filter '{value}': {e}") + # Skip this option + i += 2 + continue + else: + # Try to convert values to numbers, which yt-dlp expects + try: + value = int(value) + except (ValueError, TypeError): + try: + value = float(value) + except (ValueError, TypeError): + pass # Keep as string + + opts_dict[key_py] = value + sources_dict[key_py] = source_name + i += 2 - # Special handling for keys that differ from CLI arg, e.g. --limit-rate -> ratelimit - if key == 'limit_rate': - key = 'ratelimit' - elif key == 'buffer_size': - key = 'buffersize' - - ydl_opts[key] = value - logger.debug(f"Parsed option: {key} = {value}") - i += 2 - logger.info("Successfully parsed extra yt-dlp options.") - except Exception as e: - logger.error(f"Failed to parse options from config/extra_args: {e}", exc_info=True) + # 1. Load from JSON config file + config_path = args.config + log_msg = "" + if config_path: + log_msg = f"1. [Source: Config File] Loading from: {config_path}" + else: + if os.path.exists('ytdlp.json'): + config_path = 'ytdlp.json' + log_msg = f"1. [Source: Config File] No --config provided. Found and loading local '{config_path}'." + + if config_path and os.path.exists(config_path): + if log_msg: + logger.info(log_msg) + try: + with open(config_path, 'r', encoding='utf-8') as f: + config_data = json.load(f) + + # All yt-dlp options are expected under the 'ytdlp_params' key. + config_opts = config_data.get('ytdlp_params', {}) + + if config_opts: + logger.info(f"Parameters from config file ('{config_path}'):") + config_str = json.dumps(config_opts, indent=2) + for line in config_str.splitlines(): + logger.info(f" {line}") + + # Special handling for match_filter before updating ydl_opts + if 'match_filter' in config_opts and isinstance(config_opts['match_filter'], str): + logger.info(f" Compiling 'match_filter' string from config file...") + try: + config_opts['match_filter'] = match_filter_func(config_opts['match_filter']) + except Exception as e: + logger.error(f"Failed to compile match_filter from config: {e}") + del config_opts['match_filter'] + + ydl_opts.update(config_opts) + for key in config_opts: + param_sources[key] = "Config File" + except (json.JSONDecodeError, IOError) as e: + logger.error(f"Failed to read or parse JSON config file {config_path}: {e}") return 1 + elif args.config: + logger.warning(f"Config file '{args.config}' not found. Ignoring.") + + # 2. 
Load from extra command-line args + if args.extra_ytdlp_args: + logger.info(f"2. [Source: CLI Extra Args] Loading extra arguments...") + extra_args_list = shlex.split(args.extra_ytdlp_args) + _parse_ytdlp_args(extra_args_list, "CLI Extra Args", ydl_opts, param_sources) - # Now, layer the script's explicit arguments on top, as they have higher precedence. + # 3. Apply internal defaults if not already set + if 'noresizebuffer' not in ydl_opts: + ydl_opts['noresizebuffer'] = True + param_sources['noresizebuffer'] = "Internal Default" + if 'buffersize' not in ydl_opts: + ydl_opts['buffersize'] = '4M' + param_sources['buffersize'] = "Internal Default" + if 'force_progress' not in ydl_opts: + ydl_opts['force_progress'] = True + param_sources['force_progress'] = "Internal Default" + + # 4. Apply explicit arguments from this script's CLI (highest priority) + logger.info("3. [Source: CLI Explicit Args] Applying explicit overrides:") + os.makedirs(args.output_dir, exist_ok=True) - output_template = os.path.join(args.output_dir, '%(title)s [%(id)s].f%(format_id)s.%(ext)s') - ytdlp_logger = YTDLPLogger() - - # Use update to merge, so explicit args overwrite config/extra args. - ydl_opts.update({ - 'format': args.format, - 'outtmpl': '-' if args.output_buffer else output_template, - 'logger': ytdlp_logger, - 'progress_hooks': [lambda d: ytdlp_progress_hook(d, ytdlp_logger)], - 'verbose': args.verbose, - }) + ydl_opts['verbose'] = args.verbose + param_sources['verbose'] = "CLI Explicit" + # Handle output template ('outtmpl') + if args.output_buffer: + ydl_opts['outtmpl'] = '-' + param_sources['outtmpl'] = "CLI Explicit (Buffer)" + elif 'outtmpl' in ydl_opts: + # Respect outtmpl from config, but prepend the output directory + outtmpl_val = ydl_opts['outtmpl'] + if isinstance(outtmpl_val, dict): + # Prepend dir to each template in the dict + ydl_opts['outtmpl'] = {k: os.path.join(args.output_dir, v) for k, v in outtmpl_val.items()} + else: + # Prepend dir to the single template string + ydl_opts['outtmpl'] = os.path.join(args.output_dir, str(outtmpl_val)) + param_sources['outtmpl'] = "Config File (Path Applied)" + logger.info(f" Using 'outtmpl' from config, with output directory '{args.output_dir}' prepended.") + else: + # Use a default template if not specified in config + output_template = os.path.join(args.output_dir, '%(title)s [%(id)s].f%(format_id)s.%(ext)s') + ydl_opts['outtmpl'] = output_template + param_sources['outtmpl'] = "Internal Default" + logger.info(f" Using default 'outtmpl': {output_template}") + if args.temp_path: ydl_opts['paths'] = {'temp': args.temp_path} - logger.info(f"Using temporary path: {args.temp_path}") + param_sources['paths'] = "CLI Explicit" + logger.info(f" Temporary path: {args.temp_path}") if args.add_header: if 'http_headers' not in ydl_opts: ydl_opts['http_headers'] = {} elif not isinstance(ydl_opts['http_headers'], dict): - logger.warning(f"Overwriting non-dictionary http_headers from config with headers from command line.") + logger.warning(f"Overwriting non-dictionary http_headers with headers from command line.") ydl_opts['http_headers'] = {} - for header in args.add_header: - if ':' not in header: - logger.error(f"Invalid header format in --add-header: '{header}'. 
Expected 'Key: Value'.") - return 1 - key, value = header.split(':', 1) - ydl_opts['http_headers'][key.strip()] = value.strip() - logger.info(f"Adding/overwriting header: {key.strip()}: {value.strip()}") + if ':' in header: + key, value = header.split(':', 1) + ydl_opts['http_headers'][key.strip()] = value.strip() + else: + logger.error(f"Invalid header format: '{header}'. Expected 'Key: Value'.") + param_sources['http_headers'] = "CLI Explicit (Merged)" if args.download_continue: ydl_opts['continuedl'] = True ydl_opts['nooverwrites'] = True + param_sources['continuedl'] = "CLI Explicit" + param_sources['nooverwrites'] = "CLI Explicit" if proxy_url: ydl_opts['proxy'] = proxy_url + param_sources['proxy'] = "CLI Explicit" if args.downloader: ydl_opts['downloader'] = {args.downloader: None} + param_sources['downloader'] = "CLI Explicit" if args.downloader_args: - # yt-dlp expects a dict for downloader_args - # e.g., {'aria2c': ['-x', '8']} try: downloader_name, args_str = args.downloader_args.split(':', 1) ydl_opts.setdefault('downloader_args', {})[downloader_name] = shlex.split(args_str) + param_sources['downloader_args'] = "CLI Explicit" except ValueError: - logger.error(f"Invalid --downloader-args format. Expected 'downloader:args'. Got: '{args.downloader_args}'") + logger.error(f"Invalid --downloader-args format. Expected 'downloader:args'.") return 1 if args.merge_output_format: ydl_opts['merge_output_format'] = args.merge_output_format - + param_sources['merge_output_format'] = "CLI Explicit" if args.download_sections: ydl_opts['download_sections'] = args.download_sections - + param_sources['download_sections'] = "CLI Explicit" if args.test: ydl_opts['test'] = True - + param_sources['test'] = "CLI Explicit" if args.retries is not None: ydl_opts['retries'] = args.retries + param_sources['retries'] = "CLI Explicit" if args.fragment_retries is not None: ydl_opts['fragment_retries'] = args.fragment_retries + param_sources['fragment_retries'] = "CLI Explicit" if args.socket_timeout is not None: ydl_opts['socket_timeout'] = args.socket_timeout + param_sources['socket_timeout'] = "CLI Explicit" + if args.concurrent_fragments is not None: + ydl_opts['concurrent_fragments'] = args.concurrent_fragments + param_sources['concurrent_fragments'] = "CLI Explicit" + # To prevent timeouts on slow connections, ensure progress reporting is not disabled. + # The CLI wrapper enables this by default, so we match its behavior for robustness. + if ydl_opts.get('noprogress'): + logger.info("Overriding 'noprogress' option. Progress reporting is enabled to prevent network timeouts.") + ydl_opts['noprogress'] = False + param_sources['noprogress'] = "Internal Override" + + # Ensure byte-size options are integers for library use try: - logger.info(f"Starting download for format '{args.format}' using yt-dlp library...") + from yt_dlp.utils import parse_bytes + if 'buffersize' in ydl_opts and isinstance(ydl_opts['buffersize'], str): + ydl_opts['buffersize'] = parse_bytes(ydl_opts['buffersize']) + param_sources['buffersize'] = param_sources.get('buffersize', 'Unknown') + ' (Parsed)' + except (ImportError, Exception) as e: + logger.warning(f"Could not parse 'buffersize' option: {e}") + + # Force skip_download to False, as this script's purpose is to download. + if ydl_opts.get('skip_download'): + logger.info("Overriding 'skip_download: true' from config. 
This tool is for downloading.") + ydl_opts['skip_download'] = False + param_sources['skip_download'] = "Internal Override" + + # Log final effective options with sources + logger.info("=== Final Effective yt-dlp Options (base) ===") + for k in sorted(ydl_opts.keys()): + v = ydl_opts[k] + src = param_sources.get(k, "Unknown") + if k in ['logger', 'progress_hooks']: continue + logger.info(f" {k}: {v} [Source: {src}]") + + # --- Parse and process the format string --- + requested_format = args.format + available_formats = [str(f['format_id']) for f in info_data.get('formats', []) if 'format_id' in f] + + # Determine what kind of format string we have + # Keywords that yt-dlp treats as special selectors + selector_keywords = ('best', 'worst', 'bestvideo', 'bestaudio') + + # Split by comma to get individual format requests + # Each item could be a simple format ID or a fallback chain (with /) + format_items = [f.strip() for f in requested_format.split(',') if f.strip()] + + logger.info(f"Format string '{requested_format}' parsed into {len(format_items)} item(s): {format_items}") + + # Process each format item + all_success = True + final_filename = None + + for format_item in format_items: + logger.info(f"--- Processing format item: '{format_item}' ---") - download_buffer = None - if args.output_buffer: - # When downloading to buffer, we redirect stdout to capture the binary data. - download_buffer = io.BytesIO() - ctx_mgr = contextlib.redirect_stdout(download_buffer) - else: - # Otherwise, use a null context manager. - ctx_mgr = contextlib.nullcontext() - - with ctx_mgr, yt_dlp.YoutubeDL(ydl_opts) as ydl: - # The download() method is for URLs. For a pre-fetched info dict, - # we must use process_ie_result to bypass the info extraction step. - # It raises DownloadError on failure, which is caught by the outer try...except block. - ydl.process_ie_result(info_data) - # If process_ie_result completes without an exception, the download was successful. - retcode = 0 - - # The success path is now always taken if no exception was raised. - if retcode == 0: - if ytdlp_logger.is_403: - logger.error("Download failed: yt-dlp reported HTTP Error 403: Forbidden. The URL has likely expired.") - return 1 - if ytdlp_logger.is_timeout: - logger.error("Download failed: yt-dlp reported a timeout.") - return 1 - - logger.info("yt-dlp download completed successfully.") + # Check if this specific item is a simple format ID or a complex selector + item_has_complex_syntax = any(c in format_item for c in '/+[]()') or format_item.startswith(selector_keywords) + + if item_has_complex_syntax: + # This is a complex selector like "299/298/137" or "bestvideo+bestaudio" + # We need to handle fallback chains specially - if args.output_buffer: - # Write the captured binary data to the actual stdout. - sys.stdout.buffer.write(download_buffer.getvalue()) - sys.stdout.buffer.flush() - # Print the filename to stderr for the orchestrator. - if ytdlp_logger.final_filename: - print(ytdlp_logger.final_filename, file=sys.stderr) - else: - # Print the filename to stdout as usual. 
- if ytdlp_logger.final_filename: - print(ytdlp_logger.final_filename, file=sys.stdout) + if '/' in format_item and '+' not in format_item: + # This is a fallback chain like "299/298/137" + # Try each format in order until one succeeds + fallback_formats = [f.strip() for f in format_item.split('/') if f.strip()] + logger.info(f"Detected fallback chain with {len(fallback_formats)} options: {fallback_formats}") - if args.cleanup: - downloaded_filepath = ytdlp_logger.final_filename - if downloaded_filepath and os.path.exists(downloaded_filepath): - try: - logger.info(f"Cleanup: Renaming and truncating '{downloaded_filepath}'") - timestamp = datetime.now().strftime('%Y%m%d_%H%M%S') - directory, original_filename = os.path.split(downloaded_filepath) - filename_base, filename_ext = os.path.splitext(original_filename) - new_filename = f"{filename_base}_{timestamp}{filename_ext}.empty" - new_filepath = os.path.join(directory, new_filename) - os.rename(downloaded_filepath, new_filepath) - logger.info(f"Renamed to '{new_filepath}'") - with open(new_filepath, 'w') as f: - pass - logger.info(f"Truncated '{new_filepath}' to 0 bytes.") - except Exception as e: - logger.error(f"Cleanup failed: {e}") - return 1 # Treat cleanup failure as a script failure - elif not args.output_buffer: - logger.warning("Cleanup requested, but no downloaded file was found. Skipping cleanup.") - return 0 - else: - logger.error(f"yt-dlp download failed with internal exit code {retcode}.") - return 1 + item_success = False + for fallback_fmt in fallback_formats: + # Check if this fallback is a simple format ID that exists + if fallback_fmt in available_formats: + logger.info(f"Trying fallback format '{fallback_fmt}'...") + success, ytdlp_logger = _download_single_format(fallback_fmt, info_data, ydl_opts, args) + if success: + item_success = True + if ytdlp_logger.final_filename: + final_filename = ytdlp_logger.final_filename + break + else: + logger.warning(f"Fallback format '{fallback_fmt}' failed, trying next...") + else: + # Try to find a matching format with a suffix (e.g., "140" matches "140-0") + prefix_match_re = re.compile(rf'^{re.escape(fallback_fmt)}-\d+$') + first_match = next((af for af in available_formats if prefix_match_re.match(af)), None) + + if first_match: + logger.info(f"Fallback format '{fallback_fmt}' not found exactly. Using match: '{first_match}'...") + success, ytdlp_logger = _download_single_format(first_match, info_data, ydl_opts, args) + if success: + item_success = True + if ytdlp_logger.final_filename: + final_filename = ytdlp_logger.final_filename + break + else: + logger.warning(f"Fallback format '{first_match}' failed, trying next...") + else: + logger.warning(f"Fallback format '{fallback_fmt}' not available, trying next...") + + if not item_success: + logger.error(f"All fallback formats in '{format_item}' failed or were unavailable.") + all_success = False + else: + # This is a merge request or other complex selector + # We can't safely filter for these, so we pass through to yt-dlp + # but warn the user + logger.warning(f"Complex format selector '{format_item}' detected. 
Cannot prevent auto-merge for this type.") + logger.warning("If you experience merge errors, try specifying simple format IDs separated by commas.") + + # Use the original yt-dlp behavior for complex selectors + ytdlp_logger = YTDLPLogger() + local_ydl_opts = dict(ydl_opts) + local_ydl_opts['format'] = format_item + local_ydl_opts['logger'] = ytdlp_logger + local_ydl_opts['progress_hooks'] = [lambda d, yl=ytdlp_logger: ytdlp_progress_hook(d, yl)] + + try: + download_buffer = None + if args.output_buffer: + download_buffer = io.BytesIO() + ctx_mgr = contextlib.redirect_stdout(download_buffer) + else: + ctx_mgr = contextlib.nullcontext() - except yt_dlp.utils.DownloadError as e: - # This catches download-specific errors from yt-dlp - logger.error(f"yt-dlp DownloadError: {e}") - return 1 - except Exception as e: - logger.exception(f"An unexpected error occurred during yt-dlp execution: {e}") + with ctx_mgr, yt_dlp.YoutubeDL(local_ydl_opts) as ydl: + ydl.process_ie_result(copy.deepcopy(info_data)) + + if ytdlp_logger.has_errors: + logger.error(f"Download of '{format_item}' failed.") + all_success = False + else: + if ytdlp_logger.final_filename: + final_filename = ytdlp_logger.final_filename + if args.output_buffer and download_buffer: + sys.stdout.buffer.write(download_buffer.getvalue()) + sys.stdout.buffer.flush() + + except yt_dlp.utils.DownloadError as e: + logger.error(f"yt-dlp DownloadError for '{format_item}': {e}") + all_success = False + except Exception as e: + logger.exception(f"Unexpected error downloading '{format_item}': {e}") + all_success = False + else: + # This is a simple format ID like "299-dashy" or "140" + # Check if it exists in available formats + if format_item in available_formats: + success, ytdlp_logger = _download_single_format(format_item, info_data, ydl_opts, args) + if success: + if ytdlp_logger.final_filename: + final_filename = ytdlp_logger.final_filename + else: + all_success = False + else: + # Try to find a matching format with a suffix (e.g., "140" matches "140-0") + prefix_match_re = re.compile(rf'^{re.escape(format_item)}-\d+$') + first_match = next((af for af in available_formats if prefix_match_re.match(af)), None) + + if first_match: + logger.info(f"Requested format '{format_item}' not found. 
Using first available match: '{first_match}'.") + success, ytdlp_logger = _download_single_format(first_match, info_data, ydl_opts, args) + if success: + if ytdlp_logger.final_filename: + final_filename = ytdlp_logger.final_filename + else: + all_success = False + else: + logger.error(f"Requested format '{format_item}' not found in available formats: {available_formats}") + all_success = False + + # Final output + if all_success: + logger.info("All format downloads completed successfully.") + if final_filename: + print(final_filename, file=sys.stdout) + + if args.cleanup and final_filename and os.path.exists(final_filename): + try: + logger.info(f"Cleanup: Renaming and truncating '{final_filename}'") + timestamp = datetime.now().strftime('%Y%m%d_%H%M%S') + directory, original_filename = os.path.split(final_filename) + filename_base, filename_ext = os.path.splitext(original_filename) + new_filename = f"{filename_base}_{timestamp}{filename_ext}.empty" + new_filepath = os.path.join(directory, new_filename) + os.rename(final_filename, new_filepath) + logger.info(f"Renamed to '{new_filepath}'") + with open(new_filepath, 'w') as f: + pass + logger.info(f"Truncated '{new_filepath}' to 0 bytes.") + except Exception as e: + logger.error(f"Cleanup failed: {e}") + return 1 + + return 0 + else: + logger.error("One or more format downloads failed.") return 1 diff --git a/ytops_client/download_tool.py b/ytops_client/download_tool.py index 7fdd303..3448579 100644 --- a/ytops_client/download_tool.py +++ b/ytops_client/download_tool.py @@ -194,7 +194,12 @@ def main_download(args): cmd.extend(['--proxy', proxy_url]) if args.lang: - cmd.extend(['--extractor-args', f'youtube:lang={args.lang}']) + lang = args.lang + if '-' in lang: + base_lang = lang.split('-')[0] + logger.warning(f"Language code '{lang}' includes a region, which may not be supported. 
Using base language '{base_lang}' instead.") + lang = base_lang + cmd.extend(['--extractor-args', f'youtube:lang={lang}']) if args.timezone: logger.warning(f"Timezone override ('{args.timezone}') is not supported by yt-dlp and will be ignored.") @@ -205,20 +210,27 @@ def main_download(args): if capture_output and not args.print_traffic: logger.info("Note: --cleanup or --log-file requires capturing output, which may affect progress bar display.") - logger.info(f"Executing yt-dlp command for format '{args.format}'") + logger.info("--- Configuring and Executing yt-dlp ---") + logger.info(f"Executing for format: '{args.format}'") - # Construct a display version of the command for logging - display_cmd_str = ' '.join(f"'{arg}'" if ' ' in arg else arg for arg in cmd) if os.path.exists(args.cli_config): try: with open(args.cli_config, 'r', encoding='utf-8') as f: - config_contents = ' '.join(f.read().split()) + config_contents = f.read().strip() if config_contents: - logger.info(f"cli.config contents: {config_contents}") + logger.info(f"Parameters from config file ('{args.cli_config}'):") + # Indent each line for readability + for line in config_contents.splitlines(): + if line.strip() and not line.strip().startswith('#'): + logger.info(f" {line.strip()}") except IOError as e: logger.warning(f"Could not read config file {args.cli_config}: {e}") + + logger.info("Note: Command-line arguments will override any conflicting settings from the config file.") - logger.info(f"Full command: {display_cmd_str}") + # Construct a display version of the command for logging + display_cmd_str = ' '.join(f"'{arg}'" if ' ' in arg else arg for arg in cmd) + logger.info(f"Final yt-dlp command: {display_cmd_str}") if capture_output: process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, encoding='utf-8') diff --git a/ytops_client/downloader.py b/ytops_client/downloader.py new file mode 100644 index 0000000..80d57e9 --- /dev/null +++ b/ytops_client/downloader.py @@ -0,0 +1,67 @@ +#!/usr/bin/env python3 +""" +Downloader module for yt-ops-client. +""" + +import json +import logging +import subprocess +import sys +from typing import Dict + +logger = logging.getLogger(__name__) + + +def download_with_config(url: str, config: Dict) -> int: + """ + Download a video using yt-dlp with the given configuration. + + Args: + url: The URL to download + config: A dictionary of yt-dlp options + + Returns: + Exit code (0 for success, non-zero for failure) + """ + # Build the command + cmd = ['yt-dlp'] + + # Convert config to command-line arguments + for key, value in config.items(): + if isinstance(value, bool): + if value: + cmd.append(f'--{key}') + else: + cmd.append(f'--no-{key}') + elif isinstance(value, (int, float, str)): + cmd.append(f'--{key}') + cmd.append(str(value)) + elif isinstance(value, dict): + # Handle nested options (like postprocessor) + # For simplicity, convert to JSON string + cmd.append(f'--{key}') + cmd.append(json.dumps(value)) + elif value is None: + # Skip None values + continue + else: + logger.warning(f"Unsupported config value type for key '{key}': {type(value)}") + cmd.append(f'--{key}') + cmd.append(str(value)) + + cmd.append(url) + + # Run the command + logger.info(f"Running command: {' '.join(cmd)}") + try: + result = subprocess.run(cmd, check=True) + return result.returncode + except subprocess.CalledProcessError as e: + logger.error(f"yt-dlp failed with exit code {e.returncode}") + return e.returncode + except FileNotFoundError: + logger.error("yt-dlp not found. 
Please install yt-dlp first.") + return 1 + except Exception as e: + logger.error(f"Unexpected error: {e}") + return 1 diff --git a/ytops_client/get_info_tool.py b/ytops_client/get_info_tool.py index 15517f2..beec65e 100644 --- a/ytops_client/get_info_tool.py +++ b/ytops_client/get_info_tool.py @@ -31,25 +31,10 @@ from thrift.transport import TTransport from pangramia.yt.common.ttypes import TokenUpdateMode from pangramia.yt.exceptions.ttypes import PBServiceException, PBUserException from yt_ops_services.client_utils import get_thrift_client +from ytops_client.stress_policy import utils as sp_utils from ytops_client.request_params_help import REQUEST_PARAMS_HELP_STRING -def get_video_id(url: str) -> str: - """Extracts a YouTube video ID from a URL.""" - # For URLs like https://www.youtube.com/watch?v=VIDEO_ID - match = re.search(r"v=([0-9A-Za-z_-]{11})", url) - if match: - return match.group(1) - # For URLs like https://youtu.be/VIDEO_ID - match = re.search(r"youtu\.be\/([0-9A-Za-z_-]{11})", url) - if match: - return match.group(1) - # For plain video IDs - if re.fullmatch(r'[0-9A-Za-z_-]{11}', url): - return url - return "unknown_video_id" - - def parse_key_value_params(params_str: str) -> Dict[str, Any]: """Parses a comma-separated string of key=value pairs into a nested dict.""" params = {} @@ -123,10 +108,15 @@ the browser-based generation strategy.''') parser.add_argument('--show-ytdlp-log', action='store_true', help='Print the yt-dlp debug log from the server response.') parser.add_argument('--direct', action='store_true', help='Use the direct yt-dlp info.json generation method, bypassing Node.js token generation.') parser.add_argument('--print-info-out', action='store_true', help='Print the final info.json to stdout. By default, output is suppressed unless writing to a file.') - parser.add_argument('--request-params-json', help=REQUEST_PARAMS_HELP_STRING + '\nCan also be a comma-separated string of key=value pairs (e.g., "caching_policy.mode=force_refresh").') - parser.add_argument('--force-renew', help='Comma-separated list of items to force-renew: cookies, visitor_id, po_token, nsig_cache, info_json, all.') + # The new, more powerful argument for passing JSON config. It replaces --request-params-json. + parser.add_argument('--ytdlp-config-json', help=REQUEST_PARAMS_HELP_STRING) + parser.add_argument('--ytdlp-config-json-file', help='Path to a JSON file containing per-request parameters. Overrides other config arguments.') + parser.add_argument('--request-params-json', help='DEPRECATED: Use --ytdlp-config-json. Accepts JSON, a file path with @, or key=value pairs.') + parser.add_argument('--force-renew', help='Comma-separated list of items to force-renew: cookies, visitor_id, po_token, nsig_cache, info_json, all. Overrides settings in --ytdlp-config-json.') parser.add_argument('--lang', help='Language code for the request (e.g., "fr", "ja"). Affects metadata language.') parser.add_argument('--timezone', help='Timezone for the request (e.g., "UTC", "America/New_York"). 
Note: experimental, may not be fully supported.') + parser.add_argument('--prevent-cookie-rotation', action='store_true', help='Prevent the server from saving updated cookies for this profile on this request.') + parser.add_argument('--prevent-visitor-rotation', action='store_true', help='Prevent the server from changing the visitor_id for this profile on this request, if one already exists.') return parser def main_get_info(args): @@ -138,7 +128,7 @@ def main_get_info(args): logging.getLogger().setLevel(logging.DEBUG) if args.log_file_auto: - video_id = get_video_id(args.url) + video_id = sp_utils.get_video_id(args.url) timestamp = datetime.now().strftime('%Y%m%d_%H%M%S') log_filename = f"{video_id}-{timestamp}.log" @@ -178,48 +168,118 @@ def main_get_info(args): machine_id = socket.gethostname() logger.info(f"No machine ID provided, using hostname: {machine_id}") + # --- JSON Config Handling --- request_params = {} - if args.request_params_json: - try: - request_params = json.loads(args.request_params_json) - except json.JSONDecodeError: - logger.info("Could not parse --request-params-json as JSON, trying as key-value string.") - request_params = parse_key_value_params(args.request_params_json) + json_config_source = None + + # Config source priority: + # 1. --ytdlp-config-json-file (explicit file path) + # 2. --ytdlp-config-json (JSON string or @file path) + # 3. --request-params-json (deprecated) + # 4. ytdlp.json in current directory (automatic) + + if args.ytdlp_config_json_file: + logger.info(f"Loading config from file specified by --ytdlp-config-json-file: {args.ytdlp_config_json_file}") + json_config_source = f"@{args.ytdlp_config_json_file}" + elif args.ytdlp_config_json: + json_config_source = args.ytdlp_config_json + elif args.request_params_json: + logger.warning("The --request-params-json argument is deprecated and will be removed in a future version. Please use --ytdlp-config-json or --ytdlp-config-json-file instead.") + json_config_source = args.request_params_json + else: + # Fallback to auto-discovery in order of precedence: local files, then user-level files. + home_dir = os.path.expanduser('~') + config_paths_to_check = [ + ('ytdlp.json', "current directory"), + (os.path.join(home_dir, '.config', 'yt-dlp', 'ytdlp.json'), "user config directory"), + ] + + # Find the first existing config file and use it + for path, location in config_paths_to_check: + if os.path.exists(path): + logger.info(f"No config argument provided. Found and loading '{path}' from {location}.") + json_config_source = f"@{path}" + break + else: # nobreak + logger.info("No config argument or default config file found. Proceeding with CLI flags and server defaults.") + if json_config_source: + if json_config_source.startswith('@'): + config_file = json_config_source[1:] + try: + with open(config_file, 'r', encoding='utf-8') as f: + request_params = json.load(f) + logger.info(f"Loaded request parameters from file: {config_file}") + except FileNotFoundError: + logger.error(f"Config file not found: {config_file}") + return 1 + except json.JSONDecodeError as e: + logger.error(f"Error parsing JSON from {config_file}: {e}") + return 1 + else: + # Try parsing as JSON first, then as key-value pairs for backward compatibility. 
+ try: + request_params = json.loads(json_config_source) + logger.info("Loaded request parameters from command-line JSON string.") + except json.JSONDecodeError: + logger.info("Could not parse config as JSON, trying as key=value string for backward compatibility.") + request_params = parse_key_value_params(json_config_source) + + # --- Override JSON with command-line flags for convenience --- + # Server-specific controls are placed under the 'ytops' key. + ytops_params = request_params.setdefault('ytops', {}) + if args.force_renew: items_to_renew = [item.strip() for item in args.force_renew.split(',')] - request_params['force_renew'] = items_to_renew - logger.info(f"Requesting force renew for: {items_to_renew}") + ytops_params['force_renew'] = items_to_renew + logger.info(f"Overriding force_renew with CLI value: {items_to_renew}") - if args.lang: - session_params = request_params.setdefault('session_params', {}) - session_params['lang'] = args.lang - logger.info(f"Requesting language: {args.lang}") + # Session parameters are also server-specific. + session_params = ytops_params.setdefault('session_params', {}) + if args.prevent_cookie_rotation: + session_params['prevent_cookie_rotation'] = True + logger.info("Requesting to prevent cookie rotation for this request.") + if args.prevent_visitor_rotation: + session_params['prevent_visitor_rotation'] = True + logger.info("Requesting to prevent visitor ID rotation for this request.") + + # yt-dlp parameters are at the top level. + ytdlp_params = request_params.setdefault('ytdlp_params', {}) + + # Language and timezone are yt-dlp extractor arguments. + if args.lang or args.timezone: + extractor_args = ytdlp_params.setdefault('extractor_args', {}) + youtube_args = extractor_args.setdefault('youtube', {}) + if args.lang: + # yt-dlp expects lang to be a list + youtube_args['lang'] = [args.lang] + logger.info(f"Overriding lang with CLI value: {args.lang}") + if args.timezone: + # yt-dlp expects timeZone to be a list + youtube_args['timeZone'] = [args.timezone] + logger.info(f"Overriding timezone with CLI value: {args.timezone}") - if args.timezone: - session_params = request_params.setdefault('session_params', {}) - session_params['timeZone'] = args.timezone - logger.info(f"Requesting timezone: {args.timezone}") - if args.verbose: - # Add verbose flag for yt-dlp on the server - ytdlp_params = request_params.setdefault('ytdlp_params', {}) ytdlp_params['verbose'] = True - logger.info("Verbose mode enabled, requesting verbose yt-dlp logs from server.") + logger.info("Overriding verbose to True due to CLI flag.") + + # --client is a special override for a nested extractor arg + if args.client: + extractor_args = ytdlp_params.setdefault('extractor_args', {}) + youtube_args = extractor_args.setdefault('youtube', {}) + # yt-dlp expects player_client to be a list + youtube_args['player_client'] = [args.client] + logger.info(f"Overriding player_client with CLI value: {args.client}") - thrift_args = { - 'accountId': args.profile, - 'updateType': TokenUpdateMode.AUTO, - 'url': args.url, - 'clients': args.client, - 'machineId': machine_id, - 'airflowLogContext': None, - 'requestParamsJson': json.dumps(request_params) if request_params else None, - 'assignedProxyUrl': args.assigned_proxy_url - } + # Determine the assigned proxy, with the CLI flag overriding any value from the JSON config. 
+ assigned_proxy = args.assigned_proxy_url or ytops_params.get('assigned_proxy_url') - # Handle proxy renaming - assigned_proxy = args.assigned_proxy_url + # If a proxy is assigned, ensure it's also set in the ytdlp_params for consistency. + if assigned_proxy: + ytdlp_params['proxy'] = assigned_proxy + logger.info(f"Setting ytdlp_params.proxy to assigned proxy: {assigned_proxy}") + + # Handle proxy renaming if requested if assigned_proxy and args.proxy_rename: rename_rule = args.proxy_rename.strip("'\"") if rename_rule.startswith('s/') and rename_rule.count('/') >= 2: @@ -239,7 +299,17 @@ def main_get_info(args): else: logger.error("Invalid --proxy-rename format. Expected: s/pattern/replacement/") return 1 - thrift_args['assignedProxyUrl'] = assigned_proxy + + thrift_args = { + 'accountId': args.profile, + 'updateType': TokenUpdateMode.AUTO, + 'url': args.url, + 'clients': args.client, # Kept for backward compatibility on server, though player_client in JSON is preferred. + 'machineId': machine_id, + 'airflowLogContext': None, + 'requestParamsJson': json.dumps(request_params) if request_params else None, + 'assignedProxyUrl': assigned_proxy + } if args.client: logger.info(f"Requesting to use specific client: {args.client}") @@ -343,6 +413,15 @@ def main_get_info(args): info_data = json.loads(info_json_str) if hasattr(token_data, 'socks') and token_data.socks: info_data['_proxy_url'] = token_data.socks + + # Add yt-ops metadata to the info.json for self-description + if isinstance(info_data, dict): + info_data['_ytops_metadata'] = { + 'profile_name': args.profile, + 'proxy_url': token_data.socks if hasattr(token_data, 'socks') and token_data.socks else None, + 'generation_timestamp_utc': datetime.utcnow().isoformat() + 'Z' + } + if isinstance(info_data, dict) and 'error' in info_data: error_code = info_data.get('errorCode', 'N/A') error_message = info_data.get('message', info_data.get('error', 'Unknown error')) @@ -387,7 +466,7 @@ def main_get_info(args): # Determine output file path if auto-naming is used output_file = args.output if args.output_auto or args.output_auto_url_only: - video_id = get_video_id(args.url) + video_id = sp_utils.get_video_id(args.url) suffix = args.output_auto_suffix or "" if args.output_auto: timestamp = datetime.now().strftime('%Y%m%d_%H%M%S') diff --git a/ytops_client/go_ytdlp_cli/go-ytdlp b/ytops_client/go_ytdlp_cli/go-ytdlp new file mode 100755 index 0000000000000000000000000000000000000000..c1b1267a43f5cc592c741abee031b0b7104cacc5 GIT binary patch literal 10700800 zcmeFa34B!5**`wnVA#Ba1PB5$!B|6UB@xt&fKDKRGcbW56r*5?s4=z_LNWs=RHKtf z#$oEXwDqlB?5?e?U2IUXngFt@B%sLRf~auEaf#3u0;%Nx{ho90ELn)QZ{OeR|05qV zcRAa0p8cHjoO5@c`tWpbizO+=VzDISZy^54`&lfh7CY`NmSOl4_*=NJIDcB7eEhd?q35QR9(AvtJJe^h z$oiVI$Ei#+*Z0T=AU^-0K9=q7ALwvv#Z8NES@La_qa%Ee zTqq=b2hUgOcb}0}t5khY8EC_E5B1%=6qH4sUBj0*U)5*!56|Xpm)C_0 zZ(dq)!{Q}og&q6rH0o>37%j`=`Hb+9t}DS)s-CzetG^}jR(yUR^|Ja~^4f5&;xLo0 zlneeNTniU@Z&`ffElbLlo}oUIn5ZvR747m@)>n2*MU|?q6MUt$3ci&_X*{2?p9>c* zfov%etMF8Is&B7R-&2Ns;aTb-6FBzzYV!eC)VCD%L`AG4{vN4Q@Fh7_hwoY#0K8nd(5Me8;3oN;Sl{$NEBNkyQYnDxKUeVeSYKJ?_1{5-9pP)b zX|;sxzOVwY=^MAkRV8mbz^A%<^NouZR^3o}OUL@IpLM&e=w_pq>#tnZLw!bnV{(>& zzpX}n*Zf+O^=$pD^(|g{0}FJ7FY~{H628pG`^x$<-@8X%<9<5y*MP6``Xx8tRMn}z z$v=Nc*7uu6Rc!Ko-;b+G-X_#XzN3a+xX|sIIis+^W%>Xn-{}f2OX)+Z9^;vf$cEHg z%f+~6MBmX?DSJhpe+qxpZC}OT|LcFHKkFS2cDLY+wpgljn2LUvj!L$a-ti+#j_FKv zQHn@q|3kGuvdln7TQB^H#X7u~#6q!={~u#47XI-cukrd*y^Z}kzxor)A7hWvyR&d- z$6tT^jrCSlj$M4qqOmvLy7)#s^B>>m;g7Jy|BL2D{yDc+d2ag7MUyW=xmy;gT%V;} zX( 
z>9sS8%IAHfV#!#~(rXsyTwH$J+^=7D_2Ol>=8pgR6!+Nj6;)NszFjnZ-m-#)H(q<$ zWh-WsRgRtKaa^@HZ&q1R?zeBu_lQewy1|n_y05+c#qR#OgEwZ?6-_9!X?I@=+x89N ze^1=`K?_hI{54rL{rbs$DHah-Tbeu0vYuvMFF%xG(WmUk+kna<0#@O7XW2EI)2a^l>h5&pGV53z|r!Hag`FUZQ!A4DViy$4t)9}C|~v1p?T z(AWXC9x(E^H%6QEo$RxEU0Fz<&w@WdLH*CXb2ED|&4+1%_4JkX^=~s>R`R6mj|fZ^ zY|}1+Q;S7#byl|Ued{f_N~1-6Vmv=|}dW&O;Ib z1z9%zp*Qh3PBvj=ZEqmR#GdfAYO9~s|Hcw6D53Wwl)u3vpv=rMQ977Vw(xBfrF9L& z5HR&9u9~fSD=!0Uvi_tV_Zp84>QOfyo77{g@pur|+=hrV)`^-;tdGx+;2P406S5%& zG5JdGa}aNCG+>cEM|XU!eO5oCfAkuX#;_X&Ouusz8Rfc-M5k5%a|yAAyYO#6Z8tg2 z1=dxu%*;wP=r#S!* z=?~wET7zkxd#=D`P?A?}G?LjXox! z{Z(gfe`EKc$&`Sz+;=dk`&AMYcw886@k>Dpz~1URc?duH|on)bU_KN-Z|8Fa8%d8 z0|Q#UNI%|~3hZ{BMfg@)E#0Qd@4L0u_@}Nru zitWO`N1WI%9L+1fDjfSNM}kL%e>-0_p1M+G?uN*mEBrg6QlrlR7JxKQFa6HnK}_`F zzwc$ym*W;fGD#4H1qBQTW>Ss4#cIOWHb-T2{NwYt3!QJs|x1 zHU^5VBB{y0!$2p}dz~pgLz zqaERE{8(zCi$!qo9gj`3v}bJ>z6P-oKTiJS@u!$S7177oyPOWyzZlp4iga|s)3^HM z+1_F3NzO+3Q+;x0PFafTbfyG2%%1at=OI2FT}-Go_<{6p19yY}^d>9U3Kk8$iHQ))kkF;DB=|g7 zuvkPc!LKti7QcCsG59TxoQL0v$VmBSsQeu$-}l3BP9!aNm$W~-JfpOAm0;52-d?%K zb9Y601#SZipA!Bi;Xg)}Hd#BNW5$D&-wO@-wMLma-P;$$bwVty(AbwjgkaIslQNby zvkgg${JpDcqwiQt5BM?UXRze|jtrWpS&}@xpB6A%k+i4taM9Kk=akj%Lce$A9*?Aj zL~y)SICfX{gMNKes+15dvG6zR>6Kk5S3s$)1EWWw?RMB!UkeDR%! zm@!0%dKUueyCV5oeMP$S_~uSVV^AVuo_sD^aIR7_pg+v0CQzu;eV(=nqfWIyPz9I)UsMA>shpU?{TBq+6bjhBdn zuKVY4v18i0l{rbLv~}Cr!^GM}(gxw!xnissbCTNq1e}*6=+{Z8{GCwwmxC=0`WVcq zy611zcc5!={s>^dxa>rurMep%2G?Dy?{mD-+D&>$B<(#DeS1pR>MdvVFSL5=@y6tC z8V4$6dUwDfo%qe&23ARR1zkyCUc{8Me`sI*wS0ez@9-Ti|2E-2DWp^9 z6vNs@=4p|1H2-)*wRfW{n4bin5VE@CzLa?UN&3|`czVf3VCAkuUV(;2ppg;RX!V|? zBvE@{^`-t3vWcyvjM>Bik@+T?a30@M?cD~YmE`SHy)t>B_F!WqId_|X>vbD3eo1UE zk$YZzk@`iudo`FY5t6>7JN)F}40al@j9;eJuO=+VSI@}zxB8CULB_$zX7>@;JM{Os z$UFhHCi}Y;qMn)}g3e@7yKD7ecu!<8M{{MGLc->s!)a*5Pe@-M0zQ<^YH}MoD_j=! z2^4(04EVHq+wl#_2C~ioqPF%&DA6OBn?%xn;QauJJDvUPYZA9Wt1sx4EcJK$YImW( zG2eF(1l|q;yLKN`9d37Jwz`ux<{xjY_CC)J_dGt6GwqP>n$iR0(JYWzGDbKjCkcYg2)dM7Z%;#iGCxCUPDY{Rcz z3;r2vh-KQS8a(lByL!72Z^f2^tU|sm!%dNTo?t#tfoc@~{e>7n_s{ojubx^rw|}9l zz0u`|(rfFT?>lq{`sO*yH~iGr1^A=Rb+nD&{ha(K@B@l!-Dh)uJ#Qs zf17A*M-A_whIUVXm+us7_zen$c(3__vOIGKC&Gr_kvuavqOTmEv&pk(2D1ju3TBNc z0y0v3FDFM*T>gIw|LUv~xBtgkb?AY>wOU>cWccT1tu!&@#zUzaqqy?Ing$OlusZ8J zS+m^!8?!3lwVqskg~jcE2ZE(vG=Q{lXH~@TSG!HS`&|ySjbXZ0KR0VuosGjw!*_^qc1jT%^;RqhT>OL93{T$;N@1Q%o#!rpmCN?ZSlu6bF#IowAxvV;g;oebH~?()iJ zUKV3c3*Y9`!hb@XcvCcE;#8tqPRU#apXm@=qpPp5ighdG6dfMf06(`;T+0Z)F*k(dn>JH9I zDsnUy`8O14?nZWRf^Z!3rWHAwBi9kZ#bid(gD407pUeueYJUrj;y?*e0Wtx@=*iY7 zcmX3H+i>~8aoc0ys`^$G&{FvtKNbF+23&4fs-)Axu^k9O{vRtLa2)g4(4PLQsyI~N zx#}fR z3X7f>;pl1PxaY@No*scr!oS5V9*7ptZO~hmqikfGS+)^nF+cEr=W!NA!%iO0E~qQW z8l$h6Mr2^-BsT0KJ?*eA$O2WDH75K(TbIKyscB zCFI83n7znxVV3t{mp>~j@_lMSwr{pQjBWQ3DH+f*d#~H5=+`#fC~A{MUzbJqL=|X{ z<=21q^5?L9^h9|ybSkb3ZPaf|tl)qI;8ha<>A!!?z@pXc$=8gUNWT;C-IW`k!HuGSd5K^6=ICD%oGN12qCcEi zyd!>l?BBz&{uQ$-)xY-&{)PT6B-ZANfDOLMwn#65X+!uQ!SQf3COYxA5-!=rXLiWk zNiQphOcp*y{9IlKd;GUy2}t^Z#5CR99{V>x*1rl?qWYKiS^H;lL>dOFgEx}aR!Qp? 
[GIT binary patch literal for ytops_client/go_ytdlp_cli/go-ytdlp (10,700,800 bytes) truncated -- remaining base85 payload omitted]
z@YlTXZ2*=%hpB_&r9pAYwK@IBCH#&xTp-6E{(=Fstgs zUoE_3F2~ewo<5eX=VNn5fAP0Y`Z@dZFX*n%-!hDflr6H-;kqeRPQJmv=JJfX8eH({ z+lh6wcC9@|1%@zunCd|iGf0K+TLh=WWtWTznKNsI?5KuIe>H^Q$E!$GHQ%bOd|ad4 z?S=}k66JghJF~$jcD!ArltN0DQcw`eU{+e=6=Q1jvD$%BJJ9FQGX&|HkXd{gC`sY_ z2@R}P;aq-p-*cqx%a?ZgKC{M^*H)G-#tbo4cI@e1`p=d=I+k_p+mS!|=E_?~*kAz` zoC0&#iG#0bX?N>^w%1yhS-O}-tto~y=?ZW*VZ%XjV(EH8onsUUJcpfE|;)p$u-X;_Vm1o zWh083rYN%HYvj-$!(DdN6Z1LCJD>CQcwu0lUPB89Cie7>H0`sfrUEMgfl#24Gb@u_ z99$_-$W=qvSqPOabq$1VvF)JpQgh-j?0kD|8w;otRsP4!#1tv@E?I5<@LQYsOaqCB zkvNU{PpSdwW#$eN%}KxFXM%&V`bM%93C7Z=!y&Wz=eF?1b$mUVuTH-hohv`l0e|Zw z(slC3*FQ*iUWDa>|I9D>dB;JTnuC8H@M--?<{Fn$IG(^cV}Jqv{Q0`feE5K^e^oy4 zC5C+cmvowclDhXCIn0%+p~=Dd;)b9H|3vyUn|~Y)bNB8@4c_n}l})%t>I4U&jnD%2 z=Fhx@phtjs^7dpW z>AB2Hew00f&nf(b@|Z1J`JhXNM)9eT$s>DmWGR22^)_d?RjGg|Wd&aEO-!B#wDDZX zGy5XtpFkfHQ)|5^&OYd-CyzVp`$N3)=e*&AzTZfb%V`My$gg|l-0AiXzqrMhdBaCu zuf){c0%N|J=dPZlAHNNOIm43ynG%1o>PnB>C!rS8w-D^%Jqa{LBDx_EU3nmkmYASF1n=0LOO* zFySi!ux&Jdx)*tDtG9j~Eo<{4W4Ccv1@hpF-Z%ATGf!r01~IW_FFTQ?GYjnuV1vCC zV6Wi(CgSz1&l!cNci1Q1?rmQ0552AX5^u0^Gw@C?o!^=Z6<6qH4_h`wS=$pQzvR!~ zNk5nW&3ELxZUZUwf7U)rFHVizWS`|3lNjBzS%E${FyGWxf1woY$=>>&sHn6l{Q62r!)+4H$QWusSlc0t`)Ix$hEaefh*Y8WI}HqdYa+sY#eFG#3+jT&?t%6~j7;r5uI3@qL%( z`QuvzQhH57+4zNq31#Ec*HAX>wHw5Z$A_tq-W@MN)$;lQk@Hu7=#&hHkT4+1cJ+-WOimdfwyd{9QHaXkl{= z=WsdGABj~K)$fc|dIj}65F%&?UHCnRSpUJ{cWgWVc(K%ZWr>y2vYBDmB`JFF|=MksEIFYoD+&opy8=Ja5nd!&3{%b zTlWv^p&_ON0t*oMu5GiO%yW!LeSph8iOVV!dqzQhWphK-_?yIqwt{qc)6Lh0O#MBy zQCa|I`4xQ7rDv%@59^7qlQT0U+S1|3tIVgz(IE57wG>E)N1eb2IVqSs`QWJAEI!;P zZ19&^(k)r%hC#*=7(az5m{pKEr*Qar4D%*?xI)AHkxn-b-9(RD3!542Rt+}S^WwdX z$a*z)D`F#Bu=dLbS^je`Ipuvjg3VXn?-x#quNzsqU|{r<^?7yY@N{V6ouj=YPIKR>C$^2GikK?yCJCxrU%~^y7pRhIXd!S7~P%J3pcp0~> zEn2et&o6;sR3&$oA* z>Js}MM#))|l|2!W6PK?a*4r1!E{P|9o1Rt%b+>)Y*ea zb(&r9Z?k>OJSjN5M(r$=x~LDlIJRWEIqU+U+2-H|IY?3Tf+e3iY$J&TA2};G9RK$l zrQdVb<waEI1E`0KS z>LNm)MuR(f)kH?Yc7U^UYUm8^T$TZ;ELQn+B(2qp=9fr9#{nI_TiyWRMJ3TOsUuhe z`0?rYozsrePQB*>LG^yhw_WdR*~zqlqfZOa?B<^(n?Nks;ehbdHl4sx+k@~D%!05J| zeubK|5Q(+GZMBtij-Sf$TQogwy(Cd((=p&v@M|-x?2}J4WUOFO=cAU%_1X#vOl$}l za|w?eTgeNV3o3(o6|bj4(}#e;LJiCt=D9YCB6T1UwGL;(c_e{4dO9_%Mixnf$B@1vu3NDocev^f*8| z{p8j88?sGi;2}(zijtJ;nR{^ko8R8QLf1MmxLS|E5AT^c2K)1A((_K5Bi7*dd8H(& z&>DGlPQad+@Cq>^%q`N&Asud6@-3EB` zKVxiv)c)CL`Lsptq72TT)zZj|M+@UC>($61s*6g;Bpe}&{}-KK@b@A|BMt~{PTD1JvToi zYI*x0X7BD;ZSL9DKi2O*9qe4c3^%Q&C|9`^6)7Y$Ei@PVp0E4Yd|c<6|Ly)4;hK;) zt1z-`1J=%4(%(RX>9;BSwP~~pwi>c}{BkHczUsX-z$ThZ-hi7RP{Ao3(&Km7O;e_Hn@wp)TfKI&XfHax}{q+cO4tQc0AB z^P_#ZpCX`Udz?nY+WkX1{8}6?UG;Uzf2(!G3L5hgRlQXsSFUuHAAi0FUj5)t@M@n^ zq)YV8Vx0+uo|#?PkySDi+I3<#emmR84=o9yzb4o~&~XgAzhP$?3@ghE={Q!ONW)Jk zh89AIjJhm7!&%lL_Ee75Hknb* zOb-O)Iao0s+vKfZg9MX7V-pIeh2(90%^51Tctcy&J+$u)`U|4h$hX={k0}b4J!9_H zATSrOBozX+Cl|T>b+ulGrwxz0j=UXd*r;!5!h&+gP>Y^gb@2Gz*o;t7K ztYo|=Ka+YEX0$;dntblm148EAOCW-`_;LEt4!v&Ur@5UT+$4DMfgOp$qA2rz*E8OZ zKAvx4}9P)9D8w^fsGQ2Spg}=_ssQI3T3iS*t6C z2C=|*jUkBQdofqB-*lqd3+Ie&aW*US=U1Sc0cZ;aKV*#K&b?L`9`xRY)IW$pi&|h<;1EkR!vdl^i)-eJ=Cmh$sA+DYw#V zO@_WKiHgv?j0Y7-2Rg1t?s+oa4{sYmY?Q!k}&7?>qrfmd|yPk@&BxS z{JWD6FZz@K&#zl9gT0jxVp(54AXEe9`nkRa^1&J-v%Mg9IN8Yu(K@l=Bp+Hj%Lj$} zl_raj=-s`Lu%=3HD+XR?%^Zqgs)O;<=drdEp{Da^V;%Wi))4}q76+o@&^v_VuA<^& zb;OAZ*C#}UHHJ7*;qO05V7Qf`RkAw?jG$B92@G5c0w+8e5SXW4H2j(bl47dvl#+$DNcmfA}tx@mu};k{?TdOL*u&CsuTbii^$AJ{>8> ziC78DOvn49L_f0!=k(8X_^I_+Y>oxBX6K_oHamKAvU7V*U+end={F1*f*M)V{41xu84TeHNn%L(+!hI5- zaYx-7u!Wt1?W84oVU7prMT(zorr>pe#@s(kvrkg9H#NHrt5Sdcsn>M)kAKHM2Ad@3 z-~4p8N5}Gd_z!G)`d!ksYh-To(+getzVF-g*&mQDTfMo`PcQc6H|5PCxA9Z%T4H+8 z5}ieN=}R8zG(IL(&~a3n?Si%|Qaijm{Qe$rU36q*ajQ00wwvuzj|u&7Lrx5}yjJM- 
zw+L_91FByG)j`aTVvebn+M8pn$YP2%hv}8*>hoy#5$fS$p+qXZ^!t5jx;84w&qSAR z>dE9gDA&JL4^unWbT8v;iAq1n2S<>OK70B7&6L{(Plvwg(bC`f0sXN?MiOE2(^Rv; zTKPl@xwt$#wZtBP_{if|Cs=7;C5=tk>xa_eA74o;2CEe%xmK9Nf)-?d*rjs6{>Y-d zcKR*$BL)QfOO*p6O|3xmCBMS0;i&c0D4|G7OKPIx2qEFi)Y609?HGvsiDm0<83b6_ zNpHRqJ1HjZx*pjD-ul&2o6PB7$DfOB!_j#*JOkqb7Vld~`-iCgm;ON_PZ|{W)2(X| zBz2Q)%Ny>w7yl`WV|F-3kW;L@!U5p`>h+3<%zff(%sZH(_ z_#XyiX0qX6yzKe+y4hwsK1qP06P61 znjUoeEYOyblZ3CAu(&W6ayGMzgBBa)cHNQplCWNsEd_ab^?jX=>4X7RiCbyCJ+fLs z>qR%j#$!mj;J&AA3%EbfTewfCTM%BQKS&u%pQ}dp2yyI;D-gpkO&2n)4s(>3bi4Ep zquY+!52IH1$qkLU>&k9>Aqruu<@gpUw?8cIlZHE)1O@X2+gh8s7+u)01hl-blD6CPS|`x<<_?O}VENX;Nleh#lCt0L z+y*OuF7xZhr0oX?8c_EwfuL&at*Za|VMxGJeWpam;;R4H!ZCcc$W1BK_x|58qolgF*1BlkqWf=b()P;(7tC9O zB<9zBPU@fiBIQm{xhqLrZkOwJ{@g*jIV8bPxvSyyFuob1$m!32czLa0qQyT1=Za>N zBDipd`!Ib&q{aV0zi$QH|3gj?7=$onepfJJ4PW;8&Lh_mKQ+PDP2#;;jbZ8z`A3Pr zwGbLYYTKHm{bn`VMFpR<x4wE55@`d18{On0!|$-{bkp966hny^Of4%Rj-8d&DO#J?aWL4^Dn?lgYa$ZBWXFx1VQ-Bg_rIp8HILv=woV;P6snF&7qmnWE?~R99arX*S}U! zLQF&}=pR0;i+mAb{5zmBw*f`SjwC*#Z4TK&t>TZ=6Fy{1_Gr$1e{j)t_}XrYE!?Rz zTYs!_b6u}^TDL*5hed0-8LF%%dB=3ntVcgDtD@?KuZb&adT)pk6OyjP_&~Mv=p~E- zHIZ=}2s6U6G8n}c;ddPK=*VDtJoQwuq(6lzBBQ8A7Yg`f79gv!_*_!~ z{Fk`{ob1Pf|B`=~1m(YqfA6Q_K)#i4bnJPP4do>6iHK24%^hS?OCT8O|4f^W1ZHfR z0g~J@(EI?#^b;qRwcREeAVn0Qi3UX;^_Zgsut<7+%jG%SPcn9@sFf zXBH=Kaf)Ku{Wexm-#XUvE(UyS&_G8!CbDv8LG!F5*=`zgRvo)bm9I+mwVG}U-{VI0 zgO$7$5p^e!yZ207GVO-b25;fU5O@)DM=)tZWL7ug3bicpQhgX&bfGgjYDYm4BdkUz zO^w4k7QJ_y4c=`waTf6W-8xdLgM#0SdrY+jt9{HIPqp@2LkbPO4hY$r@=xNGtF^i7?z8mkPrkJ9jW&%~@9jaep-#7!IG4hrn4rFhn>Tvyk>CfO{uAOrVr@ms#yS)Jp_XS0nWrUH|{ zFBr#fz@z(-b?$@u8*@=V>qhW`V`gReiMiK?Ztl-)`rTLjRU?Mw{y*a@i!JtD2Qn{Q zlt+JCiJzMe-~3l{X>QQV0Wb2R80XF>!Oib?wa;Ir(-KviF1#XC=;rrG(=^gpL##a? zFEqv8k#|75{meQI6yP-QgxCNN0<5NyTgK}pJ(K$lyrz#LO>$oa#A1QqtNoVX8~C;5 zqoLVcDi*$!bp6LQ1{dj3{2ysLm5k-9oh8U_VM^mjf3AG#K|{u&aV#CtBiisq-s~Q1 z*Q7`Gq4AMrr;YdZlZ9;}#QJp_$Yr)rc>257;a`6>t`4lf%RGuGJFuW(H}L1{uw}h4CnL&QGgV zckfmu6taN))&gu)Lq3^Apx>YJ@V6|XR_|{1mP2a#?`E{AE2e`k8?4r;bCDBKY|aIY zS%1G35I^_~0U=GtEVUnS4oeO?@jQ78_AlYje&#MBPY&uyLjBgvgPM@QuUW!j33~UB z34yTb)1XNpyo2sq{M=;Im*vvmy%}BWDOc*-c;q{&J%SdPv)$L$A$)Z!boM964!A~P z?}Pak)>Q#4!CxY9vt7YWr%=!;(Qo~CV4vPM+m@Lhz%*AnfW!UvEpafH{(+*$2p4CN zzTA?b)eon*_TRrAvWE~j|D+1TuEGm)71mLq`GKpeFsMuI+qb~B|4By5oa*Wt{u}Bl z0hZEtkdAyQ2$>IE>ZqV5r+=;@-Q42-S%!yWNh(U55HZ}(TnwXvcLJepPdH4XnJ6O>hR3tl;2)-rL&rX-T61z3OB$hrG zmI@p#C|iSXF1FviO|eyuMbLHfUXBSu<(9~;MGUT1^3bb=O3Gw!j1 z)`XtufoytQWtnEo>6}|){IDL>5ow!FMH*R&(;??Fq&>b{ZxNtQjgSh4yN_#edqaFF z7D>E?u&QqC5lfBO;>#a`$2G~2B=i3CSbCuWLttAB&9J8Y7x(a(h

4uKDOjhZuDS z*P?YCabg}F`zBG?*-A%6sMI6WI@hcxYdjV7e5ZuqR465+|Yk8m1Mf+iNBh`-$ zb$KV>Moh&~r1$BNUi}z}q3S1!`K^hZ{z3HFfWe-7^iU^?9FZ3 z^Bl_26NOmi49xL_^y!D?SF`5v&)Tg?`hy*q36lHcXBRt7G>Hu!N-i+w zoNjWmE>r`o)mD$#wAs~i=hOVJD%_4bbMZg+!&>J3u8S4$Q#8BI#|@=v5(-9Qb%9y) zV?M;&t-H7CpT`vgB!nRsnc&`X=Lfv-13F>$-+-X;E}}tLl%Lo zSZqZeo4xcSHMW)URu2X;6$s5mTuwS{T4ihfx2G{GheVdercP7L&3yoo@6|!-W`$fk zqmF(LuXF_&Bzx>+>?R-L8cx5-g9{iJsTi=cJ(6puRaOs+ELx&AEn7`^Ne=Hkye_ir z`l)OH_pC0Omb`vyWZ4ZgVfV}d(<-;J--FnbxlTskX&4g^nFdo1G;6Q2thnW$aNB`E zGsEh708+-SWvk6)a1gWBfv=I;SE&R2)Pd#iOH;{*X%Ou|bGFO7BFHQKc-1Ud?LhM(mJ$m7_*K77pa1>zL(FEjCxRh1KljDoMw(+H|b|8dO z)?CCHmFDCh)i(AR>kq1Kcw93Y=Azj8@zz{q?*fdX#-tYDJ3pZ!7WvDD>J?G)#3Exi z%CX_5%kjxSyKFpO5IlT$*tBtI4Cjtd_v<|_9m|_my}RXGq58gjeOhsgw`51-Jr&z( z#zmHGQSq9?cGL_d0NR9#X=9UTqnRz6FtxgEH7^sUQsML(?uCl?Jikbxit#U=B*&Aw zl2YA+j?1x5_Kekms$HwXv^IT zr8>lN;*9BDau}mIAAj$WM-*IvVQ6Ln_n)M^>B>oiC~j7*nD^2E^qf+r_!K}3=Hr}3#+ zt#UVXTmHz5Mt=>2SvP+jeYdvLxA&jX_wD1M@2Wiz@tl(p+r7LN(=#U?wLYp^2r?px zOQ?ax$~6;YZ#k_OXH3F3Yh+tYFRyB^%`%ECSm|YdsV0w;9~@R;f9ODxlKVElLGo3e zl9!z}x0(0}L-MS>eaP6}Ya>h7dR0yoZ-e|B58&>;b$DZW6*ZB+Y_47vrH-1&*sZa| zr_q~cL9LU^CZtOS^Dtc`{?_d8PDmdycwBmlNPI0MevriP2sYThag&~4xQx1X`>2f|A zP1w@?vFt}c&bv&?dd6E(uUu>^lySU;n5EiSIEt}8(gx0#(!>lQT7k>wUdsIgTy}$R zmgy70(dIIL*Qo>1mwO_5E(NyplZf8hZ&qbQUp~!~#{Gfp|K#YtB<#~Y_XILAq1#yi z=1HPE=m{>MtsTVBzfbl@s(whHP|ue1AFa2f-`9Wr`RrT`V_8HbbVAGta1V!w#4s-L%w6T?KvoD&EKIx=!>1#yV zPeI!K`T7A={oFV<_{PTCo=iT+Wj(R2Ss&MztPod&`s2O z!Ag}rmTr?fmc&2$Wl`Ex5!ZAe93$~ws)d55o2{y<8*YTyF_3a9dk^yQaNnI#=})6)cH+8|X6342_ZY5YyO-Tm%WUR3 zToJ&RGiq>vwPf0j?%DhfT>EadxE3!3 zI{}Gb<0VW$&qX6k*9SAuXv?e^Lo@?T6tliu{TH8EUx_95MQ^$up1K%jJ?U#appK8L z{9yKXn2^3Ujsu-f{EK`${YCKc{ZJT$7wAPQ6&1IuTA? z!^=cCb)q=+-I|_P`18;EVo~@sn1nQyl(B%fEPAp(1;Gsm*egDRNl3TgG6_8uOK!K5 zkj8VWn~7LzY9SwG8X8&Ma08rq3SpZG%WV?)2?{kwidL<|2`pVFJ_MM|M8!Je8Le&{ z7ivya-BoRQ#QYWZ;kNi#Y8-!b1Mzp@lktVIp4AAJ@r5S+sbjC)yr#A=IpDrZ&!P=g zXIel9J3iC;VaMx^prdLB^C|o$M+mKthe<#2^}Ex)nC|pDCP?w|o7(ZTIc){%!~coj zj%?>Q(|T^ekJiA%$Dm~d*PU&5F?)|^-H_K)2>&kN- z)j-F8&Cu)rZGOA`H26<)A9|SKWIq=1zFuNpfp-4aXTvT!yHk1AaySDW=Qk{drxCd) zPrp;;jB?kvH2zws%am_^y-#G&S0c5TEA;eD|Dk%Len%k%soyLaiBCCiPZx@4sG4Ab zS@k{;xG%h>(jC?Nb}9%+pz<@H1O_W7^n$!*|0F+GQgriw61=`f(|WbWUk7u!`U1m5 z_7Fc|*)^&|Cu*Bm5^b(yi~9!lMDJRo`PJ^iEq1GfA#yC4fj>*6@vjyioMmWOa4Dyt z9=ESTi<)h;$tp)8Zjo{e)w3+VFf3R=x~glT7OU*9uGVTbYFDe_$G^dapjExT$zas? 
zb1ec~adwCqVHr7+sSfLYOxxSp2#|= zIarqt4A!N@pUJIEmD>~OGB@jJkS$jiLRv-U;8z^L4zr2$$A52qa|g}y>AQ%#l&p-H zZ0A4i-;D4V=e<@IkS}|J=ho!CRx-*^YwYd(OQ9C+zE;FIs66N^wdCL3k zFKwuLr^*LE=xv^2mG}7I^M>fXq3YfWBuk|6J_2UTc0aj z9$IeMg&0X$%ShT|ysbq-{+->}AwU)OruH#bql(NaFL%U)npc$hw6j5`qwwNc_?bQ4 z$Bn%>%}XU$({uTQ^s)D82@F(o$0H1*cRcn3_tdoICa}N3y!@ygxI(`*=E3cL8?wI$ z;Lr2n7h_-Tsg@RIN9j%_M0Wa4-Pt3w6%l4d&nxYR$OIxBJ&k@k&#uI*!;jp=DxaQp zTtNMGt0XxU@Z_vj<5<7EsEUJ}Z0jfFF5QkIQy8>NTvC|1SX5&Be@sBzI$PI%>ecPf z{})g#U0O)y>6e1;Sq#zYDS_9m3fMcWDEI+*}B7rp#Zmm8>b4GXWBgIK&Z3fjD9M97{b8&)ZqCMmKWdsg8Oc(S zzx5-9((+4Kf4vay>@%%VEnw^_?0tZiKFiu^YfPb+x{&ccqZrz9uuYNbL&RXO(+$B8 zz4;YTvuhpy6-5@|auLuca*83mhz~jW4O^azQ_$8(6OP&p!M;#27eV7?8H;HcbaSH$z=5jvC(NsuO1hR+L7QnlWyO}1H6Un3UN!xp2)UDIy~&_*ULUR z`gyHo?bIc3l9ZM`@gyoDhqLvh77#S$G-9sY%$^O0Uc~i7U*(o!3@Qr>Voq?(A`9n>=)v^nBKmW=g#pr)etX{dTeu(X5oHujmi=F%EGUlm$a`+%11Y&A~ znem>y9kj}&kLmDE;?Cj!Y(J_55q$i1hMz5`4gyUu-u$LVKF+(G<_>YkPyXgYLtrY` zYN04T6lcR5kT({~LU7Vb3TE(7<^VXIRzEaSt=^1%D;?hZ9pr`E0Yv|whse-)-9ipi zvyC9RukbclH!^+=QZOsYz=z4d<%4w)|E>E_`gK0ovpDw(M7tbzQ`PA~5Fj_&j8IYL z?EP)6ZvNg$MLrK8@CpfiMz7C-7Pc$+PkO{)uX09V=_yUP@RE(+C*IFjH} zUkIZ^KaN$t6iGCasbOwWi2GR4kfM%QEkz|?j%#{jnS-OD#8PrE{_ka?A8Qpkr=Zy` z!B_|1gjot-M$Ot^r)vXCv=S`9VZ2a6z#RMaZgv2pIpT&<74P}~W$j(yqb#m~?+s)j zAnOK6G;)z3Q9-yW{9&Yn8|u|i7Ib30#v!B&drTpbvZRIr z^Hn-e9c!7+T{{}z62P|;l#ciTv->&qXPBpL4H-^rnZIk|Uv}-^+rhbrt0C9TY#q77 zTqT@d8P%H|`4XZ-@t-WqhQJ*trtfjQ>Hw4ExC)-if`q2VPtY+q$!V8Eoq| z>xSBHs(XIiCNpdJ2gF+HCd|UokI&ZOu-9z0uzR{c_sH*t>?^MQu<98fQDA~aTbRC> zmW`JkU$l3^39YS%jI3^Ozdqx)YB~FetZptnO<)%6VOqueUsH6aR&qQ#UIwyT)es>k zn1*)S!8_aVI?But?I{=+G?*hq2ejjnf?Qsl`azv?{6sVW-NY%+@eOYYS6;#v=?ARW zx%_VPr9pHkAyIP~U6A=wF6he_mUy^4v2BLb>@PfTdNpdUhP9DYe`t??_%^odM&YGv z^wt^8?A)ME@A3Y7SCn7=9p@hwB2Th}&7JOI-I{$}^XJ$PxWE^i=wV{z!23BIy;q8` zJe}rUF_Z9KjhUR33D3-pv45b8sLV6sUo^Pw$g6ZkN~14)s3MFz<7AN)hZiPo1J+x8 zS6rJn5;jK70l{4sj(5XpKS4LN{nc2l&(Hd9Z6@B*;q!CR4``HVi%{+`fSVJvQF=r` zlhHkJ%305hk$pYd6O^s{l&0|>hxWtYIR1p&K=v|PJms+W8rl>Hy@7ta!MpHKXqk9w z-rBu_IYf4F%U)!LKdqkV;4=>ignI_kz(xa+X2mGnj2E_dk#J-0ii@;2Ao!wO!X(lS zFew5~sLk3>C#MURRccpK0l1@4VZEyN4PTEjvPW5@PGI`NS%f2hdsewfg~NCI7t~9t zYz=?{B0ra?uJ7l7`Mzysk&1f%z8(Hx8`k*xvLS0Z#NI9;2eBTO9m%E>!AF}3&fVHi z7QoPYs%*{RF$r~yc;~T;C;8YD^Lf{7VttWQ`%uFVQL8Bqe)9#Z6|7sMi9POJB%Y4W z{kMht%D!wGf# zsf(omx0YCD;+ZWdrXgpBN@Yi3zQkGD>l?nsS2!nQ&PCo8bFy&$RQ$)ue~v@`P+FU5 z>SifFSD2w%9I2#{mg6QKMk{G<{S8e^!j!IKSaQ$JD*3K~=@bZ`n;R=~D7Q)ivZ#xV zO2chtx>G$Qu7(f_bO4gghD>qSZ}_rDU^uFQoC)4mqBr6ATR00I-(6vF%Z3gd8IX9p zY(Z=eM3x9OlySCxq7d)!1`dZP)iktrFHt31hCj5yzYjKZ9>lRdVF2E$H@R(4@;Cr_ zY$zMDUdaO$M%GYY@X$(*{O}t z^US`MPyGbC-M2Lm@zVz4T1tmD2Z)8x{x&o$`I=N87}6;5UWe?ZH_BtItkA)8ct<~G zZ#Vish>v84{-y{E?6p?n2Jc(T%8Em3KkN@RMUzLc^muF8y^jY8t>Gz+Xf2=jkIMQb zK?&89adT`m5r1o6s>Hz-^)CzSMkz(%qghN;CSh-sv^I3ZiOkX%PR#NA>LHvx2z<}s z2+H;fgppZ)kr8u`+>9IW$nRP|gZQJxOJ+`$GX!YDh2wLO|3N6j` z;Yv;OfIgcgY=x@Xu&fGH3%1E%EC((7F^}RtM+jFzRkJB{c7*2w-&-rXBPJ@Xxh(wW zN)(c^5F3E6XO@MY(l;W7hSKxO!kpm}Mg=`qO5A+~92t{oHc)wDiyrOWTN|WKEhh(L zhHsGtZf;p5YoxeuRyaWX!PkgFTKMO=0Gzc**9vEaR?a?OocUN{Wu_FHMV#0Wag1In ztEUMG;iOz-dVHXPA#NW$wWfC2}=P5?}cO8uWwzu|56KNI!mC+Z*9QNN|OhM9PH&LYPsC+}1DW2($Q&yRi_vT={g)fdWgM6xqNF%8M1}|T zu2`1s_{5OYQ;~g4jM3D@i^cB%_L7NJ>dc4xEa|Yv_8KQ1AGY(MR~cIgok_>6r7AgJ z^j#HElZ8Nq@KLDHpW*)y0iN*ky&tg)!oN?+=#SD=$O!Tfk5y<%#SVzv+_lq^5fqe6 z^NeH&v#LSPWb8QU@ww^Q2k#ojEZ49W0pSQX6~an_=baI2yw6b@EOF*!GUML=9)4eX`bQ#Blfd`+!c_@&Mlk!!vvkm(v+ zfj8oO$*T6hXrI?M7H+;uuq z{3#e`J`lUXzz=+p$r&Ps%?{v_TkqRPLe>$e@Adw0p|9mDM|6v?sB-5EE=E3C5;scF zp`rbRsY}qH?@2|CZ}Ii|Fg64SeP7Gx^g<;ApOp-JN48@Z^0mL?8}dGa41IG(9{D<1 z5M>4kPLm|-`NGoL%v<}#TCen{_|{}tqmIDOec{`%%zd9hEQ#=8@OqAEw~iCpn4YWX 
znH<|_uQXoNBtc8K5>@GIYu4m+_qO22ASvsRC^Ns8cvN{8$r=)fOct3Hq#X&;>TwlE zko7l-YdE6o7mBG3+`|hPyr>dF2>9{0{QI0pR%2P@E}^T5kuim%X+m2LWjl*6WA1idINx*x};iBc9B&OOgO>* zhM;_3i*M*rzFvq(M}a-)30*@ZO#~&ssPTKsLT~z8{#BN?77Ln{bpBm7O0J79hO0m#iu#-R~`Ji=D297&erqK@VA>ZPOSql;YcEW;5_C zYd-m8f5#_WX_R-ExD988-bStpwij3DL?#snKdzrv_`#f2(?XT`2m)egiLNVUF5oWI zID1M!>WQ42hWA@!;3+uDS*dZee+Zfu{%`6vcOo@Z4a3_LN8!}=>=hwj+M|H+3)K=8 z`GgTrP&7XQRt}ne?uIL>gK*AtxT0lEo_dEhwXU-$xA;5Qfj^cIbs&{k|8VFj!ZoIw zZ%@N5csGBj_&y1x)EDgO{>Wqwdq}e?%%%yHIpK8sS|0Zw5o>b;g`?3WzNzd)9tX$u z)?NufXMMMoPW_Hb^=MviDm0&vs=+k8E9#{|EDF-9u#aUpPz2E}-nEk%6p6~Ig zBmNVIUoDi>ov$lX=p~&v({8n?Es;zr@Keu=#7wpg9);#Ox`O{uT4* zEP+XbrpWqNU!5zPA&m=;rcVTYcOQ5c#Ge+&&$4N5&WG|TISWe2IZ7INj00qfSyrl9 z7v#l*Z}fJurd#;)Z2puxXg@*==ko>4uiFZz|Bh75SIV=d$Vxfg^nMwu{WTD;{eGM+ z`KtkdK2pAaQodhEs^+)eBV+|CSrF9jfgM(cJlE`c*&!8KdRR9k0|2?L8r5eju(KOoD5EE&PidfI?|H6rqfkULBi2$)M~zg5^vVD4#qS%2{?YoHx_{e>I6 zi%tZc)1nhngLs_*ikG!R0&zvS_D_>5wQdPmky!aQl(Rg zKXg!}8iEgMm12Q8A~u&DiX+RynnZ47o+P82Z!X4PS#UaCMvz9TTcEVm+8*UIa*6RLpOK5kq);4`211Vt%rZ^@ zh7KtU_bwxFU2Bmrf7=mmv#+7u<`=s~&W4$v$!o-Y{NI?adfej}CqLIr7KRydpCl(iXa@0NHOuOo(C0Yv%?cdi%q9!u z;%fksM@eUXU(Pg*xL3)Mui^6j@g#I8ESA4NPDbiX6|+j@40Sh`PH}Kv`E0IT|0A9Y zu9f5G#l*+^Se6|+soV3e;yD)xhS`YUIBUFoIjq*znBMInQBH)e&9#8iPRmC*TwZi! z&HjgE;;IFOFmpp`=m2Tx0wP?Be!d!~1vdR%#m2iMzECMMvg^z0WH!{wZH_7K_}Z(` zwvn5$B=m!z@x#T`A#ou>E%JTq_tflNQ4r-)j^Jmv1tPvQp}C$j`zP7oK9M%Vu140k zR$vuDXR7XlZ2}JJYTXRe9a)c()%uZ?@;-wc(_T17Zksp*#>tuJy{+fUyc+&~Aaa)a zDZ}yb!bLC^c@O<-C=@?{mA_4-xt5rG)bg=EQjr$u^#Kxcme)7DO=Mi`n5`cJB=}KJ z%&xQZ`N}$fv1uS8E%x2@eC*>jG7v#j3`YoKO}<{=x3VuoYg$_W13NFS8_j(}ZBfK9 z&cQGh4`j@WweC|!|DkXAJ5tBK?ZI7e$n^cbv`wu)r;mY1Nl%c6{kd;hsHG&h>o$Mr zP3%!vQ;GD6=qJz+-aZfXY6gQJ_ni5b;`ei^^{yC)P0MLo`}!zA%OaChzTum|kgsTA zN!=)I5LK2TGcBy2vq8W2O2ZpI%>ZzY;v={=18bbG@W4zn8=?6?yyh=$6LtiKd_eqO z0r$Sxr-JQi)dSUKqVMWL8KGCGQ2wP6HV%vQP!N1ND8TZ`g46nf6bbETA7FU1G&uBq z8G}^VFlR8TiMGk%&{k`_aE~drYtP*O7%lZDusQ?#wm=tEl<5nl=b49y z#+?$ajvYRBsE`7PD-_9In|=e7azR7Butq%@Gk+I>G|q1kmz{bHm8*ry_;*{3ttGs) z#7#fLyzvjlaq4OLP^Lqvq>tq+9%F;+K!Ypk&kGBXwL59zYr&7 zz;o0Wai<^3?)y7M^hz2?xRbEwvPUaji|d|ES0yjvK1@nKy@Jv~F;19==^pl>2a(iLp%UU*}Lci{p)sB?tHTxRGm~c7O?F>csEko78qM`q^rjltg z2-?j)H2Wi(oqd#Tc3<o9-uRd?6i-T>?mk z0DOcckW8bv$3ad7LI=JAwLR%0t#e;3C2BYFITcNz4KgG1qgtM^p2>JfS#?9s^@T6C zgBEd*%*8i#gIV&r3>CI}mCr2am~}CpkHtbP;p`_L77`yD`?LJ?A!wGrV$G~wX0 z@q~UP*b!9ON_>Jzp$A!Tkg4JI1d<5Xt0_kPM>eH5Ds8a-ZNsl`dxg zKs;r?6JtDP!7B9j1qbWEzBh85jB$AW)_l$&Ue!LsMWDbc7`*HgAlYNoIG2$}Lg!(c zJQPoBI!k6+#J%koj7JttkP*mlZe_l6A($`^=0GuKHP$?4zTU$|eR6I*(q}z*)tQea z&Ha0+;qO`l^g)1X+AnTU-e)%QJ5a7;j1=8B*p92Wa8^atVDHMaM|oGyn=*XAcO}r~ z$qd~u06A*(eavm2w1d(p0QwzP&`*aqy|*FSrEM`ZGh24Q_;@<}@ohT1TfoBOz{O-I z3rzuZw!Vfqh5eR0bhuyQJeobA$#$G3pQmiA9JW;wXIn2zk*!iB;$Ds>CwQ?nnQrTV z@2D`&`J;2+iPL~d|C=-Ht^I+RI%kvL?EL*ZL+H%koUq6IsnQjW%EB`(TDY-lROI6C z3%6H~)VXQifENfo4Krzf?d}SD)=*g4<>Dps1uYWSZT-tAXF)NU=JzM44s^SyN6M;t zut828ne14MBk(GnyCNUHq^6V4aZ)5N%M2k0|32cunH9EzJ0GKhKD5;w^UFj}`@@r! 
zz+3ZEmtO6r?caH{%<12BE?#Z^92#@>FM|i5t)bF#=pG&4b9~!$I72#-bas01Wtxn0 zL`~5Zz2jcu^)a_;dIVLG4TDMYO@+yMidROrB7CQH0pC2kcl*YWzTp3@qMLA(JJtVN z!<2HBWHj^#@gWKx=+gfiAxErbF3vpWfaPC4Q0dy#3PJ}4KZ2P1gmf+m-f40=E^@^g z=Dd2y1j)tSD!aYJYNK(*gfChsrstZXhkC`kup#C#>Cy$nSukBJUAf%OcDxjybb3?% zf6<$n^k(^w^hUvO;*TlEdI(Jw$oXR7Pt!OiKSB(v$c5&sR!dvOVv?mOGf6@(h)Pva z08LGYvLf!1Zjee2eNz55;Cv_{?0)>K8H7b$AS*!)k*S8r{U4E{yGY&bUH)^4vB{~B zsx9HMhy1aUP?517;jw8vF~hUGcY|Z^!kZZknYOTMVRNglpep1m9ExN~YG1EYnx%hf zhzy+zk%{f>@5^AMjKc=}02qfVOd2XN(Pkm%nP1z_3J7`slq40XE^c1Lw8gB>lGRAK z!Utb+ALB7!=?Kc`w2ol{Ya^ru*k6*`qxMPf(kFkkMHS}fwng6B1x~?YSz@JCoZs1N zo0U(q9+9+s+pM89OF-qDn~cDRkxwf;V-&g=ln!g|pgih-Db3Y-1+#Bj=#2d2z8off zx%kVDzD$REBp2B8ZweGppr$CTXPiD|ie2%L&?nqpi1Sjx$o9LkjOU-pwN7}A;Er@JsxL2cXoBtoaH}{ z@==_>P7BVP;<7^#_S|w`rHe~v!?P-IpX(jDseSQ0o?^X18yBBl5!%H0A5JWu&$O8} zMMPjO@abO*BZxDBNIL5vQ-1|KCU(LPb1LAUlGRsb0vX**Seox!hSu_FY+4n}rabA? zrgP(+($c2kF{fuLIkb-Ig1g4`Gui%j`iFjz!S1VYSON?Kr>%6>ESAWYy~0)pc9Xi% zAhOr>lrWN6<3*#$h3rS)i+Xbsmm$j3IYkB>u}C)R8b0!b%f$t8BRg%mc5uAUZ1Q!N z&C&8obAYQSWCU~Q65E|RR{ZKfO~bynKHe*{`_cGNotfy19iM-8w4c>A`~5$#rqQl^ zj&QW-Gr~C9BXRo9NF!dmH&Vj~fpGfT_^!S;xKvX-r4)BI_0^4Lm8*|*slCd(n%x6x zrGa1jo{GI7Jq(G-b067e(@t&@K!Ew?FX*OXi%DMmd6xdq{-rZ^=%=2i z4u}iJhUz23p6Lf?xC%Yf_&eJP9Nh~fxYgB)b9$ae_t$9{muUd{C_{K^3(Ipq@L(2| z0n^$q=K(nOhfYvCUM>FT!+V`Ji<#avYXsosSpz`@gSr z)w21T)glhMVqhD@!4|IzLg|l@O&J-xnBltwg%n9!TfAgcaqZVtiK_;1?Ax8uUulL% zQHK;~$2LMKj13Er`qv_^n$1HIMBd^LJzcOIREvoQ`V!GZ`Q-BBDe%QH{OOs;pNDPH zrw)P?S*=W8r(}UyOi5}f?j7F~`fh}+q}W_%v(BsHt8Vhu4zkP&TUY-%q#~*SP(5+x z)2xFJ^Gev+YO=6xY8Db?6GJWbO|yf134h*L6W_l&Qi5`#Z`t*8$Jz1uVR-~H21GYM zFIVDz4-*2-H!J__OdRC|0-U)?5jkzi_6bFFk?I)Lj;D@MRcvB^+bX*Mm5dDZi%7II6m_UE+Q*eKXbk z)MDCTfdCU9)JXQ9`#I{Jrl4!~!Cyi|d^(J{Kp+TwD5-YPAA$`&IOOvtmh`xQ{!^sGaJ=p<8>yP{Y=Tcd6kWC$5x8g<& z>{f&BT=xOz0>=y;yx8A5>Xs+1e~i<95LOC@Q4TG)udvpcH7L(Q-?{Snuz37_ned6` z3+i&rT`6#sNMa9Z{oX~Mj`2?9gp-Tc(9MRcD?k6qKuGoJ@M=>T#rjR zn|;o1gUWy7a&<0MJOTY{9IhvaIdjRje}KG3+;x@l3AMz@_c{Gz!jgaU-Q?@m%{5N` zFq^;QIL-GZzMtsi=i2^8^WKywk1#8#q`Rj^)`Q=kD-mYZ~c_|)oybQ3DFMZ znMn^Ok9{lmASd(aK;)6UN|Njq*2ls~bgUM9kpXw8DVp`4O19i?0KnJpo$6X6!pZ6h z{4xKMZKEdl@_){7Ng|Jb<59A>|5&(o73KjM-+3>~nFN?<_5up%U8f=UY#vFuK~h+r zi3q&4eur1zUkP6>z(!gmcgtmBc#em z_M7d~af@1+KAypxS&w{GM$j{|vlQYWxBSTU$?mWJF#`-@KiW2HoZTgfjP=q-^SSga znZtX zO3M6P*2ewISoioBEPAV*;o`gkw{V(WYoy8jz)7@b0u_`Kyd4WO`>Pw$70)_8NCLMK zm11dL9aD&E15_)wH!IMH=grGTxn4bvy3J^TLv&Ux7s?D4T_&)i4zTJ3uzAVAdL@7f z92}AHzqN4wH9ZB+-(~?vWs5i1XA(GCPT=hSzN1fvCcs?x6JX|AFt?qwZHf0jAP|IXo527e52c5WpZG9{kSsM@tHz;R=XQ3*3~-_>D`z z^8W;^=@!|?k_u?j5p-&h z;BJ@$NXrSJ3zGm{n*h{sTN2rI0j9!&**-0$Ka;KmjF_iP!?zZ5T28=xCCtbnxby^= z%YOn)xdk&T6-@OlfH^_HOme_zIRR6rV9RtKx`D`w2ik3#fN0pyihWNN3QV zZ)|_GoB$e})Sod4FrUxwKwH=2Q)~e}bY%+S9wh4dt7_q@a{y^M0kq;^2jbpX8ShUh z2~byLP+-BFk_x7h2>;yEmx%UX7|8&Z(jV` zqD{*Q7;`{+6Ca<0E8_jxFs}n~+MiBYpH6^sEugeiKr{aX^X@McXK=+67N_L|(BVmdMkE0BN&=+)$yi0jqQ6>Bnq$G- zH#G%uVZ7^JRY72Z14hdUm}mBPkily&#L@QT&7IKJ2~4I1GddN_y4L}tx>$b)jFuBH zJ_QpWpUDY8{+|GpVF7KtJf%MmybU0A>|4IaqD{*Qpby(R`tuEYqn5vYep5$(obl<@ zBbNnqSt=lZ0+7!Eq~!$A>?A@HmXp4`}8n$^Y;x^6gKwMY-F&5DA zseoSm1VG4G$fphkOz9+fj{%XdS75PFxEhov1fDfV7+d`qQ@^ zXnUR=SxW|cXWRZjx~HdBb?zH+)Ep~S#f8p_q-w5DRP$U%HEoxsbbSk|)T^0bMfIy4 zgrrVXgQ=tNbOPE`r{)zV9<{913(yfhWo_=b4zX9*mbu9lPDKlBZ~kISzwaQU9? 
z{vpZ0L~7|ooU1IHyi_>9Q_Zs$%*($@LEX+C z@j39w7q&ZEPQYybx&zE-?8(~R{L%r#V9rdfYRL&yH3TT;VyP-Vm|?A^ua!4HQxo2wG^_2DHcy*t+vXN`@qq$aiP?dX{&QO$3i zYLqCt7^lZA0_UfKxF0v>SH(7ErV0q?LCXmO?@R*oD7%{$fn$CG%!3w8b}AVE7{DA& z7VCc!1n1)u1 zIV~q(zS`ZCzs3=F@s9!$ z*I}-aPK7cHU)xtl&;av|k0oYKQ)@8-EN{;*cD1pPdTeYHqQxcpBvZn63b(CIOh60C0{201t$h@*Wp4 ztEo2cS$_EsFIaWa#~aT36eptFy4^yczq?=4V$SI>HmhfT&>C^`AvV7$H0Py?l>Yv! zi00%8LjU}kMd%c5&bwcB^tYAEQf+^mDu2Ac6#$r=3gAA`a+N`hcK}oJk8E z`~-k<035g|r3cgSn6moDN1xgrlq-P4k^mf+0FeC?0DJ)WLn?r)xaUR{PRIV{K=CP* zzy7-ePYv9xYVox428$;hwtJmnd+^7HO^c-rTO)>b8Mb2DAjxwzzzm8j;Fj%xmtT21Vf_~clWT+OPaYTjbc)sFeW>l4_iNUi3#vJ8tM z!P>u*tGYg^s@oG)t?H=ClUmgidGU^Y^Knwgj!mj&T%wwrI;v^BAO$~z<1I2zCs*^~ z=io=edETw&N?f(Y?s>dZp_3~1)j&A=x|e5gEdPA2q0K%LI04L|T();1so0^<>We>F zINQgM*hkkE``d|bQ3DF(8Zz%4{}5zkcJubF%@Y;Rn@Najp4Z#wTzTGVpY!F}@Lcm! z-fn(e)z1b(cjrZU^zqv!zu}j(7NYdz)$`8gU}VtbKBHW#1?PBljkt28t7LUgfhqfI zE4xb*|N4^Hn%Um^5ORL(e9&XhCzxf#aY{^tw^}fr4Cq|#+hStR@O zot_{ueMn>nHLPVqZGDMW3?Y z<)^0Tbs^Ad4Cc|+0n&^gqJ<1<;>uJi_9p|{pA z)Y~9_xP5AZMJE}l79VlhMpWlRb~!my#4c1UVEQs(nx!F}sy60m{3a>J15KR2ks#3X zbZ6AZgB&i~SWp%^Lw87r@sQ6ErH#3qKPq5PYa<19Z~2mq!@$@^5IBD$*F2SO%vIdA zd|avHyQYp?jGepevUlyObnWiN(FJj`TYV*6Fq4^1>>(`uv*TV78T$m&u5KWXrzL6l z2XTbK8`&W)L{R6dx16E%?Q4~@egD_=lqjhE9IZh zpO~_1C8N!r>-@Z28SP=_cMAnKvY$z(z(stA*m^7cg#YmDB8Rpz!X6hr#QWeFnRTA8 zKdQ8GW%1$aYp=QO!}OB*V$OJmeYv0 z;(h}RP%3=Gsjp*iPUiQT+T@4jCQ*vHo$zVo2T|w0y~GZr>=_cJ$Wj4)GA)3t>#JQlZa}GO(8Z(3!I|{7jCXS5eJS5xa}>v7DieUDdzZuOnfd@tEI*vX~CjHw4c~a ziN7MPE#^k1#B*6Vusg^2wOt%F4eSJGj+*BZv3G@9;jkW32~kB|91l)|Dz!8|9b;v zezo3?eC>zfYQ|i0oH(C3GF8OAY)uLG4?ikw?;Ym1t2$TMgcZ1anNT{e@ZyZ> zp6NcF^G!!1z^uPtrjRWJGmO1WZtr)fU(MLZQ@<{KR)3m|`$W`a--dfzC4rkdv1j=6 z6?xBE=ZTwf*ms!odQdJ~_gnS%{=A*~yEM<}FQTFRhMjyl?sEd)a&Y>H;I6?n2O!8V zVD!YF%x88q%Ok_3b%<1fa7hZc5H@_-pZo8=ceyma)`l1!eMNaU;oNO;n;j{$ zyy5fS3wE`&OD$Q4>zTy^F}Z3Rp543VrYn)OE&)Wa=pQm&y$k=MlTdusL(Pay+_k#O z3%+pt9wtcJ34E5d((NKX6yra!$y?EJ^X4{>z{ch2B8E~tj_W+m}dhi_SQg( z=|18@CHhjhwu%}BP#8~aiLG_I)Dyg!(EAI==;JAwGkdoVmq%OfZi_)Fy8_}M{^AkA zPX`nDtDkq_+rs4V%9`$7Ia4NgEe%D(+t=u-@hy-qkj^((VpKWeqCBs z&(%JP#@wrAu7JDVvItaK`!bOTHM*I1#jQsKKLb`f!*eI)yelrvnilSRX-~^!CQ-6 zRfXcmp%jQqlN>G_%)t@6h@5#4Hz=00>B3toMRHU8O02+2!RyLNqZWMJejE17Y2pqw zas$EZ$9q@YOtj0UPUzyQqx`gk-jzE<-YfjjyZ9Jsz;Gg|ZIht64WALj;o#}MAqRbF z+d10P9-9HBe5zxGHYNCu#OFzEUcK|K>7n#I^z(5ycjB37DKfoq5Q{$q!s**txqR6Z zBd2;L#|dq>X3b}t~Z(tIKYokeVjoo&K~@mc=tTjeP(rxkUbS`l^L?QD(Te#@6K zr5}_`um#p?oh!6;uGHo|>wKow(Rj!Ha?}56znrYyW+uZd&CfX6BK6fs6|BZn1j2OH zzXy=D`e;cXT6qhtE<@2*t)+0ad#R_9zEwWmlj}I77RFGg{z0A#ffU0?!FG z6JJ9bDi%)85EGM_jz9Vv>3Akd+U0UT-@NcR-&D#s+)#HP4ss=AEB+=+Vw_TZ;hJJ) zZ=2v}RToJx#=^RKiq7f9gOJsn79N`A;ZSG(*U1!`^AgEBHJJw?j5$xn^LNYJUr27_ zVfOv8J88r4xwK&n?`B7?Hfr;4Nhp?tjW!{B0Sk^g6s{)0jQJbg97cYkn+-QfH$5b2 zH?J8_H{X7YZiZ|HvzMG;swT>rnpW~l7 z!O>sPBvD_(6Hy&+&LyNR?3ozx;qe*%a0Tao`r_m)x0M9->WGLtc7D7C-wl>UvT_c3 zQdj<;1LFFTo=*;!WS9xWT5zNhyTmgN+i00=H%ar$ash!Gm*%ODA9zTFVEx|UE9a| z+)VDVoSRe9R4hZml~cth_?wFJ+~$zTJXO)!#34Xe&1=3r`I^Z1w6HuEl9Geh2SRMQ)&(`5Qe) zg;v-=BWtnsX!~~D1e#U5XWe7=y1TuBr7kEAtprF{Fx)Nh~Z^sV%NoP5pz%E6v~P*P`Vmw+OFk9E-VUG)=adzmkXOCk&#m-bhlNkf zgrBT`4W8>x{mnD~-_`G36z&+GjHoeNyQcB%B`(sBQcchM0LgF`>VIIQ^9dJZdc@<+x`?3a*3PJ4rBq^pFg7|GZ9WgrK# zKTQJp17m%oP*~9g6I{ku>DHuh_M=q9Y07ktjj?Dz*dQ%RJMB8gJXj@dCg}{k#@N)|;D_C^N z+t{P4v!&s|hHix$t9zTxmpKOo!d={Gby(s~pN{*igsDW=nMpT-tM|HK4N60ne=^78 z_{*g!`NW>aU%d&lm_KyS<75Uxds$(#XGjugI_H-Y;8cSy;&pX@vR{>D+FGl^N5Yw; z%qe6}!+-P!jDca#CA@JEFIaT@TlCC3K@N^%qyf9L?$+XmP{5qTxhu;%Xay6b82<~w zDlZzcG(q->2iyKvJKCR7Q#5##IIVlYC$Qw4;acYwGf>|PyEv)bNR3pX(@rFzsf=q{ zbz#`^^oL>|oqrrwyp7xkAm=|fa4p>MH++SIR^NdM`2Ggb9kGJxw~0E~&8_Yi&Ys;k 
z!!>*pYQ&$M%9SxXa3bz=soflp#gFQr;u#-(?>+HmNRMUR$Q1F#JaU7>)m3Z}I%w^S zsU*?w_=D)*QLBGH(V>5ffHtkvyXB?4ojh^Ic{FE~Qtp@uguZj+w^82OHz^dhw?wz6 zd4KaVufE`mx@qlQ_zZ8Qk+a&PrJ>t}U?pHsKj&U=xj)UOuC2QukEl!>@_cOuTm?0;{T29@%k(%=HmyWSk{YFIbH3K< ztrZl8vyW+!dkDkz7=i%B; z>35JWqDdtIo4R@LcHgmX2D>>uS&{7(Z1u6;=9(+fQlyL>e^hqjMOf9#d^uh6mgd#a z3g-|o2Y?SD@_%T79KGkXy;8IsC0n4U?e%)UNIB=z_1wKoqpo<7wkhm>;3?T}^!$k* zYzE^5buDWx>ddH zo9Ug)o0%w)cJz>*-fED8`weMApB28Tp5g21U&Q@4tXd)lL$_6Vb0zHG7;|5xbX4w^ zinzagr_#mALMZ$-&JNh_!#)Z2W}YL^nK*pn$*)9i?x`8E9>M*P zW<#B4$eZj-G`XkM&v3!n+MC?hIZx&|q+dgaT$X)CJim~t(vnt9yJ*(t)k#|_nmc2D zs{0%2Qh8(qTO?>f>wRgSVmi%FiTp&|57Q4pwc1+1JHr$=z&L}jUPr_!A%pPD^iH%R z1bo_JJ2%aZI{9pyx>}=;O{~#}`5m4q^Kc5Rga}yiC-hC5R^^e}|9Uo(#mcK#Q+avp zRcEgB0F=&^=uc0UQb=%&*jh?4cTFYIwH(I3(x^i5)g>qtdMfYT-B!AoYd-%{vub`x zmTW?q)lSxQ^VPpKYs@ud$%-%X)iQFGPBxQi%0B1MbX)nD2PvkW>+fgMezWHVzE1F5 znvt~Jy#Qu~dgwhXAk>!AdUS_=eyt{wlV zeO39oz=5Bch%crbTKUTWjwTAUj5>};%WgQE-JzT1usq5z^3PZgVaMeH9eAOMxsH~KBi>FQ=9sTMc@o|5oevuE1l@F%EXcW+bFQN>d z5Y#06Ka$t2bB^uqik-)ew_?hXf0Q{z`I8WjWlplH@ig7Xj1c+m$}Z-_@wIkNNdJd- zM+{iHPNcqtM~T>-U5776+&*2HeT}>=OeY7KGqQ$ZA->3faP~yfiI0=WbVmoGKg02a}+-;S0~H(Z>KeO8DrE>mZvE){;*3s{++9;na_7s zS2enW4UD-m>ZTf%CACY(OP9R0_aYAY!fG-!1#AmK;;pr$$vD3DqNPeZzm7wsS@2qv zkAJmpiTflfMGpI}^=UL3UEiG z(&V%{5v?64W;H~wG-bYegSkA_Gmx6RD~ise0OjsOR1>b0Zz}m*f@-TRr&s{^WU(wa z7YVN8Z8WW5q_x%wyQuTK+TkDQz~kW%uq5-yhR(Dw)m`CsSQaC7)IxPL~xSZC^?n-q~US##ByTNWp z8*L0lcx?8U4^bIYB9~wgpX)Q+nK?)xK;hN6vVsH(Y6&r~7H+*|f^?7;rmvNU95d-w zSy|xdM1>6B=@Shf72y^3xiXpG)%Y2Q$|VuMC0EsBQt_}xjk-Y>;2#5|{TKdd`)7B- zpCMW+|IHilN+euN9XrTc?CK>m*(Z` z@BMkh_^m6=J3aa$1)$%thrPnQI6R!)o8KgDqlan;YwNP_gL5I_4%oy`$oRLd>R>ncS?L__GI4zoCp`pjKIGS(SQ~uoSgGMI za3^2L$$DYs$I1)Cp`UA_eE#O8ojw;}((uOHCB6r+oi?&qIQIx{;|8gkqK8>Z<4Ts> zxGU1>eJSP9ucoME$=BwbORRoX&r64X^^?KOemeZuQj1)>cF;1`j)r=MBHFQdY@Tli zqDfZ`Tjtv}l<_XS9uW`{s7Jbo1h(bg2u|Fe7t7Lz)Vumuolp`rpT5?KKA7(4I(9?Q zm^0ii-_`Z5jrWc7O?|_cwq7=39|xkSmO`I$nHE;QwCve02!9!LmHb3J1(^UQJi2o6 zv!xa6p)Mvyru!s$346}ca$|F~T%SfVvbC+ALvy5X7TdV)p;9Kz{f$??sCA~rHnB-g zb$|Fgd2;hxntO~bHdbqNAIgv28NP0=a4Yv9Ngv_v!H;9GtS9y7`no}klIt-_V#Dm~ zUPjo%sO(UA9s=RC-M-wP{L=Fu)V4USrs%)VK_pc@+TEAQSF#Fa(IhkH68M|jPZ}w` zCFS*B6Pr)+zu^mW#1t`kyYtUQQXO$G_#>X5&RUCB^F9vyQH+i5Q7@v&Hf(Ce()P+z zPd@o1ot0~6P9Z>S!>%KTZ)$JY^bu3x5Q^dQ*W0@MHSysFn0u&U!AA^I4eo)12p$US zc#c%cIk)0&4QHQ33kdC&eJsDkGI)gIOri(*kx(_qN(i|0g&Sgh<2ygSK5NPbx>a$;8iU$S*FUDvr{IwNSrz2r!SgHKO;5DUcpc@L5({Bs@;!J;;q z%-%aDz!Wi@yFX}9!@2t?LRLbAwR)5qq|VPfPXC^4Y7yI@*l(9B9nV-TGaLWZ6<1&P zjQi!EFY@jC2)gLEN<4n#9*Er-jC#*vYqKqDejwG8l?(j#?V@mZ?rOZtx9L+GFf_*- z?1786FET5iKt_Fz^F?mNa=Smf`nkr^KF6N)inuDp9#j8VioR*vZQIH8IbpZh_f}B` zZ}a@R3(}86-XAD)`0l(MK~=@9$Sh`2-1yW_5Qp} ze$8Fm=yAk7WdvF$DvQgc?GhB!YK3OZhnhS2cyeW7G#`ItbF~iet#!ODi4phDI^#Ux5zb1{@JQkr+1ItL3I4DB=LaqvSQHMHOq^H8`8EG*sHMYs0tq>Y=MoUOE5 zB{%0RXt1sOEd&5`Y);{qUeiWmhOMU{_V0Rct+;l|&=k->bG+0Oaew?up<@dAgVbK7 zwc8@%sUaVW{N8#p-Q9Z4Ug?BKO@F=lOOLhUkkqF6X}|KA^yDO`g?bt{m}i! 
z-{eD&k)IKsZSo_l10PJGAVYCk${?6`m@XY7P;B*+C0&d{551>mvr_P#Oa`|d-W*uwA|GZanE?!K|b=a)cT!X;bF_S z`oMU?|4Ya3XEFve?WZH+pPG+80P|*zewqNSAY0us;9>V*%x(mI%aW$$2R&zi~X)R!xf0K7;{-R^!6T7#QFKrO{(dJJ}<>ln}k?)r{`O+iu*Vuf|MdXXk%v|H- zOBczXXW_3uhJ0mA6UjHn(CeC_x3QCp-^NNVo}mq;r&307aIBO`qYV6qJhO#CG<$dw zf!w>IPWD41=e0-cqVgnn^c||w#5vC)L2S4PHCtqh8kTo+@O&9+&;6?1Iqr?&?1lPz zcV07}Gv|tK;XPP1V?`zH$wC>tl6N~k^Vo`&0~<~5GScLq)}5yXijn_;CUT$DB*V6o z5;4pOwaRgYes*e3nz>Vt#yRh}e@p31$>TPQh4r_+NsAACjrKor4QWMGQ#^YRX_J2D ztl53f@3gc_A0NEYvCg&}ZnsQ+8&6nELR`2H|6Qmlax2>yb{mYFRJ+>7Sn!&tT^tR` z0ldS%#Qh?)UQ^6T@)B{U{Spt3u@n*d5Ld&9`!BCBb^}N<8*RdE;&3}i5>}HCMP zAS$$M|2f`E=nrNYy%OmMjaqU=krtyvZtJc+3IC=48~rraBd(t|&=+4gRX_D~OX#N$ z|Kk5dKUMwsILCHC==e!AG1-4=e1t^HvYcyQ{-g&5Cz94_Ek zc)^9|swaf{{B>X4g1KG#^>TjsXhlD+!@!Qo-#T=#$ysv;}nJ|-FN1;v}o*`ZYv-l~2I^#GFd+KOr-2PeUnZetw zlf-lwZP(p6wIV7NMclVMj3*5ryG%cwT~JTh2FU(Nof1suVxJg9QyDrzUM`3Y5G|_R z-2<4K680Q?nyoTVD}TLrh#9GihGatj&#yb3`#0u*_K2sBV(Pkc)ZZCVaoxdwgzHZJ z3}%l!i;a^5in_k8xmllwV+E|~Ii z2I{F)PY4~KAM?){Cymw7GSPDxs{c#>9I?N*S>w%|^!OmYRwMi`O3eX`Nfk+E-$O|Y z7WG&TUOF`Qoa?RT9?AX>tB5b8#$a&j`gbw<0k%ECPX+PqiA#FBEPL`VSXBS4Q_J6F zsWY3elUnrV&T`60n*5968ph5^3iU`DT*fvL={qmayer3GiO-i0vxO0ruNGdWhm=Ns zXMVpgopDd`FNn&{OSwNp5Mf4?qZ||S%fg{%J_;5EpQ2&j+9Qyl3FYe#t}c*Gb8qdp z@K!nVGbL&kKHrsrfo@L0>aAWv_N(rM%@5z1FZRQR&x`Qy801^SfXlzZKCapHz8EWF zJzD$+DZ)K>cdHAI7D2yl|FiCb9@YLvxaeb^ao?_0ix9M5!1nV{dc1>1iFt3j&^tzN z+5Vu7bA1tK55)W-(0Sc4jnDm{`C>K?-zz<}W;6+jN{t2TFomh~0zEf=;>%2|&?fWA zWgrBuKNN>kEWYxwTe_fgS%y2`+)i{O!R-`WHKf{Z)rabVncV?z!|mceRkBJ*fncdQ z(#;9%_7X?3H0;^`FdHjz{xPAj!Fzjza*%`$1COPJ?)NBDeKOMbe^|$5L*~eE_G;1! zvsdyrz8%vMt~j_m4ZoOrc4KO{sAXTa6Yw`oOxhm}#y@g{Qk*&WllJ!3d-%Q-H0R0b zA>1ytS%P<;dFBn3gDzX|EShmBVo2nVh@P5e!&IOcJnyLX|7%Is>q1`7f4 zbqYDg^Z`#!qAGx;B&vS+dLly_SzuN7W64!2uUWmCe0akhQjE3_GfQ4#L7*st!6kXQ z-19aYI>}O@^QyHh(8v2NL(Gjz{o=pHgnFNbC2UtZ+;3FHNf?s?K#DLg0g zZXRb#S+<;;-2HiVu=Vj`TmKZ8!iV~NL7op4`6xT8=DFn-1;x&;5alSq`JCokCdqIW z`@#cJt#LOA7WLOw;EB8y{pt%6jo=qi#Aly(?;_0}a{y2s7GV67vBYuhxX|@H@7l1!fgfn+h-W zlcg%&VzPvLA0hQ~Rg$?dQFEe8aR>F1Ou>%Nv@~iZSD!>Bs^g7!>i3xnVIBQCs;E&E z;CFM5_?O$y@3x-qD#EnZ0Q)HNrA7&GRpbeB2Trfx2A z*o^SkHE&b=9x`I}=46}yXmb8C$)8O|>!mEZVfSzE7dpX*H0)mLJizMaq^4JCYyhM>i5f>{9Lo#=1)9} z?>p9?c}{+gxs!Zz0>fy|vfbM`hbs^xgPvfc#bmYWGH?;WYu3O_i0||71mC`QLY!}Z znc&-TJ^_Eo@h@;U7^|eX^TF zIQsXt&vQxlE#}aI=d=Qm@3T+WpFzXqC*pX~Ac1dFD^gD?fK{;@x+0UJC=l3^o5 z4?2`bTW6`Q)4rVUyoh4{=dp6b9sc!a!AeC7%l2zigzXn&N$ysCrnY+?<;;ah@vx)V z&j89l&A6Nl=GVb9h248fgO)u(YuUy6j2=P!ir5WW_`N##S|+vB6m8?tRn@=I-7F7~ zT?4=I)8*IWzoZ{LLZZ^oGU``1U$&!#u90W^z*tHO%#eEovZSQ!gBLc>f8Q)bq*D69 zZr5JD+f@Hu?1^T^b4a1})!*u7&B6y0n>A~k&6<`jHft`WSkh*VIi8DDRI-C>Nt+_> zIsG7-;X*VsVhN%d8jcgqHUz3Tx%}YQ($Cv`?9fjx6&`U66nQy@MMcH_(Tu>mC5q-_ z7y)68nV`I{^K93}7AZAjf|Sy1lZ}*2+jy zS>pWv@++mq*Q;^5aQ!WS7`T@6y$AJt?`lqOHo1?#mm!m{!tQI)9a;UJCj~^HhqIa> zD->hj!v7*_hAz?bf2{-HNuIM2L|#R^{cgeM9pOdkGE#Inq4GCj(L?H@1c@ zEL4c~Rh*;6ie%0tw5N{kKRM@P+D_N8-Ntjw%jIa;CuCqY#pS0#sFoa-sZ_Wha${eH zS;#gyv!3yNCs=fmX5gjdxkswITJuDF2=Xfb6x2+~D`N`kTNQ6Qzvfxs*YO_Xo044l z0N^6HaZKjC++fkZhor+F%TckGoBU0r&Rnq)p6|9*xTk$==j(V;_Pl6*MZ;V z-?Xc2_@+SNy4k-bo<{zeb$xpHB|=AGTQ%o7*Uc^|X)MieKZ&&OV^2x3V`MvSd|g(L z0Q=<#&D`_B7F=M+S|4|cav?J~r1e~V!!C#^(UxyBh5R~6C)v)(o9rjjK3`$e{IjLK ziT-ufL|@^-*_TTbOY8b@MEtDwKw*9L3Z*|rSM!}oVI z(l_J)jXYdVUTY)eAYW)3>l6MU(%RqZucVne9^*HB>-?YTPys5%+L86IFuKBVN5yY7 zD=C=dt{NdKdS;zofYK~itgNM&+$qL{mW+w+#_>JGBq-POMcls{3Qs)Sc^|N0=3<@xSVJ};qqKz;D<_liG6yrPI<2E zqrWd%`I^NM+;~>LOxfm|xjkLgXUbSjk(NzYN3t@pyp^=8;g;JzZn+r;WhjE6SlKt} zGv`dpI6W8Oi(+CJT*+9K|7u+gr<$tlVn5{w$8q!}y{IYiD9m;ei)C>I9`H{DVxLjhduzOo%L%pXqQP>T+q 
zk49v!MBqF%ZBE}P(hQVEw%DF>V-KF%tt$Q0?$wzxDTZY?1g(>CQzvzTe*zWJCefLnwbtdLk zN^IsexjO#4I*tD+DdT@gg7qsEFkG~?GDB{E3%%nTzKwO~*0DtOn0ca1q%^UE%2lmC z=U{Kdmmak@PA=-pdPl_n_J+m2;&ps>3x}w;3jZnmU{0p5sn{-~xWjN1b*q1Pn~X>Z zNP40SlNw>h>ZxqxhD8}2=W&xrQy(pTCY}H8nn!tS7b&$eR~WSd&LbNMY{RG`HAv#b zQs2}KzNq!HjH& z=$D^T;=Q|m0;b*focq^Y(vgjwIldRu6)aEqP_qaz!oBfqLS*lo^$nuBT8UeTk% z_oE&#kuqYZkx|t%c1F#??lx;mB5P>P!Sw3mRUVp5BCDo;BHv$&Pn*1CI*UMjDHu%% z=QX2gxHvuqvbmI2~4sbj?laU$th5+DAVLnKK{;vK1z7b>y%|C__V#iTWADY7*F ztk;TwiI`nq)yo&oz*3SC;~cMsa=}Qn79-KEm-@m3Ii3I9R@&ej@m#a~ENEpgbT+s_CW>0u{{v}^D>`dpY8x^ek$yoy3vrK+M z?xm6#a!2?f{{KB9AonxgL`_$c?$k8NmK@DblY7sSgaXxD(!0A4u|Iw6k4Juj_1!}5 zcPQE9PU9U`F9_XELd4fCef2^tQ*Ce%1W+XjRQ#aYv6_T2s zp-F3b#@M!QjMdM^{a{EFb6+0p_&!P#92p`%?8&DFd=dF8_b1OeSq^i^($8>smdX!I z1~nE=BTuYCtW0LKnEzD;AWg4Lw(|Gn=X8G{%N&LSD5TrJC#0KBEv5l_Wl_sCZO|+^ zbFU>w&oezHlzclMl6rV8sj^pK_UwR&O>8v!1vUdm$gu5C>yR&4EyaJoZ{q&;>_1@l zxb^UwRl+40MPFomnlB`hq-?v|zbwK9_;=RbONkJX38jDGx(9iaXS~sC-yYkbcS*>Erx{?$3LcpSq=a&r2#}UBEBUYviYJ(B)Wa<`fhTx|HA9`OKmrS3#c$ zXT3P-C0+zE_sGrM3@GeN#hJ>H-?iQdENy?`pkw9x(S?JKmhVUK{YTUO=l6PF<8;24 z^It@UB2HcQ!@sF?wT|UIcreXdyMs1`v$tT3E^L{Z1APv9o9Eew4sS6d!THGg)~#A% z6{&*%!WQrCy`YZ5K~GA`qW_YM#pG#ThPMd+D*V8^=mSm6j*{pLXKjy$?-K-R`-2D5 ztK302J}1sz6g;`)M8%*%QZ#g*Brqx7r*%RobH$0TM$=SCAe^MUID64DPbiU|XPQp# zw4({tj6>VT1`}}~mOX(_DD_b1Qs5YFd#s3g* zW~3*-T8H2i+GD=L7s70OhG}P`Nr|3`m>PC3StQ-TG9PvaoyT(X5Cv(^DRod;4z-v} zAz&zdiaGNI&6>81tO1hst$pFf9(p^Jx?Au3fx7`imZu}EQ+E#xjz|9u0P^kr+tAPJg z+ZnLvzds2$KM8PU9B|tM%$QAxr!u4&qD|JplFjmy9L^w)PNG?7LI15geqi$MVtp9c z!C%3$#A^~xu%ez$*rQUv$nh06R`)FpFA{ny!KG3}uvmBH3yy|UAOngTjXG2cE6vvV zB~sb|G2RG4Xi#eN1RlRaOSwuP10EzQ)Q;+ERc>ltR3=`16CFANQHF4X2u3QuUc%Pp zJE%@}cBG}rZ*JoPDbKTtPn4H#A7Og_MR;l;FMC>)NrS%~>s!*!;&dWIvoOfcqBNsX znZGO;lEMuJ;$_>HdbNF%-;-78y%SiKn!8wPWqq1`aeQs{h4%UyK0VA=`1KqcA43ex zX}gh|`vf9r7`St_$BlrF!k+&;A{;_= z+a~uHJj*{Vxq}z8lekyl;l=5hHY_YKm#?ZE zmWSZ};#&q{2JlW;;)=@r`dm=ulR0#_#HQR^Tp)K}>@2mm5 z7c-_}ioUAIN|?Duc0>ymThfr17X1Fify(kCFkFJu+%6h{m2xE*niL ztMh+cO8MA=K)7Uj8Ot;cm9wZVBRBi^ag*{h_1+cdT~20BAgx|vm$FpduQ1I`LMHU8 zfZN#oL=?&pyri zy5$Ld&o>cy%XpGMLm$^~Hi)G!Q@**YIo(xScSYVw=5bryUJ+E)CHn}f6GWdRD(FP1 z;&m#t{9%VJ{JV5?%@I=1MCu91UQw>~-@y}Obuk(6KJz6n7>-xVJTN5_<;B@ls?waT zCcrO@K=_s{T;PYKxbQeJ@<;;<%T0p@G%F*urBkLdBit^Ui40xAS=Og;z0F zF5kWny%*^@x_5~GwSI5FdqRU26ien!cc>+Ey;w5$;790bM!ogRsYNKl8~C0jGtCw7 z1{#IFi70;Sd8|dZuzDS6E{FSF0{%yF*#GJ2)vU6HjboP!>c4lve ztHoxE8sfFMr$4Bt$Zn$SaHseVaK-a4qI%2vM6{FDAsZ2(){81aEB+4$?I`P~I}&RZ zu#ixsyumU}M`B*=S$#LN$7e`vd6T~AUwyur{|CnwZQ+Q;cUB-?1lUb4T4%2M!)13` z({BPZl=ctQ{BelEkG`9vl20HZGQVILaw}}*Ey*c8jZMoh`E4pcdUp4)6q-9n_`}j; zFciNlGaOl|X&R1F$>fh8yyI{H1cE^Kq1^u-^hm?;z<(GH+m)}{v+D;(Hhw38Py^2L ztu5bP)2ONZisp1ruojz+;ZiOT`rRz+)ddcRS;?itykES_E(HJIcA>I(=o9L}hVNuu z*s%x5r;Ns9b~Jw8l{uXjcba{7-H~zsZ#%+#{ZC_IzIUyJ zYRlck-OWLkE_q6p?IP@Xn!Q{5RAV~6D`Kq9Kj7m(2b3Vjuvuy@BmnP!j_=yIbNrV` z9dpUWe>ugs4(=}gOHlp?8F+SlKz_tONWz~lKi9NiFnoNr>SycRPI0sFWYZA>w*wf$9KI^8Oxo(OGVk{Kudqvp+Ae+r*c}E zU^9E$(@*Eo>g-P5TYPU*2`*F3P?UB7jcgAS>{g|aKZgH>ahE9NZ>HgLLfprMTew!T znD{$tETfaXzW3VI3+r*zKH0SoSQp@40j&FUEs2#s$aC3Sf!SR^L41ozm`)egqnzl3 zZ!EW7skzhmyR#EDduazn$_F%SZ1y(e*CPmuyq6_qxFjo@uWJ2Klt}DW^ zF$b6O$#x49H@&yVVG!rRxYcv)Nqf5PfsyP)(APjiEh`!h)CiWi4L$ZuEH2FvTxX=( zJRuI{sMVHe-qs2RXN`h!7f!_9CA$u;B)RWGr)J;xRRjqh_gdxFKSw8U+Y!ha#pbJ@ zSYhsl?<29vZ`xe(ODpgZfAgV7ZPGV#+LPCundp*=O*1L*|DPew4gIOjbk{XxvbNSa z%kav^xuuex6`T89^)Fvevb|HbpQ|It_@3zMe=t-3CX{PBSZofk^*^Ng7q|+lr8!&s z&cW2*L(DUOy`qlLp^Ub&*c^$KOdL*;?sUDNq>3{toalefc+s2T6w4odqvBQuTD0XQu`mb` zKS!Q$EcX}uB&wFrmFlT~2vqm+3Bjp;P<3=mb<`@QsXeNr2wTzo>9!PWj9#v>FZpN1 
z>#U%49D%dG$0z)w&*4Y%mo>EFz8AGxvFkrc=JNf0w>ga1TJA?=PeAk>nNQjxIamHM zk3X+4neh+*Q%=UAO}Ul*qiN~>4W}aPdX}U}HL(#W$9Q7(vag@j13zA*{I`_Os!z}8 zHfD7Go42|doe>YM72Ub6%^0V3T^W026;#23rc;CB`jCr{(w2FZ@!1nD0)rM zT6)NU|HJE4FGHsFG}^2bu4;?6L!`NFO0)Kf!awlHXwdfIK0hNzEdLEebII50z2#>e zn#nWxYG<-V`80}}r$<^+)XsxeLGj1oxt6`;CGW@}y$5|NK205b91}k4%s&yLRPoHOYaW~L`un$a38Qhe2zcrH+`-js6zyMCYfY}sL2V;V>&{{TUdX* zvL%5QJUAd*uMV1R$l(Deg_^wTQ8@LV4ad8)lgeBAgtPBlcOY$c&EBA&$c(xy0{5;+ z{jljpZJ)cBAIwj*EB9)fRMS+Rx?ZGGxC2BvIw+JgdbQ-6`hx=tY+P(ynvMuWeu`?y znVJu4r)Cj*ZpdK2s$6!D)ZvG1CM5kXYcRtilRYT@NQq5l<;Lm2)#>K~zuiO6FhL|kiz4*fp zUnm>GyJ==5UAK@!B`OHI3W8~K(9^rB z;Jn>cP|-9lE6{SH1~cxeOxJV8uIkxy$9fXUE@nMqZ4ludfssl?qg+e%=(&!TI(U`- zwvOKfZm5S}z&nWApznTmbMJY)zzHwZfooNe@J4m&S-Fz7#?@L$-T>=)vQGU8Lm&&# z#JmK$I+StlGgGsY()x6$Oozs8v&%yTe9AJ%N0iXwJ|btm<3(P!n}%75hC}t-rfdx( z^!TEE9IWhi)}<+!MxL4u;p|JgIpmcb9g(XP)F?Rr2mUH!eJaY@8u4O2yC9XUj7`j$ zklisQQCZd&n2m2VI6Av1#R}jq*q{sh+VbJDrVvAY{l`2M@X+{oos*00RS~NiZig`Sl`_)lA;xlNya(r=N`}EIHq$FFJx=xb^8FxlzP( zkN7ctD^lMk{vn6y%lRP>A?Cl$TjZy^?bKWwpgh#U`4J2x!k*|<>w{i7X#7Qb>dyDm zHFZf}%*-S`_f*Q;$$Pb*{NP4$cJO1ZO(rBAfPqg!=F1` zOS980Irz4tP4tu$pSs9jFC{=1B$DgDPKzo%R?Y55C*SorzfmJsC#FJv(~Tv4wMoTG zs$a#6bT+Gg03ZALhgiFnFbS4*O*a*A=0l!BnA8JvIE`iHGg-18=b4##-8i@$vSCO@ zpwc;Qvnpi-RN*7M%j6r?SKEV=b+3nJC(IuUcAe~pQ?V19Eih)Jd`@Iig_7_WR6Q$Z z@yQy#$A`Sj-nKiS(QVyjjdsm!A)DXjjob)d()P37)_4i1 zWi0e1xG?kF$R&UP#d7p6o1ZYCC5VyIR3=OJs8clD!>qDrP40)AQ^yb@*ZKbhefZ7} zaT6NSQIBqQnwzLby!UXluK{{E4EByQ4tBC-p@M8KZucL3oyOOI$WyN$^fiz;naM>{LhcEhoh%$lB8VYTkmVU_a>Ps4R) z0uosI+fWY9RzwyxApjzs-Lr)A32gDMCQD!rjwz5oioOK`Eg^UXqF+OadHV+8N&N#1 zMn|25#?gQQR%7Bo3&#q$47vW`YHU2*)>uEHI(}JKb@bALs^))xOM8I`j3eueSooa+ zT@io`-Y{PL?|N!yO$wMV4in?i*#Ao3{$pd-{6|*w2ei6P8X=Yh+@e246t-mU#m>SVfc?9?KW57WEDEZw3t3*(5* zhbR~0YTf0h7Eul3!L~BCaq4F=zU2(T>cNNXEo#g|$W#P@dB7fDQDUYhOT;o>l1_Q# z->RzSHc?cv(25e|PRa?_RVpBDa~8(r7U)_&*GzTQ$oO$eH1n!HOSd^%+RHCy`Pz&6 zIGs&$((sf3M~28K4j{ij%-LEUTiCEj17%uqT(JY8Oty75!m?Ld6DL?okAH^v#`Ge- zQ4rq>JW<%c?54gUJ5XOY?Um6jN4DYk$m=e{Sj2Jsy(?n}U&Zr;viAd#eaKM>m;Lme zG-fQi3E1C_WSPM^z}I8g)Qh4dkv-%B{iv)f);eI$+a9=WjBQDK=8^LGmuOb*Yff!cCbbUn(TD}X zwuEm{SN*7P?DPUhpY2(*vGM)?z- zxqM*yRnhG%PY1p22C~4*hLhnwn_OdMGiLGEh6v>}DxX?Wt((F#57qR2{ie+Hot~MG z!ft^VA98&1c7Ng&BuN+$vf{JjzW~kv1;E-IjOrGMzwb$4Zbj{p0HRkgV$1UQ-fc zx$Cr;Ahp1q#e2lWx$cAhvWwSghCPge$GDD`P$wXyXq;vVhVk{vimor?lE}QqV;Pjs zJmKACOJVq1ylI(#6GcMKb zmVr6n+1aRFW2_nX3!$pEPgdh_udJ>pFlT?8u9+FXSKpIU?FWf6(J#$YWsdH7;foN2 zzhx??Dn3mo9~yEs{DZ=Qmsf-Xi&vZ(AM~1xI*e&O(PH9b3d~a{P+s@9*_J`OtD`FV zMRjy#b!spNYYZH);K8CW1k;Y3NI4#&j&1()Q_}- z?u;V-xYuborkcHwp)}qhw?wLC4LD1iKhUfc+!&fX#&Tdw7rp@|n=||rBz}4YUYKr! 
z+?&dUaBw)hrM}QSnQZPHC&XB^Zt+IFBg{;;yV`xY=1_R74xA1_>udIA4m+4^)OK;E z$;sz5*R^S^P)IYTdVS%YW#3GHz>A$IUdS1n)8Tzzi}%&%PaiWedd_kVC7)i9TB$*( zwR{=>(;vAG!gTa8mNtg~QtIcp5<2Gb!xGCphDBf=BdO2~v$tC2?xO&DvZ|N?`Vj2| z<`DYVMdVL(6TV6H>q+T}5Cb;@;-) zG$g6V#EBh!J`98O*S~SzuVtYO%(;$lDguKz-)re(sVdpGI=ZAH@_v8rO?wS`s(4v6 zQnBX4Gvh-BHkB7omS8pKBfdq-3$NlOF^hn`3$GnhU1p|@WEzJmTCz?LEXc{@@JSQd z&~9E0v>Z+?Tt7TyU`5NDfjN)xIk9;{-hp21tfHQ)0P%9Q3M4YtWAXR&lJE|t*bU10 z>RO@;;bSzUv!#zCvx17|4IFvNIt{1fK02EZq2>3au!->@eZ2UcN}ABJvLR=4W!_$a z1?OKX{OSIgGB0*of!FtRrYXM>-J;_EW!g>+N8b)d->Qs$Z0f#mFV+f)$iNzDTE=2S zW$wU7k}D0GyL=wT+Fbb!JZyY{6aa>1vzd0K9jG3af9*KdB0N~q{K&j+Um$O}@#+4D zo0{oRJon03X@0gT>ZCu<*>5gtwdsAn#Xe;1TPywa8kc^eOaJGU>2SAC`ss*=Ono!l zrDv_CJQP9vPtwg5uGJN9@|AUkc_rl*mQZS*)N4HV_bLF^Z4z6j%V82-Ce8iLX8Q?+z2o>T2fv?Ef&kv=EghhwNsGIr?7) zpg#Rimi4uX=5bV{J zQawO+tjs(>grOo$-y(B?ip6uE{*(d1hMHwM=DC)oUz-!DnbZ$HCe_B}dD2hy>0`J{ zze&E0cK>?5pYGGw;uc%}x8G3S(%02~ddQ96LoWT+O#Kr{HwOb#Lh~dz-@7cZ0K4+Y zCvPS+M%kKKC$r0Vub$iA&+%e|3e+2xoXUbcb!VKJKg&VSH*_YmU6@(W#Gh3pq{?M> zF;kQ$p1XMiK*g=$kTl7$b0~zlF_TV>+KM;SS^byrtj>GMUd*!Z8gqdwQ>yN2LYfYW2=_kaEz)Z|cgtmI%6r03hXdfhHru9t(n)E} zbTi*i_32}Kg!a#~?RUEhmvZxC1MYVr%FndrpURYlgMaAe_F4a;QAyO_uXuMt22 zSD>Ion#c#7t$YJ>^B*;sYd-O^x3GpKHiz;~u$w(HY2)ojB?vBVf%BQlbOvbTv7f_G z@LwOYMmmmaEolBg%UNK-3&VP>X6f4FLpXkJ*D{OWP|H>pG}^L3IJ-TT?x>ZgOy3%q zg?qIA8JT6~iji>Ccf65mhQ%uXk`%ch? z{W)3@#@=P0F#%_SmuYkM zY*xOI<7j<694;g6n9*CMPrxEH6KI+?v9H|ex?(x>6b>#!!dej9Z$@QY=6(r{E zhn5;UPpaU|Ts*$cgce<36mutw9I)Urr(Wh|NYp&=jZd8!;!IAVK?kS9FZVX3@f%9} zJ;;^#s7riT;y^*+3HG^mmE4L~r>_(Csqe3)zzw{CxC?ejNiY0PrZ$IZ*Mjp;>D*Cxc#WN zy};>f(*SM>T#OUi54dh*u)X$IF>>s^!PLn-``ZtgbhC&ykol%HbO723zjg~}>|x`q z8_?nWz=!t=#|FKNosxUr7g^V?uBDh=W+sL68Z9Mguw3vuoB^6OQ1L^y7s6o+Eu%G` z>v_PamDnji+Yc&95;>_Vl`N@LH?Vkp28^tGblF587{{tKF9V!$dSfR*< zvz2$s5;|Ea{!@7GR9wZ|HxXXm^6bJl&Sb*)?%@I@W8YZ+ATPkPgb*#qdc#eVj}8 z`FHC!(%t&+rz1V6{6nt(7rvifJ+3C*f)}*>fAJ0Q=b`#DS{-qk(=M2_RN#_I&3e;H zxZPc`YpX2j($3Q4N9^s|3F795v}Jz|mslPW%VVeqF#=8+;+zuhH5z`f-}(qx^n914 zY($TQD4!!=N)IN?D7vYkgOA+NWV1eyus_$$fOmV@?@-wTt@cYj^<{tQ#h$YnvcH7q z44U)RAGI402yb;3u(EG%zLs+*wN;q&Mz#5~mtT84K(ycPKfh+3mpCfWc?QSJl`(Lb z4Smviv1Ylh<0;6m2hUpqwH-?4GyvZ?UZ8^nod4tS0HVS>njUL)e2Um80Hl+FvxcYJ zRUI9+4p**S;T-Ux{Kgx#S;DN&HZ0a3DqG_hT7takJV66zF7biw2o!WWn0~Ml+Md>CRkmZ)>KtA+r)x6k779Vg9WQf}ov;pZ5&!FEWItGy>dPD7A5s3;v$hx% z6MNPX7*8_+@`sCH4HOEc3z@?x@S?m_9Dg~{rBC{V zN}+))Gmv!ik(#pO-yJ4kGE>{4oN0^{(rS^;a}Cc;l+^St;n~hrO?6Zx2n}7`9OqXz z*}Oi(g7Z;E4SPuZm6mGFb1rFRI?2Izlb>E=?r`Z(y$z(&q+Gh?&y=TSsb|?8`VI1H znjK?rN#{4ERAu%66c~(1JI%a!s1@*ZFf>^J*P1!kSup=^DQ#Jq{+~hPZmo8@@ZI8?VT&jIMU~*!+r$V{r^ra@C7RU;c*~myc|GB%HmxYSH?V7amzPtM4aOqlw_gp6%BaQH~*Q^hS5Gc@%g? zVK9SU)RwF$TYXC(9PTe;7p-v0(Z~U1OBy|%&(<@yOh#tpuxZhNL?{|4GEbh_M=>_{ z)p!+kw_C~V9AX9BUeL2Oa0C7o35#5F)aEo3eifZcOxWSC02=bwPqrwRhU=h>mLwC~ zQk%wPvJ~M166W=DcXG`igeyKYsquk2?Hnuo`#JDw7v86Nn_>>m+K)lM-G2SjEPvif z>V6p(sS>&0;oowX3y8cLpW^QOGw-tTfc0MQ-?5EHPT}}U#YbWM6ubRfUdjr$^S9B@ zG5}`ioK`tvUfN9*x9kTgTWJ!W3N!3zD7bM?+yhuFBd>bgS#kb~M5^ZpPwIZ*#f-UV z!#YgsQ=s$Ao~=vGM4>~C=KfxIJh8p0xgvdbfid@gA)kt%T+ z&9S@9CqAIIi4>tugqq45&(I!sKRJ=7^3B^H$uG-#OoT*AK@rI~YfMoo=c)&P$0hy_$&VS}|kQw@})r{|nCVtZ@X;XR8L}8cEsW$#>z;T884}~VF zE8q}QAeJ?^{;c2T@E7cfzw}8=i@z5&2*O{_vzQ+EvzXl7Sj?(Z3~5`8}RuAN8Bmmhk;Wl@y8GNWGm{O@b(@L?1Yx^pIOMnbDl^kbf*=+*4? 
z?ynAyGkrdA7>A}2wU?>yr6MSzC&tU4zEF{_)(P(YtKugyDy!?Sk&1()x7lj1+f2pR zZe)tOM`n)9b=xmarvY@Gyv5CrV<|YFLnK ziP48i;0?|nbaDPe`@rx6A{3&TpyPKsewntLn5z{@9AxLEB0zvV7leM9EG72mei>KU ze)jN5q?KpK>9y?LX>X~PQN){D%U6OOO6PfvMy&PC#`r)`FI&}ko+8rG>?$k>d1# z_<2wFhlR3aOh(#mBO>k=0Y-q&SQQjVEx`H?8P_l>{Co+D+z*EpPX`g3rxm(wDQLuQ z9ZDv$50Ye&mSEyiewhhH=Ibd|D4f3np)ikLnk#kIlp!jBe?P?C8_-^v><@PLVWOYt z+aGQs-F!8bKZCF2&u?5^KVDCEEA9{Xvlp33uBNH)ljNjVYrkOQTWtl||5tg{wA@wl z@$0aMBL&cLx2M^IIs6l!e3H!~>K20A)aT*OIM1uWZAT(^bGsc^KE(18*zrzQWXYIg z!m<0oC+$6J$HZC#kmpuelOWfR?r0Ls-hzJ{OW*P(a2&=HM~X58!10~lyh+G-%}utl zJ5(pDdhY&bTJ5MJNQf3}4ePvYl)r-f`>XBe4%BezjBV)qhqXy(ToT!U&bY}N(3$>< zzSxFdXiWu+!qyDe(b!)Gg0>XaxQm_Q#er0FiZ`#KXsP9YjTdM5b3C`-BG8LE&i=(O z2%>HIZ_$+Iao0|17~C|Y&yT`wYP-x%};us49j z*a;(k?g8b--5npxcDo8YND;^U?wq6UV6-y#TxMZ!c>&lZmzkUl*q61LeXh2t{a+$g zyF%vUAG%Hzc%*K-ir(CP1%IA#wY;wz9EPcB^uOWi)ouod&yGL!h|g-n0H=G(9gKEQ zA{nHgC1^?MPQth8q}IRZ?&$91vUI<6SB?6`_%6QG!v5s%(mgxOhrI~cNk0+j;U9;K zEN$Ng_UIew!_~V5jFnmCYRc*Eo%q9vvCPyj{UAN{4}oS}pI!f?*ZCu9~r;@5ETV!M_VgRr-nmtExcy>TfOms6w?4F+EqhhrWpKh6ByJL>4EOWGm zRF>?QHl4ykId+lSCpzu*d#Cw#eC$1^((1gSsXdUV#ty$hiMY z3E2wK@0K(|sevO_BHxSaciI;^mOQ09PQ8<7`pWLjeW>O1D?Vtj7q9A4RQv;0f`RklNb}U^h1${lbQnt;usyV69YwSO=q+eQK6D zt)FQ5XSOh>1F%V?3eBp2E4&+;u{BXlB5L*bkOM0UTCx(6^6AQ3&mvG0G#xhM-uA8+ zqMehk0Dt@~f2@SF6Bj*R_^T};Jt7DZ1>(gAAJD(Qo%7gnG!9huB$+RXMV`CHTZ=VtQjles^W{}7j7 zUmZ)M!BN-3e}c-lc}|-Dn2-LRW~Ki_eA~v!=~dFIRN&iO@we~rXh%>M%Pc!1ui=no zr;f;$lCq|~GHVe#Fz)h2cY=POxwPh%#CYyA%Yo-8GWhn@ZlG$?K&7G?9w`3ac!l3S z@@i#qQ0;%T9;pJ}8El#{BCGy*T^8UKEenmX@mD3o!doDx?)%gdR-^Iauv?S&o@3@g zw(-qH?^*k0Mm>4$Z$4&HEds)=FNd;b`!Gxv40YDnK0}aP1SA#{^2aFi@&a=VlrI)_ z0e8spOUx=}bbDi-M8OH}TW+EKm7IsszLpdZ>pnX~O!^K%>bdjb`vc%h@2acvWRFea zf6iNA@$Ooc6s3~9y<#VWp7jjj;;k5#J?g?3!3&E7+*$whayo62xc%U zN-)h9doOj*^hqVm^ewrZjUrcc(p01ebGn|LyGlIwz?IOmy`wBKJ3YVbu+sey_n#74 zSj}qqksp#z^>$>l^nJKbTX{ldk*F&7ZHFmndSo`E{CnmbE#~=Fc5`sL*3>JRsj|xl zN5A;kr0F-PHcHjT&(~!rmpLrTJ%elsQ$<_g5OaDz&_sWMJpa}Tr;G&VZl?3Le=K_v zi0E5sM!f?4I^C%-A3`Y!lV(7(&}w#87kB2YZ@7eY>8<0mdC){A)?60fH}s&m5k&j* zwtnI3Np`vI(CXT5^`xXXAidS9eKr0sDEug7MFQ=sAxiM|wbaP&g&G3TH-y=x%KRdk z1r>X~Z+H7|Z5M1@slz=%$HRsR;lq-sO+_+N3X@!_A0Pw@(BD=x4)DtIiva(6Y{TrK z5-rjyF6doLeYR$zP0W~AM_3uL<`uqL6?QsbVM5|RQx%+7Bq9H$i*{?8vEym!qplbK zgUL=glvo(elB}qP6>a24M?%P*KZZ!+xm8N6L>lC|T+cc`15WXSq`^uW#IB4S)+}_AzZbvd1Dm|Ar(BjMHb-B7WdMB`J(XoC^fwcxY8$9d zQLzg&+^M|;G06xm5SdGI3sI3?WH_SR&1I}1;0L(tuLaN-NEI{VJdp;F zr=UiNnj{U-WT~2j)v<-9*N|8}q?1!@9{6j{j?swD!S#Ko^J`Oeehn93i&XguvzuTM zVKq|QxZTq-PptJ(qCPMIfXZ!Yach;YmZZ_+^z-9=+>}@wd+b8Ql!<_UBkyN3=VgsK z`~<@_mr~ z*|8)3JsHt&fKQ&G9*p~#rt+!9C_433rr3dfWv1D?$Zf9S>$2QR`T$2vM>Xp3~h;&&3uAA(|#$b(n>ElzT_vi3-Fbf-6`)7sC9}`Uto9f!Y zY#mq7vDtQF>87LO{@#9L6A6Fsbu%hjvdF(b+1fwG=v-esd15@LpIUQWU1cIUK7Ux> zP}mK>LrL;PTbZ~3IA=z1{%k|O7kj}Wto`GmUN$^V=Ss9vp+__6v$>4Oo~o@EYk9f2 ziSPK|Y@NZ;43IFHgN$6%OT?KqrvG9krEzA@V8Mm{$PCJgerKvEDJ=5C?`(@`^UpVh zX7{n_rO_bY(HG~wZ8iv4v<|G;o;K#QJ;_Mv7!=AJzcu>4^=&x)2WVpA*l4gYH>Kr? zxw5U2Wo|!iM_G`oPNccPr)td?M5%_-=ldUOeEN1@&uM^iZ{H+{@wiX}d3vhVlhw

8V71#+1!AvFAmfcVBH+-(Ym7iKA0T$ACYeZ8#hRZBb+vNc)!nmwNF0b_b}&g1N> zmJUgob#Q&V2s!*bgsHLHW2xDV<{{*_Rz79+WA&OZxqHU@{1lf5b97NI zb%_;Z77puUmK?gu{Qa&a6{R^oFUZay>h+(&pTk5_fqDCjferuTnXF%WVc|7VM5UV0Qy&ZDI4&jo=wzfdKwblu|qvqds5J&L5xCws8ra2tK`7FOb0Tvi_J zOV6xFr2V6|hjeM#G8oS0Q#|YHYxBlWD)0t;M^6%T-ix1s5o)uy<}*e2_V!(^sE%-j zQQ(JL&^j4*DS+|Z*Ossh*nNwojlD+E{O*mOJUe;0&#vA0u&i4Ad2?0; z<~%KSBtiEvJpe&q-i16#KhW84T#r{G=EjE*zvyh>e1m}I2@4^TE2lGk&sP@QR6oLE zKs!ON>opNljVOPz2-~jsL0PcIYlH%kPZ?yEg4m#S-X7s~K-D46^|VAmu6(Uuq~hdn z^Q-aIMCzjUz^zX*U=Ds;*`2$p%*~IBR3pY>kZ5ONl>M#u`+0#sd|l&UAts z2+aAy+oSS}kUBou#F>aR+mW^y1}azD%jjgjV$UspW=N1dXlu_@8ymalVRal{!|#FX zE~}#3ssiKr>BK7&m(1_7f`nD7^vcF=7*&@w{W#Xbd@jE{rdnf79j z(g&>Nc^M~>A)d%p2*i8(+x*{rOQd{dp4Z!U5<8~#jAnQhF<>6HIyqAh<{ru;dX^7BUfL1~8%l^xIp$R1WvU?i8x`Q@ zO1r1?9-qT}u8P)#Dq3s9y3XYFpKAu&>nJ7DwMqeC!WWslR{EXmz790(nAQ)1Jx{2;gSTl3j%FM`{EC}+%hopPKtfH48TC^O0$XCcTx15O0oh@ZN!`73 z>#x7=V>JY;h}Yc`#J81*=%T-^RWdtSfvGB6@mIoSxueXLS&9la z(_LRwQi=Zno>es^95UUGEIO1M9#i{pwb^=~Y`gxVxl*E=980Upz6s1jPGI^>&I^}) z5{Mvv^AfC%7ymm^9V;#lM=vi9R!2XoYX6jzpdae!Om+10s`h^btJ*&w7M7e_5&7g4 z)yBgIr_eNW_oSzxLWc(fG_HYSHak8&1he z-7K;$wW8WWeIOIV)PWfF+%bW>+f!kUN(m=`x=um1)#U!#VMR8wJ}{S=L4@_SA481D z&cd{)`3sdFNO?A^-s*wL=(@#Spew@;sN)#CF=m2tTHS0*;fA)1u=n*FQk{HFv3>b< zIJ(iCiW^4e{7)MVb?c}#_6$bmn5tR(UAoBiL=kqp`or!Jttrbk~^Vbev0$#8AY?5g;fS zSWsVFjC43mqDcD(?A(2cMA#0J=CQ$G_ljm4%k(fMnCp22(PBHFwvTkPjW0aL4zDRYW#eRD_GWQK9%@#qzFksMs$3 ze!E119dQwe+$ZFNkBh+vNR)k{ScGk(dDNW6t#;Milw5^c!?^^so1t8b0WpFvrrgKi z>)na*K`T5I75-WEMuBVRjqV|+_njDmf0ngR&+(!cS_AGxhWa&VS5{fh4`3uiUt2mo z&)d3=vg613n9q|7hGOZgnu7iLR_LuDu@&4_FO(t`ml3 zII^sHos*B&e`Jwm*6_QFeR@ZKJ@)5|0t+FpF~idKQRD|G9+f1(vE_^T7l)%M2hd#YW_*RD>>0Qc>X~#UEB%2`m7o5@UgK}q!g))3j=a|SnrKg3aTC zsqFOB{+TTy0yKx%C|#qb=KooL3%($$oSIhz=7=O1pTPXz@xUkUE%~+0`6z#>#w+6E z8wqq+D#>0jWdgfUynY`zbg1w>e*dPoUt>!1WLNI`GCkepgUW5!=I|5`f8C z7AnoA-B8i!wx!f%Jzq=@BD z@=1xzI&chZlv?nRk52WPXBGULTn*ntY(lU3io5)lhk|^;=4{S8+%Z|Q+k(ry<);-W z&5NDjMQ5Q89FrPmO3B4W?hJBS8#AF*{M?EkQLgHz7tr)F)Gt10o+Xwp(r5G6jZg&Y ze_mejWzovlG}SQ!=2IN<40}y_z7uA(th*I;*u7N6yc%7VMR+S zZ@S8=A9{=UcVt_iKs;i*TK{^97du%Ic|PzWw-jVGRB=Qqs1v;4l1VDnC@9`tAW3)j z5kimEF_q?)C}luo2MaTJxP>CNzPmgk{DfJM!ciOP$D)d`8Y-XR}_`Wmy6qmghn5W$fFY?_f4P}-r&47nRZQuwd-EoM`>9;snDrtiB}qf1|Dp z{mGE=TXh$jnQZNqxTWBQvTB>Yj58}>g&1dA$ReC<17~rYy;39wmN}tdt(@3BGz#%C z+~3^@;fc87HxkkQo0A&|Yx=#S(OtyHl&dc=O_pZQcJmir*9tG%i4yGApNk}pL!6A6 z6nlsh^wsafpUQn0R2exO?a6ejyR!Ya4vP`|&)(eyN|wF6GfH%k`_2>8;peO!>92*D z@*#}8Vo|Y?fo!f@E`-2_bYY-?zJRyD*{_eo1MpileoY)BmJiyPF z*i}ALL>4rUK+A>Tk|Xo^|ChxD#vDgVovODDaOf@fliys+C%Xxro6_?r zl9QYYXLphXG50;bxvg?@H_$LMzo}$q?ypYvm9!yRFlDFMq)jqC?x`eG;gW*lf5Tns z%>yZDgJ-VvQ`w%={?zu9ZRv%-q_o{Xdy;e-XR=AYFxyYG>EFro)Sd>MPrA86;6$uP zxceK=z2j0^I7sbxTw=M%chB-+v_354gJT;^Uw0YmV~hM@fvVkQsLAVgH9_3?ET0>Zh*F0)aZtcPXSCvgXA2$cKdA_!zFA zZp>Si#NDoIh4w=RMfr$fZU_mSM2HWGPGHXD30~rEEVcOa z%0+TB?`3QT=du5N>w2Ob&HFz=XwN>!bFbAUy{uJP=Gn=h!94jQ4#Woov1JCXf4Hxv z`!`MmfiuDT`$9d)UxVcPRw@owni^{I**Z@9z7?8({}|hnoY?aJ&Li4GJoj}TO#g-7 zbuG*7?a&#UFp4M-1Hf{T%lkYSO=^p6Ah^9nHFKM#yx?)$c~TU6Og z3n&!JpU1N!|Ar%%<<_fiRaNY&I(HXUU83%$p0_oPRZXQYeb<%xg=M}%CUgW<$e4Vt zx(CiDW%qDTjIQyvZel>>lbG$bZ?lYCH-e6G!?tRp=#w)R4NqGM#tg0!*~7_G51U+` ztsH6AAA%jbt>GWZ!$7%Rx=hJJpZ1s^$(wdXb=`bg2f?d@!p>tcnYpSYGgc2fOfg@^LQ%2O1$8q^YCVT|$ZAw@N}eVPQ|?bGVLqZa z3c0=yuKCAB%C8#6WSljU)Q2Bn>O}Dhi4b$P*Bw;}cMuWx&|uCrS@ZC->y0%gVyP1z z5ax%1-eLwjXcix9`RnYWG&LNCF-Z;y+?CwM`gM_G{PUkl^WTA_Q&+L6a4G$Np61+} zVb!Dr&7q{2UyRL`{N~rc;biK!3&&v>iwhOM!2Na5Z*npFFWuy{)03mG0~xn8|Lt*o zXG39R(Pj{BJ#;xWG&nHlW8SnZuY+Zr^EJc5+Nq&X5Y+fIcq%I%qj>I1k;gE(KL>C3-$J$?kK^9?um7&iq9gK_F%BUS}J(?(&nrKN( 
z@m(IG^`063sEqxTQ4BhZ;|s#BQI&r zWr6CKGN|S*)nfv4o+LB2L@?3c(*j99$Nu;+W-f9tVX1HAj^I#MsHuz7<4JaAD$zqK z%f{CR=B($f_2J@D>J{e<$<$rkRNfZ2hIabFq*kO3Fb6#;_fAq}K2*>Zd)}Syso&VyLFavdWY!_;>Y`^=)7>_;`>5l_fFv9*ock#5X;p#t6TH|<`lOhC_w&mg z3TIzh$@C|*?1e;d#PlBq{&jwq4g97CqhF<~C%?^mYq63DRTXIYClBx^BIO5K=CFuX z#VS~cSLwRpFDXeR(xu_pB}L)bRbEBQ1lH-duz7Rg#e$0qxu5-${^97hOo-E}>{k;N zE$`@#W3!Mz*NP6!rda13h{yv<=*71;Gi8^v8-a>Yz}FMqaP58(!?e~2UgAY zyQ-nHn;Q}ZLP;HcAzEiq@-d{nz*Da!Dq4Zau9Ydq9=6)1NYps9VF9rv_Oc!fDx0K; zj-j{QOy|9bMQK*UbiC4vx_8FBY%YuVoH(6MKH}2ttPuLg!7AHs*sy3nSdKYd(^mTn z#p+~jH+`#OCvD@*Vh$ihzCapj>Cfp#{-4)BeH02*(}nc^pBf5 z68(W<;Y4S@oKiONn8s^d)fLedRqe^XL@GTg5RP3Ms%Xh6CYWjQpo*<;Rvhqefw}tA z2`wMik5xc#4?#!#o5^P9QJR)Aq$(xeOH&pUScROEd>Gd>8b8L1v;!!uX?-eTPIlqR zOeFDeHaCP=r^k-zSA}B}A+GG|*r06cIr!%T>#`=y`LMqKm(_t6S7^Hi>kE`&_W9|z z=(y6AcbqfQckArz?g5DEI+wb%GXMqDq~-$AWO_ep+my<(zXaxu&IR?r#m>Zylcy{n zU&`z9@nc}~ov8)at-9p9nsm`Vm49*PtR3a=4Jv9Fe7+e;TMqo}MbQAktEtoAX z+Y*@5Pp?dY_^j*%+k-8{AIOT&>Z?Z%E@efx%$r`@bkh}f{R_0TGwQ_Bv+U4b@BFy* z3(D82{VCjBJVDFxFem8;A~Tg7AB0}9kz=@HPklss#h}qhskK&xMwhVav$^DtA}6j8 z6DyZz!KpzuLU1Ly3>o3WpE}3cK8eKWwz+bjy;Bne-@F%rN4|M&`i-aGkvIlFwS&ab zqjC#jbXlnLGj*ZmE?NvMWbYv{Wlq=iERvb-C1X?@YOOCGzUJ|8vU%=>s-WRd0IN|U;Qv%H zd*!evW8?W(jc^`kgNeUuvD~u4f(tDM6M;EnfYqWKR0mpQ2b)qh{g{T+eE%G)Uj-tc zD_1wx{|x$ejrG(YSadh2mLB?9Ao6(c@^M!lkLIep-lO)E%0EJRX*Vb5(JEu2ca6yt z`izmf~maYdV=Bx`tr2y@W!`S2HBgV;mZw{CYb+Up0i%r1h1pFGE`9C%fxp zKY;8p`hfH%F-A;aY0sDnbqtynLN!vYQ}q_l{f+W1C!d`}czEWoM7G67W`{FUeo>$f zN!MB$Ll{ z=4r9q5xDuG59I*k zr{wl2D zrp3V#E~5)UC4G4o_+?9A;f<_K%los$_Z5Hi%C-d}XGr(6t1n9Q^_^JMRK5hQEi2IS z8TDx>CR@d@sr*TOZsszuLP$B` zviGKKPPHzQ>}Nh(rpf;GB8iM(aBOPP8S#trvnrbYon1eIzkQJH zwSHQ_q3hD#sw66s=oq~AokWq;3;W{y0VKb~u7 z-vGQr8{uZ*8J0JnCmY8IefT9GKm_|ptulDs_bBO9l4pJ2l4f(aNy6D5WZCuY4;Z4x z7pN$sHa<@eMdtX2`pQsriaC=9eJYh9t;Ei+?Jn&g)z74rJ3q6c$Xs@!=3V-m`KL;% zA1&-3DCC5mmA!~-b?(E8Z9kYJ+1Nt;CrH1!^cV2-L{j@lJ6TP-Km1SdU;m&F|7|?% z0RM@*f&bAHdci-Y2mBq6>;OOOoDctxi7Vs7|BIjhukbhC=;I!-K>k*f3Xpq0`1V@q zzeJX4s1mCzuy)?3@5B_?s`lf#?c@*!3(Wbb27GACd{{OXb|G2fX5}w(K(Xt?m&4A6 zNQy-y%~di~ZGQxFdE;)8?C`r0;l_utF1@CjOM=}iBj!aWI|jMH!Vtv4u$%Ep=8E@8 z>Gqxzar?`>wRNC?pno`$|MqT39z_`cRGSUAHh=y=@l<0n=~w&dPJc8Hx%AvsG=rMM z{iLLugL}i~p#=k7Xc@+O)KPRUasrGOYROJ0rIwV*Hf5?(Sc>4Yl`t@98E*oX8z-J{ z;R+6Xs{)Zfz-m=6h?}bxxmCpaW-|lpxT)G-{lv;@=f{bM0M5PkQ@RSAsU(}*{^`)a zA5o8^cL1U(%EC6FIk^e^U87`D%{TpQ#LviSVS-DuU1gcH<_&d9{YW%zYibyzA#!7y>bGhm$nx~1MmZ+i909$% zu}vPKWm1_YClBBeni{X$Wh}pJJT1*H%@2R-^U7qlEVqvS`*qWQsWuLOnP76-@P_RT z{Q|L{33P})B(~@^m9O3YV{7TdfSxKeW%r8du)A%27ebn{p1dq(id=$ryXG}h|FGB% z^c1^8f%k}O)A(Owd`!cF_go%?xVso18ZFygO5QM~(vj0&@X}j#`?bLKWM?-kjgAZPQ%PaoqeE)!?b*$NC^p7@aIiu@rNn zlqc7F)>!~6KV40Z`iocg*3CzlJ2`)5cdLd1+gPQLA6K(g^-GiX*23kvhx1kcn#$|h zTyYT)6))wjyFniDQZCjYl$cf@mnGe}+#^1|E7~l~Vh*)Rqx7+6YYw&BIPn^gd51;j z*VzqwSNOLOChGI5mY-Je?l*!h9iWR}jZY7JJqpe|YE53ZKW=;ke|B$%$`D1Q_b zSd$a_hjAKQk*o*j_EGt0{+Dr8#|0$(;{AwAX0Lyu@5V8fw}ZO^^Ez;d4hy^}7u1G0 zUzx`T4kzcD=emHtQjF{JAg#a3Yre7I zTgJ5kt)&j^vV!2_96OKrMQ{U~GEQ^^P@ByrSc)JlSymCu#y`Y|iYnQR72xg?M-2+~ zEMsG8xdO$kMfx6M@>%F?VJ|k?=HPZm>pZ4LrjG)wWt>hqtw3Qx3%O%y>$O?g@EP>~ z0qFIgKViv$h1A|0Wbtzo{tpR@3r7`)KiHEh{zv~Yo!{}-S5MVU7JoQJwSo~momMv- z<*bs-vZ*$MIaNH2{*pnqJUy~nsi;hj1OX)4qfSVNvnLjWNBsr=@2aiqaHg$l-`KaR z{p(Yz59kPIFAb04&_D8jobGE1gJv0-uz;2(*YhnAm}mH@j<@y&b1J^|ZJev9;TvjL za%MbdFZ`T~!ev~?7hb_l0QI+9t|kL*iOgX8hog$ir4u2h^i_u%K(H51ig zHiGw4MFryMMTWC*bXya|))XC_%ENpWv5w9X=i{~P>Rrgx*ly`u*HbUilHeSnU6-H7 zZ^99>x$-{0kI3IAf|i}TmHFcOx3If^fSo7mwJ}mT3vdIF6GOGbtoDbdD5ZXygDl8k zPvhn-*ND&sMk_Ld5`)a6ne2O-t?6f%w*r>rc0rNL2>AyxaJwp;^eKCuaP>ae)ucSB 
[GIT binary patch data (base85-encoded payload for the single binary file added by this patch, ytops_client/go_ytdlp_cli/go-ytdlp) — not reproduced here]