Updated from external repo on ansible deploy

aperez 2025-09-15 11:10:29 +03:00
parent 9fd06b4a7b
commit ea4f4c7aea
144 changed files with 5868 additions and 15999 deletions

2
.gitignore vendored Normal file

@ -0,0 +1,2 @@
.aider*
*/.DS_Store

1
.vault_pass Normal file

@ -0,0 +1 @@
ytdlp-ops

79
README.md Normal file

@ -0,0 +1,79 @@
# Airflow/YT-DLP Cluster Deployment
This repository contains Ansible playbooks and configuration files for deploying a distributed Airflow cluster with YT-DLP services.
## Prerequisites
1. Install Ansible on your control machine
2. Ensure SSH access to all target nodes
3. Set up your vault password in the `.vault_pass` file
## Initial Setup
Generate the inventory and configuration files from your cluster definition:
```bash
./tools/generate-inventory.py cluster.test.yml
cd ansible
```
## Full Deployment
### Deploy the entire cluster with proxies (recommended for new setups):
```bash
ansible-playbook playbook-full-with-proxies.yml
```
### Deploy the cluster without proxies:
```bash
ansible-playbook playbook-full.yml
```
## Targeted Deployments
### Deploy only to master node:
```bash
ansible-playbook playbook-master.yml --limit="af-test"
```
### Deploy only to worker nodes:
```bash
ansible-playbook playbook-worker.yml
```
## Deploy Specific Steps
To start at a specific task (useful for debugging or partial deployments):
```bash
ansible-playbook playbook-master.yml --limit="af-test" --start-at-task="Prepare Caddy asset extraction directory"
```
## Debug Deployments
Run with dry-run and verbose output for debugging:
```bash
ansible-playbook playbook-full.yml --check --diff -vv
```
## DAGs Only Deployment
To update only DAG files and configurations:
```bash
ansible-playbook playbook-dags.yml
```
## Vault Management
All sensitive data is encrypted with Ansible Vault. The vault password should be stored in the `.vault_pass` file in the project root.
To edit vault files:
```bash
ansible-vault edit group_vars/all/vault.yml
```


@ -1,5 +1,6 @@
redis-data
minio-data
postgres-data
logs
downloadfiles
addfiles


@ -58,7 +58,10 @@ RUN pip install --no-cache-dir \
apache-airflow-providers-http \
apache-airflow-providers-amazon \
"botocore>=1.34.118" \
psycopg2-binary "gunicorn==20.1.0"
psycopg2-binary \
"gunicorn==20.1.0" \
"python-ffmpeg==2.0.12" \
"ffprobe3"
# --- Install the custom yt_ops_services package ---
# Copy all the necessary source code for the package.


@ -1,30 +1,10 @@
# Stage 1: Extract static assets from the Airflow image
FROM pangramia/ytdlp-ops-airflow:latest AS asset-extractor
# Switch to root to create and write to the /assets directory
USER root
# Create a temporary directory for extracted assets
WORKDIR /assets
# Copy static assets from the Airflow image.
# This dynamically finds the paths to flask_appbuilder and airflow static assets
# to be resilient to version changes.
RUN cp -R $(python -c 'import os, flask_appbuilder; print(os.path.join(os.path.dirname(flask_appbuilder.__file__), "static"))') ./appbuilder && \
cp -R $(python -c 'import os, airflow; print(os.path.join(os.path.dirname(airflow.__file__), "www/static/dist"))') ./dist
# Pre-compress the static assets using gzip
# This improves performance by allowing Caddy to serve compressed files directly.
RUN find ./appbuilder -type f -print0 | xargs -0 gzip -k -9 && \
find ./dist -type f -print0 | xargs -0 gzip -k -9
# Stage 2: Build the final Caddy image
# Build the final Caddy image
FROM caddy:2-alpine
# Copy the pre-compressed static assets from the first stage
COPY --from=asset-extractor /assets/appbuilder /usr/share/caddy/static/appbuilder
COPY --from=asset-extractor /assets/dist /usr/share/caddy/static/dist
# Copy the pre-compressed static assets from the build context.
# These assets are extracted from the main Airflow image by the Ansible playbook.
COPY caddy_build_assets/appbuilder /usr/share/caddy/static/appbuilder
COPY caddy_build_assets/dist /usr/share/caddy/static/dist
# Copy the Caddyfile configuration. The build context is the project root,
# so the path is relative to that.


@ -1,249 +0,0 @@
# Proxy and Account Management Strategy
This document describes the intelligent resource-management strategy (for proxies and accounts) used by `ytdlp-ops-server`. The goal of this system is to maximize the success rate of operations, minimize bans, and provide fault tolerance.
The server can run in different roles to support a distributed architecture, separating management tasks from token-generation tasks.
---
## Service Roles and Architecture
The server is designed to run in one of three roles, selected with the `--service-role` flag:
- **`management`**: A single lightweight service instance responsible for all management API calls.
  - **Purpose**: Provides a centralized entry point for monitoring and managing the state of all proxies and accounts in the system.
  - **Behavior**: Exposes only the management functions (`getProxyStatus`, `banAccount`, etc.). Calls to token-generation functions will fail.
  - **Deployment**: Runs as a single container (`ytdlp-ops-management`) and exposes its port directly on the host (e.g., port `9091`), bypassing Envoy.
- **`worker`**: The main workhorse for generating tokens and `info.json`.
  - **Purpose**: Handles all token-generation requests.
  - **Behavior**: Implements the full API, but its management functions are limited to its own `server_identity`.
  - **Deployment**: Runs as a scalable service (`ytdlp-ops-worker`) behind an Envoy load balancer (e.g., port `9080`).
- **`all-in-one`** (default): A single instance that performs both management and worker functions. Ideal for local development or small deployments.
This architecture enables a robust, federated system in which workers manage their resources locally while a central service provides a global view for management and monitoring.
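A minimal sketch of how this role gating might look, assuming a simple method-name check (the names `check_call_allowed`, `MANAGEMENT_ONLY`, and `TOKEN_GENERATION` are hypothetical; the real dispatch lives in `ytdlp_ops_server_fix.py`):
```python
# Hypothetical sketch of role-based gating; the real dispatch in
# ytdlp_ops_server_fix.py may differ.
MANAGEMENT_ONLY = {"getProxyStatus", "banAccount"}   # management API calls
TOKEN_GENERATION = {"getToken", "getInfoJson"}       # worker API calls

def check_call_allowed(service_role: str, method: str) -> None:
    """Reject RPC methods that the configured --service-role does not serve."""
    if service_role == "management" and method in TOKEN_GENERATION:
        raise RuntimeError(f"{method} is not served by a management instance")
    # A 'worker' implements the full API but scopes management calls to its
    # own server_identity; 'all-in-one' serves everything unrestricted.
```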
---
## 1. Account Lifecycle Management (Cooldown / Resting)
**Goal:** Prevent overuse and subsequent banning of accounts by giving them "rest" periods after intensive work.
### How it works:
An account's lifecycle consists of three states (sketched in code below):
- **`ACTIVE`**: The account is active and used for tasks. Its activity timer starts on its first successful use.
- **`RESTING`**: If an account has been `ACTIVE` longer than the configured limit, the `AccountManager` automatically moves it into a "resting" state. In this state the Airflow worker will not pick it for new tasks.
- **Return to `ACTIVE`**: Once the rest period ends, the `AccountManager` automatically returns the account to `ACTIVE`, making it available again.
### Configuration:
These parameters are set when starting `ytdlp-ops-server`.
- `--account-active-duration-min`: The "work time", in **minutes**, that an account may stay continuously active before moving to `RESTING`.
  - **Default:** `30` (minutes).
- `--account-cooldown-duration-min`: The "rest time", in **minutes**, that an account must spend in the `RESTING` state.
  - **Default:** `60` (minutes).
**Where to configure:**
The parameters are passed as command-line arguments when the server starts. With Docker Compose this is done in `airflow/docker-compose-ytdlp-ops.yaml`:
```yaml
command:
  # ... other arguments
  - "--account-active-duration-min"
  - "${ACCOUNT_ACTIVE_DURATION_MIN:-30}"
  - "--account-cooldown-duration-min"
  - "${ACCOUNT_COOLDOWN_DURATION_MIN:-60}"
```
You can override the defaults by setting the `ACCOUNT_ACTIVE_DURATION_MIN` and `ACCOUNT_COOLDOWN_DURATION_MIN` environment variables in your `.env` file.
**Related files:**
- `server_fix/account_manager.py`: Contains the core state-switching logic.
- `ytdlp_ops_server_fix.py`: Parses the command-line arguments.
- `airflow/docker-compose-ytdlp-ops.yaml`: Passes the arguments to the server container.
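A minimal sketch of the cooldown state machine described above, assuming in-memory timestamps (the real logic in `server_fix/account_manager.py` also persists state to Redis):
```python
import time

# Simplified sketch of the ACTIVE -> RESTING -> ACTIVE cycle.
class AccountCooldown:
    def __init__(self, active_duration_min=30, cooldown_duration_min=60):
        self.active_s = active_duration_min * 60
        self.cooldown_s = cooldown_duration_min * 60
        self.active_since = {}   # account_id -> first successful use
        self.resting_since = {}  # account_id -> when the rest period began

    def on_success(self, account_id):
        # The activity timer starts on the first successful use.
        self.active_since.setdefault(account_id, time.time())

    def current_state(self, account_id):
        now = time.time()
        if account_id in self.resting_since:
            if now - self.resting_since[account_id] >= self.cooldown_s:
                del self.resting_since[account_id]   # rest period finished
                self.active_since[account_id] = now
                return "ACTIVE"
            return "RESTING"
        started = self.active_since.get(account_id)
        if started and now - started >= self.active_s:
            self.resting_since[account_id] = now     # active for too long
            self.active_since.pop(account_id, None)
            return "RESTING"
        return "ACTIVE"
```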
---
## 2. Smart Ban Strategy
**Goal:** Avoid banning good proxies without cause. The problem is often the account, not the proxy it runs through.
### How it works:
#### Stage 1: Ban the Account First
- When a serious, ban-worthy error occurs (e.g., `BOT_DETECTED` or `SOCKS5_CONNECTION_FAILED`), the system penalizes **only the account** that triggered the error.
- For the proxy, the error is merely recorded as a single failure; the proxy itself is **not banned** and stays in rotation.
#### Stage 2: Ban the Proxy via a Sliding Window
- A proxy is banned automatically only if it shows **systematic failures with DIFFERENT accounts** within a short period of time.
- That is a reliable indicator that the proxy itself is the problem. The server-side `ProxyManager` tracks this and bans such a proxy automatically (a sliding-window sketch follows this section).
### Configuration:
These parameters are **hard-coded** as constants; changing them requires editing the file.
**Where to configure:**
- **File:** `server_fix/proxy_manager.py`
- **Constants** in the `ProxyManager` class:
  - `FAILURE_WINDOW_SECONDS`: The time window, in seconds, for failure analysis.
    - **Default:** `3600` (1 hour).
  - `FAILURE_THRESHOLD_COUNT`: The minimum total number of failures required to trigger the check.
    - **Default:** `3`.
  - `FAILURE_THRESHOLD_UNIQUE_ACCOUNTS`: The minimum number of **unique accounts** that must have failed for the proxy to be banned.
    - **Default:** `3`.
**Related files:**
- `server_fix/proxy_manager.py`: Contains the sliding-window logic and the constants.
- `airflow/dags/ytdlp_ops_worker_per_url.py`: The `handle_bannable_error_callable` function implements the "account-only" ban policy.
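A sketch of the sliding-window check, using the documented defaults (a simplified, in-memory stand-in for the logic in `server_fix/proxy_manager.py`):
```python
import time
from collections import defaultdict

# Constants mirror the documented defaults in server_fix/proxy_manager.py.
FAILURE_WINDOW_SECONDS = 3600
FAILURE_THRESHOLD_COUNT = 3
FAILURE_THRESHOLD_UNIQUE_ACCOUNTS = 3

failures = defaultdict(list)  # proxy_url -> [(timestamp, account_id), ...]

def record_failure(proxy_url, account_id):
    failures[proxy_url].append((time.time(), account_id))

def should_ban_proxy(proxy_url):
    cutoff = time.time() - FAILURE_WINDOW_SECONDS
    recent = [(ts, acc) for ts, acc in failures[proxy_url] if ts >= cutoff]
    failures[proxy_url] = recent  # drop entries that fell out of the window
    unique_accounts = {acc for _, acc in recent}
    # Ban only on repeated failures spread across several distinct accounts.
    return (len(recent) >= FAILURE_THRESHOLD_COUNT
            and len(unique_accounts) >= FAILURE_THRESHOLD_UNIQUE_ACCOUNTS)
```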
---
### Account Status Reference
You can view the status of all accounts with the `ytdlp_mgmt_proxy_account` DAG. The statuses mean the following:
- **`ACTIVE`**: The account is healthy and available for use. By default, an account is considered `ACTIVE` when no explicit status is set.
- **`BANNED`**: The account is temporarily disabled due to repeated failures (e.g., `BOT_DETECTED` errors) or was banned manually. The status shows the time remaining until it automatically returns to `ACTIVE` (e.g., `BANNED (active in 55m)`).
- **`RESTING`**: The account was used for an extended period and is in a mandatory "rest" period to prevent burnout. The status shows the time remaining until it returns to `ACTIVE` (e.g., `RESTING (active in 25m)`).
- **(Empty Status)**: In older versions, an account with only failures (and no successes) could show an empty status. This has been fixed; such accounts now correctly display as `ACTIVE`.
---
## 3. End-to-End Rotation Flow: How It All Works Together
This section walks through how a worker obtains an account and a proxy for a single task, tying together all the management strategies described above.
1. **Worker Initialization (`ytdlp_ops_worker_per_url`)**
   - The DAG starts, triggered either by the orchestrator or by a previous successful run of itself.
   - The `pull_url_from_redis` task pops a URL from the `_inbox` queue in Redis.
2. **Account Selection (Airflow Worker)**
   - The `assign_account` task runs.
   - It generates the full list of candidate account IDs from the `account_pool` parameter (e.g., `my_prefix_01` through `my_prefix_50`).
   - It connects to Redis and checks the status of every account in that list.
   - It builds a new temporary list containing only accounts that are **not** in the `BANNED` or `RESTING` state.
   - If the resulting list of active accounts is empty, the worker fails (unless auto-creation is enabled).
   - It then picks one account from the filtered active list using **`random.choice()`**.
   - The chosen `account_id` is passed to the next task.
3. **Proxy Selection (`ytdlp-ops-server`)**
   - The `get_token` task runs and sends the randomly chosen `account_id` in a Thrift RPC call to `ytdlp-ops-server`.
   - On the server, the `ProxyManager` is asked for a proxy.
   - The `ProxyManager`:
     a. Refreshes its internal state by loading all proxy statuses from Redis.
     b. Filters the list down to proxies with `ACTIVE` status.
     c. Applies the sliding-window ban policy, potentially banning proxies that have recently failed too often.
     d. Picks the next available proxy from the active list using a **round-robin** index.
     e. Returns the chosen `proxy_url`.
4. **Execution and Reporting**
   - The server now has both the `account_id` (from Airflow) and the `proxy_url` (from its `ProxyManager`).
   - It proceeds with token generation using those resources.
   - On completion (success or failure), it reports the outcome to Redis, updating the statuses of the specific account and proxy that were used. This feeds their failure counters, rest timers, etc. for the next run.
This separation of responsibilities is key (see the sketch below):
- The **Airflow worker (`assign_account` task)** is responsible for **randomly selecting an active account**, while preserving "affinity" (reusing the same account after a success).
- The **`ytdlp-ops-server`** is responsible for **round-robin selection of an active proxy**.
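A sketch of the two selection mechanisms side by side (hypothetical, simplified; the real code also handles account affinity and Redis persistence):
```python
import random

def assign_account(account_pool, statuses):
    """statuses: account_id -> 'ACTIVE' | 'BANNED' | 'RESTING' (from Redis)."""
    active = [a for a in account_pool
              if statuses.get(a, "ACTIVE") not in ("BANNED", "RESTING")]
    if not active:
        raise RuntimeError("No active accounts available")
    return random.choice(active)  # random pick, as the worker side does

class RoundRobinProxies:
    """Server-side proxy rotation over the currently active proxies."""
    def __init__(self):
        self._index = 0

    def next_proxy(self, active_proxies):
        if not active_proxies:
            raise RuntimeError("No active proxies available")
        proxy = active_proxies[self._index % len(active_proxies)]
        self._index += 1
        return proxy
```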
---
## 4. Automatic Account Bans Based on Failure Count
**Goal:** Automatically rotate out accounts that keep producing errors unrelated to bans (e.g., wrong password, authorization problems).
### How it works:
- The `AccountManager` tracks the number of **consecutive** failures per account.
- A successful operation resets the counter.
- If the number of consecutive failures reaches the configured threshold, the account is automatically banned for a set period (see the sketch below).
### Configuration:
These parameters are set in the `AccountManager` class constructor.
**Where to configure:**
- **File:** `server_fix/account_manager.py`
- **Parameters** of `AccountManager.__init__`:
  - `failure_threshold`: Number of consecutive failures before a ban.
    - **Default:** `5`.
  - `ban_duration_s`: Ban duration in seconds.
    - **Default:** `3600` (1 hour).
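A minimal sketch of the consecutive-failure counter, with defaults mirroring the documented `AccountManager.__init__` parameters:
```python
import time

class FailureTracker:
    def __init__(self, failure_threshold=5, ban_duration_s=3600):
        self.failure_threshold = failure_threshold
        self.ban_duration_s = ban_duration_s
        self.consecutive = {}   # account_id -> consecutive failure count
        self.banned_until = {}  # account_id -> unban timestamp

    def on_success(self, account_id):
        self.consecutive[account_id] = 0  # success resets the counter

    def on_failure(self, account_id):
        count = self.consecutive.get(account_id, 0) + 1
        self.consecutive[account_id] = count
        if count >= self.failure_threshold:
            self.banned_until[account_id] = time.time() + self.ban_duration_s

    def is_banned(self, account_id):
        return time.time() < self.banned_until.get(account_id, 0)
```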
---
## 5. Monitoring and Recovery
### How to Check Statuses
The **`ytdlp_mgmt_proxy_account`** DAG is the primary tool for monitoring the health of your resources. It connects directly to the **management service** to perform actions.
- **DAG ID:** `ytdlp_mgmt_proxy_account`
- **How to use:** Trigger the DAG from the Airflow UI (or via the REST API; see the sketch below). Make sure the `management_host` and `management_port` parameters point at your `ytdlp-ops-management` service instance. For a full overview, set the parameters:
  - `entity`: `all`
  - `action`: `list`
- **Result:** The DAG log displays tables with the current status of all accounts and proxies. For accounts in the `BANNED` or `RESTING` state, the time remaining until activation is shown (e.g., `RESTING (active in 45m)`). For proxies, the one that is `(next)` in rotation for each worker is highlighted.
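A sketch of triggering the same status listing through the Airflow 2 stable REST API; the host and credentials here are placeholders:
```python
import requests

# Trigger ytdlp_mgmt_proxy_account with a full status listing.
resp = requests.post(
    "http://localhost:8080/api/v1/dags/ytdlp_mgmt_proxy_account/dagRuns",
    auth=("airflow", "airflow"),  # placeholder basic-auth credentials
    json={"conf": {"entity": "all", "action": "list"}},
)
resp.raise_for_status()
print(resp.json()["dag_run_id"])  # the run to inspect in the Airflow UI
```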
### What Happens if All Accounts Are Banned or Resting?
If the entire account pool becomes unavailable (in the `BANNED` or `RESTING` state), the system pauses by default.
- The `ytdlp_ops_worker_per_url` DAG fails with an `AirflowException` at the `assign_account` step, since the pool of active accounts is empty.
- This stops the processing loops. The system stays paused until accounts are unbanned manually or their ban/rest timers expire. After that, you can restart the processing loops with the `ytdlp_ops_orchestrator` DAG.
- The `ytdlp_ops_worker_per_url` DAG graph now explicitly shows tasks such as `assign_account`, `get_token`, `ban_account`, `retry_get_token`, etc., making the execution flow and failure points easier to see.
The system can be configured to create new accounts automatically, preventing processing from stopping entirely.
#### Automatic Account Creation on Exhaustion
- **Goal**: Keep the processing pipeline running even when every account in the main pool is temporarily banned or resting.
- **How it works**: If the `auto_create_new_accounts_on_exhaustion` parameter is `True` and the account pool is defined by a prefix (rather than an explicit list), the system generates a new unique account ID when it finds the active pool empty (a sketch follows this list).
- **Naming of new accounts**: New accounts are created in the format `{prefix}-auto-{unique_id}`.
- **Configuration**:
  - **Parameter**: `auto_create_new_accounts_on_exhaustion`
  - **Where to configure**: In the `ytdlp_ops_orchestrator` DAG configuration at trigger time.
  - **Default**: `True`.
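A sketch of the exhaustion fallback; the exact unique-ID scheme is an assumption (the docs only specify the `{prefix}-auto-{unique_id}` pattern):
```python
import random
import uuid

def pick_or_create_account(active_accounts, prefix, auto_create=True):
    if active_accounts:
        return random.choice(active_accounts)
    if not auto_create:
        raise RuntimeError("Active account pool is empty")
    # Hypothetical unique-ID scheme matching {prefix}-auto-{unique_id}.
    return f"{prefix}-auto-{uuid.uuid4().hex[:8]}"
```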
---
## 6. Failure Handling and Retry Policy
**Goal:** Provide flexible control over system behavior when a worker hits a ban-worthy error (e.g., `BOT_DETECTED`).
### How it works
When a worker's `get_token` task fails with a ban-worthy error, the system's behavior is governed by the `on_bannable_failure` policy, configurable when triggering `ytdlp_ops_orchestrator`.
### Configuration
- **Parameter**: `on_bannable_failure`
- **Where to configure**: In the `ytdlp_ops_orchestrator` DAG configuration.
- **Options** (a sketch of the per-policy actions follows this section):
  - `stop_loop` (strictest):
    - The account that was used is banned.
    - The URL is marked as failed in the `_fail` hash in Redis.
    - The worker's processing loop **stops**. That processing "lane" goes idle.
  - `retry_with_new_account` (default, most fault-tolerant):
    - The account that caused the failure is banned.
    - The worker immediately retries **the same URL** with a new, unused account from the pool.
    - If the retry succeeds, the worker continues its loop with the next URL.
    - If the retry also fails, the second account **and the proxy that was used** are banned as well, and the worker's loop stops.
  - `retry_and_ban_account_only`:
    - Like `retry_with_new_account`, but on the second failure **only the second account** is banned, not the proxy.
    - Useful when you trust your proxies but want to cycle aggressively through failing accounts.
  - `retry_without_ban` (most lenient):
    - The worker retries with a new account, but **neither accounts nor proxies are ever banned**.
    - Useful for debugging, or when you are confident the failures are transient and not caused by resource problems.
This policy makes the system resilient to individual account failures without losing URLs, while giving granular control over when to ban accounts and/or proxies if the problem persists.
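A sketch of how the four policies might map to concrete actions (hypothetical; the real branching lives in `handle_bannable_error_branch` in `airflow/dags/ytdlp_ops_worker_per_url.py`):
```python
def actions_for_policy(policy: str, is_retry: bool) -> dict:
    """Return which resources to ban and whether to retry the same URL."""
    if policy == "stop_loop":
        return {"ban_account": True, "ban_proxy": False, "retry": False}
    if policy == "retry_with_new_account":
        # On the second failure the proxy is banned too, and the loop stops.
        return {"ban_account": True, "ban_proxy": is_retry, "retry": not is_retry}
    if policy == "retry_and_ban_account_only":
        return {"ban_account": True, "ban_proxy": False, "retry": not is_retry}
    if policy == "retry_without_ban":
        return {"ban_account": False, "ban_proxy": False, "retry": not is_retry}
    raise ValueError(f"Unknown policy: {policy}")
```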
---
## 7. Worker DAG Logic (`ytdlp_ops_worker_per_url`)
This DAG is the workhorse of the system. It is designed as a self-sustaining loop that processes one URL per run. The failure-handling and retry logic is now explicitly visible in the DAG's task graph.
### Tasks and their purpose:
- **`pull_url_from_redis`**: Pops a single URL from the `_inbox` queue in Redis. If the queue is empty, the DAG finishes with a `skipped` status, stopping this processing lane.
- **`assign_account`**: Selects an account for the task. It supports **account affinity**, reusing the account from the previous successful run in its lane. On a first run, or after a failure, it picks a random active account.
- **`get_token`**: The primary attempt to obtain tokens and the `info.json` by calling `ytdlp-ops-server`.
- **`handle_bannable_error_branch`**: A branching task that runs when `get_token` fails. It inspects the error and decides the next step based on the `on_bannable_failure` policy.
- **`ban_account_and_prepare_for_retry`**: If a retry is allowed, this task bans the failed account and selects a new one.
- **`retry_get_token`**: The second attempt to obtain a token using the new account.
- **`ban_second_account_and_proxy`**: If the retry fails as well, this task bans the second account and the proxy that was used.
- **`download_and_probe`**: If `get_token` or `retry_get_token` succeeds, this task uses `yt-dlp` to download the media and `ffmpeg` to verify file integrity (a probe sketch follows this list).
- **`mark_url_as_success`**: If `download_and_probe` succeeds, this task writes the successful result to the `_result` hash in Redis.
- **`handle_generic_failure`**: If any task fails with an unrecoverable error, this task writes detailed error information to the `_fail` hash in Redis.
- **`decide_what_to_do_next`**: The final branching task, which decides whether to continue the loop (`trigger_self_run`), stop it cleanly (`stop_loop`), or mark it failed (`fail_loop`).
- **`trigger_self_run`**: The task that actually triggers the next DAG run, creating the continuous loop.
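A sketch of the kind of integrity probe `download_and_probe` performs, using `ffprobe` via `subprocess` (an assumption; the DAG may invoke ffmpeg differently, e.g., through the `python-ffmpeg` bindings installed in the image):
```python
import subprocess

def probe_media(path: str) -> bool:
    """Return True if ffprobe can fully parse the downloaded file."""
    result = subprocess.run(
        ["ffprobe", "-v", "error", "-show_format", "-show_streams", path],
        capture_output=True, text=True,
    )
    if result.returncode != 0:
        # Any parse error suggests a truncated or corrupt download.
        print(f"Probe failed for {path}: {result.stderr.strip()}")
        return False
    return True
```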


@ -1,407 +0,0 @@
Diff to getpot_bgutil_http
def _validate_get_pot(self, client: str, ydl: YoutubeDL, visitor_data=None, data_sync_id=None, player_url=None, **kwargs):
if client != 'ios':
raise UnsupportedRequest(f'Client {client} is not supported')
base_url = ydl.get_info_extractor('Youtube')._configuration_arg(
'getpot_bgutil_baseurl', ['http://127.0.0.1:4416'], casesense=True)[0]
# Validate visitor data format for ios client
if visitor_data and not visitor_data.startswith('Cg'):
raise UnsupportedRequest('Invalid visitor data format for ios client')
if not data_sync_id and not visitor_data:
raise UnsupportedRequest(
'One of [data_sync_id, visitor_data] must be passed')
try:
self.logger.trace(
f'Checking server availability at {self._base_url}/ping')
response = json.load(self._request_webpage(Request(
f'{self._base_url}/ping', extensions={'timeout': self._GET_SERVER_VSN_TIMEOUT}, proxies={'all': None}),
note=False))
except TransportError as e:
# the server may be down
script_path_provided = self.ie._configuration_arg(
ie_key='youtubepot-bgutilscript', key='script_path', default=[None])[0] is not None
warning_base = f'Error reaching GET {self._base_url}/ping (caused by {e.__class__.__name__}). '
if script_path_provided: # server down is expected, log info
self._info_and_raise(
warning_base + 'This is expected if you are using the script method.')
else:
self._warn_and_raise(
warning_base + f'Please make sure that the server is reachable at {self._base_url}.')
return
except HTTPError as e:
# may be an old server, don't raise
self.logger.warning(
f'HTTP Error reaching GET /ping (caused by {e!r})', once=True)
return
except json.JSONDecodeError as e:
# invalid server
self._warn_and_raise(
f'Error parsing ping response JSON (caused by {e!r})')
return
except Exception as e:
self._warn_and_raise(
f'Unknown error reaching GET /ping (caused by {e!r})', raise_from=e)
return
else:
self._check_version(response.get('version', ''), name='HTTP server')
self._server_available = True
return True
finally:
self._last_server_check = time.time()
<<<<<<< HEAD
def is_available(self):
return self._server_available or self._last_server_check + 60 < int(time.time())
def _real_request_pot(
self,
request: PoTokenRequest,
) -> PoTokenResponse:
if not self._check_server_availability(request):
raise PoTokenProviderRejectedRequest(
f'{self.PROVIDER_NAME} server is not available')
# used for CI check
self.logger.trace('Generating POT via HTTP server')
=======
def _get_pot(self, client: str, ydl: YoutubeDL, visitor_data=None, data_sync_id=None, player_url=None, **kwargs) -> str:
# Check if we have a pre-provided token
if client == 'ios' and kwargs.get('po_token'):
self._logger.info('Using provided ios PO token')
return kwargs['po_token']
self._logger.info(f'Generating POT via HTTP server for {client} client')
if ((proxy := select_proxy('https://jnn-pa.googleapis.com', self.proxies))
!= select_proxy('https://youtube.com', self.proxies)):
self._logger.warning(
'Proxies for https://youtube.com and https://jnn-pa.googleapis.com are different. '
'This is likely to cause subsequent errors.')
>>>>>>> 559b875 (feat: Add support for pre-provided ios PO tokens and client-specific validation)
try:
response = self._request_webpage(
request=Request(
f'{self._base_url}/get_pot', data=json.dumps({
'content_binding': get_webpo_content_binding(request)[0],
'proxy': request.request_proxy,
'bypass_cache': request.bypass_cache,
'source_address': request.request_source_address,
'disable_tls_verification': not request.request_verify_tls,
}).encode(), headers={'Content-Type': 'application/json'},
extensions={'timeout': self._GETPOT_TIMEOUT}, proxies={'all': None}),
note=f'Generating a {request.context.value} PO Token for '
f'{request.internal_client_name} client via bgutil HTTP server',
)
except Exception as e:
raise PoTokenProviderError(
f'Error reaching POST /get_pot (caused by {e!r})') from e
try:
response_json = json.load(response)
except Exception as e:
raise PoTokenProviderError(
f'Error parsing response JSON (caused by {e!r}). response = {response.read().decode()}') from e
if error_msg := response_json.get('error'):
raise PoTokenProviderError(error_msg)
if 'poToken' not in response_json:
raise PoTokenProviderError(
f'Server did not respond with a poToken. Received response: {json.dumps(response_json)}')
po_token = response_json['poToken']
self.logger.trace(f'Generated POT: {po_token}')
return PoTokenResponse(po_token=po_token)
@register_preference(BgUtilHTTPPTP)
def bgutil_HTTP_getpot_preference(provider, request):
return 100
__all__ = [BgUtilHTTPPTP.__name__,
bgutil_HTTP_getpot_preference.__name__]
-------------------------
Diff to getpot_bgutil_script.py
from __future__ import annotations
import contextlib
import functools
import json
import os.path
import re
import shutil
import subprocess
from yt_dlp.extractor.youtube.pot.utils import get_webpo_content_binding
from yt_dlp.utils import Popen
with contextlib.suppress(ImportError):
from yt_dlp_plugins.extractor.getpot_bgutil import BgUtilPTPBase
from yt_dlp.extractor.youtube.pot.provider import (
PoTokenProviderError,
PoTokenRequest,
PoTokenResponse,
register_preference,
register_provider,
)
@register_provider
class BgUtilScriptPTP(BgUtilPTPBase):
PROVIDER_NAME = 'bgutil:script'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._check_script = functools.cache(self._check_script_impl)
@functools.cached_property
def _script_path(self):
script_path = self._configuration_arg(
'script_path', casesense=True, default=[None])[0]
if script_path:
return os.path.expandvars(script_path)
# check deprecated arg
deprecated_script_path = self.ie._configuration_arg(
ie_key='youtube', key='getpot_bgutil_script', default=[None])[0]
if deprecated_script_path:
self._warn_and_raise(
"'youtube:getpot_bgutil_script' extractor arg is deprecated, use 'youtubepot-bgutilscript:script_path' instead")
# default if no arg was passed
home = os.path.expanduser('~')
default_path = os.path.join(
home, 'bgutil-ytdlp-pot-provider', 'server', 'build', 'generate_once.js')
self.logger.debug(
f'No script path passed, defaulting to {default_path}')
return default_path
<<<<<<< HEAD
def is_available(self):
return self._check_script(self._script_path)
@functools.cached_property
def _node_path(self):
node_path = shutil.which('node')
if node_path is None:
self.logger.trace('node is not in PATH')
return None
vsn = self._check_node_version(node_path)
if vsn:
self.logger.trace(f'Node version: {vsn}')
return node_path
def _check_script_impl(self, script_path):
=======
def _validate_get_pot(self, client: str, ydl: YoutubeDL, visitor_data=None, data_sync_id=None, player_url=None, **kwargs):
script_path = ydl.get_info_extractor('Youtube')._configuration_arg(
'getpot_bgutil_script', [self._default_script_path], casesense=True)[0]
# If a specific client is requested, validate it's supported
requested_client = ydl.params.get('extractor_args', {}).get('youtube', {}).get('formats')
if requested_client and client != requested_client:
raise UnsupportedRequest(f'Skipping {client} as {requested_client} was specifically requested')
if not data_sync_id and not visitor_data:
raise UnsupportedRequest(
'One of [data_sync_id, visitor_data] must be passed')
>>>>>>> 046a994 (refactor: support client-specific requests via extractor_args in POT providers)
if not os.path.isfile(script_path):
self.logger.debug(
f"Script path doesn't exist: {script_path}")
return False
if os.path.basename(script_path) != 'generate_once.js':
self.logger.warning(
'Incorrect script passed to extractor args. Path to generate_once.js required', once=True)
return False
node_path = self._node_path
if not node_path:
return False
stdout, stderr, returncode = Popen.run(
[self._node_path, script_path, '--version'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True,
timeout=self._GET_SERVER_VSN_TIMEOUT)
if returncode:
self.logger.warning(
f'Failed to check script version. '
f'Script returned {returncode} exit status. '
f'Script stdout: {stdout}; Script stderr: {stderr}',
once=True)
return False
else:
self._check_version(stdout.strip(), name='script')
return True
def _check_node_version(self, node_path):
try:
stdout, stderr, returncode = Popen.run(
[node_path, '--version'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True,
timeout=self._GET_SERVER_VSN_TIMEOUT)
stdout = stdout.strip()
mobj = re.match(r'v(\d+)\.(\d+)\.(\d+)', stdout)
if returncode or not mobj:
raise ValueError
node_vsn = tuple(map(int, mobj.groups()))
if node_vsn >= self._MIN_NODE_VSN:
return node_vsn
raise RuntimeError
except RuntimeError:
min_vsn_str = 'v' + '.'.join(str(v) for v in self._MIN_NODE_VSN)
self.logger.warning(
f'Node version too low. '
f'(got {stdout}, but at least {min_vsn_str} is required)')
except (subprocess.TimeoutExpired, ValueError):
self.logger.warning(
f'Failed to check node version. '
f'Node returned {returncode} exit status. '
f'Node stdout: {stdout}; Node stderr: {stderr}')
def _real_request_pot(
self,
request: PoTokenRequest,
) -> PoTokenResponse:
# used for CI check
self.logger.trace(
f'Generating POT via script: {self._script_path}')
command_args = [self._node_path, self._script_path]
if proxy := request.request_proxy:
command_args.extend(['-p', proxy])
command_args.extend(['-c', get_webpo_content_binding(request)[0]])
if request.bypass_cache:
command_args.append('--bypass-cache')
if request.request_source_address:
command_args.extend(
['--source-address', request.request_source_address])
if request.request_verify_tls is False:
command_args.append('--disable-tls-verification')
self.logger.info(
f'Generating a {request.context.value} PO Token for '
f'{request.internal_client_name} client via bgutil script',
)
self.logger.debug(
f'Executing command to get POT via script: {" ".join(command_args)}')
try:
stdout, stderr, returncode = Popen.run(
command_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True,
timeout=self._GETPOT_TIMEOUT)
except subprocess.TimeoutExpired as e:
raise PoTokenProviderError(
f'_get_pot_via_script failed: Timeout expired when trying to run script (caused by {e!r})')
except Exception as e:
raise PoTokenProviderError(
f'_get_pot_via_script failed: Unable to run script (caused by {e!r})') from e
msg = f'stdout:\n{stdout.strip()}'
if stderr.strip(): # Empty strings are falsy
msg += f'\nstderr:\n{stderr.strip()}'
self.logger.trace(msg)
if returncode:
raise PoTokenProviderError(
f'_get_pot_via_script failed with returncode {returncode}')
try:
# The JSON response is always the last line
script_data_resp = json.loads(stdout.splitlines()[-1])
except json.JSONDecodeError as e:
raise PoTokenProviderError(
f'Error parsing JSON response from _get_pot_via_script (caused by {e!r})') from e
if 'poToken' not in script_data_resp:
raise PoTokenProviderError(
'The script did not respond with a po_token')
return PoTokenResponse(po_token=script_data_resp['poToken'])
@register_preference(BgUtilScriptPTP)
def bgutil_script_getpot_preference(provider, request):
return 1
__all__ = [BgUtilScriptPTP.__name__,
bgutil_script_getpot_preference.__name__]


@ -28,6 +28,7 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
libnss3 libnspr4 libdbus-1-3 libatk1.0-0 libatk-bridge2.0-0 libcups2 libdrm2 libxkbcommon0 libxcomposite1 libxdamage1 libxfixes3 libxrandr2 libgbm1 libpango-1.0-0 libcairo2 libasound2 \
libgtk-3-0 libx11-xcb1 fonts-liberation tzdata \
xauth util-linux x11-xserver-utils \
curl \
&& \
# Configure timezone
ln -fs /usr/share/zoneinfo/${TZ} /etc/localtime && \


@ -80,7 +80,7 @@ def monitor_resources(server_ports, proxy_url):
process_cpu = current_process.cpu_percent()
# Update active connections using psutil
all_connections = current_process.net_connections(kind='inet')
all_connections = psutil.net_connections(kind='inet')
new_active_connections = defaultdict(int)
for conn in all_connections:
if conn.status == psutil.CONN_ESTABLISHED and conn.laddr.port in server_ports:

BIN
airflow/config/.DS_Store vendored Normal file

Binary file not shown.

3167
airflow/config/airflow.cfg Normal file

File diff suppressed because it is too large


@ -0,0 +1,4 @@
# This file should be generated from ansible/templates/.env.ytdlp.j2
# Do not edit manually - your changes will be overwritten.
#
# To generate this file, run the Ansible playbook that processes the templates.


@ -0,0 +1,4 @@
# This file should be generated from ansible/templates/.env.ytdlp.j2
# Do not edit manually - your changes will be overwritten.
#
# To generate this file, run the Ansible playbook that processes the templates.


@ -28,8 +28,8 @@ x-airflow-common:
environment:
&airflow-common-env
AIRFLOW__CORE__PARALLELISM: 64
AIRFLOW__CORE__MAX_ACTIVE_TASKS_PER_DAG: 32
AIRFLOW__CORE__PARALLELISM: 128
AIRFLOW__CORE__MAX_ACTIVE_TASKS_PER_DAG: 64
AIRFLOW__SCHEDULER__PARSING_PROCESSES: 4
AIRFLOW__WEBSERVER__WORKERS: 5
AIRFLOW__WEBSERVER__WORKER_CLASS: "gevent"
@ -49,8 +49,8 @@ x-airflow-common:
# Backend connections - These should point to the master node
# Set MASTER_HOST_IP, POSTGRES_PASSWORD, and REDIS_PASSWORD in your .env file
AIRFLOW__DATABASE__SQL_ALCHEMY_CONN: postgresql+psycopg2://airflow:${{ '{' }}POSTGRES_PASSWORD{{ '}' }}@${{ '{' }}MASTER_HOST_IP{{ '}' }}:{{ postgres_port }}/airflow
IRFLOW__CELERY__RESULT_BACKEND: db+postgresql+psycopg2://airflow:${{ '{' }}POSTGRES_PASSWORD{{ '}' }}@${{ '{' }}MASTER_HOST_IP{{ '}' }}:{{ postgres_port }}/airflow
AIRFLOW__CELERY__BROKER_URL: redis://:${REDIS_PASSWORD}@${MASTER_HOST_IP}:52909/0
AIRFLOW__CELERY__RESULT_BACKEND: db+postgresql+psycopg2://airflow:${{ '{' }}POSTGRES_PASSWORD{{ '}' }}@${{ '{' }}MASTER_HOST_IP{{ '}' }}:{{ postgres_port }}/airflow
AIRFLOW__CELERY__BROKER_URL: redis://:${REDIS_PASSWORD}@${MASTER_HOST_IP}:{{ redis_port }}/0
# Remote Logging - connection is configured directly via environment variables
#_PIP_ADDITIONAL_REQUIREMENTS: ${{ '{' }}_PIP_ADDITIONAL_REQUIREMENTS:- apache-airflow-providers-docker apache-airflow-providers-http thrift>=0.16.0,<=0.20.0 backoff>=2.2.1 python-dotenv==1.0.1 psutil>=5.9.0 apache-airflow-providers-amazon{{ '}' }}
@ -75,8 +75,8 @@ x-airflow-common:
- ${AIRFLOW_PROJ_DIR:-.}/downloadfiles:/opt/airflow/downloadfiles
- ${AIRFLOW_PROJ_DIR:-.}/addfiles:/opt/airflow/addfiles
- ${AIRFLOW_PROJ_DIR:-.}/inputfiles:/opt/airflow/inputfiles
# Use AIRFLOW_UID from .env file to fix permission issues.
user: "${AIRFLOW_UID:-50000}"
# Use AIRFLOW_UID from .env file to fix permission issues. GID is set to 0 for compatibility with the Airflow image.
user: "${{ '{' }}AIRFLOW_UID:-50000{{ '}' }}:0"
services:
airflow-worker:
@ -108,7 +108,9 @@ services:
AIRFLOW__CELERY__WORKER_QUEUES: "queue-dl,queue-dl-${HOSTNAME:-dl001}"
AIRFLOW__CELERY__WORKER_TAGS: "dl"
AIRFLOW__CELERY__WORKER_PREFETCH_MULTIPLIER: "1"
AIRFLOW__CELERY__WORKER_CONCURRENCY: ${AIRFLOW_WORKER_DOWNLOAD_CONCURRENCY:-16}
# Use autoscaling to adjust number of workers based on load.
# Format is max_concurrency,min_concurrency.
AIRFLOW__CELERY__WORKER_AUTOSCALE: "16,4"
# Use prefork pool for better compatibility with blocking libraries.
AIRFLOW__CELERY__POOL: "prefork"
AIRFLOW__CELERY__TASK_ACKS_LATE: "False"


@ -66,10 +66,10 @@ x-airflow-common:
- proxynet
environment:
&airflow-common-env
AIRFLOW__CORE__PARALLELISM: 64
AIRFLOW__CORE__MAX_ACTIVE_TASKS_PER_DAG: 32
AIRFLOW__CORE__PARALLELISM: 128
AIRFLOW__CORE__MAX_ACTIVE_TASKS_PER_DAG: 64
AIRFLOW__SCHEDULER__PARSING_PROCESSES: 4
AIRFLOW__WEBSERVER__WORKER_CLASS: gevent
AIRFLOW__WEBSERVER__WORKER_CLASS: sync
AIRFLOW__WEBSERVER__WORKERS: 8
AIRFLOW__LOGGING__SECRET_MASK_EXCEPTION_ARGS: 'false'
@ -101,7 +101,7 @@ x-airflow-common:
AIRFLOW__LOGGING__REMOTE_LOGGING: 'true'
AIRFLOW__LOGGING__REMOTE_BASE_LOG_FOLDER: "s3://airflow-logs"
AIRFLOW__LOGGING__REMOTE_LOG_CONN_ID: minio_default
AIRFLOW__LOGGING__ENCRYPT_S3_LOGS: 'false'
AIRFLOW__LOGGING__ENCRYPT_S3_LOGS: 'false'
AIRFLOW__CORE__LOCAL_SETTINGS_PATH: "/opt/airflow/config/custom_task_hooks.py"
volumes:
- ${{ '{' }}AIRFLOW_PROJ_DIR:-.{{ '}' }}/dags:/opt/airflow/dags
@ -310,17 +310,17 @@ services:
ports:
- "8080:8080"
depends_on:
- airflow-webserver
airflow-webserver:
condition: service_started
restart: always
airflow-webserver:
<<: *airflow-common
command: webserver
expose:
- "8080"
environment:
<<: *airflow-common-env
# Trigger gevent monkeypatching for webserver.
# See: https://github.com/apache/airflow/pull/28283
_AIRFLOW_PATCH_GEVENT: "1"
healthcheck:
test: ["CMD", "curl", "--fail", "http://localhost:8080/health"]
interval: 30s
@ -511,14 +511,14 @@ services:
- -c
- airflow
# You can enable flower by adding "--profile flower" option e.g. docker-compose --profile flower up
# or by explicitly targeted on the command line e.g. docker-compose up flower.
# See: https://docs.docker.com/compose/profiles/
flower:
<<: *airflow-common
command: celery flower
ports:
- "5555:5555"
environment:
<<: *airflow-common-env
FLOWER_BASIC_AUTH: "flower:${{ '{' }}FLOWER_PASSWORD{{ '}' }}"
healthcheck:
test: ["CMD", "curl", "--fail", "http://localhost:5555/"]
interval: 30s
@ -530,8 +530,6 @@ services:
<<: *airflow-common-depends-on
airflow-init:
condition: service_completed_successfully
profiles:
- flower
docker-socket-proxy:
profiles:


@ -4,17 +4,21 @@ include:
# This automatically includes the generated camoufox service definitions and dependencies.
# It simplifies the docker-compose command, as you no longer need to specify both files with -f.
# The file is generated by the config-generator service and will be created even if empty.
- docker-compose.camoufox.yaml
- ./configs/docker-compose.camoufox.yaml
{% endif %}
services:
envoy:
image: envoyproxy/envoy:v1.29-latest
{% if service_role != 'management' %}
container_name: envoy-thrift-lb-${HOSTNAME}
{% else %}
container_name: envoy-thrift-lb
{% endif %}
restart: unless-stopped
volumes:
# Mount the generated config file from the host
- ./envoy.yaml:/etc/envoy/envoy.yaml:ro
- ./configs/envoy.yaml:/etc/envoy/envoy.yaml:ro
ports:
# This is the single public port for all Thrift traffic
- "${ENVOY_PORT:-9080}:${ENVOY_PORT:-9080}"
@ -37,12 +41,12 @@ services:
{% endif %}
# Ports are no longer exposed directly. Envoy will connect to them on the internal network.
env_file:
- ./.env # Path is relative to the compose file location (configs directory)
- ./.env # Path is relative to the project directory
volumes:
- context-data:/app/context-data
{% if service_role != 'management' %}
# Mount the generated endpoints file to make it available to the server
- ../camoufox/camoufox_endpoints.json:/app/config/camoufox_endpoints.json:ro
- ./configs/camoufox_endpoints.json:/app/config/camoufox_endpoints.json:ro
{% endif %}
# Mount the plugin source code for live updates without rebuilding the image.
# Assumes the plugin source is in a 'bgutil-ytdlp-pot-provider' directory
@ -95,8 +99,6 @@ services:
volumes:
context-data:
name: context-data
external: true
{% if service_role == 'management' or not camoufox_proxies %}
networks:


@ -5,16 +5,17 @@
# and adds the necessary dependencies to the main services.
services:
{% for proxy in camoufox_proxies %}
{% set proxy_port = _get_port_from_proxy_url(proxy.url) | int %}
{% set container_base_port = camoufox_port + loop.index0 * worker_count %}
{% set host_base_port = container_base_port %}
camoufox-{{ loop.index }}:
camoufox-{{ proxy_port }}-{{ loop.index }}:
build:
context: ../camoufox
dockerfile: Dockerfile
args:
VNC_PASSWORD: "{{ vnc_password }}"
image: camoufox:latest
container_name: ytdlp-ops-camoufox-{{ loop.index }}-1
container_name: ytdlp-ops-camoufox-{{ proxy_port }}-{{ loop.index }}-1
restart: unless-stopped
shm_size: '2gb' # Mitigates browser crashes due to shared memory limitations
ports:
@ -27,7 +28,7 @@ services:
- CAMOUFOX_RESTART_THRESHOLD_MB=1500
volumes:
- /tmp/.X11-unix:/tmp/.X11-unix:rw
- camoufox-data-{{ loop.index }}:/app/context-data
- camoufox-data-{{ proxy_port }}-{{ loop.index }}:/app/context-data
- camoufox-browser-cache:/root/.cache/ms-playwright # Persist browser binaries
command: [
"--ws-host", "0.0.0.0",
@ -62,7 +63,8 @@ services:
restart: "no"
depends_on:
{% for proxy in camoufox_proxies %}
- camoufox-{{ loop.index }}
{% set proxy_port = _get_port_from_proxy_url(proxy.url) | int %}
- camoufox-{{ proxy_port }}-{{ loop.index }}
{% endfor %}
networks:
- proxynet
@ -70,7 +72,8 @@ services:
volumes:
{% for proxy in camoufox_proxies %}
camoufox-data-{{ loop.index }}:
{% set proxy_port = _get_port_from_proxy_url(proxy.url) | int %}
camoufox-data-{{ proxy_port }}-{{ loop.index }}:
{% endfor %}
{% if camoufox_proxies %}
camoufox-browser-cache:


@ -1,6 +1,5 @@
# This file is used to generate the necessary configuration files for the main application stack.
# It should be run as a one-off command before starting the main services.
# Example: docker-compose -f airflow/docker-compose.config-generate.yaml run --rm config-generator
version: '3.8'
services:
config-generator:
image: python:3.12-slim
@ -9,6 +8,6 @@ services:
- ./.env
volumes:
# Mount the entire project directory to access scripts and write output files
- ./:/app
- ../:/app
command: >
sh -c "pip install jinja2 && python3 /app/generate_envoy_config.py"

BIN
airflow/dags/.DS_Store vendored Normal file

Binary file not shown.

88
airflow/dags/README.ru.md Normal file

@ -0,0 +1,88 @@
# Architecture and Overview of the YTDLP Airflow DAGs
This document describes the architecture and purpose of the DAGs used for downloading YouTube videos. The system is built around a continuous, self-sustaining loop model for parallel, fault-tolerant processing.
## Main Processing Loop
Processing is performed by two main DAGs working as a pair: an orchestrator and a worker.
### `ytdlp_ops_orchestrator` (The "Ignition System")
- **Purpose:** This DAG acts as the "ignition system" that kicks off processing. It is triggered manually to start a specified number of parallel worker loops.
- **How it works:**
  - It does **not** process URLs itself.
  - Its only job is to trigger the configured number of `ytdlp_ops_worker_per_url` DAG runs.
  - It passes all necessary configuration (account pool, Redis connection, etc.) to the workers.
### `ytdlp_ops_worker_per_url` (Self-Sustaining Worker)
- **Purpose:** This DAG processes a single URL and is designed to run in a continuous loop.
- **How it works:**
  1. **Start:** The initial run is triggered by `ytdlp_ops_orchestrator`.
  2. **Get a task:** The worker pops one URL from the `_inbox` queue in Redis. If the queue is empty, the worker finishes and its processing lane stops.
  3. **Processing:** It talks to the `ytdlp-ops-server` service to obtain the `info.json` and a proxy, then downloads the video.
  4. **Continue or stop:**
     - **On success:** It triggers a new run of itself, creating a continuous loop to process the next URL.
     - **On failure:** The loop is interrupted (if `stop_on_failure` is `True`), stopping that processing lane. This prevents a single problematic URL or account from halting the whole system.
## Management DAGs
### `ytdlp_mgmt_proxy_account`
- **Purpose:** The primary tool for monitoring and managing the state of the resources used by `ytdlp-ops-server`.
- **Functionality:**
  - **View statuses:** Shows the current status of all proxies and accounts (e.g., `ACTIVE`, `BANNED`, `RESTING`).
  - **Manage proxies:** Manually ban, unban, or reset the status of proxies.
  - **Manage accounts:** Manually ban or unban accounts.
### `ytdlp_mgmt_queues`
- **Purpose:** Provides a set of tools for managing the Redis queues used by the processing pipeline.
- **Functionality (via the `action` parameter; a requeue sketch follows this list):**
  - `add_videos`: Add one or more YouTube URLs to the queue.
  - `clear_queue`: Clear (delete) the given Redis key.
  - `list_contents`: View the contents of a Redis key (list or hash).
  - `check_status`: Check the overall state of the queues (type, size).
  - `requeue_failed`: Move all URLs from the `_fail` failure queue back to the `_inbox` queue for reprocessing.
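A sketch of what `requeue_failed` does, assuming `_fail` is a Redis hash keyed by URL and `_inbox` is a Redis list (following the `queue_name` convention used throughout these DAGs):
```python
import redis

def requeue_failed(client: redis.Redis, queue_name: str) -> int:
    fail_key = f"{queue_name}_fail"
    inbox_key = f"{queue_name}_inbox"
    failed = client.hgetall(fail_key)
    with client.pipeline() as pipe:
        for url in failed:
            pipe.rpush(inbox_key, url)  # push back onto the inbox queue
        pipe.delete(fail_key)           # clear the failure hash atomically
        pipe.execute()
    return len(failed)
```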
## Resource Management Strategy (Proxies and Accounts)
The system uses an intelligent strategy for managing the lifecycle and state of accounts and proxies in order to maximize the success rate and minimize bans.
- **Account lifecycle ("Cooldown"):**
  - To prevent burnout, accounts automatically enter a "resting" state (`RESTING`) after a period of intensive use.
  - When the rest period expires, they automatically return to `ACTIVE` and become available to workers again.
- **Smart ban strategy:**
  - **Account banned first:** When a serious error occurs (e.g., `BOT_DETECTED`), the system penalizes **only the account** that caused the failure. The proxy keeps working.
  - **Sliding-window proxy bans:** A proxy is banned automatically only if it shows **systematic failures with DIFFERENT accounts** within a short period of time, which is a reliable indicator that the proxy itself is the problem.
- **Monitoring:**
  - The `ytdlp_mgmt_proxy_account` DAG is the primary monitoring tool. It shows the current status of all resources, including the time remaining until banned or resting accounts become active.
  - The `ytdlp_ops_worker_per_url` DAG graph now explicitly shows steps such as `assign_account`, `get_token`, `ban_account`, and `retry_get_token`, which makes debugging easier.
## External Services
### `ytdlp-ops-server` (Thrift Service)
- **Purpose:** An external service that provides authentication data (tokens, cookies, proxy) for video downloads.
- **Interaction:** The worker DAG (`ytdlp_ops_worker_per_url`) calls this service before starting a download to obtain the data `yt-dlp` needs.
## Worker DAG Logic (`ytdlp_ops_worker_per_url`)
This DAG is the workhorse of the system. It is designed as a self-sustaining loop that processes one URL per run.
### Tasks and their purpose:
- **`pull_url_from_redis`**: Pops a single URL from the `_inbox` queue in Redis. If the queue is empty, the DAG finishes with a `skipped` status, stopping this processing lane.
- **`assign_account`**: Selects an account for the task. It reuses the account that succeeded in the previous run of its lane (account affinity). On a first run, it picks a random account.
- **`get_token`**: The core task. It calls `ytdlp-ops-server` to obtain the `info.json`.
- **`handle_bannable_error_branch`**: If `get_token` fails with a ban-worthy error, this branching task decides what to do next based on the `on_bannable_failure` policy.
- **`ban_account_and_prepare_for_retry`**: If the policy allows a retry, this task bans the failed account and selects a new one for the retry.
- **`retry_get_token`**: Makes a second attempt to obtain a token with the new account.
- **`ban_second_account_and_proxy`**: If the second attempt also fails, this task bans the second account and the proxy that was used.
- **`download_and_probe`**: If `get_token` (or `retry_get_token`) succeeded, this task uses `yt-dlp` to download the media and `ffmpeg` to verify the integrity of the downloaded file.
- **`mark_url_as_success`**: If `download_and_probe` succeeded, this task writes the result to the `_result` hash in Redis.
- **`handle_generic_failure`**: If any of the main tasks fails with an unrecoverable error, this task writes detailed error information to the `_fail` hash in Redis.
- **`decide_what_to_do_next`**: A branching task that runs after success or failure. It decides whether to continue the loop (a self-trigger sketch follows this list).
- **`trigger_self_run`**: The task that actually triggers the next DAG run, creating the continuous loop.
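A minimal sketch of `trigger_self_run` using the stock `TriggerDagRunOperator` (an assumption; the real task may build `conf` dynamically from the current run's parameters):
```python
from airflow.operators.trigger_dagrun import TriggerDagRunOperator

trigger_self_run = TriggerDagRunOperator(
    task_id="trigger_self_run",
    trigger_dag_id="ytdlp_ops_worker_per_url",  # the DAG re-triggers itself
    conf={"queue_name": "video_queue"},         # hypothetical loop parameters
    wait_for_completion=False,  # fire-and-forget: the next run loops on its own
)
```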


@ -634,7 +634,7 @@ with DAG(
),
# --- Params for 'list_contents' ---
"queue_to_list": Param(
'video_queue_inbox,video_queue_fail',
'video_queue_inbox,video_queue_result,video_queue_fail',
type="string",
title="[list_contents] Queues to List",
description="Comma-separated list of exact Redis key names to list.",


@ -238,9 +238,15 @@ def handle_bannable_error_branch(task_id_to_check: str, **context):
logger.error(f"Task {task_id_to_check} failed without error details. Marking as fatal.")
return 'handle_fatal_error'
error_message = error_details.get('error_message', '').strip()
error_code = error_details.get('error_code', '').strip()
policy = params.get('on_bannable_failure', 'retry_with_new_account')
# Check if this is an age confirmation error - should not stop the loop
if "Sign in to confirm your age" in error_message or "confirm your age" in error_message.lower():
logger.info(f"Age confirmation error detected for '{task_id_to_check}'. This is a content restriction, not a bot detection issue.")
return 'handle_age_restriction_error'
# Fatal Thrift connection errors that should stop all processing.
if error_code == 'TRANSPORT_ERROR':
logger.error(f"Fatal Thrift connection error from '{task_id_to_check}'. Stopping processing.")
@ -574,8 +580,15 @@ def report_failure_and_continue(**context):
try:
client = _get_redis_client(params['redis_conn_id'])
client.hset(f"{params['queue_name']}_result", url, json.dumps(result_data))
logger.info(f"Stored failure result for URL '{url}'.")
result_queue = f"{params['queue_name']}_result"
fail_queue = f"{params['queue_name']}_fail"
with client.pipeline() as pipe:
pipe.hset(result_queue, url, json.dumps(result_data))
pipe.hset(fail_queue, url, json.dumps(result_data))
pipe.execute()
logger.info(f"Stored failure result for URL '{url}' in '{result_queue}' and '{fail_queue}'.")
except Exception as e:
logger.error(f"Could not report failure to Redis: {e}", exc_info=True)
@ -610,8 +623,8 @@ def handle_fatal_error(**context):
# Report failure to Redis so the URL can be reprocessed later
try:
result_data = {
'status': 'failed',
'end_time': time.time(),
'status': 'failed',
'end_time': time.time(),
'url': url,
'dag_run_id': context['dag_run'].run_id,
'error': 'fatal_error',
@ -619,8 +632,15 @@ def handle_fatal_error(**context):
'error_details': error_details
}
client = _get_redis_client(params['redis_conn_id'])
client.hset(f"{params['queue_name']}_result", url, json.dumps(result_data))
logger.info(f"Stored fatal error result for URL '{url}' in Redis for later reprocessing.")
result_queue = f"{params['queue_name']}_result"
fail_queue = f"{params['queue_name']}_fail"
with client.pipeline() as pipe:
pipe.hset(result_queue, url, json.dumps(result_data))
pipe.hset(fail_queue, url, json.dumps(result_data))
pipe.execute()
logger.info(f"Stored fatal error result for URL '{url}' in '{result_queue}' and '{fail_queue}' for later reprocessing.")
except Exception as e:
logger.error(f"Could not report fatal error to Redis: {e}", exc_info=True)
@ -669,8 +689,14 @@ def handle_retry_failure_branch(task_id_to_check: str, **context):
if not error_details:
return 'handle_fatal_error'
error_message = error_details.get('error_message', '').strip()
error_code = error_details.get('error_code', '').strip()
# Check if this is an age confirmation error - should not stop the loop
if "Sign in to confirm your age" in error_message or "confirm your age" in error_message.lower():
logger.info(f"Age confirmation error detected on retry from '{task_id_to_check}'. Reporting failure and continuing loop.")
return 'report_failure_and_continue'
if error_code == 'TRANSPORT_ERROR':
logger.error(f"Fatal Thrift connection error on retry from '{task_id_to_check}'.")
return 'handle_fatal_error'
@ -715,6 +741,61 @@ def coalesce_token_data(get_token_result=None, retry_get_token_result=None):
# This should not be reached if trigger_rule='one_success' is working correctly.
raise AirflowException("Could not find a successful token result from any attempt.")
@task(trigger_rule='one_failed')
def handle_age_restriction_error(**context):
"""
Handles age restriction errors specifically. These are content restrictions
that cannot be bypassed by using different accounts, so we report the failure
and continue the processing loop rather than stopping it.
"""
params = context['params']
ti = context['task_instance']
url = params.get('url_to_process', 'unknown')
# Collect error details
error_details = {}
first_token_task_id = 'get_token'
retry_token_task_id = 'retry_get_token'
first_token_error = ti.xcom_pull(task_ids=first_token_task_id, key='error_details')
retry_token_error = ti.xcom_pull(task_ids=retry_token_task_id, key='error_details')
# Use the most recent error details
if retry_token_error:
error_details = retry_token_error
elif first_token_error:
error_details = first_token_error
logger.error(f"Age restriction error for URL '{url}'. This content requires age confirmation and cannot be bypassed.")
# Report failure to Redis so the URL can be marked as failed
try:
result_data = {
'status': 'failed',
'end_time': time.time(),
'url': url,
'dag_run_id': context['dag_run'].run_id,
'error': 'age_restriction',
'error_message': 'Content requires age confirmation',
'error_details': error_details
}
client = _get_redis_client(params['redis_conn_id'])
result_queue = f"{params['queue_name']}_result"
fail_queue = f"{params['queue_name']}_fail"
with client.pipeline() as pipe:
pipe.hset(result_queue, url, json.dumps(result_data))
pipe.hset(fail_queue, url, json.dumps(result_data))
pipe.execute()
logger.info(f"Stored age restriction error for URL '{url}' in '{result_queue}' and '{fail_queue}'.")
except Exception as e:
logger.error(f"Could not report age restriction error to Redis: {e}", exc_info=True)
# This is NOT a fatal error for the processing loop - we just continue with the next URL
# =============================================================================
# DAG Definition with TaskGroups
# =============================================================================
@@ -755,6 +836,7 @@ with DAG(
fatal_error_task = handle_fatal_error()
report_failure_task = report_failure_and_continue()
continue_loop_task = continue_processing_loop()
age_restriction_task = handle_age_restriction_error()
# --- Task Group 1: Initial Attempt ---
with TaskGroup("initial_attempt", tooltip="Initial token acquisition attempt") as initial_attempt_group:
@@ -770,7 +852,7 @@ with DAG(
)
first_token_attempt >> initial_branch_task
initial_branch_task >> [fatal_error_task, ban_and_report_immediately_task]
initial_branch_task >> [fatal_error_task, ban_and_report_immediately_task, age_restriction_task]
# --- Task Group 2: Retry Logic ---
with TaskGroup("retry_logic", tooltip="Retry logic with account management") as retry_logic_group:
@@ -820,7 +902,7 @@ with DAG(
direct_retry_account_task >> coalesced_retry_data
coalesced_retry_data >> retry_token_task
retry_token_task >> retry_branch_task
retry_branch_task >> [fatal_error_task, report_failure_task, ban_after_retry_report_task]
retry_branch_task >> [fatal_error_task, report_failure_task, ban_after_retry_report_task, age_restriction_task]
ban_after_retry_report_task >> report_failure_task
# --- Task Group 3: Download and Processing ---
@@ -849,10 +931,13 @@ with DAG(
# --- DAG Dependencies between TaskGroups ---
# Initial attempt can lead to retry logic or direct failure
initial_branch_task >> [retry_logic_group, fatal_error_task, ban_and_report_immediately_task]
initial_branch_task >> [retry_logic_group, fatal_error_task, ban_and_report_immediately_task, age_restriction_task]
# Retry logic leads to download processing on success or failure reporting on failure
retry_branch_task >> [download_processing_group, report_failure_task]
# Ban and report immediately leads to failure reporting
ban_and_report_immediately_task >> report_failure_task
# Age restriction error leads to failure reporting and continues the loop
age_restriction_task >> continue_loop_task
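Summarizing the `>>` edges in this hunk, the routing after this change is (success path left to right):

```
initial_attempt ─branch─┬─> retry_logic ─branch─┬─> download_processing
                        │                       ├─> report_failure_and_continue
                        │                       ├─> ban_after_retry ─> report_failure_and_continue
                        │                       ├─> handle_age_restriction_error ─> continue_processing_loop
                        │                       └─> handle_fatal_error
                        ├─> ban_and_report_immediately ─> report_failure_and_continue
                        ├─> handle_age_restriction_error ─> continue_processing_loop
                        └─> handle_fatal_error
```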


@@ -113,6 +113,8 @@ def generate_configs():
# The templates are in the 'configs' directory.
env = Environment(loader=FileSystemLoader(configs_dir), trim_blocks=True, lstrip_blocks=True)
# Make the helper function available to Jinja2 templates
env.globals['_get_port_from_proxy_url'] = _get_port_from_proxy_url
# Get service role from environment to determine what to generate
service_role = os.getenv('SERVICE_ROLE', 'all-in-one')
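Registering the helper on `env.globals` is what makes it callable from inside the Jinja2 templates. A minimal standalone demonstration of the mechanism (the one-line helper here is a stand-in, not the project's implementation):

```python
from jinja2 import Environment

def _get_port_from_proxy_url(url):
    # Stand-in for demonstration only; see the urlparse-based sketch
    # after the next hunk for a more plausible version.
    return url.rsplit(":", 1)[-1]

env = Environment()
env.globals["_get_port_from_proxy_url"] = _get_port_from_proxy_url

template = env.from_string("port={{ _get_port_from_proxy_url('socks5://host:1080') }}")
print(template.render())  # -> port=1080
```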
@@ -165,11 +167,14 @@ def generate_configs():
for i, proxy in enumerate(camoufox_proxies):
proxy_port = _get_port_from_proxy_url(proxy['url'])
if proxy_port:
# Use the correct container name pattern that matches the docker-compose template
# The container name in the template is: ytdlp-ops-camoufox-{{ proxy_port }}-{{ loop.index }}-1
container_name = f"ytdlp-ops-camoufox-{proxy_port}-{i+1}-1"
container_base_port = camoufox_port + i * worker_count
endpoints = []
for j in range(worker_count):
port = container_base_port + j
endpoints.append(f"ws://{camoufox_backend_prefix}{i+1}:{port}/mypath")
endpoints.append(f"ws://{container_name}:{port}/mypath")
endpoints_map[proxy_port] = {
"ws_endpoints": endpoints


@@ -1,9 +0,0 @@
thrift>=0.16.0,<=0.20.0
backoff>=2.2.1
python-dotenv==1.0.1
psutil>=5.9.0
docker>=6.0.0
apache-airflow-providers-docker
redis
ffprobe3
ffmpeg-python


@@ -9,85 +9,222 @@
path: "{{ airflow_master_dir }}"
state: directory
owner: "{{ ssh_user }}"
group: ytdl
group: "{{ deploy_group }}"
mode: '0755'
become: yes
when: not master_dir_stat.stat.exists
- name: Ensure Airflow master configs directory exists
file:
path: "{{ airflow_master_dir }}/configs"
state: directory
owner: "{{ ssh_user }}"
group: "{{ deploy_group }}"
mode: '0755'
become: yes
- name: Ensure Airflow master config directory exists
file:
path: "{{ airflow_master_dir }}/config"
state: directory
owner: "{{ ssh_user }}"
group: "{{ deploy_group }}"
mode: '0755'
become: yes
- name: Ensure Airflow operational directories exist with correct permissions
file:
path: "{{ airflow_master_dir }}/{{ item }}"
state: directory
owner: "{{ airflow_uid }}"
group: "{{ deploy_group }}"
mode: '0775'
become: yes
loop:
- "dags"
- "logs"
- "plugins"
- "downloadfiles"
- "addfiles"
- "inputfiles"
- name: Check if source directories exist
stat:
path: "{{ playbook_dir }}/../{{ item }}"
path: "../{{ item }}"
register: source_dirs
loop:
- "airflow/inputfiles"
- "airflow/plugins"
- "airflow/addfiles"
- "airflow/bgutil-ytdlp-pot-provider"
- name: "Log: Syncing Airflow core files"
debug:
msg: "Syncing DAGs, configs, and Python source code to the master node."
- name: Sync Airflow master files
synchronize:
src: "{{ playbook_dir }}/../{{ item }}"
src: "../{{ item }}"
dest: "{{ airflow_master_dir }}/"
archive: yes
recursive: yes
delete: yes
rsync_path: "sudo rsync"
rsync_opts: "{{ rsync_default_opts }}"
loop:
- "airflow/Dockerfile"
- "airflow/docker-compose-master.yaml"
- "airflow/dags/"
- "airflow/config/"
- "airflow/Dockerfile.caddy"
- "airflow/.dockerignore"
- "airflow/dags"
- "airflow/inputfiles"
- "setup.py"
- "yt_ops_services/"
- "thrift_model/"
- "yt_ops_services"
- "thrift_model"
- "VERSION"
- "airflow/init-airflow.sh"
- "airflow/nginx.conf"
- "airflow/update-yt-dlp.sh"
- "get_info_json_client.py"
- "proxy_manager_client.py"
- "utils"
- name: Copy custom Python config files to master
copy:
src: "../airflow/config/{{ item }}"
dest: "{{ airflow_master_dir }}/config/{{ item }}"
owner: "{{ ssh_user }}"
group: "{{ deploy_group }}"
mode: '0644'
become: yes
loop:
- "custom_task_hooks.py"
- "airflow_local_settings.py"
- name: Ensure any existing airflow.cfg directory is removed
file:
path: "{{ airflow_master_dir }}/config/airflow.cfg"
state: absent
become: yes
ignore_errors: yes
- name: Copy airflow.cfg to master
copy:
src: "../airflow/airflow.cfg"
dest: "{{ airflow_master_dir }}/config/airflow.cfg"
owner: "{{ ssh_user }}"
group: "{{ deploy_group }}"
mode: '0644'
become: yes
- name: Sync Airflow master config files
synchronize:
src: "../airflow/configs/{{ item }}"
dest: "{{ airflow_master_dir }}/configs/"
archive: yes
recursive: yes
rsync_path: "sudo rsync"
rsync_opts: "{{ rsync_default_opts }}"
loop:
- "nginx.conf"
- "Caddyfile"
- name: Sync optional directories if they exist
synchronize:
src: "{{ playbook_dir }}/../{{ item }}/"
dest: "{{ airflow_master_dir }}/{{ item | basename }}/"
src: "../{{ item.item }}/"
dest: "{{ airflow_master_dir }}/{{ item.item | basename }}/"
archive: yes
recursive: yes
delete: yes
rsync_path: "sudo rsync"
rsync_opts: "{{ rsync_default_opts }}"
loop:
- "airflow/inputfiles"
- "airflow/plugins"
- "airflow/addfiles"
- "airflow/bgutil-ytdlp-pot-provider"
when: source_dirs.results | selectattr('item', 'equalto', item) | map(attribute='stat.exists') | first
loop: "{{ source_dirs.results }}"
when: item.stat.exists
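The rewritten task above loops over `source_dirs.results` (registered by the earlier `stat` loop) and gates each sync on `item.stat.exists`, which is simpler than the old `selectattr` lookup. For reference, each element of such a loop-registered result has roughly this shape (illustrative, trimmed to the fields used here):

```python
# One element of source_dirs.results, trimmed to what the task consumes;
# real entries carry many more stat fields (size, mode, isdir, ...).
{
    "item": "airflow/plugins",   # the original loop item from the stat task
    "stat": {"exists": True},    # False when the source directory is absent
}
```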
- name: Sync pangramia thrift files
synchronize:
src: "{{ playbook_dir }}/../thrift_model/gen_py/pangramia/"
src: "../thrift_model/gen_py/pangramia/"
dest: "{{ airflow_master_dir }}/pangramia/"
archive: yes
recursive: yes
delete: yes
rsync_path: "sudo rsync"
rsync_opts: "{{ rsync_default_opts }}"
- name: Create .env file for Airflow master service
- name: Template docker-compose file for master
template:
src: "../../templates/.env.master.j2"
dest: "{{ airflow_master_dir }}/.env"
src: "{{ playbook_dir }}/../airflow/configs/docker-compose-master.yaml.j2"
dest: "{{ airflow_master_dir }}/configs/docker-compose-master.yaml"
mode: "{{ file_permissions }}"
owner: "{{ ssh_user }}"
group: ytdl
vars:
service_role: "master"
group: "{{ deploy_group }}"
become: yes
- name: Template Redis connection file
template:
src: "../airflow/config/redis_default_conn.json.j2"
dest: "{{ airflow_master_dir }}/config/redis_default_conn.json"
mode: "{{ file_permissions }}"
owner: "{{ ssh_user }}"
group: "{{ deploy_group }}"
become: yes
- name: Template Minio connection file for master
template:
src: "../airflow/config/minio_default_conn.json.j2"
dest: "{{ airflow_master_dir }}/config/minio_default_conn.json"
mode: "{{ file_permissions }}"
owner: "{{ ssh_user }}"
group: "{{ deploy_group }}"
become: yes
- name: Ensure config directory is group-writable for Airflow initialization
file:
path: "{{ airflow_master_dir }}/config"
state: directory
mode: '0775'
owner: "{{ ssh_user }}"
group: "{{ deploy_group }}"
become: yes
- name: Ensure airflow.cfg is group-writable for Airflow initialization
file:
path: "{{ airflow_master_dir }}/config/airflow.cfg"
state: file
mode: '0664'
owner: "{{ ssh_user }}"
group: "{{ deploy_group }}"
become: yes
- name: Create symlink for docker-compose.yaml
file:
src: "{{ airflow_master_dir }}/docker-compose-master.yaml"
src: "{{ airflow_master_dir }}/configs/docker-compose-master.yaml"
dest: "{{ airflow_master_dir }}/docker-compose.yaml"
state: link
owner: "{{ ssh_user }}"
group: ytdl
group: "{{ deploy_group }}"
force: yes
follow: no
- name: Ensure correct permissions for build context
file:
path: "{{ airflow_master_dir }}"
state: directory
owner: "{{ ssh_user }}"
group: "{{ deploy_group }}"
recurse: yes
become: yes
- name: Ensure postgres-data directory exists on master and has correct permissions
file:
path: "{{ airflow_master_dir }}/postgres-data"
state: directory
owner: "999" # UID for the 'postgres' user in the official postgres image
group: "999" # GID for the 'postgres' group in the official postgres image
mode: '0700'
become: yes
- name: Set group-writable and setgid permissions on master logs directory contents
shell: |
find {{ airflow_master_dir }}/logs -type d -exec chmod g+rws {} +
find {{ airflow_master_dir }}/logs -type f -exec chmod g+rw {} +
become: yes
- name: Verify Dockerfile exists in build directory
stat:
@@ -99,26 +236,96 @@
msg: "Dockerfile not found in {{ airflow_master_dir }}. Cannot build image."
when: not dockerfile_stat.stat.exists
- name: "Log: Building Airflow Docker image"
debug:
msg: "Building the main Airflow Docker image ({{ airflow_image_name }}) locally on the master node. This may take a few minutes."
- name: Build Airflow master image
community.docker.docker_image:
name: "{{ airflow_image_name }}"
build:
path: "{{ airflow_master_dir }}"
dockerfile: "Dockerfile"
dockerfile: "Dockerfile" # Explicitly specify the Dockerfile name
source: build
force_source: true
when: not fast_deploy | default(false)
- name: Run Airflow init script
shell:
cmd: "chmod +x init-airflow.sh && ./init-airflow.sh"
chdir: "{{ airflow_master_dir }}"
- name: "Log: Preparing assets for Caddy image"
debug:
msg: "Extracting static assets from the Airflow image to build the Caddy reverse proxy."
when: not fast_deploy | default(false)
- name: Prepare Caddy asset extraction directory
file:
path: "{{ airflow_master_dir }}/caddy_build_assets"
state: "{{ item }}"
owner: "{{ ssh_user }}"
group: "{{ deploy_group }}"
mode: '0755'
loop:
- absent
- directory
become: yes
become_user: "{{ ssh_user }}"
when: not fast_deploy | default(false)
- name: Ensure subdirectories exist with correct permissions
file:
path: "{{ airflow_master_dir }}/caddy_build_assets/{{ item }}"
state: directory
owner: "{{ ssh_user }}"
group: "{{ deploy_group }}"
mode: '0755'
loop:
- "appbuilder"
- "dist"
become: yes
when: not fast_deploy | default(false)
- name: Extract static assets from Airflow image for Caddy build
shell: |
set -e
CONTAINER_ID=$(docker create {{ airflow_image_name }})
# Dynamically find paths inside the container
APPBUILDER_PATH=$(docker run --rm --entrypoint "" {{ airflow_image_name }} python -c 'import os, flask_appbuilder; print(os.path.join(os.path.dirname(flask_appbuilder.__file__), "static", "appbuilder"))')
AIRFLOW_DIST_PATH=$(docker run --rm --entrypoint "" {{ airflow_image_name }} python -c 'import os, airflow; print(os.path.join(os.path.dirname(airflow.__file__), "www/static/dist"))')
# Copy assets from container to host
docker cp "${CONTAINER_ID}:${APPBUILDER_PATH}/." "./caddy_build_assets/appbuilder"
docker cp "${CONTAINER_ID}:${AIRFLOW_DIST_PATH}/." "./caddy_build_assets/dist"
docker rm -f $CONTAINER_ID
# Pre-compress assets
find ./caddy_build_assets/appbuilder -type f -print0 | xargs -0 gzip -k -9
find ./caddy_build_assets/dist -type f -print0 | xargs -0 gzip -k -9
args:
chdir: "{{ airflow_master_dir }}"
executable: /bin/bash
become: yes
register: asset_extraction
changed_when: asset_extraction.rc == 0
when: not fast_deploy | default(false)
- name: "Log: Building Caddy reverse proxy image"
debug:
msg: "Building the Caddy image (pangramia/ytdlp-ops-caddy:latest) to serve static assets."
- name: Build Caddy image
community.docker.docker_image:
name: "pangramia/ytdlp-ops-caddy:latest"
build:
path: "{{ airflow_master_dir }}"
dockerfile: "Dockerfile.caddy"
source: build
force_source: true
when: not fast_deploy | default(false)
- name: "Log: Starting Airflow services"
debug:
msg: "Starting Airflow core services (webserver, scheduler, etc.) on the master node using docker-compose."
- name: Start Airflow master service
community.docker.docker_compose_v2:
project_src: "{{ airflow_master_dir }}"
files:
- "docker-compose-master.yaml"
- "configs/docker-compose-master.yaml"
state: present
remove_orphans: true
pull: "{{ 'never' if fast_deploy | default(false) else 'missing' }}"


@@ -1,103 +0,0 @@
---
- name: Check if Airflow worker deployment directory exists
stat:
path: "{{ airflow_worker_dir }}"
register: worker_dir_stat
- name: Ensure Airflow worker deployment directory exists
file:
path: "{{ airflow_worker_dir }}"
state: directory
owner: "{{ ssh_user }}"
group: ytdl
mode: '0755'
become: yes
when: not worker_dir_stat.stat.exists
- name: Sync Airflow worker files
synchronize:
src: "{{ playbook_dir }}/../{{ item }}"
dest: "{{ airflow_worker_dir }}/"
archive: yes
recursive: yes
delete: yes
rsync_opts: "{{ rsync_default_opts }}"
loop:
- "airflow/Dockerfile"
- "airflow/docker-compose-dl.yaml"
- "airflow/dags/"
- "airflow/config/"
- "setup.py"
- "yt_ops_services/"
- "thrift_model/"
- "VERSION"
- "airflow/init-airflow.sh"
- "get_info_json_client.py"
- "proxy_manager_client.py"
- "token_generator/"
- "utils/"
- name: Check if inputfiles directory exists
stat:
path: "{{ playbook_dir }}/../airflow/inputfiles"
register: inputfiles_stat
- name: Sync inputfiles directory if it exists
synchronize:
src: "{{ playbook_dir }}/../airflow/inputfiles/"
dest: "{{ airflow_worker_dir }}/inputfiles/"
archive: yes
recursive: yes
delete: yes
rsync_opts: "{{ rsync_default_opts }}"
when: inputfiles_stat.stat.exists
- name: Sync pangramia thrift files
synchronize:
src: "{{ playbook_dir }}/../thrift_model/gen_py/pangramia/"
dest: "{{ airflow_worker_dir }}/pangramia/"
archive: yes
recursive: yes
delete: yes
rsync_opts: "{{ rsync_default_opts }}"
- name: Create .env file for Airflow worker service
template:
src: "../../templates/.env.worker.j2"
dest: "{{ airflow_worker_dir }}/.env"
mode: "{{ file_permissions }}"
owner: "{{ ssh_user }}"
group: ytdl
vars:
service_role: "worker"
- name: Create symlink for docker-compose.yaml
file:
src: "{{ airflow_worker_dir }}/docker-compose-dl.yaml"
dest: "{{ airflow_worker_dir }}/docker-compose.yaml"
state: link
owner: "{{ ssh_user }}"
group: ytdl
- name: Build Airflow worker image
community.docker.docker_image:
name: "{{ airflow_image_name }}"
build:
path: "{{ airflow_worker_dir }}"
source: build
force_source: true
- name: Run Airflow init script
shell:
cmd: "chmod +x init-airflow.sh && ./init-airflow.sh"
chdir: "{{ airflow_worker_dir }}"
become: yes
become_user: "{{ ssh_user }}"
- name: Start Airflow worker service
community.docker.docker_compose_v2:
project_src: "{{ airflow_worker_dir }}"
files:
- "docker-compose-dl.yaml"
state: present
remove_orphans: true


@@ -1,17 +0,0 @@
#!/bin/sh
set -e
# Wait for MinIO to be ready
until (mc alias set local http://minio:9000 admin 0153093693-0009) do
echo 'Waiting for MinIO...'
sleep 1
done
# Create bucket if it doesn't exist
if ! mc ls local/airflow-logs >/dev/null 2>&1; then
mc mb local/airflow-logs
mc anonymous set download local/airflow-logs
echo 'MinIO bucket initialized'
else
echo 'MinIO bucket already exists'
fi


@@ -1,564 +0,0 @@
#
# Autogenerated by Thrift Compiler (0.20.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
from thrift.TRecursive import fix_spec
import sys
import logging
from .ttypes import *
from thrift.Thrift import TProcessor
from thrift.transport import TTransport
all_structs = []
class Iface(object):
def ping(self):
pass
def reportError(self, message, details):
"""
Parameters:
- message
- details
"""
pass
def shutdown(self):
pass
class Client(Iface):
def __init__(self, iprot, oprot=None):
self._iprot = self._oprot = iprot
if oprot is not None:
self._oprot = oprot
self._seqid = 0
def ping(self):
self.send_ping()
return self.recv_ping()
def send_ping(self):
self._oprot.writeMessageBegin('ping', TMessageType.CALL, self._seqid)
args = ping_args()
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_ping(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = ping_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.serviceExp is not None:
raise result.serviceExp
if result.userExp is not None:
raise result.userExp
raise TApplicationException(TApplicationException.MISSING_RESULT, "ping failed: unknown result")
def reportError(self, message, details):
"""
Parameters:
- message
- details
"""
self.send_reportError(message, details)
return self.recv_reportError()
def send_reportError(self, message, details):
self._oprot.writeMessageBegin('reportError', TMessageType.CALL, self._seqid)
args = reportError_args()
args.message = message
args.details = details
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_reportError(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = reportError_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.serviceExp is not None:
raise result.serviceExp
if result.userExp is not None:
raise result.userExp
raise TApplicationException(TApplicationException.MISSING_RESULT, "reportError failed: unknown result")
def shutdown(self):
self.send_shutdown()
def send_shutdown(self):
self._oprot.writeMessageBegin('shutdown', TMessageType.ONEWAY, self._seqid)
args = shutdown_args()
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
class Processor(Iface, TProcessor):
def __init__(self, handler):
self._handler = handler
self._processMap = {}
self._processMap["ping"] = Processor.process_ping
self._processMap["reportError"] = Processor.process_reportError
self._processMap["shutdown"] = Processor.process_shutdown
self._on_message_begin = None
def on_message_begin(self, func):
self._on_message_begin = func
def process(self, iprot, oprot):
(name, type, seqid) = iprot.readMessageBegin()
if self._on_message_begin:
self._on_message_begin(name, type, seqid)
if name not in self._processMap:
iprot.skip(TType.STRUCT)
iprot.readMessageEnd()
x = TApplicationException(TApplicationException.UNKNOWN_METHOD, 'Unknown function %s' % (name))
oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid)
x.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
return
else:
self._processMap[name](self, seqid, iprot, oprot)
return True
def process_ping(self, seqid, iprot, oprot):
args = ping_args()
args.read(iprot)
iprot.readMessageEnd()
result = ping_result()
try:
result.success = self._handler.ping()
msg_type = TMessageType.REPLY
except TTransport.TTransportException:
raise
except pangramia.yt.exceptions.ttypes.PBServiceException as serviceExp:
msg_type = TMessageType.REPLY
result.serviceExp = serviceExp
except pangramia.yt.exceptions.ttypes.PBUserException as userExp:
msg_type = TMessageType.REPLY
result.userExp = userExp
except TApplicationException as ex:
logging.exception('TApplication exception in handler')
msg_type = TMessageType.EXCEPTION
result = ex
except Exception:
logging.exception('Unexpected exception in handler')
msg_type = TMessageType.EXCEPTION
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("ping", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_reportError(self, seqid, iprot, oprot):
args = reportError_args()
args.read(iprot)
iprot.readMessageEnd()
result = reportError_result()
try:
result.success = self._handler.reportError(args.message, args.details)
msg_type = TMessageType.REPLY
except TTransport.TTransportException:
raise
except pangramia.yt.exceptions.ttypes.PBServiceException as serviceExp:
msg_type = TMessageType.REPLY
result.serviceExp = serviceExp
except pangramia.yt.exceptions.ttypes.PBUserException as userExp:
msg_type = TMessageType.REPLY
result.userExp = userExp
except TApplicationException as ex:
logging.exception('TApplication exception in handler')
msg_type = TMessageType.EXCEPTION
result = ex
except Exception:
logging.exception('Unexpected exception in handler')
msg_type = TMessageType.EXCEPTION
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("reportError", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_shutdown(self, seqid, iprot, oprot):
args = shutdown_args()
args.read(iprot)
iprot.readMessageEnd()
try:
self._handler.shutdown()
except TTransport.TTransportException:
raise
except Exception:
logging.exception('Exception in oneway handler')
# HELPER FUNCTIONS AND STRUCTURES
class ping_args(object):
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('ping_args')
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(ping_args)
ping_args.thrift_spec = (
)
class ping_result(object):
"""
Attributes:
- success
- serviceExp
- userExp
"""
def __init__(self, success=None, serviceExp=None, userExp=None,):
self.success = success
self.serviceExp = serviceExp
self.userExp = userExp
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.BOOL:
self.success = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.serviceExp = pangramia.yt.exceptions.ttypes.PBServiceException.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.userExp = pangramia.yt.exceptions.ttypes.PBUserException.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('ping_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.BOOL, 0)
oprot.writeBool(self.success)
oprot.writeFieldEnd()
if self.serviceExp is not None:
oprot.writeFieldBegin('serviceExp', TType.STRUCT, 1)
self.serviceExp.write(oprot)
oprot.writeFieldEnd()
if self.userExp is not None:
oprot.writeFieldBegin('userExp', TType.STRUCT, 2)
self.userExp.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(ping_result)
ping_result.thrift_spec = (
(0, TType.BOOL, 'success', None, None, ), # 0
(1, TType.STRUCT, 'serviceExp', [pangramia.yt.exceptions.ttypes.PBServiceException, None], None, ), # 1
(2, TType.STRUCT, 'userExp', [pangramia.yt.exceptions.ttypes.PBUserException, None], None, ), # 2
)
class reportError_args(object):
"""
Attributes:
- message
- details
"""
def __init__(self, message=None, details=None,):
self.message = message
self.details = details
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.message = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.MAP:
self.details = {}
(_ktype1, _vtype2, _size0) = iprot.readMapBegin()
for _i4 in range(_size0):
_key5 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
_val6 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
self.details[_key5] = _val6
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('reportError_args')
if self.message is not None:
oprot.writeFieldBegin('message', TType.STRING, 1)
oprot.writeString(self.message.encode('utf-8') if sys.version_info[0] == 2 else self.message)
oprot.writeFieldEnd()
if self.details is not None:
oprot.writeFieldBegin('details', TType.MAP, 2)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.details))
for kiter7, viter8 in self.details.items():
oprot.writeString(kiter7.encode('utf-8') if sys.version_info[0] == 2 else kiter7)
oprot.writeString(viter8.encode('utf-8') if sys.version_info[0] == 2 else viter8)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(reportError_args)
reportError_args.thrift_spec = (
None, # 0
(1, TType.STRING, 'message', 'UTF8', None, ), # 1
(2, TType.MAP, 'details', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None, ), # 2
)
class reportError_result(object):
"""
Attributes:
- success
- serviceExp
- userExp
"""
def __init__(self, success=None, serviceExp=None, userExp=None,):
self.success = success
self.serviceExp = serviceExp
self.userExp = userExp
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.BOOL:
self.success = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.serviceExp = pangramia.yt.exceptions.ttypes.PBServiceException.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.userExp = pangramia.yt.exceptions.ttypes.PBUserException.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('reportError_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.BOOL, 0)
oprot.writeBool(self.success)
oprot.writeFieldEnd()
if self.serviceExp is not None:
oprot.writeFieldBegin('serviceExp', TType.STRUCT, 1)
self.serviceExp.write(oprot)
oprot.writeFieldEnd()
if self.userExp is not None:
oprot.writeFieldBegin('userExp', TType.STRUCT, 2)
self.userExp.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(reportError_result)
reportError_result.thrift_spec = (
(0, TType.BOOL, 'success', None, None, ), # 0
(1, TType.STRUCT, 'serviceExp', [pangramia.yt.exceptions.ttypes.PBServiceException, None], None, ), # 1
(2, TType.STRUCT, 'userExp', [pangramia.yt.exceptions.ttypes.PBUserException, None], None, ), # 2
)
class shutdown_args(object):
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('shutdown_args')
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(shutdown_args)
shutdown_args.thrift_spec = (
)
fix_spec(all_structs)
del all_structs


@@ -1 +0,0 @@
__all__ = ['ttypes', 'constants', 'BaseService']


@@ -1,14 +0,0 @@
#
# Autogenerated by Thrift Compiler (0.20.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
from thrift.TRecursive import fix_spec
import sys
from .ttypes import *


@@ -1,20 +0,0 @@
#
# Autogenerated by Thrift Compiler (0.20.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
from thrift.TRecursive import fix_spec
import sys
import pangramia.yt.common.ttypes
import pangramia.yt.exceptions.ttypes
from thrift.transport import TTransport
all_structs = []
fix_spec(all_structs)
del all_structs


@@ -1 +0,0 @@
__all__ = ['ttypes', 'constants', 'YTAccountsOpService']


@@ -1,14 +0,0 @@
#
# Autogenerated by Thrift Compiler (0.20.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
from thrift.TRecursive import fix_spec
import sys
from .ttypes import *


@@ -1,21 +0,0 @@
#
# Autogenerated by Thrift Compiler (0.20.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
from thrift.TRecursive import fix_spec
import sys
import pangramia.yt.common.ttypes
import pangramia.yt.exceptions.ttypes
import pangramia.base_service.ttypes
from thrift.transport import TTransport
all_structs = []
fix_spec(all_structs)
del all_structs


@@ -1 +0,0 @@
__all__ = ['ttypes', 'constants']


@@ -1,14 +0,0 @@
#
# Autogenerated by Thrift Compiler (0.20.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
from thrift.TRecursive import fix_spec
import sys
from .ttypes import *


@@ -1,905 +0,0 @@
#
# Autogenerated by Thrift Compiler (0.20.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
from thrift.TRecursive import fix_spec
import sys
from thrift.transport import TTransport
all_structs = []
class JobState(object):
SUCCESS = 0
FAIL = 1
BOT_FORBIDDEN_ON_URL_ACCESS = 2
BOT_FORBIDDEN_ON_FILE_DOWNLOAD = 3
BOT_CAPTCHA = 4
BOT_AUTH_RELOGIN_REQUIRED = 5
BOT_AUTH_SMS_REQUIRED = 6
BOT_AUTH_DEVICE_QR_REQUIRED = 7
BOT_ACCOUNT_BANNED = 8
BOT_IP_BANNED = 9
_VALUES_TO_NAMES = {
0: "SUCCESS",
1: "FAIL",
2: "BOT_FORBIDDEN_ON_URL_ACCESS",
3: "BOT_FORBIDDEN_ON_FILE_DOWNLOAD",
4: "BOT_CAPTCHA",
5: "BOT_AUTH_RELOGIN_REQUIRED",
6: "BOT_AUTH_SMS_REQUIRED",
7: "BOT_AUTH_DEVICE_QR_REQUIRED",
8: "BOT_ACCOUNT_BANNED",
9: "BOT_IP_BANNED",
}
_NAMES_TO_VALUES = {
"SUCCESS": 0,
"FAIL": 1,
"BOT_FORBIDDEN_ON_URL_ACCESS": 2,
"BOT_FORBIDDEN_ON_FILE_DOWNLOAD": 3,
"BOT_CAPTCHA": 4,
"BOT_AUTH_RELOGIN_REQUIRED": 5,
"BOT_AUTH_SMS_REQUIRED": 6,
"BOT_AUTH_DEVICE_QR_REQUIRED": 7,
"BOT_ACCOUNT_BANNED": 8,
"BOT_IP_BANNED": 9,
}
class TokenUpdateMode(object):
AUTOREFRESH_AND_REMAIN_ANONYMOUS = 0
AUTOREFRESH_AND_ALLOW_AUTH = 1
AUTOREFRESH_AND_ONLY_AUTH = 2
CLEANUP_THEN_AUTOREFRESH_AND_ONLY_AUTH = 3
CLEANUP_THEN_AUTOREFRESH_AND_REMAIN_ANONYMOUS = 4
CLEANUP_THEN_AUTOREFRESH_AND_ALLOW_AUTH = 5
AUTO = 6
_VALUES_TO_NAMES = {
0: "AUTOREFRESH_AND_REMAIN_ANONYMOUS",
1: "AUTOREFRESH_AND_ALLOW_AUTH",
2: "AUTOREFRESH_AND_ONLY_AUTH",
3: "CLEANUP_THEN_AUTOREFRESH_AND_ONLY_AUTH",
4: "CLEANUP_THEN_AUTOREFRESH_AND_REMAIN_ANONYMOUS",
5: "CLEANUP_THEN_AUTOREFRESH_AND_ALLOW_AUTH",
6: "AUTO",
}
_NAMES_TO_VALUES = {
"AUTOREFRESH_AND_REMAIN_ANONYMOUS": 0,
"AUTOREFRESH_AND_ALLOW_AUTH": 1,
"AUTOREFRESH_AND_ONLY_AUTH": 2,
"CLEANUP_THEN_AUTOREFRESH_AND_ONLY_AUTH": 3,
"CLEANUP_THEN_AUTOREFRESH_AND_REMAIN_ANONYMOUS": 4,
"CLEANUP_THEN_AUTOREFRESH_AND_ALLOW_AUTH": 5,
"AUTO": 6,
}
class AccountPairState(object):
ACTIVE = 0
PAUSED = 1
REMOVED = 2
IN_PROGRESS = 3
ALL = 4
_VALUES_TO_NAMES = {
0: "ACTIVE",
1: "PAUSED",
2: "REMOVED",
3: "IN_PROGRESS",
4: "ALL",
}
_NAMES_TO_VALUES = {
"ACTIVE": 0,
"PAUSED": 1,
"REMOVED": 2,
"IN_PROGRESS": 3,
"ALL": 4,
}
class JobTokenData(object):
"""
Attributes:
- infoJson
- ytdlpCommand
- socks
- jobId
- url
- cookiesBlob
"""
def __init__(self, infoJson=None, ytdlpCommand=None, socks=None, jobId=None, url=None, cookiesBlob=None,):
self.infoJson = infoJson
self.ytdlpCommand = ytdlpCommand
self.socks = socks
self.jobId = jobId
self.url = url
self.cookiesBlob = cookiesBlob
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.infoJson = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.ytdlpCommand = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.socks = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRING:
self.jobId = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.STRING:
self.url = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.STRING:
self.cookiesBlob = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('JobTokenData')
if self.infoJson is not None:
oprot.writeFieldBegin('infoJson', TType.STRING, 1)
oprot.writeString(self.infoJson.encode('utf-8') if sys.version_info[0] == 2 else self.infoJson)
oprot.writeFieldEnd()
if self.ytdlpCommand is not None:
oprot.writeFieldBegin('ytdlpCommand', TType.STRING, 2)
oprot.writeString(self.ytdlpCommand.encode('utf-8') if sys.version_info[0] == 2 else self.ytdlpCommand)
oprot.writeFieldEnd()
if self.socks is not None:
oprot.writeFieldBegin('socks', TType.STRING, 3)
oprot.writeString(self.socks.encode('utf-8') if sys.version_info[0] == 2 else self.socks)
oprot.writeFieldEnd()
if self.jobId is not None:
oprot.writeFieldBegin('jobId', TType.STRING, 4)
oprot.writeString(self.jobId.encode('utf-8') if sys.version_info[0] == 2 else self.jobId)
oprot.writeFieldEnd()
if self.url is not None:
oprot.writeFieldBegin('url', TType.STRING, 5)
oprot.writeString(self.url.encode('utf-8') if sys.version_info[0] == 2 else self.url)
oprot.writeFieldEnd()
if self.cookiesBlob is not None:
oprot.writeFieldBegin('cookiesBlob', TType.STRING, 6)
oprot.writeString(self.cookiesBlob.encode('utf-8') if sys.version_info[0] == 2 else self.cookiesBlob)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class AccountData(object):
"""
Attributes:
- username
- password
- countryCode
"""
def __init__(self, username=None, password=None, countryCode=None,):
self.username = username
self.password = password
self.countryCode = countryCode
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.username = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.password = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.countryCode = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('AccountData')
if self.username is not None:
oprot.writeFieldBegin('username', TType.STRING, 1)
oprot.writeString(self.username.encode('utf-8') if sys.version_info[0] == 2 else self.username)
oprot.writeFieldEnd()
if self.password is not None:
oprot.writeFieldBegin('password', TType.STRING, 2)
oprot.writeString(self.password.encode('utf-8') if sys.version_info[0] == 2 else self.password)
oprot.writeFieldEnd()
if self.countryCode is not None:
oprot.writeFieldBegin('countryCode', TType.STRING, 3)
oprot.writeString(self.countryCode.encode('utf-8') if sys.version_info[0] == 2 else self.countryCode)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.username is None:
raise TProtocolException(message='Required field username is unset!')
if self.password is None:
raise TProtocolException(message='Required field password is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class ProxyData(object):
"""
Attributes:
- proxyUrl
- countryCode
"""
def __init__(self, proxyUrl=None, countryCode=None,):
self.proxyUrl = proxyUrl
self.countryCode = countryCode
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.proxyUrl = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.countryCode = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('ProxyData')
if self.proxyUrl is not None:
oprot.writeFieldBegin('proxyUrl', TType.STRING, 1)
oprot.writeString(self.proxyUrl.encode('utf-8') if sys.version_info[0] == 2 else self.proxyUrl)
oprot.writeFieldEnd()
if self.countryCode is not None:
oprot.writeFieldBegin('countryCode', TType.STRING, 2)
oprot.writeString(self.countryCode.encode('utf-8') if sys.version_info[0] == 2 else self.countryCode)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.proxyUrl is None:
raise TProtocolException(message='Required field proxyUrl is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class AccountPairWithState(object):
"""
Attributes:
- accountId
- proxyId
- accountPairState
- machineId
"""
def __init__(self, accountId=None, proxyId=None, accountPairState=None, machineId=None,):
self.accountId = accountId
self.proxyId = proxyId
self.accountPairState = accountPairState
self.machineId = machineId
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.accountId = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.proxyId = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I32:
self.accountPairState = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRING:
self.machineId = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('AccountPairWithState')
if self.accountId is not None:
oprot.writeFieldBegin('accountId', TType.STRING, 1)
oprot.writeString(self.accountId.encode('utf-8') if sys.version_info[0] == 2 else self.accountId)
oprot.writeFieldEnd()
if self.proxyId is not None:
oprot.writeFieldBegin('proxyId', TType.STRING, 2)
oprot.writeString(self.proxyId.encode('utf-8') if sys.version_info[0] == 2 else self.proxyId)
oprot.writeFieldEnd()
if self.accountPairState is not None:
oprot.writeFieldBegin('accountPairState', TType.I32, 3)
oprot.writeI32(self.accountPairState)
oprot.writeFieldEnd()
if self.machineId is not None:
oprot.writeFieldBegin('machineId', TType.STRING, 4)
oprot.writeString(self.machineId.encode('utf-8') if sys.version_info[0] == 2 else self.machineId)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.accountId is None:
raise TProtocolException(message='Required field accountId is unset!')
if self.proxyId is None:
raise TProtocolException(message='Required field proxyId is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class JobData(object):
"""
Attributes:
- jobId
- url
- cookiesBlob
- potoken
- visitorId
- ytdlpCommand
- createdTime
- telemetry
- state
- errorMessage
- socks5Id
"""
def __init__(self, jobId=None, url=None, cookiesBlob=None, potoken=None, visitorId=None, ytdlpCommand=None, createdTime=None, telemetry=None, state=None, errorMessage=None, socks5Id=None,):
self.jobId = jobId
self.url = url
self.cookiesBlob = cookiesBlob
self.potoken = potoken
self.visitorId = visitorId
self.ytdlpCommand = ytdlpCommand
self.createdTime = createdTime
self.telemetry = telemetry
self.state = state
self.errorMessage = errorMessage
self.socks5Id = socks5Id
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.jobId = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.url = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.cookiesBlob = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRING:
self.potoken = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.STRING:
self.visitorId = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.STRING:
self.ytdlpCommand = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 7:
if ftype == TType.STRING:
self.createdTime = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 8:
if ftype == TType.MAP:
self.telemetry = {}
(_ktype1, _vtype2, _size0) = iprot.readMapBegin()
for _i4 in range(_size0):
_key5 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
_val6 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
self.telemetry[_key5] = _val6
iprot.readMapEnd()
else:
iprot.skip(ftype)
elif fid == 9:
if ftype == TType.I32:
self.state = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 10:
if ftype == TType.STRING:
self.errorMessage = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 11:
if ftype == TType.STRING:
self.socks5Id = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('JobData')
if self.jobId is not None:
oprot.writeFieldBegin('jobId', TType.STRING, 1)
oprot.writeString(self.jobId.encode('utf-8') if sys.version_info[0] == 2 else self.jobId)
oprot.writeFieldEnd()
if self.url is not None:
oprot.writeFieldBegin('url', TType.STRING, 2)
oprot.writeString(self.url.encode('utf-8') if sys.version_info[0] == 2 else self.url)
oprot.writeFieldEnd()
if self.cookiesBlob is not None:
oprot.writeFieldBegin('cookiesBlob', TType.STRING, 3)
oprot.writeString(self.cookiesBlob.encode('utf-8') if sys.version_info[0] == 2 else self.cookiesBlob)
oprot.writeFieldEnd()
if self.potoken is not None:
oprot.writeFieldBegin('potoken', TType.STRING, 4)
oprot.writeString(self.potoken.encode('utf-8') if sys.version_info[0] == 2 else self.potoken)
oprot.writeFieldEnd()
if self.visitorId is not None:
oprot.writeFieldBegin('visitorId', TType.STRING, 5)
oprot.writeString(self.visitorId.encode('utf-8') if sys.version_info[0] == 2 else self.visitorId)
oprot.writeFieldEnd()
if self.ytdlpCommand is not None:
oprot.writeFieldBegin('ytdlpCommand', TType.STRING, 6)
oprot.writeString(self.ytdlpCommand.encode('utf-8') if sys.version_info[0] == 2 else self.ytdlpCommand)
oprot.writeFieldEnd()
if self.createdTime is not None:
oprot.writeFieldBegin('createdTime', TType.STRING, 7)
oprot.writeString(self.createdTime.encode('utf-8') if sys.version_info[0] == 2 else self.createdTime)
oprot.writeFieldEnd()
if self.telemetry is not None:
oprot.writeFieldBegin('telemetry', TType.MAP, 8)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.telemetry))
for kiter7, viter8 in self.telemetry.items():
oprot.writeString(kiter7.encode('utf-8') if sys.version_info[0] == 2 else kiter7)
oprot.writeString(viter8.encode('utf-8') if sys.version_info[0] == 2 else viter8)
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.state is not None:
oprot.writeFieldBegin('state', TType.I32, 9)
oprot.writeI32(self.state)
oprot.writeFieldEnd()
if self.errorMessage is not None:
oprot.writeFieldBegin('errorMessage', TType.STRING, 10)
oprot.writeString(self.errorMessage.encode('utf-8') if sys.version_info[0] == 2 else self.errorMessage)
oprot.writeFieldEnd()
if self.socks5Id is not None:
oprot.writeFieldBegin('socks5Id', TType.STRING, 11)
oprot.writeString(self.socks5Id.encode('utf-8') if sys.version_info[0] == 2 else self.socks5Id)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.jobId is None:
raise TProtocolException(message='Required field jobId is unset!')
if self.url is None:
raise TProtocolException(message='Required field url is unset!')
if self.cookiesBlob is None:
raise TProtocolException(message='Required field cookiesBlob is unset!')
if self.potoken is None:
raise TProtocolException(message='Required field potoken is unset!')
if self.visitorId is None:
raise TProtocolException(message='Required field visitorId is unset!')
if self.ytdlpCommand is None:
raise TProtocolException(message='Required field ytdlpCommand is unset!')
if self.createdTime is None:
raise TProtocolException(message='Required field createdTime is unset!')
if self.telemetry is None:
raise TProtocolException(message='Required field telemetry is unset!')
if self.state is None:
raise TProtocolException(message='Required field state is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class RichCollectionPagination(object):
"""
Attributes:
- hasNext
- totalCount
- page
- pageSize
"""
def __init__(self, hasNext=None, totalCount=None, page=None, pageSize=None,):
self.hasNext = hasNext
self.totalCount = totalCount
self.page = page
self.pageSize = pageSize
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.BOOL:
self.hasNext = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.totalCount = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I32:
self.page = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I32:
self.pageSize = iprot.readI32()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('RichCollectionPagination')
if self.hasNext is not None:
oprot.writeFieldBegin('hasNext', TType.BOOL, 1)
oprot.writeBool(self.hasNext)
oprot.writeFieldEnd()
if self.totalCount is not None:
oprot.writeFieldBegin('totalCount', TType.I32, 2)
oprot.writeI32(self.totalCount)
oprot.writeFieldEnd()
if self.page is not None:
oprot.writeFieldBegin('page', TType.I32, 3)
oprot.writeI32(self.page)
oprot.writeFieldEnd()
if self.pageSize is not None:
oprot.writeFieldBegin('pageSize', TType.I32, 4)
oprot.writeI32(self.pageSize)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.hasNext is None:
raise TProtocolException(message='Required field hasNext is unset!')
if self.totalCount is None:
raise TProtocolException(message='Required field totalCount is unset!')
if self.page is None:
raise TProtocolException(message='Required field page is unset!')
if self.pageSize is None:
raise TProtocolException(message='Required field pageSize is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class RichCollectionJobData(object):
"""
Attributes:
- items
- pagination
"""
def __init__(self, items=None, pagination=None,):
self.items = items
self.pagination = pagination
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.LIST:
self.items = []
(_etype12, _size9) = iprot.readListBegin()
for _i13 in range(_size9):
_elem14 = JobData()
_elem14.read(iprot)
self.items.append(_elem14)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.pagination = RichCollectionPagination()
self.pagination.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('RichCollectionJobData')
if self.items is not None:
oprot.writeFieldBegin('items', TType.LIST, 1)
oprot.writeListBegin(TType.STRUCT, len(self.items))
for iter15 in self.items:
iter15.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.pagination is not None:
oprot.writeFieldBegin('pagination', TType.STRUCT, 2)
self.pagination.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.items is None:
raise TProtocolException(message='Required field items is unset!')
if self.pagination is None:
raise TProtocolException(message='Required field pagination is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(JobTokenData)
JobTokenData.thrift_spec = (
None, # 0
(1, TType.STRING, 'infoJson', 'UTF8', None, ), # 1
(2, TType.STRING, 'ytdlpCommand', 'UTF8', None, ), # 2
(3, TType.STRING, 'socks', 'UTF8', None, ), # 3
(4, TType.STRING, 'jobId', 'UTF8', None, ), # 4
(5, TType.STRING, 'url', 'UTF8', None, ), # 5
(6, TType.STRING, 'cookiesBlob', 'UTF8', None, ), # 6
)
all_structs.append(AccountData)
AccountData.thrift_spec = (
None, # 0
(1, TType.STRING, 'username', 'UTF8', None, ), # 1
(2, TType.STRING, 'password', 'UTF8', None, ), # 2
(3, TType.STRING, 'countryCode', 'UTF8', None, ), # 3
)
all_structs.append(ProxyData)
ProxyData.thrift_spec = (
None, # 0
(1, TType.STRING, 'proxyUrl', 'UTF8', None, ), # 1
(2, TType.STRING, 'countryCode', 'UTF8', None, ), # 2
)
all_structs.append(AccountPairWithState)
AccountPairWithState.thrift_spec = (
None, # 0
(1, TType.STRING, 'accountId', 'UTF8', None, ), # 1
(2, TType.STRING, 'proxyId', 'UTF8', None, ), # 2
(3, TType.I32, 'accountPairState', None, None, ), # 3
(4, TType.STRING, 'machineId', 'UTF8', None, ), # 4
)
all_structs.append(JobData)
JobData.thrift_spec = (
None, # 0
(1, TType.STRING, 'jobId', 'UTF8', None, ), # 1
(2, TType.STRING, 'url', 'UTF8', None, ), # 2
(3, TType.STRING, 'cookiesBlob', 'UTF8', None, ), # 3
(4, TType.STRING, 'potoken', 'UTF8', None, ), # 4
(5, TType.STRING, 'visitorId', 'UTF8', None, ), # 5
(6, TType.STRING, 'ytdlpCommand', 'UTF8', None, ), # 6
(7, TType.STRING, 'createdTime', 'UTF8', None, ), # 7
(8, TType.MAP, 'telemetry', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None, ), # 8
(9, TType.I32, 'state', None, None, ), # 9
(10, TType.STRING, 'errorMessage', 'UTF8', None, ), # 10
(11, TType.STRING, 'socks5Id', 'UTF8', None, ), # 11
)
all_structs.append(RichCollectionPagination)
RichCollectionPagination.thrift_spec = (
None, # 0
(1, TType.BOOL, 'hasNext', None, None, ), # 1
(2, TType.I32, 'totalCount', None, None, ), # 2
(3, TType.I32, 'page', None, None, ), # 3
(4, TType.I32, 'pageSize', None, None, ), # 4
)
all_structs.append(RichCollectionJobData)
RichCollectionJobData.thrift_spec = (
None, # 0
(1, TType.LIST, 'items', (TType.STRUCT, [JobData, None], False), None, ), # 1
(2, TType.STRUCT, 'pagination', [RichCollectionPagination, None], None, ), # 2
)
fix_spec(all_structs)
del all_structs
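For orientation, the generated structs above round-trip through the stock thrift-py runtime. A minimal sketch, assuming only the classes defined in this module and the `thrift` package on the path (buffer and value names are illustrative):

```python
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol

# Build a pagination struct and check its required-field contract.
page = RichCollectionPagination(hasNext=True, totalCount=42, page=1, pageSize=20)
page.validate()  # raises TProtocolException if any required field is unset

# Serialize into an in-memory buffer with the binary protocol.
buf = TTransport.TMemoryBuffer()
page.write(TBinaryProtocol.TBinaryProtocol(buf))

# Deserialize from the raw bytes and compare via the generated __eq__.
decoded = RichCollectionPagination()
decoded.read(TBinaryProtocol.TBinaryProtocol(TTransport.TMemoryBuffer(buf.getvalue())))
assert decoded == page
```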

View File

@@ -1 +0,0 @@
__all__ = ['ttypes', 'constants']

View File

@@ -1,14 +0,0 @@
#
# Autogenerated by Thrift Compiler (0.20.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
from thrift.TRecursive import fix_spec
import sys
from .ttypes import *

View File

@@ -1,254 +0,0 @@
#
# Autogenerated by Thrift Compiler (0.20.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
from thrift.TRecursive import fix_spec
import sys
from thrift.transport import TTransport
all_structs = []
class PBServiceException(TException):
"""
Attributes:
- message
- errorCode
- context
"""
def __init__(self, message=None, errorCode=None, context=None,):
super(PBServiceException, self).__setattr__('message', message)
super(PBServiceException, self).__setattr__('errorCode', errorCode)
super(PBServiceException, self).__setattr__('context', context)
def __setattr__(self, *args):
raise TypeError("can't modify immutable instance")
def __delattr__(self, *args):
raise TypeError("can't modify immutable instance")
def __hash__(self):
return hash(self.__class__) ^ hash((self.message, self.errorCode, self.context, ))
@classmethod
def read(cls, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and cls.thrift_spec is not None:
return iprot._fast_decode(None, iprot, [cls, cls.thrift_spec])
iprot.readStructBegin()
message = None
errorCode = None
context = None
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
message = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
errorCode = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.MAP:
context = {}
(_ktype1, _vtype2, _size0) = iprot.readMapBegin()
for _i4 in range(_size0):
_key5 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
_val6 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
context[_key5] = _val6
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
return cls(
message=message,
errorCode=errorCode,
context=context,
)
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('PBServiceException')
if self.message is not None:
oprot.writeFieldBegin('message', TType.STRING, 1)
oprot.writeString(self.message.encode('utf-8') if sys.version_info[0] == 2 else self.message)
oprot.writeFieldEnd()
if self.errorCode is not None:
oprot.writeFieldBegin('errorCode', TType.STRING, 2)
oprot.writeString(self.errorCode.encode('utf-8') if sys.version_info[0] == 2 else self.errorCode)
oprot.writeFieldEnd()
if self.context is not None:
oprot.writeFieldBegin('context', TType.MAP, 3)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.context))
for kiter7, viter8 in self.context.items():
oprot.writeString(kiter7.encode('utf-8') if sys.version_info[0] == 2 else kiter7)
oprot.writeString(viter8.encode('utf-8') if sys.version_info[0] == 2 else viter8)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.message is None:
raise TProtocolException(message='Required field message is unset!')
return
def __str__(self):
return repr(self)
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class PBUserException(TException):
"""
Attributes:
- message
- errorCode
- context
"""
def __init__(self, message=None, errorCode=None, context=None,):
super(PBUserException, self).__setattr__('message', message)
super(PBUserException, self).__setattr__('errorCode', errorCode)
super(PBUserException, self).__setattr__('context', context)
def __setattr__(self, *args):
raise TypeError("can't modify immutable instance")
def __delattr__(self, *args):
raise TypeError("can't modify immutable instance")
def __hash__(self):
return hash(self.__class__) ^ hash((self.message, self.errorCode, self.context, ))
@classmethod
def read(cls, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and cls.thrift_spec is not None:
return iprot._fast_decode(None, iprot, [cls, cls.thrift_spec])
iprot.readStructBegin()
message = None
errorCode = None
context = None
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
message = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
errorCode = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.MAP:
context = {}
(_ktype10, _vtype11, _size9) = iprot.readMapBegin()
for _i13 in range(_size9):
_key14 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
_val15 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
context[_key14] = _val15
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
return cls(
message=message,
errorCode=errorCode,
context=context,
)
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('PBUserException')
if self.message is not None:
oprot.writeFieldBegin('message', TType.STRING, 1)
oprot.writeString(self.message.encode('utf-8') if sys.version_info[0] == 2 else self.message)
oprot.writeFieldEnd()
if self.errorCode is not None:
oprot.writeFieldBegin('errorCode', TType.STRING, 2)
oprot.writeString(self.errorCode.encode('utf-8') if sys.version_info[0] == 2 else self.errorCode)
oprot.writeFieldEnd()
if self.context is not None:
oprot.writeFieldBegin('context', TType.MAP, 3)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.context))
for kiter16, viter17 in self.context.items():
oprot.writeString(kiter16.encode('utf-8') if sys.version_info[0] == 2 else kiter16)
oprot.writeString(viter17.encode('utf-8') if sys.version_info[0] == 2 else viter17)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.message is None:
raise TProtocolException(message='Required field message is unset!')
return
def __str__(self):
return repr(self)
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(PBServiceException)
PBServiceException.thrift_spec = (
None, # 0
(1, TType.STRING, 'message', 'UTF8', None, ), # 1
(2, TType.STRING, 'errorCode', 'UTF8', None, ), # 2
(3, TType.MAP, 'context', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None, ), # 3
)
all_structs.append(PBUserException)
PBUserException.thrift_spec = (
None, # 0
(1, TType.STRING, 'message', 'UTF8', None, ), # 1
(2, TType.STRING, 'errorCode', 'UTF8', None, ), # 2
(3, TType.MAP, 'context', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None, ), # 3
)
fix_spec(all_structs)
del all_structs
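The two exception types above are immutable (`__setattr__` raises), so they are populated once at construction and read thereafter. A hypothetical handler-side sketch — `lookup_job` is invented purely for illustration:

```python
def lookup_job(job_id):
    # Hypothetical guard clause; real handlers live elsewhere in the repo.
    if not job_id:
        raise PBUserException(message="jobId must not be empty",
                              errorCode="BAD_REQUEST",
                              context={"field": "jobId"})

try:
    lookup_job("")
except PBUserException as exp:
    # Fields are read-only attributes on the frozen exception instance.
    print(exp.errorCode, exp.message, exp.context)
```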

View File

@@ -1 +0,0 @@
__all__ = ['ttypes', 'constants', 'YTTokenOpService']

View File

@@ -1,14 +0,0 @@
#
# Autogenerated by Thrift Compiler (0.20.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
from thrift.TRecursive import fix_spec
import sys
from .ttypes import *

View File

@@ -1,21 +0,0 @@
#
# Autogenerated by Thrift Compiler (0.20.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
from thrift.TRecursive import fix_spec
import sys
import pangramia.yt.common.ttypes
import pangramia.yt.exceptions.ttypes
import pangramia.base_service.ttypes
from thrift.transport import TTransport
all_structs = []
fix_spec(all_structs)
del all_structs

View File

@@ -1,564 +0,0 @@
#
# Autogenerated by Thrift Compiler (0.20.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
from thrift.TRecursive import fix_spec
import sys
import logging
from .ttypes import *
from thrift.Thrift import TProcessor
from thrift.transport import TTransport
all_structs = []
class Iface(object):
def ping(self):
pass
def reportError(self, message, details):
"""
Parameters:
- message
- details
"""
pass
def shutdown(self):
pass
class Client(Iface):
def __init__(self, iprot, oprot=None):
self._iprot = self._oprot = iprot
if oprot is not None:
self._oprot = oprot
self._seqid = 0
def ping(self):
self.send_ping()
return self.recv_ping()
def send_ping(self):
self._oprot.writeMessageBegin('ping', TMessageType.CALL, self._seqid)
args = ping_args()
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_ping(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = ping_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.serviceExp is not None:
raise result.serviceExp
if result.userExp is not None:
raise result.userExp
raise TApplicationException(TApplicationException.MISSING_RESULT, "ping failed: unknown result")
def reportError(self, message, details):
"""
Parameters:
- message
- details
"""
self.send_reportError(message, details)
return self.recv_reportError()
def send_reportError(self, message, details):
self._oprot.writeMessageBegin('reportError', TMessageType.CALL, self._seqid)
args = reportError_args()
args.message = message
args.details = details
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_reportError(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = reportError_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.serviceExp is not None:
raise result.serviceExp
if result.userExp is not None:
raise result.userExp
raise TApplicationException(TApplicationException.MISSING_RESULT, "reportError failed: unknown result")
def shutdown(self):
self.send_shutdown()
def send_shutdown(self):
self._oprot.writeMessageBegin('shutdown', TMessageType.ONEWAY, self._seqid)
args = shutdown_args()
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
class Processor(Iface, TProcessor):
def __init__(self, handler):
self._handler = handler
self._processMap = {}
self._processMap["ping"] = Processor.process_ping
self._processMap["reportError"] = Processor.process_reportError
self._processMap["shutdown"] = Processor.process_shutdown
self._on_message_begin = None
def on_message_begin(self, func):
self._on_message_begin = func
def process(self, iprot, oprot):
(name, type, seqid) = iprot.readMessageBegin()
if self._on_message_begin:
self._on_message_begin(name, type, seqid)
if name not in self._processMap:
iprot.skip(TType.STRUCT)
iprot.readMessageEnd()
x = TApplicationException(TApplicationException.UNKNOWN_METHOD, 'Unknown function %s' % (name))
oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid)
x.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
return
else:
self._processMap[name](self, seqid, iprot, oprot)
return True
def process_ping(self, seqid, iprot, oprot):
args = ping_args()
args.read(iprot)
iprot.readMessageEnd()
result = ping_result()
try:
result.success = self._handler.ping()
msg_type = TMessageType.REPLY
except TTransport.TTransportException:
raise
except pangramia.yt.exceptions.ttypes.PBServiceException as serviceExp:
msg_type = TMessageType.REPLY
result.serviceExp = serviceExp
except pangramia.yt.exceptions.ttypes.PBUserException as userExp:
msg_type = TMessageType.REPLY
result.userExp = userExp
except TApplicationException as ex:
logging.exception('TApplication exception in handler')
msg_type = TMessageType.EXCEPTION
result = ex
except Exception:
logging.exception('Unexpected exception in handler')
msg_type = TMessageType.EXCEPTION
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("ping", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_reportError(self, seqid, iprot, oprot):
args = reportError_args()
args.read(iprot)
iprot.readMessageEnd()
result = reportError_result()
try:
result.success = self._handler.reportError(args.message, args.details)
msg_type = TMessageType.REPLY
except TTransport.TTransportException:
raise
except pangramia.yt.exceptions.ttypes.PBServiceException as serviceExp:
msg_type = TMessageType.REPLY
result.serviceExp = serviceExp
except pangramia.yt.exceptions.ttypes.PBUserException as userExp:
msg_type = TMessageType.REPLY
result.userExp = userExp
except TApplicationException as ex:
logging.exception('TApplication exception in handler')
msg_type = TMessageType.EXCEPTION
result = ex
except Exception:
logging.exception('Unexpected exception in handler')
msg_type = TMessageType.EXCEPTION
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("reportError", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_shutdown(self, seqid, iprot, oprot):
args = shutdown_args()
args.read(iprot)
iprot.readMessageEnd()
try:
self._handler.shutdown()
except TTransport.TTransportException:
raise
except Exception:
logging.exception('Exception in oneway handler')
# HELPER FUNCTIONS AND STRUCTURES
class ping_args(object):
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('ping_args')
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(ping_args)
ping_args.thrift_spec = (
)
class ping_result(object):
"""
Attributes:
- success
- serviceExp
- userExp
"""
def __init__(self, success=None, serviceExp=None, userExp=None,):
self.success = success
self.serviceExp = serviceExp
self.userExp = userExp
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.BOOL:
self.success = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.serviceExp = pangramia.yt.exceptions.ttypes.PBServiceException.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.userExp = pangramia.yt.exceptions.ttypes.PBUserException.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('ping_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.BOOL, 0)
oprot.writeBool(self.success)
oprot.writeFieldEnd()
if self.serviceExp is not None:
oprot.writeFieldBegin('serviceExp', TType.STRUCT, 1)
self.serviceExp.write(oprot)
oprot.writeFieldEnd()
if self.userExp is not None:
oprot.writeFieldBegin('userExp', TType.STRUCT, 2)
self.userExp.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(ping_result)
ping_result.thrift_spec = (
(0, TType.BOOL, 'success', None, None, ), # 0
(1, TType.STRUCT, 'serviceExp', [pangramia.yt.exceptions.ttypes.PBServiceException, None], None, ), # 1
(2, TType.STRUCT, 'userExp', [pangramia.yt.exceptions.ttypes.PBUserException, None], None, ), # 2
)
class reportError_args(object):
"""
Attributes:
- message
- details
"""
def __init__(self, message=None, details=None,):
self.message = message
self.details = details
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.message = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.MAP:
self.details = {}
(_ktype1, _vtype2, _size0) = iprot.readMapBegin()
for _i4 in range(_size0):
_key5 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
_val6 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
self.details[_key5] = _val6
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('reportError_args')
if self.message is not None:
oprot.writeFieldBegin('message', TType.STRING, 1)
oprot.writeString(self.message.encode('utf-8') if sys.version_info[0] == 2 else self.message)
oprot.writeFieldEnd()
if self.details is not None:
oprot.writeFieldBegin('details', TType.MAP, 2)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.details))
for kiter7, viter8 in self.details.items():
oprot.writeString(kiter7.encode('utf-8') if sys.version_info[0] == 2 else kiter7)
oprot.writeString(viter8.encode('utf-8') if sys.version_info[0] == 2 else viter8)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(reportError_args)
reportError_args.thrift_spec = (
None, # 0
(1, TType.STRING, 'message', 'UTF8', None, ), # 1
(2, TType.MAP, 'details', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None, ), # 2
)
class reportError_result(object):
"""
Attributes:
- success
- serviceExp
- userExp
"""
def __init__(self, success=None, serviceExp=None, userExp=None,):
self.success = success
self.serviceExp = serviceExp
self.userExp = userExp
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.BOOL:
self.success = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.serviceExp = pangramia.yt.exceptions.ttypes.PBServiceException.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.userExp = pangramia.yt.exceptions.ttypes.PBUserException.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('reportError_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.BOOL, 0)
oprot.writeBool(self.success)
oprot.writeFieldEnd()
if self.serviceExp is not None:
oprot.writeFieldBegin('serviceExp', TType.STRUCT, 1)
self.serviceExp.write(oprot)
oprot.writeFieldEnd()
if self.userExp is not None:
oprot.writeFieldBegin('userExp', TType.STRUCT, 2)
self.userExp.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(reportError_result)
reportError_result.thrift_spec = (
(0, TType.BOOL, 'success', None, None, ), # 0
(1, TType.STRUCT, 'serviceExp', [pangramia.yt.exceptions.ttypes.PBServiceException, None], None, ), # 1
(2, TType.STRUCT, 'userExp', [pangramia.yt.exceptions.ttypes.PBUserException, None], None, ), # 2
)
class shutdown_args(object):
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('shutdown_args')
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(shutdown_args)
shutdown_args.thrift_spec = (
)
fix_spec(all_structs)
del all_structs
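A client-side sketch against the `Client` class generated above, assuming the stock thrift-py socket transport and binary protocol; the host and port are placeholders, not the deployed endpoint:

```python
from thrift.transport import TSocket, TTransport
from thrift.protocol import TBinaryProtocol

# Placeholder endpoint; the real address comes from deployment config.
transport = TTransport.TBufferedTransport(TSocket.TSocket("localhost", 9090))
client = Client(TBinaryProtocol.TBinaryProtocol(transport))

transport.open()
try:
    client.ping()  # blocking round-trip health check, returns a bool
    client.reportError("probe failed", {"stage": "ping"})  # string->string details map
finally:
    transport.close()
```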

View File

@@ -1 +0,0 @@
__all__ = ['ttypes', 'constants', 'BaseService']

View File

@@ -1,14 +0,0 @@
#
# Autogenerated by Thrift Compiler (0.20.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
from thrift.TRecursive import fix_spec
import sys
from .ttypes import *

View File

@@ -1,20 +0,0 @@
#
# Autogenerated by Thrift Compiler (0.20.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
from thrift.TRecursive import fix_spec
import sys
import pangramia.yt.common.ttypes
import pangramia.yt.exceptions.ttypes
from thrift.transport import TTransport
all_structs = []
fix_spec(all_structs)
del all_structs

View File

@@ -1 +0,0 @@
__all__ = ['ttypes', 'constants', 'YTAccountsOpService']

View File

@@ -1,14 +0,0 @@
#
# Autogenerated by Thrift Compiler (0.20.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
from thrift.TRecursive import fix_spec
import sys
from .ttypes import *

View File

@@ -1,21 +0,0 @@
#
# Autogenerated by Thrift Compiler (0.20.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
from thrift.TRecursive import fix_spec
import sys
import pangramia.yt.common.ttypes
import pangramia.yt.exceptions.ttypes
import pangramia.base_service.ttypes
from thrift.transport import TTransport
all_structs = []
fix_spec(all_structs)
del all_structs

View File

@@ -1 +0,0 @@
__all__ = ['ttypes', 'constants']

View File

@@ -1,14 +0,0 @@
#
# Autogenerated by Thrift Compiler (0.20.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
from thrift.TRecursive import fix_spec
import sys
from .ttypes import *

View File

@@ -1,905 +0,0 @@
#
# Autogenerated by Thrift Compiler (0.20.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
from thrift.TRecursive import fix_spec
import sys
from thrift.transport import TTransport
all_structs = []
class JobState(object):
SUCCESS = 0
FAIL = 1
BOT_FORBIDDEN_ON_URL_ACCESS = 2
BOT_FORBIDDEN_ON_FILE_DOWNLOAD = 3
BOT_CAPTCHA = 4
BOT_AUTH_RELOGIN_REQUIRED = 5
BOT_AUTH_SMS_REQUIRED = 6
BOT_AUTH_DEVICE_QR_REQUIRED = 7
BOT_ACCOUNT_BANNED = 8
BOT_IP_BANNED = 9
_VALUES_TO_NAMES = {
0: "SUCCESS",
1: "FAIL",
2: "BOT_FORBIDDEN_ON_URL_ACCESS",
3: "BOT_FORBIDDEN_ON_FILE_DOWNLOAD",
4: "BOT_CAPTCHA",
5: "BOT_AUTH_RELOGIN_REQUIRED",
6: "BOT_AUTH_SMS_REQUIRED",
7: "BOT_AUTH_DEVICE_QR_REQUIRED",
8: "BOT_ACCOUNT_BANNED",
9: "BOT_IP_BANNED",
}
_NAMES_TO_VALUES = {
"SUCCESS": 0,
"FAIL": 1,
"BOT_FORBIDDEN_ON_URL_ACCESS": 2,
"BOT_FORBIDDEN_ON_FILE_DOWNLOAD": 3,
"BOT_CAPTCHA": 4,
"BOT_AUTH_RELOGIN_REQUIRED": 5,
"BOT_AUTH_SMS_REQUIRED": 6,
"BOT_AUTH_DEVICE_QR_REQUIRED": 7,
"BOT_ACCOUNT_BANNED": 8,
"BOT_IP_BANNED": 9,
}
class TokenUpdateMode(object):
AUTOREFRESH_AND_REMAIN_ANONYMOUS = 0
AUTOREFRESH_AND_ALLOW_AUTH = 1
AUTOREFRESH_AND_ONLY_AUTH = 2
CLEANUP_THEN_AUTOREFRESH_AND_ONLY_AUTH = 3
CLEANUP_THEN_AUTOREFRESH_AND_REMAIN_ANONYMOUS = 4
CLEANUP_THEN_AUTOREFRESH_AND_ALLOW_AUTH = 5
AUTO = 6
_VALUES_TO_NAMES = {
0: "AUTOREFRESH_AND_REMAIN_ANONYMOUS",
1: "AUTOREFRESH_AND_ALLOW_AUTH",
2: "AUTOREFRESH_AND_ONLY_AUTH",
3: "CLEANUP_THEN_AUTOREFRESH_AND_ONLY_AUTH",
4: "CLEANUP_THEN_AUTOREFRESH_AND_REMAIN_ANONYMOUS",
5: "CLEANUP_THEN_AUTOREFRESH_AND_ALLOW_AUTH",
6: "AUTO",
}
_NAMES_TO_VALUES = {
"AUTOREFRESH_AND_REMAIN_ANONYMOUS": 0,
"AUTOREFRESH_AND_ALLOW_AUTH": 1,
"AUTOREFRESH_AND_ONLY_AUTH": 2,
"CLEANUP_THEN_AUTOREFRESH_AND_ONLY_AUTH": 3,
"CLEANUP_THEN_AUTOREFRESH_AND_REMAIN_ANONYMOUS": 4,
"CLEANUP_THEN_AUTOREFRESH_AND_ALLOW_AUTH": 5,
"AUTO": 6,
}
class AccountPairState(object):
ACTIVE = 0
PAUSED = 1
REMOVED = 2
IN_PROGRESS = 3
ALL = 4
_VALUES_TO_NAMES = {
0: "ACTIVE",
1: "PAUSED",
2: "REMOVED",
3: "IN_PROGRESS",
4: "ALL",
}
_NAMES_TO_VALUES = {
"ACTIVE": 0,
"PAUSED": 1,
"REMOVED": 2,
"IN_PROGRESS": 3,
"ALL": 4,
}
class JobTokenData(object):
"""
Attributes:
- infoJson
- ytdlpCommand
- socks
- jobId
- url
- cookiesBlob
"""
def __init__(self, infoJson=None, ytdlpCommand=None, socks=None, jobId=None, url=None, cookiesBlob=None,):
self.infoJson = infoJson
self.ytdlpCommand = ytdlpCommand
self.socks = socks
self.jobId = jobId
self.url = url
self.cookiesBlob = cookiesBlob
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.infoJson = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.ytdlpCommand = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.socks = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRING:
self.jobId = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.STRING:
self.url = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.STRING:
self.cookiesBlob = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('JobTokenData')
if self.infoJson is not None:
oprot.writeFieldBegin('infoJson', TType.STRING, 1)
oprot.writeString(self.infoJson.encode('utf-8') if sys.version_info[0] == 2 else self.infoJson)
oprot.writeFieldEnd()
if self.ytdlpCommand is not None:
oprot.writeFieldBegin('ytdlpCommand', TType.STRING, 2)
oprot.writeString(self.ytdlpCommand.encode('utf-8') if sys.version_info[0] == 2 else self.ytdlpCommand)
oprot.writeFieldEnd()
if self.socks is not None:
oprot.writeFieldBegin('socks', TType.STRING, 3)
oprot.writeString(self.socks.encode('utf-8') if sys.version_info[0] == 2 else self.socks)
oprot.writeFieldEnd()
if self.jobId is not None:
oprot.writeFieldBegin('jobId', TType.STRING, 4)
oprot.writeString(self.jobId.encode('utf-8') if sys.version_info[0] == 2 else self.jobId)
oprot.writeFieldEnd()
if self.url is not None:
oprot.writeFieldBegin('url', TType.STRING, 5)
oprot.writeString(self.url.encode('utf-8') if sys.version_info[0] == 2 else self.url)
oprot.writeFieldEnd()
if self.cookiesBlob is not None:
oprot.writeFieldBegin('cookiesBlob', TType.STRING, 6)
oprot.writeString(self.cookiesBlob.encode('utf-8') if sys.version_info[0] == 2 else self.cookiesBlob)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class AccountData(object):
"""
Attributes:
- username
- password
- countryCode
"""
def __init__(self, username=None, password=None, countryCode=None,):
self.username = username
self.password = password
self.countryCode = countryCode
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.username = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.password = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.countryCode = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('AccountData')
if self.username is not None:
oprot.writeFieldBegin('username', TType.STRING, 1)
oprot.writeString(self.username.encode('utf-8') if sys.version_info[0] == 2 else self.username)
oprot.writeFieldEnd()
if self.password is not None:
oprot.writeFieldBegin('password', TType.STRING, 2)
oprot.writeString(self.password.encode('utf-8') if sys.version_info[0] == 2 else self.password)
oprot.writeFieldEnd()
if self.countryCode is not None:
oprot.writeFieldBegin('countryCode', TType.STRING, 3)
oprot.writeString(self.countryCode.encode('utf-8') if sys.version_info[0] == 2 else self.countryCode)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.username is None:
raise TProtocolException(message='Required field username is unset!')
if self.password is None:
raise TProtocolException(message='Required field password is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class ProxyData(object):
"""
Attributes:
- proxyUrl
- countryCode
"""
def __init__(self, proxyUrl=None, countryCode=None,):
self.proxyUrl = proxyUrl
self.countryCode = countryCode
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.proxyUrl = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.countryCode = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('ProxyData')
if self.proxyUrl is not None:
oprot.writeFieldBegin('proxyUrl', TType.STRING, 1)
oprot.writeString(self.proxyUrl.encode('utf-8') if sys.version_info[0] == 2 else self.proxyUrl)
oprot.writeFieldEnd()
if self.countryCode is not None:
oprot.writeFieldBegin('countryCode', TType.STRING, 2)
oprot.writeString(self.countryCode.encode('utf-8') if sys.version_info[0] == 2 else self.countryCode)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.proxyUrl is None:
raise TProtocolException(message='Required field proxyUrl is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class AccountPairWithState(object):
"""
Attributes:
- accountId
- proxyId
- accountPairState
- machineId
"""
def __init__(self, accountId=None, proxyId=None, accountPairState=None, machineId=None,):
self.accountId = accountId
self.proxyId = proxyId
self.accountPairState = accountPairState
self.machineId = machineId
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.accountId = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.proxyId = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I32:
self.accountPairState = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRING:
self.machineId = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('AccountPairWithState')
if self.accountId is not None:
oprot.writeFieldBegin('accountId', TType.STRING, 1)
oprot.writeString(self.accountId.encode('utf-8') if sys.version_info[0] == 2 else self.accountId)
oprot.writeFieldEnd()
if self.proxyId is not None:
oprot.writeFieldBegin('proxyId', TType.STRING, 2)
oprot.writeString(self.proxyId.encode('utf-8') if sys.version_info[0] == 2 else self.proxyId)
oprot.writeFieldEnd()
if self.accountPairState is not None:
oprot.writeFieldBegin('accountPairState', TType.I32, 3)
oprot.writeI32(self.accountPairState)
oprot.writeFieldEnd()
if self.machineId is not None:
oprot.writeFieldBegin('machineId', TType.STRING, 4)
oprot.writeString(self.machineId.encode('utf-8') if sys.version_info[0] == 2 else self.machineId)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.accountId is None:
raise TProtocolException(message='Required field accountId is unset!')
if self.proxyId is None:
raise TProtocolException(message='Required field proxyId is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class JobData(object):
"""
Attributes:
- jobId
- url
- cookiesBlob
- potoken
- visitorId
- ytdlpCommand
- createdTime
- telemetry
- state
- errorMessage
- socks5Id
"""
def __init__(self, jobId=None, url=None, cookiesBlob=None, potoken=None, visitorId=None, ytdlpCommand=None, createdTime=None, telemetry=None, state=None, errorMessage=None, socks5Id=None,):
self.jobId = jobId
self.url = url
self.cookiesBlob = cookiesBlob
self.potoken = potoken
self.visitorId = visitorId
self.ytdlpCommand = ytdlpCommand
self.createdTime = createdTime
self.telemetry = telemetry
self.state = state
self.errorMessage = errorMessage
self.socks5Id = socks5Id
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.jobId = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.url = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.cookiesBlob = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRING:
self.potoken = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.STRING:
self.visitorId = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.STRING:
self.ytdlpCommand = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 7:
if ftype == TType.STRING:
self.createdTime = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 8:
if ftype == TType.MAP:
self.telemetry = {}
(_ktype1, _vtype2, _size0) = iprot.readMapBegin()
for _i4 in range(_size0):
_key5 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
_val6 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
self.telemetry[_key5] = _val6
iprot.readMapEnd()
else:
iprot.skip(ftype)
elif fid == 9:
if ftype == TType.I32:
self.state = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 10:
if ftype == TType.STRING:
self.errorMessage = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 11:
if ftype == TType.STRING:
self.socks5Id = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('JobData')
if self.jobId is not None:
oprot.writeFieldBegin('jobId', TType.STRING, 1)
oprot.writeString(self.jobId.encode('utf-8') if sys.version_info[0] == 2 else self.jobId)
oprot.writeFieldEnd()
if self.url is not None:
oprot.writeFieldBegin('url', TType.STRING, 2)
oprot.writeString(self.url.encode('utf-8') if sys.version_info[0] == 2 else self.url)
oprot.writeFieldEnd()
if self.cookiesBlob is not None:
oprot.writeFieldBegin('cookiesBlob', TType.STRING, 3)
oprot.writeString(self.cookiesBlob.encode('utf-8') if sys.version_info[0] == 2 else self.cookiesBlob)
oprot.writeFieldEnd()
if self.potoken is not None:
oprot.writeFieldBegin('potoken', TType.STRING, 4)
oprot.writeString(self.potoken.encode('utf-8') if sys.version_info[0] == 2 else self.potoken)
oprot.writeFieldEnd()
if self.visitorId is not None:
oprot.writeFieldBegin('visitorId', TType.STRING, 5)
oprot.writeString(self.visitorId.encode('utf-8') if sys.version_info[0] == 2 else self.visitorId)
oprot.writeFieldEnd()
if self.ytdlpCommand is not None:
oprot.writeFieldBegin('ytdlpCommand', TType.STRING, 6)
oprot.writeString(self.ytdlpCommand.encode('utf-8') if sys.version_info[0] == 2 else self.ytdlpCommand)
oprot.writeFieldEnd()
if self.createdTime is not None:
oprot.writeFieldBegin('createdTime', TType.STRING, 7)
oprot.writeString(self.createdTime.encode('utf-8') if sys.version_info[0] == 2 else self.createdTime)
oprot.writeFieldEnd()
if self.telemetry is not None:
oprot.writeFieldBegin('telemetry', TType.MAP, 8)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.telemetry))
for kiter7, viter8 in self.telemetry.items():
oprot.writeString(kiter7.encode('utf-8') if sys.version_info[0] == 2 else kiter7)
oprot.writeString(viter8.encode('utf-8') if sys.version_info[0] == 2 else viter8)
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.state is not None:
oprot.writeFieldBegin('state', TType.I32, 9)
oprot.writeI32(self.state)
oprot.writeFieldEnd()
if self.errorMessage is not None:
oprot.writeFieldBegin('errorMessage', TType.STRING, 10)
oprot.writeString(self.errorMessage.encode('utf-8') if sys.version_info[0] == 2 else self.errorMessage)
oprot.writeFieldEnd()
if self.socks5Id is not None:
oprot.writeFieldBegin('socks5Id', TType.STRING, 11)
oprot.writeString(self.socks5Id.encode('utf-8') if sys.version_info[0] == 2 else self.socks5Id)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.jobId is None:
raise TProtocolException(message='Required field jobId is unset!')
if self.url is None:
raise TProtocolException(message='Required field url is unset!')
if self.cookiesBlob is None:
raise TProtocolException(message='Required field cookiesBlob is unset!')
if self.potoken is None:
raise TProtocolException(message='Required field potoken is unset!')
if self.visitorId is None:
raise TProtocolException(message='Required field visitorId is unset!')
if self.ytdlpCommand is None:
raise TProtocolException(message='Required field ytdlpCommand is unset!')
if self.createdTime is None:
raise TProtocolException(message='Required field createdTime is unset!')
if self.telemetry is None:
raise TProtocolException(message='Required field telemetry is unset!')
if self.state is None:
raise TProtocolException(message='Required field state is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class RichCollectionPagination(object):
"""
Attributes:
- hasNext
- totalCount
- page
- pageSize
"""
def __init__(self, hasNext=None, totalCount=None, page=None, pageSize=None,):
self.hasNext = hasNext
self.totalCount = totalCount
self.page = page
self.pageSize = pageSize
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.BOOL:
self.hasNext = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.totalCount = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I32:
self.page = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I32:
self.pageSize = iprot.readI32()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('RichCollectionPagination')
if self.hasNext is not None:
oprot.writeFieldBegin('hasNext', TType.BOOL, 1)
oprot.writeBool(self.hasNext)
oprot.writeFieldEnd()
if self.totalCount is not None:
oprot.writeFieldBegin('totalCount', TType.I32, 2)
oprot.writeI32(self.totalCount)
oprot.writeFieldEnd()
if self.page is not None:
oprot.writeFieldBegin('page', TType.I32, 3)
oprot.writeI32(self.page)
oprot.writeFieldEnd()
if self.pageSize is not None:
oprot.writeFieldBegin('pageSize', TType.I32, 4)
oprot.writeI32(self.pageSize)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.hasNext is None:
raise TProtocolException(message='Required field hasNext is unset!')
if self.totalCount is None:
raise TProtocolException(message='Required field totalCount is unset!')
if self.page is None:
raise TProtocolException(message='Required field page is unset!')
if self.pageSize is None:
raise TProtocolException(message='Required field pageSize is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class RichCollectionJobData(object):
"""
Attributes:
- items
- pagination
"""
def __init__(self, items=None, pagination=None,):
self.items = items
self.pagination = pagination
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.LIST:
self.items = []
(_etype12, _size9) = iprot.readListBegin()
for _i13 in range(_size9):
_elem14 = JobData()
_elem14.read(iprot)
self.items.append(_elem14)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.pagination = RichCollectionPagination()
self.pagination.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('RichCollectionJobData')
if self.items is not None:
oprot.writeFieldBegin('items', TType.LIST, 1)
oprot.writeListBegin(TType.STRUCT, len(self.items))
for iter15 in self.items:
iter15.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.pagination is not None:
oprot.writeFieldBegin('pagination', TType.STRUCT, 2)
self.pagination.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.items is None:
raise TProtocolException(message='Required field items is unset!')
if self.pagination is None:
raise TProtocolException(message='Required field pagination is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(JobTokenData)
JobTokenData.thrift_spec = (
None, # 0
(1, TType.STRING, 'infoJson', 'UTF8', None, ), # 1
(2, TType.STRING, 'ytdlpCommand', 'UTF8', None, ), # 2
(3, TType.STRING, 'socks', 'UTF8', None, ), # 3
(4, TType.STRING, 'jobId', 'UTF8', None, ), # 4
(5, TType.STRING, 'url', 'UTF8', None, ), # 5
(6, TType.STRING, 'cookiesBlob', 'UTF8', None, ), # 6
)
all_structs.append(AccountData)
AccountData.thrift_spec = (
None, # 0
(1, TType.STRING, 'username', 'UTF8', None, ), # 1
(2, TType.STRING, 'password', 'UTF8', None, ), # 2
(3, TType.STRING, 'countryCode', 'UTF8', None, ), # 3
)
all_structs.append(ProxyData)
ProxyData.thrift_spec = (
None, # 0
(1, TType.STRING, 'proxyUrl', 'UTF8', None, ), # 1
(2, TType.STRING, 'countryCode', 'UTF8', None, ), # 2
)
all_structs.append(AccountPairWithState)
AccountPairWithState.thrift_spec = (
None, # 0
(1, TType.STRING, 'accountId', 'UTF8', None, ), # 1
(2, TType.STRING, 'proxyId', 'UTF8', None, ), # 2
(3, TType.I32, 'accountPairState', None, None, ), # 3
(4, TType.STRING, 'machineId', 'UTF8', None, ), # 4
)
all_structs.append(JobData)
JobData.thrift_spec = (
None, # 0
(1, TType.STRING, 'jobId', 'UTF8', None, ), # 1
(2, TType.STRING, 'url', 'UTF8', None, ), # 2
(3, TType.STRING, 'cookiesBlob', 'UTF8', None, ), # 3
(4, TType.STRING, 'potoken', 'UTF8', None, ), # 4
(5, TType.STRING, 'visitorId', 'UTF8', None, ), # 5
(6, TType.STRING, 'ytdlpCommand', 'UTF8', None, ), # 6
(7, TType.STRING, 'createdTime', 'UTF8', None, ), # 7
(8, TType.MAP, 'telemetry', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None, ), # 8
(9, TType.I32, 'state', None, None, ), # 9
(10, TType.STRING, 'errorMessage', 'UTF8', None, ), # 10
(11, TType.STRING, 'socks5Id', 'UTF8', None, ), # 11
)
all_structs.append(RichCollectionPagination)
RichCollectionPagination.thrift_spec = (
None, # 0
(1, TType.BOOL, 'hasNext', None, None, ), # 1
(2, TType.I32, 'totalCount', None, None, ), # 2
(3, TType.I32, 'page', None, None, ), # 3
(4, TType.I32, 'pageSize', None, None, ), # 4
)
all_structs.append(RichCollectionJobData)
RichCollectionJobData.thrift_spec = (
None, # 0
(1, TType.LIST, 'items', (TType.STRUCT, [JobData, None], False), None, ), # 1
(2, TType.STRUCT, 'pagination', [RichCollectionPagination, None], None, ), # 2
)
fix_spec(all_structs)
del all_structs
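As a quick sanity check of the structs above, here is a minimal round-trip sketch. It assumes the generated module is importable as `pangramia.yt.common.ttypes` (per the package layout elsewhere in this repo); the field values are made up.

```python
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from pangramia.yt.common.ttypes import RichCollectionPagination

# Build a struct and serialize it with the same binary protocol the service uses.
page = RichCollectionPagination(hasNext=True, totalCount=42, page=1, pageSize=20)
page.validate()  # raises TProtocolException if any required field is unset
wbuf = TTransport.TMemoryBuffer()
page.write(TBinaryProtocol.TBinaryProtocol(wbuf))

# Deserialize from the raw bytes and verify the round trip.
rbuf = TTransport.TMemoryBuffer(wbuf.getvalue())
decoded = RichCollectionPagination()
decoded.read(TBinaryProtocol.TBinaryProtocol(rbuf))
assert decoded == page
```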

View File

@ -1 +0,0 @@
__all__ = ['ttypes', 'constants']

View File

@ -1,14 +0,0 @@
#
# Autogenerated by Thrift Compiler (0.20.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
from thrift.TRecursive import fix_spec
import sys
from .ttypes import *

View File

@ -1,254 +0,0 @@
#
# Autogenerated by Thrift Compiler (0.20.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
from thrift.TRecursive import fix_spec
import sys
from thrift.transport import TTransport
all_structs = []
class PBServiceException(TException):
"""
Attributes:
- message
- errorCode
- context
"""
def __init__(self, message=None, errorCode=None, context=None,):
super(PBServiceException, self).__setattr__('message', message)
super(PBServiceException, self).__setattr__('errorCode', errorCode)
super(PBServiceException, self).__setattr__('context', context)
def __setattr__(self, *args):
raise TypeError("can't modify immutable instance")
def __delattr__(self, *args):
raise TypeError("can't modify immutable instance")
def __hash__(self):
return hash(self.__class__) ^ hash((self.message, self.errorCode, self.context, ))
@classmethod
def read(cls, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and cls.thrift_spec is not None:
return iprot._fast_decode(None, iprot, [cls, cls.thrift_spec])
iprot.readStructBegin()
message = None
errorCode = None
context = None
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
message = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
errorCode = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.MAP:
context = {}
(_ktype1, _vtype2, _size0) = iprot.readMapBegin()
for _i4 in range(_size0):
_key5 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
_val6 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
context[_key5] = _val6
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
return cls(
message=message,
errorCode=errorCode,
context=context,
)
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('PBServiceException')
if self.message is not None:
oprot.writeFieldBegin('message', TType.STRING, 1)
oprot.writeString(self.message.encode('utf-8') if sys.version_info[0] == 2 else self.message)
oprot.writeFieldEnd()
if self.errorCode is not None:
oprot.writeFieldBegin('errorCode', TType.STRING, 2)
oprot.writeString(self.errorCode.encode('utf-8') if sys.version_info[0] == 2 else self.errorCode)
oprot.writeFieldEnd()
if self.context is not None:
oprot.writeFieldBegin('context', TType.MAP, 3)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.context))
for kiter7, viter8 in self.context.items():
oprot.writeString(kiter7.encode('utf-8') if sys.version_info[0] == 2 else kiter7)
oprot.writeString(viter8.encode('utf-8') if sys.version_info[0] == 2 else viter8)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.message is None:
raise TProtocolException(message='Required field message is unset!')
return
def __str__(self):
return repr(self)
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class PBUserException(TException):
"""
Attributes:
- message
- errorCode
- context
"""
def __init__(self, message=None, errorCode=None, context=None,):
super(PBUserException, self).__setattr__('message', message)
super(PBUserException, self).__setattr__('errorCode', errorCode)
super(PBUserException, self).__setattr__('context', context)
def __setattr__(self, *args):
raise TypeError("can't modify immutable instance")
def __delattr__(self, *args):
raise TypeError("can't modify immutable instance")
def __hash__(self):
return hash(self.__class__) ^ hash((self.message, self.errorCode, self.context, ))
@classmethod
def read(cls, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and cls.thrift_spec is not None:
return iprot._fast_decode(None, iprot, [cls, cls.thrift_spec])
iprot.readStructBegin()
message = None
errorCode = None
context = None
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
message = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
errorCode = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.MAP:
context = {}
(_ktype10, _vtype11, _size9) = iprot.readMapBegin()
for _i13 in range(_size9):
_key14 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
_val15 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
context[_key14] = _val15
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
return cls(
message=message,
errorCode=errorCode,
context=context,
)
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('PBUserException')
if self.message is not None:
oprot.writeFieldBegin('message', TType.STRING, 1)
oprot.writeString(self.message.encode('utf-8') if sys.version_info[0] == 2 else self.message)
oprot.writeFieldEnd()
if self.errorCode is not None:
oprot.writeFieldBegin('errorCode', TType.STRING, 2)
oprot.writeString(self.errorCode.encode('utf-8') if sys.version_info[0] == 2 else self.errorCode)
oprot.writeFieldEnd()
if self.context is not None:
oprot.writeFieldBegin('context', TType.MAP, 3)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.context))
for kiter16, viter17 in self.context.items():
oprot.writeString(kiter16.encode('utf-8') if sys.version_info[0] == 2 else kiter16)
oprot.writeString(viter17.encode('utf-8') if sys.version_info[0] == 2 else viter17)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.message is None:
raise TProtocolException(message='Required field message is unset!')
return
def __str__(self):
return repr(self)
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(PBServiceException)
PBServiceException.thrift_spec = (
None, # 0
(1, TType.STRING, 'message', 'UTF8', None, ), # 1
(2, TType.STRING, 'errorCode', 'UTF8', None, ), # 2
(3, TType.MAP, 'context', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None, ), # 3
)
all_structs.append(PBUserException)
PBUserException.thrift_spec = (
None, # 0
(1, TType.STRING, 'message', 'UTF8', None, ), # 1
(2, TType.STRING, 'errorCode', 'UTF8', None, ), # 2
(3, TType.MAP, 'context', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None, ), # 3
)
fix_spec(all_structs)
del all_structs
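Both exception types are immutable and carry a machine-readable `errorCode` plus a free-form `context` map. A minimal handling sketch, assuming an already-connected `client`, a `url`, and `TokenUpdateMode` imported as in the client script later in this diff:

```python
from pangramia.yt.exceptions.ttypes import PBServiceException, PBUserException

try:
    token_data = client.getOrRefreshToken(accountId="default",
                                          updateType=TokenUpdateMode.AUTO,
                                          url=url)
except PBServiceException as e:
    # Server-side failure (proxy errors, bot detection, timeouts, ...).
    print(f"service error [{e.errorCode}]: {e.message}", e.context or {})
except PBUserException as e:
    # Problem with the request itself; fix the input rather than retrying.
    print(f"user error [{e.errorCode}]: {e.message}")
```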

View File

@ -1 +0,0 @@
__all__ = ['ttypes', 'constants', 'YTTokenOpService']

View File

@ -1,14 +0,0 @@
#
# Autogenerated by Thrift Compiler (0.20.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
from thrift.TRecursive import fix_spec
import sys
from .ttypes import *

View File

@ -1,21 +0,0 @@
#
# Autogenerated by Thrift Compiler (0.20.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
from thrift.TRecursive import fix_spec
import sys
import pangramia.yt.common.ttypes
import pangramia.yt.exceptions.ttypes
import pangramia.base_service.ttypes
from thrift.transport import TTransport
all_structs = []
fix_spec(all_structs)
del all_structs

View File

@ -1 +0,0 @@
../../thrift_model/gen_py/pangramia

View File

@ -1,11 +0,0 @@
flask
psutil
PySocks>=1.7.0
python-dotenv==1.0.1
redis>=4.0.0
requests>=2.31.0
tabulate>=0.9.0
thrift>=0.16.0,<=0.20.0
waitress
yt_dlp>=2025.3.27
yt-dlp-get-pot==0.3.0
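A minimal install sketch, assuming this pinned list is saved as `requirements.txt` and installed into a fresh virtual environment:

```bash
python3 -m venv .venv && source .venv/bin/activate
pip install -r requirements.txt
```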

View File

@ -1,2 +0,0 @@
# This file is no longer needed and will be removed.
# The packaging logic has been consolidated into the root setup.py file.

View File

@ -1,9 +0,0 @@
Metadata-Version: 2.2
Name: yt_ops_services
Version: 1.6.2.dev0
Requires-Python: >=3.9
Requires-Dist: thrift<=0.20.0,>=0.16.0
Requires-Dist: python-dotenv>=1.0.0
Requires-Dist: psutil
Dynamic: requires-dist
Dynamic: requires-python

View File

@ -1,48 +0,0 @@
setup.py
./pangramia/__init__.py
./pangramia/base_service/BaseService.py
./pangramia/base_service/__init__.py
./pangramia/base_service/constants.py
./pangramia/base_service/ttypes.py
./pangramia/yt/__init__.py
./pangramia/yt/admin_ops/YTAccountsOpService.py
./pangramia/yt/admin_ops/__init__.py
./pangramia/yt/admin_ops/constants.py
./pangramia/yt/admin_ops/ttypes.py
./pangramia/yt/common/__init__.py
./pangramia/yt/common/constants.py
./pangramia/yt/common/ttypes.py
./pangramia/yt/exceptions/__init__.py
./pangramia/yt/exceptions/constants.py
./pangramia/yt/exceptions/ttypes.py
./pangramia/yt/tokens_ops/YTTokenOpService.py
./pangramia/yt/tokens_ops/__init__.py
./pangramia/yt/tokens_ops/constants.py
./pangramia/yt/tokens_ops/ttypes.py
./thrift_model/__init__.py
./thrift_model/gen_py/__init__.py
./thrift_model/gen_py/pangramia/__init__.py
./thrift_model/gen_py/pangramia/base_service/BaseService.py
./thrift_model/gen_py/pangramia/base_service/__init__.py
./thrift_model/gen_py/pangramia/base_service/constants.py
./thrift_model/gen_py/pangramia/base_service/ttypes.py
./thrift_model/gen_py/pangramia/yt/__init__.py
./thrift_model/gen_py/pangramia/yt/admin_ops/YTAccountsOpService.py
./thrift_model/gen_py/pangramia/yt/admin_ops/__init__.py
./thrift_model/gen_py/pangramia/yt/admin_ops/constants.py
./thrift_model/gen_py/pangramia/yt/admin_ops/ttypes.py
./thrift_model/gen_py/pangramia/yt/common/__init__.py
./thrift_model/gen_py/pangramia/yt/common/constants.py
./thrift_model/gen_py/pangramia/yt/common/ttypes.py
./thrift_model/gen_py/pangramia/yt/exceptions/__init__.py
./thrift_model/gen_py/pangramia/yt/exceptions/constants.py
./thrift_model/gen_py/pangramia/yt/exceptions/ttypes.py
./thrift_model/gen_py/pangramia/yt/tokens_ops/YTTokenOpService.py
./thrift_model/gen_py/pangramia/yt/tokens_ops/__init__.py
./thrift_model/gen_py/pangramia/yt/tokens_ops/constants.py
./thrift_model/gen_py/pangramia/yt/tokens_ops/ttypes.py
yt_ops_services.egg-info/PKG-INFO
yt_ops_services.egg-info/SOURCES.txt
yt_ops_services.egg-info/dependency_links.txt
yt_ops_services.egg-info/requires.txt
yt_ops_services.egg-info/top_level.txt

View File

@ -1,3 +0,0 @@
thrift<=0.20.0,>=0.16.0
python-dotenv>=1.0.0
psutil

View File

@ -1,2 +0,0 @@
pangramia
thrift_model

View File

@ -1,261 +0,0 @@
2025-04-01 14:23:28,586 - INFO - Attempting to connect to server at 85.192.30.55:9090...
2025-04-01 14:23:28,700 - INFO - Successfully connected to server
2025-04-01 14:23:28,815 - INFO - Server connection test successful
2025-04-01 14:23:28,815 - INFO - Requesting token for URL: https://www.youtube.com/watch?v=vKTVLpmvznI
2025-04-01 14:23:32,869 - INFO - Successfully received token data from server
2025-04-01 14:23:32,870 - WARNING - infoJson attribute *MISSING* in received token_data object.
2025-04-01 14:23:32,870 - WARNING - Valid info.json was NOT received from the server.
2025-04-01 14:23:32,870 - ERROR - Failed to obtain valid info.json from the server.
2025-04-01 14:40:18,685 - INFO - Attempting to connect to server at 85.192.30.55:9090...
2025-04-01 14:40:18,800 - INFO - Successfully connected to server
2025-04-01 14:40:18,914 - INFO - Server connection test successful
2025-04-01 14:40:18,915 - INFO - Requesting token for URL: https://www.youtube.com/watch?v=vKTVLpmvznI
2025-04-01 14:40:31,927 - INFO - Successfully received token data from server
2025-04-01 14:40:31,929 - INFO - Valid JSON with video data: Best of Dmitri Shostakovich - Essential Classical Music
2025-04-01 14:40:31,932 - INFO - Successfully saved info.json to info_json_vKTVLpmvznI_1743507631.json and latest.json to latest.json
2025-04-13 16:32:14,014 - INFO - Attempting to connect to server at 89.253.221.173:9090...
2025-04-13 16:32:14,129 - INFO - Successfully connected to server
2025-04-13 16:32:14,241 - INFO - Server connection test successful
2025-04-13 16:32:14,241 - INFO - Requesting token for URL: https://www.youtube.com/watch?v=i7SQ6ENOv5s&t=1012s
2025-04-13 16:32:23,236 - ERROR - Unexpected error: TSocket read 0 bytes
2025-04-13 16:32:23,243 - ERROR - Traceback (most recent call last):
File "/opt/yt-ops-services/airflow/ytdlp-ops-auth/ytdlp_ops_client.py", line 533, in main
token_data = client.getOrRefreshToken(
^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/yt-ops-services/airflow/ytdlp-ops-auth/pangramia/yt/tokens_ops/YTTokenOpService.py", line 138, in getOrRefreshToken
return self.recv_getOrRefreshToken()
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/yt-ops-services/airflow/ytdlp-ops-auth/pangramia/yt/tokens_ops/YTTokenOpService.py", line 152, in recv_getOrRefreshToken
(fname, mtype, rseqid) = iprot.readMessageBegin()
^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/aperez/micromamba/envs/browser-use/lib/python3.11/site-packages/thrift/protocol/TBinaryProtocol.py", line 134, in readMessageBegin
sz = self.readI32()
^^^^^^^^^^^^^^
File "/Users/aperez/micromamba/envs/browser-use/lib/python3.11/site-packages/thrift/protocol/TBinaryProtocol.py", line 217, in readI32
buff = self.trans.readAll(4)
^^^^^^^^^^^^^^^^^^^^^
File "/Users/aperez/micromamba/envs/browser-use/lib/python3.11/site-packages/thrift/transport/TTransport.py", line 62, in readAll
chunk = self.read(sz - have)
^^^^^^^^^^^^^^^^^^^^
File "/Users/aperez/micromamba/envs/browser-use/lib/python3.11/site-packages/thrift/transport/TTransport.py", line 283, in read
self.readFrame()
File "/Users/aperez/micromamba/envs/browser-use/lib/python3.11/site-packages/thrift/transport/TTransport.py", line 287, in readFrame
buff = self.__trans.readAll(4)
^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/aperez/micromamba/envs/browser-use/lib/python3.11/site-packages/thrift/transport/TTransport.py", line 62, in readAll
chunk = self.read(sz - have)
^^^^^^^^^^^^^^^^^^^^
File "/Users/aperez/micromamba/envs/browser-use/lib/python3.11/site-packages/thrift/transport/TSocket.py", line 166, in read
raise TTransportException(type=TTransportException.END_OF_FILE,
thrift.transport.TTransport.TTransportException: TSocket read 0 bytes
2025-04-13 16:33:43,822 - INFO - Attempting to connect to server at 89.253.221.173:9090...
2025-04-13 16:33:43,933 - INFO - Successfully connected to server
2025-04-13 16:33:44,046 - INFO - Server connection test successful
2025-04-13 16:33:44,047 - INFO - Requesting token for URL: https://www.youtube.com/watch?v=i7SQ6ENOv5s&t=1012s
2025-04-13 16:33:50,906 - ERROR - Unexpected error: TSocket read 0 bytes
2025-04-13 16:33:50,908 - ERROR - Traceback (most recent call last):
File "/opt/yt-ops-services/airflow/ytdlp-ops-auth/ytdlp_ops_client.py", line 533, in main
token_data = client.getOrRefreshToken(
^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/yt-ops-services/airflow/ytdlp-ops-auth/pangramia/yt/tokens_ops/YTTokenOpService.py", line 138, in getOrRefreshToken
return self.recv_getOrRefreshToken()
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/yt-ops-services/airflow/ytdlp-ops-auth/pangramia/yt/tokens_ops/YTTokenOpService.py", line 152, in recv_getOrRefreshToken
(fname, mtype, rseqid) = iprot.readMessageBegin()
^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/aperez/micromamba/envs/browser-use/lib/python3.11/site-packages/thrift/protocol/TBinaryProtocol.py", line 134, in readMessageBegin
sz = self.readI32()
^^^^^^^^^^^^^^
File "/Users/aperez/micromamba/envs/browser-use/lib/python3.11/site-packages/thrift/protocol/TBinaryProtocol.py", line 217, in readI32
buff = self.trans.readAll(4)
^^^^^^^^^^^^^^^^^^^^^
File "/Users/aperez/micromamba/envs/browser-use/lib/python3.11/site-packages/thrift/transport/TTransport.py", line 62, in readAll
chunk = self.read(sz - have)
^^^^^^^^^^^^^^^^^^^^
File "/Users/aperez/micromamba/envs/browser-use/lib/python3.11/site-packages/thrift/transport/TTransport.py", line 283, in read
self.readFrame()
File "/Users/aperez/micromamba/envs/browser-use/lib/python3.11/site-packages/thrift/transport/TTransport.py", line 287, in readFrame
buff = self.__trans.readAll(4)
^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/aperez/micromamba/envs/browser-use/lib/python3.11/site-packages/thrift/transport/TTransport.py", line 62, in readAll
chunk = self.read(sz - have)
^^^^^^^^^^^^^^^^^^^^
File "/Users/aperez/micromamba/envs/browser-use/lib/python3.11/site-packages/thrift/transport/TSocket.py", line 166, in read
raise TTransportException(type=TTransportException.END_OF_FILE,
thrift.transport.TTransport.TTransportException: TSocket read 0 bytes
2025-04-13 17:32:58,458 - INFO - Attempting to connect to server at 89.253.221.173:9090...
2025-04-13 17:32:58,563 - INFO - Successfully connected to server
2025-04-13 17:32:58,668 - INFO - Server connection test successful
2025-04-13 17:32:58,668 - INFO - Requesting token for URL: https://www.youtube.com/watch?v=i7SQ6ENOv5s&t=1012s
2025-04-13 17:33:07,768 - ERROR - Unexpected error: TSocket read 0 bytes
2025-04-13 17:33:07,773 - ERROR - Traceback (most recent call last):
File "/opt/yt-ops-services/airflow/ytdlp-ops-auth/ytdlp_ops_client.py", line 533, in main
token_data = client.getOrRefreshToken(
^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/yt-ops-services/airflow/ytdlp-ops-auth/pangramia/yt/tokens_ops/YTTokenOpService.py", line 138, in getOrRefreshToken
return self.recv_getOrRefreshToken()
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/yt-ops-services/airflow/ytdlp-ops-auth/pangramia/yt/tokens_ops/YTTokenOpService.py", line 152, in recv_getOrRefreshToken
(fname, mtype, rseqid) = iprot.readMessageBegin()
^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/aperez/micromamba/envs/browser-use/lib/python3.11/site-packages/thrift/protocol/TBinaryProtocol.py", line 134, in readMessageBegin
sz = self.readI32()
^^^^^^^^^^^^^^
File "/Users/aperez/micromamba/envs/browser-use/lib/python3.11/site-packages/thrift/protocol/TBinaryProtocol.py", line 217, in readI32
buff = self.trans.readAll(4)
^^^^^^^^^^^^^^^^^^^^^
File "/Users/aperez/micromamba/envs/browser-use/lib/python3.11/site-packages/thrift/transport/TTransport.py", line 62, in readAll
chunk = self.read(sz - have)
^^^^^^^^^^^^^^^^^^^^
File "/Users/aperez/micromamba/envs/browser-use/lib/python3.11/site-packages/thrift/transport/TTransport.py", line 283, in read
self.readFrame()
File "/Users/aperez/micromamba/envs/browser-use/lib/python3.11/site-packages/thrift/transport/TTransport.py", line 287, in readFrame
buff = self.__trans.readAll(4)
^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/aperez/micromamba/envs/browser-use/lib/python3.11/site-packages/thrift/transport/TTransport.py", line 62, in readAll
chunk = self.read(sz - have)
^^^^^^^^^^^^^^^^^^^^
File "/Users/aperez/micromamba/envs/browser-use/lib/python3.11/site-packages/thrift/transport/TSocket.py", line 166, in read
raise TTransportException(type=TTransportException.END_OF_FILE,
thrift.transport.TTransport.TTransportException: TSocket read 0 bytes
2025-04-13 17:36:10,276 - INFO - Attempting to connect to server at 89.253.221.173:9090...
2025-04-13 17:36:10,388 - INFO - Successfully connected to server
2025-04-13 17:36:10,501 - INFO - Server connection test successful
2025-04-13 17:36:10,501 - INFO - Requesting token for URL: https://www.youtube.com/watch?v=i7SQ6ENOv5s&t=1012s
2025-04-13 17:36:17,597 - ERROR - Unexpected error: TSocket read 0 bytes
2025-04-13 17:36:17,606 - ERROR - Traceback (most recent call last):
File "/opt/yt-ops-services/airflow/ytdlp-ops-auth/ytdlp_ops_client.py", line 543, in main
token_data = client.getOrRefreshToken(
^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/yt-ops-services/airflow/ytdlp-ops-auth/pangramia/yt/tokens_ops/YTTokenOpService.py", line 138, in getOrRefreshToken
return self.recv_getOrRefreshToken()
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/yt-ops-services/airflow/ytdlp-ops-auth/pangramia/yt/tokens_ops/YTTokenOpService.py", line 152, in recv_getOrRefreshToken
(fname, mtype, rseqid) = iprot.readMessageBegin()
^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/aperez/micromamba/envs/browser-use/lib/python3.11/site-packages/thrift/protocol/TBinaryProtocol.py", line 134, in readMessageBegin
sz = self.readI32()
^^^^^^^^^^^^^^
File "/Users/aperez/micromamba/envs/browser-use/lib/python3.11/site-packages/thrift/protocol/TBinaryProtocol.py", line 217, in readI32
buff = self.trans.readAll(4)
^^^^^^^^^^^^^^^^^^^^^
File "/Users/aperez/micromamba/envs/browser-use/lib/python3.11/site-packages/thrift/transport/TTransport.py", line 62, in readAll
chunk = self.read(sz - have)
^^^^^^^^^^^^^^^^^^^^
File "/Users/aperez/micromamba/envs/browser-use/lib/python3.11/site-packages/thrift/transport/TTransport.py", line 283, in read
self.readFrame()
File "/Users/aperez/micromamba/envs/browser-use/lib/python3.11/site-packages/thrift/transport/TTransport.py", line 287, in readFrame
buff = self.__trans.readAll(4)
^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/aperez/micromamba/envs/browser-use/lib/python3.11/site-packages/thrift/transport/TTransport.py", line 62, in readAll
chunk = self.read(sz - have)
^^^^^^^^^^^^^^^^^^^^
File "/Users/aperez/micromamba/envs/browser-use/lib/python3.11/site-packages/thrift/transport/TSocket.py", line 166, in read
raise TTransportException(type=TTransportException.END_OF_FILE,
thrift.transport.TTransport.TTransportException: TSocket read 0 bytes
2025-04-13 18:02:37,249 - INFO - Attempting to connect to server at 89.253.221.173:9090...
2025-04-13 18:02:37,361 - INFO - Successfully connected to server
2025-04-13 18:02:37,478 - INFO - Server connection test successful
2025-04-13 18:02:37,478 - INFO - Requesting token for URL: https://www.youtube.com/watch?v=i7SQ6ENOv5s&t=1012s
2025-04-13 18:02:42,457 - ERROR - Unexpected error: TSocket read 0 bytes
2025-04-13 18:02:42,467 - ERROR - Traceback (most recent call last):
File "/opt/yt-ops-services/airflow/ytdlp-ops-auth/ytdlp_ops_client.py", line 533, in main
token_data = client.getOrRefreshToken(
^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/yt-ops-services/airflow/ytdlp-ops-auth/pangramia/yt/tokens_ops/YTTokenOpService.py", line 138, in getOrRefreshToken
return self.recv_getOrRefreshToken()
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/yt-ops-services/airflow/ytdlp-ops-auth/pangramia/yt/tokens_ops/YTTokenOpService.py", line 152, in recv_getOrRefreshToken
(fname, mtype, rseqid) = iprot.readMessageBegin()
^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/aperez/micromamba/envs/browser-use/lib/python3.11/site-packages/thrift/protocol/TBinaryProtocol.py", line 134, in readMessageBegin
sz = self.readI32()
^^^^^^^^^^^^^^
File "/Users/aperez/micromamba/envs/browser-use/lib/python3.11/site-packages/thrift/protocol/TBinaryProtocol.py", line 217, in readI32
buff = self.trans.readAll(4)
^^^^^^^^^^^^^^^^^^^^^
File "/Users/aperez/micromamba/envs/browser-use/lib/python3.11/site-packages/thrift/transport/TTransport.py", line 62, in readAll
chunk = self.read(sz - have)
^^^^^^^^^^^^^^^^^^^^
File "/Users/aperez/micromamba/envs/browser-use/lib/python3.11/site-packages/thrift/transport/TTransport.py", line 283, in read
self.readFrame()
File "/Users/aperez/micromamba/envs/browser-use/lib/python3.11/site-packages/thrift/transport/TTransport.py", line 287, in readFrame
buff = self.__trans.readAll(4)
^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/aperez/micromamba/envs/browser-use/lib/python3.11/site-packages/thrift/transport/TTransport.py", line 62, in readAll
chunk = self.read(sz - have)
^^^^^^^^^^^^^^^^^^^^
File "/Users/aperez/micromamba/envs/browser-use/lib/python3.11/site-packages/thrift/transport/TSocket.py", line 166, in read
raise TTransportException(type=TTransportException.END_OF_FILE,
thrift.transport.TTransport.TTransportException: TSocket read 0 bytes
2025-04-13 18:03:16,782 - INFO - Attempting to connect to server at 89.253.221.173:9090...
2025-04-13 18:03:16,890 - INFO - Successfully connected to server
2025-04-13 18:03:16,999 - INFO - Server connection test successful
2025-04-13 18:03:17,000 - INFO - Requesting token for URL: https://www.youtube.com/watch?v=i7SQ6ENOv5s&t=1012s
2025-04-13 18:03:26,040 - ERROR - Unexpected error: TSocket read 0 bytes
2025-04-13 18:03:26,042 - ERROR - Traceback (most recent call last):
File "/opt/yt-ops-services/airflow/ytdlp-ops-auth/ytdlp_ops_client.py", line 533, in main
token_data = client.getOrRefreshToken(
^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/yt-ops-services/airflow/ytdlp-ops-auth/pangramia/yt/tokens_ops/YTTokenOpService.py", line 138, in getOrRefreshToken
return self.recv_getOrRefreshToken()
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/yt-ops-services/airflow/ytdlp-ops-auth/pangramia/yt/tokens_ops/YTTokenOpService.py", line 152, in recv_getOrRefreshToken
(fname, mtype, rseqid) = iprot.readMessageBegin()
^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/aperez/micromamba/envs/browser-use/lib/python3.11/site-packages/thrift/protocol/TBinaryProtocol.py", line 134, in readMessageBegin
sz = self.readI32()
^^^^^^^^^^^^^^
File "/Users/aperez/micromamba/envs/browser-use/lib/python3.11/site-packages/thrift/protocol/TBinaryProtocol.py", line 217, in readI32
buff = self.trans.readAll(4)
^^^^^^^^^^^^^^^^^^^^^
File "/Users/aperez/micromamba/envs/browser-use/lib/python3.11/site-packages/thrift/transport/TTransport.py", line 62, in readAll
chunk = self.read(sz - have)
^^^^^^^^^^^^^^^^^^^^
File "/Users/aperez/micromamba/envs/browser-use/lib/python3.11/site-packages/thrift/transport/TTransport.py", line 283, in read
self.readFrame()
File "/Users/aperez/micromamba/envs/browser-use/lib/python3.11/site-packages/thrift/transport/TTransport.py", line 287, in readFrame
buff = self.__trans.readAll(4)
^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/aperez/micromamba/envs/browser-use/lib/python3.11/site-packages/thrift/transport/TTransport.py", line 62, in readAll
chunk = self.read(sz - have)
^^^^^^^^^^^^^^^^^^^^
File "/Users/aperez/micromamba/envs/browser-use/lib/python3.11/site-packages/thrift/transport/TSocket.py", line 166, in read
raise TTransportException(type=TTransportException.END_OF_FILE,
thrift.transport.TTransport.TTransportException: TSocket read 0 bytes
2025-04-13 18:09:56,759 - INFO - Attempting to connect to server at 89.253.221.173:9090...
2025-04-13 18:09:56,875 - INFO - Successfully connected to server
2025-04-13 18:09:56,988 - INFO - Server connection test successful
2025-04-13 18:09:56,988 - INFO - Requesting token for URL: https://www.youtube.com/watch?v=i7SQ6ENOv5s&t=1012s
2025-04-13 18:10:05,434 - ERROR - Service exception: Bot detection triggered: ERROR: [youtube+GetPOT] i7SQ6ENOv5s: Sign in to confirm you're not a bot. Use --cookies-from-browser or --cookies for the authentication. See https://github.com/yt-dlp/yt-dlp/wiki/FAQ#how-do-i-pass-cookies-to-yt-dlp for how to manually pass cookies. Also see https://github.com/yt-dlp/yt-dlp/wiki/Extractors#exporting-youtube-cookies for tips on effectively exporting YouTube cookies
2025-04-14 13:45:44,486 - INFO - Attempting to connect to server at 89.253.221.173:9090...
2025-04-14 13:45:44,593 - INFO - Successfully connected to server
2025-04-14 13:45:44,702 - INFO - Server connection test successful
2025-04-14 13:45:44,702 - INFO - Requesting token for URL: https://www.youtube.com/watch?v=i7SQ6ENOv5s&t=1012s
2025-04-14 13:45:45,560 - ERROR - Service exception: Script execution failed: file:///app/utils/tokenUtils.js:1
import { BG, BgConfig, DescrambledChallenge } from '../node_modules/bgutils-js/dist/index.js'; // Add BgConfig, DescrambledChallenge
^^^^^^^^
SyntaxError: The requested module '../node_modules/bgutils-js/dist/index.js' does not provide an export named 'BgConfig'
at ModuleJob._instantiate (node:internal/modules/esm/module_job:123:21)
at async ModuleJob.run (node:internal/modules/esm/module_job:191:5)
at async ModuleLoader.import (node:internal/modules/esm/loader:337:24)
at async loadESM (node:internal/process/esm_loader:34:7)
at async handleMainPromise (node:internal/modules/run_main:106:12)
Node.js v18.20.8
2025-04-14 14:32:59,820 - INFO - Attempting to connect to server at 89.253.221.173:9090...
2025-04-14 14:32:59,925 - INFO - Successfully connected to server
2025-04-14 14:33:00,031 - INFO - Server connection test successful
2025-04-14 14:33:00,031 - INFO - Requesting token for URL: https://www.youtube.com/watch?v=i7SQ6ENOv5s&t=1012s
2025-04-14 14:33:12,563 - ERROR - Service exception: Bot detection triggered: ERROR: [youtube+GetPOT] i7SQ6ENOv5s: Sign in to confirm you're not a bot. Use --cookies-from-browser or --cookies for the authentication. See https://github.com/yt-dlp/yt-dlp/wiki/FAQ#how-do-i-pass-cookies-to-yt-dlp for how to manually pass cookies. Also see https://github.com/yt-dlp/yt-dlp/wiki/Extractors#exporting-youtube-cookies for tips on effectively exporting YouTube cookies
2025-04-14 14:58:31,413 - INFO - Attempting to connect to server at 89.253.221.173:9090...
2025-04-14 14:58:31,518 - INFO - Successfully connected to server
2025-04-14 14:58:31,623 - INFO - Server connection test successful
2025-04-14 14:58:31,624 - INFO - Requesting token for URL: https://www.youtube.com/watch?v=i7SQ6ENOv5s&t=1012s
2025-04-14 14:58:43,453 - ERROR - Service exception: Bot detection triggered: ERROR: [youtube+GetPOT] i7SQ6ENOv5s: Sign in to confirm you're not a bot. Use --cookies-from-browser or --cookies for the authentication. See https://github.com/yt-dlp/yt-dlp/wiki/FAQ#how-do-i-pass-cookies-to-yt-dlp for how to manually pass cookies. Also see https://github.com/yt-dlp/yt-dlp/wiki/Extractors#exporting-youtube-cookies for tips on effectively exporting YouTube cookies
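The repeated `TSocket read 0 bytes` entries above are thrift `TTransportException` errors of type `END_OF_FILE`: the server closed the framed connection mid-call. A minimal reconnect-and-retry sketch; `make_client` is a hypothetical factory (not part of this repo) that opens a socket and returns an `(client, transport)` pair:

```python
from thrift.transport.TTransport import TTransportException

def call_with_retry(make_client, fn, retries=3):
    """Reopen the connection and retry when the server drops it mid-call."""
    for attempt in range(retries):
        client, transport = make_client()  # hypothetical: returns an open (client, transport) pair
        try:
            return fn(client)
        except TTransportException as e:
            if e.type != TTransportException.END_OF_FILE or attempt == retries - 1:
                raise
        finally:
            transport.close()
```

Usage would look like `call_with_retry(connect, lambda c: c.getOrRefreshToken("default", TokenUpdateMode.AUTO, url))`.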

View File

@ -1,721 +0,0 @@
#!/usr/bin/env python3
from typing import Dict, List, Optional, Any
import argparse
import csv
import datetime
import json
import os
import re
import subprocess
import sys
import time
import uuid
import traceback
import logging
import signal
from pathlib import Path
from tabulate import tabulate
import yt_dlp
def signal_handler(sig: int, frame) -> None:
"""Handle shutdown signals gracefully."""
logger.info(f"Received signal {sig}, shutting down...")
# Clean up any resources here
sys.exit(0)
# Register signal handlers
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
# Import the patch for Thrift exceptions
try:
import os
from thrift_exceptions_patch import patch_thrift_exceptions
# Explicitly call the patch function to ensure it's applied
patch_thrift_exceptions()
print("Applied Thrift exceptions patch for compatibility")
if 'AIRFLOW_HOME' in os.environ:
print("Running in Airflow environment - patch is essential")
else:
print("Not running in Airflow environment, but patch applied anyway for consistency")
except ImportError:
print("Could not import thrift_exceptions_patch, compatibility may be affected")
print("If running in Airflow, this may cause 'immutable instance' errors")
except Exception as e:
print(f"Error applying Thrift exceptions patch: {e}")
# --- Python Path Setup ---
# Ensure the script can find necessary modules, especially Thrift-generated code.
# Assumes the script is run from the project root or the path is adjusted accordingly.
project_root = Path(__file__).parent.absolute()
gen_py_dir = project_root / "thrift_model" / "gen_py"
# Add project root to sys.path (needed for the 'pangramia' symlink)
if str(project_root) not in sys.path:
sys.path.insert(0, str(project_root))
# Verify paths for debugging
# print("Project Root:", project_root)
# print("Project Root:", project_root)
# print("Gen Py Dir:", gen_py_dir)
# print("Sys Path:", sys.path)
# --- End Python Path Setup ---
from thrift.transport import TSocket, TTransport
from thrift.protocol import TBinaryProtocol
try:
from pangramia.yt.tokens_ops import YTTokenOpService
from pangramia.yt.common.ttypes import JobTokenData, TokenUpdateMode, JobState
from pangramia.yt.exceptions.ttypes import PBServiceException, PBUserException
except ImportError as e:
print(f"Error importing Thrift-generated modules: {e}")
print("Please ensure you have run './generate-thrift.py' successfully from the project root.")
print(f"Current sys.path includes: {gen_py_dir}")
sys.exit(1)
# Configure logging
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(levelname)s - %(message)s',
handlers=[
logging.StreamHandler(),
logging.FileHandler('ytdlp_ops_client.log')
]
)
logger = logging.getLogger(__name__)
def get_info_json(token_data):
"""Get infoJson from token_data"""
if not hasattr(token_data, 'infoJson'):
logger.error("infoJson attribute missing in token_data")
raise ValueError("Server response missing infoJson")
if not token_data.infoJson or token_data.infoJson == "{}":
logger.error("Empty infoJson received from server")
raise ValueError("Empty infoJson received from server")
logger.info(f"Using infoJson from server response ({len(token_data.infoJson)} bytes)")
return token_data.infoJson
def is_valid_json(json_str):
"""Check if a string is valid JSON and not empty"""
if not json_str or json_str == "{}" or json_str == "":
logger.warning("Empty JSON string received")
return False
try:
data = json.loads(json_str)
# Check if it's an empty object
if isinstance(data, dict) and not data:
logger.warning("Empty JSON object received")
return False
# Check if it has an error field
if isinstance(data, dict) and ('error' in data or 'errorCode' in data):
# It's valid JSON but contains an error
logger.warning(f"JSON contains error: {data.get('error', 'Unknown error')} (code: {data.get('errorCode', 'none')})")
return True
# Check if it has at least some basic fields
if isinstance(data, dict) and ('id' in data or 'title' in data):
logger.info(f"Valid JSON with video data: {data.get('title', 'Unknown title')}")
return True
# Check if it has token_data which is important
if isinstance(data, dict) and 'token_data' in data and data['token_data']:
logger.info("Valid JSON with token_data")
return True
logger.warning("JSON is valid but missing expected fields")
return True
except json.JSONDecodeError as e:
logger.warning(f"Invalid JSON: {e}")
return False
except Exception as e:
logger.warning(f"Unexpected error validating JSON: {e}")
return False
def extract_video_id(url: str) -> Optional[str]:
"""Extract video ID from a YouTube URL."""
# If it's already a video ID
if re.match(r'^[a-zA-Z0-9_-]{11}$', url):
return url
# Handle youtu.be URLs
youtu_be_match = re.search(r'youtu\.be/([a-zA-Z0-9_-]{11})', url)
if youtu_be_match:
return youtu_be_match.group(1)
# Handle youtube.com URLs
youtube_match = re.search(r'(?:youtube\.com/(?:watch\?v=|embed/|v/)|youtube\.com/.*[?&]v=)([a-zA-Z0-9_-]{11})', url)
if youtube_match:
return youtube_match.group(1)
# Handle shorts URLs
shorts_match = re.search(r'youtube\.com/shorts/([a-zA-Z0-9_-]{11})', url)
if shorts_match:
return shorts_match.group(1)
return None
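# Usage sketch (hypothetical ID): extract_video_id("https://youtu.be/dQw4w9WgXcQ") and
# extract_video_id("https://www.youtube.com/watch?v=dQw4w9WgXcQ") both return
# "dQw4w9WgXcQ"; inputs without an 11-character video ID return None.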
def list_available_formats(url: str, args: argparse.Namespace) -> Optional[List[Dict[str, Any]]]:
"""List available formats for a YouTube video."""
ydl_opts = {
'quiet': not args.no_quiet if hasattr(args, 'no_quiet') else True,
'no_warnings': True,
'skip_download': True,
'extract_flat': True,
}
try:
with yt_dlp.YoutubeDL(ydl_opts) as ydl:
info = ydl.extract_info(url, download=False)
if not info:
logger.error("Could not retrieve video information")
return None
formats = info.get('formats', [])
if not formats:
logger.warning("No formats available for this video")
return None
# Create a table of available formats
format_table = []
for f in formats:
            format_table.append({
                'format_id': f.get('format_id', 'unknown'),
                'ext': f.get('ext', 'unknown'),
                'resolution': f.get('resolution', 'unknown'),
                'height': f.get('height', 0),  # kept numeric so suggest_best_formats() can sort on it
                'fps': f.get('fps', 'unknown'),
                'vcodec': f.get('vcodec', 'unknown'),
                'acodec': f.get('acodec', 'unknown'),
                'filesize': f.get('filesize', 'unknown'),
                'format_note': f.get('format_note', '')
            })
return format_table
except Exception as e:
logger.error(f"Error listing formats: {e}")
return None
def suggest_best_formats(formats: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
"""Suggest best formats based on resolution and codec."""
best = []
seen_resolutions = set()
# Prioritize higher resolutions and certain codecs
preferred_codecs = ["vp9", "avc1", "av01"] # In order of preference
    for f in sorted(formats, key=lambda x: (
        -int(x.get('height', 0) or 0),  # Higher resolution first
        preferred_codecs.index(x.get('vcodec', '').split('.')[0]) if x.get('vcodec', '').split('.')[0] in preferred_codecs else float('inf'),  # Preferred codecs
        x.get('filesize') if isinstance(x.get('filesize'), (int, float)) else 0  # Smaller filesize; non-numeric values sort as 0
    )):
resolution = f.get('resolution')
if resolution and resolution not in seen_resolutions:
best.append(f)
seen_resolutions.add(resolution)
if len(best) >= 3: # Suggest up to 3 formats
break
return best
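# Usage sketch: pair with list_available_formats(), e.g.
#   top = suggest_best_formats(list_available_formats(url, args) or [])
# Keeps at most three entries, one per distinct resolution, preferring higher
# resolutions and vp9/avc1/av01 codecs in that order.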
def load_info_json(path: str) -> Optional[Dict[str, Any]]:
"""Load and validate info.json file."""
try:
path = Path(path).resolve()
if not path.exists():
logger.error(f"Info.json file not found: {path}")
return None
with open(path, 'r') as f:
data = json.load(f)
# Basic validation
if not isinstance(data, dict):
logger.error("Invalid info.json format: not a JSON object")
return None
if 'id' not in data:
logger.warning("Info.json missing video ID")
return data
except Exception as e:
logger.error(f"Error loading info.json: {e}")
return None
def save_info_json(info_json: str, video_id: str, context_dir: str) -> Optional[str]:
"""Save info.json to disk and return the saved path."""
try:
# Ensure context directory exists
Path(context_dir).mkdir(parents=True, exist_ok=True)
# Create filename with video ID and timestamp
timestamp = int(time.time())
output_path = Path(context_dir) / f"info_json_{video_id}_{timestamp}.json"
# Write the file
with open(output_path, 'w') as f:
f.write(info_json)
# Also create a symlink or copy to the standard name for compatibility
standard_path = Path(context_dir) / f"info_json_{video_id}.json"
try:
# Try to create a symlink first (more efficient)
if os.path.exists(standard_path):
os.remove(standard_path)
os.symlink(output_path, standard_path)
except (OSError, AttributeError):
# If symlink fails (e.g., on Windows), make a copy
with open(standard_path, 'w') as f:
f.write(info_json)
# Save latest.json
latest_path = Path(context_dir) / "latest.json"
with open(latest_path, 'w') as f:
f.write(info_json)
logger.info(f"Successfully saved info.json to {output_path} and latest.json to {latest_path}")
return str(output_path)
except Exception as e:
logger.error(f"Failed to save info.json: {e}")
logger.error(traceback.format_exc())
        return None
def main():
# Create main parser
parser = argparse.ArgumentParser(description='''YtdlpOpsService Client
This client connects to the YTDLP Operations Server to generate tokens for YouTube videos.
The server performs SOCKS5 proxy connection testing with a 9-second timeout for early detection
of proxy issues. If a proxy connection fails, the server will immediately stop token generation
and return an error instead of trying other clients.''')
# Add global options
parser.add_argument('--host', default=os.getenv('YTDLP_HOST', 'localhost'),
help='Server host (default: localhost or YTDLP_HOST env)')
parser.add_argument('--port', type=int, default=int(os.getenv('YTDLP_PORT', '9090')),
help='Server port (default: 9090 or YTDLP_PORT env)')
parser.add_argument('--timeout', type=int, default=30000,
help='Timeout in milliseconds (default: 30000)')
    parser.add_argument('--timeout-sec', type=int, default=None,
                        help='Timeout in seconds (overrides --timeout when provided)')
parser.add_argument('--context-dir', default='.', help='Context directory to save info.json (default: .)')
parser.add_argument('--load-info-json', help='Path to existing info.json file to load')
parser.add_argument('--framed-transport', action='store_true',
help='Use TFramedTransport instead of TBufferedTransport for handling very large messages')
parser.add_argument('--force-framed-transport', action='store_true',
help='Force the use of TFramedTransport (recommended for large messages)')
# Create subparsers for commands
subparsers = parser.add_subparsers(dest='command', required=True, help='Commands')
# getToken command
get_token_parser = subparsers.add_parser('getToken', help='Get token for a YouTube URL',
description='''Get token for a YouTube URL
This command connects to the server to generate tokens for a YouTube video.
The server will test any configured SOCKS5 proxy with a 9-second timeout.
If the proxy connection fails, token generation will stop immediately with an error.''')
get_token_parser.add_argument('--url', required=True,
help='YouTube URL to process')
# --format removed, format/quality is determined by the server or embedded in the command
get_token_parser.add_argument('--account_id', default='default',
help='Account ID (default: default)')
get_token_parser.add_argument('--list-formats', action='store_true',
help='List available formats for the video')
args = parser.parse_args()
# Handle info.json loading
if args.load_info_json:
info_json = load_info_json(args.load_info_json)
if info_json:
print("Loaded info.json:")
print(json.dumps(info_json, indent=2))
return
transport = None
try:
# Ensure context directory exists and is writable
try:
Path(args.context_dir).mkdir(parents=True, exist_ok=True)
test_file = Path(args.context_dir) / "test.txt"
test_file.touch()
test_file.unlink()
except Exception as e:
logger.error(f"Could not access context directory {args.context_dir}: {e}")
print(f"Error: Could not access context directory {args.context_dir}")
sys.exit(1)
try:
# Check if we should use framed transport for very large messages
use_framed_transport = args.framed_transport or args.force_framed_transport or os.environ.get('USE_FRAMED_TRANSPORT', '').lower() in ('1', 'true', 'yes')
logger.debug(f"Using framed transport: {use_framed_transport}") # Changed to DEBUG
# Create socket with configurable timeout, force IPv4
socket = TSocket.TSocket(args.host, args.port, socket_family=2) # AF_INET = 2 for IPv4
# Use timeout-sec if provided, otherwise use timeout in milliseconds
if args.timeout_sec is not None:
socket.setTimeout(args.timeout_sec * 1000) # Convert seconds to milliseconds
logger.debug(f"Using timeout of {args.timeout_sec} seconds") # Changed to DEBUG
else:
socket.setTimeout(args.timeout) # Use timeout from CLI in milliseconds
logger.debug(f"Using timeout of {args.timeout} milliseconds") # Changed to DEBUG
# Always use TFramedTransport to match the server
transport = TTransport.TFramedTransport(socket)
logger.debug("Using TFramedTransport for large messages") # Changed to DEBUG
protocol = TBinaryProtocol.TBinaryProtocol(transport)
client = YTTokenOpService.Client(protocol)
logger.info(f"Attempting to connect to server at {args.host}:{args.port}...")
try:
transport.open()
logger.info("Successfully connected to server")
except TTransport.TTransportException as e:
logger.error(f"Connection failed: {str(e)}")
print(f"Error: Could not connect to server at {args.host}:{args.port}")
print(f"Reason: {str(e)}")
sys.exit(1)
# Add connection test
try:
client.ping()
logger.info("Server connection test successful")
except Exception as e:
logger.error(f"Server connection test failed: {e}")
raise
except TTransport.TTransportException as e:
logger.error(f"Connection failed: {str(e)}")
logger.error(f"Could not connect to {args.host}:{args.port}")
sys.exit(1)
except Exception as e:
logger.error(f"Connection failed: {str(e)}")
logger.error(traceback.format_exc())
sys.exit(1)
if args.command == 'getToken':
url = args.url
# format_codes removed
# Handle format listing
if args.list_formats:
formats = list_available_formats(url, args)
if formats:
print("\nAvailable formats:")
print(tabulate(formats, headers="keys", showindex=True)) # Show index for format selection
# Suggest best formats based on resolution
best_formats = suggest_best_formats(formats)
if best_formats:
print("\nSuggested formats:")
print(tabulate(best_formats, headers="keys"))
else:
print("No formats available or could not retrieve format information")
return
        # Get token for URL
        try:
logger.info(f"Requesting token for URL: {url}")
token_data = client.getOrRefreshToken(
accountId=args.account_id,
updateType=TokenUpdateMode.AUTO,
url=url
)
if not token_data:
logger.error("Received empty token data from server")
print("Error: Received empty token data from server")
sys.exit(1)
# Validate token data
if not hasattr(token_data, 'ytdlpCommand') or not token_data.ytdlpCommand:
logger.error("Token data missing required ytdlpCommand")
print("Error: Token data missing required ytdlpCommand")
sys.exit(1)
logger.info("Successfully received token data from server")
# Log all attributes of token_data for debugging
token_attrs = [attr for attr in dir(token_data) if not attr.startswith('__') and not callable(getattr(token_data, attr))]
logger.debug(f"Received token_data attributes: {token_attrs}")
# Handle case where token_data is a dict-like object
if hasattr(token_data, 'items'):
# Convert to dict if needed
token_dict = dict(token_data.items())
logger.debug(f"Token data as dict: {token_dict}")
# If we have JSON data directly in the response
if isinstance(token_dict.get('infoJson', None), str):
received_info_json = token_dict['infoJson']
elif isinstance(token_dict.get('data', None), (dict, str)):
# Try to use the data field if it exists
data = token_dict['data']
if isinstance(data, str):
received_info_json = data
else:
received_info_json = json.dumps(data)
else:
# Create info.json from available fields
info_data = {
"id": token_dict.get('id', extract_video_id(url)),
"title": token_dict.get('title', ''),
"formats": token_dict.get('formats', []),
"timestamp": int(time.time()),
"ytdlp_command": token_dict.get('ytdlpCommand', '')
}
received_info_json = json.dumps(info_data)
else:
# Handle case where token_data is a regular object
received_info_json = getattr(token_data, 'infoJson', None)
if received_info_json:
logger.debug(f"Received info.json data ({len(received_info_json)} bytes)")
if len(received_info_json) > 100:
logger.debug(f"Preview: {received_info_json[:100]}...")
else:
logger.warning("No valid info.json data found in response")
except PBServiceException as e:
logger.error(f"Service exception: {e.message}")
if hasattr(e, 'errorCode'):
if e.errorCode == "BOT_DETECTED":
print(f"Error: {e.message}")
print("\nYouTube has detected bot activity. Authentication is required.")
# Print suggestions if available
if hasattr(e, 'context') and e.context and 'suggestions' in e.context:
print("\nSuggestions:")
for i, suggestion in enumerate(e.context['suggestions'], 1):
print(f" {i}. {suggestion}")
else:
print("\nTry:")
print(" 1. Use --cookies-from-browser to pass authentication cookies")
print(" 2. Export cookies from a logged-in browser session")
print(" 3. Try a different client type (ios, android, mweb)")
print(" 4. Use a different proxy or IP address")
print(" 5. Try again later")
sys.exit(1)
elif e.errorCode in ["SOCKS5_CONNECTION_FAILED", "SOCKS5_TIMEOUT", "SOCKS5_CONNECTION_REFUSED",
"SOCKS5_CONNECTION_TIMEOUT", "SOCKS5_HOST_NOT_FOUND", "SOCKS5_NETWORK_UNREACHABLE"]:
print(f"Error: {e.message}")
print("\nSOCKS5 proxy connection failed. Please check your proxy settings.")
# Provide more specific guidance based on error code
if e.errorCode == "SOCKS5_TIMEOUT" or e.errorCode == "SOCKS5_CONNECTION_TIMEOUT":
print("The proxy server did not respond within the timeout period (9 seconds).")
print("This could indicate network congestion or a proxy server that's overloaded.")
elif e.errorCode == "SOCKS5_CONNECTION_REFUSED":
print("The proxy server actively refused the connection.")
print("This usually means the proxy server is not running or is not accepting connections on the specified port.")
elif e.errorCode == "SOCKS5_HOST_NOT_FOUND":
print("The proxy host could not be resolved.")
print("Please check that the hostname is correct and your DNS is working properly.")
elif e.errorCode == "SOCKS5_NETWORK_UNREACHABLE":
print("The network containing the proxy server is unreachable.")
print("This could indicate network routing issues or firewall restrictions.")
print("\nPossible solutions:")
print("1. Try using a different proxy server")
print("2. Check if the proxy server is running and accessible")
print("3. Verify your network connection and firewall settings")
print("4. If using a remote proxy, check if it's accessible from your location")
# Exit with a specific error code for proxy failures
sys.exit(2)
elif e.errorCode == "GLOBAL_TIMEOUT":
print(f"Error: {e.message}")
print("\nThe server timed out while processing your request.")
print("This could be due to:")
print("1. Slow network connection")
print("2. Server overload")
print("3. Complex video that takes too long to process")
print("\nTry again later or with a different video.")
sys.exit(3)
elif e.errorCode == "CLIENT_TIMEOUT":
print(f"Error: {e.message}")
print("\nA client-specific timeout occurred while processing your request.")
print("The server has stopped processing to avoid wasting resources.")
print("\nPossible solutions:")
print("1. Try again later when network conditions improve")
print("2. Try a different video")
print("3. Check your internet connection")
sys.exit(3)
else:
print(f"Error: {e.message}")
else:
print(f"Error: {e.message}")
return
except PBUserException as e:
logger.error(f"User exception: {e.message}")
print(f"Error: {e.message}")
return
except Exception as e:
logger.error(f"Unexpected error: {str(e)}")
logger.error(traceback.format_exc())
print(f"Unexpected error: {str(e)}")
sys.exit(1)
# Log the entire token_data object for debugging AFTER potential exceptions
logger.debug(f"Processing received token_data: {token_data}")
# Check if valid infoJson was received from the server
info_json = None
if hasattr(token_data, 'infoJson') and token_data.infoJson and token_data.infoJson != "{}":
if is_valid_json(token_data.infoJson):
logger.debug("Valid info.json received from server.") # Changed to DEBUG
info_json = token_data.infoJson
else:
logger.warning("Received infoJson from server, but it is not valid JSON or is empty.")
else:
logger.warning("Valid info.json was NOT received from the server.")
# Proceed only if we have valid info_json
if info_json:
# Save info.json if present in the server response
video_id = extract_video_id(url)
if not video_id:
logger.warning(f"Could not extract video ID from URL: {url}") # Keep as WARNING
video_id = f"unknown_{int(time.time())}"
try:
info_data = json.loads(info_json)
# Check if it contains an error
if isinstance(info_data, dict) and ('error' in info_data or 'errorCode' in info_data):
error_msg = info_data.get('error', 'Unknown error')
error_code = info_data.get('errorCode', 'UNKNOWN_ERROR')
logger.warning(f"infoJson contains error: {error_msg} (code: {error_code})")
# If it's a bot detection error, raise appropriate exception
if error_code == 'BOT_DETECTED' or 'bot' in error_msg.lower() or 'sign in' in error_msg.lower():
raise PBUserException(
message=f"Bot detection triggered: {error_msg}",
errorCode="BOT_DETECTION",
context={
"video_id": extract_video_id(url),
"url": url,
"suggestions": info_data.get('suggestions', ["Try different client", "Use proxy", "Wait and retry later"])
}
)
except json.JSONDecodeError as e:
# This case should ideally not happen due to is_valid_json check, but handle defensively
logger.error(f"Invalid JSON received despite initial check: {e}")
print(f"Error: Received invalid JSON data from server.")
info_json = None # Ensure we don't proceed
# If info_json is still None after checks, handle the failure case
if not info_json:
logger.error("Failed to obtain valid info.json from the server.")
print("Error: No valid video information (info.json) was received from the server.")
# Optionally, print the raw ytdlp command if available
if hasattr(token_data, 'ytdlpCommand') and token_data.ytdlpCommand:
print("\nRaw command from server (may be incomplete or require info.json):")
print(token_data.ytdlpCommand)
sys.exit(1) # Exit with error
    # --- We have valid info_json, proceed with saving and command generation ---
    try:
        info_data = json.loads(info_json)  # We know this is valid now
        # Check if it's an error response embedded in the JSON
        if isinstance(info_data, dict) and "error" in info_data:
            logger.error(f"Received error report from server: {info_json}")
            # Check if this is a bot detection error
            if (info_data.get('errorCode') == "BOT_DETECTED" or
                    "bot" in info_data.get('message', '').lower() or
                    "sign in to confirm" in info_data.get('message', '').lower() or
                    "sign in to confirm" in info_data.get('error', '').lower() or
                    "unusual traffic" in info_data.get('message', '').lower() or
                    "captcha" in info_data.get('message', '').lower() or
                    info_data.get('requires_auth') is True):
                logger.error("Bot detection error detected in info.json")
                # Raise PBServiceException for bot detection
                raise PBServiceException(
                    message=f"Bot detection triggered: {info_data.get('message', 'Authentication required')}",
                    errorCode="BOT_DETECTED",
                    context={
                        "video_id": video_id,
                        "url": url,
                        "requires_auth": True,
                        "info_data": info_data,
                        "suggestions": info_data.get('suggestions', [
                            "Use --cookies-from-browser to pass authentication cookies",
                            "Export cookies from a logged-in browser session",
                            "Try a different client type (ios, android, mweb)",
                            "Use a different proxy or IP address"
                        ])
                    }
                )
            else:
                # Raise PBServiceException for other errors
                raise PBServiceException(
                    message=f"Error extracting video info: {info_data.get('error', 'Unknown error')}",
                    errorCode=info_data.get('errorCode', "EXTRACTION_FAILED"),
                    context={"video_id": video_id, "url": url, "info_data": info_data}
                )
        # If it's a valid response, process it
        if 'title' in info_data or 'id' in info_data:
            print(f"Video info retrieved: {info_data.get('title', 'Unknown title')}")
            saved_path = save_info_json(info_json, video_id, args.context_dir)
            if saved_path:
                print(f"info.json saved to: {saved_path}")
                # Create a simpler base command using only the saved info.json and proxy
                base_cmd = f"yt-dlp --load-info-json \"{saved_path}\""  # Quote the path
                if hasattr(token_data, 'socks') and token_data.socks:
                    if token_data.socks.startswith(('socks5://', 'ss://')):
                        # Quote the proxy URL as well
                        base_cmd += f" --proxy \"{token_data.socks}\""
                # Show the format listing command
                print("\nTo list available formats:")
                format_cmd = f"{base_cmd} -F"
                print(format_cmd)
                # Show the download command (the format is embedded in info.json or chosen by yt-dlp)
                simplified_cmd = f"{base_cmd} --simulate"
                print("\nTo download (with --simulate to preview):")
                print(simplified_cmd)
                print("\nRemove --simulate to actually download")
            else:
                logger.error("Failed to save info.json file")
                print("Failed to save info.json file")
        else:
            logger.warning("info.json appears to be valid JSON but missing expected video fields")
            print("Error: Received incomplete or invalid video data")
            print("This usually indicates an authentication or access issue")
            sys.exit(1)
    except Exception as e:  # Catch errors during saving or command generation
        logger.error(f"Error processing valid info.json: {str(e)}")
        # Re-raise the exception to be handled by the main error handler
        raise
    finally:
        if transport:
            transport.close()


if __name__ == "__main__":
    main()
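For reference, the two commands the client prints come out in roughly this shape (a sketch; the info.json path and the proxy address below are placeholders, not values from a real run):

```bash
# List available formats from a previously saved info.json
yt-dlp --load-info-json "info_jsons/dQw4w9WgXcQ.info.json" -F

# Preview the download through the SOCKS5 proxy returned by the server
yt-dlp --load-info-json "info_jsons/dQw4w9WgXcQ.info.json" \
  --proxy "socks5://127.0.0.1:1086" --simulate
```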

View File

@ -1,60 +0,0 @@
import json
import logging
import re

logger = logging.getLogger(__name__)


def get_info_json(token_data):
    """Get infoJson from token_data."""
    if hasattr(token_data, 'infoJson') and token_data.infoJson:
        return token_data.infoJson
    # Log the issue for debugging
    logger.warning("infoJson attribute missing or empty in token_data")
    logger.info(f"Available attributes: {[attr for attr in dir(token_data) if not attr.startswith('__') and not callable(getattr(token_data, attr))]}")
    return "{}"


def is_valid_json(json_str):
    """Check if a string is valid, non-empty JSON."""
    if not json_str or json_str == "{}":
        return False
    try:
        data = json.loads(json_str)
        # Reject an empty object
        if isinstance(data, dict) and not data:
            return False
        # Any other well-formed JSON is accepted; core fields such as
        # 'id', 'title', or an embedded 'token_data' are the usual payload.
        return True
    except Exception as e:
        logger.warning(f"Invalid JSON: {e}")
        return False


def extract_video_id(url):
    """Extract the 11-character video ID from a YouTube URL."""
    # If it's already a video ID
    if re.match(r'^[a-zA-Z0-9_-]{11}$', url):
        return url
    # Handle youtu.be URLs
    youtu_be_match = re.search(r'youtu\.be/([a-zA-Z0-9_-]{11})', url)
    if youtu_be_match:
        return youtu_be_match.group(1)
    # Handle youtube.com URLs
    youtube_match = re.search(r'(?:youtube\.com/(?:watch\?v=|embed/|v/)|youtube\.com/.*[?&]v=)([a-zA-Z0-9_-]{11})', url)
    if youtube_match:
        return youtube_match.group(1)
    # Handle shorts URLs
    shorts_match = re.search(r'youtube\.com/shorts/([a-zA-Z0-9_-]{11})', url)
    if shorts_match:
        return shorts_match.group(1)
    return None
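The helpers are easy to exercise in isolation. A quick sketch; `token_utils` is an assumed module name, adjust it to the actual filename:

```bash
# Each call should print the same 11-character ID: dQw4w9WgXcQ
# (token_utils is an assumed module name)
python -c "from token_utils import extract_video_id; \
print(extract_video_id('https://youtu.be/dQw4w9WgXcQ')); \
print(extract_video_id('https://www.youtube.com/watch?v=dQw4w9WgXcQ')); \
print(extract_video_id('https://www.youtube.com/shorts/dQw4w9WgXcQ'))"
```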

1
ansible/.gitignore vendored Normal file
View File

@ -0,0 +1 @@
.aider*

1
ansible/.vault_pass Normal file
View File

@ -0,0 +1 @@
ytdlp-ops

9
ansible/MIGRATION.md Normal file
View File

@ -0,0 +1,9 @@
# Migration Notes
This document tracks the process of migrating the Ansible deployment.
## Guiding Principles
- No changes to business logic or core functionality are permitted during this phase.
- The focus is solely on resolving file path issues, dependency errors, and structural inconsistencies resulting from the migration of a subset of files.
- All changes should be aimed at making the existing playbooks runnable in the new environment.

View File

@ -94,7 +94,7 @@ For faster development cycles, you can deploy changes to specific parts of the c
#### Updating Only the Master Node (Fast Deploy)
To sync configuration, code, and restart services on the master node *without* rebuilding the Airflow image or pulling the `ytdlp-ops-service` image, use the `fast_deploy` flag with the master playbook. This is ideal for pushing changes to DAGs, Python code, or config files.
To sync configuration, code, and restart services on the master node *without* rebuilding the Airflow image or pulling the `ytdlp-ops-server` image, use the `fast_deploy` flag with the master playbook. This is ideal for pushing changes to DAGs, Python code, or config files.
```bash
# Run from inside the ansible/ directory
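# Invocation sketch; the exact extra-vars name for the fast_deploy flag is an
# assumption, adjust it to match the playbook
ansible-playbook playbook-master.yml --limit="af-test" -e "fast_deploy=true"
```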

View File

@ -1,5 +1,6 @@
[defaults]
inventory = inventory.ini
remote_user = alex_p
roles_path = ./roles
retry_files_enabled = False
host_key_checking = False
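After changing these defaults, it can be worth confirming which settings Ansible actually picked up; a quick check with standard `ansible-config` usage, run from the same directory as this file:

```bash
# Show only the settings that differ from Ansible's built-in defaults
ansible-config dump --only-changed
```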

View File

@ -0,0 +1,42 @@
# This file is managed by Ansible. Do not edit manually.
# SSH port configuration - listen on both standard and custom ports
Port 22
Port 22822
# Protocol settings
Protocol 2
# Host keys
HostKey /etc/ssh/ssh_host_rsa_key
HostKey /etc/ssh/ssh_host_ecdsa_key
HostKey /etc/ssh/ssh_host_ed25519_key
# Authentication settings
PermitRootLogin yes
PasswordAuthentication no
PubkeyAuthentication yes
AuthorizedKeysFile .ssh/authorized_keys
# Security settings
PermitEmptyPasswords no
ChallengeResponseAuthentication no
UsePAM yes
# Connection settings
X11Forwarding yes
PrintMotd no
AcceptEnv LANG LC_*
# Performance settings
TCPKeepAlive yes
ClientAliveInterval 60
ClientAliveCountMax 3
# Login settings
LoginGraceTime 1m
MaxStartups 10:30:60
# Logging
SyslogFacility AUTH
LogLevel INFO
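Since this file adds a custom port and is pushed by automation, it is worth validating before reloading; a minimal sketch (the service is named `ssh` on Debian/Ubuntu and `sshd` on RHEL-family hosts):

```bash
# Syntax-check the rendered config first so a typo cannot lock out SSH
sshd -t -f /etc/ssh/sshd_config && systemctl reload ssh
```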

View File

@ -0,0 +1,18 @@
# System limits configuration for better performance
# Enable memory overcommit for Redis to prevent background save failures
vm.overcommit_memory = 1
# Increase file handle limits
fs.file-max = 1000000
# Network tuning
net.core.somaxconn = 65535
net.core.netdev_max_backlog = 5000
net.core.rmem_max = 16777216
net.core.wmem_max = 16777216
net.ipv4.tcp_wmem = 4096 65536 16777216
net.ipv4.tcp_rmem = 4096 65536 16777216
net.ipv4.tcp_max_syn_backlog = 8192
net.ipv4.tcp_slow_start_after_idle = 0
net.ipv4.tcp_tw_reuse = 1
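These settings take effect without a reboot once the file is loaded; a sketch, assuming it is installed as a drop-in under /etc/sysctl.d/:

```bash
# Reload all sysctl drop-in files, then spot-check the Redis-related knob
sysctl --system
sysctl vm.overcommit_memory   # expected: vm.overcommit_memory = 1
```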

View File

@ -1,52 +0,0 @@
---
# Global variables shared across all hosts
# Docker image versions
ytdlp_ops_image: "pangramia/ytdlp-ops-airflow:latest"
airflow_image_name: "pangramia/ytdlp-ops-airflow:latest"
# Default ports
redis_port: 52909
postgres_port: 5432
ytdlp_base_port: 9090
envoy_port: 9080
envoy_admin_port: 9901
management_service_port: 9091
camoufox_base_vnc_port: 5901
# Default UID
airflow_uid: 1003
# Default directories
airflow_master_dir: "/srv/airflow_master"
airflow_worker_dir: "/srv/airflow_dl_worker"
# Docker network name
docker_network_name: "airflow_proxynet"
# Default usernames
ssh_user: "alex_p"
ansible_user: "alex_p"
# Default group
deploy_group: "ytdl"
# Default file permissions
dir_permissions: "0755"
file_permissions: "0644"
# Default rsync options
rsync_default_opts:
- "--no-owner"
- "--no-group"
- "--no-times"
- "--copy-links"
- "--copy-unsafe-links"
- "--exclude=.git*"
- "--exclude=__pycache__"
- "--exclude=*.pyc"
- "--exclude=*.log"
- "--exclude=.DS_Store"
# Docker-Hub credentials
dockerhub_user: "pangramia"

View File

@ -1,7 +1,42 @@
---
# This file is auto-generated by tools/generate-inventory.py
# Do not edit; your changes will be overwritten.
master_host_ip: 89.253.221.173
airflow_image_name: pangramia/ytdlp-ops-airflow:latest
airflow_master_dir: /srv/airflow_master
airflow_uid: 1003
airflow_worker_dir: /srv/airflow_dl_worker
ansible_user: alex_p
camoufox_base_vnc_port: 5901
deploy_group: ytdl
dir_permissions: '0755'
docker_network_name: airflow_proxynet
dockerhub_user: pangramia
envoy_admin_port: 9901
envoy_port: 9080
external_access_ips: []
file_permissions: '0644'
host_timezone: Europe/Moscow
management_service_port: 9091
master_host_ip: 89.253.223.97
postgres_port: 5432
redis_port: 52909
external_access_ips:
  []
rsync_default_opts:
- --no-owner
- --no-group
- --no-times
- --copy-links
- --copy-unsafe-links
- --exclude=.git*
- --exclude=__pycache__
- --exclude=*.pyc
- --exclude=*.log
- --exclude=.DS_Store
shadowsocks_cipher_method: aes-256-gcm
shadowsocks_fast_open: true
shadowsocks_image: ghcr.io/shadowsocks/sslocal-rust:v1.22.0
shadowsocks_local_address: 0.0.0.0
shadowsocks_mode: tcp_and_udp
shadowsocks_timeout: 20
ssh_user: alex_p
ytdlp_base_port: 9090
ytdlp_ops_image: pangramia/ytdlp-ops-server:latest
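Since this file is regenerated rather than hand-edited, a quick way to confirm a host sees the intended values is to query them through Ansible; a sketch, assuming the generated `inventory.ini` is in place:

```bash
# Print the variable as resolved for the master host
ansible af-test -m debug -a "var=master_host_ip"
```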

View File

@ -1,4 +1,8 @@
vault_redis_password: "rOhTAIlTFFylXsjhqwxnYxDChFc"
vault_postgres_password: "pgdb_pwd_A7bC2xY9zE1wV5uP"
vault_airflow_admin_password: "admin_pwd_X9yZ3aB1cE5dF7gH"
vault_airflow_admin_password: "2r234sdfrt3q454arq45q355"
vault_flower_password: "dO4eXm7UkF81OdMvT8E2tIKFtPYPCzyzwlcZ4RyOmCsmG4qzrNFqM5sNTOT9"
vault_vnc_password: "vnc_pwd_Z5xW8cV2bN4mP7lK"
vault_ss_password_1: "UCUAR7vRO/u9Zo71nfA13c+/b1MCiJpfZJo+EmEBCfA="
vault_ss_password_2: "tgtQcfjJp/A3F01g4woO0bEQoxij3CAOK/iR1OTPuF4="
vault_dockerhub_password: "dckr_pat_DmFFqwFEdXFvZlgngGY9ooBaq6o"
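These values live in the repository encrypted; a sketch of inspecting them with the password file, using standard `ansible-vault` flags and run from the `ansible/` directory:

```bash
# Decrypt to stdout without modifying the file on disk
ansible-vault view group_vars/all/vault.yml --vault-password-file .vault_pass
```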

View File

@ -1,4 +0,0 @@
---
# Variables for af-green
master_host_ip: 89.253.221.173
redis_port: 52909

View File

@ -0,0 +1,23 @@
---
# Variables for af-test
master_host_ip: 89.253.223.97
redis_port: 52909
shadowsocks_proxies:
  sslocal-rust-1087:
    server: "91.103.252.51"
    server_port: 8388
    local_port: 1087
    vault_password_key: "vault_ss_password_1"
  sslocal-rust-1086:
    server: "62.60.178.45"
    server_port: 8388
    local_port: 1086
    vault_password_key: "vault_ss_password_2"
  sslocal-rust-1081:
    server: "79.137.207.43"
    server_port: 8388
    local_port: 1081
    vault_password_key: "vault_ss_password_2"
worker_proxies:
  - "socks5://sslocal-rust-1086:1086"
  - "socks5://sslocal-rust-1081:1081"

View File

@ -0,0 +1,23 @@
---
# Variables for dl002
master_host_ip: 89.253.223.97
redis_port: 52909
shadowsocks_proxies:
  sslocal-rust-1087:
    server: "91.103.252.51"
    server_port: 8388
    local_port: 1087
    vault_password_key: "vault_ss_password_1"
  sslocal-rust-1086:
    server: "62.60.178.45"
    server_port: 8388
    local_port: 1086
    vault_password_key: "vault_ss_password_2"
  sslocal-rust-1081:
    server: "79.137.207.43"
    server_port: 8388
    local_port: 1081
    vault_password_key: "vault_ss_password_2"
worker_proxies:
  - "socks5://sslocal-rust-1081:1081"
  - "socks5://sslocal-rust-1086:1086"

View File

@ -1,6 +0,0 @@
---
# Variables for dl003
master_host_ip: 89.253.221.173
redis_port: 52909
worker_proxies:
  - "socks5://sslocal-rust-1087:1087"

Some files were not shown because too many files have changed in this diff.