#!/usr/bin/env python3
"""
Profile Management CLI Tool (v2) for yt-ops-client.
"""

import argparse
import base64
import json
import io
import logging
import os
import random
import signal
import sys
import threading
import time
from datetime import datetime
from typing import Dict, List, Optional, Any
import collections

import redis

try:
    from dotenv import load_dotenv
except ImportError:
    load_dotenv = None

try:
    from tabulate import tabulate
except ImportError:
    print("'tabulate' library not found. Please install it with: pip install tabulate", file=sys.stderr)
    tabulate = None

# Configure logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)

# Graceful shutdown handler for live mode
shutdown_event = threading.Event()

def handle_shutdown(sig, frame):
    """Sets the shutdown_event on SIGINT or SIGTERM."""
    if not shutdown_event.is_set():
        # Use print to stderr to avoid messing with the live display
        print("\nShutdown signal received. Stopping live view...", file=sys.stderr)
        shutdown_event.set()


class ProfileManager:
    """Manages profiles in Redis with configurable prefix."""

    # Profile states
    STATE_ACTIVE = "ACTIVE"
    STATE_PAUSED = "PAUSED"
    STATE_RESTING = "RESTING"
    STATE_BANNED = "BANNED"
    STATE_LOCKED = "LOCKED"
    STATE_COOLDOWN = "COOLDOWN"

    VALID_STATES = [STATE_ACTIVE, STATE_PAUSED, STATE_RESTING, STATE_BANNED, STATE_LOCKED, STATE_COOLDOWN]

    def __init__(self, redis_host='localhost', redis_port=6379,
                 redis_password=None, key_prefix='profile_mgmt_'):
        """Initialize Redis connection and key prefix."""
        self.key_prefix = key_prefix
        logger.info(f"Attempting to connect to Redis at {redis_host}:{redis_port}...")
        try:
            self.redis = redis.Redis(
                host=redis_host,
                port=redis_port,
                password=redis_password,
                decode_responses=True,
                socket_connect_timeout=5,
                socket_timeout=5
            )
            self.redis.ping()
            logger.info("Successfully connected to Redis.")
            logger.info(f"Using key prefix: {key_prefix}")
        except redis.exceptions.ConnectionError as e:
            logger.error(f"Failed to connect to Redis at {redis_host}:{redis_port}: {e}")
            sys.exit(1)

    def _profile_key(self, profile_name: str) -> str:
        """Get Redis key for a profile."""
        return f"{self.key_prefix}profile:{profile_name}"

    def _state_key(self, state: str) -> str:
        """Get Redis key for a state index."""
        return f"{self.key_prefix}state:{state}"

    def _activity_key(self, profile_name: str, activity_type: str) -> str:
        """Get Redis key for activity timeline."""
        return f"{self.key_prefix}activity:{profile_name}:{activity_type}"

    def _proxy_state_key(self, proxy_url: str) -> str:
        """Get Redis key for proxy state hash."""
        encoded_proxy = base64.urlsafe_b64encode(proxy_url.encode()).decode()
        return f"{self.key_prefix}proxy_state:{encoded_proxy}"
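
    # Note: urlsafe_b64encode yields a deterministic, Redis-key-safe token for
    # arbitrary proxy URLs (e.g. b"abc" -> "YWJj"); any "=" padding is harmless
    # inside a key, and the same URL always maps to the same key.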

    def _proxy_group_state_key(self, group_name: str) -> str:
        """Get Redis key for proxy group state hash."""
        return f"{self.key_prefix}proxy_group_state:{group_name}"

    def _profile_group_state_key(self, group_name: str) -> str:
        """Get Redis key for profile group state hash."""
        return f"{self.key_prefix}profile_group_state:{group_name}"

    def _proxy_activity_key(self, proxy_url: str, activity_type: str) -> str:
        """Get Redis key for proxy activity."""
        # Use base64 to handle special chars in URL
        encoded_proxy = base64.urlsafe_b64encode(proxy_url.encode()).decode()
        return f"{self.key_prefix}activity:proxy:{encoded_proxy}:{activity_type}"

    def _config_key(self) -> str:
        """Get Redis key for shared configuration."""
        return f"{self.key_prefix}config"

    def _pending_downloads_key(self, profile_name: str) -> str:
        """Get Redis key for a profile's pending downloads counter."""
        return f"{self.key_prefix}downloads_pending:{profile_name}"

    def increment_pending_downloads(self, profile_name: str, count: int = 1) -> Optional[int]:
        """Atomically increments the pending downloads counter for a profile."""
        if count <= 0:
            return None
        key = self._pending_downloads_key(profile_name)
        new_value = self.redis.incrby(key, count)
        # Set a TTL on the key to prevent it from living forever if something goes wrong.
        # 5 hours is a safe buffer for the 4-hour info.json validity.
        self.redis.expire(key, 5 * 3600)
        logger.info(f"Incremented pending downloads for '{profile_name}' by {count}. New count: {new_value}")
        return new_value
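
    # Note: expire() above refreshes the TTL on every increment, so the counter
    # persists while downloads keep arriving and lapses 5 hours after the last one.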

    def decrement_pending_downloads(self, profile_name: str) -> Optional[int]:
        """Atomically decrements the pending downloads counter for a profile."""
        key = self._pending_downloads_key(profile_name)

        # Only decrement if the key exists. This prevents stray calls from creating negative counters.
        if not self.redis.exists(key):
            logger.warning(f"Attempted to decrement pending downloads for '{profile_name}', but no counter exists. No action taken.")
            return None

        new_value = self.redis.decr(key)

        logger.info(f"Decremented pending downloads for '{profile_name}'. New count: {new_value}")
        if new_value <= 0:
            # Clean up the key once it reaches zero.
            self.redis.delete(key)
            logger.info(f"Pending downloads for '{profile_name}' reached zero. Cleared counter key.")

        return new_value

    def get_pending_downloads(self, profile_name: str) -> int:
        """Retrieves the current pending downloads count for a profile."""
        key = self._pending_downloads_key(profile_name)
        value = self.redis.get(key)
        return int(value) if value else 0

    def clear_pending_downloads(self, profile_name: str) -> bool:
        """Deletes the pending downloads counter key for a profile."""
        key = self._pending_downloads_key(profile_name)
        deleted_count = self.redis.delete(key)
        if deleted_count > 0:
            logger.info(f"Cleared pending downloads counter for '{profile_name}'.")
        return deleted_count > 0

    def set_config(self, key: str, value: Any) -> bool:
        """Sets a configuration value in Redis."""
        self.redis.hset(self._config_key(), key, str(value))
        logger.info(f"Set config '{key}' to '{value}'")
        return True

    def get_config(self, key: str, default: Optional[Any] = None) -> Optional[Any]:
        """Gets a configuration value from Redis."""
        value = self.redis.hget(self._config_key(), key)
        if value is None:
            return default
        return value
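
    # Values are stored stringified by set_config, so callers convert types
    # themselves, e.g. (illustrative):
    #
    #   minutes = int(manager.get_config('proxy_work_minutes', 0) or 0)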

    def _locks_key(self) -> str:
        """Get Redis key for locks hash."""
        return f"{self.key_prefix}locks"

    def _failed_lock_attempts_key(self) -> str:
        """Get Redis key for the failed lock attempts counter."""
        return f"{self.key_prefix}stats:failed_lock_attempts"

    def create_profile(self, name: str, proxy: str, initial_state: str = STATE_ACTIVE) -> bool:
        """Create a new profile."""
        if initial_state not in self.VALID_STATES:
            logger.error(f"Invalid initial state: {initial_state}")
            return False

        profile_key = self._profile_key(name)

        # Check if profile already exists
        if self.redis.exists(profile_key):
            logger.error(f"Profile '{name}' already exists")
            return False

        now = time.time()
        profile_data = {
            'name': name,
            'proxy': proxy,
            'state': initial_state,
            'created_at': str(now),
            'last_used': str(now),
            'success_count': '0',
            'failure_count': '0',
            'tolerated_error_count': '0',
            'download_count': '0',
            'download_error_count': '0',
            'global_success_count': '0',
            'global_failure_count': '0',
            'global_tolerated_error_count': '0',
            'global_download_count': '0',
            'global_download_error_count': '0',
            'lock_timestamp': '0',
            'lock_owner': '',
            'rest_until': '0',
            'last_rest_timestamp': '0',
            'wait_started_at': '0',
            'ban_reason': '',
            'rest_reason': '',
            'reason': '',
            'notes': ''
        }

        # Use pipeline for atomic operations
        pipe = self.redis.pipeline()
        pipe.hset(profile_key, mapping=profile_data)
        # Add to state index
        pipe.zadd(self._state_key(initial_state), {name: now})
        result = pipe.execute()

        if result[0] > 0:
            logger.info(f"Created profile '{name}' with proxy '{proxy}' (state: {initial_state})")
            return True
        else:
            logger.error(f"Failed to create profile '{name}'")
            return False
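
    # Minimal sketch of the create/inspect flow (illustrative; assumes a local,
    # unauthenticated Redis and a hypothetical profile name):
    #
    #   mgr = ProfileManager(key_prefix='dev_')
    #   mgr.create_profile('auth_worker_01', 'sslocal-rust-1090:1090')
    #   print(mgr.get_profile('auth_worker_01')['state'])  # -> "ACTIVE"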

    def get_profile(self, name: str) -> Optional[Dict[str, Any]]:
        """Get profile details."""
        profile_key = self._profile_key(name)
        data = self.redis.hgetall(profile_key)

        if not data:
            return None

        # Convert numeric fields
        numeric_fields = ['created_at', 'last_used', 'success_count', 'failure_count',
                          'tolerated_error_count', 'download_count', 'download_error_count',
                          'global_success_count', 'global_failure_count',
                          'global_tolerated_error_count', 'global_download_count',
                          'global_download_error_count',
                          'lock_timestamp', 'rest_until', 'last_rest_timestamp', 'wait_started_at']
        for field in numeric_fields:
            if field in data:
                try:
                    data[field] = float(data[field])
                except (ValueError, TypeError):
                    data[field] = 0.0

        return data

    def list_profiles(self, state_filter: Optional[str] = None,
                      proxy_filter: Optional[str] = None) -> List[Dict[str, Any]]:
        """List profiles with optional filtering."""
        profiles = []

        if state_filter:
            # Get profiles from specific state index
            state_key = self._state_key(state_filter)
            profile_names = self.redis.zrange(state_key, 0, -1)
        else:
            # Get all profiles by scanning keys
            pattern = self._profile_key('*')
            keys = []
            cursor = 0
            while True:
                cursor, found_keys = self.redis.scan(cursor=cursor, match=pattern, count=100)
                keys.extend(found_keys)
                if cursor == 0:
                    break
            # Assumes profile names contain no ':' (key format is '<prefix>profile:<name>').
            profile_names = [k.split(':')[-1] for k in keys]

        if not profile_names:
            return []

        # --- Batch fetch profile data to avoid timeouts ---
        all_profile_data = []
        all_pending_downloads = []
        batch_size = 500

        for i in range(0, len(profile_names), batch_size):
            batch_names = profile_names[i:i + batch_size]

            # Fetch profile hashes
            pipe = self.redis.pipeline()
            for name in batch_names:
                pipe.hgetall(self._profile_key(name))
            all_profile_data.extend(pipe.execute())

            # Fetch pending download counts
            pipe = self.redis.pipeline()
            for name in batch_names:
                pipe.get(self._pending_downloads_key(name))
            all_pending_downloads.extend(pipe.execute())
        # --- End batch fetch ---

        numeric_fields = ['created_at', 'last_used', 'success_count', 'failure_count',
                          'tolerated_error_count', 'download_count', 'download_error_count',
                          'global_success_count', 'global_failure_count',
                          'global_tolerated_error_count', 'global_download_count',
                          'global_download_error_count',
                          'lock_timestamp', 'rest_until', 'last_rest_timestamp', 'wait_started_at']

        for i, data in enumerate(all_profile_data):
            if not data:
                continue

            # Add pending downloads count to the profile data
            pending_downloads = all_pending_downloads[i]
            data['pending_downloads'] = int(pending_downloads) if pending_downloads else 0

            # Convert numeric fields
            for field in numeric_fields:
                if field in data:
                    try:
                        data[field] = float(data[field])
                    except (ValueError, TypeError):
                        data[field] = 0.0

            if proxy_filter and proxy_filter not in data.get('proxy', ''):
                continue

            profiles.append(data)

        # Sort by creation time (newest first)
        profiles.sort(key=lambda x: x.get('created_at', 0), reverse=True)
        return profiles

    def update_profile_state(self, name: str, new_state: str,
                             reason: str = '') -> bool:
        """Update profile state."""
        if new_state not in self.VALID_STATES:
            logger.error(f"Invalid state: {new_state}")
            return False

        profile = self.get_profile(name)
        if not profile:
            logger.error(f"Profile '{name}' not found")
            return False

        old_state = profile['state']
        if old_state == new_state:
            logger.info(f"Profile '{name}' already in state {new_state}")
            return True

        now = time.time()
        profile_key = self._profile_key(name)

        pipe = self.redis.pipeline()

        # Update profile hash
        updates = {'state': new_state, 'last_used': str(now)}

        if new_state == self.STATE_BANNED and reason:
            updates['ban_reason'] = reason
        elif new_state == self.STATE_RESTING:
            # Set rest_until to 1 hour from now by default
            rest_until = now + 3600
            updates['rest_until'] = str(rest_until)
            if reason:
                updates['rest_reason'] = reason

        # Handle transitions into ACTIVE state
        if new_state == self.STATE_ACTIVE:
            # Clear any resting/banned state fields
            updates['rest_until'] = '0'
            updates['rest_reason'] = ''
            updates['reason'] = ''
            updates['ban_reason'] = ''  # Clear ban reason on manual activation
            if old_state in [self.STATE_RESTING, self.STATE_COOLDOWN]:
                updates['last_rest_timestamp'] = str(now)

            # When activating a profile, ensure its proxy is also active.
            proxy_url = profile.get('proxy')
            if proxy_url:
                logger.info(f"Activating associated proxy '{proxy_url}' for profile '{name}'.")
                pipe.hset(self._proxy_state_key(proxy_url), mapping={
                    'state': self.STATE_ACTIVE,
                    'rest_until': '0',
                    'work_start_timestamp': str(now)
                })

        # If moving to any state that is not LOCKED, ensure any stale lock data is cleared.
        # This makes manual state changes (like 'activate' or 'unban') more robust.
        if new_state != self.STATE_LOCKED:
            updates['lock_owner'] = ''
            updates['lock_timestamp'] = '0'
            pipe.hdel(self._locks_key(), name)
            if old_state == self.STATE_LOCKED:
                logger.info(f"Profile '{name}' was in LOCKED state. Clearing global lock.")

        pipe.hset(profile_key, mapping=updates)

        # Remove from old state index, add to new state index
        if old_state in self.VALID_STATES:
            pipe.zrem(self._state_key(old_state), name)
        pipe.zadd(self._state_key(new_state), {name: now})

        pipe.execute()

        logger.info(f"Updated profile '{name}' from {old_state} to {new_state}")
        if reason:
            logger.info(f"Reason: {reason}")

        return True

    def update_profile_field(self, name: str, field: str, value: str) -> bool:
        """Update a specific field in profile."""
        profile = self.get_profile(name)
        if not profile:
            logger.error(f"Profile '{name}' not found")
            return False

        profile_key = self._profile_key(name)
        self.redis.hset(profile_key, field, value)
        logger.info(f"Updated profile '{name}' field '{field}' to '{value}'")
        return True

    def delete_profile(self, name: str) -> bool:
        """Delete a profile and all associated data."""
        profile = self.get_profile(name)
        if not profile:
            logger.error(f"Profile '{name}' not found")
            return False

        state = profile['state']

        pipe = self.redis.pipeline()

        # Delete profile hash
        profile_key = self._profile_key(name)
        pipe.delete(profile_key)

        # Remove from state index
        if state in self.VALID_STATES:
            pipe.zrem(self._state_key(state), name)

        # Delete activity keys
        for activity_type in ['success', 'failure', 'tolerated_error', 'download', 'download_error']:
            activity_key = self._activity_key(name, activity_type)
            pipe.delete(activity_key)

        # Remove from locks if present
        locks_key = self._locks_key()
        pipe.hdel(locks_key, name)

        pipe.execute()

        logger.info(f"Deleted profile '{name}' and all associated data")
        return True

    def delete_all_data(self) -> int:
        """Deletes all keys associated with the current manager's key_prefix."""
        logger.warning(f"Deleting all keys with prefix: {self.key_prefix}")

        keys_to_delete = []
        for key in self.redis.scan_iter(f"{self.key_prefix}*"):
            keys_to_delete.append(key)

        if not keys_to_delete:
            logger.info("No keys found to delete.")
            return 0

        total_deleted = 0
        chunk_size = 500
        for i in range(0, len(keys_to_delete), chunk_size):
            chunk = keys_to_delete[i:i + chunk_size]
            total_deleted += self.redis.delete(*chunk)

        logger.info(f"Deleted {total_deleted} key(s).")
        return total_deleted

    def record_activity(self, name: str, activity_type: str,
                        timestamp: Optional[float] = None) -> bool:
        """Record an activity event (success, failure, tolerated_error, download, download_error) for a profile."""
        if activity_type not in ['success', 'failure', 'tolerated_error', 'download', 'download_error']:
            logger.error(f"Invalid activity type: {activity_type}")
            return False

        profile = self.get_profile(name)
        if not profile:
            logger.error(f"Profile '{name}' not found")
            return False

        ts = timestamp or time.time()
        activity_key = self._activity_key(name, activity_type)

        # Add to sorted set
        self.redis.zadd(activity_key, {str(ts): ts})
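        # Members are keyed by the stringified timestamp, so two events with an
        # identical float timestamp collapse into one entry; time.time()'s
        # sub-microsecond resolution makes such collisions rare.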

        # Update counters in profile
        profile_key = self._profile_key(name)
        counter_field = f"{activity_type}_count"
        self.redis.hincrby(profile_key, counter_field, 1)
        global_counter_field = f"global_{activity_type}_count"
        self.redis.hincrby(profile_key, global_counter_field, 1)

        # Update last_used
        self.redis.hset(profile_key, 'last_used', str(ts))

        # Keep only last 1000 activities to prevent unbounded growth
        self.redis.zremrangebyrank(activity_key, 0, -1001)

        # Also record activity for the proxy
        proxy_url = profile.get('proxy')
        if proxy_url:
            proxy_activity_key = self._proxy_activity_key(proxy_url, activity_type)
            pipe = self.redis.pipeline()
            pipe.zadd(proxy_activity_key, {str(ts): ts})
            # Keep last 5000 activities per proxy (higher limit)
            pipe.zremrangebyrank(proxy_activity_key, 0, -5001)
            pipe.execute()
            logger.debug(f"Recorded {activity_type} for proxy '{proxy_url}'")

        logger.debug(f"Recorded {activity_type} for profile '{name}' at {ts}")
        return True

    def get_activity_rate(self, name: str, activity_type: str,
                          window_seconds: int) -> int:
        """Get activity count within time window."""
        if activity_type not in ['success', 'failure', 'tolerated_error', 'download', 'download_error']:
            return 0

        activity_key = self._activity_key(name, activity_type)
        now = time.time()
        start = now - window_seconds

        count = self.redis.zcount(activity_key, start, now)
        return count
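
    # Example (illustrative; hypothetical profile name): failures recorded in
    # the last 5 minutes:
    #
    #   recent_failures = mgr.get_activity_rate('auth_worker_01', 'failure', 300)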

    def get_proxy_activity_rate(self, proxy_url: str, activity_type: str,
                                window_seconds: int) -> int:
        """Get proxy activity count within time window."""
        if activity_type not in ['success', 'failure', 'tolerated_error', 'download', 'download_error']:
            return 0

        activity_key = self._proxy_activity_key(proxy_url, activity_type)
        now = time.time()
        start = now - window_seconds

        count = self.redis.zcount(activity_key, start, now)
        return count

    def reset_profile_counters(self, name: str) -> bool:
        """Resets the session counters for a single profile (does not affect global counters)."""
        profile = self.get_profile(name)
        if not profile:
            logger.error(f"Profile '{name}' not found")
            return False

        profile_key = self._profile_key(name)
        counters_to_reset = {
            'success_count': '0',
            'failure_count': '0',
            'tolerated_error_count': '0',
            'download_count': '0',
            'download_error_count': '0',
        }
        self.redis.hset(profile_key, mapping=counters_to_reset)
        logger.info(f"Reset session counters for profile '{name}'.")
        return True

    def get_failed_lock_attempts(self) -> int:
        """Get the total count of failed lock attempts from Redis."""
        count = self.redis.get(self._failed_lock_attempts_key())
        return int(count) if count else 0

    def get_global_stats(self) -> Dict[str, int]:
        """Get aggregated global stats across all profiles."""
        profiles = self.list_profiles()
        total_success = sum(int(p.get('global_success_count', 0)) for p in profiles)
        total_failure = sum(int(p.get('global_failure_count', 0)) for p in profiles)
        total_tolerated_error = sum(int(p.get('global_tolerated_error_count', 0)) for p in profiles)
        total_downloads = sum(int(p.get('global_download_count', 0)) for p in profiles)
        total_download_errors = sum(int(p.get('global_download_error_count', 0)) for p in profiles)
        return {
            'total_success': total_success,
            'total_failure': total_failure,
            'total_tolerated_error': total_tolerated_error,
            'total_downloads': total_downloads,
            'total_download_errors': total_download_errors,
        }

    def get_per_proxy_stats(self) -> Dict[str, Dict[str, Any]]:
        """Get aggregated stats per proxy."""
        profiles = self.list_profiles()
        proxy_stats = collections.defaultdict(lambda: {
            'success': 0, 'failure': 0, 'tolerated_error': 0, 'downloads': 0, 'download_errors': 0, 'profiles': 0
        })
        for p in profiles:
            proxy = p.get('proxy')
            if proxy:
                proxy_stats[proxy]['success'] += int(p.get('global_success_count', 0))
                proxy_stats[proxy]['failure'] += int(p.get('global_failure_count', 0))
                proxy_stats[proxy]['tolerated_error'] += int(p.get('global_tolerated_error_count', 0))
                proxy_stats[proxy]['downloads'] += int(p.get('global_download_count', 0))
                proxy_stats[proxy]['download_errors'] += int(p.get('global_download_error_count', 0))
                proxy_stats[proxy]['profiles'] += 1
        return dict(proxy_stats)

    def reset_global_counters(self) -> int:
        """Resets global, non-profile-specific counters."""
        logger.info("Resetting global counters...")
        keys_to_delete = [self._failed_lock_attempts_key()]

        deleted_count = 0
        if keys_to_delete:
            deleted_count = self.redis.delete(*keys_to_delete)

        logger.info(f"Deleted {deleted_count} global counter key(s).")
        return deleted_count

    def set_proxy_state(self, proxy_url: str, state: str, rest_duration_minutes: Optional[int] = None) -> bool:
        """Set the state of a proxy and propagate it to associated profiles."""
        if state not in [self.STATE_ACTIVE, self.STATE_RESTING]:
            logger.error(f"Invalid proxy state: {state}. Only ACTIVE and RESTING are supported for proxies.")
            return False

        proxy_key = self._proxy_state_key(proxy_url)
        now = time.time()
        updates = {'state': state}

        rest_until = 0
        if state == self.STATE_RESTING:
            if not rest_duration_minutes or rest_duration_minutes <= 0:
                logger.error("rest_duration_minutes is required when setting proxy state to RESTING.")
                return False
            rest_until = now + rest_duration_minutes * 60
            updates['rest_until'] = str(rest_until)
            updates['work_start_timestamp'] = '0'  # Clear work start time
        else:  # ACTIVE
            updates['rest_until'] = '0'
            updates['work_start_timestamp'] = str(now)

        self.redis.hset(proxy_key, mapping=updates)
        logger.info(f"Set proxy '{proxy_url}' state to {state}.")

        # Now, update associated profiles
        profiles_on_proxy = self.list_profiles(proxy_filter=proxy_url)
        if not profiles_on_proxy:
            return True

        if state == self.STATE_RESTING:
            logger.info(f"Propagating RESTING state to profiles on proxy '{proxy_url}'.")
            for profile in profiles_on_proxy:
                if profile['state'] == self.STATE_ACTIVE:
                    self.update_profile_state(profile['name'], self.STATE_RESTING, "Proxy resting")
                    self.update_profile_field(profile['name'], 'rest_until', str(rest_until))
        elif state == self.STATE_ACTIVE:
            logger.info(f"Propagating ACTIVE state to profiles on proxy '{proxy_url}'.")
            for profile in profiles_on_proxy:
                if profile['state'] == self.STATE_RESTING and profile.get('rest_reason') == "Proxy resting":
                    self.update_profile_state(profile['name'], self.STATE_ACTIVE, "Proxy activated")

        return True

    def get_proxy_states(self, proxy_urls: List[str]) -> Dict[str, Dict[str, Any]]:
        """Get states for multiple proxies."""
        if not proxy_urls:
            return {}

        states = {}
        batch_size = 500

        for i in range(0, len(proxy_urls), batch_size):
            batch_urls = proxy_urls[i:i + batch_size]

            pipe = self.redis.pipeline()
            for proxy_url in batch_urls:
                pipe.hgetall(self._proxy_state_key(proxy_url))
            results = pipe.execute()

            for j, data in enumerate(results):
                proxy_url = batch_urls[j]
                if data:
                    # Convert numeric fields
                    for field in ['rest_until', 'work_start_timestamp']:
                        if field in data:
                            try:
                                data[field] = float(data[field])
                            except (ValueError, TypeError):
                                data[field] = 0.0
                    states[proxy_url] = data
                else:
                    # Default to ACTIVE if no state is found
                    states[proxy_url] = {'state': self.STATE_ACTIVE, 'rest_until': 0.0, 'work_start_timestamp': 0.0}

        return states

    def set_proxy_group_membership(self, proxy_url: str, group_name: str, work_minutes: int) -> bool:
        """Records a proxy's membership in a rotation group by updating its state hash."""
        proxy_key = self._proxy_state_key(proxy_url)
        updates = {
            'group_name': group_name,
            'group_work_minutes': str(work_minutes)
        }
        self.redis.hset(proxy_key, mapping=updates)
        logger.debug(f"Set proxy '{proxy_url}' group membership to '{group_name}'.")
        return True

    def set_proxy_group_state(self, group_name: str, active_proxy_index: int, next_rotation_timestamp: float) -> bool:
        """Set the state of a proxy group."""
        group_key = self._proxy_group_state_key(group_name)
        updates = {
            'active_proxy_index': str(active_proxy_index),
            'next_rotation_timestamp': str(next_rotation_timestamp)
        }
        self.redis.hset(group_key, mapping=updates)
        logger.info(f"Set proxy group '{group_name}' state: active_index={active_proxy_index}, next_rotation at {format_timestamp(next_rotation_timestamp)}.")
        return True

    def get_proxy_group_states(self, group_names: List[str]) -> Dict[str, Dict[str, Any]]:
        """Get states for multiple proxy groups."""
        if not group_names:
            return {}

        pipe = self.redis.pipeline()
        for name in group_names:
            pipe.hgetall(self._proxy_group_state_key(name))

        results = pipe.execute()

        states = {}
        for i, data in enumerate(results):
            group_name = group_names[i]
            if data:
                # Convert numeric fields
                for field in ['active_proxy_index', 'next_rotation_timestamp']:
                    if field in data:
                        try:
                            data[field] = float(data[field])
                        except (ValueError, TypeError):
                            data[field] = 0.0
                if 'active_proxy_index' in data:
                    data['active_proxy_index'] = int(data['active_proxy_index'])
                states[group_name] = data
            else:
                # Default to empty dict if no state is found
                states[group_name] = {}

        return states

    def set_profile_group_state(self, group_name: str, state_data: Dict[str, Any]) -> bool:
        """Set or update the state of a profile group."""
        group_key = self._profile_group_state_key(group_name)
        # Ensure all values are strings for redis hset, and filter out None values
        updates = {k: str(v) for k, v in state_data.items() if v is not None}
        if not updates:
            return True  # Nothing to do
        self.redis.hset(group_key, mapping=updates)
        logger.debug(f"Set profile group '{group_name}' state: {updates}.")
        return True

    def get_profile_group_states(self, group_names: List[str]) -> Dict[str, Dict[str, Any]]:
        """Get states for multiple profile groups."""
        if not group_names:
            return {}

        pipe = self.redis.pipeline()
        for name in group_names:
            pipe.hgetall(self._profile_group_state_key(name))

        results = pipe.execute()

        states = {}
        for i, data in enumerate(results):
            group_name = group_names[i]
            if data:
                numeric_fields = {
                    'active_profile_index': int,
                    'rotate_after_requests': int,
                    'max_active_profiles': int,
                }
                for field, type_converter in numeric_fields.items():
                    if field in data:
                        try:
                            data[field] = type_converter(data[field])
                        except (ValueError, TypeError):
                            data[field] = 0
                states[group_name] = data
            else:
                states[group_name] = {}

        return states

    def lock_profile(self, owner: str, profile_prefix: Optional[str] = None, specific_profile_name: Optional[str] = None) -> Optional[Dict[str, Any]]:
        """
        Find and lock an available ACTIVE profile.
        If `specific_profile_name` is provided, it will attempt to lock only that profile.
        Otherwise, it scans for available profiles, optionally filtered by `profile_prefix`.
        """
        profiles_to_check = []
        if specific_profile_name:
            # If a specific profile is requested, we only check that one.
            profiles_to_check = [specific_profile_name]
        else:
            # Original logic: find all active profiles, optionally filtered by prefix.
            active_profiles = self.redis.zrange(self._state_key(self.STATE_ACTIVE), 0, -1)
            if not active_profiles:
                logger.warning("No active profiles available to lock.")
                self.redis.incr(self._failed_lock_attempts_key())
                return None

            if profile_prefix:
                profiles_to_check = [p for p in active_profiles if p.startswith(profile_prefix)]
                if not profiles_to_check:
                    logger.warning(f"No active profiles with prefix '{profile_prefix}' available to lock.")
                    self.redis.incr(self._failed_lock_attempts_key())
                    return None
            else:
                profiles_to_check = active_profiles

        # --- Filter by active proxy and prepare for locking ---
        full_profiles = [self.get_profile(p) for p in profiles_to_check]
        # Filter out any None profiles from a race condition with deletion, and ensure state is ACTIVE.
        # This is especially important when locking a specific profile.
        full_profiles = [p for p in full_profiles if p and p.get('proxy') and p.get('state') == self.STATE_ACTIVE]

        if not full_profiles:
            if specific_profile_name:
                logger.warning(f"Profile '{specific_profile_name}' is not eligible for locking (e.g., not ACTIVE or missing).")
            else:
                logger.warning("No active profiles available to lock after filtering.")
            self.redis.incr(self._failed_lock_attempts_key())
            return None

        unique_proxies = sorted(list(set(p['proxy'] for p in full_profiles)))
        proxy_states = self.get_proxy_states(unique_proxies)

        eligible_profiles = [
            p['name'] for p in full_profiles
            if proxy_states.get(p['proxy'], {}).get('state', self.STATE_ACTIVE) == self.STATE_ACTIVE
        ]

        if not eligible_profiles:
            logger.warning("No active profiles with an active proxy available to lock.")
            self.redis.incr(self._failed_lock_attempts_key())
            return None

        # Make selection deterministic (use Redis's sorted set order) instead of random
        # random.shuffle(active_profiles)

        locks_key = self._locks_key()

        for name in eligible_profiles:
            # Try to acquire lock atomically
            if self.redis.hsetnx(locks_key, name, owner):
                # Lock acquired. Now, re-check state to avoid race condition with enforcer.
                profile_key = self._profile_key(name)
                current_state = self.redis.hget(profile_key, 'state')

                if current_state != self.STATE_ACTIVE:
                    # Another process (enforcer) changed the state. Release lock and try next.
                    self.redis.hdel(locks_key, name)
                    logger.warning(f"Aborted lock for '{name}'; state changed from ACTIVE to '{current_state}' during lock acquisition.")
                    continue

                # State is still ACTIVE, proceed with locking.
                now = time.time()

                pipe = self.redis.pipeline()
                # Update profile state and lock info
                pipe.hset(profile_key, mapping={
                    'state': self.STATE_LOCKED,
                    'lock_owner': owner,
                    'lock_timestamp': str(now),
                    'last_used': str(now)
                })
                # Move from ACTIVE to LOCKED state index
                pipe.zrem(self._state_key(self.STATE_ACTIVE), name)
                pipe.zadd(self._state_key(self.STATE_LOCKED), {name: now})
                pipe.execute()

                logger.info(f"Locked profile '{name}' for owner '{owner}'")
                return self.get_profile(name)

        logger.warning("Could not lock any active profile (all may have been locked by other workers).")
        self.redis.incr(self._failed_lock_attempts_key())
        return None
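
    # Typical worker pattern (illustrative sketch; owner/profile names are
    # hypothetical):
    #
    #   profile = mgr.lock_profile(owner='worker-7', profile_prefix='auth_')
    #   if profile:
    #       try:
    #           ...  # do work through profile['proxy']
    #           mgr.record_activity(profile['name'], 'success')
    #       finally:
    #           mgr.unlock_profile(profile['name'], owner='worker-7',
    #                              rest_for_seconds=60)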

    def unlock_profile(self, name: str, owner: Optional[str] = None, rest_for_seconds: Optional[int] = None) -> bool:
        """Unlock a profile. If owner is provided, it must match. Can optionally put the profile into COOLDOWN state."""
        profile = self.get_profile(name)
        if not profile:
            logger.error(f"Profile '{name}' not found.")
            return False

        if profile['state'] != self.STATE_LOCKED:
            logger.warning(f"Profile '{name}' is not in LOCKED state (current: {profile['state']}).")
            # Forcibly remove from locks hash if it's inconsistent
            self.redis.hdel(self._locks_key(), name)
            return False

        if owner and profile['lock_owner'] != owner:
            logger.error(f"Owner mismatch: cannot unlock profile '{name}'. Locked by '{profile['lock_owner']}', attempted by '{owner}'.")
            return False

        now = time.time()
        profile_key = self._profile_key(name)

        pipe = self.redis.pipeline()

        updates = {
            'lock_owner': '',
            'lock_timestamp': '0',
            'last_used': str(now)
        }

        if rest_for_seconds and rest_for_seconds > 0:
            new_state = self.STATE_COOLDOWN
            rest_until = now + rest_for_seconds
            updates['rest_until'] = str(rest_until)
            updates['rest_reason'] = 'Post-task cooldown'
            logger_msg = f"Unlocked profile '{name}' into COOLDOWN for {rest_for_seconds}s."
        else:
            new_state = self.STATE_ACTIVE
            # Clear any rest-related fields when moving to ACTIVE
            updates['rest_until'] = '0'
            updates['rest_reason'] = ''
            updates['reason'] = ''
            logger_msg = f"Unlocked profile '{name}'"

        updates['state'] = new_state
        pipe.hset(profile_key, mapping=updates)

        # Move from LOCKED to the new state index
        pipe.zrem(self._state_key(self.STATE_LOCKED), name)
        pipe.zadd(self._state_key(new_state), {name: now})

        # Remove from global locks hash
        pipe.hdel(self._locks_key(), name)

        pipe.execute()

        logger.info(logger_msg)
        return True

    def cleanup_stale_locks(self, max_lock_time_seconds: int) -> int:
        """Find and unlock profiles with stale locks."""
        locks_key = self._locks_key()
        all_locks = self.redis.hgetall(locks_key)
        if not all_locks:
            logger.debug("No active locks found to clean up.")
            return 0

        now = time.time()
        cleaned_count = 0

        for name, owner in all_locks.items():
            profile = self.get_profile(name)
            if not profile:
                # Lock exists but profile doesn't. Clean up the lock.
                self.redis.hdel(locks_key, name)
                logger.warning(f"Removed stale lock for non-existent profile '{name}'")
                cleaned_count += 1
                continue

            lock_timestamp = profile.get('lock_timestamp', 0)
            if lock_timestamp > 0 and (now - lock_timestamp) > max_lock_time_seconds:
                logger.warning(f"Found stale lock for profile '{name}' (locked by '{owner}' for {now - lock_timestamp:.0f}s). Unlocking...")
                if self.unlock_profile(name):
                    cleaned_count += 1

        if cleaned_count > 0:
            logger.info(f"Cleaned up {cleaned_count} stale lock(s).")
        else:
            logger.debug("No stale locks found to clean up.")
        return cleaned_count


def format_timestamp(ts: float) -> str:
    """Format timestamp for display."""
    if not ts or ts == 0:
        return "Never"
    return datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')


def format_duration(seconds: float) -> str:
    """Format duration for display."""
    if seconds < 60:
        return f"{seconds:.0f}s"
    elif seconds < 3600:
        return f"{seconds/60:.1f}m"
    elif seconds < 86400:
        return f"{seconds/3600:.1f}h"
    else:
        return f"{seconds/86400:.1f}d"


def add_profile_manager_parser(subparsers):
    """Adds the parser for the 'profile' command."""
    parser = subparsers.add_parser(
        'profile',
        description='Manage profiles (v2).',
        formatter_class=argparse.RawTextHelpFormatter,
        help='Manage profiles (v2).'
    )

    # Common arguments for all profile manager subcommands
    common_parser = argparse.ArgumentParser(add_help=False)
    common_parser.add_argument('--env-file', help='Path to a .env file to load environment variables from.')
    common_parser.add_argument('--redis-host', default=None, help='Redis host. Defaults to REDIS_HOST or MASTER_HOST_IP env var, or localhost.')
    common_parser.add_argument('--redis-port', type=int, default=None, help='Redis port. Defaults to REDIS_PORT env var, or 6379.')
    common_parser.add_argument('--redis-password', default=None, help='Redis password. Defaults to REDIS_PASSWORD env var.')
    common_parser.add_argument('--env', default='dev', help="Environment name for Redis key prefix (e.g., 'stg', 'prod'). Used by all non-list commands, and by 'list' for single-view mode. Defaults to 'dev'.")
    common_parser.add_argument('--legacy', action='store_true', help="Use legacy key prefix ('profile_mgmt_') without environment.")
    common_parser.add_argument('--key-prefix', default=None, help='Explicit key prefix for Redis. Overrides --env, --legacy and any defaults.')
    common_parser.add_argument('--verbose', action='store_true', help='Enable verbose logging')

    subparsers = parser.add_subparsers(dest='profile_command', help='Command to execute', required=True)

    # Create command
    create_parser = subparsers.add_parser('create', help='Create a new profile', parents=[common_parser])
    create_parser.add_argument('name', help='Profile name')
    create_parser.add_argument('proxy', help='Proxy URL (e.g., sslocal-rust-1090:1090)')
    create_parser.add_argument('--state', default='ACTIVE',
                               choices=['ACTIVE', 'PAUSED', 'RESTING', 'BANNED', 'COOLDOWN'],
                               help='Initial state (default: ACTIVE)')

    # List command
    list_parser = subparsers.add_parser('list', help='List profiles', parents=[common_parser])
    list_parser.add_argument('--auth-env', help='Environment name for the Auth simulation monitor. Use with --download-env for a merged view.')
    list_parser.add_argument('--download-env', help='Environment name for the Download simulation monitor. Use with --auth-env for a merged view.')
    list_parser.add_argument('--separate-views', action='store_true', help='In dual-monitor mode, show two separate reports instead of a single merged view.')
    list_parser.add_argument('--rest-after-requests', type=int, help='(For display) Show countdown to rest based on this request limit.')
    list_parser.add_argument('--state', help='Filter by state')
    list_parser.add_argument('--proxy', help='Filter by proxy (substring match)')
    list_parser.add_argument('--show-proxy-activity', action='store_true', help='Show a detailed activity summary table for proxies. If --proxy is specified, shows details for that proxy only. Otherwise, shows a summary for all proxies.')
    list_parser.add_argument('--format', choices=['table', 'json', 'csv'], default='table',
                             help='Output format (default: table)')
    list_parser.add_argument('--live', action='store_true', help='Run continuously with a non-blinking live-updating display.')
    list_parser.add_argument('--no-blink', action='store_true', help='Use ANSI escape codes for smoother screen updates in --live mode (experimental).')
    list_parser.add_argument('--interval-seconds', type=int, default=5, help='When in --live mode, how often to refresh in seconds. Default: 5.')
    list_parser.add_argument('--hide-active-state', action='store_true', help="Display 'ACTIVE' state as blank for cleaner UI.")

    # Get command
    get_parser = subparsers.add_parser('get', help='Get profile details', parents=[common_parser])
    get_parser.add_argument('name', help='Profile name')

    # Set proxy state command
    set_proxy_state_parser = subparsers.add_parser('set-proxy-state', help='Set the state of a proxy and propagate to its profiles.', parents=[common_parser])
    set_proxy_state_parser.add_argument('proxy_url', help='Proxy URL')
    set_proxy_state_parser.add_argument('state', choices=['ACTIVE', 'RESTING'], help='New state for the proxy')
    set_proxy_state_parser.add_argument('--duration-minutes', type=int, help='Duration for the RESTING state')

    # Update state command
    update_state_parser = subparsers.add_parser('update-state', help='Update profile state', parents=[common_parser])
    update_state_parser.add_argument('name', help='Profile name')
    update_state_parser.add_argument('state', choices=['ACTIVE', 'PAUSED', 'RESTING', 'BANNED', 'LOCKED', 'COOLDOWN'],
                                     help='New state')
    update_state_parser.add_argument('--reason', help='Reason for state change (especially for BAN)')

    # Update field command
    update_field_parser = subparsers.add_parser('update-field', help='Update a profile field', parents=[common_parser])
    update_field_parser.add_argument('name', help='Profile name')
    update_field_parser.add_argument('field', help='Field name to update')
    update_field_parser.add_argument('value', help='New value')

    # Pause command (convenience)
    pause_parser = subparsers.add_parser('pause', help='Pause a profile (sets state to PAUSED).', parents=[common_parser])
    pause_parser.add_argument('name', help='Profile name')

    # Activate command (convenience)
    activate_parser = subparsers.add_parser('activate', help='Activate a profile (sets state to ACTIVE). Useful for resuming a PAUSED profile or fixing a stale LOCKED one.', parents=[common_parser])
    activate_parser.add_argument('name', help='Profile name')

    # Ban command (convenience)
    ban_parser = subparsers.add_parser('ban', help='Ban a profile (sets state to BANNED).', parents=[common_parser])
    ban_parser.add_argument('name', help='Profile name')
    ban_parser.add_argument('--reason', required=True, help='Reason for ban')

    # Unban command (convenience)
    unban_parser = subparsers.add_parser('unban', help='Unban a profile (sets state to ACTIVE and resets session counters).', parents=[common_parser])
    unban_parser.add_argument('name', help='Profile name')

    # Delete command
    delete_parser = subparsers.add_parser('delete', help='Delete a profile', parents=[common_parser])
    delete_parser.add_argument('name', help='Profile name')
    delete_parser.add_argument('--confirm', action='store_true',
                               help='Confirm deletion (required)')

    # Delete all command
    delete_all_parser = subparsers.add_parser('delete-all', help='(Destructive) Delete all profiles and data under the current key prefix.', parents=[common_parser])
    delete_all_parser.add_argument('--confirm', action='store_true', help='Confirm this highly destructive action (required)')

    # Reset global counters command
    reset_global_parser = subparsers.add_parser('reset-global-counters', help='Reset global counters (e.g., failed_lock_attempts).', parents=[common_parser])

    # Reset counters command
    reset_counters_parser = subparsers.add_parser(
        'reset-counters',
        help='Reset session counters for profiles or proxies.',
        description="Resets session-specific counters (success, failure, etc.) for one or more profiles.\n\nWARNING: This only resets Redis counters. It does not affect any data stored on disk\n(e.g., downloaded files, logs) associated with the profile or proxy.",
        formatter_class=argparse.RawTextHelpFormatter,
        parents=[common_parser]
    )
    reset_group = reset_counters_parser.add_mutually_exclusive_group(required=True)
    reset_group.add_argument('--profile-name', help='The name of the single profile to reset.')
    reset_group.add_argument('--proxy-url', help='Reset all profiles associated with this proxy.')
    reset_group.add_argument('--all-profiles', action='store_true', help='Reset all profiles in the environment.')

    # Record activity command (for testing)
    record_parser = subparsers.add_parser('record-activity', help='(Testing) Record a synthetic activity event for a profile.', parents=[common_parser])
    record_parser.add_argument('name', help='Profile name')
    record_parser.add_argument('type', choices=['success', 'failure', 'tolerated_error', 'download', 'download_error'], help='Activity type')
    record_parser.add_argument('--timestamp', type=float, help='Timestamp (default: now)')

    # Get rate command
    rate_parser = subparsers.add_parser('get-rate', help='Get activity rate for a profile', parents=[common_parser])
    rate_parser.add_argument('name', help='Profile name')
    rate_parser.add_argument('type', choices=['success', 'failure', 'tolerated_error', 'download', 'download_error'], help='Activity type')
    rate_parser.add_argument('--window', type=int, default=3600,
                             help='Time window in seconds (default: 3600)')
    return parser
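
# Wiring sketch (illustrative): a top-level CLI would attach this subcommand
# roughly as follows (the prog name is an assumption):
#
#   root = argparse.ArgumentParser(prog='yt-ops-client')
#   root_subparsers = root.add_subparsers(dest='command', required=True)
#   add_profile_manager_parser(root_subparsers)
#   args = root.parse_args(['profile', 'list', '--env', 'prod'])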


def _build_profile_groups_config(manager, profiles):
    """Builds a configuration structure for profile groups by reading state from Redis."""
    group_state_keys = [k for k in manager.redis.scan_iter(f"{manager.key_prefix}profile_group_state:*")]
    if not group_state_keys:
        return []

    group_names = [k.split(':')[-1] for k in group_state_keys]
    group_states = manager.get_profile_group_states(group_names)

    config = []
    for name, state in group_states.items():
        profiles_in_group = []
        prefix = state.get('prefix')
        if prefix:
            profiles_in_group = [p['name'] for p in profiles if p['name'].startswith(prefix)]

        config.append({
            'name': name,
            'profiles_in_group': profiles_in_group,
            **state
        })
    return config


def _render_all_proxies_activity_summary(manager, simulation_type, file=sys.stdout):
    """Renders a summary of activity rates for all proxies."""
    if not manager:
        return

    print(f"\n--- All Proxies Activity Summary ({simulation_type}) ---", file=file)

    all_profiles = manager.list_profiles()
    if not all_profiles:
        print("No profiles found to determine proxy list.", file=file)
        return

    unique_proxies = sorted(list(set(p['proxy'] for p in all_profiles if p.get('proxy'))))
    if not unique_proxies:
        print("No proxies are currently associated with any profiles.", file=file)
        return

    proxy_states = manager.get_proxy_states(unique_proxies)

    is_auth_sim = 'Auth' in simulation_type
    # Sum up all relevant activity types for the rate columns
    activity_types_to_sum = ['success', 'failure', 'tolerated_error'] if is_auth_sim else ['download', 'download_error', 'tolerated_error']

    proxy_work_minutes_str = manager.get_config('proxy_work_minutes')
    proxy_work_minutes = 0
    if proxy_work_minutes_str and proxy_work_minutes_str.isdigit():
        proxy_work_minutes = int(proxy_work_minutes_str)

    proxy_rest_minutes_str = manager.get_config('proxy_rest_duration_minutes')
    proxy_rest_minutes = 0
    if proxy_rest_minutes_str and proxy_rest_minutes_str.isdigit():
        proxy_rest_minutes = int(proxy_rest_minutes_str)

    table_data = []
    headers = ['Proxy URL', 'State', 'Policy', 'State Ends In', 'Reqs (1m)', 'Reqs (5m)', 'Reqs (1h)']

    for proxy_url in unique_proxies:
        state_data = proxy_states.get(proxy_url, {})
        state = state_data.get('state', 'N/A')
        rest_until = state_data.get('rest_until', 0)
        work_start = state_data.get('work_start_timestamp', 0)

        state_str = state
        countdown_str = "N/A"
        now = time.time()

        policy_str = "N/A"
        group_name = state_data.get('group_name')
        work_minutes_for_countdown = 0

        if group_name:
            group_work_minutes = state_data.get('group_work_minutes', 0)
            try:
                group_work_minutes = int(group_work_minutes)
                work_minutes_for_countdown = group_work_minutes
            except (ValueError, TypeError):
                group_work_minutes = 0
            policy_str = f"Group: {group_name}\n({group_work_minutes}m/proxy)"
        elif proxy_work_minutes > 0:
            policy_str = f"Work: {proxy_work_minutes}m\nRest: {proxy_rest_minutes}m"
            work_minutes_for_countdown = proxy_work_minutes

        if state == 'RESTING' and rest_until > now:
            countdown_str = format_duration(rest_until - now)
        elif state == 'ACTIVE' and work_start > 0 and work_minutes_for_countdown > 0:
            work_end_time = work_start + (work_minutes_for_countdown * 60)
            if work_end_time > now:
                countdown_str = format_duration(work_end_time - now)
            else:
                countdown_str = "Now"

        rate_1m = sum(manager.get_proxy_activity_rate(proxy_url, act_type, 60) for act_type in activity_types_to_sum)
        rate_5m = sum(manager.get_proxy_activity_rate(proxy_url, act_type, 300) for act_type in activity_types_to_sum)
        rate_1h = sum(manager.get_proxy_activity_rate(proxy_url, act_type, 3600) for act_type in activity_types_to_sum)

        row = [
            proxy_url,
            state_str,
            policy_str,
            countdown_str,
            rate_1m,
            rate_5m,
            rate_1h,
        ]
        table_data.append(row)

    if table_data:
        print(tabulate(table_data, headers=headers, tablefmt='grid'), file=file)


def _render_proxy_activity_summary(manager, proxy_url, simulation_type, file=sys.stdout):
    """Renders a detailed activity summary for a single proxy."""
    if not manager or not proxy_url:
        return

    print(f"\n--- Activity Summary for Proxy: {proxy_url} ({simulation_type}) ---", file=file)

    proxy_work_minutes_str = manager.get_config('proxy_work_minutes')
    proxy_work_minutes = 0
    if proxy_work_minutes_str and proxy_work_minutes_str.isdigit():
        proxy_work_minutes = int(proxy_work_minutes_str)

    proxy_rest_minutes_str = manager.get_config('proxy_rest_duration_minutes')
    proxy_rest_minutes = 0
    if proxy_rest_minutes_str and proxy_rest_minutes_str.isdigit():
        proxy_rest_minutes = int(proxy_rest_minutes_str)

    proxy_state_data = manager.get_proxy_states([proxy_url]).get(proxy_url, {})
    state = proxy_state_data.get('state', 'N/A')
    rest_until = proxy_state_data.get('rest_until', 0)
    work_start = proxy_state_data.get('work_start_timestamp', 0)

    policy_str = "N/A"
    group_name = proxy_state_data.get('group_name')
    work_minutes_for_countdown = 0

    if group_name:
        group_work_minutes = proxy_state_data.get('group_work_minutes', 0)
        try:
            group_work_minutes = int(group_work_minutes)
            work_minutes_for_countdown = group_work_minutes
        except (ValueError, TypeError):
            group_work_minutes = 0
        policy_str = f"Group: {group_name} ({group_work_minutes}m/proxy)"
    elif proxy_work_minutes > 0:
        policy_str = f"Work: {proxy_work_minutes}m, Rest: {proxy_rest_minutes}m"
        work_minutes_for_countdown = proxy_work_minutes

    state_str = state
    now = time.time()
    if state == 'RESTING' and rest_until > now:
        state_str += f" (ends in {format_duration(rest_until - now)})"

    active_duration_str = "N/A"
    time_until_rest_str = "N/A"
    if state == 'ACTIVE' and work_start > 0:
        active_duration_str = format_duration(now - work_start)
        if work_minutes_for_countdown > 0:
            work_end_time = work_start + (work_minutes_for_countdown * 60)
            if work_end_time > now:
                time_until_rest_str = format_duration(work_end_time - now)
            else:
                time_until_rest_str = "Now"

    summary_data = [
        ("State", state_str),
        ("Policy", policy_str),
        ("Active Since", format_timestamp(work_start)),
        ("Active Duration", active_duration_str),
        ("Time Until Rest", time_until_rest_str),
    ]
    print(tabulate(summary_data, tablefmt='grid'), file=file)

    windows = {
        "Last 1 Min": 60,
        "Last 5 Min": 300,
        "Last 1 Hour": 3600,
        "Last 24 Hours": 86400,
    }

    is_auth_sim = 'Auth' in simulation_type
    activity_types = ['success', 'failure', 'tolerated_error'] if is_auth_sim else ['download', 'download_error', 'tolerated_error']

    table_data = []
    headers = ['Window'] + [act_type.replace('_', ' ').title() for act_type in activity_types]

    for name, seconds in windows.items():
        row = [name]
        for act_type in activity_types:
            count = manager.get_proxy_activity_rate(proxy_url, act_type, seconds)
            row.append(count)
        table_data.append(row)

    if table_data:
        print(tabulate(table_data, headers=headers, tablefmt='grid'), file=file)


def _render_profile_group_summary_table(manager, all_profiles, profile_groups_config, file=sys.stdout):
    """Renders a summary table for profile groups."""
    if not profile_groups_config:
        return

    print("\nProfile Group Status:", file=file)
    table_data = []
    all_profiles_map = {p['name']: p for p in all_profiles}

    for group in profile_groups_config:
        group_name = group.get('name', 'N/A')
        profiles_in_group = group.get('profiles_in_group', [])

        active_profiles = [
            p_name for p_name in profiles_in_group
            if all_profiles_map.get(p_name, {}).get('state') in [manager.STATE_ACTIVE, manager.STATE_LOCKED]
        ]

        active_profiles_str = ', '.join(active_profiles) or "None"

        max_active = group.get('max_active_profiles', 1)
        policy_str = f"{len(active_profiles)}/{max_active} Active"

        rotate_after = group.get('rotate_after_requests')
        rotation_rule_str = f"After {rotate_after} reqs" if rotate_after else "N/A"

        reqs_left_str = "N/A"
        if rotate_after and rotate_after > 0 and active_profiles:
            # Show countdown for the first active profile
            active_profile_name = active_profiles[0]
            p = all_profiles_map.get(active_profile_name)
            if p:
                total_reqs = (
                    p.get('success_count', 0) + p.get('failure_count', 0) +
                    p.get('tolerated_error_count', 0) +
                    p.get('download_count', 0) + p.get('download_error_count', 0)
                )
                remaining_reqs = rotate_after - total_reqs
                reqs_left_str = str(max(0, int(remaining_reqs)))

        table_data.append([
            group_name,
            active_profiles_str,
            policy_str,
            rotation_rule_str,
            reqs_left_str
        ])

    headers = ['Group Name', 'Active Profile(s)', 'Policy', 'Rotation Rule', 'Requests Left ↓']
    print(tabulate(table_data, headers=headers, tablefmt='grid'), file=file)
|
|
|
|
|
|
def _render_profile_details_table(manager, args, simulation_type, profile_groups_config, file=sys.stdout):
|
|
"""Renders the detailed profile list table for a given manager."""
|
|
if not manager:
|
|
print("Manager not configured.", file=file)
|
|
return
|
|
|
|
profiles = manager.list_profiles(args.state, args.proxy)
|
|
if not profiles:
|
|
print("No profiles found matching the criteria.", file=file)
|
|
return
|
|
|
|
table_data = []
|
|
is_auth_sim = 'Auth' in simulation_type
|
|
|
|
for p in profiles:
|
|
rest_until_str = 'N/A'
|
|
last_rest_ts = p.get('last_rest_timestamp', 0)
|
|
last_rest_str = format_timestamp(last_rest_ts)
|
|
|
|
state_str = p.get('state', 'UNKNOWN')
|
|
|
|
if state_str in ['RESTING', 'COOLDOWN']:
|
|
rest_until = p.get('rest_until', 0)
|
|
if rest_until > 0:
|
|
remaining = rest_until - time.time()
|
|
if remaining > 0:
|
|
rest_until_str = f"in {format_duration(remaining)}"
|
|
else:
|
|
rest_until_str = "Ending now"
|
|
|
|
if last_rest_ts == 0:
|
|
last_rest_str = "NOW"
|
|
|
|
pending_dl_count = p.get('pending_downloads', 0)
|
|
if p.get('rest_reason') == 'waiting_downloads' and pending_dl_count > 0:
|
|
state_str += f"\n({pending_dl_count} DLs)"
|
|
|
|
countdown_str = 'N/A'
|
|
# Find the group this profile belongs to and get its rotation policy
|
|
profile_group = next((g for g in profile_groups_config if p['name'] in g.get('profiles_in_group', [])), None)
|
|
|
|
rotate_after = 0
|
|
if profile_group:
|
|
rotate_after = profile_group.get('rotate_after_requests')
|
|
elif args.rest_after_requests and args.rest_after_requests > 0:
|
|
rotate_after = args.rest_after_requests
|
|
|
|
if rotate_after > 0 and state_str != manager.STATE_COOLDOWN:
|
|
total_reqs = (
|
|
p.get('success_count', 0) + p.get('failure_count', 0) +
|
|
p.get('tolerated_error_count', 0) +
|
|
p.get('download_count', 0) + p.get('download_error_count', 0)
|
|
)
|
|
remaining_reqs = rotate_after - total_reqs
|
|
countdown_str = str(max(0, int(remaining_reqs)))

        if args.hide_active_state and state_str == 'ACTIVE':
            state_str = ''

        row = [
            p.get('name', 'MISSING_NAME'),
            p.get('proxy', 'MISSING_PROXY'),
            state_str,
            format_timestamp(p.get('last_used', 0)),
        ]

        if is_auth_sim:
            row.extend([
                p.get('success_count', 0),
                p.get('failure_count', 0),
                p.get('tolerated_error_count', 0),
                p.get('global_success_count', 0),
                p.get('global_failure_count', 0),
            ])
        else:  # is_download_sim or unknown
            row.extend([
                p.get('download_count', 0),
                p.get('download_error_count', 0),
                p.get('tolerated_error_count', 0),
                p.get('global_download_count', 0),
                p.get('global_download_error_count', 0),
            ])

        # Display the generic 'reason' field as a fallback for 'rest_reason'
        reason_str = p.get('rest_reason') or p.get('reason') or ''
        row.extend([
            countdown_str,
            rest_until_str,
            reason_str,
            p.get('ban_reason') or ''
        ])
        table_data.append(row)

    headers = ['Name', 'Proxy', 'State', 'Last Used']

    if is_auth_sim:
        headers.extend(['AuthOK', 'AuthFail', 'Skip.Err', 'Tot.AuthOK', 'Tot.AuthFail'])
    else:  # is_download_sim or unknown
        headers.extend(['DataOK', 'DownFail', 'Skip.Err', 'Tot.DataOK', 'Tot.DownFail'])

    headers.extend(['ReqCD ↓', 'RestCD ↓', 'R.Reason', 'B.Reason'])

    # Pass explicit `maxcolwidths` (supported by recent versions of the
    # `tabulate` library) so column widths are driven by actual cell content.
    # This prevents content from making columns excessively wide, but does not
    # guarantee a fixed width.
    maxwidths = None
    if table_data or headers:  # Check headers in case table_data is empty
        # Transpose the table to get columns, including headers.
        # This handles empty table_data correctly.
        columns = list(zip(*([headers] + table_data)))
        # Calculate the max width for each column based on its content.
        maxwidths = [max(len(str(x)) for x in col) if col else 0 for col in columns]
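        # e.g. headers ['Name', 'Proxy'] with row ['p1', 'http://x'] transposes to
        # columns [('Name', 'p1'), ('Proxy', 'http://x')] -> maxwidths [4, 8].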

        # Enforce a minimum width for the reason columns to keep table width stable.
        DEFAULT_REASON_WIDTH = 25
        try:
            r_reason_idx = headers.index('R.Reason')
            b_reason_idx = headers.index('B.Reason')
            maxwidths[r_reason_idx] = max(DEFAULT_REASON_WIDTH, maxwidths[r_reason_idx])
            maxwidths[b_reason_idx] = max(DEFAULT_REASON_WIDTH, maxwidths[b_reason_idx])
        except (ValueError, IndexError):
            # This should not happen if headers are constructed as expected.
            pass

    print(tabulate(table_data, headers=headers, tablefmt='grid', maxcolwidths=maxwidths), file=file)


def _render_simulation_view(title, manager, args, file=sys.stdout):
    """Helper function to render the list of profiles for a single simulation environment."""
    if not manager:
        print(f"\n--- {title} (Environment Not Configured) ---", file=file)
        return 0

    print(f"\n--- {title} ---", file=file)
    profiles = manager.list_profiles(args.state, args.proxy)

    if args.format == 'json':
        print(json.dumps(profiles, indent=2, default=str), file=file)
        return 0
    elif args.format == 'csv':
        if profiles:
            headers = profiles[0].keys()
            print(','.join(headers), file=file)
            for p in profiles:
                # Plain join: values containing commas are not quoted or escaped.
                print(','.join(str(p.get(h, '')) for h in headers), file=file)
        return 0

    # 'tabulate' is only needed for the table format, so check it here,
    # after the JSON/CSV branches have had a chance to run.
    if not tabulate:
        print("'tabulate' library is required for table format. Please install it.", file=sys.stderr)
        return 1

    # --- Table Format with Summaries ---

    if args.show_proxy_activity:
        if args.proxy:
            _render_proxy_activity_summary(manager, args.proxy, title, file=file)
        else:
            _render_all_proxies_activity_summary(manager, title, file=file)

    profile_groups_config = _build_profile_groups_config(manager, profiles)
    _render_profile_group_summary_table(manager, profiles, profile_groups_config, file=file)

    failed_lock_attempts = manager.get_failed_lock_attempts()
    global_stats = manager.get_global_stats()
    per_proxy_stats = manager.get_per_proxy_stats()

    unique_proxies = sorted(per_proxy_stats.keys())
    proxy_states = manager.get_proxy_states(unique_proxies)

    # Build global summary
    total_reqs = global_stats['total_success'] + global_stats['total_failure']
    success_rate = (global_stats['total_success'] / total_reqs * 100) if total_reqs > 0 else 100
    global_summary_str = (
        f"Total Requests: {total_reqs} | "
        f"Success: {global_stats['total_success']} | "
        f"Failure: {global_stats['total_failure']} | "
        f"Tolerated Error: {global_stats['total_tolerated_error']} | "
        f"Downloads: {global_stats['total_downloads']} | "
        f"Download Errors: {global_stats.get('total_download_errors', 0)} | "
        f"Success Rate: {success_rate:.2f}% | "
        f"Failed Lock Attempts: {failed_lock_attempts}"
    )
    print("Global Stats:", global_summary_str, file=file)

    # Build per-proxy summary
    if per_proxy_stats:
        print("\nPer-Proxy Stats:", file=file)
        proxy_table_data = []
        for proxy_url in unique_proxies:
            stats = per_proxy_stats[proxy_url]
            state_info = proxy_states.get(proxy_url, {})
            state = state_info.get('state', 'ACTIVE')

            cooldown_str = 'N/A'
            if state == 'RESTING':
                rest_until = state_info.get('rest_until', 0)
                if rest_until > time.time():
                    cooldown_str = f"in {format_duration(rest_until - time.time())}"
                else:
                    cooldown_str = "Ending now"

            proxy_total_auth = stats['success'] + stats['failure']
            proxy_total_downloads = stats['downloads'] + stats['download_errors']
            proxy_total_reqs = proxy_total_auth + proxy_total_downloads
            proxy_success_rate = (stats['success'] / proxy_total_auth * 100) if proxy_total_auth > 0 else 100

            proxy_table_data.append([
                proxy_url,
                state,
                cooldown_str,
                stats['profiles'],
                proxy_total_reqs,
                stats['success'],
                stats['failure'],
                stats['tolerated_error'],
                stats['downloads'],
                stats['download_errors'],
                f"{proxy_success_rate:.1f}%"
            ])
        proxy_headers = ['Proxy', 'State', 'Cooldown', 'Profiles', 'Total Reqs', 'AuthOK', 'AuthFail', 'Skip.Err', 'DataOK', 'DownFail', 'OK %']
        print(tabulate(proxy_table_data, headers=proxy_headers, tablefmt='grid'), file=file)

    print("\nProfile Details:", file=file)
    _render_profile_details_table(manager, args, title, profile_groups_config, file=file)
    return 0


def _render_merged_view(auth_manager, download_manager, args, file=sys.stdout):
    """Renders a merged, unified view for both auth and download simulations."""
    # --- 1. Fetch ALL data first to prevent delays during rendering ---
    auth_stats = auth_manager.get_global_stats()
    auth_failed_locks = auth_manager.get_failed_lock_attempts()
    dl_stats = download_manager.get_global_stats()
    dl_failed_locks = download_manager.get_failed_lock_attempts()

    auth_proxy_stats = auth_manager.get_per_proxy_stats()
    dl_proxy_stats = download_manager.get_per_proxy_stats()
    all_proxies = sorted(set(auth_proxy_stats.keys()) | set(dl_proxy_stats.keys()))

    auth_proxy_states, dl_proxy_states = {}, {}
    if all_proxies:
        auth_proxy_states = auth_manager.get_proxy_states(all_proxies)
        dl_proxy_states = download_manager.get_proxy_states(all_proxies)

    auth_profiles = auth_manager.list_profiles(args.state, args.proxy)
    auth_groups_config = _build_profile_groups_config(auth_manager, auth_profiles)

    dl_profiles = download_manager.list_profiles(args.state, args.proxy)
    dl_groups_config = _build_profile_groups_config(download_manager, dl_profiles)

    # --- 2. Prepare all display data using the fetched information ---
    total_reqs = auth_stats['total_success'] + auth_stats['total_failure']
    success_rate = (auth_stats['total_success'] / total_reqs * 100) if total_reqs > 0 else 100

    total_dls = dl_stats['total_downloads'] + dl_stats['total_download_errors']
    dl_success_rate = (dl_stats['total_downloads'] / total_dls * 100) if total_dls > 0 else 100

    global_summary_str = (
        f"Auth: {total_reqs} reqs ({auth_stats['total_success']} OK, {auth_stats['total_failure']} Fail, {auth_stats['total_tolerated_error']} Tol.Err) | "
        f"OK Rate: {success_rate:.2f}% | "
        f"Failed Locks: {auth_failed_locks} || "
        f"Download: {total_dls} attempts ({dl_stats['total_downloads']} OK, {dl_stats['total_download_errors']} Fail) | "
        f"OK Rate: {dl_success_rate:.2f}% | "
        f"Failed Locks: {dl_failed_locks}"
    )

    proxy_table_data = []
    if all_proxies:
        for proxy in all_proxies:
            astats = auth_proxy_stats.get(proxy, {})
            dstats = dl_proxy_stats.get(proxy, {})
            astate = auth_proxy_states.get(proxy, {})
            dstate = dl_proxy_states.get(proxy, {})

            state_str = f"{astate.get('state', 'N/A')} / {dstate.get('state', 'N/A')}"

            proxy_table_data.append([
                proxy,
                state_str,
                astats.get('profiles', 0),
                dstats.get('profiles', 0),
                astats.get('success', 0),
                astats.get('failure', 0),
                astats.get('tolerated_error', 0),
                dstats.get('downloads', 0),
                dstats.get('download_errors', 0),
                dstats.get('tolerated_error', 0)
            ])

    # --- 3. Render everything to the buffer at once ---
    print("--- Global Simulation Stats ---", file=file)
    print(global_summary_str, file=file)

    if args.show_proxy_activity:
        if args.proxy:
            _render_proxy_activity_summary(auth_manager, args.proxy, "Auth", file=file)
            _render_proxy_activity_summary(download_manager, args.proxy, "Download", file=file)
        else:
            # In the merged view, it makes sense to show both summaries if requested.
            _render_all_proxies_activity_summary(auth_manager, "Auth", file=file)
            _render_all_proxies_activity_summary(download_manager, "Download", file=file)

    if all_proxies:
        print("\n--- Per-Proxy Stats (Merged) ---", file=file)
        proxy_headers = ['Proxy', 'State (A/D)', 'Profiles (A)', 'Profiles (D)', 'AuthOK', 'AuthFail', 'Skip.Err(A)', 'DataOK', 'DownFail', 'Skip.Err(D)']
        print(tabulate(proxy_table_data, headers=proxy_headers, tablefmt='grid'), file=file)

    print(f"\n--- Auth Simulation Profile Details ({args.auth_env}) ---", file=file)
    _render_profile_group_summary_table(auth_manager, auth_profiles, auth_groups_config, file=file)
    _render_profile_details_table(auth_manager, args, "Auth", auth_groups_config, file=file)

    print(f"\n--- Download Simulation Profile Details ({args.download_env}) ---", file=file)
    _render_profile_group_summary_table(download_manager, dl_profiles, dl_groups_config, file=file)
    _render_profile_details_table(download_manager, args, "Download", dl_groups_config, file=file)

    return 0


def _print_profile_list(manager, args, title="Profile Status"):
    """Helper function to print the list of profiles in the desired format."""
    return _render_simulation_view(title, manager, args, file=sys.stdout)


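# Example invocations (illustrative only; flag spellings assume argparse's usual
# underscore-to-dash mapping of the attribute names used below):
#   <cli> profile list --env dev --format json
#   <cli> profile list --auth-env dev --download-env prod --live
#   <cli> profile ban <name> --reason "Too many failures"
#   <cli> profile delete-all --confirm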
def main_profile_manager(args):
    """Main dispatcher for the 'profile' command."""
    if load_dotenv:
        env_file = args.env_file
        if not env_file and args.env and '.env' in args.env and os.path.exists(args.env):
            print(f"WARNING: --env should be an environment name (e.g., 'dev'), not a file path. Treating '{args.env}' as --env-file. The environment name will default to 'dev'.", file=sys.stderr)
            env_file = args.env
            args.env = 'dev'

        was_loaded = load_dotenv(env_file)
        if was_loaded:
            print(f"Loaded environment variables from {env_file or '.env file'}", file=sys.stderr)
        elif args.env_file:
            print(f"ERROR: The specified --env-file was not found: {args.env_file}", file=sys.stderr)
            return 1

    if args.redis_host is None:
        args.redis_host = os.getenv('REDIS_HOST', os.getenv('MASTER_HOST_IP', 'localhost'))
    if args.redis_port is None:
        args.redis_port = int(os.getenv('REDIS_PORT', 6379))
    if args.redis_password is None:
        args.redis_password = os.getenv('REDIS_PASSWORD')

    if args.verbose:
        logging.getLogger().setLevel(logging.DEBUG)

    if args.key_prefix:
        key_prefix = args.key_prefix
    elif args.legacy:
        key_prefix = 'profile_mgmt_'
    else:
        key_prefix = f"{args.env}_profile_mgmt_"

    manager = ProfileManager(
        redis_host=args.redis_host,
        redis_port=args.redis_port,
        redis_password=args.redis_password,
        key_prefix=key_prefix
    )

    if args.profile_command == 'create':
        success = manager.create_profile(args.name, args.proxy, args.state)
        return 0 if success else 1

    elif args.profile_command == 'list':
        is_dual_mode = args.auth_env and args.download_env

        def _create_manager(env_name, is_for_dual_mode):
            if not env_name:
                return None

            # For dual mode, we ignore --legacy and --key-prefix from the CLI and
            # derive the prefix from the env name. This is opinionated but makes
            # dual-mode behavior predictable.
            if is_for_dual_mode:
                key_prefix = f"{env_name}_profile_mgmt_"
            else:
                # Single mode respects all CLI flags
                if args.key_prefix:
                    key_prefix = args.key_prefix
                elif args.legacy:
                    key_prefix = 'profile_mgmt_'
                else:
                    key_prefix = f"{env_name}_profile_mgmt_"

            return ProfileManager(
                redis_host=args.redis_host, redis_port=args.redis_port,
                redis_password=args.redis_password, key_prefix=key_prefix
            )

        if not args.live:
            if is_dual_mode and not args.separate_views:
                auth_manager = _create_manager(args.auth_env, is_for_dual_mode=True)
                download_manager = _create_manager(args.download_env, is_for_dual_mode=True)
                return _render_merged_view(auth_manager, download_manager, args)
            elif is_dual_mode and args.separate_views:
                auth_manager = _create_manager(args.auth_env, is_for_dual_mode=True)
                download_manager = _create_manager(args.download_env, is_for_dual_mode=True)
                _render_simulation_view(f"Auth Simulation ({args.auth_env})", auth_manager, args)
                _render_simulation_view(f"Download Simulation ({args.download_env})", download_manager, args)
                return 0
            else:
                # Single view mode
                single_env = args.auth_env or args.download_env or args.env
                manager = _create_manager(single_env, is_for_dual_mode=False)
                # Determine the title for correct table headers
                title = f"Profile Status ({single_env})"
                if args.auth_env:
                    title = f"Auth Simulation ({args.auth_env})"
                elif args.download_env:
                    title = f"Download Simulation ({args.download_env})"
                return _print_profile_list(manager, args, title=title)

        # --- Live Mode ---
        pm_logger = logging.getLogger(__name__)
        original_log_level = pm_logger.level
        try:
            if args.no_blink:
                sys.stdout.write('\033[?25l')  # Hide cursor
                sys.stdout.flush()

            # Register signal handlers for graceful shutdown in live mode
            signal.signal(signal.SIGINT, handle_shutdown)
            signal.signal(signal.SIGTERM, handle_shutdown)
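
            # NOTE: each refresh below rebuilds its ProfileManager(s) via _create_manager,
            # which opens a fresh Redis connection every cycle; the temporary WARNING log
            # level keeps those connection messages off the live display.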

            while not shutdown_event.is_set():
                pm_logger.setLevel(logging.WARNING)  # Suppress connection logs for a cleaner UI
                start_time = time.time()

                output_buffer = io.StringIO()
                print(f"--- Profile Status (auto-refreshing every {args.interval_seconds}s, Ctrl+C to exit) | Last updated: {datetime.now().strftime('%H:%M:%S')} ---", file=output_buffer)

                if is_dual_mode and not args.separate_views:
                    auth_manager = _create_manager(args.auth_env, is_for_dual_mode=True)
                    download_manager = _create_manager(args.download_env, is_for_dual_mode=True)
                    _render_merged_view(auth_manager, download_manager, args, file=output_buffer)
                elif is_dual_mode and args.separate_views:
                    auth_manager = _create_manager(args.auth_env, is_for_dual_mode=True)
                    download_manager = _create_manager(args.download_env, is_for_dual_mode=True)
                    _render_simulation_view(f"Auth Simulation ({args.auth_env})", auth_manager, args, file=output_buffer)
                    _render_simulation_view(f"Download Simulation ({args.download_env})", download_manager, args, file=output_buffer)
                else:
                    # Single view mode
                    single_env = args.auth_env or args.download_env or args.env
                    manager = _create_manager(single_env, is_for_dual_mode=False)
                    # Determine the title for correct table headers
                    title = f"Profile Status ({single_env})"
                    if args.auth_env:
                        title = f"Auth Simulation ({args.auth_env})"
                    elif args.download_env:
                        title = f"Download Simulation ({args.download_env})"
                    _render_simulation_view(title, manager, args, file=output_buffer)

                pm_logger.setLevel(original_log_level)  # Restore log level
                fetch_and_render_duration = time.time() - start_time

                if args.no_blink:
                    sys.stdout.write('\033[2J\033[H')  # Clear screen, move cursor to top
                else:
                    os.system('cls' if os.name == 'nt' else 'clear')

                sys.stdout.write(output_buffer.getvalue())
                sys.stdout.flush()

                # --- Adaptive Countdown ---
                remaining_sleep = args.interval_seconds - fetch_and_render_duration

                if remaining_sleep > 0:
                    end_time = time.time() + remaining_sleep
                    while time.time() < end_time and not shutdown_event.is_set():
                        time_left = end_time - time.time()
                        sys.stdout.write(f"\rRefreshing in {int(time_left)}s... (fetch took {fetch_and_render_duration:.2f}s) ")
                        sys.stdout.flush()
                        time.sleep(min(1, time_left if time_left > 0 else 1))
                elif not shutdown_event.is_set():
                    sys.stdout.write(f"\rRefreshing now... (fetch took {fetch_and_render_duration:.2f}s, behind by {-remaining_sleep:.2f}s) ")
                    sys.stdout.flush()
                    time.sleep(0.5)  # Brief pause to make the message readable

                sys.stdout.write("\r" + " " * 80 + "\r")  # Clear the countdown line
                sys.stdout.flush()

            # The loop exited because shutdown_event was set (graceful Ctrl+C/SIGTERM),
            # so report success instead of falling through to the dispatcher's fallback.
            return 0

        except KeyboardInterrupt:
            # This can be triggered by Ctrl+C during a time.sleep().
            # The signal handler will have already set the shutdown_event and printed a message.
            # This block is a fallback.
            if not shutdown_event.is_set():
                print("\nKeyboardInterrupt received. Stopping live view...", file=sys.stderr)
                shutdown_event.set()  # Ensure the event is set if the handler didn't run
            return 0
        finally:
            pm_logger.setLevel(original_log_level)
            if args.live and args.no_blink:
                sys.stdout.write('\033[?25h')  # Restore cursor
                sys.stdout.flush()

    elif args.profile_command == 'set-proxy-state':
        success = manager.set_proxy_state(args.proxy_url, args.state, args.duration_minutes)
        return 0 if success else 1

    elif args.profile_command == 'get':
        profile = manager.get_profile(args.name)
        if not profile:
            print(f"Profile '{args.name}' not found")
            return 1

        print(f"Profile: {profile['name']}")
        print(f"Proxy: {profile['proxy']}")
        print(f"State: {profile['state']}")
        print(f"Created: {format_timestamp(profile['created_at'])}")
        print(f"Last Used: {format_timestamp(profile['last_used'])}")
        print(f"Success Count: {profile['success_count']}")
        print(f"Failure Count: {profile['failure_count']}")

        if profile.get('rest_until', 0) > 0:
            remaining = profile['rest_until'] - time.time()
            if remaining > 0:
                print(f"Resting for: {format_duration(remaining)} more")
            else:
                print(f"Rest period ended: {format_timestamp(profile['rest_until'])}")

        if profile.get('ban_reason'):
            print(f"Ban Reason: {profile['ban_reason']}")

        if profile.get('lock_timestamp', 0) > 0:
            print(f"Locked since: {format_timestamp(profile['lock_timestamp'])}")
            print(f"Lock Owner: {profile['lock_owner']}")

        if profile.get('notes'):
            print(f"Notes: {profile['notes']}")
        return 0

    elif args.profile_command == 'update-state':
        success = manager.update_profile_state(args.name, args.state, args.reason or '')
        return 0 if success else 1

    elif args.profile_command == 'update-field':
        success = manager.update_profile_field(args.name, args.field, args.value)
        return 0 if success else 1

    elif args.profile_command == 'pause':
        success = manager.update_profile_state(args.name, manager.STATE_PAUSED, 'Manual pause')
        return 0 if success else 1

    elif args.profile_command == 'activate':
        success = manager.update_profile_state(args.name, manager.STATE_ACTIVE, 'Manual activation')
        return 0 if success else 1

    elif args.profile_command == 'ban':
        success = manager.update_profile_state(args.name, manager.STATE_BANNED, args.reason)
        return 0 if success else 1

    elif args.profile_command == 'unban':
        # First activate, then reset session counters. The ban reason is cleared by update_profile_state.
        success = manager.update_profile_state(args.name, manager.STATE_ACTIVE, 'Manual unban')
        if success:
            manager.reset_profile_counters(args.name)
        return 0 if success else 1

    elif args.profile_command == 'delete':
        if not args.confirm:
            print("Error: --confirm flag is required for deletion", file=sys.stderr)
            return 1
        success = manager.delete_profile(args.name)
        return 0 if success else 1

    elif args.profile_command == 'delete-all':
        if not args.confirm:
            print("Error: --confirm flag is required for this destructive action.", file=sys.stderr)
            return 1
        deleted_count = manager.delete_all_data()
        print(f"Deleted {deleted_count} key(s) with prefix '{manager.key_prefix}'.")
        return 0

    elif args.profile_command == 'reset-global-counters':
        manager.reset_global_counters()
        return 0

    elif args.profile_command == 'reset-counters':
        profiles_to_reset = []
        if args.profile_name:
            profile = manager.get_profile(args.profile_name)
            if profile:
                profiles_to_reset.append(profile)
        elif args.proxy_url:
            profiles_to_reset = manager.list_profiles(proxy_filter=args.proxy_url)
        elif args.all_profiles:
            profiles_to_reset = manager.list_profiles()

        if not profiles_to_reset:
            print("No profiles found to reset.", file=sys.stderr)
            return 1

        print(f"Found {len(profiles_to_reset)} profile(s) to reset. This action is not reversible.")
        confirm = input("Continue? (y/N): ")
        if confirm.lower() != 'y':
            print("Aborted.")
            return 1

        success_count = 0
        for profile in profiles_to_reset:
            if manager.reset_profile_counters(profile['name']):
                success_count += 1

        print(f"Successfully reset session counters for {success_count} profile(s).")
        return 0

    elif args.profile_command == 'record-activity':
        success = manager.record_activity(args.name, args.type, args.timestamp)
        return 0 if success else 1

    elif args.profile_command == 'get-rate':
        rate = manager.get_activity_rate(args.name, args.type, args.window)
        print(f"{args.type.capitalize()} rate for '{args.name}' over {args.window}s: {rate}")
        return 0

    return 1  # Unknown profile_command; argparse subcommand validation should prevent this