#!/usr/bin/env python3
"""
Profile Management CLI Tool (v2) for yt-ops-client.
"""

import argparse
import base64
import json
import io
import logging
import os
import random
import re
import signal
import sys
import threading
import time
from datetime import datetime
from typing import Dict, List, Optional, Any
import collections

import redis

from .profile_statemachine import ProfileState, ProfileStateMachine

try:
    from dotenv import load_dotenv
except ImportError:
    load_dotenv = None

try:
    from tabulate import tabulate
except ImportError:
    print("'tabulate' library not found. Please install it with: pip install tabulate", file=sys.stderr)
    tabulate = None

# Configure logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)

# Graceful shutdown handler for live mode
shutdown_event = threading.Event()

def handle_shutdown(sig, frame):
    """Sets the shutdown_event on SIGINT or SIGTERM."""
    if not shutdown_event.is_set():
        # Use print to stderr to avoid messing with the live display
        print("\nShutdown signal received. Stopping live view...", file=sys.stderr)
        shutdown_event.set()


def natural_sort_key(s: str) -> List[Any]:
    """Key for natural sorting (e.g., 'user10' comes after 'user2')."""
    return [int(text) if text.isdigit() else text.lower() for text in re.split('([0-9]+)', s)]

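# Illustrative doctest for natural_sort_key (a sketch, runnable via `python -m doctest`):
#     >>> sorted(['user10', 'user2', 'user1'], key=natural_sort_key)
#     ['user1', 'user2', 'user10']
# Plain lexicographic sorted() would instead yield ['user1', 'user10', 'user2'].
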
def format_timestamp(ts: float) -> str:
    """Format a Unix timestamp for display."""
    if not ts:
        return "Never"
    return datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')


def format_duration(seconds: float) -> str:
    """Format a duration in seconds for display."""
    if seconds < 60:
        return f"{seconds:.0f}s"
    elif seconds < 3600:
        return f"{seconds/60:.1f}m"
    elif seconds < 86400:
        return f"{seconds/3600:.1f}h"
    else:
        return f"{seconds/86400:.1f}d"

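# Example outputs from format_duration (for reference):
#     format_duration(45)     -> '45s'
#     format_duration(90)     -> '1.5m'
#     format_duration(5400)   -> '1.5h'
#     format_duration(172800) -> '2.0d'
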
class ProfileManager:
    """Manages profiles in Redis with a configurable key prefix."""

    # Profile states are defined in the ProfileState enum.
    VALID_STATES = ProfileState.values()

    def __init__(self, redis_host='localhost', redis_port=6379,
                 redis_password=None, key_prefix='profile_mgmt_', redis_db=0):
        """Initialize the Redis connection and key prefix."""
        self.key_prefix = key_prefix
        logger.info(f"Attempting to connect to Redis at {redis_host}:{redis_port} (DB: {redis_db})...")
        try:
            self.redis = redis.Redis(
                host=redis_host,
                port=redis_port,
                password=redis_password,
                db=redis_db,
                decode_responses=True,
                socket_connect_timeout=10,
                socket_timeout=30,
                socket_keepalive=True,
                retry_on_timeout=True,
                max_connections=10
            )
            self.redis.ping()
            logger.info("Successfully connected to Redis.")
            logger.info(f"Using key prefix: {key_prefix}")
        except redis.exceptions.ConnectionError as e:
            logger.error(f"Failed to connect to Redis at {redis_host}:{redis_port}: {e}")
            sys.exit(1)

    def _profile_key(self, profile_name: str) -> str:
        """Get the Redis key for a profile hash."""
        return f"{self.key_prefix}profile:{profile_name}"

    def _state_key(self, state: str) -> str:
        """Get the Redis key for a state index."""
        return f"{self.key_prefix}state:{state}"

    def _activity_key(self, profile_name: str, activity_type: str) -> str:
        """Get the Redis key for a profile's activity timeline."""
        return f"{self.key_prefix}activity:{profile_name}:{activity_type}"

    def _proxy_state_key(self, proxy_url: str) -> str:
        """Get the Redis key for a proxy state hash."""
        # Base64-encode the URL to handle special characters in key names.
        encoded_proxy = base64.urlsafe_b64encode(proxy_url.encode()).decode()
        return f"{self.key_prefix}proxy_state:{encoded_proxy}"

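    # Key-encoding sketch (hedged): a proxy URL such as 'socks5://host:1090' contains
    # ':' and '/', which would collide with the ':'-delimited key layout above, so it
    # is stored as a urlsafe base64 token instead:
    #     base64.urlsafe_b64encode(b'socks5://host:1090').decode()  # ASCII-safe token
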
    def _proxy_group_state_key(self, group_name: str) -> str:
        """Get the Redis key for a proxy group state hash."""
        return f"{self.key_prefix}proxy_group_state:{group_name}"

    def _profile_group_state_key(self, group_name: str) -> str:
        """Get the Redis key for a profile group state hash."""
        return f"{self.key_prefix}profile_group_state:{group_name}"

    def _proxy_activity_key(self, proxy_url: str, activity_type: str) -> str:
        """Get the Redis key for a proxy's activity timeline."""
        # Use base64 to handle special chars in the URL
        encoded_proxy = base64.urlsafe_b64encode(proxy_url.encode()).decode()
        return f"{self.key_prefix}activity:proxy:{encoded_proxy}:{activity_type}"

    def _config_key(self) -> str:
        """Get the Redis key for shared configuration."""
        return f"{self.key_prefix}config"

    def _pending_downloads_key(self, profile_name: str) -> str:
        """Get the Redis key for a profile's pending downloads counter."""
        return f"{self.key_prefix}downloads_pending:{profile_name}"

    def _activation_log_key(self) -> str:
        """Get the Redis key for the activation event log."""
        return f"{self.key_prefix}log:activations"

    def log_activation_event(self, event_data: Dict[str, Any]):
        """Log a profile activation event to a capped list in Redis."""
        key = self._activation_log_key()
        try:
            # Serialize the event data to a JSON string
            event_json = json.dumps(event_data, default=str)

            pipe = self.redis.pipeline()
            # Prepend the new event to the list
            pipe.lpush(key, event_json)
            # Trim the list to keep only the 20 most recent entries
            pipe.ltrim(key, 0, 19)
            pipe.execute()
            logger.debug(f"Logged activation event: {event_json}")
        except (TypeError, redis.RedisError) as e:
            logger.error(f"Failed to log activation event: {e}")

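    # The capped-list pattern above, in redis-cli terms (illustrative sketch; the key
    # is shown with the default 'profile_mgmt_' prefix as an assumption):
    #     LPUSH profile_mgmt_log:activations '{"profile": "...", ...}'
    #     LTRIM profile_mgmt_log:activations 0 19
    # LTRIM keeps indices 0..19, so the list never grows beyond 20 events.
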
    def get_activation_events(self, count: int = 10) -> List[Dict[str, Any]]:
        """Retrieve the most recent activation events from Redis."""
        key = self._activation_log_key()
        try:
            events_json = self.redis.lrange(key, 0, count - 1)
            events = [json.loads(e) for e in events_json]
            return events
        except (TypeError, json.JSONDecodeError, redis.RedisError) as e:
            logger.error(f"Failed to retrieve or parse activation events: {e}")
            return []

    def increment_pending_downloads(self, profile_name: str, count: int = 1) -> Optional[int]:
        """Atomically increment (or decrement, if count is negative) the pending downloads counter for a profile."""
        if count == 0:
            return None

        key = self._pending_downloads_key(profile_name)

        # When decrementing, ensure the counter exists to avoid creating negative counters from stray calls.
        if count < 0 and not self.redis.exists(key):
            logger.warning(f"Attempted to decrement pending downloads for '{profile_name}' by {abs(count)}, but no counter exists. No action taken.")
            return None

        new_value = self.redis.incrby(key, count)

        if count > 0:
            # Set/refresh the TTL on positive increments so the key cannot live forever.
            # 5 hours is a safe buffer over the 4-hour info.json validity window.
            self.redis.expire(key, 5 * 3600)
            logger.info(f"Incremented pending downloads for '{profile_name}' by {count}. New count: {new_value}")
        elif count < 0:
            logger.info(f"Decremented pending downloads for '{profile_name}' by {abs(count)}. New count: {new_value}")

        if new_value <= 0:
            # Clean up the key if the counter drops to or below zero.
            self.redis.delete(key)
            logger.info(f"Pending downloads for '{profile_name}' reached zero or less. Cleared counter key.")

        return new_value

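    # Counter lifecycle sketch (hedged; 'user1' is a hypothetical profile name):
    #     mgr.increment_pending_downloads('user1', 3)   # INCRBY -> 3, EXPIRE 18000s
    #     mgr.increment_pending_downloads('user1', -1)  # INCRBY -> 2
    #     mgr.decrement_pending_downloads('user1')      # DECR   -> 1
    # The key is deleted as soon as the counter reaches zero, so get_pending_downloads()
    # simply treats a missing key as 0.
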
    def decrement_pending_downloads(self, profile_name: str) -> Optional[int]:
        """Atomically decrement the pending downloads counter for a profile."""
        key = self._pending_downloads_key(profile_name)

        # Only decrement if the key exists. This prevents stray calls from creating negative counters.
        if not self.redis.exists(key):
            logger.warning(f"Attempted to decrement pending downloads for '{profile_name}', but no counter exists. No action taken.")
            return None

        new_value = self.redis.decr(key)

        logger.info(f"Decremented pending downloads for '{profile_name}'. New count: {new_value}")
        if new_value <= 0:
            # Clean up the key once it reaches zero.
            self.redis.delete(key)
            logger.info(f"Pending downloads for '{profile_name}' reached zero. Cleared counter key.")

        return new_value

    def get_pending_downloads(self, profile_name: str) -> int:
        """Retrieve the current pending downloads count for a profile."""
        key = self._pending_downloads_key(profile_name)
        value = self.redis.get(key)
        return int(value) if value else 0

    def clear_pending_downloads(self, profile_name: str) -> bool:
        """Delete the pending downloads counter key for a profile."""
        key = self._pending_downloads_key(profile_name)
        deleted_count = self.redis.delete(key)
        if deleted_count > 0:
            logger.info(f"Cleared pending downloads counter for '{profile_name}'.")
        return deleted_count > 0

    def set_config(self, key: str, value: Any) -> bool:
        """Set a configuration value in Redis."""
        self.redis.hset(self._config_key(), key, str(value))
        logger.info(f"Set config '{key}' to '{value}'")
        return True

    def get_config(self, key: str, default: Optional[Any] = None) -> Optional[Any]:
        """Get a configuration value from Redis."""
        value = self.redis.hget(self._config_key(), key)
        if value is None:
            return default
        return value

    def _locks_key(self) -> str:
        """Get the Redis key for the locks hash."""
        return f"{self.key_prefix}locks"

    def _failed_lock_attempts_key(self) -> str:
        """Get the Redis key for the failed lock attempts counter."""
        return f"{self.key_prefix}stats:failed_lock_attempts"

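    # Shared-config usage sketch (hedged; 'proxy_work_minutes' is a key this module
    # reads elsewhere):
    #     mgr.set_config('proxy_work_minutes', 45)    # HSET <prefix>config proxy_work_minutes "45"
    #     mgr.get_config('proxy_work_minutes')        # -> '45' (values come back as strings)
    #     mgr.get_config('missing_key', default='0')  # -> '0'
    # Values are stored via str(), so callers must convert types themselves.
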
    def create_profile(self, name: str, proxy: str, initial_state: str = ProfileState.ACTIVE.value) -> bool:
        """Create a new profile."""
        # Normalize to uppercase
        initial_state = initial_state.upper()

        if initial_state not in self.VALID_STATES:
            logger.error(f"Invalid initial state: {initial_state}")
            return False

        profile_key = self._profile_key(name)

        # Check if the profile already exists
        if self.redis.exists(profile_key):
            logger.error(f"Profile '{name}' already exists")
            return False

        now = time.time()
        profile_data = {
            'name': name,
            'proxy': proxy,
            'state': initial_state,
            'created_at': str(now),
            'last_used': str(now),
            'success_count': '0',
            'failure_count': '0',
            'tolerated_error_count': '0',
            'download_count': '0',
            'download_error_count': '0',
            'global_success_count': '0',
            'global_failure_count': '0',
            'global_tolerated_error_count': '0',
            'global_download_count': '0',
            'global_download_error_count': '0',
            'lock_timestamp': '0',
            'lock_owner': '',
            'rest_until': '0',
            'last_rest_timestamp': '0',
            'wait_started_at': '0',
            'ban_reason': '',
            'rest_reason': '',
            'reason': '',
            'notes': ''
        }

        # Use a pipeline so the hash write and index update are applied together
        pipe = self.redis.pipeline()
        pipe.hset(profile_key, mapping=profile_data)
        # Add to the state index
        pipe.zadd(self._state_key(initial_state), {name: now})
        result = pipe.execute()

        if result[0] > 0:
            logger.info(f"Created profile '{name}' with proxy '{proxy}' (state: {initial_state})")
            return True
        else:
            logger.error(f"Failed to create profile '{name}'")
            return False

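    # Write-path sketch for create_profile (hedged): both writes go through one
    # pipeline, which redis-py wraps in MULTI/EXEC by default, so the profile hash
    # and its state index stay in step:
    #     HSET <prefix>profile:user1 name user1 proxy ... state ACTIVE ...
    #     ZADD <prefix>state:ACTIVE <created_at> user1
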
    def get_profile(self, name: str) -> Optional[Dict[str, Any]]:
        """Get profile details."""
        profile_key = self._profile_key(name)
        data = self.redis.hgetall(profile_key)

        if not data:
            return None

        # Convert numeric fields
        numeric_fields = ['created_at', 'last_used', 'success_count', 'failure_count',
                          'tolerated_error_count', 'download_count', 'download_error_count',
                          'global_success_count', 'global_failure_count',
                          'global_tolerated_error_count', 'global_download_count',
                          'global_download_error_count',
                          'lock_timestamp', 'rest_until', 'last_rest_timestamp', 'wait_started_at']
        for field in numeric_fields:
            if field in data:
                try:
                    data[field] = float(data[field])
                except (ValueError, TypeError):
                    data[field] = 0.0

        return data

    def list_profiles(self, state_filter: Optional[str] = None,
                      proxy_filter: Optional[str] = None) -> List[Dict[str, Any]]:
        """List profiles with optional filtering."""
        profiles = []

        if state_filter:
            # Get profiles from the specific state index
            state_key = self._state_key(state_filter)
            profile_names = self.redis.zrange(state_key, 0, -1)
        else:
            # Get all profiles by scanning keys
            pattern = self._profile_key('*')
            keys = []
            cursor = 0
            while True:
                cursor, found_keys = self.redis.scan(cursor=cursor, match=pattern, count=100)
                keys.extend(found_keys)
                if cursor == 0:
                    break
            # Assumes profile names themselves do not contain ':'
            profile_names = [k.split(':')[-1] for k in keys]

        if not profile_names:
            return []

        # --- Batch fetch profile data to avoid timeouts ---
        all_profile_data = []
        all_pending_downloads = []
        batch_size = 500

        for i in range(0, len(profile_names), batch_size):
            batch_names = profile_names[i:i + batch_size]

            # Fetch profile hashes
            pipe = self.redis.pipeline()
            for name in batch_names:
                pipe.hgetall(self._profile_key(name))
            all_profile_data.extend(pipe.execute())

            # Fetch pending download counts
            pipe = self.redis.pipeline()
            for name in batch_names:
                pipe.get(self._pending_downloads_key(name))
            all_pending_downloads.extend(pipe.execute())
        # --- End batch fetch ---

        numeric_fields = ['created_at', 'last_used', 'success_count', 'failure_count',
                          'tolerated_error_count', 'download_count', 'download_error_count',
                          'global_success_count', 'global_failure_count',
                          'global_tolerated_error_count', 'global_download_count',
                          'global_download_error_count',
                          'lock_timestamp', 'rest_until', 'last_rest_timestamp', 'wait_started_at']

        for i, data in enumerate(all_profile_data):
            if not data:
                continue

            # Add the pending downloads count to the profile data
            pending_downloads = all_pending_downloads[i]
            data['pending_downloads'] = int(pending_downloads) if pending_downloads else 0

            # Convert numeric fields
            for field in numeric_fields:
                if field in data:
                    try:
                        data[field] = float(data[field])
                    except (ValueError, TypeError):
                        data[field] = 0.0

            if proxy_filter and proxy_filter not in data.get('proxy', ''):
                continue

            profiles.append(data)

        # Sort by natural name order
        profiles.sort(key=lambda p: natural_sort_key(p.get('name', '')))
        return profiles

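    # Batching sketch (hedged): for N profiles, the loop above issues ceil(N/500)
    # pipelined batches instead of N sequential round-trips, e.g. for 1200 profiles:
    # 3 pipelines of <=500 HGETALLs each, plus 3 pipelines of GETs for the
    # pending-download counters.
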
    def update_profile_state(self, name: str, new_state: str,
                             reason: str = '') -> bool:
        """Update a profile's state by triggering a state machine transition."""
        # Normalize to uppercase
        new_state = new_state.upper()

        if new_state not in ProfileState.values():
            logger.error(f"Invalid state: {new_state}")
            return False

        sm = self.get_state_machine(name)
        if not sm:
            return False  # get_state_machine logs the error

        if sm.current_state.value == new_state:
            logger.info(f"Profile '{name}' already in state {new_state}. No action taken.")
            return True

        try:
            if new_state == ProfileState.ACTIVE.value:
                sm.activate()
            elif new_state == ProfileState.BANNED.value:
                sm.ban(reason=reason)
            elif new_state == ProfileState.RESTING.value:
                sm.rest(reason=reason)
            elif new_state == ProfileState.PAUSED.value:
                sm.pause(reason=reason)
            # LOCKED and COOLDOWN are not handled here: they are special transitions
            # driven by lock_profile() and unlock_profile(), and must not be set directly.
            elif new_state in [ProfileState.LOCKED.value, ProfileState.COOLDOWN.value]:
                logger.error(f"Manual state transition to '{new_state}' is not allowed. Use lock_profile() or unlock_profile().")
                return False
            else:
                # This case should not be reached if ProfileState.values() is correct
                logger.error(f"State transition to '{new_state}' is not implemented in update_profile_state.")
                return False

            return True
        except Exception as e:
            logger.error(f"Failed to update profile '{name}' from {sm.current_state.id} to '{new_state}': {e}", exc_info=True)
            return False

    def update_profile_field(self, name: str, field: str, value: str) -> bool:
        """Update a specific field in a profile."""
        profile = self.get_profile(name)
        if not profile:
            logger.error(f"Profile '{name}' not found")
            return False

        profile_key = self._profile_key(name)
        self.redis.hset(profile_key, field, value)
        logger.info(f"Updated profile '{name}' field '{field}' to '{value}'")
        return True

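    # Allowed CLI-driven transitions (summary of the dispatch above):
    #     ACTIVE / BANNED / RESTING / PAUSED -> reachable via update_profile_state()
    #     LOCKED / COOLDOWN                  -> only via lock_profile() / unlock_profile()
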
    def delete_profile(self, name: str) -> bool:
        """Delete a profile and all associated data."""
        profile = self.get_profile(name)
        if not profile:
            logger.error(f"Profile '{name}' not found")
            return False

        state = profile['state']

        pipe = self.redis.pipeline()

        # Delete the profile hash
        profile_key = self._profile_key(name)
        pipe.delete(profile_key)

        # Remove from the state index
        if state in ProfileState.values():
            pipe.zrem(self._state_key(state), name)

        # Delete activity keys
        for activity_type in ['success', 'failure', 'tolerated_error', 'download', 'download_error']:
            activity_key = self._activity_key(name, activity_type)
            pipe.delete(activity_key)

        # Remove from the locks hash if present
        locks_key = self._locks_key()
        pipe.hdel(locks_key, name)

        pipe.execute()

        logger.info(f"Deleted profile '{name}' and all associated data")
        return True

    def delete_all_data(self) -> int:
        """Delete all keys associated with this manager's key_prefix."""
        logger.warning(f"Deleting all keys with prefix: {self.key_prefix}")

        keys_to_delete = []
        for key in self.redis.scan_iter(f"{self.key_prefix}*"):
            keys_to_delete.append(key)

        if not keys_to_delete:
            logger.info("No keys found to delete.")
            return 0

        total_deleted = 0
        chunk_size = 500
        for i in range(0, len(keys_to_delete), chunk_size):
            chunk = keys_to_delete[i:i + chunk_size]
            total_deleted += self.redis.delete(*chunk)

        logger.info(f"Deleted {total_deleted} key(s).")
        return total_deleted

    def record_activity(self, name: str, activity_type: str,
                        timestamp: Optional[float] = None) -> bool:
        """Record an activity event (success, failure, tolerated_error, download, download_error) for a profile."""
        if activity_type not in ['success', 'failure', 'tolerated_error', 'download', 'download_error']:
            logger.error(f"Invalid activity type: {activity_type}")
            return False

        profile = self.get_profile(name)
        if not profile:
            logger.error(f"Profile '{name}' not found")
            return False

        ts = timestamp or time.time()
        activity_key = self._activity_key(name, activity_type)

        # Add to the sorted set (member and score are both the timestamp)
        self.redis.zadd(activity_key, {str(ts): ts})

        # Update counters in the profile hash
        profile_key = self._profile_key(name)
        counter_field = f"{activity_type}_count"
        self.redis.hincrby(profile_key, counter_field, 1)
        global_counter_field = f"global_{activity_type}_count"
        self.redis.hincrby(profile_key, global_counter_field, 1)

        # Update last_used
        self.redis.hset(profile_key, 'last_used', str(ts))

        # Keep only the last 1000 activities to prevent unbounded growth
        self.redis.zremrangebyrank(activity_key, 0, -1001)

        # Also record the activity for the proxy
        proxy_url = profile.get('proxy')
        if proxy_url:
            proxy_activity_key = self._proxy_activity_key(proxy_url, activity_type)
            pipe = self.redis.pipeline()
            pipe.zadd(proxy_activity_key, {str(ts): ts})
            # Keep the last 5000 activities per proxy (higher limit)
            pipe.zremrangebyrank(proxy_activity_key, 0, -5001)
            pipe.execute()
            logger.debug(f"Recorded {activity_type} for proxy '{proxy_url}'")

        logger.debug(f"Recorded {activity_type} for profile '{name}' at {ts}")
        return True

    def get_activity_rate(self, name: str, activity_type: str,
                          window_seconds: int) -> int:
        """Get the activity count within a time window."""
        if activity_type not in ['success', 'failure', 'tolerated_error', 'download', 'download_error']:
            return 0

        activity_key = self._activity_key(name, activity_type)
        now = time.time()
        start = now - window_seconds

        count = self.redis.zcount(activity_key, start, now)
        return count

    def get_proxy_activity_rate(self, proxy_url: str, activity_type: str,
                                window_seconds: int) -> int:
        """Get the proxy activity count within a time window."""
        if activity_type not in ['success', 'failure', 'tolerated_error', 'download', 'download_error']:
            return 0

        activity_key = self._proxy_activity_key(proxy_url, activity_type)
        now = time.time()
        start = now - window_seconds

        count = self.redis.zcount(activity_key, start, now)
        return count

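    # Sliding-window sketch (hedged): activities are a sorted set scored by Unix
    # timestamp, so "events in the last hour" is a ZCOUNT over a score range:
    #     ZADD   <prefix>activity:user1:success 1700000000.0 "1700000000.0"
    #     ZCOUNT <prefix>activity:user1:success <now-3600> <now>
    # and ZREMRANGEBYRANK <key> 0 -1001 trims everything but the newest 1000 members.
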
    def reset_profile_counters(self, name: str) -> bool:
        """Reset the session counters for a single profile (does not affect global counters)."""
        profile = self.get_profile(name)
        if not profile:
            logger.error(f"Profile '{name}' not found")
            return False

        profile_key = self._profile_key(name)
        counters_to_reset = {
            'success_count': '0',
            'failure_count': '0',
            'tolerated_error_count': '0',
            'download_count': '0',
            'download_error_count': '0',
        }
        self.redis.hset(profile_key, mapping=counters_to_reset)
        logger.info(f"Reset session counters for profile '{name}'.")
        return True

    def get_failed_lock_attempts(self) -> int:
        """Get the total count of failed lock attempts from Redis."""
        count = self.redis.get(self._failed_lock_attempts_key())
        return int(count) if count else 0

    def get_global_stats(self) -> Dict[str, int]:
        """Get aggregated global stats across all profiles."""
        profiles = self.list_profiles()
        total_success = sum(int(p.get('global_success_count', 0)) for p in profiles)
        total_failure = sum(int(p.get('global_failure_count', 0)) for p in profiles)
        total_tolerated_error = sum(int(p.get('global_tolerated_error_count', 0)) for p in profiles)
        total_downloads = sum(int(p.get('global_download_count', 0)) for p in profiles)
        total_download_errors = sum(int(p.get('global_download_error_count', 0)) for p in profiles)
        return {
            'total_success': total_success,
            'total_failure': total_failure,
            'total_tolerated_error': total_tolerated_error,
            'total_downloads': total_downloads,
            'total_download_errors': total_download_errors,
        }

    def get_per_proxy_stats(self) -> Dict[str, Dict[str, Any]]:
        """Get aggregated stats per proxy."""
        profiles = self.list_profiles()
        proxy_stats = collections.defaultdict(lambda: {
            'success': 0, 'failure': 0, 'tolerated_error': 0, 'downloads': 0, 'download_errors': 0, 'profiles': 0
        })
        for p in profiles:
            proxy = p.get('proxy')
            if proxy:
                proxy_stats[proxy]['success'] += int(p.get('global_success_count', 0))
                proxy_stats[proxy]['failure'] += int(p.get('global_failure_count', 0))
                proxy_stats[proxy]['tolerated_error'] += int(p.get('global_tolerated_error_count', 0))
                proxy_stats[proxy]['downloads'] += int(p.get('global_download_count', 0))
                proxy_stats[proxy]['download_errors'] += int(p.get('global_download_error_count', 0))
                proxy_stats[proxy]['profiles'] += 1
        return dict(proxy_stats)

    def reset_global_counters(self) -> int:
        """Reset global, non-profile-specific counters."""
        logger.info("Resetting global counters...")
        keys_to_delete = [self._failed_lock_attempts_key()]

        deleted_count = 0
        if keys_to_delete:
            deleted_count = self.redis.delete(*keys_to_delete)

        logger.info(f"Deleted {deleted_count} global counter key(s).")
        return deleted_count

    def set_proxy_state(self, proxy_url: str, state: str, rest_duration_minutes: Optional[int] = None) -> bool:
        """Set the state of a proxy and propagate it to associated profiles."""
        if state not in [ProfileState.ACTIVE.value, ProfileState.RESTING.value]:
            logger.error(f"Invalid proxy state: {state}. Only ACTIVE and RESTING are supported for proxies.")
            return False

        proxy_key = self._proxy_state_key(proxy_url)
        now = time.time()
        updates = {'state': state}

        rest_until = 0
        if state == ProfileState.RESTING.value:
            if not rest_duration_minutes or rest_duration_minutes <= 0:
                logger.error("rest_duration_minutes is required when setting proxy state to RESTING.")
                return False
            rest_until = now + rest_duration_minutes * 60
            updates['rest_until'] = str(rest_until)
            updates['work_start_timestamp'] = '0'  # Clear the work start time
        else:  # ACTIVE
            updates['rest_until'] = '0'
            updates['work_start_timestamp'] = str(now)

        self.redis.hset(proxy_key, mapping=updates)
        logger.info(f"Set proxy '{proxy_url}' state to {state}.")

        # Now, update the associated profiles
        profiles_on_proxy = self.list_profiles(proxy_filter=proxy_url)
        if not profiles_on_proxy:
            return True

        if state == ProfileState.RESTING.value:
            logger.info(f"Propagating RESTING state to profiles on proxy '{proxy_url}'.")
            for profile in profiles_on_proxy:
                if profile['state'] == ProfileState.ACTIVE.value:
                    self.update_profile_state(profile['name'], ProfileState.RESTING.value, "Proxy resting")
                    self.update_profile_field(profile['name'], 'rest_until', str(rest_until))
        elif state == ProfileState.ACTIVE.value:
            logger.info(f"Propagating ACTIVE state to profiles on proxy '{proxy_url}'.")
            for profile in profiles_on_proxy:
                # Only wake profiles that were rested because of this proxy
                if profile['state'] == ProfileState.RESTING.value and profile.get('rest_reason') == "Proxy resting":
                    self.update_profile_state(profile['name'], ProfileState.ACTIVE.value, "Proxy activated")

        return True

    def get_proxy_states(self, proxy_urls: List[str]) -> Dict[str, Dict[str, Any]]:
        """Get states for multiple proxies."""
        if not proxy_urls:
            return {}

        states = {}
        batch_size = 500

        for i in range(0, len(proxy_urls), batch_size):
            batch_urls = proxy_urls[i:i + batch_size]

            pipe = self.redis.pipeline()
            for proxy_url in batch_urls:
                pipe.hgetall(self._proxy_state_key(proxy_url))
            results = pipe.execute()

            for j, data in enumerate(results):
                proxy_url = batch_urls[j]
                if data:
                    # Convert numeric fields
                    for field in ['rest_until', 'work_start_timestamp']:
                        if field in data:
                            try:
                                data[field] = float(data[field])
                            except (ValueError, TypeError):
                                data[field] = 0.0
                    states[proxy_url] = data
                else:
                    # Default to ACTIVE if no state is found
                    states[proxy_url] = {'state': ProfileState.ACTIVE.value, 'rest_until': 0.0, 'work_start_timestamp': 0.0}

        return states

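    # Propagation example (hedged; 'sslocal-rust-1090:1090' mirrors the CLI help text):
    #     mgr.set_proxy_state('sslocal-rust-1090:1090', 'RESTING', rest_duration_minutes=30)
    # rests the proxy for 30 minutes and moves every ACTIVE profile on it to RESTING
    # with rest_reason "Proxy resting"; setting it back to ACTIVE wakes only those
    # profiles, leaving manually rested ones untouched.
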
    def set_proxy_group_membership(self, proxy_url: str, group_name: str, work_minutes: int) -> bool:
        """Record a proxy's membership in a rotation group by updating its state hash."""
        proxy_key = self._proxy_state_key(proxy_url)
        updates = {
            'group_name': group_name,
            'group_work_minutes': str(work_minutes)
        }
        self.redis.hset(proxy_key, mapping=updates)
        logger.debug(f"Set proxy '{proxy_url}' group membership to '{group_name}'.")
        return True

    def set_proxy_group_state(self, group_name: str, active_proxy_index: int, next_rotation_timestamp: float) -> bool:
        """Set the state of a proxy group."""
        group_key = self._proxy_group_state_key(group_name)
        updates = {
            'active_proxy_index': str(active_proxy_index),
            'next_rotation_timestamp': str(next_rotation_timestamp)
        }
        self.redis.hset(group_key, mapping=updates)
        logger.info(f"Set proxy group '{group_name}' state: active_index={active_proxy_index}, next_rotation at {format_timestamp(next_rotation_timestamp)}.")
        return True

    def get_proxy_group_states(self, group_names: List[str]) -> Dict[str, Dict[str, Any]]:
        """Get states for multiple proxy groups."""
        if not group_names:
            return {}

        pipe = self.redis.pipeline()
        for name in group_names:
            pipe.hgetall(self._proxy_group_state_key(name))

        results = pipe.execute()

        states = {}
        for i, data in enumerate(results):
            group_name = group_names[i]
            if data:
                # Convert numeric fields
                for field in ['active_proxy_index', 'next_rotation_timestamp']:
                    if field in data:
                        try:
                            data[field] = float(data[field])
                        except (ValueError, TypeError):
                            data[field] = 0.0
                if 'active_proxy_index' in data:
                    data['active_proxy_index'] = int(data['active_proxy_index'])
                states[group_name] = data
            else:
                # Default to an empty dict if no state is found
                states[group_name] = {}

        return states

    def set_profile_group_state(self, group_name: str, state_data: Dict[str, Any]) -> bool:
        """Set or update the state of a profile group."""
        group_key = self._profile_group_state_key(group_name)
        # Ensure all values are strings for Redis HSET, and filter out None values
        updates = {k: str(v) for k, v in state_data.items() if v is not None}
        if not updates:
            return True  # Nothing to do
        self.redis.hset(group_key, mapping=updates)
        logger.debug(f"Set profile group '{group_name}' state: {updates}.")
        return True

    def get_profile_group_states(self, group_names: List[str]) -> Dict[str, Dict[str, Any]]:
        """Get states for multiple profile groups."""
        if not group_names:
            return {}

        pipe = self.redis.pipeline()
        for name in group_names:
            pipe.hgetall(self._profile_group_state_key(name))

        results = pipe.execute()

        states = {}
        for i, data in enumerate(results):
            group_name = group_names[i]
            if data:
                numeric_fields = {
                    'active_profile_index': int,
                    'rotate_after_requests': int,
                    'max_active_profiles': int,
                    'pending_downloads': int,
                }
                float_fields = ['last_finished_downloads_ts']
                for field, type_converter in numeric_fields.items():
                    if field in data:
                        try:
                            data[field] = type_converter(data[field])
                        except (ValueError, TypeError):
                            data[field] = 0
                for field in float_fields:
                    if field in data:
                        try:
                            data[field] = float(data[field])
                        except (ValueError, TypeError):
                            data[field] = 0.0
                states[group_name] = data
            else:
                states[group_name] = {}

        return states

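    # Profile-group state shape (hedged sketch of fields this module reads back):
    #     mgr.set_profile_group_state('groupA', {
    #         'prefix': 'groupA_',            # used to map profiles to groups
    #         'active_profile_index': 0,
    #         'pending_downloads': 2,
    #         'last_finished_downloads_ts': time.time(),
    #     })
    # None values are dropped and everything else is stored via str().
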
    def lock_profile(self, owner: str, profile_prefix: Optional[str] = None, specific_profile_name: Optional[str] = None) -> Optional[Dict[str, Any]]:
        """
        Find and lock an available ACTIVE profile.
        If `specific_profile_name` is provided, it will attempt to lock only that profile.
        Otherwise, it scans for available profiles, optionally filtered by `profile_prefix`.
        """
        profiles_to_check = []
        if specific_profile_name:
            # If a specific profile is requested, we only check that one.
            profiles_to_check = [specific_profile_name]
        else:
            # Original logic: find all active profiles, optionally filtered by prefix.
            active_profiles = self.redis.zrange(self._state_key(ProfileState.ACTIVE.value), 0, -1)
            if not active_profiles:
                logger.warning("No active profiles available to lock.")
                self.redis.incr(self._failed_lock_attempts_key())
                return None

            if profile_prefix:
                profiles_to_check = [p for p in active_profiles if p.startswith(profile_prefix)]
                if not profiles_to_check:
                    logger.warning(f"No active profiles with prefix '{profile_prefix}' available to lock.")
                    self.redis.incr(self._failed_lock_attempts_key())
                    return None
            else:
                profiles_to_check = active_profiles

        # --- Filter by active proxy and prepare for locking ---
        full_profiles = [self.get_profile(p) for p in profiles_to_check]
        # Filter out any None profiles from a race condition with deletion, and ensure the state is ACTIVE.
        # This is especially important when locking a specific profile.
        full_profiles = [p for p in full_profiles if p and p.get('proxy') and p.get('state') == ProfileState.ACTIVE.value]

        if not full_profiles:
            if specific_profile_name:
                logger.warning(f"Profile '{specific_profile_name}' is not eligible for locking (e.g., not ACTIVE or missing).")
            else:
                logger.warning("No active profiles available to lock after filtering.")
            self.redis.incr(self._failed_lock_attempts_key())
            return None

        unique_proxies = sorted(set(p['proxy'] for p in full_profiles))
        proxy_states = self.get_proxy_states(unique_proxies)

        eligible_profiles = [
            p['name'] for p in full_profiles
            if proxy_states.get(p['proxy'], {}).get('state', ProfileState.ACTIVE.value) == ProfileState.ACTIVE.value
        ]

        if not eligible_profiles:
            logger.warning("No active profiles with an active proxy available to lock.")
            self.redis.incr(self._failed_lock_attempts_key())
            return None

        # Selection is deterministic (Redis sorted-set order) instead of random:
        # random.shuffle(active_profiles)

        locks_key = self._locks_key()

        for name in eligible_profiles:
            # Try to acquire the lock atomically
            if self.redis.hsetnx(locks_key, name, owner):
                # Lock acquired. Now re-check the state to avoid a race condition with the enforcer.
                profile = self.get_profile(name)
                if not profile:  # The profile might have been deleted in a race condition.
                    self.redis.hdel(locks_key, name)
                    continue

                current_state = profile.get('state')

                # Normalize to uppercase for the check
                if not current_state or current_state.upper() != ProfileState.ACTIVE.value:
                    # Another process (the enforcer) changed the state. Release the lock and try the next profile.
                    self.redis.hdel(locks_key, name)
                    logger.warning(f"Aborted lock for '{name}'; state changed from ACTIVE to '{current_state}' during lock acquisition.")
                    continue

                # The state is still ACTIVE; proceed with locking.
                sm = self.get_state_machine(name, profile=profile)
                if not sm:
                    # Should not happen if we just checked the profile
                    self.redis.hdel(locks_key, name)
                    continue

                try:
                    # The hsetnx above acquired the global lock. Now we transition the state.
                    sm.lock(owner=owner)
                    # The on_enter_locked action handles all Redis updates for the profile
                    # itself, including the log messages.
                    return self.get_profile(name)
                except Exception as e:
                    # This could be a TransitionNotAllowed error if the state changed,
                    # or a Redis error during the action.
                    logger.error(f"Failed to transition profile '{name}' to LOCKED state: {e}", exc_info=True)
                    # Release the global lock since the state transition failed.
                    self.redis.hdel(locks_key, name)
                    continue

        logger.warning("Could not lock any active profile (all may have been locked by other workers).")
        self.redis.incr(self._failed_lock_attempts_key())
        return None

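    # Lock acquisition pattern (hedged summary of the loop above):
    #     1. HSETNX <prefix>locks <name> <owner>  -- atomic claim; losing the race -> try next
    #     2. Re-read the profile and verify state == ACTIVE
    #     3. sm.lock(owner=...)                   -- transition ACTIVE -> LOCKED
    #     On any failure after step 1: HDEL <prefix>locks <name> and continue.
    # Worker usage sketch ('worker-1' is a hypothetical owner id):
    #     prof = mgr.lock_profile(owner='worker-1', profile_prefix='groupA_')
    #     if prof:
    #         ...  # do work, then release:
    #         mgr.unlock_profile(prof['name'], owner='worker-1')
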
    def unlock_profile(self, name: str, owner: Optional[str] = None, rest_for_seconds: Optional[int] = None) -> bool:
        """Unlock a profile. If an owner is provided, it must match. Can optionally put the profile into the COOLDOWN state."""
        profile = self.get_profile(name)
        if not profile:
            logger.error(f"Profile '{name}' not found.")
            return False

        # Normalize to uppercase for the check
        current_state = profile.get('state')
        if not current_state or current_state.upper() != ProfileState.LOCKED.value:
            logger.warning(f"Profile '{name}' is not in LOCKED state (current: {current_state}).")
            # Forcibly remove it from the locks hash if it is inconsistent
            self.redis.hdel(self._locks_key(), name)
            return False

        if owner and profile['lock_owner'] != owner:
            logger.error(f"Owner mismatch: cannot unlock profile '{name}'. Locked by '{profile['lock_owner']}', attempted by '{owner}'.")
            return False

        sm = self.get_state_machine(name, profile=profile)
        if not sm:
            return False

        try:
            if rest_for_seconds and rest_for_seconds > 0:
                sm.start_cooldown(duration=rest_for_seconds)
            else:
                sm.unlock()
            return True
        except Exception as e:
            logger.error(f"Failed to unlock profile '{name}': {e}", exc_info=True)
            return False

    def get_state_machine(self, name: str, profile: Optional[Dict[str, Any]] = None) -> Optional[ProfileStateMachine]:
        """
        Initializes and returns a ProfileStateMachine instance for a given profile,
        set to its current state from Redis.
        If a `profile` dict is not provided, it is fetched from Redis.
        """
        if profile is None:
            profile = self.get_profile(name)

        if not profile:
            logger.error(f"Cannot create state machine for non-existent profile '{name}'")
            return None

        current_state_str = profile.get('state')
        if not current_state_str:
            logger.error(f"Profile '{name}' has no state. Cannot initialize state machine.")
            return None

        # Normalize to uppercase to handle potential inconsistencies (e.g. "locked" vs "LOCKED")
        current_state_str = current_state_str.upper()

        if current_state_str not in self.VALID_STATES:
            logger.error(f"Profile '{name}' has an invalid state value '{current_state_str}' in Redis. Cannot initialize state machine.")
            return None

        # The `model` parameter of the StateMachine constructor is where context is passed;
        # we pass the manager and profile name. The Redis state (uppercase value) maps to
        # the state machine identifier (lowercase attribute name).
        #
        # When re-hydrating a state machine from a stored state, we do not want to
        # re-trigger the `on_enter` actions for the current state; ideally we would
        # suppress the initial transition and set the state directly.
        #
        # WORKAROUND for the older statemachine library:
        # Instantiating the machine triggers an initial transition to ACTIVE, which wrongly
        # updates Redis. We let this happen, then immediately correct the state if it was
        # supposed to be something else.
        sm = ProfileStateMachine(manager=self, profile_name=name)

        # The sm is now in the ACTIVE state, and Redis has been updated. If the original
        # state was LOCKED, we must re-lock it to fix Redis and the state machine object
        # so that subsequent transitions work.
        if current_state_str == ProfileState.LOCKED.value:
            lock_owner = profile.get('lock_owner', 're-lock-owner')
            try:
                # This transition ensures the `on_enter_LOCKED` actions run, making the
                # state consistent in Redis and in the state machine object.
                sm.lock(owner=lock_owner)
            except Exception as e:
                logger.error(f"Failed to re-lock profile '{name}' during state machine hydration: {e}")
                # The state is now inconsistent; better not to return a broken machine.
                return None
        elif current_state_str != sm.current_state.value.upper():
            # For any other state, we must manually fix both the state machine object and
            # Redis, since the constructor wrongly transitioned to ACTIVE.

            # 1. Force the state on the machine object. This does not trigger actions.
            target_state_obj = next((s for s in sm.states if s.value.upper() == current_state_str), None)
            if not target_state_obj:
                logger.error(f"Could not find state object for '{current_state_str}' during hydration of '{name}'.")
                return None
            sm.current_state = target_state_obj

            # 2. Manually revert the state in Redis to what it should be.
            profile_key = self._profile_key(name)
            pipe = self.redis.pipeline()
            pipe.hset(profile_key, 'state', current_state_str)
            # Atomically move the profile from the incorrect ACTIVE index to the correct one.
            # The constructor may have added it to ACTIVE without removing it from its original state index.
            pipe.zrem(self._state_key(ProfileState.ACTIVE.value), name)
            pipe.zadd(self._state_key(current_state_str), {name: profile.get('last_used', time.time())})
            pipe.execute()
            logger.debug(f"Corrected state for '{name}' to '{current_state_str}' in object and Redis during hydration.")

        return sm

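    # Hydration summary (hedged): constructing ProfileStateMachine always lands in
    # ACTIVE, so the three re-hydration outcomes above are:
    #     stored ACTIVE  -> nothing to fix
    #     stored LOCKED  -> replay sm.lock() so the on_enter actions rerun
    #     anything else  -> force sm.current_state and rewrite the Redis hash/indexes
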
    def cleanup_stale_locks(self, max_lock_time_seconds: int) -> int:
        """Find and unlock profiles with stale locks."""
        locks_key = self._locks_key()
        all_locks = self.redis.hgetall(locks_key)
        if not all_locks:
            logger.debug("No active locks found to clean up.")
            return 0

        now = time.time()
        cleaned_count = 0

        for name, owner in all_locks.items():
            profile = self.get_profile(name)
            if not profile:
                # The lock exists but the profile does not. Clean up the lock.
                self.redis.hdel(locks_key, name)
                logger.warning(f"Removed stale lock for non-existent profile '{name}'")
                cleaned_count += 1
                continue

            # Check for inconsistent locks on ACTIVE profiles.
            # A lock should not exist for a profile that is still ACTIVE, except for the
            # milliseconds between when a worker acquires the lock and when it transitions
            # the profile state to LOCKED. If the policy enforcer sees this state, it is
            # almost certainly a stale lock from a crashed worker.
            if profile.get('state') == ProfileState.ACTIVE.value:
                logger.warning(
                    f"Found inconsistent lock for ACTIVE profile '{name}' (owner: '{owner}'). "
                    "This indicates a worker may have crashed. Cleaning up stale lock."
                )
                self.redis.hdel(locks_key, name)
                cleaned_count += 1
                continue

            lock_timestamp = profile.get('lock_timestamp', 0)
            if lock_timestamp > 0 and (now - lock_timestamp) > max_lock_time_seconds:
                logger.warning(f"Found stale lock for profile '{name}' (locked by '{owner}' for {now - lock_timestamp:.0f}s). Unlocking...")
                if self.unlock_profile(name):
                    cleaned_count += 1

        if cleaned_count > 0:
            logger.info(f"Cleaned up {cleaned_count} stale lock(s).")
        else:
            logger.debug("No stale locks found to clean up.")
        return cleaned_count


def add_profile_manager_parser(subparsers):
    """Adds the parser for the 'profile' command."""
    parser = subparsers.add_parser(
        'profile',
        description='Manage profiles (v2).',
        formatter_class=argparse.RawTextHelpFormatter,
        help='Manage profiles (v2).'
    )

    # Common arguments for all profile manager subcommands
    common_parser = argparse.ArgumentParser(add_help=False)
    common_parser.add_argument('--env-file', help='Path to a .env file to load environment variables from.')
    common_parser.add_argument('--redis-host', default=None, help='Redis host. Defaults to REDIS_HOST or MASTER_HOST_IP env var, or localhost.')
    common_parser.add_argument('--redis-port', type=int, default=None, help='Redis port. Defaults to REDIS_PORT env var, or 6379.')
    common_parser.add_argument('--redis-db', type=int, default=None, help='Redis DB number. Defaults to REDIS_DB env var, or 0.')
    common_parser.add_argument('--redis-password', default=None, help='Redis password. Defaults to REDIS_PASSWORD env var.')
    common_parser.add_argument('--env', default='dev', help="Environment name for Redis key prefix (e.g., 'stg', 'prod'). Used by all non-list commands, and by 'list' for single-view mode. Defaults to 'dev'.")
    common_parser.add_argument('--legacy', action='store_true', help="Use legacy key prefix ('profile_mgmt_') without environment.")
    common_parser.add_argument('--key-prefix', default=None, help='Explicit key prefix for Redis. Overrides --env, --legacy and any defaults.')
    common_parser.add_argument('--verbose', action='store_true', help='Enable verbose logging')

    # Named distinctly from the outer 'subparsers' argument to avoid shadowing.
    profile_subparsers = parser.add_subparsers(dest='profile_command', help='Command to execute', required=True)

    # Create command
    create_parser = profile_subparsers.add_parser('create', help='Create a new profile', parents=[common_parser])
    create_parser.add_argument('name', help='Profile name')
    create_parser.add_argument('proxy', help='Proxy URL (e.g., sslocal-rust-1090:1090)')
    create_parser.add_argument('--state', default=ProfileState.ACTIVE.value,
                               choices=ProfileState.values(),
                               help=f'Initial state (default: {ProfileState.ACTIVE.value})')

    # List command
    list_parser = profile_subparsers.add_parser('list', help='List profiles', parents=[common_parser])
    list_parser.add_argument('--auth-env', help='Environment name for the Auth simulation monitor. Use with --download-env for a merged view.')
    list_parser.add_argument('--download-env', help='Environment name for the Download simulation monitor. Use with --auth-env for a merged view.')
    list_parser.add_argument('--separate-views', action='store_true', help='In dual-monitor mode, show two separate reports instead of a single merged view.')
    list_parser.add_argument('--rest-after-requests', type=int, help='(For display) Show countdown to rest based on this request limit.')
    list_parser.add_argument('--state', help='Filter by state')
    list_parser.add_argument('--proxy', help='Filter by proxy (substring match)')
    list_parser.add_argument('--show-proxy-activity', action='store_true', help='Show a detailed activity summary table for proxies. If --proxy is specified, shows details for that proxy only. Otherwise, shows a summary for all proxies.')
    list_parser.add_argument('--show-reasons', action='store_true', help='Show detailed reasons for group and profile selection states.')
    list_parser.add_argument('--show-activation-history', action='store_true', help='Show the recent profile activation history.')
    list_parser.add_argument('--format', choices=['table', 'json', 'csv'], default='table',
                             help='Output format (default: table)')
    list_parser.add_argument('--live', action='store_true', help='Run continuously with a non-blinking live-updating display.')
    list_parser.add_argument('--no-blink', action='store_true', help='Use ANSI escape codes for smoother screen updates in --live mode (experimental).')
    list_parser.add_argument('--interval-seconds', type=int, default=5, help='When in --live mode, how often to refresh in seconds. Default: 5.')
    list_parser.add_argument('--hide-active-state', action='store_true', help="Display 'ACTIVE' state as blank for cleaner UI.")

    # Get command
    get_parser = profile_subparsers.add_parser('get', help='Get profile details', parents=[common_parser])
    get_parser.add_argument('name', help='Profile name')

    # Set proxy state command
    set_proxy_state_parser = profile_subparsers.add_parser('set-proxy-state', help='Set the state of a proxy and propagate to its profiles.', parents=[common_parser])
    set_proxy_state_parser.add_argument('proxy_url', help='Proxy URL')
    set_proxy_state_parser.add_argument('state', choices=['ACTIVE', 'RESTING'], help='New state for the proxy')
    set_proxy_state_parser.add_argument('--duration-minutes', type=int, help='Duration for the RESTING state')

    # Update state command
    update_state_parser = profile_subparsers.add_parser('update-state', help='Update profile state', parents=[common_parser])
    update_state_parser.add_argument('name', help='Profile name')
    update_state_parser.add_argument('state', choices=ProfileState.values(),
                                     help='New state')
    update_state_parser.add_argument('--reason', help='Reason for state change (especially for BAN)')

    # Update field command
    update_field_parser = profile_subparsers.add_parser('update-field', help='Update a profile field', parents=[common_parser])
    update_field_parser.add_argument('name', help='Profile name')
    update_field_parser.add_argument('field', help='Field name to update')
    update_field_parser.add_argument('value', help='New value')

    # Pause command (convenience)
    pause_parser = profile_subparsers.add_parser('pause', help=f'Pause a profile (sets state to {ProfileState.PAUSED.value}).', parents=[common_parser])
    pause_parser.add_argument('name', help='Profile name')

    # Activate command (convenience)
    activate_parser = profile_subparsers.add_parser('activate', help=f'Activate a profile (sets state to {ProfileState.ACTIVE.value}). Useful for resuming a PAUSED profile or fixing a stale LOCKED one.', parents=[common_parser])
    activate_parser.add_argument('name', help='Profile name')

    # Ban command (convenience)
    ban_parser = profile_subparsers.add_parser('ban', help=f'Ban a profile (sets state to {ProfileState.BANNED.value}).', parents=[common_parser])
    ban_parser.add_argument('name', help='Profile name')
    ban_parser.add_argument('--reason', required=True, help='Reason for ban')

    # Unban command (convenience)
    unban_parser = profile_subparsers.add_parser('unban', help=f'Unban a profile (sets state to {ProfileState.ACTIVE.value} and resets session counters).', parents=[common_parser])
    unban_parser.add_argument('name', help='Profile name')

    # Delete command
    delete_parser = profile_subparsers.add_parser('delete', help='Delete a profile', parents=[common_parser])
    delete_parser.add_argument('name', help='Profile name')
    delete_parser.add_argument('--confirm', action='store_true',
                               help='Confirm deletion (required)')

    # Delete all command
    delete_all_parser = profile_subparsers.add_parser('delete-all', help='(Destructive) Delete all profiles and data under the current key prefix.', parents=[common_parser])
    delete_all_parser.add_argument('--confirm', action='store_true', help='Confirm this highly destructive action (required)')

    # Reset global counters command
    reset_global_parser = profile_subparsers.add_parser('reset-global-counters', help='Reset global counters (e.g., failed_lock_attempts).', parents=[common_parser])

    # Reset counters command
    reset_counters_parser = profile_subparsers.add_parser(
        'reset-counters',
        help='Reset session counters for profiles or proxies.',
        description="Resets session-specific counters (success, failure, etc.) for one or more profiles.\n\nWARNING: This only resets Redis counters. It does not affect any data stored on disk\n(e.g., downloaded files, logs) associated with the profile or proxy.",
        formatter_class=argparse.RawTextHelpFormatter,
        parents=[common_parser]
    )
    reset_group = reset_counters_parser.add_mutually_exclusive_group(required=True)
    reset_group.add_argument('--profile-name', help='The name of the single profile to reset.')
    reset_group.add_argument('--proxy-url', help='Reset all profiles associated with this proxy.')
    reset_group.add_argument('--all-profiles', action='store_true', help='Reset all profiles in the environment.')

    # Record activity command (for testing)
    record_parser = profile_subparsers.add_parser('record-activity', help='(Testing) Record a synthetic activity event for a profile.', parents=[common_parser])
    record_parser.add_argument('name', help='Profile name')
    record_parser.add_argument('type', choices=['success', 'failure', 'tolerated_error', 'download', 'download_error'], help='Activity type')
    record_parser.add_argument('--timestamp', type=float, help='Timestamp (default: now)')

    # Get rate command
    rate_parser = profile_subparsers.add_parser('get-rate', help='Get activity rate for a profile', parents=[common_parser])
    rate_parser.add_argument('name', help='Profile name')
    rate_parser.add_argument('type', choices=['success', 'failure', 'tolerated_error', 'download', 'download_error'], help='Activity type')
    rate_parser.add_argument('--window', type=int, default=3600,
                             help='Time window in seconds (default: 3600)')
    return parser

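# Example invocations (hedged: assumes this parser is mounted on the package's
# top-level CLI; '<cli>', 'user1' and the reason text are placeholders):
#     <cli> profile create user1 sslocal-rust-1090:1090 --env stg
#     <cli> profile list --env stg --live --interval-seconds 10
#     <cli> profile ban user1 --reason "captcha loop" --env stg
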
def _build_profile_groups_config(manager, profiles):
    """Builds a configuration structure for profile groups by reading their state from Redis."""
    group_state_keys = list(manager.redis.scan_iter(f"{manager.key_prefix}profile_group_state:*"))
    if not group_state_keys:
        return []

    group_names = [k.split(':')[-1] for k in group_state_keys]
    group_states = manager.get_profile_group_states(group_names)

    config = []
    for name, state in group_states.items():
        profiles_in_group = []
        prefix = state.get('prefix')
        if prefix:
            profiles_in_group = [p['name'] for p in profiles if p['name'].startswith(prefix)]

        config.append({
            'name': name,
            'profiles_in_group': profiles_in_group,
            **state
        })
    return config


def _render_all_proxies_activity_summary(manager, simulation_type, file=sys.stdout):
    """Renders a summary of activity rates for all proxies."""
    if not manager:
        return

    print(f"\n--- All Proxies Activity Summary ({simulation_type}) ---", file=file)

    all_profiles = manager.list_profiles()
    if not all_profiles:
        print("No profiles found to determine the proxy list.", file=file)
        return

    unique_proxies = sorted(set(p['proxy'] for p in all_profiles if p.get('proxy')))
    if not unique_proxies:
        print("No proxies are currently associated with any profiles.", file=file)
        return

    proxy_states = manager.get_proxy_states(unique_proxies)

    is_auth_sim = 'Auth' in simulation_type
    # Sum all relevant activity types for the rate columns
    activity_types_to_sum = ['success', 'failure', 'tolerated_error'] if is_auth_sim else ['download', 'download_error', 'tolerated_error']

    proxy_work_minutes_str = manager.get_config('proxy_work_minutes')
    proxy_work_minutes = 0
    if proxy_work_minutes_str and proxy_work_minutes_str.isdigit():
        proxy_work_minutes = int(proxy_work_minutes_str)

    proxy_rest_minutes_str = manager.get_config('proxy_rest_duration_minutes')
    proxy_rest_minutes = 0
    if proxy_rest_minutes_str and proxy_rest_minutes_str.isdigit():
        proxy_rest_minutes = int(proxy_rest_minutes_str)

    table_data = []
    headers = ['Proxy URL', 'State', 'Policy', 'State Ends In', 'Reqs (1m)', 'Reqs (5m)', 'Reqs (1h)']

    for proxy_url in unique_proxies:
        state_data = proxy_states.get(proxy_url, {})
        state = state_data.get('state', 'N/A')
        rest_until = state_data.get('rest_until', 0)
        work_start = state_data.get('work_start_timestamp', 0)

        state_str = state
        countdown_str = "N/A"
        now = time.time()

        policy_str = "N/A"
        group_name = state_data.get('group_name')
        work_minutes_for_countdown = 0

        if group_name:
            group_work_minutes = state_data.get('group_work_minutes', 0)
            try:
                group_work_minutes = int(group_work_minutes)
                work_minutes_for_countdown = group_work_minutes
            except (ValueError, TypeError):
                group_work_minutes = 0
            policy_str = f"Group: {group_name}\n({group_work_minutes}m/proxy)"
        elif proxy_work_minutes > 0:
            policy_str = f"Work: {proxy_work_minutes}m\nRest: {proxy_rest_minutes}m"
            work_minutes_for_countdown = proxy_work_minutes

        if state == 'RESTING' and rest_until > now:
            countdown_str = format_duration(rest_until - now)
        elif state == 'ACTIVE' and work_start > 0 and work_minutes_for_countdown > 0:
            work_end_time = work_start + (work_minutes_for_countdown * 60)
            if work_end_time > now:
                countdown_str = format_duration(work_end_time - now)
            else:
                countdown_str = "Now"

        rate_1m = sum(manager.get_proxy_activity_rate(proxy_url, act_type, 60) for act_type in activity_types_to_sum)
        rate_5m = sum(manager.get_proxy_activity_rate(proxy_url, act_type, 300) for act_type in activity_types_to_sum)
        rate_1h = sum(manager.get_proxy_activity_rate(proxy_url, act_type, 3600) for act_type in activity_types_to_sum)

        row = [
            proxy_url,
            state_str,
            policy_str,
            countdown_str,
            rate_1m,
            rate_5m,
            rate_1h,
        ]
        table_data.append(row)

    if table_data:
        print(tabulate(table_data, headers=headers, tablefmt='grid'), file=file)


def _render_proxy_activity_summary(manager, proxy_url, simulation_type, file=sys.stdout):
    """Renders a detailed activity summary for a single proxy."""
    if not manager or not proxy_url:
        return

    print(f"\n--- Activity Summary for Proxy: {proxy_url} ({simulation_type}) ---", file=file)

    proxy_work_minutes_str = manager.get_config('proxy_work_minutes')
    proxy_work_minutes = 0
    if proxy_work_minutes_str and proxy_work_minutes_str.isdigit():
        proxy_work_minutes = int(proxy_work_minutes_str)

    proxy_rest_minutes_str = manager.get_config('proxy_rest_duration_minutes')
    proxy_rest_minutes = 0
    if proxy_rest_minutes_str and proxy_rest_minutes_str.isdigit():
        proxy_rest_minutes = int(proxy_rest_minutes_str)

    proxy_state_data = manager.get_proxy_states([proxy_url]).get(proxy_url, {})
    state = proxy_state_data.get('state', 'N/A')
    rest_until = proxy_state_data.get('rest_until', 0)
    work_start = proxy_state_data.get('work_start_timestamp', 0)

    policy_str = "N/A"
    group_name = proxy_state_data.get('group_name')
    work_minutes_for_countdown = 0

    if group_name:
        group_work_minutes = proxy_state_data.get('group_work_minutes', 0)
        try:
            group_work_minutes = int(group_work_minutes)
            work_minutes_for_countdown = group_work_minutes
        except (ValueError, TypeError):
            group_work_minutes = 0
        policy_str = f"Group: {group_name} ({group_work_minutes}m/proxy)"
    elif proxy_work_minutes > 0:
        policy_str = f"Work: {proxy_work_minutes}m, Rest: {proxy_rest_minutes}m"
        work_minutes_for_countdown = proxy_work_minutes

    state_str = state
    now = time.time()
    if state == 'RESTING' and rest_until > now:
        state_str += f" (ends in {format_duration(rest_until - now)})"

    active_duration_str = "N/A"
    time_until_rest_str = "N/A"
    if state == 'ACTIVE' and work_start > 0:
        active_duration_str = format_duration(now - work_start)
        if work_minutes_for_countdown > 0:
            work_end_time = work_start + (work_minutes_for_countdown * 60)
            if work_end_time > now:
                time_until_rest_str = format_duration(work_end_time - now)
            else:
                time_until_rest_str = "Now"

    summary_data = [
        ("State", state_str),
        ("Policy", policy_str),
        ("Active Since", format_timestamp(work_start)),
        ("Active Duration", active_duration_str),
        ("Time Until Rest", time_until_rest_str),
    ]
    print(tabulate(summary_data, tablefmt='grid'), file=file)

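    # Each label maps to a lookback window in seconds; the same windows are
    # passed to get_proxy_activity_rate() for every activity type below.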
    windows = {
        "Last 1 Min": 60,
        "Last 5 Min": 300,
        "Last 1 Hour": 3600,
        "Last 24 Hours": 86400,
    }

    is_auth_sim = 'Auth' in simulation_type
    activity_types = ['success', 'failure', 'tolerated_error'] if is_auth_sim else ['download', 'download_error', 'tolerated_error']

    table_data = []
    headers = ['Window'] + [act_type.replace('_', ' ').title() for act_type in activity_types]

    for name, seconds in windows.items():
        row = [name]
        for act_type in activity_types:
            count = manager.get_proxy_activity_rate(proxy_url, act_type, seconds)
            row.append(count)
        table_data.append(row)

    if table_data:
        print(tabulate(table_data, headers=headers, tablefmt='grid'), file=file)

def _render_profile_group_summary_table(manager, all_profiles, profile_groups_config, args, file=sys.stdout):
    """Renders a summary table for profile groups."""
    if not profile_groups_config:
        return

    print("\nProfile Group Status:", file=file)
    table_data = []
    all_profiles_map = {p['name']: p for p in all_profiles}

    # --- New logic to determine the next group to be activated ---
    profile_selection_strategy = manager.get_config('profile_selection_strategy')
    next_up_group_name = None
    next_up_reason = ""

    if profile_selection_strategy and profile_groups_config:
        # This logic mirrors the enforcer's selection process for display purposes.
        # It determines which group is likely to have its profile activated next.
        now = time.time()
        all_profiles_by_name = {p['name']: p for p in all_profiles}

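        # 'least_loaded' orders groups by a tuple: fewest pending downloads
        # first, then oldest last-finished timestamp (groups that never
        # reported a finish sort last via float('inf')), then name as a
        # stable tie-breaker. The head of the sorted list is the group most
        # likely to be activated next.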
        if profile_selection_strategy == 'least_loaded':
            sorted_groups = sorted(
                profile_groups_config,
                key=lambda g: (
                    g.get('pending_downloads', 0),
                    g.get('last_finished_downloads_ts', float('inf')),
                    g.get('name', '')
                )
            )
            if sorted_groups:
                next_up_group = sorted_groups[0]
                next_up_group_name = next_up_group['name']
                next_up_reason = profile_selection_strategy
                if getattr(args, 'show_reasons', False):
                    load = next_up_group.get('pending_downloads', 0)
                    finish_ts = next_up_group.get('last_finished_downloads_ts', 0)
                    finish_str = f"finished {format_duration(time.time() - finish_ts)} ago" if finish_ts > 0 else "never finished"
                    next_up_reason = f"least_loaded (load: {load}, {finish_str})"

        elif profile_selection_strategy == 'longest_idle':
            # Find the single longest idle profile across all groups
            ready_profiles = []
            for group in profile_groups_config:
                for p_name in group.get('profiles_in_group', []):
                    p = all_profiles_by_name.get(p_name)
                    if p and p['state'] in [ProfileState.RESTING.value, ProfileState.COOLDOWN.value] and p.get('rest_until', 0) <= now and p.get('rest_reason') != 'waiting_downloads':
                        ready_profiles.append(p)

            if ready_profiles:
                # Sort them according to the 'longest_idle' activation logic
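                # Never-used profiles (all session counters at zero) rank
                # ahead of used ones and are tried in natural name order;
                # used profiles follow, ordered by oldest last_used first.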
                unused_profiles = [p for p in ready_profiles if (p.get('success_count', 0) + p.get('failure_count', 0) + p.get('tolerated_error_count', 0) + p.get('download_count', 0) + p.get('download_error_count', 0)) == 0]
                used_profiles = [p for p in ready_profiles if p not in unused_profiles]

                unused_profiles.sort(key=lambda p: natural_sort_key(p.get('name', '')))
                used_profiles.sort(key=lambda p: (p.get('last_used', 0), natural_sort_key(p.get('name', ''))))

                sorted_ready_profiles = unused_profiles + used_profiles

                if sorted_ready_profiles:
                    next_profile = sorted_ready_profiles[0]
                    # Find which group it belongs to
                    for group in profile_groups_config:
                        if next_profile['name'] in group.get('profiles_in_group', []):
                            next_up_group_name = group['name']
                            next_up_reason = profile_selection_strategy
                            if getattr(args, 'show_reasons', False):
                                last_used_ts = next_profile.get('last_used', 0)
                                idle_time_str = f"idle for {format_duration(time.time() - last_used_ts)}" if last_used_ts > 0 else "never used"
                                next_up_reason = f"longest_idle (via {next_profile['name']}, {idle_time_str})"
                            break
    # --- End new logic ---

    for group in profile_groups_config:
        group_name = group.get('name', 'N/A')
        profiles_in_group = group.get('profiles_in_group', [])

        active_profiles = [
            p_name for p_name in profiles_in_group
            if all_profiles_map.get(p_name, {}).get('state') in [ProfileState.ACTIVE.value, ProfileState.LOCKED.value]
        ]

        active_profiles_str = ', '.join(active_profiles) or "None"

        max_active = group.get('max_active_profiles', 1)
        policy_str = f"{len(active_profiles)}/{max_active} Active"

        rotate_after = group.get('rotate_after_requests')
        rotation_rule_str = f"After {rotate_after} reqs" if rotate_after else "N/A"

        reqs_left_str = "N/A"
        if rotate_after and rotate_after > 0 and active_profiles:
            # Show countdown for the first active profile
            active_profile_name = active_profiles[0]
            p = all_profiles_map.get(active_profile_name)
            if p:
                total_reqs = (
                    p.get('success_count', 0) + p.get('failure_count', 0) +
                    p.get('tolerated_error_count', 0) +
                    p.get('download_count', 0) + p.get('download_error_count', 0)
                )
                remaining_reqs = rotate_after - total_reqs
                reqs_left_str = str(max(0, int(remaining_reqs)))

        # Recalculate pending downloads on the fly for display accuracy
        pending_downloads = sum(
            all_profiles_map.get(p_name, {}).get('pending_downloads', 0)
            for p_name in profiles_in_group
        )

        selection_priority_str = ""
        if group_name == next_up_group_name:
            selection_priority_str = f"<- Next Up ({next_up_reason})"

        time_since_finish_str = "N/A"
        last_finish_ts = group.get('last_finished_downloads_ts', 0)
        if last_finish_ts > 0:
            time_since_finish_str = format_duration(time.time() - last_finish_ts)

        table_data.append([
            group_name,
            active_profiles_str,
            policy_str,
            rotation_rule_str,
            reqs_left_str,
            pending_downloads,
            time_since_finish_str,
            selection_priority_str
        ])

    headers = ['Group Name', 'Active Profile(s)', 'Policy', 'Rotation Rule', 'Requests Left ↓', 'Pending DLs', 'Time Since Finish', 'Selection Priority']
    print(tabulate(table_data, headers=headers, tablefmt='grid'), file=file)

def _render_activation_history_table(manager, file=sys.stdout):
    """Renders a table of the most recent profile activation events."""
    if not manager:
        return

    # Fetch more events to ensure we have enough after filtering.
    # The log is capped at 20 entries in Redis.
    events = manager.get_activation_events(count=20)

    # Filter out non-activation events and take the most recent 10.
    filtered_events = [
        e for e in events if e.get('reason') != 'Rest/Cooldown completed'
    ][:10]

    if not filtered_events:
        # Don't print the header if there's nothing to show.
        return

    print("\nRecent Profile Activations:", file=file)
    table_data = []
    for event in filtered_events:
        ts = event.get('ts', 0)
        time_str = format_timestamp(ts) if ts > 0 else "N/A"

        table_data.append([
            time_str,
            event.get('profile', 'N/A'),
            event.get('group', 'N/A'),
            event.get('reason', 'N/A')
        ])

    headers = ['Time', 'Profile', 'Group', 'Reason']
    print(tabulate(table_data, headers=headers, tablefmt='grid'), file=file)

def _render_profile_details_table(manager, args, simulation_type, profile_groups_config, file=sys.stdout):
    """Renders the detailed profile list table for a given manager."""
    if not manager:
        print("Manager not configured.", file=file)
        return

    profiles = manager.list_profiles(args.state, args.proxy)
    if not profiles:
        print("No profiles found matching the criteria.", file=file)
        return

    table_data = []
    is_auth_sim = 'Auth' in simulation_type

    for p in profiles:
        rest_until_str = 'N/A'
        last_rest_ts = p.get('last_rest_timestamp', 0)
        last_rest_str = format_timestamp(last_rest_ts)

        state_str = p.get('state', 'UNKNOWN')

        if state_str in ['RESTING', 'COOLDOWN']:
            rest_until = p.get('rest_until', 0)
            if rest_until > 0:
                remaining = rest_until - time.time()
                if remaining > 0:
                    rest_until_str = f"in {format_duration(remaining)}"
                else:
                    rest_until_str = "Ending now"

            if last_rest_ts == 0:
                last_rest_str = "NOW"

        pending_dl_count = p.get('pending_downloads', 0)
        if p.get('rest_reason') == 'waiting_downloads' and pending_dl_count > 0:
            state_str += f"\n({pending_dl_count} DLs)"

        countdown_str = 'N/A'
        # Find the group this profile belongs to and get its rotation policy
        profile_group = next((g for g in profile_groups_config if p['name'] in g.get('profiles_in_group', [])), None)

        rotate_after = 0
        if profile_group:
            # The group may not define 'rotate_after_requests'; fall back to 0
            # so the comparison below never sees None.
            rotate_after = profile_group.get('rotate_after_requests') or 0
        elif args.rest_after_requests and args.rest_after_requests > 0:
            rotate_after = args.rest_after_requests

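        # The request countdown ('ReqCD' column) counts every session counter
        # (auth, download, and tolerated errors) against the rotation
        # threshold, so it reaches zero when the profile is due to rotate
        # regardless of simulation type.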
        if rotate_after > 0 and state_str != ProfileState.COOLDOWN.value:
            total_reqs = (
                p.get('success_count', 0) + p.get('failure_count', 0) +
                p.get('tolerated_error_count', 0) +
                p.get('download_count', 0) + p.get('download_error_count', 0)
            )
            remaining_reqs = rotate_after - total_reqs
            countdown_str = str(max(0, int(remaining_reqs)))

        if args.hide_active_state and state_str == 'ACTIVE':
            state_str = ''

        row = [
            p.get('name', 'MISSING_NAME'),
            p.get('proxy', 'MISSING_PROXY'),
            state_str,
            format_timestamp(p.get('last_used', 0)),
        ]

        if is_auth_sim:
            row.extend([
                p.get('success_count', 0),
                p.get('failure_count', 0),
                p.get('tolerated_error_count', 0),
                p.get('global_success_count', 0),
                p.get('global_failure_count', 0),
            ])
        else:  # is_download_sim or unknown
            row.extend([
                p.get('download_count', 0),
                p.get('download_error_count', 0),
                p.get('tolerated_error_count', 0),
                p.get('global_download_count', 0),
                p.get('global_download_error_count', 0),
            ])

        # Display generic 'reason' field as a fallback for 'rest_reason'
        reason_str = p.get('rest_reason') or p.get('reason') or ''
        row.extend([
            countdown_str,
            rest_until_str,
            reason_str,
            p.get('ban_reason') or '',
            p.get('pending_downloads', 0)
        ])
        table_data.append(row)

    headers = ['Name', 'Proxy', 'State', 'Last Used']

    if is_auth_sim:
        headers.extend(['AuthOK', 'AuthFail', 'Skip.Err', 'Tot.AuthOK', 'Tot.AuthFail'])
    else:  # is_download_sim or unknown
        headers.extend(['DataOK', 'DownFail', 'Skip.Err', 'Tot.DataOK', 'Tot.DownFail'])

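    # The '↓' suffix marks countdown columns: ReqCD is requests left before
    # rotation and RestCD is time left in the current rest/cooldown.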
    headers.extend(['ReqCD ↓', 'RestCD ↓', 'R.Reason', 'B.Reason', 'Pend.DLs'])

    # Using `maxcolwidths` to control column width for backward compatibility
    # with older versions of the `tabulate` library. This prevents content
    # from making columns excessively wide, but does not guarantee a fixed width.
    maxwidths = None
    if table_data or headers:  # Check headers in case table_data is empty
        # Transpose table to get columns, including headers.
        # This handles empty table_data correctly.
        columns = list(zip(*([headers] + table_data)))
        # Calculate max width for each column based on its content.
        maxwidths = [max(len(str(x)) for x in col) if col else 0 for col in columns]

        # Enforce a minimum width for the reason columns to keep table width stable.
        DEFAULT_REASON_WIDTH = 25
        try:
            r_reason_idx = headers.index('R.Reason')
            b_reason_idx = headers.index('B.Reason')
            maxwidths[r_reason_idx] = max(DEFAULT_REASON_WIDTH, maxwidths[r_reason_idx])
            maxwidths[b_reason_idx] = max(DEFAULT_REASON_WIDTH, maxwidths[b_reason_idx])
        except (ValueError, IndexError):
            # This should not happen if headers are constructed as expected.
            pass

    print(tabulate(table_data, headers=headers, tablefmt='grid', maxcolwidths=maxwidths), file=file)

def _render_simulation_view(title, manager, args, file=sys.stdout):
    """Helper function to render the list of profiles for a single simulation environment."""
    if not manager:
        print(f"\n--- {title} (Environment Not Configured) ---", file=file)
        return 0

    # tabulate is only needed for the table format; the JSON and CSV paths
    # below can run without it.
    if args.format not in ('json', 'csv') and not tabulate:
        print("'tabulate' library is required for table format. Please install it.", file=sys.stderr)
        return 1

    profiles = manager.list_profiles(args.state, args.proxy)

    if args.format == 'json':
        print(json.dumps(profiles, indent=2, default=str), file=file)
        return 0
    elif args.format == 'csv':
        if profiles:
            headers = profiles[0].keys()
            print(','.join(headers), file=file)
            for p in profiles:
                print(','.join(str(p.get(h, '')) for h in headers), file=file)
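        # NOTE: values are joined with a bare ',' and are not quoted or
        # escaped, so fields containing commas or newlines will produce
        # malformed CSV rows.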
        return 0

    # --- Table Format with Summaries ---
    print(f"\n--- {title} ---", file=file)

    if args.show_proxy_activity:
        if args.proxy:
            _render_proxy_activity_summary(manager, args.proxy, title, file=file)
        else:
            _render_all_proxies_activity_summary(manager, title, file=file)

    profile_groups_config = _build_profile_groups_config(manager, profiles)

    profile_selection_strategy = manager.get_config('profile_selection_strategy')
    if profile_selection_strategy:
        print(f"Profile Selection Strategy: {profile_selection_strategy}", file=file)

    _render_profile_group_summary_table(manager, profiles, profile_groups_config, args, file=file)

    failed_lock_attempts = manager.get_failed_lock_attempts()
    global_stats = manager.get_global_stats()
    per_proxy_stats = manager.get_per_proxy_stats()

    unique_proxies = sorted(per_proxy_stats.keys())
    proxy_states = manager.get_proxy_states(unique_proxies)

    # Build global summary
    total_reqs = global_stats['total_success'] + global_stats['total_failure']
    success_rate = (global_stats['total_success'] / total_reqs * 100) if total_reqs > 0 else 100
    global_summary_str = (
        f"Total Requests: {total_reqs} | "
        f"Success: {global_stats['total_success']} | "
        f"Failure: {global_stats['total_failure']} | "
        f"Tolerated Error: {global_stats['total_tolerated_error']} | "
        f"Downloads: {global_stats['total_downloads']} | "
        f"Download Errors: {global_stats.get('total_download_errors', 0)} | "
        f"Success Rate: {success_rate:.2f}% | "
        f"Failed Lock Attempts: {failed_lock_attempts}"
    )
    print("Global Stats:", global_summary_str, file=file)

    # Build per-proxy summary
    if per_proxy_stats:
        print("\nPer-Proxy Stats:", file=file)
        proxy_table_data = []
        for proxy_url in unique_proxies:
            stats = per_proxy_stats[proxy_url]
            state_info = proxy_states.get(proxy_url, {})
            state = state_info.get('state', 'ACTIVE')

            cooldown_str = 'N/A'
            if state == 'RESTING':
                rest_until = state_info.get('rest_until', 0)
                if rest_until > time.time():
                    cooldown_str = f"in {format_duration(rest_until - time.time())}"
                else:
                    cooldown_str = "Ending now"

            proxy_total_auth = stats['success'] + stats['failure']
            proxy_total_downloads = stats['downloads'] + stats['download_errors']
            proxy_total_reqs = proxy_total_auth + proxy_total_downloads
            proxy_success_rate = (stats['success'] / proxy_total_auth * 100) if proxy_total_auth > 0 else 100

            proxy_table_data.append([
                proxy_url,
                state,
                cooldown_str,
                stats['profiles'],
                proxy_total_reqs,
                stats['success'],
                stats['failure'],
                stats['tolerated_error'],
                stats['downloads'],
                stats['download_errors'],
                f"{proxy_success_rate:.1f}%"
            ])
        proxy_headers = ['Proxy', 'State', 'Cooldown', 'Profiles', 'Total Reqs', 'AuthOK', 'AuthFail', 'Skip.Err', 'DataOK', 'DownFail', 'OK %']
        print(tabulate(proxy_table_data, headers=proxy_headers, tablefmt='grid'), file=file)

    print("\nProfile Details:", file=file)
    _render_profile_details_table(manager, args, title, profile_groups_config, file=file)

    if args.show_activation_history:
        _render_activation_history_table(manager, file=file)
    return 0

def _render_merged_view(auth_manager, download_manager, args, file=sys.stdout):
    """Renders a merged, unified view for both auth and download simulations."""
    # --- 1. Fetch ALL data first to prevent delays during rendering ---
    auth_stats = auth_manager.get_global_stats()
    auth_failed_locks = auth_manager.get_failed_lock_attempts()
    dl_stats = download_manager.get_global_stats()
    dl_failed_locks = download_manager.get_failed_lock_attempts()

    auth_proxy_stats = auth_manager.get_per_proxy_stats()
    dl_proxy_stats = download_manager.get_per_proxy_stats()
    all_proxies = sorted(set(auth_proxy_stats.keys()) | set(dl_proxy_stats.keys()))

    auth_proxy_states, dl_proxy_states = {}, {}
    if all_proxies:
        auth_proxy_states = auth_manager.get_proxy_states(all_proxies)
        dl_proxy_states = download_manager.get_proxy_states(all_proxies)

    auth_profiles = auth_manager.list_profiles(args.state, args.proxy)
    auth_groups_config = _build_profile_groups_config(auth_manager, auth_profiles)

    dl_profiles = download_manager.list_profiles(args.state, args.proxy)
    dl_groups_config = _build_profile_groups_config(download_manager, dl_profiles)

    # --- 2. Prepare all display data using fetched information ---
    total_reqs = auth_stats['total_success'] + auth_stats['total_failure']
    success_rate = (auth_stats['total_success'] / total_reqs * 100) if total_reqs > 0 else 100

    total_dls = dl_stats['total_downloads'] + dl_stats['total_download_errors']
    dl_success_rate = (dl_stats['total_downloads'] / total_dls * 100) if total_dls > 0 else 100

    global_summary_str = (
        f"Auth: {total_reqs} reqs ({auth_stats['total_success']} OK, {auth_stats['total_failure']} Fail, {auth_stats['total_tolerated_error']} Tol.Err) | "
        f"OK Rate: {success_rate:.2f}% | "
        f"Failed Locks: {auth_failed_locks} || "
        f"Download: {total_dls} attempts ({dl_stats['total_downloads']} OK, {dl_stats['total_download_errors']} Fail) | "
        f"OK Rate: {dl_success_rate:.2f}% | "
        f"Failed Locks: {dl_failed_locks}"
    )

    proxy_table_data = []
    if all_proxies:
        for proxy in all_proxies:
            astats = auth_proxy_stats.get(proxy, {})
            dstats = dl_proxy_stats.get(proxy, {})
            astate = auth_proxy_states.get(proxy, {})
            dstate = dl_proxy_states.get(proxy, {})

            state_str = f"{astate.get('state', 'N/A')} / {dstate.get('state', 'N/A')}"

            proxy_table_data.append([
                proxy,
                state_str,
                astats.get('profiles', 0),
                dstats.get('profiles', 0),
                astats.get('success', 0),
                astats.get('failure', 0),
                astats.get('tolerated_error', 0),
                dstats.get('downloads', 0),
                dstats.get('download_errors', 0),
                dstats.get('tolerated_error', 0)
            ])

    # --- 3. Render everything to the buffer at once ---
    print("--- Global Simulation Stats ---", file=file)
    print(global_summary_str, file=file)

    if args.show_proxy_activity:
        if args.proxy:
            _render_proxy_activity_summary(auth_manager, args.proxy, "Auth", file=file)
            _render_proxy_activity_summary(download_manager, args.proxy, "Download", file=file)
        else:
            # In merged view, it makes sense to show both summaries if requested.
            _render_all_proxies_activity_summary(auth_manager, "Auth", file=file)
            _render_all_proxies_activity_summary(download_manager, "Download", file=file)

    if all_proxies:
        print("\n--- Per-Proxy Stats (Merged) ---", file=file)
        proxy_headers = ['Proxy', 'State (A/D)', 'Profiles (A)', 'Profiles (D)', 'AuthOK', 'AuthFail', 'Skip.Err(A)', 'DataOK', 'DownFail', 'Skip.Err(D)']
        print(tabulate(proxy_table_data, headers=proxy_headers, tablefmt='grid'), file=file)

    print(f"\n--- Auth Simulation Profile Details ({args.auth_env}) ---", file=file)
    profile_selection_strategy = auth_manager.get_config('profile_selection_strategy')
    if profile_selection_strategy:
        print(f"Profile Selection Strategy: {profile_selection_strategy}", file=file)
    _render_profile_group_summary_table(auth_manager, auth_profiles, auth_groups_config, args, file=file)
    _render_profile_details_table(auth_manager, args, "Auth", auth_groups_config, file=file)
    if args.show_activation_history:
        _render_activation_history_table(auth_manager, file=file)

    print(f"\n--- Download Simulation Profile Details ({args.download_env}) ---", file=file)
    _render_profile_group_summary_table(download_manager, dl_profiles, dl_groups_config, args, file=file)
    _render_profile_details_table(download_manager, args, "Download", dl_groups_config, file=file)
    if args.show_activation_history:
        _render_activation_history_table(download_manager, file=file)

    return 0

def _print_profile_list(manager, args, title="Profile Status"):
    """Helper function to print the list of profiles in the desired format."""
    return _render_simulation_view(title, manager, args, file=sys.stdout)

def main_profile_manager(args):
    """Main dispatcher for 'profile' command."""
    if load_dotenv:
        env_file = args.env_file
        if not env_file and args.env and '.env' in args.env and os.path.exists(args.env):
            print(f"Warning: --env should be an environment name (e.g., 'dev'), not a file path. Treating '{args.env}' as --env-file. The environment name will default to 'dev'.", file=sys.stderr)
            env_file = args.env
            args.env = 'dev'

        was_loaded = load_dotenv(env_file)
        if was_loaded:
            print(f"Loaded environment variables from {env_file or '.env file'}", file=sys.stderr)
        elif args.env_file:
            print(f"ERROR: The specified --env-file was not found: {args.env_file}", file=sys.stderr)
            return 1

    if args.redis_host is None:
        args.redis_host = os.getenv('REDIS_HOST', os.getenv('MASTER_HOST_IP', 'localhost'))
    if args.redis_port is None:
        args.redis_port = int(os.getenv('REDIS_PORT', 6379))
    if getattr(args, 'redis_db', None) is None:
        args.redis_db = int(os.getenv('REDIS_DB', 0))
    if args.redis_password is None:
        args.redis_password = os.getenv('REDIS_PASSWORD')

    if args.verbose:
        logging.getLogger().setLevel(logging.DEBUG)

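    # Resolve the Redis key prefix: an explicit --key-prefix wins, --legacy
    # selects the old unscoped 'profile_mgmt_' prefix, and otherwise the
    # prefix is derived from the environment name (e.g. 'dev' ->
    # 'dev_profile_mgmt_').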
    if args.key_prefix:
        key_prefix = args.key_prefix
    elif args.legacy:
        key_prefix = 'profile_mgmt_'
    else:
        key_prefix = f"{args.env}_profile_mgmt_"

    manager = ProfileManager(
        redis_host=args.redis_host,
        redis_port=args.redis_port,
        redis_password=args.redis_password,
        key_prefix=key_prefix,
        redis_db=args.redis_db
    )

    if args.profile_command == 'create':
        success = manager.create_profile(args.name, args.proxy, args.state)
        return 0 if success else 1

    elif args.profile_command == 'list':
        is_dual_mode = args.auth_env and args.download_env

        def _create_manager(env_name, is_for_dual_mode):
            if not env_name:
                return None

            # For dual mode, we ignore --legacy and --key-prefix from CLI, and derive from env name.
            # This is opinionated but makes dual-mode behavior predictable.
            if is_for_dual_mode:
                key_prefix = f"{env_name}_profile_mgmt_"
            else:
                # Single mode respects all CLI flags
                if args.key_prefix:
                    key_prefix = args.key_prefix
                elif args.legacy:
                    key_prefix = 'profile_mgmt_'
                else:
                    key_prefix = f"{env_name}_profile_mgmt_"

            return ProfileManager(
                redis_host=args.redis_host, redis_port=args.redis_port,
                redis_password=args.redis_password, key_prefix=key_prefix,
                redis_db=args.redis_db
            )

        if not args.live:
            if is_dual_mode and not args.separate_views:
                auth_manager = _create_manager(args.auth_env, is_for_dual_mode=True)
                download_manager = _create_manager(args.download_env, is_for_dual_mode=True)
                return _render_merged_view(auth_manager, download_manager, args)
            elif is_dual_mode and args.separate_views:
                auth_manager = _create_manager(args.auth_env, is_for_dual_mode=True)
                download_manager = _create_manager(args.download_env, is_for_dual_mode=True)
                _render_simulation_view(f"Auth Simulation ({args.auth_env})", auth_manager, args)
                _render_simulation_view(f"Download Simulation ({args.download_env})", download_manager, args)
                return 0
            else:
                # Single view mode
                single_env = args.auth_env or args.download_env or args.env
                manager = _create_manager(single_env, is_for_dual_mode=False)
                # Determine the title for correct table headers
                title = f"Profile Status ({single_env})"
                if args.auth_env:
                    title = f"Auth Simulation ({args.auth_env})"
                elif args.download_env:
                    title = f"Download Simulation ({args.download_env})"
                return _print_profile_list(manager, args, title=title)

        # --- Live Mode ---
        pm_logger = logging.getLogger(__name__)
        original_log_level = pm_logger.level
        try:
            if args.no_blink:
                sys.stdout.write('\033[?25l')  # Hide cursor
                sys.stdout.flush()

            # Register signal handlers for graceful shutdown in live mode
            signal.signal(signal.SIGINT, handle_shutdown)
            signal.signal(signal.SIGTERM, handle_shutdown)

            while not shutdown_event.is_set():
                pm_logger.setLevel(logging.WARNING)  # Suppress connection logs for cleaner UI
                start_time = time.time()

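                # Render the full view into an in-memory buffer first, then
                # write it to the terminal in a single blit; together with the
                # clear-screen below this keeps refreshes flicker-free.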
                output_buffer = io.StringIO()
                print(f"--- Profile Status (auto-refreshing every {args.interval_seconds}s, Ctrl+C to exit) | Last updated: {datetime.now().strftime('%H:%M:%S')} ---", file=output_buffer)

                if is_dual_mode and not args.separate_views:
                    auth_manager = _create_manager(args.auth_env, is_for_dual_mode=True)
                    download_manager = _create_manager(args.download_env, is_for_dual_mode=True)
                    _render_merged_view(auth_manager, download_manager, args, file=output_buffer)
                elif is_dual_mode and args.separate_views:
                    auth_manager = _create_manager(args.auth_env, is_for_dual_mode=True)
                    download_manager = _create_manager(args.download_env, is_for_dual_mode=True)
                    _render_simulation_view(f"Auth Simulation ({args.auth_env})", auth_manager, args, file=output_buffer)
                    _render_simulation_view(f"Download Simulation ({args.download_env})", download_manager, args, file=output_buffer)
                else:
                    # Single view mode
                    single_env = args.auth_env or args.download_env or args.env
                    manager = _create_manager(single_env, is_for_dual_mode=False)
                    # Determine the title for correct table headers
                    title = f"Profile Status ({single_env})"
                    if args.auth_env:
                        title = f"Auth Simulation ({args.auth_env})"
                    elif args.download_env:
                        title = f"Download Simulation ({args.download_env})"
                    _render_simulation_view(title, manager, args, file=output_buffer)

                pm_logger.setLevel(original_log_level)  # Restore log level
                fetch_and_render_duration = time.time() - start_time

                if args.no_blink:
                    sys.stdout.write('\033[2J\033[H')  # Clear screen, move to top
                else:
                    os.system('cls' if os.name == 'nt' else 'clear')

                sys.stdout.write(output_buffer.getvalue())
                sys.stdout.flush()

                # --- Adaptive Countdown ---
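                # Subtract the time spent fetching and rendering from the
                # interval so the effective refresh cadence stays close to
                # the configured --interval-seconds.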
                remaining_sleep = args.interval_seconds - fetch_and_render_duration

                if remaining_sleep > 0:
                    end_time = time.time() + remaining_sleep
                    while time.time() < end_time and not shutdown_event.is_set():
                        time_left = end_time - time.time()
                        sys.stdout.write(f"\rRefreshing in {int(time_left)}s... (fetch took {fetch_and_render_duration:.2f}s) ")
                        sys.stdout.flush()
                        time.sleep(min(1, time_left if time_left > 0 else 1))
                elif not shutdown_event.is_set():
                    sys.stdout.write(f"\rRefreshing now... (fetch took {fetch_and_render_duration:.2f}s, behind by {-remaining_sleep:.2f}s) ")
                    sys.stdout.flush()
                    time.sleep(0.5)  # Brief pause to make message readable

                sys.stdout.write("\r" + " " * 80 + "\r")  # Clear line
                sys.stdout.flush()

        except KeyboardInterrupt:
            # This can be triggered by Ctrl+C during a time.sleep().
            # The signal handler will have already set the shutdown_event and printed a message.
            # This block is a fallback.
            if not shutdown_event.is_set():
                print("\nKeyboardInterrupt received. Stopping live view...", file=sys.stderr)
                shutdown_event.set()  # Ensure event is set if handler didn't run
            return 0
        finally:
            pm_logger.setLevel(original_log_level)
            if args.live and args.no_blink:
                sys.stdout.write('\033[?25h')  # Restore cursor
                sys.stdout.flush()

    elif args.profile_command == 'set-proxy-state':
        success = manager.set_proxy_state(args.proxy_url, args.state, args.duration_minutes)
        return 0 if success else 1

    elif args.profile_command == 'get':
        profile = manager.get_profile(args.name)
        if not profile:
            print(f"Profile '{args.name}' not found")
            return 1

        print(f"Profile: {profile['name']}")
        print(f"Proxy: {profile['proxy']}")
        print(f"State: {profile['state']}")
        print(f"Created: {format_timestamp(profile['created_at'])}")
        print(f"Last Used: {format_timestamp(profile['last_used'])}")
        print(f"Success Count: {profile['success_count']}")
        print(f"Failure Count: {profile['failure_count']}")

        if profile.get('rest_until', 0) > 0:
            remaining = profile['rest_until'] - time.time()
            if remaining > 0:
                print(f"Resting for: {format_duration(remaining)} more")
            else:
                print(f"Rest period ended: {format_timestamp(profile['rest_until'])}")

        if profile.get('ban_reason'):
            print(f"Ban Reason: {profile['ban_reason']}")

        if profile.get('lock_timestamp', 0) > 0:
            print(f"Locked since: {format_timestamp(profile['lock_timestamp'])}")
            print(f"Lock Owner: {profile['lock_owner']}")

        if profile.get('notes'):
            print(f"Notes: {profile['notes']}")
        return 0

    elif args.profile_command == 'update-state':
        success = manager.update_profile_state(args.name, args.state, args.reason or '')
        return 0 if success else 1

    elif args.profile_command == 'update-field':
        success = manager.update_profile_field(args.name, args.field, args.value)
        return 0 if success else 1

    elif args.profile_command == 'pause':
        success = manager.update_profile_state(args.name, ProfileState.PAUSED.value, 'Manual pause')
        return 0 if success else 1

    elif args.profile_command == 'activate':
        success = manager.update_profile_state(args.name, ProfileState.ACTIVE.value, 'Manual activation')
        return 0 if success else 1

    elif args.profile_command == 'ban':
        success = manager.update_profile_state(args.name, ProfileState.BANNED.value, args.reason)
        return 0 if success else 1

    elif args.profile_command == 'unban':
        # First activate, then reset session counters. The ban reason is cleared by update_profile_state.
        success = manager.update_profile_state(args.name, ProfileState.ACTIVE.value, 'Manual unban')
        if success:
            manager.reset_profile_counters(args.name)
        return 0 if success else 1

    elif args.profile_command == 'delete':
        if not args.confirm:
            print("Error: --confirm flag is required for deletion", file=sys.stderr)
            return 1
        success = manager.delete_profile(args.name)
        return 0 if success else 1

    elif args.profile_command == 'delete-all':
        if not args.confirm:
            print("Error: --confirm flag is required for this destructive action.", file=sys.stderr)
            return 1
        deleted_count = manager.delete_all_data()
        print(f"Deleted {deleted_count} key(s) with prefix '{manager.key_prefix}'.")
        return 0

    elif args.profile_command == 'reset-global-counters':
        manager.reset_global_counters()
        return 0

    elif args.profile_command == 'reset-counters':
        profiles_to_reset = []
        if args.profile_name:
            profile = manager.get_profile(args.profile_name)
            if profile:
                profiles_to_reset.append(profile)
        elif args.proxy_url:
            profiles_to_reset = manager.list_profiles(proxy_filter=args.proxy_url)
        elif args.all_profiles:
            profiles_to_reset = manager.list_profiles()

        if not profiles_to_reset:
            print("No profiles found to reset.", file=sys.stderr)
            return 1

        print(f"Found {len(profiles_to_reset)} profile(s) to reset. This action is not reversible.")
        confirm = input("Continue? (y/N): ")
        if confirm.lower() != 'y':
            print("Aborted.")
            return 1

        success_count = 0
        for profile in profiles_to_reset:
            if manager.reset_profile_counters(profile['name']):
                success_count += 1

        print(f"Successfully reset session counters for {success_count} profile(s).")
        return 0

    elif args.profile_command == 'record-activity':
        success = manager.record_activity(args.name, args.type, args.timestamp)
        return 0 if success else 1

    elif args.profile_command == 'get-rate':
        rate = manager.get_activity_rate(args.name, args.type, args.window)
        print(f"{args.type.capitalize()} rate for '{args.name}' over {args.window}s: {rate}")
        return 0

    return 1  # Should not be reached