Initial commit: Crypto trader application

2025-12-25 20:20:40 -05:00
commit 07a04c1bb8
47895 changed files with 2042266 additions and 0 deletions


@@ -0,0 +1,8 @@
-- Migration: Add running column to strategies table
-- This separates "running manually" state from "enabled for Autopilot" state
ALTER TABLE strategies ADD COLUMN IF NOT EXISTS running BOOLEAN DEFAULT FALSE;
-- Document the distinction between the two flags directly on the columns
COMMENT ON COLUMN strategies.enabled IS 'Available to Autopilot with custom params';
COMMENT ON COLUMN strategies.running IS 'Currently running manually (bypasses Autopilot)';
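A minimal sketch of how the two flags might be queried together once this migration is applied; the Database engine comes from src.core.database, while the id and name columns are illustrative assumptions:

# Sketch: strategies Autopilot may pick up (enabled, but not running manually).
# Assumes the async engine from src.core.database; the selected columns are examples.
from sqlalchemy import text
from src.core.database import Database

async def autopilot_candidates():
    db = Database()
    try:
        async with db.engine.connect() as conn:
            rows = await conn.execute(
                text("SELECT id, name FROM strategies WHERE enabled AND NOT running")
            )
            return rows.fetchall()
    finally:
        await db.close()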

153
scripts/fetch_historical_data.py Executable file

@@ -0,0 +1,153 @@
#!/usr/bin/env python3
"""Utility script to fetch and store historical OHLCV data from Binance public API.
This script uses the PublicDataAdapter to fetch historical market data without
requiring API keys, which makes it useful for populating the database with
historical data for backtesting and analysis.
Usage:
python scripts/fetch_historical_data.py --symbol BTC/USDT --timeframe 1h --days 30
python scripts/fetch_historical_data.py --symbol ETH/USDT --timeframe 1d --days 365
"""
import argparse
import sys
import time
from datetime import datetime, timedelta
from pathlib import Path
# Add src to path
sys.path.insert(0, str(Path(__file__).parent.parent))
from src.exchanges.public_data import PublicDataAdapter
from src.data.collector import get_data_collector
from src.core.logger import setup_logging, get_logger
setup_logging()
logger = get_logger(__name__)
def fetch_historical_data(
symbol: str,
timeframe: str,
days: int,
exchange_name: str = "Binance Public"
) -> int:
"""Fetch and store historical OHLCV data.
Args:
symbol: Trading symbol (e.g., 'BTC/USDT')
timeframe: Timeframe (e.g., '1h', '1d', '4h')
days: Number of days of historical data to fetch
exchange_name: Exchange name for storage
Returns:
Number of candles stored
"""
logger.info(f"Fetching {days} days of {timeframe} data for {symbol}")
# Create public data adapter
adapter = PublicDataAdapter()
if not adapter.connect():
logger.error("Failed to connect to Binance public API")
return 0
# Calculate start date
end_date = datetime.utcnow()
start_date = end_date - timedelta(days=days)
# Fetch data in chunks (Binance limit is 1000 candles per request)
collector = get_data_collector()
total_candles = 0
current_date = start_date
# Size each chunk so a single request stays within the 1000-candle limit for the chosen timeframe
candle_minutes = {'1m': 1, '5m': 5, '15m': 15, '30m': 30, '1h': 60, '4h': 240, '1d': 1440, '1w': 10080}
chunk = timedelta(minutes=1000 * candle_minutes[timeframe])
while current_date < end_date:
chunk_end = min(current_date + chunk, end_date)
logger.info(f"Fetching data from {current_date.date()} to {chunk_end.date()}")
# Fetch OHLCV data
ohlcv = adapter.get_ohlcv(
symbol=symbol,
timeframe=timeframe,
since=current_date,
limit=1000
)
if ohlcv:
# Store in database
collector.store_ohlcv(exchange_name, symbol, timeframe, ohlcv)
total_candles += len(ohlcv)
logger.info(f"Stored {len(ohlcv)} candles (total: {total_candles})")
else:
logger.warning(f"No data returned for period {current_date} to {chunk_end}")
# Move to next chunk
current_date = chunk_end
# Small delay to respect rate limits
time.sleep(1)
adapter.disconnect()
logger.info(f"Completed! Total candles stored: {total_candles}")
return total_candles
def main():
"""Main entry point."""
parser = argparse.ArgumentParser(
description="Fetch historical OHLCV data from Binance public API"
)
parser.add_argument(
'--symbol',
type=str,
default='BTC/USDT',
help='Trading symbol (e.g., BTC/USDT, ETH/USDT)'
)
parser.add_argument(
'--timeframe',
type=str,
default='1h',
choices=['1m', '5m', '15m', '30m', '1h', '4h', '1d', '1w'],
help='Timeframe for candles'
)
parser.add_argument(
'--days',
type=int,
default=30,
help='Number of days of historical data to fetch'
)
parser.add_argument(
'--exchange',
type=str,
default='Binance Public',
help='Exchange name for storage (default: Binance Public)'
)
args = parser.parse_args()
try:
count = fetch_historical_data(
symbol=args.symbol,
timeframe=args.timeframe,
days=args.days,
exchange_name=args.exchange
)
print(f"\n✓ Successfully fetched and stored {count} candles")
print(f" Symbol: {args.symbol}")
print(f" Timeframe: {args.timeframe}")
print(f" Period: {args.days} days")
return 0
except KeyboardInterrupt:
print("\n\nInterrupted by user")
return 1
except Exception as e:
logger.error(f"Error: {e}", exc_info=True)
print(f"\n✗ Error: {e}")
return 1
if __name__ == '__main__':
sys.exit(main())
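The helper can also be called programmatically (outside the CLI), assuming the script is imported from the project root; a small sketch using the signature defined above:

# Sketch: programmatic use of fetch_historical_data(); symbol/timeframe/days are example values.
from scripts.fetch_historical_data import fetch_historical_data

stored = fetch_historical_data(symbol="ETH/USDT", timeframe="4h", days=90)
print(f"{stored} candles stored")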

68
scripts/reset_database.py Executable file

@@ -0,0 +1,68 @@
#!/usr/bin/env python3
"""Reset database to a fresh state by dropping all tables and recreating them."""
import asyncio
import sys
from pathlib import Path
# Add project root to path
project_root = Path(__file__).parent.parent
if str(project_root) not in sys.path:
sys.path.insert(0, str(project_root))
from src.core.database import Base, Database
from src.core.config import get_config
async def reset_database():
"""Reset database by dropping all tables and recreating them."""
config = get_config()
db_type = config.get("database.type", "postgresql")
print(f"Resetting {db_type} database...")
# Create database instance to get engine
db = Database()
try:
# Drop all tables. For PostgreSQL this has to respect foreign-key constraints:
# a raw "DROP ... CASCADE" is the bluntest option, but SQLAlchemy's
# metadata.drop_all already orders the drops by dependency, so we run it
# through run_sync on the async connection.
print("Dropping all tables...")
async with db.engine.begin() as conn:
await conn.run_sync(Base.metadata.drop_all)
print("Dropped all tables.")
# Recreate all tables
print("Recreating all tables...")
await db.create_tables()
print("Database reset complete!")
except Exception as e:
print(f"Error during reset: {e}")
raise
finally:
# Close database connection
await db.close()
if __name__ == "__main__":
try:
asyncio.run(reset_database())
except Exception as e:
print(f"Error resetting database: {e}", file=sys.stderr)
sys.exit(1)
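For the raw-SQL CASCADE route mentioned in the comments above, a minimal sketch assuming a PostgreSQL database; note it drops everything in the public schema, not just the tables known to Base.metadata:

# Sketch: hard reset via DROP SCHEMA ... CASCADE (PostgreSQL only).
from sqlalchemy import text
from src.core.database import Database

async def hard_reset():
    db = Database()
    try:
        async with db.engine.begin() as conn:
            await conn.execute(text("DROP SCHEMA public CASCADE"))
            await conn.execute(text("CREATE SCHEMA public"))
        # Recreate the tables the same way reset_database() does
        await db.create_tables()
    finally:
        await db.close()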

52
scripts/start_all.sh Executable file

@@ -0,0 +1,52 @@
#!/bin/bash
# Start Redis
echo "Starting Redis..."
if sudo service redis-server start; then
echo "✓ Redis started"
else
echo "x Failed to start Redis"
exit 1
fi
# Activate virtual environment if it exists
if [ -d "venv" ]; then
source venv/bin/activate
fi
# Start Celery Worker
echo "Starting Celery Worker..."
# Check for existing celery process
if pgrep -f "celery worker" > /dev/null; then
echo "! Celery is already running"
else
nohup celery -A src.worker.app worker --loglevel=info > celery.log 2>&1 &
echo "✓ Celery worker started"
fi
# Start Backend
echo "Starting Backend API..."
if pgrep -f "uvicorn backend.main:app" > /dev/null; then
echo "! Backend is already running"
else
nohup uvicorn backend.main:app --reload --host 0.0.0.0 --port 8000 > backend.log 2>&1 &
echo "✓ Backend API started"
fi
# Start Frontend
echo "Starting Frontend..."
if pgrep -f "vite" > /dev/null; then
echo "! Frontend is already running"
else
cd frontend
nohup npm run dev > ../frontend.log 2>&1 &
cd ..
echo "✓ Frontend started"
fi
echo "-----------------------------------"
echo "All services are running!"
echo "Logs:"
echo " - Celery: tail -f celery.log"
echo " - Backend: tail -f backend.log"
echo " - Frontend: tail -f frontend.log"

53
scripts/verify_redis.py Normal file

@@ -0,0 +1,53 @@
"""Verify Redis and Celery integration."""
import asyncio
import os
import sys
# Add project root to path
sys.path.insert(0, os.getcwd())
from src.core.redis import get_redis_client
from src.worker.app import app  # imported so the Celery app configuration is loaded before queuing tasks
from src.worker.tasks import train_model_task
async def verify_redis():
"""Verify Redis connection."""
print("Verifying Redis connection...")
try:
redis = get_redis_client()
client = redis.get_client()
await client.set("test_key", "hello_redis")
value = await client.get("test_key")
print(f"Redis write/read success: {value}")
await client.delete("test_key")
await redis.close()
return True
except Exception as e:
print(f"Redis verification failed: {e}")
return False
def verify_celery_task_queuing():
"""Verify Celery task queuing."""
print("\nVerifying Celery task queuing...")
try:
# Submit the task; it only executes if a worker is active, but queuing it still exercises the broker
task = train_model_task.delay(force_retrain=False, bootstrap=False)
print(f"Task submitted. ID: {task.id}")
print(f"Task status: {task.status}")
return True
except Exception as e:
print(f"Celery task submission failed: {e}")
return False
async def main():
redis_ok = await verify_redis()
celery_ok = verify_celery_task_queuing()
if redis_ok and celery_ok:
print("\nSUCCESS: Redis and Celery integration verified.")
else:
print("\nFAILURE: One or more components failed verification.")
sys.exit(1)
if __name__ == "__main__":
asyncio.run(main())
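When a worker is actually running, the AsyncResult returned by .delay() can be used to wait for the outcome; a short sketch (the 60-second timeout is an arbitrary example):

# Sketch: block on the queued task's result when a worker is available.
from src.worker.tasks import train_model_task

task = train_model_task.delay(force_retrain=False, bootstrap=False)
# .get() waits for completion; raises celery.exceptions.TimeoutError if nothing picks it up in time
result = task.get(timeout=60)
print(f"Task {task.id} finished with: {result}")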