diff --git a/.coverage b/.coverage new file mode 100644 index 0000000..7e57e93 Binary files /dev/null and b/.coverage differ diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000..727292b --- /dev/null +++ b/.dockerignore @@ -0,0 +1,34 @@ +__pycache__ +*.pyc +*.pyo +*.pyd +.Python +env/ +venv/ +.venv/ +pip-log.txt +pip-delete-this-directory.txt +.tox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.log +.git +.mypy_cache +.pytest_cache +.hypothesis +local_settings.py +db.sqlite3 +data/ +outputs/ +tests/ +docs/ +.env +*.md +notebooks/ +.github/ +Dockerfile +docker-compose.yml diff --git a/.env.example b/.env.example new file mode 100644 index 0000000..a253695 --- /dev/null +++ b/.env.example @@ -0,0 +1,12 @@ +# API Keys +FINNHUB_API_KEY=your_finnhub_key_here + +# Data Settings +DATA_DIR=./data +LOG_DIR=./logs + +# Optional Email Configuration +EMAIL_HOST=smtp.gmail.com +EMAIL_PORT=587 +EMAIL_USER=your_email@gmail.com +EMAIL_PASSWORD=your_app_password diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 0000000..1dfb3aa --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,83 @@ +name: CI Pipeline + +on: + push: + branches: [ main ] + pull_request: + branches: [ main ] + +jobs: + lint: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Set up Python 3.13 + uses: actions/setup-python@v5 + with: + python-version: "3.13" + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install flake8 + - name: Lint with flake8 + # Stop the build if there are Python syntax errors or undefined names + # exit-zero treats all errors as warnings. 
The GitHub editor is 127 chars wide + run: | + flake8 src/ --count --select=E9,F63,F7,F82 --show-source --statistics + flake8 src/ --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics + + test: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Set up Python 3.10 + uses: actions/setup-python@v5 + with: + python-version: "3.10" + + # Add caching + - name: Cache Poetry dependencies + uses: actions/cache@v4 + with: + path: ~/.cache/pypoetry + key: ${{ runner.os }}-poetry-${{ hashFiles('**/poetry.lock') }} + + # Install Poetry + - name: Install Poetry + run: | + curl -sSL https://install.python-poetry.org | python3 - + echo "$HOME/.local/bin" >> $GITHUB_PATH + # Install Project Dependencies + - name: Install dependencies + run: poetry install --no-interaction + # Run Tests + - name: Test with pytest + run: poetry run pytest --cov=src --cov-report=xml + env: + FINNHUB_API_KEY: ${{ secrets.FINNHUB_API_KEY }} + # Mock value for tests if secret not present (tests should mock API calls anyway) + FINNHUB_API_KEY_FALLBACK: "test_key" + + docker-build: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Build Docker image + run: docker build -t project_alpha:ci . 
+ - name: Test Docker image help + run: docker run --rm project_alpha:ci --help + + audit: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Security Scan with Bandit + # Using simple pip install for bandit + run: | + pip install bandit + bandit -r src/ -f json -o bandit_report.json || true + - name: Upload Security Report + uses: actions/upload-artifact@v4 + with: + name: bandit-report + path: bandit_report.json diff --git a/.github/workflows/deploy.yml b/.github/workflows/deploy.yml new file mode 100644 index 0000000..cb5d56a --- /dev/null +++ b/.github/workflows/deploy.yml @@ -0,0 +1,81 @@ +name: Build and Deploy + +on: + push: + tags: + - 'v*' + +env: + REGISTRY: ghcr.io + IMAGE_NAME: ${{ github.repository }} + +jobs: + test: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Set up Python 3.10 + uses: actions/setup-python@v5 + with: + python-version: "3.10" + - name: Install Poetry + run: | + curl -sSL https://install.python-poetry.org | python3 - + echo "$HOME/.local/bin" >> $GITHUB_PATH + - name: Install dependencies + run: poetry install --no-interaction + - name: Run tests + run: poetry run pytest + env: + FINNHUB_API_KEY: "test_key" + + build-and-push: + needs: test + runs-on: ubuntu-latest + permissions: + contents: read + packages: write + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Log in to the Container registry + uses: docker/login-action@v3 + with: + registry: ${{ env.REGISTRY }} + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Extract metadata (tags, labels) for Docker + id: meta + uses: docker/metadata-action@v5 + with: + images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} + + - name: Build and push Docker image + uses: docker/build-push-action@v5 + with: + context: . 
+ push: true + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + + deploy: + needs: build-and-push + runs-on: ubuntu-latest + steps: + - name: Deploy to VPS + uses: appleboy/ssh-action@v1.0.3 + with: + host: ${{ secrets.HOST }} + username: ${{ secrets.USERNAME }} + key: ${{ secrets.KEY }} + script: | + cd project_alpha + # Pull the new image + docker compose pull + # Restart containers with new image + docker compose up -d + # Prune unused images to save space + docker image prune -f diff --git a/.gitignore b/.gitignore index 815b7a7..b8892ff 100755 --- a/.gitignore +++ b/.gitignore @@ -19,3 +19,4 @@ plots/ logs/ data/ email_config.json +tests/*_output* diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..e157c6d --- /dev/null +++ b/Dockerfile @@ -0,0 +1,58 @@ +# Stage 1: Builder +FROM python:3.10-slim AS builder + +WORKDIR /app + +# Install build dependencies +RUN apt-get update && apt-get install -y \ + gcc \ + python3-dev \ + libpq-dev \ + curl \ + && rm -rf /var/lib/apt/lists/* + +# Install Poetry +RUN curl -sSL https://install.python-poetry.org | python3 - +ENV PATH="/root/.local/bin:$PATH" + +# Copy dependency files +COPY pyproject.toml poetry.lock ./ + +# Install export plugin and export dependencies +RUN poetry self add poetry-plugin-export && poetry export -f requirements.txt --output requirements.txt --without-hashes + +# Stage 2: Production +FROM python:3.10-slim + +WORKDIR /app + +# Install runtime dependencies (e.g. libpq for psycopg2 if needed) +RUN apt-get update && apt-get install -y \ + libpq5 \ + && rm -rf /var/lib/apt/lists/* + +# Copy requirements from builder +COPY --from=builder /app/requirements.txt . 
+ +# Install python dependencies +RUN pip install --no-cache-dir -r requirements.txt + +# Copy source code +COPY src/ ./src/ +COPY pyproject.toml ./ + +# Default environment variables +ENV PYTHONUNBUFFERED=1 +ENV PYTHONPATH=/app/src + +# Create a non-root user +RUN useradd -m appuser && chown -R appuser:appuser /app +USER appuser + +# Health check +HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \ + CMD python -c "import sys; sys.exit(0)" + +# Entrypoint +ENTRYPOINT ["python", "src/project_alpha.py"] +CMD ["--help"] diff --git a/README.md b/README.md index d7cff7a..5644329 100644 --- a/README.md +++ b/README.md @@ -1,66 +1,241 @@ -# Project Alpha +# ๐Ÿš€ Project Alpha -Project Alpha is a collection of Python tools for scanning and analysing -stock markets. It fetches historical prices, trains a volatility model, and -runs a suite of technical screeners. The project can work with both US and -Indian equities and stores data either as pickle files or in a SQLite -cache. +**Your day-to-day trading companion** โ€” a comprehensive stock market analysis and screening toolkit for US and Indian equities. -## Installation +Project Alpha fetches historical price data, trains Bayesian volatility models, runs a suite of pluggable technical screeners, and validates strategies through backtesting โ€” all from a single CLI. 
+ +--- + +## โœจ Features + +| Category | Capabilities | +|----------|-------------| +| **Volatility Analysis** | Hierarchical Bayesian models (TensorFlow Probability) for trend estimation, growth scoring, and stock clustering | +| **Technical Screeners** | Breakout, Trendline, Moving Average, MACD, Donchian โ€” pluggable via `BaseScreener` ABC & Registry | +| **Consensus Scoring** | Weighted multi-signal aggregation with synergy bonuses across screeners and filters | +| **AI-Powered Filters** | Fundamental health checks (Finnhub API) + NLP sentiment analysis (FinBERT) | +| **Regime Detection** | Hidden Markov Model classifying Bull / Bear / Sideways market states | +| **Backtesting** | Strategy validation with ATR-based risk management, position sizing, and transaction cost modeling | +| **Walk-Forward Validation** | Anchored expanding windows with overfitting detection (Sharpe degradation) | +| **Multi-Market** | US (S&P 500, NASDAQ, Dow Jones) and India (NSE 500/50/100) | +| **Multi-Provider** | Data from yfinance or Polygon.io with SQLite / pickle caching | +| **Rich Output** | Interactive terminal tables, SVG/candlestick charts, CSV/JSON exports, email reports with PDF attachments | + +--- + +## ๐Ÿ“ฆ Installation + +### Prerequisites +- Python โ‰ฅ3.12, <3.14 +- [Poetry](https://python-poetry.org/) 2.0+ + +### Setup ```bash +# Clone the repository +git clone https://github.com/your-org/project-alpha.git +cd project-alpha + +# Install dependencies pip install --user poetry poetry install + +# Activate the virtual environment +poetry shell + +# (Optional) Copy and configure environment variables +cp .env.example .env +# Edit .env with your API keys (Finnhub, Polygon, email settings) ``` -Poetry creates an isolated virtual environment for the project. 
Activate it with: +--- + +## ๐Ÿš€ Quick Start ```bash -poetry shell +# Analyze US market with all screeners (default) +python src/project_alpha.py + +# Analyze Indian market +python src/project_alpha.py --market india + +# Run specific screeners only +python src/project_alpha.py --screeners breakout,trend + +# Top 20 stocks, JSON output +python src/project_alpha.py --top 20 --format json + +# Filter by price range +python src/project_alpha.py --min-price 10 --max-price 500 + +# Analyze specific symbols +python src/project_alpha.py -s AAPL -s MSFT -s GOOGL +``` + +### Advanced Features + +```bash +# Enable AI filters (requires API keys / model download) +python src/project_alpha.py --fundamental --sentiment --consensus + +# Market regime detection +python src/project_alpha.py --regime-detection --regime-index SPY + +# Backtest a strategy +python src/project_alpha.py --backtest --screeners breakout --initial-capital 50000 + +# Walk-forward validation (overfitting detection) +python src/project_alpha.py --walk-forward --wf-train-months 12 --wf-test-months 3 -s AAPL + +# Customize risk parameters +python src/project_alpha.py --risk-per-trade 0.02 --atr-multiplier 2.5 --max-positions 5 + +# Verbose debug output +python src/project_alpha.py -v --log-level DEBUG + +# Quiet mode with JSON logs (for pipelines) +python src/project_alpha.py -q --json-logs +``` + +Run `python src/project_alpha.py --help` for the full CLI reference. 
+ +--- + +## ๐Ÿ“ Project Structure + +``` +project_alpha/ +โ”œโ”€โ”€ src/ +โ”‚ โ”œโ”€โ”€ project_alpha.py # CLI entry point (rich-click) +โ”‚ โ”œโ”€โ”€ config/ # Pydantic Settings + YAML defaults +โ”‚ โ””โ”€โ”€ classes/ +โ”‚ โ”œโ”€โ”€ analysis/ # VolatileAnalyzer, TrendAnalyzer, RegimeDetector +โ”‚ โ”œโ”€โ”€ screeners/ # BaseScreener ABC, Registry, ConsensusEngine +โ”‚ โ”œโ”€โ”€ filters/ # FundamentalFilter, SentimentFilter +โ”‚ โ”œโ”€โ”€ risk/ # RiskManager, TransactionCosts +โ”‚ โ”œโ”€โ”€ backtesting/ # BacktestEngine, WalkForwardValidator +โ”‚ โ”œโ”€โ”€ output/ # Charts, Formatters, Email, Console +โ”‚ โ”œโ”€โ”€ data/ # NewsFetcher +โ”‚ โ”œโ”€โ”€ Download.py # Multi-threaded data download +โ”‚ โ”œโ”€โ”€ DatabaseManager.py # SQLite persistence +โ”‚ โ””โ”€โ”€ IndexListFetcher.py # Market index symbol resolution +โ”œโ”€โ”€ tests/ # Unit (15 modules) + Integration (2 modules) +โ”œโ”€โ”€ scripts/ # Migration & automation scripts +โ”œโ”€โ”€ docs/ # Architecture docs, trading strategy guide, roadmap +โ”œโ”€โ”€ pyproject.toml # Poetry dependencies +โ”œโ”€โ”€ Dockerfile # Container build +โ””โ”€โ”€ docker-compose.yml # Service orchestration ``` -## Usage +See [docs/architecture_documentation.md](docs/architecture_documentation.md) for detailed architecture with diagrams. + +--- + +## ๐Ÿ—„๏ธ Data Storage + +### Pickle Cache (default) +Data is cached to `data/historic_data/{market}/` as pickle files, keyed by index name and date. -The main entry point is `src/project_alpha.py`. 
A few important command-line -options are shown below: +### SQLite Database (optional) +Persist price data across sessions with `--db-path`: -```text ---market Market name to fetch stocks list ("us" or "india") ---save-table Save prediction table in csv format ---no-plots Plot estimates with their uncertainty over time ---db-path Path to SQLite database for storing price data +```bash +python src/project_alpha.py --db-path data/prices.db +``` + +Migrate existing pickle caches: +```bash +python scripts/migrate_pickle_to_db.py ``` -Run the script directly for US stocks: +### Output +- **Charts:** `data/processed_data/{screener_name}/*.svg` +- **CSV Reports:** `data/processed_data/screener_{name}/*.csv` +- **Backtest Reports:** Interactive HTML files +- **Logs:** `logs/project_alpha_{market}.log` + +--- + +## โš™๏ธ Configuration + +All settings can be configured via environment variables with the `PA_` prefix: ```bash -python src/project_alpha.py +export PA_MARKET=us +export PA_FINNHUB_API_KEY=your_key +export PA_POLYGON_API_KEY=your_key +export PA_RISK_PER_TRADE=0.02 +export PA_DATA_PROVIDER=polygon +``` + +Or place them in a `.env` file (see `.env.example`). 
+ +Settings are managed through [Pydantic Settings](https://docs.pydantic.dev/latest/concepts/pydantic_settings/) with the following precedence: + +**CLI flags โ†’ Environment variables โ†’ `.env` file โ†’ Code defaults** + +--- + +## ๐Ÿณ Docker + +```bash +# Build and run (displays help) +docker compose up --build + +# Run a specific scan +docker compose run --rm app --market us --top 10 + +# With environment file +docker compose --env-file .env run --rm app --market india --screeners breakout ``` -Or specify the Indian market: +--- + +## โฐ Scheduled Scans + +Use the included shell scripts for cron-based automation: ```bash -python src/project_alpha.py --market india +# US market scan (weekdays at 4:30 PM ET) +30 16 * * 1-5 /path/to/run_us_stock_scanner.sh + +# India market scan (weekdays at 3:45 PM IST) +45 15 * * 1-5 /path/to/run_india_stock_scanner.sh ``` -Logged executions are provided via helper shell scripts -`run_us_stock_scanner.sh` and `run_india_stock_scanner.sh`. +--- + +## ๐Ÿงช Testing + +```bash +# Run all tests +pytest tests/ -v + +# With coverage report +pytest tests/ -v --cov=src + +# Run only unit tests +pytest tests/unit/ -v + +# Run only integration tests +pytest tests/integration/ -v +``` -## Database Caching +--- -Price data can be saved in an SQLite file using `--db-path`. The database -file will be created automatically along with any missing parent directories. -The `migrate_pickle_to_db.py` script converts existing pickle caches to the new -format. 
+## ๐Ÿ“š Documentation -## Output +| Document | Description | +|----------|-------------| +| [Architecture](docs/architecture_documentation.md) | Full system architecture with Mermaid diagrams | +| [Trading Strategies](docs/trading_strategy_readme.md) | Screener strategies and signal logic | +| [Implementation Roadmap](docs/implementation_roadmap.md) | Development phases and progress | +| [Deployment Guide](docs/deployment_guide.md) | Production deployment instructions | +| [Email Setup](docs/email_setup.md) | SMTP configuration for report delivery | +| [API Guide](docs/api_guide.md) | External API integration details | -Processed charts and prediction tables are written to the `data/processed_data` -folder. Email functions are included for sending results, but credentials must -be supplied separately. +--- -This repository contains a number of Jupyter notebooks for exploratory work and -model experimentation. They are optional for running the basic pipeline. +## โš ๏ธ Disclaimer -Project Alpha is provided for educational purposes only and does not constitute -financial advice. +Project Alpha is provided for **educational and research purposes only**. It does not constitute financial advice. Always do your own due diligence before making investment decisions. diff --git a/Read_me.md b/Read_me.md deleted file mode 100755 index 2d57c98..0000000 --- a/Read_me.md +++ /dev/null @@ -1,61 +0,0 @@ -STRATEGY 1 -The 10-day SMA should be below the 30-day SMA. - 0 -The MACD value should be above the MACD signal line. - 1 -The MACD value should not be above 0. - 0 -The 10-day and the 30-day SMA should be above the 50-day SMA. - 1 -The 10-day, 30-day, and 50-day SMA should be below the 200-day SMA. - 0 -STRATEGY 2 -The 10-day SMA should be below the 30-day SMA. -The MACD value should be above the MACD signal line. -The MACD value should not be above 0. -The 10-day and the 30-day SMA should be above the 50-day SMA. 
-The 10-day, 30-day, and 50-day SMA should be above the 200-day SMA. -STRATEGY 3 -The 10-day SMA should be above the 30-day SMA. -The MACD value should be above the MACD signal line. -The MACD value should not be above 0. -The 10-day and the 30-day SMA should be above the 50-day SMA. -The 10-day, 30-day, and 50-day SMA should be below the 200-day SMA. -STRATEGY 4 -The 10-day SMA should be above the 30-day SMA. -The MACD value should be below the MACD signal line. -The MACD value should be above 0. -The 10-day and the 30-day SMA should be below the 50-day SMA. -The 10-day, 30-day, and 50-day SMA should be below the 200-day SMA. -STRATEGY 5 -The 10-day SMA should be above the 30-day SMA. -The MACD value should be below the MACD signal line. -The MACD value should be above 0. -The 10-day and the 30-day SMA should be below the 50-day SMA. -The 10-day, 30-day, and 50-day SMA should be above the 200-day SMA. -## Database Caching - -The application can store downloaded price data in a local SQLite database. Using a database avoids repeatedly downloading the same history and speeds up subsequent runs. - -Use the `--db-path` option to specify the location of the SQLite file when running `project_alpha.py`: - -```bash -python src/project_alpha.py --db-path my_data.db -``` - -If a database is provided, new prices are fetched only for dates that are not already stored. All historical data will be read from the database instead of relying solely on pickle caches. - -### Migrating old cache files - -For existing pickle caches you can populate a new database with the helper script: - -```bash -python scripts/migrate_pickle_to_db.py -``` - -This script reads the old cached dictionary and inserts its contents into the SQLite tables. - -## Saving and Loading Model Parameters - -Training the volatility model can be time consuming. You can save the learned -parameters to disk and reuse them in later runs. 
- -Use `--save-model ` to write the parameters after training and -`--load-model ` to initialize training from a previous run. Providing -saved parameters allows you to resume training, reduces start-up time and -enables incremental learning on new data. diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 0000000..e6cc458 --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,25 @@ +services: + app: + # Use image from GitHub Container Registry for deployment + # Replace 'username' with your GitHub username + image: ghcr.io/${GITHUB_REPOSITORY:-username/project_alpha}:latest + build: . # Keep build context for local development convenience + container_name: project_alpha + restart: unless-stopped + deploy: + resources: + limits: + cpus: '2.0' + memory: 4G + reservations: + memory: 2G + volumes: + - ./data:/app/data + - ./logs:/app/logs + env_file: + - .env + environment: + - DATA_DIR=/app/data + - LOG_DIR=/app/logs + # Default command runs the help message. Override for specific tasks. + command: ["--help"] diff --git a/docs/api_guide.md b/docs/api_guide.md new file mode 100644 index 0000000..0d56389 --- /dev/null +++ b/docs/api_guide.md @@ -0,0 +1,75 @@ +# API Developer Guide + +## Core Modules + +### Consensus Engine +`src.classes.screeners.consensus.ConsensusEngine` + +Aggregates signals from various screeners and filters to produce a single confidence score. + +**Usage:** +```python +engine = ConsensusEngine() +result = engine.calculate_score(ticker, screener_results, filter_scores) +print(result.score, result.recommendation) +``` + +**Key Methods:** +- `calculate_score(ticker: str, screener_results: Dict[str, ScreenerResult], filter_results: Optional[Dict[str, float]]) -> ConsensusResult`: Calculates score based on weights from `settings.py`. + +### News Fetcher +`src.classes.data.news_fetcher.NewsFetcher` + +Retrieves news headlines for a given ticker, prioritizing Finnhub API over yfinance scraping. 
+ +**Usage:** +```python +fetcher = NewsFetcher() +headlines = fetcher.fetch_headlines("AAPL", days=3) +``` + +## Filters + +### Fundamental Filter +`src.classes.filters.fundamental_filter.FundamentalFilter` + +Evaluates financial health using metrics like Debt/Equity, P/E Ratio, and ROE. Uses standard Finnhub API. + +**Key Methods:** +- `check_health(ticker: str) -> Dict[str, Any]`: Returns pass/fail status and reasoning. **Cached (LRU).** + +### Sentiment Filter +`src.classes.filters.sentiment_filter.SentimentFilter` + +Analyzes news sentiment using a pre-trained FinBERT model. + +**Key Methods:** +- `analyze_sentiment(headlines: List[str]) -> Dict[str, Any]`: Returns aggregated sentiment score and label. **Singleton Model.** + +## Data Management + +### Validators +`src.classes.data.validators` + +Ensures data integrity for OHLCV DataFrames. + +**Functions:** +- `validate_data_quality(df: pd.DataFrame, ticker: str) -> pd.DataFrame`: Runs all validation checks. +- `repair_data(df: pd.DataFrame, ticker: str) -> pd.DataFrame`: Auto-fixes minor issues like negative volume or missing prices. + +### Download +`src.classes.Download` + +Handles data fetching and local caching. + +## Screeners + +### Breakout Screener +`src.classes.screeners.breakout.BreakoutScreener` + +Detects consolidation breakouts using ADX and ATR expansion. + +### Trendline Screener +`src.classes.screeners.trendline.TrendlineScreener` + +Identifies stocks in strong uptrends using linear regression slope analysis. diff --git a/docs/architecture_documentation.md b/docs/architecture_documentation.md index c21961f..72d7a67 100644 --- a/docs/architecture_documentation.md +++ b/docs/architecture_documentation.md @@ -1,10 +1,6 @@ -# Project Alpha - Architecture Documentation +# Project Alpha โ€” Architecture Documentation -> A comprehensive stock market analysis and screening toolkit supporting US and Indian markets. 
- -## Overview - -Project Alpha is a Python-based stock market analysis application that combines **volatility-based predictions** with **technical screening strategies**. It downloads historical price data, trains probabilistic volatility models using TensorFlow, and runs various technical screeners to identify trading opportunities. +> A comprehensive stock market analysis and screening toolkit supporting US and Indian markets, featuring volatility modeling, technical screening, backtesting, risk management, and AI-powered filters. --- @@ -13,41 +9,112 @@ Project Alpha is a Python-based stock market analysis application that combines ```mermaid graph TB subgraph "Entry Points" - CLI[project_alpha.py] - Shell[Shell Scripts] + CLI["project_alpha.py
(rich-click CLI)"] + Shell["Shell Scripts
(cron-ready)"] end - + + subgraph "Configuration" + CFG["config/settings.py
(Pydantic Settings)"] + YAML["config/defaults.yaml"] + ENV[".env file"] + end + subgraph "Data Layer" - ILF[IndexListFetcher] - DL[Download] + ILF["IndexListFetcher"] + DL["Download
(yfinance / Polygon)"] DB[(SQLite DB)] Cache[(Pickle Cache)] + NEWS["NewsFetcher
(Finnhub / yfinance)"] end - + subgraph "Analysis Layer" - VOL[Volatile Model] - MOD[TensorFlow Models] - SCR[Screeners] + VA["VolatileAnalyzer"] + TA["TrendAnalyzer"] + CA["CorrelationAnalyzer"] + REGIME["RegimeDetector
(HMM)"] + MOD["TensorFlow Probability
Models"] + end + + subgraph "Screening Layer" + REG["ScreenerRegistry
(Singleton)"] + BASE["BaseScreener (ABC)"] + BRK["BreakoutScreener"] + TRD["TrendlineScreener"] + MA["MovingAverageScreener"] + MACD["MACDScreener"] + DON["DonchianScreener"] + CON["ConsensusEngine"] + end + + subgraph "Filters" + FUND["FundamentalFilter
(Finnhub API)"] + SENT["SentimentFilter
(FinBERT)"] + end + + subgraph "Risk Management" + RM["RiskManager"] + TC["TransactionCosts"] + end + + subgraph "Backtesting" + BT["BacktestEngine"] + ADAPT["ScreenerSignalAdapter"] + WFV["WalkForwardValidator"] + PERF["BacktestPerformance"] end - + subgraph "Output Layer" - PLOT[Plotting] - EMAIL[Email Notifications] - CSV[CSV Reports] + CHARTS["ChartBuilder"] + FMT["Formatters
(CSV / JSON / HTML / Table)"] + EXP["Exporters"] + CON_OUT["Console (Rich)"] + EMAIL["EmailServer"] end - - CLI --> ILF + + ENV --> CFG + YAML --> CFG + CFG --> CLI Shell --> CLI + + CLI --> ILF ILF --> DL DL --> DB DL --> Cache - DL --> VOL - DL --> SCR - VOL --> MOD - VOL --> PLOT - SCR --> PLOT - PLOT --> EMAIL - SCR --> CSV + DL --> VA + DL --> REG + + VA --> TA + VA --> CA + VA --> MOD + CLI --> REGIME + + REG --> BASE + BASE --> BRK + BASE --> TRD + BASE --> MA + BASE --> MACD + BASE --> DON + + BRK --> CON + TRD --> CON + FUND --> CON + SENT --> CON + + NEWS --> SENT + + CLI --> BT + BT --> ADAPT + ADAPT --> BASE + BT --> RM + BT --> TC + BT --> WFV + WFV --> PERF + + CON --> CHARTS + VA --> CHARTS + CHARTS --> EMAIL + CHARTS --> EXP + FMT --> CON_OUT ``` --- @@ -57,99 +124,356 @@ graph TB ``` project_alpha/ โ”œโ”€โ”€ src/ -โ”‚ โ”œโ”€โ”€ project_alpha.py # Main entry point -โ”‚ โ””โ”€โ”€ classes/ # Core modules (22 files) -โ”‚ โ”œโ”€โ”€ Download.py # Data fetching & caching -โ”‚ โ”œโ”€โ”€ DatabaseManager.py # SQLite persistence -โ”‚ โ”œโ”€โ”€ Volatile.py # Volatility predictions -โ”‚ โ”œโ”€โ”€ Models.py # TensorFlow probabilistic models -โ”‚ โ”œโ”€โ”€ Screener.py # Main screening logic (~800 lines) -โ”‚ โ”œโ”€โ”€ Screener_*.py # Specialized screeners (7 files) -โ”‚ โ”œโ”€โ”€ IndexListFetcher.py # Market index symbol lists -โ”‚ โ”œโ”€โ”€ Plotting.py # Visualization (matplotlib) -โ”‚ โ”œโ”€โ”€ Send_email.py # Email notifications -โ”‚ โ”œโ”€โ”€ Tools.py # Shared utilities -โ”‚ โ””โ”€โ”€ ... 
+โ”‚ โ”œโ”€โ”€ project_alpha.py # CLI entry point (~1 070 lines, rich-click) +โ”‚ โ”œโ”€โ”€ exceptions.py # Custom exception hierarchy +โ”‚ โ”œโ”€โ”€ logging_config.py # structlog configuration +โ”‚ โ”‚ +โ”‚ โ”œโ”€โ”€ config/ +โ”‚ โ”‚ โ”œโ”€โ”€ settings.py # Pydantic Settings (env + .env + defaults) +โ”‚ โ”‚ โ””โ”€โ”€ defaults.yaml # YAML defaults for model/screener/risk params +โ”‚ โ”‚ +โ”‚ โ””โ”€โ”€ classes/ +โ”‚ โ”œโ”€โ”€ Download.py # Multi-threaded data fetching (yfinance/Polygon) +โ”‚ โ”œโ”€โ”€ DatabaseManager.py # SQLite persistence layer +โ”‚ โ”œโ”€โ”€ IndexListFetcher.py # Market index symbol resolution +โ”‚ โ”œโ”€โ”€ Volatile.py # Legacy volatile prediction interface +โ”‚ โ”œโ”€โ”€ Models.py # TensorFlow Probability model definitions +โ”‚ โ”œโ”€โ”€ Plotting.py # Legacy matplotlib visualizations +โ”‚ โ”œโ”€โ”€ Tools.py # Shared utilities (CSV, TradingView, cleanup) +โ”‚ โ”œโ”€โ”€ ScreenipyTA.py # Technical analysis helpers (screenipy) +โ”‚ โ”œโ”€โ”€ Add_indicators.py # Technical indicator additions +โ”‚ โ”‚ +โ”‚ โ”œโ”€โ”€ analysis/ # Modular volatility analysis pipeline +โ”‚ โ”‚ โ”œโ”€โ”€ VolatileAnalyzer.py # Main orchestrator (AnalysisResult) +โ”‚ โ”‚ โ”œโ”€โ”€ TrendAnalyzer.py # Hierarchical Bayesian trend models +โ”‚ โ”‚ โ”œโ”€โ”€ CorrelationAnalyzer.py# Stock correlation and matching +โ”‚ โ”‚ โ”œโ”€โ”€ VolatileConfig.py # Dataclass configs (TrainingConfig, RatingThresholds) +โ”‚ โ”‚ โ””โ”€โ”€ regime.py # HMM-based market regime detection +โ”‚ โ”‚ +โ”‚ โ”œโ”€โ”€ screeners/ # Pluggable screener framework +โ”‚ โ”‚ โ”œโ”€โ”€ base.py # BaseScreener ABC, Signal enum, ScreenerResult +โ”‚ โ”‚ โ”œโ”€โ”€ registry.py # ScreenerRegistry singleton + decorator +โ”‚ โ”‚ โ”œโ”€โ”€ consensus.py # ConsensusEngine (weighted multi-signal aggregation) +โ”‚ โ”‚ โ”œโ”€โ”€ breakout.py # Price & volume breakout detection +โ”‚ โ”‚ โ”œโ”€โ”€ trendline.py # Uptrend identification via slope analysis +โ”‚ โ”‚ โ”œโ”€โ”€ moving_average.py # SMA crossover strategies (10/30/50/200) 
+โ”‚ โ”‚ โ”œโ”€โ”€ macd.py # MACD signal line crossovers +โ”‚ โ”‚ โ””โ”€โ”€ donchian.py # Donchian channel breakout strategy +โ”‚ โ”‚ +โ”‚ โ”œโ”€โ”€ filters/ # Post-screening stock filters +โ”‚ โ”‚ โ”œโ”€โ”€ fundamental_filter.py # Finnhub-based financial health checks +โ”‚ โ”‚ โ””โ”€โ”€ sentiment_filter.py # FinBERT NLP sentiment analysis +โ”‚ โ”‚ +โ”‚ โ”œโ”€โ”€ risk/ # Risk management framework +โ”‚ โ”‚ โ”œโ”€โ”€ risk_manager.py # Position sizing, stop-loss, exposure limits +โ”‚ โ”‚ โ””โ”€โ”€ transaction_costs.py # Commission + slippage + spread modeling +โ”‚ โ”‚ +โ”‚ โ”œโ”€โ”€ backtesting/ # Strategy validation framework +โ”‚ โ”‚ โ”œโ”€โ”€ engine.py # BacktestEngine + ProjectAlphaStrategy +โ”‚ โ”‚ โ”œโ”€โ”€ adapter.py # ScreenerSignalAdapter (point-in-time signals) +โ”‚ โ”‚ โ”œโ”€โ”€ walk_forward.py # Walk-Forward Validation (expanding windows) +โ”‚ โ”‚ โ””โ”€โ”€ performance.py # BacktestResult dataclass + HTML reports +โ”‚ โ”‚ +โ”‚ โ”œโ”€โ”€ output/ # Formatting and delivery +โ”‚ โ”‚ โ”œโ”€โ”€ charts.py # ChartBuilder (candlestick + indicators) +โ”‚ โ”‚ โ”œโ”€โ”€ formatters.py # CSV / JSON / HTML / Table formatters +โ”‚ โ”‚ โ”œโ”€โ”€ exporters.py # File export utilities +โ”‚ โ”‚ โ”œโ”€โ”€ console.py # Rich console UI (banners, tables, progress) +โ”‚ โ”‚ โ””โ”€โ”€ email.py # EmailConfig + EmailServer (SMTP + attachments) +โ”‚ โ”‚ +โ”‚ โ”œโ”€โ”€ data/ # External data fetchers +โ”‚ โ”‚ โ””โ”€โ”€ news_fetcher.py # News headlines (Finnhub โ†’ yfinance fallback) +โ”‚ โ”‚ +โ”‚ โ””โ”€โ”€ _archive/ # Deprecated legacy modules +โ”‚ โ”œโ”€โ”€ Screener.py # Monolithic screener (~40 KB, superseded) +โ”‚ โ”œโ”€โ”€ Screener_*.py # Old standalone screeners +โ”‚ โ”œโ”€โ”€ Send_email.py # Old email implementation +โ”‚ โ”œโ”€โ”€ Plot_stocks.py # Old plotting +โ”‚ โ”œโ”€โ”€ Console.py # Old console output +โ”‚ โ””โ”€โ”€ Evaluation.py # Old evaluation logic +โ”‚ +โ”œโ”€โ”€ tests/ +โ”‚ โ”œโ”€โ”€ unit/ # 15 unit test modules +โ”‚ โ”‚ โ”œโ”€โ”€ test_backtesting.py +โ”‚ โ”‚ โ”œโ”€โ”€ 
test_walk_forward.py +โ”‚ โ”‚ โ”œโ”€โ”€ test_screeners.py +โ”‚ โ”‚ โ”œโ”€โ”€ test_consensus.py +โ”‚ โ”‚ โ”œโ”€โ”€ test_filters.py +โ”‚ โ”‚ โ”œโ”€โ”€ test_risk_manager.py +โ”‚ โ”‚ โ”œโ”€โ”€ test_transaction_costs.py +โ”‚ โ”‚ โ”œโ”€โ”€ test_regime.py +โ”‚ โ”‚ โ”œโ”€โ”€ test_config.py +โ”‚ โ”‚ โ”œโ”€โ”€ test_database_manager.py +โ”‚ โ”‚ โ”œโ”€โ”€ test_download.py +โ”‚ โ”‚ โ”œโ”€โ”€ test_models.py +โ”‚ โ”‚ โ”œโ”€โ”€ test_provider_chain.py +โ”‚ โ”‚ โ”œโ”€โ”€ test_data_repair.py +โ”‚ โ”‚ โ””โ”€โ”€ test_breakout_confirmation.py +โ”‚ โ”‚ +โ”‚ โ”œโ”€โ”€ integration/ # Integration tests +โ”‚ โ”‚ โ”œโ”€โ”€ test_download_module.py +โ”‚ โ”‚ โ””โ”€โ”€ test_pipeline.py +โ”‚ โ”‚ +โ”‚ โ”œโ”€โ”€ test_data_layer.py +โ”‚ โ”œโ”€โ”€ test_analysis_layer.py +โ”‚ โ”œโ”€โ”€ test_screeners.py +โ”‚ โ”œโ”€โ”€ test_output.py +โ”‚ โ””โ”€โ”€ conftest.py +โ”‚ โ”œโ”€โ”€ scripts/ -โ”‚ โ””โ”€โ”€ migrate_pickle_to_db.py # Cache migration utility -โ”œโ”€โ”€ website/ # Simple SVG viewer (HTML/CSS/JS) -โ”œโ”€โ”€ run_us_stock_scanner.sh # Scheduled US market scan -โ”œโ”€โ”€ run_india_stock_scanner.sh # Scheduled India market scan -โ”œโ”€โ”€ pyproject.toml # Poetry dependencies -โ””โ”€โ”€ README.md +โ”‚ โ”œโ”€โ”€ migrate_pickle_to_db.py # Pickle โ†’ SQLite migration +โ”‚ โ””โ”€โ”€ run_daily_analysis.sh # Automated daily analysis +โ”‚ +โ”œโ”€โ”€ website/ # Minimal SVG chart viewer (HTML/CSS/JS) +โ”‚ +โ”œโ”€โ”€ run_us_stock_scanner.sh # Cron-ready US market scan +โ”œโ”€โ”€ run_india_stock_scanner.sh # Cron-ready India market scan +โ”œโ”€โ”€ pyproject.toml # Poetry dependencies & project metadata +โ”œโ”€โ”€ Dockerfile # Container build +โ””โ”€โ”€ docker-compose.yml # Service orchestration ``` --- ## Core Components -### 1. Data Layer +### 1. 
Configuration Layer | Component | File | Responsibility | |-----------|------|----------------| -| **IndexListFetcher** | [IndexListFetcher.py](file:///opt/developments/project_alpha/src/classes/IndexListFetcher.py) | Fetches stock symbol lists for various indices (S&P 500, NASDAQ, NSE 500, etc.) via web scraping | -| **Download** | [Download.py](file:///opt/developments/project_alpha/src/classes/Download.py) | Multi-threaded price download using `yfinance`, handles caching and exchange rates | -| **DatabaseManager** | [DatabaseManager.py](file:///opt/developments/project_alpha/src/classes/DatabaseManager.py) | SQLite-based persistence with `price_data` and `company_info` tables | +| **Settings** | [settings.py](file:///opt/developments/project_alpha/src/config/settings.py) | Central Pydantic Settings model โ€” loads from `.env` (prefix `PA_`), environment vars, and code defaults. Covers data, model, screener, consensus, risk, email, and API key settings. | +| **Defaults** | [defaults.yaml](file:///opt/developments/project_alpha/src/config/defaults.yaml) | YAML defaults for model hyperparameters, screener thresholds, and risk parameters | +| **Logging** | [logging_config.py](file:///opt/developments/project_alpha/src/logging_config.py) | `structlog` configuration with console (colored) or JSON output modes | +| **Exceptions** | [exceptions.py](file:///opt/developments/project_alpha/src/exceptions.py) | Custom hierarchy: `ProjectAlphaError` โ†’ `DataFetchError`, `ScreenerError`, `ModelTrainingError`, `ConfigurationError`, `DataValidationError` | + +**Configuration Precedence:** CLI flags โ†’ Environment variables (`PA_*`) โ†’ `.env` file โ†’ Code defaults + +--- + +### 2. 
Data Layer + +| Component | File | Responsibility | +|-----------|------|----------------| +| **IndexListFetcher** | [IndexListFetcher.py](file:///opt/developments/project_alpha/src/classes/IndexListFetcher.py) | Resolves market index symbol lists via web scraping (Wikipedia, NSE, stockmonitor.com) | +| **Download** | [Download.py](file:///opt/developments/project_alpha/src/classes/Download.py) | Multi-threaded price download with provider abstraction (`yfinance` / `Polygon.io`), caching, and exchange rate handling | +| **DatabaseManager** | [DatabaseManager.py](file:///opt/developments/project_alpha/src/classes/DatabaseManager.py) | SQLite persistence with `price_data` and `company_info` tables | +| **NewsFetcher** | [news_fetcher.py](file:///opt/developments/project_alpha/src/classes/data/news_fetcher.py) | News headline fetcher with Finnhub API primary, yfinance fallback | **Data Flow:** -1. Fetch symbol lists from external sources (Wikipedia, NSE, stockmonitor.com) -2. Download OHLCV data via yfinance with multi-threading (`@multitasking.task`) -3. Cache data as pickle files OR persist to SQLite database -4. Load cached data on subsequent runs to avoid re-downloading + +```mermaid +sequenceDiagram + participant CLI + participant ILF as IndexListFetcher + participant DL as Download + participant Cache as Pickle Cache + participant DB as SQLite DB + participant API as yfinance / Polygon + + CLI->>ILF: sp_500() / nse_500() + ILF-->>CLI: (index_name, symbols[]) + CLI->>DL: load_data(cache, symbols, market) + DL->>Cache: Check for cached data + alt Cache hit + Cache-->>DL: Cached DataFrame + else Cache miss + DL->>API: Download OHLCV (multi-threaded) + API-->>DL: Price DataFrames + DL->>Cache: Save pickle + DL->>DB: Persist to SQLite (if --db-path) + end + DL-->>CLI: {tickers, price_data, company_info, sectors, industries} +``` --- -### 2. Model Layer +### 3. 
Analysis Layer | Component | File | Responsibility | |-----------|------|----------------| -| **Models** | [Models.py](file:///opt/developments/project_alpha/src/classes/Models.py) | TensorFlow Probability models for volatility estimation | -| **Volatile** | [Volatile.py](file:///opt/developments/project_alpha/src/classes/Volatile.py) | Prediction generation, trend rating, stock clustering | +| **VolatileAnalyzer** | [VolatileAnalyzer.py](file:///opt/developments/project_alpha/src/classes/analysis/VolatileAnalyzer.py) | Main orchestrator โ€” coordinates model training, trend analysis, correlation analysis, and returns `AnalysisResult` | +| **TrendAnalyzer** | [TrendAnalyzer.py](file:///opt/developments/project_alpha/src/classes/analysis/TrendAnalyzer.py) | Hierarchical Bayesian trend estimation using TensorFlow Probability | +| **CorrelationAnalyzer** | [CorrelationAnalyzer.py](file:///opt/developments/project_alpha/src/classes/analysis/CorrelationAnalyzer.py) | Stock correlation matching and similarity scoring | +| **VolatileConfig** | [VolatileConfig.py](file:///opt/developments/project_alpha/src/classes/analysis/VolatileConfig.py) | Typed dataclass configs: `VolatileConfig`, `TrainingConfig`, `RatingThresholds` | +| **RegimeDetector** | [regime.py](file:///opt/developments/project_alpha/src/classes/analysis/regime.py) | HMM-based market regime detection (Bull / Bear / Sideways) using `hmmlearn` | **Model Architecture:** -- **Hierarchical Bayesian Model**: Market โ†’ Sector โ†’ Industry โ†’ Stock levels -- **Polynomial regression** for trend estimation +- **Hierarchical Bayesian Model**: Market โ†’ Sector โ†’ Industry โ†’ Stock levels (MSIS-MCS training) +- **Polynomial regression** for trend estimation (configurable order) - **Gaussian conjugate model** for sequential log-price prediction -- Uses `TensorFlow 2.14` with `TensorFlow Probability 0.22` +- **Hidden Markov Model** for regime detection using log-returns + rolling volatility features -**Key Functions:** 
-- `train_msis_mcs()` - Sequential Adam optimization across hierarchy levels -- `estimate_logprice_statistics()` - Mean/variance estimation -- `rate()` - Classifies stocks as "HIGHLY BELOW TREND" to "HIGHLY ABOVE TREND" +**Key Outputs:** +- `AnalysisResult`: scores, growth, volatility, ratings, correlation matches +- Stock ratings: `HIGHLY BELOW TREND` โ†’ `HIGHLY ABOVE TREND` +- Regime labels: `Bull`, `Bear`, `Sideways` --- -### 3. Screening Layer +### 4. Screening Layer + +The screener framework uses a **pluggable architecture** with a base class, registry, and consensus engine. + +#### Screener Framework + +```mermaid +classDiagram + class BaseScreener { + <> + +name: str + +description: str + +screen(ticker, data) ScreenerResult + +screen_batch(tickers, price_data) BatchScreenerResult + +validate_data(data) bool + } + + class Signal { + <> + BUY + SELL + HOLD + STRONG_BUY + STRONG_SELL + } + + class ScreenerResult { + +ticker: str + +signal: Signal + +confidence: float + +details: dict + +is_bullish: bool + +is_bearish: bool + } + + class ScreenerRegistry { + <> + +register(screener) + +get(name) BaseScreener + +list_available() list + } + + class ConsensusEngine { + +weights: dict + +calculate_score(ticker, screener_results, filter_results) ConsensusResult + } + + BaseScreener <|-- BreakoutScreener + BaseScreener <|-- TrendlineScreener + BaseScreener <|-- MovingAverageScreener + BaseScreener <|-- MACDScreener + BaseScreener <|-- DonchianScreener + BaseScreener --> ScreenerResult + ScreenerResult --> Signal + ScreenerRegistry --> BaseScreener + ConsensusEngine --> ScreenerResult +``` + +#### Available Screeners | Screener | File | Strategy | |----------|------|----------| -| **Main Screener** | [Screener.py](file:///opt/developments/project_alpha/src/classes/Screener.py) | 25+ validation methods (VCP, Lorentzian, trendlines, volume, RSI, etc.) 
| -| **MA Screener** | [Screener_ma.py](file:///opt/developments/project_alpha/src/classes/Screener_ma.py) | Moving average crossover strategies (10/30/50/200 SMA) | -| **MACD Screener** | [Screener_macd.py](file:///opt/developments/project_alpha/src/classes/Screener_macd.py) | MACD signal line crossovers | -| **Breakout** | [Screener_breakout.py](file:///opt/developments/project_alpha/src/classes/Screener_breakout.py) | Price & volume breakout detection | -| **Trendline** | [Screener_trendline.py](file:///opt/developments/project_alpha/src/classes/Screener_trendline.py) | Strong uptrend identification via slope analysis | -| **Donchian** | [Screener_donchain.py](file:///opt/developments/project_alpha/src/classes/Screener_donchain.py) | Channel breakout strategy | -| **Value** | [Screener_value.py](file:///opt/developments/project_alpha/src/classes/Screener_value.py) | Value investing metrics | +| **Breakout** | [breakout.py](file:///opt/developments/project_alpha/src/classes/screeners/breakout.py) | Price & volume breakout detection with ADX trend strength and ATR expansion filtering | +| **Trendline** | [trendline.py](file:///opt/developments/project_alpha/src/classes/screeners/trendline.py) | Strong uptrend identification via slope analysis | +| **Moving Average** | [moving_average.py](file:///opt/developments/project_alpha/src/classes/screeners/moving_average.py) | SMA crossover strategies (10/30/50/200 periods) | +| **MACD** | [macd.py](file:///opt/developments/project_alpha/src/classes/screeners/macd.py) | MACD signal line crossovers | +| **Donchian** | [donchian.py](file:///opt/developments/project_alpha/src/classes/screeners/donchian.py) | Channel breakout strategy | + +#### Consensus Engine + +The `ConsensusEngine` aggregates signals from multiple screeners and filters into a single confidence score: + +``` +Default Weights: + breakout: 0.4 + trend: 0.3 + volatility: 0.1 + fundamental: 0.1 + sentiment: 0.1 +``` + +Includes a **synergy bonus** (+0.1) when 
both Breakout and Trend signals are present. + +--- + +### 5. Filters + +Post-screening filters that further refine symbol lists: + +| Filter | File | Data Source | Technique | +|--------|------|-------------|-----------| +| **FundamentalFilter** | [fundamental_filter.py](file:///opt/developments/project_alpha/src/classes/filters/fundamental_filter.py) | Finnhub API | Debt/Equity < 200%, P/E ratio bounds, ROE > 0, Revenue Growth > 0. Results cached with `@lru_cache`. | +| **SentimentFilter** | [sentiment_filter.py](file:///opt/developments/project_alpha/src/classes/filters/sentiment_filter.py) | FinBERT (HuggingFace) | NLP sentiment scoring of news headlines. Lazy singleton model loading. Score range: -1.0 to +1.0. | + +Both filters **degrade gracefully** โ€” if API keys are missing or models fail to load, they pass all stocks through. + +--- + +### 6. Risk Management + +| Component | File | Responsibility | +|-----------|------|----------------| +| **RiskManager** | [risk_manager.py](file:///opt/developments/project_alpha/src/classes/risk/risk_manager.py) | ATR-based stop-loss calculation, Kelly-style position sizing (`Risk Amount / Risk Per Share`), exposure validation | +| **TransactionCosts** | [transaction_costs.py](file:///opt/developments/project_alpha/src/classes/risk/transaction_costs.py) | Commission + slippage + spread modeling with `us_default()` and `india_default()` presets | + +**Key Parameters** (from `Settings`): + +| Parameter | Default | Description | +|-----------|---------|-------------| +| `risk_per_trade` | 1% | Maximum capital risked per trade | +| `atr_multiplier` | 2.0ร— | Stop-loss distance in ATR units | +| `atr_period` | 14 | ATR calculation lookback | +| `max_positions` | 10 | Concurrent open position limit | +| `max_portfolio_exposure` | 100% | Total portfolio exposure cap | +| `trailing_stop` | `True` | Enable trailing stop-loss | + +--- + +### 7. 
Backtesting Framework + +| Component | File | Responsibility | +|-----------|------|----------------| +| **BacktestEngine** | [engine.py](file:///opt/developments/project_alpha/src/classes/backtesting/engine.py) | Wrapper around `backtesting.py` library. Configures and runs strategy backtests. | +| **ProjectAlphaStrategy** | [engine.py](file:///opt/developments/project_alpha/src/classes/backtesting/engine.py) | Base `Strategy` subclass integrating screener signals with `RiskManager` logic (ATR stop-loss, position sizing) | +| **ScreenerSignalAdapter** | [adapter.py](file:///opt/developments/project_alpha/src/classes/backtesting/adapter.py) | Adapts `BaseScreener` to historical signal generation. Supports point-in-time iteration and vectorized fast-paths for Breakout/Trend. | +| **WalkForwardValidator** | [walk_forward.py](file:///opt/developments/project_alpha/src/classes/backtesting/walk_forward.py) | Anchored expanding window validation. Runs sequential IS/OOS backtests and computes Sharpe degradation for overfitting detection. | +| **BacktestPerformance** | [performance.py](file:///opt/developments/project_alpha/src/classes/backtesting/performance.py) | Extracts metrics into `BacktestResult` dataclass (30+ fields) and generates interactive HTML reports | + +**Walk-Forward Validation Flow:** + +```mermaid +graph LR + A["Full Historical Data"] --> B["Window 1
Train: Fixed Start → T1"] + B --> C["Test: T1 → T2"] + A --> D["Window 2
Train: Fixed Start → T2"] + D --> E["Test: T2 → T3"] + A --> F["Window N
Train: Fixed Start → Tn"] + F --> G["Test: Tn → End"] + C --> H["Aggregate OOS Metrics"] + E --> H + G --> H + H --> I["Overfitting Check
(Sharpe Degradation < 0.5)"] +``` --- -### 4. Output Layer +### 8. Output Layer -| Component | File | Output Type | -|-----------|------|-------------| -| **Plotting** | [Plotting.py](file:///opt/developments/project_alpha/src/classes/Plotting.py) | Market/sector/industry/stock trend charts with uncertainty bounds | -| **Plot_stocks** | [Plot_stocks.py](file:///opt/developments/project_alpha/src/classes/Plot_stocks.py) | Candlestick charts with indicators | -| **Send_email** | [Send_email.py](file:///opt/developments/project_alpha/src/classes/Send_email.py) | SMTP-based email with CSV/image attachments | -| **Tools** | [Tools.py](file:///opt/developments/project_alpha/src/classes/Tools.py) | CSV export, progress bars, TradingView integration | +| Component | File | Responsibility | +|-----------|------|----------------| +| **ChartBuilder** | [charts.py](file:///opt/developments/project_alpha/src/classes/output/charts.py) | Candlestick charts with technical indicators, batch chart generation | +| **Formatters** | [formatters.py](file:///opt/developments/project_alpha/src/classes/output/formatters.py) | `ResultFormatter` base with `CSVFormatter`, `JSONFormatter`, `TableFormatter`, `HTMLFormatter` | +| **Exporters** | [exporters.py](file:///opt/developments/project_alpha/src/classes/output/exporters.py) | File-based export utilities (`export_csv`, `export_json`, `export_html`) | +| **Console** | [console.py](file:///opt/developments/project_alpha/src/classes/output/console.py) | Rich-powered terminal UI โ€” banners, section headers, progress bars, result tables, config/summary panels | +| **EmailServer** | [email.py](file:///opt/developments/project_alpha/src/classes/output/email.py) | SMTP email with inline charts, CSV data, and PDF report attachments | --- ## Market Support -| Market | Index Functions | Symbol Format | -|--------|-----------------|---------------| -| **US** | `sp_500()`, `nasdaq_all()`, `dow_jones()`, `tech_100()` | Ticker only (e.g., `AAPL`) | -| 
**India** | `nse_500()`, `nse_50()`, `nse_100()`, `nse_all()` | Ticker with `.NS` suffix | +| Market | Index Functions | Symbol Format | Data Provider | +|--------|-----------------|---------------|---------------| +| **US** | `sp_500()`, `nasdaq_all()`, `dow_jones()`, `tech_100()` | Ticker only (e.g., `AAPL`) | yfinance / Polygon.io | +| **India** | `nse_500()`, `nse_50()`, `nse_100()`, `nse_all()` | `.NS` suffix (e.g., `RELIANCE.NS`) | yfinance | --- @@ -159,81 +483,227 @@ project_alpha/ sequenceDiagram participant User participant CLI as project_alpha.py + participant CFG as Settings participant Index as IndexListFetcher participant DL as Download - participant Vol as Volatile + participant Regime as RegimeDetector + participant Vol as VolatileAnalyzer participant Scr as Screeners + participant Filt as Filters + participant Con as ConsensusEngine + participant BT as BacktestEngine participant Out as Output - - User->>CLI: python src/project_alpha.py --market us + + User->>CLI: python project_alpha.py --market us --screeners breakout,trend + CLI->>CFG: Load settings (.env + defaults) + + opt Regime Detection (--regime-detection) + CLI->>Regime: fit(index_df) + predict() + Regime-->>CLI: Current regime (Bull/Bear/Sideways) + end + CLI->>Index: sp_500() Index-->>CLI: (index_name, symbols[]) CLI->>DL: load_data(cache, symbols, market) DL-->>CLI: {tickers, price_data, company_info} - - CLI->>Vol: load_volatile_data() + volatile() - Note over Vol: Train TensorFlow model - Vol-->>CLI: volatile_df (predictions) - - CLI->>Scr: breakout_screener(data) - Scr-->>CLI: {BUY: [...], SELL: [...]} - - CLI->>Scr: trendline_screener(data) - Scr-->>CLI: {Trend: [(sym, trend), ...]} - - CLI->>Out: create_plot_and_email_batched() - CLI->>Out: save_screener_results_to_csv() + + alt Screening Mode (default) + opt Volatility Analysis + CLI->>Vol: analyze(data, rank_method) + Vol-->>CLI: AnalysisResult (scores, growth, ratings) + end + + CLI->>Scr: screen_batch(tickers, price_data) + 
Scr-->>CLI: BatchScreenerResult {BUY, SELL, HOLD} + + opt Fundamental Filter (--fundamental) + CLI->>Filt: check_health(ticker) + Filt-->>CLI: {passed, reason} + end + + opt Sentiment Filter (--sentiment) + CLI->>Filt: analyze_sentiment(headlines) + Filt-->>CLI: {score, label} + end + + opt Consensus Scoring (--consensus) + CLI->>Con: calculate_score(ticker, screener_results, filter_results) + Con-->>CLI: ConsensusResult (score, recommendation) + end + + CLI->>Out: Charts + CSV + Email + + else Backtesting Mode (--backtest) + CLI->>BT: run(strategy, screener_cls) + BT-->>CLI: BacktestResult metrics + + else Walk-Forward Mode (--walk-forward) + CLI->>BT: WalkForwardValidator.validate(screener_cls) + BT-->>CLI: Summary DataFrame + overfitting warnings + end ``` --- -## Technology Stack +## Command-Line Interface -| Category | Technology | Version | -|----------|------------|---------| -| **Language** | Python | โ‰ฅ3.11, <3.13 | -| **ML Framework** | TensorFlow + TensorFlow Probability | 2.14.0 / 0.22.0 | -| **Data Processing** | Pandas, NumPy | 2.3.0+ / 1.26.4 | -| **Technical Analysis** | pandas-ta, advanced-ta | 0.3.14b0 / 0.1.8 | -| **Data Source** | yfinance, TradingView-TA | 0.2.63 / 3.3.0 | -| **Visualization** | Matplotlib, Plotly | 3.10.3 / 6.1.2 | -| **Web Scraping** | BeautifulSoup4, Requests | 4.13.4 / 2.32.4 | -| **Database** | SQLite3 | Built-in | -| **Package Manager** | Poetry | 2.0+ | +The CLI is built with `rich-click` and organized into 8 option groups: ---- +### Market Selection + +| Argument | Type | Default | Description | +|----------|------|---------|-------------| +| `--market` | `us` / `india` | `us` | Market to analyze | +| `--symbols` | str (multiple) | โ€” | Specific symbols (overrides market) | + +### Screener Configuration + +| Argument | Type | Default | Description | +|----------|------|---------|-------------| +| `--screeners` | comma-separated | `all` | `all`, `volatility`, `breakout`, `trend`, `ma`, `macd`, `donchain` | +| `--rank` 
| choice | `growth` | Ranking: `rate`, `growth`, `volatility` | +| `--top` | int | โ€” | Limit to top N stocks | +| `--min-price` / `--max-price` | float | โ€” | Price range filter | + +### Output Options -## Configuration +| Argument | Type | Default | Description | +|----------|------|---------|-------------| +| `--format` | choice | `table` | Output: `table`, `csv`, `json` | +| `--save-table` / `--no-save-table` | flag | `True` | Save results to CSV | +| `--no-plots` | flag | `False` | Disable chart generation | +| `--verbose` / `--quiet` | flag | โ€” | Log verbosity control | +| `--json-logs` | flag | `False` | Structured JSON log output | +| `--log-level` | choice | `INFO` | `DEBUG`, `INFO`, `WARNING`, `ERROR` | -### Command-Line Arguments +### Data & Caching | Argument | Type | Default | Description | |----------|------|---------|-------------| -| `--market` | str | `us` | Market selection (`us` or `india`) | -| `--rank` | str | `growth` | Ranking method: `rate`, `growth`, `volatility` | -| `--cache` | flag | `True` | Use cached data if available | -| `--db-path` | str | None | SQLite database path for persistence | -| `--save-table` | flag | `True` | Export prediction table to CSV | -| `--no-plots` | flag | False | Disable chart generation | -| `--load-model` | str | None | Load pre-trained model parameters | -| `--save-model` | str | None | Save trained model parameters | +| `--cache` / `--no-cache` | flag | `True` | Use cached data | +| `--db-path` | path | โ€” | SQLite database path | +| `--data-provider` | choice | `yfinance` | `yfinance` / `polygon` | +| `--polygon-api-key` | str | โ€” | Polygon.io API key | + +### Model Options + +| Argument | Type | Default | Description | +|----------|------|---------|-------------| +| `--load-model` | path | โ€” | Load pre-trained model parameters | +| `--save-model` | path | โ€” | Save trained model parameters | + +### Risk Management + +| Argument | Type | Default | Description | 
+|----------|------|---------|-------------| +| `--risk-per-trade` | float | `0.01` | Risk per trade (1%) | +| `--atr-multiplier` | float | `2.0` | ATR multiplier for stop-loss | +| `--max-positions` | int | `10` | Max concurrent positions | + +### Backtesting + +| Argument | Type | Default | Description | +|----------|------|---------|-------------| +| `--backtest` | flag | `False` | Run backtest mode | +| `--initial-capital` | float | `10000` | Starting capital | +| `--benchmark` | str | `SPY` | Benchmark symbol | +| `--walk-forward` | flag | `False` | Walk-forward validation mode | +| `--wf-train-months` | int | `12` | Training window (months) | +| `--wf-test-months` | int | `3` | Testing window (months) | + +### Additional Features + +| Argument | Type | Default | Description | +|----------|------|---------|-------------| +| `--fundamental` / `--no-fundamental` | flag | `False` | Enable fundamental analysis (Finnhub) | +| `--sentiment` / `--no-sentiment` | flag | `False` | Enable sentiment analysis (FinBERT) | +| `--consensus` / `--no-consensus` | flag | `False` | Enable multi-signal consensus scoring | +| `--regime-detection` | flag | `False` | Enable HMM regime detection | +| `--regime-index` | str | `SPY` | Index for regime detection | +| `--value` | flag | `False` | Include value stocks (India only) | + +--- + +## Technology Stack + +| Category | Technology | Version | +|----------|------------|---------| +| **Language** | Python | โ‰ฅ3.12, <3.14 | +| **ML Framework** | TensorFlow + TensorFlow Probability | โ‰ฅ2.20 / โ‰ฅ0.25 | +| **ML Utilities** | scikit-learn, hmmlearn | โ‰ฅ1.5 / โ‰ฅ0.3.3 | +| **NLP** | HuggingFace Transformers + PyTorch (FinBERT) | โ‰ฅ5.2 / โ‰ฅ2.10 | +| **Data Processing** | Pandas, NumPy, SciPy | โ‰ฅ2.0 / โ‰ฅ2.1 / โ‰ฅ1.14 | +| **Technical Analysis** | `ta` (technical analysis library) | โ‰ฅ0.11 | +| **Data Sources** | yfinance, Polygon.io, Finnhub, TradingView-TA | โ‰ฅ0.2.63 / โ‰ฅ1.16 / โ‰ฅ2.4 / โ‰ฅ3.3 | +| **Backtesting** | 
backtesting.py, pyfolio-reloaded | โ‰ฅ0.3.3 / โ‰ฅ0.9.5 | +| **Visualization** | Matplotlib, Plotly, Kaleido | โ‰ฅ3.8 / โ‰ฅ5.18 / โ‰ฅ1.2 | +| **CLI** | Click + rich-click + Rich | โ‰ฅ8.1 / โ‰ฅ1.8 / โ‰ฅ14.0 | +| **Web Scraping** | BeautifulSoup4, Requests | โ‰ฅ4.12 / โ‰ฅ2.32 | +| **Configuration** | Pydantic Settings | โ‰ฅ2.0 | +| **Logging** | structlog | โ‰ฅ24.0 | +| **Database** | SQLite3 | Built-in | +| **Reports** | ReportLab (PDF) | โ‰ฅ4.4.9 | +| **Package Manager** | Poetry | 2.0+ | --- ## Data Storage ### Pickle Cache -- Location: `data/historic_data/{market}/{index}_data_YYMMDD.pkl` -- Contains: `{tickers, price_data, company_info, sectors, industries, currencies}` +- **Location:** `data/historic_data/{market}/{index}_data_YYMMDD.pkl` +- **Contents:** `{tickers, price_data, company_info, sectors, industries, currencies}` -### SQLite Database (Optional) +### SQLite Database (Optional โ€” `--db-path`) ```sql --- Tables price_data(symbol, date, open, high, low, close, adj_close, volume, dividends, splits) company_info(symbol, info_json) ``` ### Output Files -- Charts: `data/processed_data/{screener_name}/*.svg` -- CSV Reports: `data/processed_data/screener_{name}/*.csv` -- Logs: `logs/project_alpha_{market}.log` +- **Charts:** `data/processed_data/{screener_name}/*.svg` +- **CSV Reports:** `data/processed_data/screener_{name}/*.csv` +- **Backtest Reports:** `data/processed_data/backtest_report.html` +- **Logs:** `logs/project_alpha_{market}.log` + +--- + +## Testing + +Tests are organized into unit and integration suites: + +| Suite | Coverage | +|-------|----------| +| **Unit** (15 modules) | Backtesting, walk-forward, screeners, consensus, filters, risk manager, transaction costs, regime detection, config, database, download, models, provider chain, data repair, breakout confirmation | +| **Integration** (2 modules) | Download pipeline, end-to-end screening pipeline | + +Run tests with: +```bash +pytest tests/ -v --cov=src +``` + +--- + +## Deployment + +### 
Docker +```bash +docker compose up --build +``` + +### Cron Scheduling +```bash +# US market scan (daily at 4:30 PM ET) +30 16 * * 1-5 /path/to/run_us_stock_scanner.sh + +# India market scan (daily at 3:45 PM IST) +45 15 * * 1-5 /path/to/run_india_stock_scanner.sh +``` + +### Environment Variables +All settings can be overridden with `PA_` prefix: +```bash +export PA_MARKET=us +export PA_FINNHUB_API_KEY=your_key +export PA_POLYGON_API_KEY=your_key +export PA_RISK_PER_TRADE=0.02 +``` diff --git a/docs/data_providers.md b/docs/data_providers.md new file mode 100644 index 0000000..dceda72 --- /dev/null +++ b/docs/data_providers.md @@ -0,0 +1,64 @@ +# Data Provider Chain + +The Data Provider Chain is a new abstraction layer designed to unify market data fetching from multiple sources (YFinance, Polygon.io, etc.). It ensures that regardless of the source, the application receives data in a consistent format. + +## Architecture + +The core is the abstract base class `DataProvider` defined in `src/classes/data/provider_chain.py`. All specific providers inherit from this class and must implement: + +1. `fetch_data(ticker, start_date, end_date) -> pd.DataFrame` +2. `check_health() -> bool` + +### Standardized Output + +All providers return a pandas DataFrame with the following columns: +- `Open` (float) +- `High` (float) +- `Low` (float) +- `Close` (float) +- `Volume` (int/float) + +The index is always a `DatetimeIndex` sorted in ascending order. + +## Supported Providers + +### 1. YFinanceProvider (`yfinance`) - Default +- **Source**: Yahoo Finance (via `yfinance` library). +- **Pros**: Free, extensive coverage, no API key required. +- **Cons**: Rate limits can be unpredictable, data quality varies. +- **Health Check**: Fetches 1 day of historical data for `SPY`. + +### 2. PolygonProvider (`polygon-api-client`) +- **Source**: Polygon.io API. +- **Requires**: API Key (set via `PA_POLYGON_API_KEY` or `Settings`). +- **Pros**: High quality, official API, faster. 
+- **Cons**: Free tier has rate limits (5 calls/min). +- **Health Check**: Fetches the last trade for `SPY`. + +## Usage Example + +```python +from src.classes.data.provider_chain import YFinanceProvider, PolygonProvider +from datetime import datetime, timedelta + +# Initialize provider +# provider = YFinanceProvider() +provider = PolygonProvider(api_key="YOUR_KEY") + +# Check health +if provider.check_health(): + print("Provider is healthy") + +# Fetch data +start = datetime.now() - timedelta(days=30) +end = datetime.now() +df = provider.fetch_data("AAPL", start, end) + +print(df.head()) +``` + +## Future Improvements + +- **AlphaVantage Support**: Add `AlphaVantageProvider`. +- **Failover Logic**: Implement a `ChainProvider` that tries providers in sequence (e.g., Polygon -> YFinance). +- **Rate Limiting**: Add strict rate limiting decorators to respect API tier limits. diff --git a/docs/deployment_guide.md b/docs/deployment_guide.md new file mode 100644 index 0000000..6520366 --- /dev/null +++ b/docs/deployment_guide.md @@ -0,0 +1,145 @@ +# Deployment Guide: Project Alpha (Rocky Linux 9) + +This guide details how to deploy Project Alpha to a production Linux server (VPS/VM) using Docker Compose on **Rocky Linux 9**. + +## 1. Prerequisites +- **Server**: A Linux VPS running Rocky Linux 9 (or similar RHEL-based distro). + - Minimum specs: 2 vCPU, 4GB RAM (due to ML models). +- **Domain (Optional)**: If you plan to expose the application via HTTP. +- **SSH Key**: For secure server access. + +## 2. Server Provisioning & Security +Before deploying the application, secure the server. + +### 2.1 Login and Update +```bash +ssh root@your_server_ip +dnf upgrade -y +``` + +### 2.2 Create a Non-Root User +Do not run the application as root. We will create a user named `deploy` and add them to the `wheel` group for sudo access. 
+```bash +adduser deploy +passwd deploy # Set a password for the deploy user +usermod -aG wheel deploy +su - deploy +``` + +### 2.3 Configure Firewall (firewalld) +Rocky Linux uses `firewalld` by default. Allow only SSH (22). +```bash +sudo systemctl start firewalld +sudo systemctl enable firewalld +sudo firewall-cmd --permanent --add-service=ssh +# If you plan to add a web interface later, uncomment the following: +# sudo firewall-cmd --permanent --add-service=http +# sudo firewall-cmd --permanent --add-service=https +sudo firewall-cmd --reload +``` + +## 3. Environment Setup + +### 3.1 Install Git and Docker +1. **Install Git and dependencies:** + ```bash + sudo dnf install -y git dnf-plugins-core + ``` + +2. **Add Docker Repository:** + ```bash + sudo dnf config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo + ``` + +3. **Install Docker Engine & Compose:** + ```bash + sudo dnf install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin + ``` + +4. **Start and Enable Docker:** + ```bash + sudo systemctl start docker + sudo systemctl enable docker + ``` + +5. **Allow 'deploy' user to use Docker without sudo:** + ```bash + sudo usermod -aG docker $USER + # Log out and back in for this to take effect + exit + ssh deploy@your_server_ip + ``` + +## 4. Application Deployment + +### 4.1 Clone Repository +```bash +git clone https://github.com/yourusername/project_alpha.git +cd project_alpha +``` + +### 4.2 Configure Secrets +Create the production environment file. +```bash +cp .env.example .env +nano .env +``` +**Important:** +- Set `FINNHUB_API_KEY` to your production key. +- Ensure `DATA_DIR` and `LOG_DIR` are correct (default `./data`, `./logs`). + +### 4.3 Build and Run +```bash +# Build the production image +docker compose build + +# Start the service in the background +docker compose up -d + +# Check status +docker compose ps +``` + +## 5. 
Operations & Maintenance + +### 5.1 Viewing Logs +```bash +# Follow logs in real-time +docker compose logs -f +``` + +### 5.2 Updating the Application +When you have pushed code changes to Git: +```bash +git pull origin main +docker compose build +docker compose up -d +``` +Docker Compose will recreate only the containers that have changed. + +### 5.3 Automated Tasks +The application currently runs via CLI. To schedule periodic runs (e.g., daily scan at 6 AM), use `cron`. + +1. **Ensure Cron Service is Running:** + ```bash + sudo dnf install -y cronie + sudo systemctl enable --now crond + ``` + +2. **Edit Crontab:** + ```bash + crontab -e + ``` + +3. **Add Job:** + ```cron + # Run daily US market scan at 6:00 AM UTC + 0 6 * * * cd /home/deploy/project_alpha && /usr/bin/docker compose run --rm app --market us --top 20 + ``` + +### 5.4 Backups +The `data/` directory contains your SQLite database and cache. Backup this directory regularly. +```bash +# Example manual backup +tar -czvf project_alpha_backup_$(date +%F).tar.gz data/ +``` diff --git a/docs/implementation_roadmap.md b/docs/implementation_roadmap.md index 8340ab0..46a617c 100644 --- a/docs/implementation_roadmap.md +++ b/docs/implementation_roadmap.md @@ -134,8 +134,8 @@ tests/ ### TODO Checklist -- [ ] **1.1.1** Create `tests/conftest.py` with shared pytest fixtures -- [ ] **1.1.2** Create `tests/fixtures/sample_data.py` โ€” generate synthetic OHLCV DataFrames for known patterns +- [x] **1.1.1** Create `tests/conftest.py` with shared pytest fixtures +- [x] **1.1.2** Create `tests/fixtures/sample_data.py` โ€” generate synthetic OHLCV DataFrames for known patterns ```python def make_uptrend(days=60, start_price=100, daily_return=0.005): """Generate synthetic uptrend data for testing.""" @@ -144,17 +144,17 @@ tests/ def make_bear_market(days=60, start_price=100, daily_return=-0.008): """Generate synthetic downtrend data.""" ``` -- [ ] **1.1.3** Write `test_database_manager.py` โ€” test CRUD, duplicate 
handling, empty DataFrames -- [ ] **1.1.4** Write `test_download.py` โ€” mock `yfinance.download()`, test retry logic, missing data handling -- [ ] **1.1.5** Write `test_models.py` โ€” test model I/O (save/load), verify loss decreases during training -- [ ] **1.1.6** Write `test_breakout.py` โ€” verify signal fires only when all 5 conditions met -- [ ] **1.1.7** Write `test_trendline.py` โ€” validate angle thresholds map to correct trend labels -- [ ] **1.1.8** Write `test_macd.py` โ€” verify crossover detection in synthetic data -- [ ] **1.1.9** Write `test_donchian.py` โ€” verify channel boundary signals -- [ ] **1.1.10** Write `test_moving_average.py` โ€” verify all 5 sub-strategies -- [ ] **1.1.11** Write `test_pipeline.py` โ€” integration test for full end-to-end workflow -- [ ] **1.1.12** Write `test_data_consistency.py` โ€” verify DB โ†” Pickle produce identical results -- [ ] **1.1.13** Add `pytest.ini` or `pyproject.toml` pytest config with coverage thresholds +- [x] **1.1.3** Write `test_database_manager.py` โ€” test CRUD, duplicate handling, empty DataFrames +- [x] **1.1.4** Write `test_download.py` โ€” mock `yfinance.download()`, test retry logic, missing data handling +- [x] **1.1.5** Write `test_models.py` โ€” test model I/O (save/load), verify loss decreases during training +- [x] **1.1.6** Write `test_breakout.py` โ€” verify signal fires only when all 5 conditions met +- [x] **1.1.7** Write `test_trendline.py` โ€” validate angle thresholds map to correct trend labels +- [x] **1.1.8** Write `test_macd.py` โ€” verify crossover detection in synthetic data +- [x] **1.1.9** Write `test_donchian.py` โ€” verify channel boundary signals +- [x] **1.1.10** Write `test_moving_average.py` โ€” verify all 5 sub-strategies +- [x] **1.1.11** Write `test_pipeline.py` โ€” integration test for full end-to-end workflow +- [x] **1.1.12** Write `test_data_consistency.py` โ€” verify DB โ†” Pickle produce identical results +- [x] **1.1.13** Add `pytest.ini` or 
`pyproject.toml` pytest config with coverage thresholds ```ini [tool.pytest.ini_options] minversion = "8.0" @@ -166,7 +166,7 @@ tests/ [tool.coverage.report] fail_under = 80 ``` -- [ ] **1.1.14** Verify `pytest --cov` shows โ‰ฅ 80% coverage across core modules +- [x] **1.1.14** Verify `pytest --cov` shows โ‰ฅ 80% coverage across core modules ### Research Notes @@ -190,7 +190,7 @@ src/config/ ### TODO Checklist -- [ ] **1.2.1** Create `src/config/settings.py` with Pydantic `BaseSettings` +- [x] **1.2.1** Create `src/config/settings.py` with Pydantic `BaseSettings` ```python from pydantic_settings import BaseSettings from pydantic import Field @@ -233,13 +233,13 @@ src/config/ model_config = {"env_file": ".env", "env_prefix": "PA_"} ``` -- [ ] **1.2.2** Create `.env.example` documenting all environment variables -- [ ] **1.2.3** Refactor `VolatileConfig` to source defaults from `Settings` -- [ ] **1.2.4** Refactor each screener's `__init__` to accept parameters from `Settings` -- [ ] **1.2.5** Refactor `project_alpha.py` CLI arguments to use `Settings` as defaults (CLI overrides env) -- [ ] **1.2.6** Create `defaults.yaml` for non-sensitive defaults -- [ ] **1.2.7** Write `tests/unit/test_config.py` โ€” test env loading, overrides, validation -- [ ] **1.2.8** Add `.env` to `.gitignore` +- [x] **1.2.2** Create `.env.example` documenting all environment variables +- [x] **1.2.3** Refactor `VolatileConfig` to source defaults from `Settings` +- [x] **1.2.4** Refactor each screener's `__init__` to accept parameters from `Settings` +- [x] **1.2.5** Refactor `project_alpha.py` CLI arguments to use `Settings` as defaults (CLI overrides env) +- [x] **1.2.6** Create `defaults.yaml` for non-sensitive defaults +- [x] **1.2.7** Write `tests/unit/test_config.py` โ€” test env loading, overrides, validation +- [x] **1.2.8** Add `.env` to `.gitignore` ### Research Notes @@ -261,7 +261,7 @@ src/exceptions.py # Custom exception hierarchy ### TODO Checklist -- [ ] **1.3.1** Create 
`src/logging_config.py` +- [x] **1.3.1** Create `src/logging_config.py` ```python import structlog import logging @@ -280,7 +280,7 @@ src/exceptions.py # Custom exception hierarchy structlog.configure(processors=processors) ``` -- [ ] **1.3.2** Create `src/exceptions.py` with custom exception hierarchy +- [x] **1.3.2** Create `src/exceptions.py` with custom exception hierarchy ```python class ProjectAlphaError(Exception): """Base exception for all application errors.""" @@ -300,13 +300,13 @@ src/exceptions.py # Custom exception hierarchy class DataValidationError(ProjectAlphaError): """Input data failed validation.""" ``` -- [ ] **1.3.3** Replace `print()` calls in `Download.py` with `logger.info()`/`logger.error()` -- [ ] **1.3.4** Replace `print()` calls in `Volatile.py` with structured log calls -- [ ] **1.3.5** Replace `print()` calls in `project_alpha.py` with structured log calls -- [ ] **1.3.6** Replace string error returns (e.g., `return "Error: ..."`) with proper exception raising -- [ ] **1.3.7** Add `try/except` blocks in screener batch execution with `logger.exception()` -- [ ] **1.3.8** Add `--log-level` and `--json-logs` CLI options -- [ ] **1.3.9** Write tests verifying log output format and exception propagation +- [x] **1.3.3** Replace `print()` calls in `Download.py` with `logger.info()`/`logger.error()` +- [x] **1.3.4** Replace `print()` calls in `Volatile.py` with structured log calls +- [x] **1.3.5** Replace `print()` calls in `project_alpha.py` with structured log calls +- [x] **1.3.6** Replace string error returns (e.g., `return "Error: ..."`) with proper exception raising +- [x] **1.3.7** Add `try/except` blocks in screener batch execution with `logger.exception()` +- [x] **1.3.8** Add `--log-level` and `--json-logs` CLI options +- [x] **1.3.9** Write tests verifying log output format and exception propagation ### Research Notes @@ -329,8 +329,8 @@ src/classes/risk/ ### TODO Checklist -- [ ] **1.4.1** Create `src/classes/risk/__init__.py` 
and `risk_manager.py` -- [ ] **1.4.2** Implement `RiskConfig` dataclass sourcing defaults from `Settings` (ยง1.2) +- [x] **1.4.1** Create `src/classes/risk/__init__.py` and `risk_manager.py` +- [x] **1.4.2** Implement `RiskConfig` dataclass sourcing defaults from `Settings` (ยง1.2) ```python @dataclass class RiskConfig: @@ -341,12 +341,12 @@ src/classes/risk/ max_open_positions: int = 10 trailing_stop: bool = True ``` -- [ ] **1.4.3** Implement ATR-based stop-loss: `stop = entry - (multiplier ร— ATR)` -- [ ] **1.4.4** Implement fixed-risk position sizing: `shares = (portfolio ร— risk%) / (entry - stop)` -- [ ] **1.4.5** Implement trailing stop logic (ratchets up only, never down) -- [ ] **1.4.6** Implement portfolio-level exposure limits (max positions, sector concentration cap) -- [ ] **1.4.7** Add `--risk-per-trade`, `--atr-multiplier`, `--max-positions` CLI options -- [ ] **1.4.8** Create `transaction_costs.py` +- [x] **1.4.3** Implement ATR-based stop-loss: `stop = entry - (multiplier ร— ATR)` +- [x] **1.4.4** Implement fixed-risk position sizing: `shares = (portfolio ร— risk%) / (entry - stop)` +- [x] **1.4.5** Implement trailing stop logic (ratchets up only, never down) +- [x] **1.4.6** Implement portfolio-level exposure limits (max positions, sector concentration cap) +- [x] **1.4.7** Add `--risk-per-trade`, `--atr-multiplier`, `--max-positions` CLI options +- [x] **1.4.8** Create `transaction_costs.py` ```python @dataclass class TransactionCosts: @@ -361,7 +361,7 @@ src/classes/risk/ @classmethod def india_default(cls): return cls(commission_per_trade=20.0, slippage_bps=10.0, spread_bps=5.0) ``` -- [ ] **1.4.9** Write `tests/unit/test_risk_manager.py` and `test_transaction_costs.py` +- [x] **1.4.9** Write `tests/unit/test_risk_manager.py` and `test_transaction_costs.py` ### Research Notes @@ -384,7 +384,7 @@ src/classes/backtesting/ ### TODO Checklist -- [ ] **1.5.1** Create `adapter.py` โ€” `ScreenerSignalAdapter` +- [x] **1.5.1** Create `adapter.py` โ€” 
`ScreenerSignalAdapter` ```python class ScreenerSignalAdapter: """Runs any BaseScreener over historical data to produce a signal array.""" @@ -392,7 +392,7 @@ src/classes/backtesting/ def compute_signals(self, df: pd.DataFrame) -> np.ndarray: """Walk through df in rolling windows, call screener.screen(), return +1/0/-1.""" ``` -- [ ] **1.5.2** Create `engine.py` โ€” `ProjectAlphaStrategy(Strategy)` +- [x] **1.5.2** Create `engine.py` โ€” `ProjectAlphaStrategy(Strategy)` ```python from backtesting import Strategy, Backtest @@ -413,16 +413,16 @@ src/classes/backtesting/ elif self.signal[-1] == -1 and self.position: self.position.close() ``` -- [ ] **1.5.3** Integrate `TransactionCosts` โ€” apply per-trade cost deduction -- [ ] **1.5.4** Implement `run_backtest(screener, ticker, data, config) -> BacktestResult` -- [ ] **1.5.5** Implement `run_batch_backtest(screener, tickers, data, config) -> BatchBacktestResult` -- [ ] **1.5.6** Create `performance.py` โ€” compute Sharpe, Sortino, max drawdown, win rate, profit factor, CAGR -- [ ] **1.5.7** Integrate pyfolio tear sheet generation (drawdown plot, rolling Sharpe, monthly returns) -- [ ] **1.5.8** Generate interactive HTML backtest report via `bt.plot()` -- [ ] **1.5.9** Output per-trade P&L CSV -- [ ] **1.5.10** Integrate into email/PDF pipeline -- [ ] **1.5.11** Add `--backtest`, `--initial-capital`, `--benchmark SPY` CLI options -- [ ] **1.5.12** Write integration tests using known historical data +- [x] **1.5.3** Integrate `TransactionCosts` โ€” apply per-trade cost deduction +- [x] **1.5.4** Implement `run_backtest(screener, ticker, data, config) -> BacktestResult` +- [x] **1.5.5** Implement `run_batch_backtest(screener, tickers, data, config) -> BatchBacktestResult` +- [x] **1.5.6** Create `performance.py` โ€” compute Sharpe, Sortino, max drawdown, win rate, profit factor, CAGR +- [x] **1.5.7** Integrate pyfolio tear sheet generation (drawdown plot, rolling Sharpe, monthly returns) +- [x] **1.5.8** Generate 
interactive HTML backtest report via `bt.plot()` +- [x] **1.5.9** Output per-trade P&L CSV +- [x] **1.5.10** Integrate into email/PDF pipeline +- [x] **1.5.11** Add `--backtest`, `--initial-capital`, `--benchmark SPY` CLI options +- [x] **1.5.12** Write integration tests using known historical data ### Research Notes @@ -434,12 +434,12 @@ src/classes/backtesting/ ### Phase 1 Test Plan -- [ ] **1.T.1** `pytest tests/unit/ --cov=src --cov-report=html` achieves โ‰ฅ 80% coverage -- [ ] **1.T.2** Run backtest on 1 year AAPL with BreakoutScreener โ€” verify HTML report generates -- [ ] **1.T.3** Verify `Settings` loads from `.env` and CLI overrides work -- [ ] **1.T.4** Verify structured logs output valid JSON with `--json-logs` -- [ ] **1.T.5** Smoke test: `python src/project_alpha.py --market us --backtest --top 5` -- [ ] **1.T.6** Verify all pre-existing tests still pass (no regressions) +- [x] **1.T.1** `pytest tests/unit/ --cov=src --cov-report=html` achieves โ‰ฅ 80% coverage +- [x] **1.T.2** Run backtest on 1 year AAPL with BreakoutScreener โ€” verify HTML report generates +- [x] **1.T.3** Verify `Settings` loads from `.env` and CLI overrides work +- [x] **1.T.4** Verify structured logs output valid JSON with `--json-logs` +- [x] **1.T.5** Smoke test: `python src/project_alpha.py --market us --backtest --top 5` +- [x] **1.T.6** Verify all pre-existing tests still pass (no regressions) --- @@ -464,7 +464,7 @@ src/classes/data/ ### TODO Checklist -- [ ] **2.1.1** Create `validators.py` with OHLCV validation functions +- [x] **2.1.1** Create `validators.py` with OHLCV validation functions ```python from src.exceptions import DataValidationError @@ -487,7 +487,7 @@ src/classes/data/ raise DataValidationError(f"{ticker}: {'; '.join(errors)}") return df ``` -- [ ] **2.1.2** Create `schemas.py` with Pydantic model for price rows +- [x] **2.1.2** Create `schemas.py` with Pydantic model for price rows ```python from pydantic import BaseModel, validator @@ -505,10 +505,10 @@ 
src/classes/data/ raise ValueError("high must be >= low") return v ``` -- [ ] **2.1.3** Integrate `validate_ohlcv()` into `Download.py` after data fetch -- [ ] **2.1.4** Add auto-repair for common issues: fill missing dates, interpolate gaps โ‰ค 3 days -- [ ] **2.1.5** Log validation warnings using structured logging (ยง1.3) -- [ ] **2.1.6** Write tests with intentionally malformed data +- [x] **2.1.3** Integrate `validate_ohlcv()` into `Download.py` after data fetch +- [x] **2.1.4** Add auto-repair for common issues: fill missing dates, interpolate gaps โ‰ค 3 days +- [x] **2.1.5** Log validation warnings using structured logging (ยง1.3) +- [x] **2.1.6** Write tests with intentionally malformed data --- @@ -518,14 +518,14 @@ src/classes/data/ ### TODO Checklist -- [ ] **2.2.1** Add ADX calculation: `pandas_ta.adx(high, low, close, length=14)` -- [ ] **2.2.2** Add ATR expansion check: `atr_current > 1.5 ร— atr_20d_mean` -- [ ] **2.2.3** Add configurable thresholds: `adx_min=20`, `atr_expansion_factor=1.5` (sourced from Settings) -- [ ] **2.2.4** Only emit BUY if existing conditions AND confirmation filters pass -- [ ] **2.2.5** Add trendline direction filter: suppress breakouts opposite to prevailing trend -- [ ] **2.2.6** Add optional TTM Squeeze detection (Bollinger inside Keltner) -- [ ] **2.2.7** Update tests to cover confirmation logic -- [ ] **2.2.8** Add edge case tests (ADX at threshold, ATR at expansion ratio) +- [x] **2.2.1** Add ADX calculation: `pandas_ta.adx(high, low, close, length=14)` +- [x] **2.2.2** Add ATR expansion check: `atr_current > 1.5 ร— atr_20d_mean` +- [x] **2.2.3** Add configurable thresholds: `adx_min=20`, `atr_expansion_factor=1.5` (sourced from Settings) +- [x] **2.2.4** Only emit BUY if existing conditions AND confirmation filters pass +- [x] **2.2.5** Add trendline direction filter: suppress breakouts opposite to prevailing trend +- [x] **2.2.6** Add optional TTM Squeeze detection (Bollinger inside Keltner) +- [x] **2.2.7** Update 
tests to cover confirmation logic +- [x] **2.2.8** Add edge case tests (ADX at threshold, ATR at expansion ratio) ### Research Notes @@ -547,7 +547,7 @@ src/classes/filters/ ### TODO Checklist -- [ ] **2.3.1** Create `fundamental_filter.py` with Finnhub provider +- [x] **2.3.1** Create `fundamental_filter.py` with Finnhub provider ```python import finnhub @@ -560,8 +560,8 @@ src/classes/filters/ # Suppress BUY if: net income declining AND debt/equity > 2.0 ... ``` -- [ ] **2.3.2** Implement FundamentalAnalysis library as fallback provider -- [ ] **2.3.3** Define suppression rules: +- [x] **2.3.2** Implement FundamentalAnalysis library as fallback provider +- [x] **2.3.3** Define suppression rules: | Signal Type | Suppress When | |-------------|---------------| @@ -569,10 +569,10 @@ src/classes/filters/ | BUY (Momentum) | Revenue declining 2+ consecutive quarters | | BUY (Breakout) | P/E > 100 (speculative) | -- [ ] **2.3.4** Add 24-hour caching layer (fundamentals update quarterly) -- [ ] **2.3.5** Add `--fundamental-filter` and `--finnhub-api-key` CLI options (key from Settings/env) -- [ ] **2.3.6** Graceful fallback when API key not provided (skip filter, log warning) -- [ ] **2.3.7** Write tests with mocked API responses +- [x] **2.3.4** Add 24-hour caching layer (fundamentals update quarterly) +- [x] **2.3.5** Add `--fundamental-filter` and `--finnhub-api-key` CLI options (key from Settings/env) +- [x] **2.3.6** Graceful fallback when API key not provided (skip filter, log warning) +- [x] **2.3.7** Write tests with mocked API responses ### Research Notes @@ -587,7 +587,7 @@ src/classes/filters/ ### TODO Checklist -- [ ] **2.4.1** Create `sentiment_filter.py` with FinBERT pipeline +- [x] **2.4.1** Create `sentiment_filter.py` with FinBERT pipeline ```python from transformers import pipeline @@ -600,12 +600,12 @@ src/classes/filters/ # Weighted average: positive=+1, neutral=0, negative=-1 ... 
``` -- [ ] **2.4.2** Implement headline fetching from `yfinance` (primary) and Finnhub (fallback) -- [ ] **2.4.3** Implement suppression: suppress BUY if aggregate sentiment < โˆ’0.5 -- [ ] **2.4.4** Add model caching โ€” download FinBERT once (~420 MB), reuse locally -- [ ] **2.4.5** Add `--sentiment-filter` CLI flag (disabled by default) -- [ ] **2.4.6** Handle no-headlines case gracefully (skip filter) -- [ ] **2.4.7** Write tests with fixture headlines +- [x] **2.4.2** Implement headline fetching from `yfinance` (primary) and Finnhub (fallback) +- [x] **2.4.3** Implement suppression: suppress BUY if aggregate sentiment < โˆ’0.5 +- [x] **2.4.4** Add model caching โ€” download FinBERT once (~420 MB), reuse locally +- [x] **2.4.5** Add `--sentiment-filter` CLI flag (disabled by default) +- [x] **2.4.6** Handle no-headlines case gracefully (skip filter) +- [x] **2.4.7** Write tests with fixture headlines ### Research Notes @@ -620,7 +620,7 @@ src/classes/filters/ ### TODO Checklist -- [ ] **2.5.1** Create `ConsensusScorer` class +- [x] **2.5.1** Create `ConsensusScorer` class ```python WEIGHTS = { "volatility": 0.30, "breakout": 0.20, "trendline": 0.15, @@ -628,23 +628,23 @@ src/classes/filters/ "fundamental": 0.05, } ``` -- [ ] **2.5.2** Implement weighted scoring: `score = ฮฃ(weight ร— signal_value ร— confidence)` -- [ ] **2.5.3** Implement agreement detection (all screeners agree โ†’ high conviction) -- [ ] **2.5.4** Implement contradiction detection (screeners disagree โ†’ HOLD) -- [ ] **2.5.5** Output consensus ranking with per-screener breakdown -- [ ] **2.5.6** Add `--consensus` flag, consensus column in CSV, consensus overlay on charts -- [ ] **2.5.7** Write tests covering agreement, contradiction, missing screeners +- [x] **2.5.2** Implement weighted scoring: `score = ฮฃ(weight ร— signal_value ร— confidence)` +- [x] **2.5.3** Implement agreement detection (all screeners agree โ†’ high conviction) +- [x] **2.5.4** Implement contradiction detection 
(screeners disagree โ†’ HOLD) +- [x] **2.5.5** Output consensus ranking with per-screener breakdown +- [x] **2.5.6** Add `--consensus` flag, consensus column in CSV, consensus overlay on charts +- [x] **2.5.7** Write tests covering agreement, contradiction, missing screeners --- ### Phase 2 Test Plan -- [ ] **2.T.1** Validation catches malformed OHLCV data and raises `DataValidationError` -- [ ] **2.T.2** Breakout confirmation reduces false signals by โ‰ฅ 30% in historical data -- [ ] **2.T.3** Fundamental filter correctly suppresses BUY for stocks with negative earnings -- [ ] **2.T.4** FinBERT classifies sample financial headlines correctly -- [ ] **2.T.5** Consensus scoring arithmetic is correct -- [ ] **2.T.6** End-to-end: `--consensus --fundamental-filter` produces valid output +- [x] **2.T.1** Validation catches malformed OHLCV data and raises `DataValidationError` +- [x] **2.T.2** Breakout confirmation reduces false signals by โ‰ฅ 30% in historical data +- [x] **2.T.3** Fundamental filter correctly suppresses BUY for stocks with negative earnings +- [x] **2.T.4** FinBERT classifies sample financial headlines correctly +- [x] **2.T.5** Consensus scoring arithmetic is correct +- [x] **2.T.6** End-to-end: `--consensus --fundamental-filter` produces valid output --- diff --git a/docs/remaining_tasks.md b/docs/remaining_tasks.md new file mode 100644 index 0000000..9ce601a --- /dev/null +++ b/docs/remaining_tasks.md @@ -0,0 +1,103 @@ +# Remaining Tasks (Phase 3: Infrastructure & Production) + +These tasks are extracted from `docs/implementation_roadmap.md` and represent the outstanding work required to complete the project. 
+ +## ยง3.1 Data Provider Chain +- [ ] **3.1.1** Create abstract `DataProvider` base class +- [ ] **3.1.2** Implement `YFinanceProvider` โ€” wrap existing `Download.py` logic +- [ ] **3.1.3** Implement `PolygonProvider` +- [ ] **3.1.4** Implement `AlphaVantageProvider` +- [ ] **3.1.5** Implement column normalisation โ€” all providers return `Open, High, Low, Close, Volume` DatetimeIndex +- [ ] **3.1.6** Integrate `validate_ohlcv()` from ยง2.1 into the provider chain +- [ ] **3.1.7** Add rate limiting per provider (Polygon: 5/min, Alpha Vantage: 25/day) +- [ ] **3.1.8** Replace direct yfinance calls in `Download.py` with `DataProviderChain` +- [ ] **3.1.9** Add `--data-provider` and API key CLI options (keys from Settings) +- [ ] **3.1.10** Add provider health check on startup +- [ ] **3.1.11** Write integration tests with mocked provider responses + +## ยง3.2 Market Regime Detection +- [ ] **3.2.1** Create `RegimeDetector` with 3-state `GaussianHMM` +- [ ] **3.2.2** Implement feature engineering: log returns + 20-day rolling volatility +- [ ] **3.2.3** Classify states by mean return: Bull (highest) / Bear (lowest) / Neutral +- [ ] **3.2.4** Implement regime-based signal adjustment +- [ ] **3.2.5** Add `--regime-detection` and `--regime-index SPY` options +- [ ] **3.2.6** Generate regime overlay on charts (colour periods by regime) +- [ ] **3.2.7** Write tests with synthetic bull/bear/sideways data +- [ ] **3.2.8** Validate on historical S&P 500 (should detect 2020 crash, 2022 bear) + +## ยง3.3 Walk-Forward Validation +- [ ] **3.3.1** Create anchored expanding-window walk-forward validator +- [ ] **3.3.2** Implement window generation (training starts fixed, end expands, test slides forward) +- [ ] **3.3.3** Reuse `engine.py` from Phase 1 for per-window backtest +- [ ] **3.3.4** Aggregate out-of-sample metrics across all windows +- [ ] **3.3.5** Implement overfitting detection: OOS/IS Sharpe ratio < 0.5 = overfit warning +- [ ] **3.3.6** Add `--walk-forward`, 
`--wf-train-months`, `--wf-test-months` options +- [ ] **3.3.7** Generate walk-forward report: per-window performance + aggregate metrics +- [ ] **3.3.8** Write tests verifying window arithmetic (no overlaps, no gaps) + +## ยง3.4 Volatility Model Improvements +- [ ] **3.4.1** Warm-start default: reduce steps when `--load-model` is used (50000 โ†’ 10000) +- [ ] **3.4.2** Add `--polynomial-order` CLI option to experiment with lower correlation orders +- [ ] **3.4.3** Add validation loss tracking: 80/20 time-series split, log hold-out loss +- [ ] **3.4.4** Log training time and convergence metrics to structured log +- [ ] **3.4.5** Add GPU detection and auto-placement with `tf.config.list_physical_devices('GPU')` + +## ยง3.5 Monitoring & Observability +- [ ] **3.5.1** Create `src/monitoring/metrics.py` with Prometheus metrics +- [ ] **3.5.2** Instrument screeners: increment `SIGNALS_TOTAL` on each signal +- [ ] **3.5.3** Instrument data fetching: observe `DOWNLOAD_DURATION` per request +- [ ] **3.5.4** Instrument model training: set `TRAINING_LOSS` per training step +- [ ] **3.5.5** Instrument caching: track hit/miss ratio +- [ ] **3.5.6** Create Prometheus scrape config (`prometheus.yml`) +- [ ] **3.5.7** Create Grafana dashboard: Screener Signals +- [ ] **3.5.8** Create Grafana dashboard: Data Pipeline +- [ ] **3.5.9** Create alerting rules +- [ ] **3.5.10** Expose `/metrics` endpoint in FastAPI (ยง3.6) +- [ ] **3.5.11** Write tests verifying metrics are incremented correctly + +## ยง3.6 API Layer (FastAPI) +- [ ] **3.6.1** Create `main.py` โ€” FastAPI app with CORS, lifespan events, and `/metrics` endpoint +- [ ] **3.6.2** Create `schemas/requests.py` and `responses.py` with Pydantic models +- [ ] **3.6.3** Implement endpoints (`/health`, `/screeners`, `/predictions`, `/backtest`, `/symbols`) +- [ ] **3.6.4** Create `dependencies.py` โ€” DI for Settings, database, data providers +- [ ] **3.6.5** Add request/response logging middleware using structlog (ยง1.3) +- [ ] 
**3.6.6** Add `/metrics` Prometheus endpoint (ยง3.5) +- [ ] **3.6.7** Add `uvicorn` startup command +- [ ] **3.6.8** Write API integration tests with `TestClient` +- [ ] **3.6.9** Generate OpenAPI spec (`/docs`) and export `openapi.yaml` + +## ยง3.7 Security +- [ ] **3.7.1** Move email credentials from JSON to `.env` / `Settings` (ยง1.2) +- [ ] **3.7.2** Create `auth.py` with API key authentication for FastAPI +- [ ] **3.7.3** Add optional JWT authentication for multi-user scenarios +- [ ] **3.7.4** Create `rate_limiting.py` โ€” rate limit API endpoints +- [ ] **3.7.5** Add input validation on all API endpoints +- [ ] **3.7.6** Add audit logging for sensitive operations +- [ ] **3.7.7** Write security tests + +## ยง3.8 Scalability Foundation +- [ ] **3.8.1** Refactor data fetching to use `asyncio` + `aiohttp` +- [ ] **3.8.2** Refactor screener batch execution to use `concurrent.futures.ProcessPoolExecutor` +- [ ] **3.8.3** Add optional Celery task queue for long-running operations +- [ ] **3.8.4** Add `--async` flag for async data fetching +- [ ] **3.8.5** Add `docker-compose.yml` with Redis for Celery broker +- [ ] **3.8.6** Write tests verifying async download produces same results as sync + +## ยง3.9 Documentation +- [ ] **3.9.1** Set up MkDocs with `mkdocs-material` theme +- [ ] **3.9.2** Write `getting-started/` guides +- [ ] **3.9.3** Write `user-guide/` +- [ ] **3.9.4** Write `developer-guide/` +- [ ] **3.9.5** Export and include OpenAPI spec from FastAPI +- [ ] **3.9.6** Write `deployment/` +- [ ] **3.9.7** Add `mkdocs.yml` + +## Phase 3 Test Plan +- [ ] **3.T.1** Data provider chain fallback +- [ ] **3.T.2** Regime detector classification +- [ ] **3.T.3** Walk-forward integrity +- [ ] **3.T.4** API endpoints +- [ ] **3.T.5** Security mechanisms +- [ ] **3.T.6** Prometheus metrics +- [ ] **3.T.7** Async results +- [ ] **3.T.8** MkDocs build diff --git a/docs/system_architecture.md b/docs/system_architecture.md new file mode 100644 index 0000000..8a85287 --- 
/dev/null +++ b/docs/system_architecture.md @@ -0,0 +1,93 @@ +# System Architecture + +## Overview +Project Alpha is a modular stock screening and analysis system designed to identify trading opportunities using technical and fundamental indicators. The system allows for flexible screening, backtesting, and automated reporting. + +## High-Level Architecture + +```mermaid +graph TD + User[User / CLI] -->|Arguments| Controller[ProjectAlpha Controller] + + subgraph Data Layer + Download[Data Downloader] + Validate[Data Validator] + Cache[Data Cache / FS] + DB[(Local DB)] + end + + subgraph Analysis Layer + Tech[Technical Screeners] + Fund[Fundamental Filter] + Sent[Sentiment Filter] + Consensus[Consensus Engine] + end + + subgraph Output Layer + Charts[Chart Generator] + Reports[CSV/Console Reports] + end + + Controller --> Download + Download --> Validate + Validate --> Cache + Cache --> Tech + + Tech -->|Candidates| Fund + Tech -->|Candidates| Sent + + Tech --> Consensus + Fund --> Consensus + Sent --> Consensus + + Consensus -->|Ranked Results| Reports + Consensus -->|Top Picks| Charts +``` + +## Core Components + +### 1. Data Layer +Responsible for acquiring, validating, and storing market data. +- **Download**: Fetches OHLCV data from yfinance. +- **Validators**: Enforces data quality (no NaNs, positive prices, volume checks). +- **Repairs**: Automatically fixes minor data arithmetic errors. +- **Cache**: Stores processed data in `data/` directory to minimize API calls. + +### 2. Analysis Layer +The heart of the system, responsible for signal generation. +- **Screeners**: + - `BreakoutScreener`: Detects consolidation breakouts (ADX, ATR). + - `TrendlineScreener`: Identifies strong uptrends using linear regression. + - `VolatilityScreener`: TensorFlow-based volatility categorization. +- **Filters**: + - `FundamentalFilter`: Checks financial health (P/E, Debt/Equity) via Finnhub. + - `SentimentFilter`: Analyzes news headlines using FinBERT (HuggingFace). 
+- **Consensus Engine**: + - Aggregates signals from all screeners and filters. + - Calculates a weighted confidence score (0.0 - 1.0). + - Applies synergy bonuses for multi-strategy alignment. + +### 3. Output Layer +Responsible for presenting results to the user. +- **Console**: Rich text tables and progress bars. +- **Charts**: Matplotlib-based technical charts with indicators. +- **Reports**: CSV exports for offline analysis. + +## Data Flow + +1. **Initialization**: CLI args are parsed; configuration loaded from `settings.py`. +2. **Data Loading**: Market index symbols (e.g., S&P 500) are loaded. +3. **Data Fetching**: Historical data is downloaded and cached. +4. **Validation**: Data integrity is checked; bad data is rejected or repaired. +5. **Screening**: + - Technical screeners identify candidate stocks. + - Candidates are filtered by price/volume. +6. **Refinement**: + - Fundamental and Sentiment analysis run on candidates. + - Results cached to optimize performance. +7. **Consensus**: + - Signals aggregated and scored. + - Stocks ranked by consensus score. +8. **Reporting**: + - Top candidates displayed in console. + - Charts generated for visual verification. diff --git a/poetry.lock b/poetry.lock index ce15576..5b4f1fb 100644 --- a/poetry.lock +++ b/poetry.lock @@ -12,6 +12,65 @@ files = [ {file = "absl_py-2.4.0.tar.gz", hash = "sha256:8c6af82722b35cf71e0f4d1d47dcaebfff286e27110a99fc359349b247dfb5d4"}, ] +[[package]] +name = "annotated-doc" +version = "0.0.4" +description = "Document parameters, class attributes, return types, and variables inline, with Annotated."
+optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "annotated_doc-0.0.4-py3-none-any.whl", hash = "sha256:571ac1dc6991c450b25a9c2d84a3705e2ae7a53467b5d111c24fa8baabbed320"}, + {file = "annotated_doc-0.0.4.tar.gz", hash = "sha256:fbcda96e87e9c92ad167c2e53839e57503ecfda18804ea28102353485033faa4"}, +] + +[[package]] +name = "annotated-types" +version = "0.7.0" +description = "Reusable constraint types to use with typing.Annotated" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"}, + {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, +] + +[[package]] +name = "anyio" +version = "4.12.1" +description = "High-level concurrency and networking framework on top of asyncio or Trio" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "anyio-4.12.1-py3-none-any.whl", hash = "sha256:d405828884fc140aa80a3c667b8beed277f1dfedec42ba031bd6ac3db606ab6c"}, + {file = "anyio-4.12.1.tar.gz", hash = "sha256:41cfcc3a4c85d3f05c932da7c26d0201ac36f72abd4435ba90d0464a3ffed703"}, +] + +[package.dependencies] +idna = ">=2.8" +typing_extensions = {version = ">=4.5", markers = "python_version < \"3.13\""} + +[package.extras] +trio = ["trio (>=0.31.0) ; python_version < \"3.10\"", "trio (>=0.32.0) ; python_version >= \"3.10\""] + +[[package]] +name = "asttokens" +version = "3.0.1" +description = "Annotate AST trees with source code positions" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "asttokens-3.0.1-py3-none-any.whl", hash = "sha256:15a3ebc0f43c2d0a50eeafea25e19046c68398e487b9f1f5b517f7c0f40f976a"}, + {file = "asttokens-3.0.1.tar.gz", hash = "sha256:71a4ee5de0bde6a31d64f6b13f2293ac190344478f081c3d1bccfcf5eacb0cb7"}, +] + +[package.extras] +astroid = ["astroid 
(>=2,<5)"] +test = ["astroid (>=2,<5)", "pytest (<9.0)", "pytest-cov", "pytest-xdist"] + [[package]] name = "astunparse" version = "1.6.3" @@ -40,6 +99,28 @@ files = [ {file = "attrs-25.4.0.tar.gz", hash = "sha256:16d5969b87f0859ef33a48b35d55ac1be6e42ae49d5e853b597db70c35c57e11"}, ] +[[package]] +name = "backtesting" +version = "0.6.5" +description = "Backtest trading strategies in Python" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "backtesting-0.6.5-py3-none-any.whl", hash = "sha256:8ac2fa500c8fd83dc783b72957b600653a72687986fe3ca86d6ef6c8b8d74363"}, + {file = "backtesting-0.6.5.tar.gz", hash = "sha256:738a1dee28fc53df2eda35ea2f2d1a1c37ddba01df14223fc9e87d80a1efbc2e"}, +] + +[package.dependencies] +bokeh = "==3.1.* || >=3.3.dev0" +numpy = ">=1.17.0" +pandas = ">0.25.0" + +[package.extras] +dev = ["coverage", "flake8", "mypy"] +doc = ["ipykernel", "jupyter-client", "jupytext (>=1.3)", "nbconvert", "pdoc3"] +test = ["ipywidgets", "matplotlib", "sambo", "scikit-learn", "tqdm"] + [[package]] name = "beautifulsoup4" version = "4.14.3" @@ -63,16 +144,106 @@ charset-normalizer = ["charset-normalizer"] html5lib = ["html5lib"] lxml = ["lxml"] +[[package]] +name = "bokeh" +version = "3.8.2" +description = "Interactive plots and applications in the browser from Python" +optional = false +python-versions = ">=3.10" +groups = ["main"] +files = [ + {file = "bokeh-3.8.2-py3-none-any.whl", hash = "sha256:5e2c0d84f75acb25d60efb9e4d2f434a791c4639b47d685534194c4e07bd0111"}, + {file = "bokeh-3.8.2.tar.gz", hash = "sha256:8e7dcacc21d53905581b54328ad2705954f72f2997f99fc332c1de8da53aa3cc"}, +] + +[package.dependencies] +contourpy = ">=1.2" +Jinja2 = ">=2.9" +narwhals = ">=1.13" +numpy = ">=1.16" +packaging = ">=16.8" +pandas = ">=1.2" +pillow = ">=7.1.0" +PyYAML = ">=3.10" +tornado = {version = ">=6.2", markers = "sys_platform != \"emscripten\""} +xyzservices = ">=2021.09.1" + +[[package]] +name = "bottleneck" +version = "1.6.0" +description = 
"Fast NumPy array functions written in C" +optional = false +python-versions = ">=3.10" +groups = ["main"] +files = [ + {file = "bottleneck-1.6.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:40de6be68218ba32cd15addbf4ad7bbbf0075b5c5c4347c579aeae110a5c9a96"}, + {file = "bottleneck-1.6.0-cp310-cp310-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:1ad1882ba8c8da1f404de2610b45b05291e39eec56150270b03b5b25cf2bbb7f"}, + {file = "bottleneck-1.6.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f29b14b0ba5a816df6ab559add415c88ea8cf2146364e55f5f4c24ff7c85e494"}, + {file = "bottleneck-1.6.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:17c227ed361cf9a2ab3751a727620298faca9a1e33dd76711ae80834cf34b254"}, + {file = "bottleneck-1.6.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:d278b5633cea38bdae6eaf7df23d54ecb5e4db52f2ebc13fe40c0e738842f2a1"}, + {file = "bottleneck-1.6.0-cp310-cp310-win32.whl", hash = "sha256:26c87c2f6364d82b67eab7218f0346e9c42f336088ca4e19d77dc76eecf272fc"}, + {file = "bottleneck-1.6.0-cp310-cp310-win_amd64.whl", hash = "sha256:9d33bcd60a13d0603f5db9d953352a3c098242c46f8f919290fd11c54b42b9e5"}, + {file = "bottleneck-1.6.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:69ef4514782afe39db2497aaea93b1c167ab7ab3bc5e3930500ef9cf11841db7"}, + {file = "bottleneck-1.6.0-cp311-cp311-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:727363f99edc6dc83d52ed28224d4cb858c07a01c336c7499c0c2e5dd4fd3e4a"}, + {file = "bottleneck-1.6.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:847671a9e392220d1dfd2ff2524b4d61ec47b2a36ea78e169d2aa357fd9d933a"}, + {file = "bottleneck-1.6.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:daef2603ab7b4ec4f032bb54facf5fa92dacd3a264c2fd9677c9fc22bcb5a245"}, + {file = "bottleneck-1.6.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = 
"sha256:fc7f09bda980d967f2e9f1a746eda57479f824f66de0b92b9835c431a8c922d4"}, + {file = "bottleneck-1.6.0-cp311-cp311-win32.whl", hash = "sha256:1f78bad13ad190180f73cceb92d22f4101bde3d768f4647030089f704ae7cac7"}, + {file = "bottleneck-1.6.0-cp311-cp311-win_amd64.whl", hash = "sha256:8f2adef59fdb9edf2983fe3a4c07e5d1b677c43e5669f4711da2c3daad8321ad"}, + {file = "bottleneck-1.6.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3bb16a16a86a655fdbb34df672109a8a227bb5f9c9cf5bb8ae400a639bc52fa3"}, + {file = "bottleneck-1.6.0-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:0fbf5d0787af9aee6cef4db9cdd14975ce24bd02e0cc30155a51411ebe2ff35f"}, + {file = "bottleneck-1.6.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d08966f4a22384862258940346a72087a6f7cebb19038fbf3a3f6690ee7fd39f"}, + {file = "bottleneck-1.6.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:604f0b898b43b7bc631c564630e936a8759d2d952641c8b02f71e31dbcd9deaa"}, + {file = "bottleneck-1.6.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:d33720bad761e642abc18eda5f188ff2841191c9f63f9d0c052245decc0faeb9"}, + {file = "bottleneck-1.6.0-cp312-cp312-win32.whl", hash = "sha256:a1e5907ec2714efbe7075d9207b58c22ab6984a59102e4ecd78dced80dab8374"}, + {file = "bottleneck-1.6.0-cp312-cp312-win_amd64.whl", hash = "sha256:81e3822499f057a917b7d3972ebc631ac63c6bbcc79ad3542a66c4c40634e3a6"}, + {file = "bottleneck-1.6.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:d015de414ca016ebe56440bdf5d3d1204085080527a3c51f5b7b7a3e704fe6fd"}, + {file = "bottleneck-1.6.0-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:456757c9525b0b12356f472e38020ed4b76b18375fd76e055f8d33fb62956f5e"}, + {file = "bottleneck-1.6.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1c65254d51b6063c55f6272f175e867e2078342ae75f74be29d6612e9627b2c0"}, + 
{file = "bottleneck-1.6.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a172322895fbb79c6127474f1b0db0866895f0b804a18d5c6b841fea093927fe"}, + {file = "bottleneck-1.6.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:d5e81b642eb0d5a5bf00312598d7ed142d389728b694322a118c26813f3d1fa9"}, + {file = "bottleneck-1.6.0-cp313-cp313-win32.whl", hash = "sha256:543d3a89d22880cd322e44caff859af6c0489657bf9897977d1f5d3d3f77299c"}, + {file = "bottleneck-1.6.0-cp313-cp313-win_amd64.whl", hash = "sha256:48a44307d604ceb81e256903e5d57d3adb96a461b1d3c6a69baa2c67e823bd36"}, + {file = "bottleneck-1.6.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:547e6715115867c4657c9ae8cc5ddac1fec8fdad66690be3a322a7488721b06b"}, + {file = "bottleneck-1.6.0-cp313-cp313t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:5e4a4a6e05b6f014c307969129e10d1a0afd18f3a2c127b085532a4a76677aef"}, + {file = "bottleneck-1.6.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:2baae0d1589b4a520b2f9cf03528c0c8b20717b3f05675e212ec2200cf628f12"}, + {file = "bottleneck-1.6.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:2e407139b322f01d8d5b6b2e8091b810f48a25c7fa5c678cfcdc420dfe8aea0a"}, + {file = "bottleneck-1.6.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:1adefb89b92aba6de9c6ea871d99bcd29d519f4fb012cc5197917813b4fc2c7f"}, + {file = "bottleneck-1.6.0-cp313-cp313t-win32.whl", hash = "sha256:64b8690393494074923780f6abdf5f5577d844b9d9689725d1575a936e74e5f0"}, + {file = "bottleneck-1.6.0-cp313-cp313t-win_amd64.whl", hash = "sha256:cb67247f65dcdf62af947c76c6c8b77d9f0ead442cac0edbaa17850d6da4e48d"}, + {file = "bottleneck-1.6.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:98f1d789042511a0f042b3bdcd2903e8567e956d3aa3be189cce3746daeb8550"}, + {file = "bottleneck-1.6.0-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = 
"sha256:1fad24c99e39ad7623fc2a76d37feb26bd32e4dd170885edf4dbf4bfce2199a3"}, + {file = "bottleneck-1.6.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:643e61e50a6f993debc399b495a1609a55b3bd76b057e433e4089505d9f605c7"}, + {file = "bottleneck-1.6.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:fa668efbe4c6b200524ea0ebd537212da9b9801287138016fdf64119d6fcf201"}, + {file = "bottleneck-1.6.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:9f7dd35262e89e28fedd79d45022394b1fa1aceb61d2e747c6d6842e50546daa"}, + {file = "bottleneck-1.6.0-cp314-cp314-win32.whl", hash = "sha256:bd90bec3c470b7fdfafc2fbdcd7a1c55a4e57b5cdad88d40eea5bc9bab759bf1"}, + {file = "bottleneck-1.6.0-cp314-cp314-win_amd64.whl", hash = "sha256:b43b6d36a62ffdedc6368cf9a708e4d0a30d98656c2b5f33d88894e1bcfd6857"}, + {file = "bottleneck-1.6.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:53296707a8e195b5dcaa804b714bd222b5e446bd93cd496008122277eb43fa87"}, + {file = "bottleneck-1.6.0-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:d6df19cc48a83efd70f6d6874332aa31c3f5ca06a98b782449064abbd564cf0e"}, + {file = "bottleneck-1.6.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:96bb3a52cb3c0aadfedce3106f93ab940a49c9d35cd4ed612e031f6deb27e80f"}, + {file = "bottleneck-1.6.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:d1db9e831b69d5595b12e79aeb04cb02873db35576467c8dd26cdc1ee6b74581"}, + {file = "bottleneck-1.6.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:4dd7ac619570865fcb7a0e8925df418005f076286ad2c702dd0f447231d7a055"}, + {file = "bottleneck-1.6.0-cp314-cp314t-win32.whl", hash = "sha256:7fb694165df95d428fe00b98b9ea7d126ef786c4a4b7d43ae2530248396cadcb"}, + {file = "bottleneck-1.6.0-cp314-cp314t-win_amd64.whl", hash = "sha256:174b80930ce82bd8456c67f1abb28a5975c68db49d254783ce2cb6983b4fea40"}, + {file = 
"bottleneck-1.6.0.tar.gz", hash = "sha256:028d46ee4b025ad9ab4d79924113816f825f62b17b87c9e1d0d8ce144a4a0e31"}, +] + +[package.dependencies] +numpy = "*" + +[package.extras] +doc = ["gitpython", "numpydoc", "sphinx"] + [[package]] name = "certifi" -version = "2026.1.4" +version = "2025.11.12" description = "Python package for providing Mozilla's CA Bundle." optional = false python-versions = ">=3.7" groups = ["main"] files = [ - {file = "certifi-2026.1.4-py3-none-any.whl", hash = "sha256:9943707519e4add1115f44c2bc244f782c0249876bf51b6599fee1ffbedd685c"}, - {file = "certifi-2026.1.4.tar.gz", hash = "sha256:ac726dd470482006e014ad384921ed6438c457018f4b3d204aea4281258b2120"}, + {file = "certifi-2025.11.12-py3-none-any.whl", hash = "sha256:97de8790030bbd5c2d96b7ec782fc2f7820ef8dba6db909ccf95449f2d062d4b"}, + {file = "certifi-2025.11.12.tar.gz", hash = "sha256:d8ab5478f2ecd78af242878415affce761ca6bc54a22a27e026d7c25357c3316"}, ] [[package]] @@ -344,12 +515,12 @@ version = "0.4.6" description = "Cross-platform colored terminal text." 
optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" -groups = ["main"] -markers = "sys_platform == \"win32\" or platform_system == \"Windows\"" +groups = ["main", "dev"] files = [ {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, ] +markers = {main = "sys_platform == \"win32\" or platform_system == \"Windows\"", dev = "sys_platform == \"win32\""} [[package]] name = "contourpy" @@ -447,10 +618,9 @@ test-no-images = ["pytest", "pytest-cov", "pytest-rerunfailures", "pytest-xdist" name = "coverage" version = "7.13.3" description = "Code coverage measurement for Python" -optional = true +optional = false python-versions = ">=3.10" -groups = ["main"] -markers = "extra == \"dev\"" +groups = ["main", "dev"] files = [ {file = "coverage-7.13.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0b4f345f7265cdbdb5ec2521ffff15fa49de6d6c39abf89fc7ad68aa9e3a55f0"}, {file = "coverage-7.13.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:96c3be8bae9d0333e403cc1a8eb078a7f928b5650bae94a18fb4820cc993fb9b"}, @@ -549,6 +719,60 @@ files = [ [package.extras] toml = ["tomli ; python_full_version <= \"3.11.0a6\""] +[[package]] +name = "cuda-bindings" +version = "12.9.4" +description = "Python bindings for CUDA" +optional = false +python-versions = "*" +groups = ["main"] +markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" +files = [ + {file = "cuda_bindings-12.9.4-cp310-cp310-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a022c96b8bd847e8dc0675523431149a4c3e872f440e3002213dbb9e08f0331a"}, + {file = "cuda_bindings-12.9.4-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4d3c842c2a4303b2a580fe955018e31aea30278be19795ae05226235268032e5"}, + {file = 
"cuda_bindings-12.9.4-cp310-cp310-win_amd64.whl", hash = "sha256:f69107389e6b9948969bfd0a20c4f571fd1aefcfb1d2e1b72cc8ba5ecb7918ab"}, + {file = "cuda_bindings-12.9.4-cp311-cp311-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a6a429dc6c13148ff1e27c44f40a3dd23203823e637b87fd0854205195988306"}, + {file = "cuda_bindings-12.9.4-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c912a3d9e6b6651853eed8eed96d6800d69c08e94052c292fec3f282c5a817c9"}, + {file = "cuda_bindings-12.9.4-cp311-cp311-win_amd64.whl", hash = "sha256:443b0875916879c2e4c3722941e25e42d5ab9bcbf34c9e83404fb100fa1f6913"}, + {file = "cuda_bindings-12.9.4-cp312-cp312-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:694ba35023846625ef471257e6b5a4bc8af690f961d197d77d34b1d1db393f56"}, + {file = "cuda_bindings-12.9.4-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fda147a344e8eaeca0c6ff113d2851ffca8f7dfc0a6c932374ee5c47caa649c8"}, + {file = "cuda_bindings-12.9.4-cp312-cp312-win_amd64.whl", hash = "sha256:696ca75d249ddf287d01b9a698b8e2d8a05046495a9c051ca15659dc52d17615"}, + {file = "cuda_bindings-12.9.4-cp313-cp313-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:cf8bfaedc238f3b115d957d1fd6562b7e8435ba57f6d0e2f87d0e7149ccb2da5"}, + {file = "cuda_bindings-12.9.4-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:32bdc5a76906be4c61eb98f546a6786c5773a881f3b166486449b5d141e4a39f"}, + {file = "cuda_bindings-12.9.4-cp313-cp313-win_amd64.whl", hash = "sha256:a2e82c8985948f953c2be51df45c3fe11c812a928fca525154fb9503190b3e64"}, + {file = "cuda_bindings-12.9.4-cp313-cp313t-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3adf4958dcf68ae7801a59b73fb00a8b37f8d0595060d66ceae111b1002de38d"}, + {file = "cuda_bindings-12.9.4-cp313-cp313t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:56e0043c457a99ac473ddc926fe0dc4046694d99caef633e92601ab52cbe17eb"}, + {file = 
"cuda_bindings-12.9.4-cp313-cp313t-win_amd64.whl", hash = "sha256:b32d8b685f0e66f5658bcf4601ef034e89fc2843582886f0a58784a4302da06c"}, + {file = "cuda_bindings-12.9.4-cp314-cp314-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1f53a7f453d4b2643d8663d036bafe29b5ba89eb904c133180f295df6dc151e5"}, + {file = "cuda_bindings-12.9.4-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8b72ee72a9cc1b531db31eebaaee5c69a8ec3500e32c6933f2d3b15297b53686"}, + {file = "cuda_bindings-12.9.4-cp314-cp314-win_amd64.whl", hash = "sha256:53a10c71fdbdb743e0268d07964e5a996dd00b4e43831cbfce9804515d97d575"}, + {file = "cuda_bindings-12.9.4-cp314-cp314t-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:20f2699d61d724de3eb3f3369d57e2b245f93085cab44fd37c3bea036cea1a6f"}, + {file = "cuda_bindings-12.9.4-cp314-cp314t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d80bffc357df9988dca279734bc9674c3934a654cab10cadeed27ce17d8635ee"}, + {file = "cuda_bindings-12.9.4-cp314-cp314t-win_amd64.whl", hash = "sha256:53e11991a92ff6f26a0c8a98554cd5d6721c308a6b7bfb08bebac9201e039e43"}, + {file = "cuda_bindings-12.9.4-cp39-cp39-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:893ca68114b5b769c1d4c02583b91ed22691887c3ed513b59467d23540104db4"}, + {file = "cuda_bindings-12.9.4-cp39-cp39-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9866ceec83e39337d1a1d64837864c964ad902992478caa288a0bc1be95f21aa"}, + {file = "cuda_bindings-12.9.4-cp39-cp39-win_amd64.whl", hash = "sha256:37744e721a18a514423e81863f52a4f7f46f5a6f9cccd569f2735f8067f4d8c2"}, +] + +[package.dependencies] +cuda-pathfinder = ">=1.1,<2.0" + +[package.extras] +all = ["nvidia-cuda-nvcc-cu12", "nvidia-cuda-nvrtc-cu12", "nvidia-cufile-cu12 ; sys_platform == \"linux\"", "nvidia-nvjitlink-cu12 (>=12.3)"] +test = ["cython (>=3.1,<3.2)", "numpy (>=1.21.1)", "pyglet (>=2.1.9)", "pytest (>=6.2.4)", "pytest-benchmark (>=3.4.1)", "setuptools (>=77.0.0)"] 
+ +[[package]] +name = "cuda-pathfinder" +version = "1.3.4" +description = "Pathfinder for CUDA components" +optional = false +python-versions = ">=3.10" +groups = ["main"] +markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" +files = [ + {file = "cuda_pathfinder-1.3.4-py3-none-any.whl", hash = "sha256:fb983f6e0d43af27ef486e14d5989b5f904ef45cedf40538bfdcbffa6bb01fb2"}, +] + [[package]] name = "curl-cffi" version = "0.14.0" @@ -648,6 +872,73 @@ numpy = [ ] wrapt = ">=1.11.2" +[[package]] +name = "empyrical-reloaded" +version = "0.5.9" +description = "empyrical computes performance and risk statistics commonly used in quantitative finance" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "empyrical-reloaded-0.5.9.tar.gz", hash = "sha256:9d8163e301bdff411adef5e604547958a3a17122fc1274f6c9ae906f14ee5fb7"}, + {file = "empyrical_reloaded-0.5.9-py3-none-any.whl", hash = "sha256:d1ffd695f2f4685c385c1c70716adbee56a6283ddd077ba589e01c2f82955ec5"}, +] + +[package.dependencies] +bottleneck = ">=1.3.0" +numpy = ">=1.9.2" +pandas = ">=1.0.0" +pandas-datareader = ">=0.4" +scipy = ">=0.15.1" +yfinance = ">=0.1.63" + +[package.extras] +dev = ["black", "flake8 (>=3.9.1)", "pre-commit (>=2.12.1)"] +doc = ["Cython", "Sphinx (>=1.3.2)", "m2r2", "nbsphinx", "numpydoc (>=0.5.0)", "pydata-sphinx-theme", "sphinx-autobuild (>=0.6.0)", "sphinx-copybutton", "sphinx-markdown-tables"] +test = ["black", "flake8 (>=3.9.1)", "pytest (>=6.2.3)", "pytest-cov (>=2.11.1)", "tox (>=2.3.1)"] + +[[package]] +name = "executing" +version = "2.2.1" +description = "Get the currently executing AST node of a frame, and other information" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "executing-2.2.1-py2.py3-none-any.whl", hash = "sha256:760643d3452b4d777d295bb167ccc74c64a81df23fb5e08eff250c425a4b2017"}, + {file = "executing-2.2.1.tar.gz", hash = 
"sha256:3632cc370565f6648cc328b32435bd120a1e4ebb20c77e3fdde9a13cd1e533c4"}, +] + +[package.extras] +tests = ["asttokens (>=2.1.0)", "coverage", "coverage-enable-subprocess", "ipython", "littleutils", "pytest", "rich ; python_version >= \"3.11\""] + +[[package]] +name = "filelock" +version = "3.24.2" +description = "A platform independent file lock." +optional = false +python-versions = ">=3.10" +groups = ["main"] +files = [ + {file = "filelock-3.24.2-py3-none-any.whl", hash = "sha256:667d7dc0b7d1e1064dd5f8f8e80bdac157a6482e8d2e02cd16fd3b6b33bd6556"}, + {file = "filelock-3.24.2.tar.gz", hash = "sha256:c22803117490f156e59fafce621f0550a7a853e2bbf4f87f112b11d469b6c81b"}, +] + +[[package]] +name = "finnhub-python" +version = "2.4.27" +description = "Finnhub API" +optional = false +python-versions = "*" +groups = ["main"] +files = [ + {file = "finnhub_python-2.4.27-py3-none-any.whl", hash = "sha256:2e5bd70ea9c41c077224d9eaebaa2b9c66e0445d4b4d2768fee20a56ce2af07f"}, + {file = "finnhub_python-2.4.27.tar.gz", hash = "sha256:c4402f481005bbb48f72acfa14d7c91e95af1b1c8367ba2f4457eebe4e61b611"}, +] + +[package.dependencies] +requests = ">=2.22.0" + [[package]] name = "flatbuffers" version = "25.12.19" @@ -833,6 +1124,46 @@ files = [ {file = "frozendict-2.4.7.tar.gz", hash = "sha256:e478fb2a1391a56c8a6e10cc97c4a9002b410ecd1ac28c18d780661762e271bd"}, ] +[[package]] +name = "fsspec" +version = "2026.2.0" +description = "File-system specification" +optional = false +python-versions = ">=3.10" +groups = ["main"] +files = [ + {file = "fsspec-2026.2.0-py3-none-any.whl", hash = "sha256:98de475b5cb3bd66bedd5c4679e87b4fdfe1a3bf4d707b151b3c07e58c9a2437"}, + {file = "fsspec-2026.2.0.tar.gz", hash = "sha256:6544e34b16869f5aacd5b90bdf1a71acb37792ea3ddf6125ee69a22a53fb8bff"}, +] + +[package.extras] +abfs = ["adlfs"] +adl = ["adlfs"] +arrow = ["pyarrow (>=1)"] +dask = ["dask", "distributed"] +dev = ["pre-commit", "ruff (>=0.5)"] +doc = ["numpydoc", "sphinx", "sphinx-design", "sphinx-rtd-theme", 
"yarl"] +dropbox = ["dropbox", "dropboxdrivefs", "requests"] +full = ["adlfs", "aiohttp (!=4.0.0a0,!=4.0.0a1)", "dask", "distributed", "dropbox", "dropboxdrivefs", "fusepy", "gcsfs (>2024.2.0)", "libarchive-c", "ocifs", "panel", "paramiko", "pyarrow (>=1)", "pygit2", "requests", "s3fs (>2024.2.0)", "smbprotocol", "tqdm"] +fuse = ["fusepy"] +gcs = ["gcsfs (>2024.2.0)"] +git = ["pygit2"] +github = ["requests"] +gs = ["gcsfs"] +gui = ["panel"] +hdfs = ["pyarrow (>=1)"] +http = ["aiohttp (!=4.0.0a0,!=4.0.0a1)"] +libarchive = ["libarchive-c"] +oci = ["ocifs"] +s3 = ["s3fs (>2024.2.0)"] +sftp = ["paramiko"] +smb = ["smbprotocol"] +ssh = ["paramiko"] +test = ["aiohttp (!=4.0.0a0,!=4.0.0a1)", "numpy", "pytest", "pytest-asyncio (!=0.22.0)", "pytest-benchmark", "pytest-cov", "pytest-mock", "pytest-recording", "pytest-rerunfailures", "requests"] +test-downstream = ["aiobotocore (>=2.5.4,<3.0.0)", "dask[dataframe,test]", "moto[server] (>4,<5)", "pytest-timeout", "xarray"] +test-full = ["adlfs", "aiohttp (!=4.0.0a0,!=4.0.0a1)", "backports-zstd ; python_version < \"3.14\"", "cloudpickle", "dask", "distributed", "dropbox", "dropboxdrivefs", "fastparquet", "fusepy", "gcsfs", "jinja2", "kerchunk", "libarchive-c", "lz4", "notebook", "numpy", "ocifs", "pandas (<3.0.0)", "panel", "paramiko", "pyarrow", "pyarrow (>=1)", "pyftpdlib", "pygit2", "pytest", "pytest-asyncio (!=0.22.0)", "pytest-benchmark", "pytest-cov", "pytest-mock", "pytest-recording", "pytest-rerunfailures", "python-snappy", "requests", "smbprotocol", "tqdm", "urllib3", "zarr", "zstandard ; python_version < \"3.14\""] +tqdm = ["tqdm"] + [[package]] name = "gast" version = "0.7.0" @@ -938,6 +1269,18 @@ typing-extensions = ">=4.12,<5.0" [package.extras] protobuf = ["grpcio-tools (>=1.76.0)"] +[[package]] +name = "h11" +version = "0.16.0" +description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = 
"h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86"}, + {file = "h11-0.16.0.tar.gz", hash = "sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1"}, +] + [[package]] name = "h5py" version = "3.15.1" @@ -991,6 +1334,175 @@ files = [ [package.dependencies] numpy = ">=1.21.2" +[[package]] +name = "hf-xet" +version = "1.2.0" +description = "Fast transfer of large files with the Hugging Face Hub." +optional = false +python-versions = ">=3.8" +groups = ["main"] +markers = "platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"arm64\" or platform_machine == \"aarch64\"" +files = [ + {file = "hf_xet-1.2.0-cp313-cp313t-macosx_10_12_x86_64.whl", hash = "sha256:ceeefcd1b7aed4956ae8499e2199607765fbd1c60510752003b6cc0b8413b649"}, + {file = "hf_xet-1.2.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:b70218dd548e9840224df5638fdc94bd033552963cfa97f9170829381179c813"}, + {file = "hf_xet-1.2.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7d40b18769bb9a8bc82a9ede575ce1a44c75eb80e7375a01d76259089529b5dc"}, + {file = "hf_xet-1.2.0-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:cd3a6027d59cfb60177c12d6424e31f4b5ff13d8e3a1247b3a584bf8977e6df5"}, + {file = "hf_xet-1.2.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:6de1fc44f58f6dd937956c8d304d8c2dea264c80680bcfa61ca4a15e7b76780f"}, + {file = "hf_xet-1.2.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:f182f264ed2acd566c514e45da9f2119110e48a87a327ca271027904c70c5832"}, + {file = "hf_xet-1.2.0-cp313-cp313t-win_amd64.whl", hash = "sha256:293a7a3787e5c95d7be1857358a9130694a9c6021de3f27fa233f37267174382"}, + {file = "hf_xet-1.2.0-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:10bfab528b968c70e062607f663e21e34e2bba349e8038db546646875495179e"}, + {file = "hf_xet-1.2.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = 
"sha256:2a212e842647b02eb6a911187dc878e79c4aa0aa397e88dd3b26761676e8c1f8"}, + {file = "hf_xet-1.2.0-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:30e06daccb3a7d4c065f34fc26c14c74f4653069bb2b194e7f18f17cbe9939c0"}, + {file = "hf_xet-1.2.0-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:29c8fc913a529ec0a91867ce3d119ac1aac966e098cf49501800c870328cc090"}, + {file = "hf_xet-1.2.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:66e159cbfcfbb29f920db2c09ed8b660eb894640d284f102ada929b6e3dc410a"}, + {file = "hf_xet-1.2.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:9c91d5ae931510107f148874e9e2de8a16052b6f1b3ca3c1b12f15ccb491390f"}, + {file = "hf_xet-1.2.0-cp314-cp314t-win_amd64.whl", hash = "sha256:210d577732b519ac6ede149d2f2f34049d44e8622bf14eb3d63bbcd2d4b332dc"}, + {file = "hf_xet-1.2.0-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:46740d4ac024a7ca9b22bebf77460ff43332868b661186a8e46c227fdae01848"}, + {file = "hf_xet-1.2.0-cp37-abi3-macosx_11_0_arm64.whl", hash = "sha256:27df617a076420d8845bea087f59303da8be17ed7ec0cd7ee3b9b9f579dff0e4"}, + {file = "hf_xet-1.2.0-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3651fd5bfe0281951b988c0facbe726aa5e347b103a675f49a3fa8144c7968fd"}, + {file = "hf_xet-1.2.0-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:d06fa97c8562fb3ee7a378dd9b51e343bc5bc8190254202c9771029152f5e08c"}, + {file = "hf_xet-1.2.0-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:4c1428c9ae73ec0939410ec73023c4f842927f39db09b063b9482dac5a3bb737"}, + {file = "hf_xet-1.2.0-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:a55558084c16b09b5ed32ab9ed38421e2d87cf3f1f89815764d1177081b99865"}, + {file = "hf_xet-1.2.0-cp37-abi3-win_amd64.whl", hash = "sha256:e6584a52253f72c9f52f9e549d5895ca7a471608495c4ecaa6cc73dba2b24d69"}, + {file = "hf_xet-1.2.0.tar.gz", hash = "sha256:a8c27070ca547293b6890c4bf389f713f80e8c478631432962bb7f4bc0bd7d7f"}, +] + +[package.extras] +tests = 
["pytest"] + +[[package]] +name = "hmmlearn" +version = "0.3.3" +description = "Hidden Markov Models in Python with scikit-learn like API" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "hmmlearn-0.3.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:06abdc7f1905c1c10132e62d4e9d6bdf88bf17d267cae68b6ad834b0c2efef0b"}, + {file = "hmmlearn-0.3.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ad910c05fb42f4261b5cebd683adc44e270cfb53330d21a5ce2bd907df9703fe"}, + {file = "hmmlearn-0.3.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:31390af12828999c11c6bb6417b91ee5d07f5ee7a91cb116d8ca763d1f4d9552"}, + {file = "hmmlearn-0.3.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e6a807f79b5ecaaf57e899d0b876f76ac029026c4d9b2b2ba8c277205905e0f3"}, + {file = "hmmlearn-0.3.3-cp310-cp310-win_amd64.whl", hash = "sha256:1411f05a0241cea01791eb91eeecaad76c15a5f0b3de536d6e82a8424a2b0f86"}, + {file = "hmmlearn-0.3.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:0a174a1bf9db0971161c778b451ec8236a13c8fe9b0d1779e922179200f04de4"}, + {file = "hmmlearn-0.3.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:67af5823bc3b60b8cda7e9315f79b807aae9467fd7cb29aeafb789baa9b0f09e"}, + {file = "hmmlearn-0.3.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fea742fb093e722b8a75f21fe8648be135cdf62a497ae8d58bb2d798eadffe96"}, + {file = "hmmlearn-0.3.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:71a222a04d2b7c97299e9233342e2a446e218a740f2bcfa03eae88265d18424a"}, + {file = "hmmlearn-0.3.3-cp311-cp311-win_amd64.whl", hash = "sha256:ad627014efb7ec8bb94746ff4b89ddb5c30444fae1a9f8c14a512f8e40449c46"}, + {file = "hmmlearn-0.3.3-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:898e2fd51244db793659b4eac584871addaf6e57efe72121e753d57b3fb1128f"}, + {file = "hmmlearn-0.3.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = 
"sha256:86ad9fa4319588212c0bc7f7f9a050b01138f76b7f69d83f50d687fe4f9dcd28"}, + {file = "hmmlearn-0.3.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1128973a42fcf631579ef1b305e57e342aaf8769839a9899bff614ccc9d82c21"}, + {file = "hmmlearn-0.3.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6cd27dc66bfe7f6ff77804f7dbfe42b35d18e21403eb2bdfef0de6ad3bd2368f"}, + {file = "hmmlearn-0.3.3-cp312-cp312-win_amd64.whl", hash = "sha256:49ed0d2451d9884d0bdfbe84dd14b3a699382b00bed931d6a5ca46997978a1c4"}, + {file = "hmmlearn-0.3.3-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:4a6da5aa01bb9beeef311706b03c16d411f93ef68220ad442f2eca8ef39da0a6"}, + {file = "hmmlearn-0.3.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:41d8e2ae07ac5e9e48e693a4918e3a3717edd32328bd4b9d878c323c5358e625"}, + {file = "hmmlearn-0.3.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:11de9f8bc0bea1a70cdfeca9466682c2131b00da53d0898f3b1d79aadffa85a9"}, + {file = "hmmlearn-0.3.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:491f3bd9e3c01fa1201057e1de2c87039fed0e225ad562111ad4a0e25c40ef00"}, + {file = "hmmlearn-0.3.3-cp313-cp313-win_amd64.whl", hash = "sha256:8f39d658a6366cb839157bdd8a075c62a43d3175e0f453145b5f2cdcdc05b4e3"}, + {file = "hmmlearn-0.3.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:ade68e4dfed1e60233afe0169de101f6b670eb83ec66f3c0691230dd7276c0bf"}, + {file = "hmmlearn-0.3.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:517fdb17711181933b253f6d1970e2b2b6f2ec1bd7239de6f88d83c8e6a28fc8"}, + {file = "hmmlearn-0.3.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a1e5398e43141f70232de799a08d4a4eab19ca7db86f56cea8509f68857146e9"}, + {file = "hmmlearn-0.3.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1fcbf1438a5313b7da03ac380ff36d1578ebf515eb47bb269a1c1ecbf9bd59e2"}, + {file = 
"hmmlearn-0.3.3-cp38-cp38-win_amd64.whl", hash = "sha256:498ad4758e069dac03ec15a9c1424cae2b6fa0422139f910d89c8757fed1363a"}, + {file = "hmmlearn-0.3.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:132846bdaf77731987e5b49aeb290f2ff38db9562fb1100731a20cfcae0edfc4"}, + {file = "hmmlearn-0.3.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b1646234be8f8689c2af13529492322fe7681eb0de5ae2610d191035fe75ddf9"}, + {file = "hmmlearn-0.3.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:97b81ce4da69a70f16a16e3ab0060d89b44b51b7e8b18907a8a2bde8ea53f67d"}, + {file = "hmmlearn-0.3.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b5747d9542e8621dd7aae35f191e2d18e99148a71e88ab6867b51ae5b2044d98"}, + {file = "hmmlearn-0.3.3-cp39-cp39-win_amd64.whl", hash = "sha256:eebd93dca06b2ec6e7196a0b432118de3f4050bf7f75497d54d068d7e238c537"}, + {file = "hmmlearn-0.3.3.tar.gz", hash = "sha256:1d3c5dc4c5257e0c238dc1fe5387700b8cb987eab808edb3e0c73829f1cc44ec"}, +] + +[package.dependencies] +numpy = ">=1.10" +scikit-learn = ">=0.16,<0.22.0 || >0.22.0" +scipy = ">=0.19" + +[package.extras] +docs = ["matplotlib", "pydata-sphinx-theme", "sphinx (>=2.0)", "sphinx-gallery"] +tests = ["pytest"] + +[[package]] +name = "httpcore" +version = "1.0.9" +description = "A minimal low-level HTTP client." +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "httpcore-1.0.9-py3-none-any.whl", hash = "sha256:2d400746a40668fc9dec9810239072b40b4484b640a8c38fd654a024c7a1bf55"}, + {file = "httpcore-1.0.9.tar.gz", hash = "sha256:6e34463af53fd2ab5d807f399a9b45ea31c3dfa2276f15a2c3f00afff6e176e8"}, +] + +[package.dependencies] +certifi = "*" +h11 = ">=0.16" + +[package.extras] +asyncio = ["anyio (>=4.0,<5.0)"] +http2 = ["h2 (>=3,<5)"] +socks = ["socksio (==1.*)"] +trio = ["trio (>=0.22.0,<1.0)"] + +[[package]] +name = "httpx" +version = "0.28.1" +description = "The next generation HTTP client." 
+optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad"}, + {file = "httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc"}, +] + +[package.dependencies] +anyio = "*" +certifi = "*" +httpcore = "==1.*" +idna = "*" + +[package.extras] +brotli = ["brotli ; platform_python_implementation == \"CPython\"", "brotlicffi ; platform_python_implementation != \"CPython\""] +cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"] +http2 = ["h2 (>=3,<5)"] +socks = ["socksio (==1.*)"] +zstd = ["zstandard (>=0.18.0)"] + +[[package]] +name = "huggingface-hub" +version = "1.4.1" +description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub" +optional = false +python-versions = ">=3.9.0" +groups = ["main"] +files = [ + {file = "huggingface_hub-1.4.1-py3-none-any.whl", hash = "sha256:9931d075fb7a79af5abc487106414ec5fba2c0ae86104c0c62fd6cae38873d18"}, + {file = "huggingface_hub-1.4.1.tar.gz", hash = "sha256:b41131ec35e631e7383ab26d6146b8d8972abc8b6309b963b306fbcca87f5ed5"}, +] + +[package.dependencies] +filelock = "*" +fsspec = ">=2023.5.0" +hf-xet = {version = ">=1.2.0,<2.0.0", markers = "platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"arm64\" or platform_machine == \"aarch64\""} +httpx = ">=0.23.0,<1" +packaging = ">=20.9" +pyyaml = ">=5.1" +shellingham = "*" +tqdm = ">=4.42.1" +typer-slim = "*" +typing-extensions = ">=4.1.0" + +[package.extras] +all = ["Jinja2", "Pillow", "authlib (>=1.3.2)", "fastapi", "fastapi", "httpx", "itsdangerous", "jedi", "libcst (>=1.4.0)", "mypy (==1.15.0)", "numpy", "pytest (>=8.4.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures (<16.0)", "pytest-vcr", "pytest-xdist", "ruff (>=0.9.0)", 
"soundfile", "ty", "types-PyYAML", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] +dev = ["Jinja2", "Pillow", "authlib (>=1.3.2)", "fastapi", "fastapi", "httpx", "itsdangerous", "jedi", "libcst (>=1.4.0)", "mypy (==1.15.0)", "numpy", "pytest (>=8.4.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures (<16.0)", "pytest-vcr", "pytest-xdist", "ruff (>=0.9.0)", "soundfile", "ty", "types-PyYAML", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] +fastai = ["fastai (>=2.4)", "fastcore (>=1.3.27)", "toml"] +hf-xet = ["hf-xet (>=1.2.0,<2.0.0)"] +mcp = ["mcp (>=1.8.0)"] +oauth = ["authlib (>=1.3.2)", "fastapi", "httpx", "itsdangerous"] +quality = ["libcst (>=1.4.0)", "mypy (==1.15.0)", "ruff (>=0.9.0)", "ty"] +testing = ["Jinja2", "Pillow", "authlib (>=1.3.2)", "fastapi", "fastapi", "httpx", "itsdangerous", "jedi", "numpy", "pytest (>=8.4.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures (<16.0)", "pytest-vcr", "pytest-xdist", "soundfile", "urllib3 (<2.0)"] +torch = ["safetensors[torch]", "torch"] +typing = ["types-PyYAML", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)"] + [[package]] name = "idna" version = "3.11" @@ -1012,12 +1524,97 @@ version = "2.3.0" description = "brain-dead simple config-ini parsing" optional = false python-versions = ">=3.10" -groups = ["main"] +groups = ["main", "dev"] files = [ {file = "iniconfig-2.3.0-py3-none-any.whl", hash = "sha256:f631c04d2c48c52b84d0d0549c99ff3859c98df65b3101406327ecc7d53fbf12"}, {file = "iniconfig-2.3.0.tar.gz", hash = "sha256:c76315c77db068650d49c5b56314774a7804df16fee4402c1f19d6d15d8c4730"}, ] +[[package]] +name = "ipython" +version = "9.10.0" +description = "IPython: Productive Interactive Computing" +optional = false +python-versions = ">=3.11" +groups = ["main"] +files = 
[ + {file = "ipython-9.10.0-py3-none-any.whl", hash = "sha256:c6ab68cc23bba8c7e18e9b932797014cc61ea7fd6f19de180ab9ba73e65ee58d"}, + {file = "ipython-9.10.0.tar.gz", hash = "sha256:cd9e656be97618a0676d058134cd44e6dc7012c0e5cb36a9ce96a8c904adaf77"}, +] + +[package.dependencies] +colorama = {version = ">=0.4.4", markers = "sys_platform == \"win32\""} +decorator = ">=4.3.2" +ipython-pygments-lexers = ">=1.0.0" +jedi = ">=0.18.1" +matplotlib-inline = ">=0.1.5" +pexpect = {version = ">4.3", markers = "sys_platform != \"win32\" and sys_platform != \"emscripten\""} +prompt_toolkit = ">=3.0.41,<3.1.0" +pygments = ">=2.11.0" +stack_data = ">=0.6.0" +traitlets = ">=5.13.0" + +[package.extras] +all = ["argcomplete (>=3.0)", "ipython[doc,matplotlib,terminal,test,test-extra]"] +black = ["black"] +doc = ["docrepr", "exceptiongroup", "intersphinx_registry", "ipykernel", "ipython[matplotlib,test]", "setuptools (>=70.0)", "sphinx (>=8.0)", "sphinx-rtd-theme (>=0.1.8)", "sphinx_toml (==0.0.4)", "typing_extensions"] +matplotlib = ["matplotlib (>3.9)"] +test = ["packaging (>=20.1.0)", "pytest (>=7.0.0)", "pytest-asyncio (>=1.0.0)", "setuptools (>=61.2)", "testpath (>=0.2)"] +test-extra = ["curio", "ipykernel (>6.30)", "ipython[matplotlib]", "ipython[test]", "jupyter_ai", "nbclient", "nbformat", "numpy (>=1.27)", "pandas (>2.1)", "trio (>=0.1.0)"] + +[[package]] +name = "ipython-pygments-lexers" +version = "1.1.1" +description = "Defines a variety of Pygments lexers for highlighting IPython code." 
+optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "ipython_pygments_lexers-1.1.1-py3-none-any.whl", hash = "sha256:a9462224a505ade19a605f71f8fa63c2048833ce50abc86768a0d81d876dc81c"}, + {file = "ipython_pygments_lexers-1.1.1.tar.gz", hash = "sha256:09c0138009e56b6854f9535736f4171d855c8c08a563a0dcd8022f78355c7e81"}, +] + +[package.dependencies] +pygments = "*" + +[[package]] +name = "jedi" +version = "0.19.2" +description = "An autocompletion tool for Python that can be used for text editors." +optional = false +python-versions = ">=3.6" +groups = ["main"] +files = [ + {file = "jedi-0.19.2-py2.py3-none-any.whl", hash = "sha256:a8ef22bde8490f57fe5c7681a3c83cb58874daf72b4784de3cce5b6ef6edb5b9"}, + {file = "jedi-0.19.2.tar.gz", hash = "sha256:4770dc3de41bde3966b02eb84fbcf557fb33cce26ad23da12c742fb50ecb11f0"}, +] + +[package.dependencies] +parso = ">=0.8.4,<0.9.0" + +[package.extras] +docs = ["Jinja2 (==2.11.3)", "MarkupSafe (==1.1.1)", "Pygments (==2.8.1)", "alabaster (==0.7.12)", "babel (==2.9.1)", "chardet (==4.0.0)", "commonmark (==0.8.1)", "docutils (==0.17.1)", "future (==0.18.2)", "idna (==2.10)", "imagesize (==1.2.0)", "mock (==1.0.1)", "packaging (==20.9)", "pyparsing (==2.4.7)", "pytz (==2021.1)", "readthedocs-sphinx-ext (==2.1.4)", "recommonmark (==0.5.0)", "requests (==2.25.1)", "six (==1.15.0)", "snowballstemmer (==2.1.0)", "sphinx (==1.8.5)", "sphinx-rtd-theme (==0.4.3)", "sphinxcontrib-serializinghtml (==1.1.4)", "sphinxcontrib-websupport (==1.2.4)", "urllib3 (==1.26.4)"] +qa = ["flake8 (==5.0.4)", "mypy (==0.971)", "types-setuptools (==67.2.0.1)"] +testing = ["Django", "attrs", "colorama", "docopt", "pytest (<9.0.0)"] + +[[package]] +name = "jinja2" +version = "3.1.6" +description = "A very fast and expressive template engine." 
+optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "jinja2-3.1.6-py3-none-any.whl", hash = "sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67"}, + {file = "jinja2-3.1.6.tar.gz", hash = "sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d"}, +] + +[package.dependencies] +MarkupSafe = ">=2.0" + +[package.extras] +i18n = ["Babel (>=2.7)"] + [[package]] name = "joblib" version = "1.5.3" @@ -1214,6 +1811,162 @@ files = [ {file = "logistro-2.0.1.tar.gz", hash = "sha256:8446affc82bab2577eb02bfcbcae196ae03129287557287b6a070f70c1985047"}, ] +[[package]] +name = "lxml" +version = "6.0.2" +description = "Powerful and Pythonic XML processing library combining libxml2/libxslt with the ElementTree API." +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "lxml-6.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e77dd455b9a16bbd2a5036a63ddbd479c19572af81b624e79ef422f929eef388"}, + {file = "lxml-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5d444858b9f07cefff6455b983aea9a67f7462ba1f6cbe4a21e8bf6791bf2153"}, + {file = "lxml-6.0.2-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:f952dacaa552f3bb8834908dddd500ba7d508e6ea6eb8c52eb2d28f48ca06a31"}, + {file = "lxml-6.0.2-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:71695772df6acea9f3c0e59e44ba8ac50c4f125217e84aab21074a1a55e7e5c9"}, + {file = "lxml-6.0.2-cp310-cp310-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:17f68764f35fd78d7c4cc4ef209a184c38b65440378013d24b8aecd327c3e0c8"}, + {file = "lxml-6.0.2-cp310-cp310-manylinux_2_26_i686.manylinux_2_28_i686.whl", hash = "sha256:058027e261afed589eddcfe530fcc6f3402d7fd7e89bfd0532df82ebc1563dba"}, + {file = "lxml-6.0.2-cp310-cp310-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a8ffaeec5dfea5881d4c9d8913a32d10cfe3923495386106e4a24d45300ef79c"}, + {file = 
"lxml-6.0.2-cp310-cp310-manylinux_2_31_armv7l.whl", hash = "sha256:f2e3b1a6bb38de0bc713edd4d612969dd250ca8b724be8d460001a387507021c"}, + {file = "lxml-6.0.2-cp310-cp310-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:d6690ec5ec1cce0385cb20896b16be35247ac8c2046e493d03232f1c2414d321"}, + {file = "lxml-6.0.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:f2a50c3c1d11cad0ebebbac357a97b26aa79d2bcaf46f256551152aa85d3a4d1"}, + {file = "lxml-6.0.2-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:3efe1b21c7801ffa29a1112fab3b0f643628c30472d507f39544fd48e9549e34"}, + {file = "lxml-6.0.2-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:59c45e125140b2c4b33920d21d83681940ca29f0b83f8629ea1a2196dc8cfe6a"}, + {file = "lxml-6.0.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:452b899faa64f1805943ec1c0c9ebeaece01a1af83e130b69cdefeda180bb42c"}, + {file = "lxml-6.0.2-cp310-cp310-win32.whl", hash = "sha256:1e786a464c191ca43b133906c6903a7e4d56bef376b75d97ccbb8ec5cf1f0a4b"}, + {file = "lxml-6.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:dacf3c64ef3f7440e3167aa4b49aa9e0fb99e0aa4f9ff03795640bf94531bcb0"}, + {file = "lxml-6.0.2-cp310-cp310-win_arm64.whl", hash = "sha256:45f93e6f75123f88d7f0cfd90f2d05f441b808562bf0bc01070a00f53f5028b5"}, + {file = "lxml-6.0.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:13e35cbc684aadf05d8711a5d1b5857c92e5e580efa9a0d2be197199c8def607"}, + {file = "lxml-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3b1675e096e17c6fe9c0e8c81434f5736c0739ff9ac6123c87c2d452f48fc938"}, + {file = "lxml-6.0.2-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:8ac6e5811ae2870953390452e3476694196f98d447573234592d30488147404d"}, + {file = "lxml-6.0.2-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:5aa0fc67ae19d7a64c3fe725dc9a1bb11f80e01f78289d05c6f62545affec438"}, + {file = "lxml-6.0.2-cp311-cp311-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = 
"sha256:de496365750cc472b4e7902a485d3f152ecf57bd3ba03ddd5578ed8ceb4c5964"}, + {file = "lxml-6.0.2-cp311-cp311-manylinux_2_26_i686.manylinux_2_28_i686.whl", hash = "sha256:200069a593c5e40b8f6fc0d84d86d970ba43138c3e68619ffa234bc9bb806a4d"}, + {file = "lxml-6.0.2-cp311-cp311-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7d2de809c2ee3b888b59f995625385f74629707c9355e0ff856445cdcae682b7"}, + {file = "lxml-6.0.2-cp311-cp311-manylinux_2_31_armv7l.whl", hash = "sha256:b2c3da8d93cf5db60e8858c17684c47d01fee6405e554fb55018dd85fc23b178"}, + {file = "lxml-6.0.2-cp311-cp311-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:442de7530296ef5e188373a1ea5789a46ce90c4847e597856570439621d9c553"}, + {file = "lxml-6.0.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:2593c77efde7bfea7f6389f1ab249b15ed4aa5bc5cb5131faa3b843c429fbedb"}, + {file = "lxml-6.0.2-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:3e3cb08855967a20f553ff32d147e14329b3ae70ced6edc2f282b94afbc74b2a"}, + {file = "lxml-6.0.2-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:2ed6c667fcbb8c19c6791bbf40b7268ef8ddf5a96940ba9404b9f9a304832f6c"}, + {file = "lxml-6.0.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b8f18914faec94132e5b91e69d76a5c1d7b0c73e2489ea8929c4aaa10b76bbf7"}, + {file = "lxml-6.0.2-cp311-cp311-win32.whl", hash = "sha256:6605c604e6daa9e0d7f0a2137bdc47a2e93b59c60a65466353e37f8272f47c46"}, + {file = "lxml-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e5867f2651016a3afd8dd2c8238baa66f1e2802f44bc17e236f547ace6647078"}, + {file = "lxml-6.0.2-cp311-cp311-win_arm64.whl", hash = "sha256:4197fb2534ee05fd3e7afaab5d8bfd6c2e186f65ea7f9cd6a82809c887bd1285"}, + {file = "lxml-6.0.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:a59f5448ba2ceccd06995c95ea59a7674a10de0810f2ce90c9006f3cbc044456"}, + {file = "lxml-6.0.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:e8113639f3296706fbac34a30813929e29247718e88173ad849f57ca59754924"}, + {file = 
"lxml-6.0.2-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:a8bef9b9825fa8bc816a6e641bb67219489229ebc648be422af695f6e7a4fa7f"}, + {file = "lxml-6.0.2-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:65ea18d710fd14e0186c2f973dc60bb52039a275f82d3c44a0e42b43440ea534"}, + {file = "lxml-6.0.2-cp312-cp312-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c371aa98126a0d4c739ca93ceffa0fd7a5d732e3ac66a46e74339acd4d334564"}, + {file = "lxml-6.0.2-cp312-cp312-manylinux_2_26_i686.manylinux_2_28_i686.whl", hash = "sha256:700efd30c0fa1a3581d80a748157397559396090a51d306ea59a70020223d16f"}, + {file = "lxml-6.0.2-cp312-cp312-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:c33e66d44fe60e72397b487ee92e01da0d09ba2d66df8eae42d77b6d06e5eba0"}, + {file = "lxml-6.0.2-cp312-cp312-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:90a345bbeaf9d0587a3aaffb7006aa39ccb6ff0e96a57286c0cb2fd1520ea192"}, + {file = "lxml-6.0.2-cp312-cp312-manylinux_2_31_armv7l.whl", hash = "sha256:064fdadaf7a21af3ed1dcaa106b854077fbeada827c18f72aec9346847cd65d0"}, + {file = "lxml-6.0.2-cp312-cp312-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:fbc74f42c3525ac4ffa4b89cbdd00057b6196bcefe8bce794abd42d33a018092"}, + {file = "lxml-6.0.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6ddff43f702905a4e32bc24f3f2e2edfe0f8fde3277d481bffb709a4cced7a1f"}, + {file = "lxml-6.0.2-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:6da5185951d72e6f5352166e3da7b0dc27aa70bd1090b0eb3f7f7212b53f1bb8"}, + {file = "lxml-6.0.2-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:57a86e1ebb4020a38d295c04fc79603c7899e0df71588043eb218722dabc087f"}, + {file = "lxml-6.0.2-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:2047d8234fe735ab77802ce5f2297e410ff40f5238aec569ad7c8e163d7b19a6"}, + {file = "lxml-6.0.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = 
"sha256:6f91fd2b2ea15a6800c8e24418c0775a1694eefc011392da73bc6cef2623b322"}, + {file = "lxml-6.0.2-cp312-cp312-win32.whl", hash = "sha256:3ae2ce7d6fedfb3414a2b6c5e20b249c4c607f72cb8d2bb7cc9c6ec7c6f4e849"}, + {file = "lxml-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:72c87e5ee4e58a8354fb9c7c84cbf95a1c8236c127a5d1b7683f04bed8361e1f"}, + {file = "lxml-6.0.2-cp312-cp312-win_arm64.whl", hash = "sha256:61cb10eeb95570153e0c0e554f58df92ecf5109f75eacad4a95baa709e26c3d6"}, + {file = "lxml-6.0.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:9b33d21594afab46f37ae58dfadd06636f154923c4e8a4d754b0127554eb2e77"}, + {file = "lxml-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:6c8963287d7a4c5c9a432ff487c52e9c5618667179c18a204bdedb27310f022f"}, + {file = "lxml-6.0.2-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:1941354d92699fb5ffe6ed7b32f9649e43c2feb4b97205f75866f7d21aa91452"}, + {file = "lxml-6.0.2-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:bb2f6ca0ae2d983ded09357b84af659c954722bbf04dea98030064996d156048"}, + {file = "lxml-6.0.2-cp313-cp313-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:eb2a12d704f180a902d7fa778c6d71f36ceb7b0d317f34cdc76a5d05aa1dd1df"}, + {file = "lxml-6.0.2-cp313-cp313-manylinux_2_26_i686.manylinux_2_28_i686.whl", hash = "sha256:6ec0e3f745021bfed19c456647f0298d60a24c9ff86d9d051f52b509663feeb1"}, + {file = "lxml-6.0.2-cp313-cp313-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:846ae9a12d54e368933b9759052d6206a9e8b250291109c48e350c1f1f49d916"}, + {file = "lxml-6.0.2-cp313-cp313-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ef9266d2aa545d7374938fb5c484531ef5a2ec7f2d573e62f8ce722c735685fd"}, + {file = "lxml-6.0.2-cp313-cp313-manylinux_2_31_armv7l.whl", hash = "sha256:4077b7c79f31755df33b795dc12119cb557a0106bfdab0d2c2d97bd3cf3dffa6"}, + {file = "lxml-6.0.2-cp313-cp313-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash 
= "sha256:a7c5d5e5f1081955358533be077166ee97ed2571d6a66bdba6ec2f609a715d1a"}, + {file = "lxml-6.0.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:8f8d0cbd0674ee89863a523e6994ac25fd5be9c8486acfc3e5ccea679bad2679"}, + {file = "lxml-6.0.2-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:2cbcbf6d6e924c28f04a43f3b6f6e272312a090f269eff68a2982e13e5d57659"}, + {file = "lxml-6.0.2-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:dfb874cfa53340009af6bdd7e54ebc0d21012a60a4e65d927c2e477112e63484"}, + {file = "lxml-6.0.2-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:fb8dae0b6b8b7f9e96c26fdd8121522ce5de9bb5538010870bd538683d30e9a2"}, + {file = "lxml-6.0.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:358d9adae670b63e95bc59747c72f4dc97c9ec58881d4627fe0120da0f90d314"}, + {file = "lxml-6.0.2-cp313-cp313-win32.whl", hash = "sha256:e8cd2415f372e7e5a789d743d133ae474290a90b9023197fd78f32e2dc6873e2"}, + {file = "lxml-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:b30d46379644fbfc3ab81f8f82ae4de55179414651f110a1514f0b1f8f6cb2d7"}, + {file = "lxml-6.0.2-cp313-cp313-win_arm64.whl", hash = "sha256:13dcecc9946dca97b11b7c40d29fba63b55ab4170d3c0cf8c0c164343b9bfdcf"}, + {file = "lxml-6.0.2-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:b0c732aa23de8f8aec23f4b580d1e52905ef468afb4abeafd3fec77042abb6fe"}, + {file = "lxml-6.0.2-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:4468e3b83e10e0317a89a33d28f7aeba1caa4d1a6fd457d115dd4ffe90c5931d"}, + {file = "lxml-6.0.2-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:abd44571493973bad4598a3be7e1d807ed45aa2adaf7ab92ab7c62609569b17d"}, + {file = "lxml-6.0.2-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:370cd78d5855cfbffd57c422851f7d3864e6ae72d0da615fca4dad8c45d375a5"}, + {file = "lxml-6.0.2-cp314-cp314-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:901e3b4219fa04ef766885fb40fa516a71662a4c61b80c94d25336b4934b71c0"}, + {file = 
"lxml-6.0.2-cp314-cp314-manylinux_2_26_i686.manylinux_2_28_i686.whl", hash = "sha256:a4bf42d2e4cf52c28cc1812d62426b9503cdb0c87a6de81442626aa7d69707ba"}, + {file = "lxml-6.0.2-cp314-cp314-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:b2c7fdaa4d7c3d886a42534adec7cfac73860b89b4e5298752f60aa5984641a0"}, + {file = "lxml-6.0.2-cp314-cp314-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:98a5e1660dc7de2200b00d53fa00bcd3c35a3608c305d45a7bbcaf29fa16e83d"}, + {file = "lxml-6.0.2-cp314-cp314-manylinux_2_31_armv7l.whl", hash = "sha256:dc051506c30b609238d79eda75ee9cab3e520570ec8219844a72a46020901e37"}, + {file = "lxml-6.0.2-cp314-cp314-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:8799481bbdd212470d17513a54d568f44416db01250f49449647b5ab5b5dccb9"}, + {file = "lxml-6.0.2-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:9261bb77c2dab42f3ecd9103951aeca2c40277701eb7e912c545c1b16e0e4917"}, + {file = "lxml-6.0.2-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:65ac4a01aba353cfa6d5725b95d7aed6356ddc0a3cd734de00124d285b04b64f"}, + {file = "lxml-6.0.2-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:b22a07cbb82fea98f8a2fd814f3d1811ff9ed76d0fc6abc84eb21527596e7cc8"}, + {file = "lxml-6.0.2-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:d759cdd7f3e055d6bc8d9bec3ad905227b2e4c785dc16c372eb5b5e83123f48a"}, + {file = "lxml-6.0.2-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:945da35a48d193d27c188037a05fec5492937f66fb1958c24fc761fb9d40d43c"}, + {file = "lxml-6.0.2-cp314-cp314-win32.whl", hash = "sha256:be3aaa60da67e6153eb15715cc2e19091af5dc75faef8b8a585aea372507384b"}, + {file = "lxml-6.0.2-cp314-cp314-win_amd64.whl", hash = "sha256:fa25afbadead523f7001caf0c2382afd272c315a033a7b06336da2637d92d6ed"}, + {file = "lxml-6.0.2-cp314-cp314-win_arm64.whl", hash = "sha256:063eccf89df5b24e361b123e257e437f9e9878f425ee9aae3144c77faf6da6d8"}, + {file = "lxml-6.0.2-cp314-cp314t-macosx_10_13_universal2.whl", hash = 
"sha256:6162a86d86893d63084faaf4ff937b3daea233e3682fb4474db07395794fa80d"}, + {file = "lxml-6.0.2-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:414aaa94e974e23a3e92e7ca5b97d10c0cf37b6481f50911032c69eeb3991bba"}, + {file = "lxml-6.0.2-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:48461bd21625458dd01e14e2c38dd0aea69addc3c4f960c30d9f59d7f93be601"}, + {file = "lxml-6.0.2-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:25fcc59afc57d527cfc78a58f40ab4c9b8fd096a9a3f964d2781ffb6eb33f4ed"}, + {file = "lxml-6.0.2-cp314-cp314t-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5179c60288204e6ddde3f774a93350177e08876eaf3ab78aa3a3649d43eb7d37"}, + {file = "lxml-6.0.2-cp314-cp314t-manylinux_2_26_i686.manylinux_2_28_i686.whl", hash = "sha256:967aab75434de148ec80597b75062d8123cadf2943fb4281f385141e18b21338"}, + {file = "lxml-6.0.2-cp314-cp314t-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:d100fcc8930d697c6561156c6810ab4a508fb264c8b6779e6e61e2ed5e7558f9"}, + {file = "lxml-6.0.2-cp314-cp314t-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2ca59e7e13e5981175b8b3e4ab84d7da57993eeff53c07764dcebda0d0e64ecd"}, + {file = "lxml-6.0.2-cp314-cp314t-manylinux_2_31_armv7l.whl", hash = "sha256:957448ac63a42e2e49531b9d6c0fa449a1970dbc32467aaad46f11545be9af1d"}, + {file = "lxml-6.0.2-cp314-cp314t-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:b7fc49c37f1786284b12af63152fe1d0990722497e2d5817acfe7a877522f9a9"}, + {file = "lxml-6.0.2-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:e19e0643cc936a22e837f79d01a550678da8377d7d801a14487c10c34ee49c7e"}, + {file = "lxml-6.0.2-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:1db01e5cf14345628e0cbe71067204db658e2fb8e51e7f33631f5f4735fefd8d"}, + {file = "lxml-6.0.2-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:875c6b5ab39ad5291588aed6925fac99d0097af0dd62f33c7b43736043d4a2ec"}, + {file = 
"lxml-6.0.2-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:cdcbed9ad19da81c480dfd6dd161886db6096083c9938ead313d94b30aadf272"}, + {file = "lxml-6.0.2-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:80dadc234ebc532e09be1975ff538d154a7fa61ea5031c03d25178855544728f"}, + {file = "lxml-6.0.2-cp314-cp314t-win32.whl", hash = "sha256:da08e7bb297b04e893d91087df19638dc7a6bb858a954b0cc2b9f5053c922312"}, + {file = "lxml-6.0.2-cp314-cp314t-win_amd64.whl", hash = "sha256:252a22982dca42f6155125ac76d3432e548a7625d56f5a273ee78a5057216eca"}, + {file = "lxml-6.0.2-cp314-cp314t-win_arm64.whl", hash = "sha256:bb4c1847b303835d89d785a18801a883436cdfd5dc3d62947f9c49e24f0f5a2c"}, + {file = "lxml-6.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:a656ca105115f6b766bba324f23a67914d9c728dafec57638e2b92a9dcd76c62"}, + {file = "lxml-6.0.2-cp38-cp38-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c54d83a2188a10ebdba573f16bd97135d06c9ef60c3dc495315c7a28c80a263f"}, + {file = "lxml-6.0.2-cp38-cp38-manylinux_2_26_i686.manylinux_2_28_i686.whl", hash = "sha256:1ea99340b3c729beea786f78c38f60f4795622f36e305d9c9be402201efdc3b7"}, + {file = "lxml-6.0.2-cp38-cp38-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:af85529ae8d2a453feee4c780d9406a5e3b17cee0dd75c18bd31adcd584debc3"}, + {file = "lxml-6.0.2-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:fe659f6b5d10fb5a17f00a50eb903eb277a71ee35df4615db573c069bcf967ac"}, + {file = "lxml-6.0.2-cp38-cp38-win32.whl", hash = "sha256:5921d924aa5468c939d95c9814fa9f9b5935a6ff4e679e26aaf2951f74043512"}, + {file = "lxml-6.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:0aa7070978f893954008ab73bb9e3c24a7c56c054e00566a21b553dc18105fca"}, + {file = "lxml-6.0.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:2c8458c2cdd29589a8367c09c8f030f1d202be673f0ca224ec18590b3b9fb694"}, + {file = "lxml-6.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3fee0851639d06276e6b387f1c190eb9d7f06f7f53514e966b26bae46481ec90"}, + 
{file = "lxml-6.0.2-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:b2142a376b40b6736dfc214fd2902409e9e3857eff554fed2d3c60f097e62a62"}, + {file = "lxml-6.0.2-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:a6b5b39cc7e2998f968f05309e666103b53e2edd01df8dc51b90d734c0825444"}, + {file = "lxml-6.0.2-cp39-cp39-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d4aec24d6b72ee457ec665344a29acb2d35937d5192faebe429ea02633151aad"}, + {file = "lxml-6.0.2-cp39-cp39-manylinux_2_26_i686.manylinux_2_28_i686.whl", hash = "sha256:b42f4d86b451c2f9d06ffb4f8bbc776e04df3ba070b9fe2657804b1b40277c48"}, + {file = "lxml-6.0.2-cp39-cp39-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6cdaefac66e8b8f30e37a9b4768a391e1f8a16a7526d5bc77a7928408ef68e93"}, + {file = "lxml-6.0.2-cp39-cp39-manylinux_2_31_armv7l.whl", hash = "sha256:b738f7e648735714bbb82bdfd030203360cfeab7f6e8a34772b3c8c8b820568c"}, + {file = "lxml-6.0.2-cp39-cp39-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:daf42de090d59db025af61ce6bdb2521f0f102ea0e6ea310f13c17610a97da4c"}, + {file = "lxml-6.0.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:66328dabea70b5ba7e53d94aa774b733cf66686535f3bc9250a7aab53a91caaf"}, + {file = "lxml-6.0.2-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:e237b807d68a61fc3b1e845407e27e5eb8ef69bc93fe8505337c1acb4ee300b6"}, + {file = "lxml-6.0.2-cp39-cp39-musllinux_1_2_riscv64.whl", hash = "sha256:ac02dc29fd397608f8eb15ac1610ae2f2f0154b03f631e6d724d9e2ad4ee2c84"}, + {file = "lxml-6.0.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:817ef43a0c0b4a77bd166dc9a09a555394105ff3374777ad41f453526e37f9cb"}, + {file = "lxml-6.0.2-cp39-cp39-win32.whl", hash = "sha256:bc532422ff26b304cfb62b328826bd995c96154ffd2bac4544f37dbb95ecaa8f"}, + {file = "lxml-6.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:995e783eb0374c120f528f807443ad5a83a656a8624c467ea73781fc5f8a8304"}, + {file = 
"lxml-6.0.2-cp39-cp39-win_arm64.whl", hash = "sha256:08b9d5e803c2e4725ae9e8559ee880e5328ed61aa0935244e0515d7d9dbec0aa"}, + {file = "lxml-6.0.2-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:e748d4cf8fef2526bb2a589a417eba0c8674e29ffcb570ce2ceca44f1e567bf6"}, + {file = "lxml-6.0.2-pp310-pypy310_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:4ddb1049fa0579d0cbd00503ad8c58b9ab34d1254c77bc6a5576d96ec7853dba"}, + {file = "lxml-6.0.2-pp310-pypy310_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:cb233f9c95f83707dae461b12b720c1af9c28c2d19208e1be03387222151daf5"}, + {file = "lxml-6.0.2-pp310-pypy310_pp73-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:bc456d04db0515ce3320d714a1eac7a97774ff0849e7718b492d957da4631dd4"}, + {file = "lxml-6.0.2-pp310-pypy310_pp73-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2613e67de13d619fd283d58bda40bff0ee07739f624ffee8b13b631abf33083d"}, + {file = "lxml-6.0.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:24a8e756c982c001ca8d59e87c80c4d9dcd4d9b44a4cbeb8d9be4482c514d41d"}, + {file = "lxml-6.0.2-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:1c06035eafa8404b5cf475bb37a9f6088b0aca288d4ccc9d69389750d5543700"}, + {file = "lxml-6.0.2-pp311-pypy311_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:c7d13103045de1bdd6fe5d61802565f1a3537d70cd3abf596aa0af62761921ee"}, + {file = "lxml-6.0.2-pp311-pypy311_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:0a3c150a95fbe5ac91de323aa756219ef9cf7fde5a3f00e2281e30f33fa5fa4f"}, + {file = "lxml-6.0.2-pp311-pypy311_pp73-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:60fa43be34f78bebb27812ed90f1925ec99560b0fa1decdb7d12b84d857d31e9"}, + {file = "lxml-6.0.2-pp311-pypy311_pp73-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:21c73b476d3cfe836be731225ec3421fa2f048d84f6df6a8e70433dff1376d5a"}, + {file = 
"lxml-6.0.2-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:27220da5be049e936c3aca06f174e8827ca6445a4353a1995584311487fc4e3e"}, + {file = "lxml-6.0.2.tar.gz", hash = "sha256:cd79f3367bd74b317dda655dc8fcfa304d9eb6e4fb06b7168c5cf27f96e0cd62"}, +] + +[package.extras] +cssselect = ["cssselect (>=0.7)"] +html-clean = ["lxml_html_clean"] +html5 = ["html5lib"] +htmlsoup = ["BeautifulSoup4"] + [[package]] name = "markdown" version = "3.10.1" @@ -1433,20 +2186,38 @@ python-dateutil = ">=2.7" dev = ["meson-python (>=0.13.1,<0.17.0)", "pybind11 (>=2.13.2,!=2.13.3)", "setuptools (>=64)", "setuptools_scm (>=7)"] [[package]] -name = "mdurl" -version = "0.1.2" -description = "Markdown URL utilities" +name = "matplotlib-inline" +version = "0.2.1" +description = "Inline Matplotlib backend for Jupyter" optional = false -python-versions = ">=3.7" +python-versions = ">=3.9" groups = ["main"] files = [ - {file = "mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8"}, - {file = "mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"}, + {file = "matplotlib_inline-0.2.1-py3-none-any.whl", hash = "sha256:d56ce5156ba6085e00a9d54fead6ed29a9c47e215cd1bba2e976ef39f5710a76"}, + {file = "matplotlib_inline-0.2.1.tar.gz", hash = "sha256:e1ee949c340d771fc39e241ea75683deb94762c8fa5f2927ec57c83c4dffa9fe"}, ] -[[package]] -name = "ml-dtypes" -version = "0.5.4" +[package.dependencies] +traitlets = "*" + +[package.extras] +test = ["flake8", "nbdime", "nbval", "notebook", "pytest"] + +[[package]] +name = "mdurl" +version = "0.1.2" +description = "Markdown URL utilities" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8"}, + {file = "mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"}, +] + +[[package]] 
+name = "ml-dtypes" +version = "0.5.4" description = "ml_dtypes is a stand-alone implementation of several NumPy dtype extensions used in machine learning." optional = false python-versions = ">=3.9" @@ -1502,6 +2273,24 @@ numpy = [ [package.extras] dev = ["absl-py", "pyink", "pylint (>=2.6.0)", "pytest", "pytest-xdist"] +[[package]] +name = "mpmath" +version = "1.3.0" +description = "Python library for arbitrary-precision floating-point arithmetic" +optional = false +python-versions = "*" +groups = ["main"] +files = [ + {file = "mpmath-1.3.0-py3-none-any.whl", hash = "sha256:a0b2b9fe80bbcd81a6647ff13108738cfb482d481d826cc0e02f5b35e5c88d2c"}, + {file = "mpmath-1.3.0.tar.gz", hash = "sha256:7a28eb2a9774d00c7bc92411c19a89209d5da7c4c9a9e227be8330a23a25b91f"}, +] + +[package.extras] +develop = ["codecov", "pycodestyle", "pytest (>=4.6)", "pytest-cov", "wheel"] +docs = ["sphinx"] +gmpy = ["gmpy2 (>=2.1.0a4) ; platform_python_implementation != \"PyPy\""] +tests = ["pytest (>=4.6)"] + [[package]] name = "multitasking" version = "0.0.11" @@ -1552,6 +2341,29 @@ pyspark-connect = ["pyspark[connect] (>=3.5.0)"] sql = ["duckdb (>=1.1)", "sqlparse"] sqlframe = ["sqlframe (>=3.22.0,!=3.39.3)"] +[[package]] +name = "networkx" +version = "3.6.1" +description = "Python package for creating and manipulating graphs and networks" +optional = false +python-versions = "!=3.14.1,>=3.11" +groups = ["main"] +files = [ + {file = "networkx-3.6.1-py3-none-any.whl", hash = "sha256:d47fbf302e7d9cbbb9e2555a0d267983d2aa476bac30e90dfbe5669bd57f3762"}, + {file = "networkx-3.6.1.tar.gz", hash = "sha256:26b7c357accc0c8cde558ad486283728b65b6a95d85ee1cd66bafab4c8168509"}, +] + +[package.extras] +benchmarking = ["asv", "virtualenv"] +default = ["matplotlib (>=3.8)", "numpy (>=1.25)", "pandas (>=2.0)", "scipy (>=1.11.2)"] +developer = ["mypy (>=1.15)", "pre-commit (>=4.1)"] +doc = ["intersphinx-registry", "myst-nb (>=1.1)", "numpydoc (>=1.8.0)", "pillow (>=10)", "pydata-sphinx-theme (>=0.16)", "sphinx 
(>=8.0)", "sphinx-gallery (>=0.18)", "texext (>=0.6.7)"] +example = ["cairocffi (>=1.7)", "contextily (>=1.6)", "igraph (>=0.11)", "iplotx (>=0.9.0)", "momepy (>=0.7.2)", "osmnx (>=2.0.0)", "scikit-learn (>=1.5)", "seaborn (>=0.13)"] +extra = ["lxml (>=4.6)", "pydot (>=3.0.1)", "pygraphviz (>=1.14)", "sympy (>=1.10)"] +release = ["build (>=0.10)", "changelist (==0.5)", "twine (>=4.0)", "wheel (>=0.40)"] +test = ["pytest (>=7.2)", "pytest-cov (>=4.0)", "pytest-xdist (>=3.0)"] +test-extras = ["pytest-mpl", "pytest-randomly"] + [[package]] name = "numpy" version = "2.4.2" @@ -1634,6 +2446,227 @@ files = [ {file = "numpy-2.4.2.tar.gz", hash = "sha256:659a6107e31a83c4e33f763942275fd278b21d095094044eb35569e86a21ddae"}, ] +[[package]] +name = "nvidia-cublas-cu12" +version = "12.8.4.1" +description = "CUBLAS native runtime libraries" +optional = false +python-versions = ">=3" +groups = ["main"] +markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" +files = [ + {file = "nvidia_cublas_cu12-12.8.4.1-py3-none-manylinux_2_27_aarch64.whl", hash = "sha256:b86f6dd8935884615a0683b663891d43781b819ac4f2ba2b0c9604676af346d0"}, + {file = "nvidia_cublas_cu12-12.8.4.1-py3-none-manylinux_2_27_x86_64.whl", hash = "sha256:8ac4e771d5a348c551b2a426eda6193c19aa630236b418086020df5ba9667142"}, + {file = "nvidia_cublas_cu12-12.8.4.1-py3-none-win_amd64.whl", hash = "sha256:47e9b82132fa8d2b4944e708049229601448aaad7e6f296f630f2d1a32de35af"}, +] + +[[package]] +name = "nvidia-cuda-cupti-cu12" +version = "12.8.90" +description = "CUDA profiling tools runtime libs." 
+optional = false +python-versions = ">=3" +groups = ["main"] +markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" +files = [ + {file = "nvidia_cuda_cupti_cu12-12.8.90-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:4412396548808ddfed3f17a467b104ba7751e6b58678a4b840675c56d21cf7ed"}, + {file = "nvidia_cuda_cupti_cu12-12.8.90-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:ea0cb07ebda26bb9b29ba82cda34849e73c166c18162d3913575b0c9db9a6182"}, + {file = "nvidia_cuda_cupti_cu12-12.8.90-py3-none-win_amd64.whl", hash = "sha256:bb479dcdf7e6d4f8b0b01b115260399bf34154a1a2e9fe11c85c517d87efd98e"}, +] + +[[package]] +name = "nvidia-cuda-nvrtc-cu12" +version = "12.8.93" +description = "NVRTC native runtime libraries" +optional = false +python-versions = ">=3" +groups = ["main"] +markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" +files = [ + {file = "nvidia_cuda_nvrtc_cu12-12.8.93-py3-none-manylinux2010_x86_64.manylinux_2_12_x86_64.whl", hash = "sha256:a7756528852ef889772a84c6cd89d41dfa74667e24cca16bb31f8f061e3e9994"}, + {file = "nvidia_cuda_nvrtc_cu12-12.8.93-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:fc1fec1e1637854b4c0a65fb9a8346b51dd9ee69e61ebaccc82058441f15bce8"}, + {file = "nvidia_cuda_nvrtc_cu12-12.8.93-py3-none-win_amd64.whl", hash = "sha256:7a4b6b2904850fe78e0bd179c4b655c404d4bb799ef03ddc60804247099ae909"}, +] + +[[package]] +name = "nvidia-cuda-runtime-cu12" +version = "12.8.90" +description = "CUDA Runtime native Libraries" +optional = false +python-versions = ">=3" +groups = ["main"] +markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" +files = [ + {file = "nvidia_cuda_runtime_cu12-12.8.90-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:52bf7bbee900262ffefe5e9d5a2a69a30d97e2bc5bb6cc866688caa976966e3d"}, + {file = 
"nvidia_cuda_runtime_cu12-12.8.90-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:adade8dcbd0edf427b7204d480d6066d33902cab2a4707dcfc48a2d0fd44ab90"}, + {file = "nvidia_cuda_runtime_cu12-12.8.90-py3-none-win_amd64.whl", hash = "sha256:c0c6027f01505bfed6c3b21ec546f69c687689aad5f1a377554bc6ca4aa993a8"}, +] + +[[package]] +name = "nvidia-cudnn-cu12" +version = "9.10.2.21" +description = "cuDNN runtime libraries" +optional = false +python-versions = ">=3" +groups = ["main"] +markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" +files = [ + {file = "nvidia_cudnn_cu12-9.10.2.21-py3-none-manylinux_2_27_aarch64.whl", hash = "sha256:c9132cc3f8958447b4910a1720036d9eff5928cc3179b0a51fb6d167c6cc87d8"}, + {file = "nvidia_cudnn_cu12-9.10.2.21-py3-none-manylinux_2_27_x86_64.whl", hash = "sha256:949452be657fa16687d0930933f032835951ef0892b37d2d53824d1a84dc97a8"}, + {file = "nvidia_cudnn_cu12-9.10.2.21-py3-none-win_amd64.whl", hash = "sha256:c6288de7d63e6cf62988f0923f96dc339cea362decb1bf5b3141883392a7d65e"}, +] + +[package.dependencies] +nvidia-cublas-cu12 = "*" + +[[package]] +name = "nvidia-cufft-cu12" +version = "11.3.3.83" +description = "CUFFT native runtime libraries" +optional = false +python-versions = ">=3" +groups = ["main"] +markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" +files = [ + {file = "nvidia_cufft_cu12-11.3.3.83-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:848ef7224d6305cdb2a4df928759dca7b1201874787083b6e7550dd6765ce69a"}, + {file = "nvidia_cufft_cu12-11.3.3.83-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:4d2dd21ec0b88cf61b62e6b43564355e5222e4a3fb394cac0db101f2dd0d4f74"}, + {file = "nvidia_cufft_cu12-11.3.3.83-py3-none-win_amd64.whl", hash = "sha256:7a64a98ef2a7c47f905aaf8931b69a3a43f27c55530c698bb2ed7c75c0b42cb7"}, +] + +[package.dependencies] +nvidia-nvjitlink-cu12 = "*" + +[[package]] +name = "nvidia-cufile-cu12" +version = 
"1.13.1.3" +description = "cuFile GPUDirect libraries" +optional = false +python-versions = ">=3" +groups = ["main"] +markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" +files = [ + {file = "nvidia_cufile_cu12-1.13.1.3-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:1d069003be650e131b21c932ec3d8969c1715379251f8d23a1860554b1cb24fc"}, + {file = "nvidia_cufile_cu12-1.13.1.3-py3-none-manylinux_2_27_aarch64.whl", hash = "sha256:4beb6d4cce47c1a0f1013d72e02b0994730359e17801d395bdcbf20cfb3bb00a"}, +] + +[[package]] +name = "nvidia-curand-cu12" +version = "10.3.9.90" +description = "CURAND native runtime libraries" +optional = false +python-versions = ">=3" +groups = ["main"] +markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" +files = [ + {file = "nvidia_curand_cu12-10.3.9.90-py3-none-manylinux_2_27_aarch64.whl", hash = "sha256:dfab99248034673b779bc6decafdc3404a8a6f502462201f2f31f11354204acd"}, + {file = "nvidia_curand_cu12-10.3.9.90-py3-none-manylinux_2_27_x86_64.whl", hash = "sha256:b32331d4f4df5d6eefa0554c565b626c7216f87a06a4f56fab27c3b68a830ec9"}, + {file = "nvidia_curand_cu12-10.3.9.90-py3-none-win_amd64.whl", hash = "sha256:f149a8ca457277da854f89cf282d6ef43176861926c7ac85b2a0fbd237c587ec"}, +] + +[[package]] +name = "nvidia-cusolver-cu12" +version = "11.7.3.90" +description = "CUDA solver native runtime libraries" +optional = false +python-versions = ">=3" +groups = ["main"] +markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" +files = [ + {file = "nvidia_cusolver_cu12-11.7.3.90-py3-none-manylinux_2_27_aarch64.whl", hash = "sha256:db9ed69dbef9715071232caa9b69c52ac7de3a95773c2db65bdba85916e4e5c0"}, + {file = "nvidia_cusolver_cu12-11.7.3.90-py3-none-manylinux_2_27_x86_64.whl", hash = "sha256:4376c11ad263152bd50ea295c05370360776f8c3427b30991df774f9fb26c450"}, + {file = "nvidia_cusolver_cu12-11.7.3.90-py3-none-win_amd64.whl", hash = 
"sha256:4a550db115fcabc4d495eb7d39ac8b58d4ab5d8e63274d3754df1c0ad6a22d34"}, +] + +[package.dependencies] +nvidia-cublas-cu12 = "*" +nvidia-cusparse-cu12 = "*" +nvidia-nvjitlink-cu12 = "*" + +[[package]] +name = "nvidia-cusparse-cu12" +version = "12.5.8.93" +description = "CUSPARSE native runtime libraries" +optional = false +python-versions = ">=3" +groups = ["main"] +markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" +files = [ + {file = "nvidia_cusparse_cu12-12.5.8.93-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:9b6c161cb130be1a07a27ea6923df8141f3c295852f4b260c65f18f3e0a091dc"}, + {file = "nvidia_cusparse_cu12-12.5.8.93-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:1ec05d76bbbd8b61b06a80e1eaf8cf4959c3d4ce8e711b65ebd0443bb0ebb13b"}, + {file = "nvidia_cusparse_cu12-12.5.8.93-py3-none-win_amd64.whl", hash = "sha256:9a33604331cb2cac199f2e7f5104dfbb8a5a898c367a53dfda9ff2acb6b6b4dd"}, +] + +[package.dependencies] +nvidia-nvjitlink-cu12 = "*" + +[[package]] +name = "nvidia-cusparselt-cu12" +version = "0.7.1" +description = "NVIDIA cuSPARSELt" +optional = false +python-versions = "*" +groups = ["main"] +markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" +files = [ + {file = "nvidia_cusparselt_cu12-0.7.1-py3-none-manylinux2014_aarch64.whl", hash = "sha256:8878dce784d0fac90131b6817b607e803c36e629ba34dc5b433471382196b6a5"}, + {file = "nvidia_cusparselt_cu12-0.7.1-py3-none-manylinux2014_x86_64.whl", hash = "sha256:f1bb701d6b930d5a7cea44c19ceb973311500847f81b634d802b7b539dc55623"}, + {file = "nvidia_cusparselt_cu12-0.7.1-py3-none-win_amd64.whl", hash = "sha256:f67fbb5831940ec829c9117b7f33807db9f9678dc2a617fbe781cac17b4e1075"}, +] + +[[package]] +name = "nvidia-nccl-cu12" +version = "2.27.5" +description = "NVIDIA Collective Communication Library (NCCL) Runtime" +optional = false +python-versions = ">=3" +groups = ["main"] +markers = "platform_system == \"Linux\" and 
platform_machine == \"x86_64\"" +files = [ + {file = "nvidia_nccl_cu12-2.27.5-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:31432ad4d1fb1004eb0c56203dc9bc2178a1ba69d1d9e02d64a6938ab5e40e7a"}, + {file = "nvidia_nccl_cu12-2.27.5-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:ad730cf15cb5d25fe849c6e6ca9eb5b76db16a80f13f425ac68d8e2e55624457"}, +] + +[[package]] +name = "nvidia-nvjitlink-cu12" +version = "12.8.93" +description = "Nvidia JIT LTO Library" +optional = false +python-versions = ">=3" +groups = ["main"] +markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" +files = [ + {file = "nvidia_nvjitlink_cu12-12.8.93-py3-none-manylinux2010_x86_64.manylinux_2_12_x86_64.whl", hash = "sha256:81ff63371a7ebd6e6451970684f916be2eab07321b73c9d244dc2b4da7f73b88"}, + {file = "nvidia_nvjitlink_cu12-12.8.93-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:adccd7161ace7261e01bb91e44e88da350895c270d23f744f0820c818b7229e7"}, + {file = "nvidia_nvjitlink_cu12-12.8.93-py3-none-win_amd64.whl", hash = "sha256:bd93fbeeee850917903583587f4fc3a4eafa022e34572251368238ab5e6bd67f"}, +] + +[[package]] +name = "nvidia-nvshmem-cu12" +version = "3.4.5" +description = "NVSHMEM creates a global address space that provides efficient and scalable communication for NVIDIA GPU clusters." 
+optional = false +python-versions = ">=3" +groups = ["main"] +markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" +files = [ + {file = "nvidia_nvshmem_cu12-3.4.5-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:0b48363fc6964dede448029434c6abed6c5e37f823cb43c3bcde7ecfc0457e15"}, + {file = "nvidia_nvshmem_cu12-3.4.5-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:042f2500f24c021db8a06c5eec2539027d57460e1c1a762055a6554f72c369bd"}, +] + +[[package]] +name = "nvidia-nvtx-cu12" +version = "12.8.90" +description = "NVIDIA Tools Extension" +optional = false +python-versions = ">=3" +groups = ["main"] +markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" +files = [ + {file = "nvidia_nvtx_cu12-12.8.90-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d7ad891da111ebafbf7e015d34879f7112832fc239ff0d7d776b6cb685274615"}, + {file = "nvidia_nvtx_cu12-12.8.90-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:5b17e2001cc0d751a5bc2c6ec6d26ad95913324a4adb86788c944f8ce9ba441f"}, + {file = "nvidia_nvtx_cu12-12.8.90-py3-none-win_amd64.whl", hash = "sha256:619c8304aedc69f02ea82dd244541a83c3d9d40993381b3b590f1adaed3db41e"}, +] + [[package]] name = "opt-einsum" version = "3.4.0" @@ -1860,7 +2893,7 @@ version = "26.0" description = "Core utilities for Python packages" optional = false python-versions = ">=3.8" -groups = ["main"] +groups = ["main", "dev"] files = [ {file = "packaging-26.0-py3-none-any.whl", hash = "sha256:b36f1fef9334a5588b4166f8bcd26a14e521f2b55e6b9de3aaa80d3ff7a37529"}, {file = "packaging-26.0.tar.gz", hash = "sha256:00243ae351a257117b6a241061796684b084ed1c516a08c48a3f7e147a9d80b4"}, @@ -1962,6 +2995,39 @@ sql-other = ["SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "adbc-d test = ["hypothesis (>=6.46.1)", "pytest (>=7.3.2)", "pytest-xdist (>=2.2.0)"] xml = ["lxml (>=4.9.2)"] +[[package]] +name = "pandas-datareader" 
+version = "0.10.0" +description = "Data readers extracted from the pandas codebase,should be compatible with recent pandas versions" +optional = false +python-versions = ">=3.6" +groups = ["main"] +files = [ + {file = "pandas-datareader-0.10.0.tar.gz", hash = "sha256:9fc3c63d39bc0c10c2683f1c6d503ff625020383e38f6cbe14134826b454d5a6"}, + {file = "pandas_datareader-0.10.0-py3-none-any.whl", hash = "sha256:0b95ff3635bc3ee1a6073521b557ab0e3c39d219f4a3b720b6b0bc6e8cdb4bb7"}, +] + +[package.dependencies] +lxml = "*" +pandas = ">=0.23" +requests = ">=2.19.0" + +[[package]] +name = "parso" +version = "0.8.6" +description = "A Python Parser" +optional = false +python-versions = ">=3.6" +groups = ["main"] +files = [ + {file = "parso-0.8.6-py2.py3-none-any.whl", hash = "sha256:2c549f800b70a5c4952197248825584cb00f033b29c692671d3bf08bf380baff"}, + {file = "parso-0.8.6.tar.gz", hash = "sha256:2b9a0332696df97d454fa67b81618fd69c35a7b90327cbe6ba5c92d2c68a7bfd"}, +] + +[package.extras] +qa = ["flake8 (==5.0.4)", "types-setuptools (==67.2.0.1)", "zuban (==0.5.1)"] +testing = ["docopt", "pytest"] + [[package]] name = "peewee" version = "3.19.0" @@ -1979,6 +3045,22 @@ mysql = ["pymysql"] postgres = ["psycopg2-binary"] psycopg3 = ["psycopg[binary]"] +[[package]] +name = "pexpect" +version = "4.9.0" +description = "Pexpect allows easy control of interactive console applications." 
+optional = false +python-versions = "*" +groups = ["main"] +markers = "sys_platform != \"win32\" and sys_platform != \"emscripten\"" +files = [ + {file = "pexpect-4.9.0-py2.py3-none-any.whl", hash = "sha256:7236d1e080e4936be2dc3e326cec0af72acf9212a7e1d060210e70a47e253523"}, + {file = "pexpect-4.9.0.tar.gz", hash = "sha256:ee7d41123f3c9911050ea2c2dac107568dc43b2d3b0c7557a33212c398ead30f"}, +] + +[package.dependencies] +ptyprocess = ">=0.5" + [[package]] name = "pillow" version = "12.1.0" @@ -2135,7 +3217,7 @@ version = "1.6.0" description = "plugin and hook calling mechanisms for python" optional = false python-versions = ">=3.9" -groups = ["main"] +groups = ["main", "dev"] files = [ {file = "pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746"}, {file = "pluggy-1.6.0.tar.gz", hash = "sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3"}, @@ -2145,6 +3227,38 @@ files = [ dev = ["pre-commit", "tox"] testing = ["coverage", "pytest", "pytest-benchmark"] +[[package]] +name = "polygon-api-client" +version = "1.16.3" +description = "Official Polygon.io REST and Websocket client." 
+optional = false +python-versions = "<4.0,>=3.9" +groups = ["main"] +files = [ + {file = "polygon_api_client-1.16.3-py3-none-any.whl", hash = "sha256:4dec9e27fb5d17a36aacd9372d1df30af2f2b7639b4166ca10d2d48d128d2cd8"}, + {file = "polygon_api_client-1.16.3.tar.gz", hash = "sha256:df575ff6a4a7636cc92272803749adca7e81d2340af493c51a19fe75b51530fc"}, +] + +[package.dependencies] +certifi = ">=2022.5.18,<2026.0.0" +urllib3 = ">=1.26.9" +websockets = ">=14.0" + +[[package]] +name = "prompt-toolkit" +version = "3.0.52" +description = "Library for building powerful interactive command lines in Python" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "prompt_toolkit-3.0.52-py3-none-any.whl", hash = "sha256:9aac639a3bbd33284347de5ad8d68ecc044b91a762dc39b7c21095fcd6a19955"}, + {file = "prompt_toolkit-3.0.52.tar.gz", hash = "sha256:28cde192929c8e7321de85de1ddbe736f1375148b02f2e17edd840042b1be855"}, +] + +[package.dependencies] +wcwidth = "*" + [[package]] name = "protobuf" version = "6.33.5" @@ -2165,6 +3279,34 @@ files = [ {file = "protobuf-6.33.5.tar.gz", hash = "sha256:6ddcac2a081f8b7b9642c09406bc6a4290128fce5f471cddd165960bb9119e5c"}, ] +[[package]] +name = "ptyprocess" +version = "0.7.0" +description = "Run a subprocess in a pseudo terminal" +optional = false +python-versions = "*" +groups = ["main"] +markers = "sys_platform != \"win32\" and sys_platform != \"emscripten\"" +files = [ + {file = "ptyprocess-0.7.0-py2.py3-none-any.whl", hash = "sha256:4b41f3967fce3af57cc7e94b888626c18bf37a083e3651ca8feeb66d492fef35"}, + {file = "ptyprocess-0.7.0.tar.gz", hash = "sha256:5c5d0a3b48ceee0b48485e0c26037c0acd7d29765ca3fbb5cb3831d347423220"}, +] + +[[package]] +name = "pure-eval" +version = "0.2.3" +description = "Safely evaluate AST nodes without side effects" +optional = false +python-versions = "*" +groups = ["main"] +files = [ + {file = "pure_eval-0.2.3-py3-none-any.whl", hash = 
"sha256:1db8e35b67b3d218d818ae653e27f06c3aa420901fa7b081ca98cbedc874e0d0"}, + {file = "pure_eval-0.2.3.tar.gz", hash = "sha256:5f4e983f40564c576c7c8635ae88db5956bb2229d7e9237d03b3c0b0190eaf42"}, +] + +[package.extras] +tests = ["pytest"] + [[package]] name = "pycparser" version = "3.0" @@ -2178,13 +3320,221 @@ files = [ {file = "pycparser-3.0.tar.gz", hash = "sha256:600f49d217304a5902ac3c37e1281c9fe94e4d0489de643a9504c5cdfdfc6b29"}, ] +[[package]] +name = "pydantic" +version = "2.12.5" +description = "Data validation using Python type hints" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "pydantic-2.12.5-py3-none-any.whl", hash = "sha256:e561593fccf61e8a20fc46dfc2dfe075b8be7d0188df33f221ad1f0139180f9d"}, + {file = "pydantic-2.12.5.tar.gz", hash = "sha256:4d351024c75c0f085a9febbb665ce8c0c6ec5d30e903bdb6394b7ede26aebb49"}, +] + +[package.dependencies] +annotated-types = ">=0.6.0" +pydantic-core = "2.41.5" +typing-extensions = ">=4.14.1" +typing-inspection = ">=0.4.2" + +[package.extras] +email = ["email-validator (>=2.0.0)"] +timezone = ["tzdata ; python_version >= \"3.9\" and platform_system == \"Windows\""] + +[[package]] +name = "pydantic-core" +version = "2.41.5" +description = "Core functionality for Pydantic validation and serialization" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "pydantic_core-2.41.5-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:77b63866ca88d804225eaa4af3e664c5faf3568cea95360d21f4725ab6e07146"}, + {file = "pydantic_core-2.41.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:dfa8a0c812ac681395907e71e1274819dec685fec28273a28905df579ef137e2"}, + {file = "pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5921a4d3ca3aee735d9fd163808f5e8dd6c6972101e4adbda9a4667908849b97"}, + {file = "pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = 
"sha256:e25c479382d26a2a41b7ebea1043564a937db462816ea07afa8a44c0866d52f9"}, + {file = "pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f547144f2966e1e16ae626d8ce72b4cfa0caedc7fa28052001c94fb2fcaa1c52"}, + {file = "pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6f52298fbd394f9ed112d56f3d11aabd0d5bd27beb3084cc3d8ad069483b8941"}, + {file = "pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:100baa204bb412b74fe285fb0f3a385256dad1d1879f0a5cb1499ed2e83d132a"}, + {file = "pydantic_core-2.41.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:05a2c8852530ad2812cb7914dc61a1125dc4e06252ee98e5638a12da6cc6fb6c"}, + {file = "pydantic_core-2.41.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:29452c56df2ed968d18d7e21f4ab0ac55e71dc59524872f6fc57dcf4a3249ed2"}, + {file = "pydantic_core-2.41.5-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:d5160812ea7a8a2ffbe233d8da666880cad0cbaf5d4de74ae15c313213d62556"}, + {file = "pydantic_core-2.41.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:df3959765b553b9440adfd3c795617c352154e497a4eaf3752555cfb5da8fc49"}, + {file = "pydantic_core-2.41.5-cp310-cp310-win32.whl", hash = "sha256:1f8d33a7f4d5a7889e60dc39856d76d09333d8a6ed0f5f1190635cbec70ec4ba"}, + {file = "pydantic_core-2.41.5-cp310-cp310-win_amd64.whl", hash = "sha256:62de39db01b8d593e45871af2af9e497295db8d73b085f6bfd0b18c83c70a8f9"}, + {file = "pydantic_core-2.41.5-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:a3a52f6156e73e7ccb0f8cced536adccb7042be67cb45f9562e12b319c119da6"}, + {file = "pydantic_core-2.41.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7f3bf998340c6d4b0c9a2f02d6a400e51f123b59565d74dc60d252ce888c260b"}, + {file = "pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:378bec5c66998815d224c9ca994f1e14c0c21cb95d2f52b6021cc0b2a58f2a5a"}, 
+ {file = "pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e7b576130c69225432866fe2f4a469a85a54ade141d96fd396dffcf607b558f8"}, + {file = "pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6cb58b9c66f7e4179a2d5e0f849c48eff5c1fca560994d6eb6543abf955a149e"}, + {file = "pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:88942d3a3dff3afc8288c21e565e476fc278902ae4d6d134f1eeda118cc830b1"}, + {file = "pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f31d95a179f8d64d90f6831d71fa93290893a33148d890ba15de25642c5d075b"}, + {file = "pydantic_core-2.41.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c1df3d34aced70add6f867a8cf413e299177e0c22660cc767218373d0779487b"}, + {file = "pydantic_core-2.41.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:4009935984bd36bd2c774e13f9a09563ce8de4abaa7226f5108262fa3e637284"}, + {file = "pydantic_core-2.41.5-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:34a64bc3441dc1213096a20fe27e8e128bd3ff89921706e83c0b1ac971276594"}, + {file = "pydantic_core-2.41.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:c9e19dd6e28fdcaa5a1de679aec4141f691023916427ef9bae8584f9c2fb3b0e"}, + {file = "pydantic_core-2.41.5-cp311-cp311-win32.whl", hash = "sha256:2c010c6ded393148374c0f6f0bf89d206bf3217f201faa0635dcd56bd1520f6b"}, + {file = "pydantic_core-2.41.5-cp311-cp311-win_amd64.whl", hash = "sha256:76ee27c6e9c7f16f47db7a94157112a2f3a00e958bc626e2f4ee8bec5c328fbe"}, + {file = "pydantic_core-2.41.5-cp311-cp311-win_arm64.whl", hash = "sha256:4bc36bbc0b7584de96561184ad7f012478987882ebf9f9c389b23f432ea3d90f"}, + {file = "pydantic_core-2.41.5-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:f41a7489d32336dbf2199c8c0a215390a751c5b014c2c1c5366e817202e9cdf7"}, + {file = "pydantic_core-2.41.5-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:070259a8818988b9a84a449a2a7337c7f430a22acc0859c6b110aa7212a6d9c0"}, + {file = "pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e96cea19e34778f8d59fe40775a7a574d95816eb150850a85a7a4c8f4b94ac69"}, + {file = "pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ed2e99c456e3fadd05c991f8f437ef902e00eedf34320ba2b0842bd1c3ca3a75"}, + {file = "pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:65840751b72fbfd82c3c640cff9284545342a4f1eb1586ad0636955b261b0b05"}, + {file = "pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e536c98a7626a98feb2d3eaf75944ef6f3dbee447e1f841eae16f2f0a72d8ddc"}, + {file = "pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eceb81a8d74f9267ef4081e246ffd6d129da5d87e37a77c9bde550cb04870c1c"}, + {file = "pydantic_core-2.41.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d38548150c39b74aeeb0ce8ee1d8e82696f4a4e16ddc6de7b1d8823f7de4b9b5"}, + {file = "pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:c23e27686783f60290e36827f9c626e63154b82b116d7fe9adba1fda36da706c"}, + {file = "pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:482c982f814460eabe1d3bb0adfdc583387bd4691ef00b90575ca0d2b6fe2294"}, + {file = "pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:bfea2a5f0b4d8d43adf9d7b8bf019fb46fdd10a2e5cde477fbcb9d1fa08c68e1"}, + {file = "pydantic_core-2.41.5-cp312-cp312-win32.whl", hash = "sha256:b74557b16e390ec12dca509bce9264c3bbd128f8a2c376eaa68003d7f327276d"}, + {file = "pydantic_core-2.41.5-cp312-cp312-win_amd64.whl", hash = "sha256:1962293292865bca8e54702b08a4f26da73adc83dd1fcf26fbc875b35d81c815"}, + {file = "pydantic_core-2.41.5-cp312-cp312-win_arm64.whl", hash = 
"sha256:1746d4a3d9a794cacae06a5eaaccb4b8643a131d45fbc9af23e353dc0a5ba5c3"}, + {file = "pydantic_core-2.41.5-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:941103c9be18ac8daf7b7adca8228f8ed6bb7a1849020f643b3a14d15b1924d9"}, + {file = "pydantic_core-2.41.5-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:112e305c3314f40c93998e567879e887a3160bb8689ef3d2c04b6cc62c33ac34"}, + {file = "pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0cbaad15cb0c90aa221d43c00e77bb33c93e8d36e0bf74760cd00e732d10a6a0"}, + {file = "pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:03ca43e12fab6023fc79d28ca6b39b05f794ad08ec2feccc59a339b02f2b3d33"}, + {file = "pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dc799088c08fa04e43144b164feb0c13f9a0bc40503f8df3e9fde58a3c0c101e"}, + {file = "pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:97aeba56665b4c3235a0e52b2c2f5ae9cd071b8a8310ad27bddb3f7fb30e9aa2"}, + {file = "pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:406bf18d345822d6c21366031003612b9c77b3e29ffdb0f612367352aab7d586"}, + {file = "pydantic_core-2.41.5-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b93590ae81f7010dbe380cdeab6f515902ebcbefe0b9327cc4804d74e93ae69d"}, + {file = "pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:01a3d0ab748ee531f4ea6c3e48ad9dac84ddba4b0d82291f87248f2f9de8d740"}, + {file = "pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:6561e94ba9dacc9c61bce40e2d6bdc3bfaa0259d3ff36ace3b1e6901936d2e3e"}, + {file = "pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:915c3d10f81bec3a74fbd4faebe8391013ba61e5a1a8d48c4455b923bdda7858"}, + {file = "pydantic_core-2.41.5-cp313-cp313-win32.whl", hash = 
"sha256:650ae77860b45cfa6e2cdafc42618ceafab3a2d9a3811fcfbd3bbf8ac3c40d36"}, + {file = "pydantic_core-2.41.5-cp313-cp313-win_amd64.whl", hash = "sha256:79ec52ec461e99e13791ec6508c722742ad745571f234ea6255bed38c6480f11"}, + {file = "pydantic_core-2.41.5-cp313-cp313-win_arm64.whl", hash = "sha256:3f84d5c1b4ab906093bdc1ff10484838aca54ef08de4afa9de0f5f14d69639cd"}, + {file = "pydantic_core-2.41.5-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:3f37a19d7ebcdd20b96485056ba9e8b304e27d9904d233d7b1015db320e51f0a"}, + {file = "pydantic_core-2.41.5-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:1d1d9764366c73f996edd17abb6d9d7649a7eb690006ab6adbda117717099b14"}, + {file = "pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25e1c2af0fce638d5f1988b686f3b3ea8cd7de5f244ca147c777769e798a9cd1"}, + {file = "pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:506d766a8727beef16b7adaeb8ee6217c64fc813646b424d0804d67c16eddb66"}, + {file = "pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4819fa52133c9aa3c387b3328f25c1facc356491e6135b459f1de698ff64d869"}, + {file = "pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2b761d210c9ea91feda40d25b4efe82a1707da2ef62901466a42492c028553a2"}, + {file = "pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:22f0fb8c1c583a3b6f24df2470833b40207e907b90c928cc8d3594b76f874375"}, + {file = "pydantic_core-2.41.5-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2782c870e99878c634505236d81e5443092fba820f0373997ff75f90f68cd553"}, + {file = "pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:0177272f88ab8312479336e1d777f6b124537d47f2123f89cb37e0accea97f90"}, + {file = "pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_armv7l.whl", hash = 
"sha256:63510af5e38f8955b8ee5687740d6ebf7c2a0886d15a6d65c32814613681bc07"}, + {file = "pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:e56ba91f47764cc14f1daacd723e3e82d1a89d783f0f5afe9c364b8bb491ccdb"}, + {file = "pydantic_core-2.41.5-cp314-cp314-win32.whl", hash = "sha256:aec5cf2fd867b4ff45b9959f8b20ea3993fc93e63c7363fe6851424c8a7e7c23"}, + {file = "pydantic_core-2.41.5-cp314-cp314-win_amd64.whl", hash = "sha256:8e7c86f27c585ef37c35e56a96363ab8de4e549a95512445b85c96d3e2f7c1bf"}, + {file = "pydantic_core-2.41.5-cp314-cp314-win_arm64.whl", hash = "sha256:e672ba74fbc2dc8eea59fb6d4aed6845e6905fc2a8afe93175d94a83ba2a01a0"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:8566def80554c3faa0e65ac30ab0932b9e3a5cd7f8323764303d468e5c37595a"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:b80aa5095cd3109962a298ce14110ae16b8c1aece8b72f9dafe81cf597ad80b3"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3006c3dd9ba34b0c094c544c6006cc79e87d8612999f1a5d43b769b89181f23c"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:72f6c8b11857a856bcfa48c86f5368439f74453563f951e473514579d44aa612"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5cb1b2f9742240e4bb26b652a5aeb840aa4b417c7748b6f8387927bc6e45e40d"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bd3d54f38609ff308209bd43acea66061494157703364ae40c951f83ba99a1a9"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2ff4321e56e879ee8d2a879501c8e469414d948f4aba74a2d4593184eb326660"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:d0d2568a8c11bf8225044aa94409e21da0cb09dcdafe9ecd10250b2baad531a9"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_aarch64.whl", hash = "sha256:a39455728aabd58ceabb03c90e12f71fd30fa69615760a075b9fec596456ccc3"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_armv7l.whl", hash = "sha256:239edca560d05757817c13dc17c50766136d21f7cd0fac50295499ae24f90fdf"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_x86_64.whl", hash = "sha256:2a5e06546e19f24c6a96a129142a75cee553cc018ffee48a460059b1185f4470"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-win32.whl", hash = "sha256:b4ececa40ac28afa90871c2cc2b9ffd2ff0bf749380fbdf57d165fd23da353aa"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-win_amd64.whl", hash = "sha256:80aa89cad80b32a912a65332f64a4450ed00966111b6615ca6816153d3585a8c"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-win_arm64.whl", hash = "sha256:35b44f37a3199f771c3eaa53051bc8a70cd7b54f333531c59e29fd4db5d15008"}, + {file = "pydantic_core-2.41.5-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:8bfeaf8735be79f225f3fefab7f941c712aaca36f1128c9d7e2352ee1aa87bdf"}, + {file = "pydantic_core-2.41.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:346285d28e4c8017da95144c7f3acd42740d637ff41946af5ce6e5e420502dd5"}, + {file = "pydantic_core-2.41.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a75dafbf87d6276ddc5b2bf6fae5254e3d0876b626eb24969a574fff9149ee5d"}, + {file = "pydantic_core-2.41.5-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7b93a4d08587e2b7e7882de461e82b6ed76d9026ce91ca7915e740ecc7855f60"}, + {file = "pydantic_core-2.41.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e8465ab91a4bd96d36dde3263f06caa6a8a6019e4113f24dc753d79a8b3a3f82"}, + {file = "pydantic_core-2.41.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:299e0a22e7ae2b85c1a57f104538b2656e8ab1873511fd718a1c1c6f149b77b5"}, + {file = 
"pydantic_core-2.41.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:707625ef0983fcfb461acfaf14de2067c5942c6bb0f3b4c99158bed6fedd3cf3"}, + {file = "pydantic_core-2.41.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f41eb9797986d6ebac5e8edff36d5cef9de40def462311b3eb3eeded1431e425"}, + {file = "pydantic_core-2.41.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0384e2e1021894b1ff5a786dbf94771e2986ebe2869533874d7e43bc79c6f504"}, + {file = "pydantic_core-2.41.5-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:f0cd744688278965817fd0839c4a4116add48d23890d468bc436f78beb28abf5"}, + {file = "pydantic_core-2.41.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:753e230374206729bf0a807954bcc6c150d3743928a73faffee51ac6557a03c3"}, + {file = "pydantic_core-2.41.5-cp39-cp39-win32.whl", hash = "sha256:873e0d5b4fb9b89ef7c2d2a963ea7d02879d9da0da8d9d4933dee8ee86a8b460"}, + {file = "pydantic_core-2.41.5-cp39-cp39-win_amd64.whl", hash = "sha256:e4f4a984405e91527a0d62649ee21138f8e3d0ef103be488c1dc11a80d7f184b"}, + {file = "pydantic_core-2.41.5-graalpy311-graalpy242_311_native-macosx_10_12_x86_64.whl", hash = "sha256:b96d5f26b05d03cc60f11a7761a5ded1741da411e7fe0909e27a5e6a0cb7b034"}, + {file = "pydantic_core-2.41.5-graalpy311-graalpy242_311_native-macosx_11_0_arm64.whl", hash = "sha256:634e8609e89ceecea15e2d61bc9ac3718caaaa71963717bf3c8f38bfde64242c"}, + {file = "pydantic_core-2.41.5-graalpy311-graalpy242_311_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:93e8740d7503eb008aa2df04d3b9735f845d43ae845e6dcd2be0b55a2da43cd2"}, + {file = "pydantic_core-2.41.5-graalpy311-graalpy242_311_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f15489ba13d61f670dcc96772e733aad1a6f9c429cc27574c6cdaed82d0146ad"}, + {file = "pydantic_core-2.41.5-graalpy312-graalpy250_312_native-macosx_10_12_x86_64.whl", hash = "sha256:7da7087d756b19037bc2c06edc6c170eeef3c3bafcb8f532ff17d64dc427adfd"}, + {file = 
"pydantic_core-2.41.5-graalpy312-graalpy250_312_native-macosx_11_0_arm64.whl", hash = "sha256:aabf5777b5c8ca26f7824cb4a120a740c9588ed58df9b2d196ce92fba42ff8dc"}, + {file = "pydantic_core-2.41.5-graalpy312-graalpy250_312_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c007fe8a43d43b3969e8469004e9845944f1a80e6acd47c150856bb87f230c56"}, + {file = "pydantic_core-2.41.5-graalpy312-graalpy250_312_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:76d0819de158cd855d1cbb8fcafdf6f5cf1eb8e470abe056d5d161106e38062b"}, + {file = "pydantic_core-2.41.5-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:b5819cd790dbf0c5eb9f82c73c16b39a65dd6dd4d1439dcdea7816ec9adddab8"}, + {file = "pydantic_core-2.41.5-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:5a4e67afbc95fa5c34cf27d9089bca7fcab4e51e57278d710320a70b956d1b9a"}, + {file = "pydantic_core-2.41.5-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ece5c59f0ce7d001e017643d8d24da587ea1f74f6993467d85ae8a5ef9d4f42b"}, + {file = "pydantic_core-2.41.5-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:16f80f7abe3351f8ea6858914ddc8c77e02578544a0ebc15b4c2e1a0e813b0b2"}, + {file = "pydantic_core-2.41.5-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:33cb885e759a705b426baada1fe68cbb0a2e68e34c5d0d0289a364cf01709093"}, + {file = "pydantic_core-2.41.5-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:c8d8b4eb992936023be7dee581270af5c6e0697a8559895f527f5b7105ecd36a"}, + {file = "pydantic_core-2.41.5-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:242a206cd0318f95cd21bdacff3fcc3aab23e79bba5cac3db5a841c9ef9c6963"}, + {file = "pydantic_core-2.41.5-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:d3a978c4f57a597908b7e697229d996d77a6d3c94901e9edee593adada95ce1a"}, + {file = "pydantic_core-2.41.5-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = 
"sha256:b2379fa7ed44ddecb5bfe4e48577d752db9fc10be00a6b7446e9663ba143de26"}, + {file = "pydantic_core-2.41.5-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:266fb4cbf5e3cbd0b53669a6d1b039c45e3ce651fd5442eff4d07c2cc8d66808"}, + {file = "pydantic_core-2.41.5-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58133647260ea01e4d0500089a8c4f07bd7aa6ce109682b1426394988d8aaacc"}, + {file = "pydantic_core-2.41.5-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:287dad91cfb551c363dc62899a80e9e14da1f0e2b6ebde82c806612ca2a13ef1"}, + {file = "pydantic_core-2.41.5-pp311-pypy311_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:03b77d184b9eb40240ae9fd676ca364ce1085f203e1b1256f8ab9984dca80a84"}, + {file = "pydantic_core-2.41.5-pp311-pypy311_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:a668ce24de96165bb239160b3d854943128f4334822900534f2fe947930e5770"}, + {file = "pydantic_core-2.41.5-pp311-pypy311_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f14f8f046c14563f8eb3f45f499cc658ab8d10072961e07225e507adb700e93f"}, + {file = "pydantic_core-2.41.5-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:56121965f7a4dc965bff783d70b907ddf3d57f6eba29b6d2e5dabfaf07799c51"}, + {file = "pydantic_core-2.41.5.tar.gz", hash = "sha256:08daa51ea16ad373ffd5e7606252cc32f07bc72b28284b6bc9c6df804816476e"}, +] + +[package.dependencies] +typing-extensions = ">=4.14.1" + +[[package]] +name = "pydantic-settings" +version = "2.13.0" +description = "Settings management using Pydantic" +optional = false +python-versions = ">=3.10" +groups = ["main"] +files = [ + {file = "pydantic_settings-2.13.0-py3-none-any.whl", hash = "sha256:d67b576fff39cd086b595441bf9c75d4193ca9c0ed643b90360694d0f1240246"}, + {file = "pydantic_settings-2.13.0.tar.gz", hash = "sha256:95d875514610e8595672800a5c40b073e99e4aae467fa7c8f9c263061ea2e1fe"}, +] + +[package.dependencies] +pydantic = ">=2.7.0" +python-dotenv = ">=0.21.0" +typing-inspection = ">=0.4.0" + +[package.extras] 
+aws-secrets-manager = ["boto3 (>=1.35.0)", "boto3-stubs[secretsmanager]"] +azure-key-vault = ["azure-identity (>=1.16.0)", "azure-keyvault-secrets (>=4.8.0)"] +gcp-secret-manager = ["google-cloud-secret-manager (>=2.23.1)"] +toml = ["tomli (>=2.0.1)"] +yaml = ["pyyaml (>=6.0.1)"] + +[[package]] +name = "pyfolio-reloaded" +version = "0.9.9" +description = "Performance and risk analysis of financial portfolios with Python" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "pyfolio_reloaded-0.9.9-py3-none-any.whl", hash = "sha256:aa0e136b379bc6c7a48a74746c784c263de986575af5377348330eb1aed8791c"}, + {file = "pyfolio_reloaded-0.9.9.tar.gz", hash = "sha256:4dcd0bc6e07249458685e6591a105daafaa87c6b79a0680f71ba8e58435b5c0c"}, +] + +[package.dependencies] +empyrical-reloaded = ">=0.5.9" +ipython = ">=3.2.3" +matplotlib = ">=1.4.0" +numpy = {version = ">=1.26.0", markers = "python_version >= \"3.12\""} +pandas = ">=1.5.0,<3.0" +pytz = ">=2014.10" +scikit-learn = ">=0.16.1" +scipy = ">=0.14.0" +seaborn = ">=0.7.1" + +[package.extras] +dev = ["black", "flake8 (>=3.9.1)", "pre-commit (>=2.12.1)"] +docs = ["Cython", "Sphinx (>=1.3.2)", "m2r2", "numpydoc (>=0.5.0)", "pydata-sphinx-theme", "sphinx-autobuild (>=0.6.0)", "sphinx-markdown-tables", "sphinx_copybutton"] +test = ["black", "coverage (>=4.0.3)", "coveralls (==3.0.1)", "flake8 (>=3.9.1)", "parameterized (>=0.6.1)", "pytest (>=6.2)", "pytest-cov (>=2.12)", "pytest-rerunfailures", "pytest-xdist (>=2.5.0)", "tox (>=2.3.1)"] + [[package]] name = "pygments" version = "2.19.2" description = "Pygments is a syntax highlighting package written in Python." 
optional = false python-versions = ">=3.8" -groups = ["main"] +groups = ["main", "dev"] files = [ {file = "pygments-2.19.2-py3-none-any.whl", hash = "sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b"}, {file = "pygments-2.19.2.tar.gz", hash = "sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887"}, @@ -2214,7 +3564,7 @@ version = "9.0.2" description = "pytest: simple powerful testing with Python" optional = false python-versions = ">=3.10" -groups = ["main"] +groups = ["main", "dev"] files = [ {file = "pytest-9.0.2-py3-none-any.whl", hash = "sha256:711ffd45bf766d5264d487b917733b453d917afd2b0ad65223959f59089f875b"}, {file = "pytest-9.0.2.tar.gz", hash = "sha256:75186651a92bd89611d1d9fc20f0b4345fd827c41ccd5c299a868a05d70edf11"}, @@ -2234,10 +3584,9 @@ dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "requests name = "pytest-cov" version = "7.0.0" description = "Pytest plugin for measuring coverage." -optional = true +optional = false python-versions = ">=3.9" -groups = ["main"] -markers = "extra == \"dev\"" +groups = ["main", "dev"] files = [ {file = "pytest_cov-7.0.0-py3-none-any.whl", hash = "sha256:3b8e9558b16cc1479da72058bdecf8073661c7f57f7d3c5f22a1c23507f2d861"}, {file = "pytest_cov-7.0.0.tar.gz", hash = "sha256:33c97eda2e049a0c5298e91f519302a1334c26ac65c1a483d6206fd458361af1"}, @@ -2251,6 +3600,24 @@ pytest = ">=7" [package.extras] testing = ["process-tests", "pytest-xdist", "virtualenv"] +[[package]] +name = "pytest-mock" +version = "3.15.1" +description = "Thin-wrapper around the mock package for easier use with pytest" +optional = false +python-versions = ">=3.9" +groups = ["main", "dev"] +files = [ + {file = "pytest_mock-3.15.1-py3-none-any.whl", hash = "sha256:0a25e2eb88fe5168d535041d09a4529a188176ae608a6d249ee65abc0949630d"}, + {file = "pytest_mock-3.15.1.tar.gz", hash = "sha256:1849a238f6f396da19762269de72cb1814ab44416fa73a8686deac10b0d87a0f"}, +] + +[package.dependencies] +pytest = 
">=6.2.5" + +[package.extras] +dev = ["pre-commit", "pytest-asyncio", "tox"] + [[package]] name = "pytest-timeout" version = "2.4.0" @@ -2281,6 +3648,21 @@ files = [ [package.dependencies] six = ">=1.5" +[[package]] +name = "python-dotenv" +version = "1.2.1" +description = "Read key-value pairs from a .env file and set them as environment variables" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "python_dotenv-1.2.1-py3-none-any.whl", hash = "sha256:b81ee9561e9ca4004139c6cbba3a238c32b03e4894671e181b671e8cb8425d61"}, + {file = "python_dotenv-1.2.1.tar.gz", hash = "sha256:42667e897e16ab0d66954af0e60a9caa94f0fd4ecf3aaf6d2d260eec1aa36ad6"}, +] + +[package.extras] +cli = ["click (>=5.0)"] + [[package]] name = "pytz" version = "2025.2" @@ -2293,6 +3675,230 @@ files = [ {file = "pytz-2025.2.tar.gz", hash = "sha256:360b9e3dbb49a209c21ad61809c7fb453643e048b38924c765813546746e81c3"}, ] +[[package]] +name = "pyyaml" +version = "6.0.3" +description = "YAML parser and emitter for Python" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "PyYAML-6.0.3-cp38-cp38-macosx_10_13_x86_64.whl", hash = "sha256:c2514fceb77bc5e7a2f7adfaa1feb2fb311607c9cb518dbc378688ec73d8292f"}, + {file = "PyYAML-6.0.3-cp38-cp38-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9c57bb8c96f6d1808c030b1687b9b5fb476abaa47f0db9c0101f5e9f394e97f4"}, + {file = "PyYAML-6.0.3-cp38-cp38-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:efd7b85f94a6f21e4932043973a7ba2613b059c4a000551892ac9f1d11f5baf3"}, + {file = "PyYAML-6.0.3-cp38-cp38-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:22ba7cfcad58ef3ecddc7ed1db3409af68d023b7f940da23c6c2a1890976eda6"}, + {file = "PyYAML-6.0.3-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:6344df0d5755a2c9a276d4473ae6b90647e216ab4757f8426893b5dd2ac3f369"}, + {file = "PyYAML-6.0.3-cp38-cp38-win32.whl", 
hash = "sha256:3ff07ec89bae51176c0549bc4c63aa6202991da2d9a6129d7aef7f1407d3f295"}, + {file = "PyYAML-6.0.3-cp38-cp38-win_amd64.whl", hash = "sha256:5cf4e27da7e3fbed4d6c3d8e797387aaad68102272f8f9752883bc32d61cb87b"}, + {file = "pyyaml-6.0.3-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:214ed4befebe12df36bcc8bc2b64b396ca31be9304b8f59e25c11cf94a4c033b"}, + {file = "pyyaml-6.0.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:02ea2dfa234451bbb8772601d7b8e426c2bfa197136796224e50e35a78777956"}, + {file = "pyyaml-6.0.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b30236e45cf30d2b8e7b3e85881719e98507abed1011bf463a8fa23e9c3e98a8"}, + {file = "pyyaml-6.0.3-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:66291b10affd76d76f54fad28e22e51719ef9ba22b29e1d7d03d6777a9174198"}, + {file = "pyyaml-6.0.3-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9c7708761fccb9397fe64bbc0395abcae8c4bf7b0eac081e12b809bf47700d0b"}, + {file = "pyyaml-6.0.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:418cf3f2111bc80e0933b2cd8cd04f286338bb88bdc7bc8e6dd775ebde60b5e0"}, + {file = "pyyaml-6.0.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:5e0b74767e5f8c593e8c9b5912019159ed0533c70051e9cce3e8b6aa699fcd69"}, + {file = "pyyaml-6.0.3-cp310-cp310-win32.whl", hash = "sha256:28c8d926f98f432f88adc23edf2e6d4921ac26fb084b028c733d01868d19007e"}, + {file = "pyyaml-6.0.3-cp310-cp310-win_amd64.whl", hash = "sha256:bdb2c67c6c1390b63c6ff89f210c8fd09d9a1217a465701eac7316313c915e4c"}, + {file = "pyyaml-6.0.3-cp311-cp311-macosx_10_13_x86_64.whl", hash = "sha256:44edc647873928551a01e7a563d7452ccdebee747728c1080d881d68af7b997e"}, + {file = "pyyaml-6.0.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:652cb6edd41e718550aad172851962662ff2681490a8a711af6a4d288dd96824"}, + {file = 
"pyyaml-6.0.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:10892704fc220243f5305762e276552a0395f7beb4dbf9b14ec8fd43b57f126c"}, + {file = "pyyaml-6.0.3-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:850774a7879607d3a6f50d36d04f00ee69e7fc816450e5f7e58d7f17f1ae5c00"}, + {file = "pyyaml-6.0.3-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b8bb0864c5a28024fac8a632c443c87c5aa6f215c0b126c449ae1a150412f31d"}, + {file = "pyyaml-6.0.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1d37d57ad971609cf3c53ba6a7e365e40660e3be0e5175fa9f2365a379d6095a"}, + {file = "pyyaml-6.0.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:37503bfbfc9d2c40b344d06b2199cf0e96e97957ab1c1b546fd4f87e53e5d3e4"}, + {file = "pyyaml-6.0.3-cp311-cp311-win32.whl", hash = "sha256:8098f252adfa6c80ab48096053f512f2321f0b998f98150cea9bd23d83e1467b"}, + {file = "pyyaml-6.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:9f3bfb4965eb874431221a3ff3fdcddc7e74e3b07799e0e84ca4a0f867d449bf"}, + {file = "pyyaml-6.0.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:7f047e29dcae44602496db43be01ad42fc6f1cc0d8cd6c83d342306c32270196"}, + {file = "pyyaml-6.0.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:fc09d0aa354569bc501d4e787133afc08552722d3ab34836a80547331bb5d4a0"}, + {file = "pyyaml-6.0.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9149cad251584d5fb4981be1ecde53a1ca46c891a79788c0df828d2f166bda28"}, + {file = "pyyaml-6.0.3-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5fdec68f91a0c6739b380c83b951e2c72ac0197ace422360e6d5a959d8d97b2c"}, + {file = "pyyaml-6.0.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ba1cc08a7ccde2d2ec775841541641e4548226580ab850948cbfda66a1befcdc"}, + {file = 
"pyyaml-6.0.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:8dc52c23056b9ddd46818a57b78404882310fb473d63f17b07d5c40421e47f8e"}, + {file = "pyyaml-6.0.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:41715c910c881bc081f1e8872880d3c650acf13dfa8214bad49ed4cede7c34ea"}, + {file = "pyyaml-6.0.3-cp312-cp312-win32.whl", hash = "sha256:96b533f0e99f6579b3d4d4995707cf36df9100d67e0c8303a0c55b27b5f99bc5"}, + {file = "pyyaml-6.0.3-cp312-cp312-win_amd64.whl", hash = "sha256:5fcd34e47f6e0b794d17de1b4ff496c00986e1c83f7ab2fb8fcfe9616ff7477b"}, + {file = "pyyaml-6.0.3-cp312-cp312-win_arm64.whl", hash = "sha256:64386e5e707d03a7e172c0701abfb7e10f0fb753ee1d773128192742712a98fd"}, + {file = "pyyaml-6.0.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:8da9669d359f02c0b91ccc01cac4a67f16afec0dac22c2ad09f46bee0697eba8"}, + {file = "pyyaml-6.0.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:2283a07e2c21a2aa78d9c4442724ec1eb15f5e42a723b99cb3d822d48f5f7ad1"}, + {file = "pyyaml-6.0.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ee2922902c45ae8ccada2c5b501ab86c36525b883eff4255313a253a3160861c"}, + {file = "pyyaml-6.0.3-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:a33284e20b78bd4a18c8c2282d549d10bc8408a2a7ff57653c0cf0b9be0afce5"}, + {file = "pyyaml-6.0.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0f29edc409a6392443abf94b9cf89ce99889a1dd5376d94316ae5145dfedd5d6"}, + {file = "pyyaml-6.0.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f7057c9a337546edc7973c0d3ba84ddcdf0daa14533c2065749c9075001090e6"}, + {file = "pyyaml-6.0.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:eda16858a3cab07b80edaf74336ece1f986ba330fdb8ee0d6c0d68fe82bc96be"}, + {file = "pyyaml-6.0.3-cp313-cp313-win32.whl", hash = "sha256:d0eae10f8159e8fdad514efdc92d74fd8d682c933a6dd088030f3834bc8e6b26"}, + {file = 
"pyyaml-6.0.3-cp313-cp313-win_amd64.whl", hash = "sha256:79005a0d97d5ddabfeeea4cf676af11e647e41d81c9a7722a193022accdb6b7c"}, + {file = "pyyaml-6.0.3-cp313-cp313-win_arm64.whl", hash = "sha256:5498cd1645aa724a7c71c8f378eb29ebe23da2fc0d7a08071d89469bf1d2defb"}, + {file = "pyyaml-6.0.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:8d1fab6bb153a416f9aeb4b8763bc0f22a5586065f86f7664fc23339fc1c1fac"}, + {file = "pyyaml-6.0.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:34d5fcd24b8445fadc33f9cf348c1047101756fd760b4dacb5c3e99755703310"}, + {file = "pyyaml-6.0.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:501a031947e3a9025ed4405a168e6ef5ae3126c59f90ce0cd6f2bfc477be31b7"}, + {file = "pyyaml-6.0.3-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:b3bc83488de33889877a0f2543ade9f70c67d66d9ebb4ac959502e12de895788"}, + {file = "pyyaml-6.0.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c458b6d084f9b935061bc36216e8a69a7e293a2f1e68bf956dcd9e6cbcd143f5"}, + {file = "pyyaml-6.0.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:7c6610def4f163542a622a73fb39f534f8c101d690126992300bf3207eab9764"}, + {file = "pyyaml-6.0.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:5190d403f121660ce8d1d2c1bb2ef1bd05b5f68533fc5c2ea899bd15f4399b35"}, + {file = "pyyaml-6.0.3-cp314-cp314-win_amd64.whl", hash = "sha256:4a2e8cebe2ff6ab7d1050ecd59c25d4c8bd7e6f400f5f82b96557ac0abafd0ac"}, + {file = "pyyaml-6.0.3-cp314-cp314-win_arm64.whl", hash = "sha256:93dda82c9c22deb0a405ea4dc5f2d0cda384168e466364dec6255b293923b2f3"}, + {file = "pyyaml-6.0.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:02893d100e99e03eda1c8fd5c441d8c60103fd175728e23e431db1b589cf5ab3"}, + {file = "pyyaml-6.0.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:c1ff362665ae507275af2853520967820d9124984e0f7466736aea23d8611fba"}, + {file = 
"pyyaml-6.0.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6adc77889b628398debc7b65c073bcb99c4a0237b248cacaf3fe8a557563ef6c"}, + {file = "pyyaml-6.0.3-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:a80cb027f6b349846a3bf6d73b5e95e782175e52f22108cfa17876aaeff93702"}, + {file = "pyyaml-6.0.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:00c4bdeba853cc34e7dd471f16b4114f4162dc03e6b7afcc2128711f0eca823c"}, + {file = "pyyaml-6.0.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:66e1674c3ef6f541c35191caae2d429b967b99e02040f5ba928632d9a7f0f065"}, + {file = "pyyaml-6.0.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:16249ee61e95f858e83976573de0f5b2893b3677ba71c9dd36b9cf8be9ac6d65"}, + {file = "pyyaml-6.0.3-cp314-cp314t-win_amd64.whl", hash = "sha256:4ad1906908f2f5ae4e5a8ddfce73c320c2a1429ec52eafd27138b7f1cbe341c9"}, + {file = "pyyaml-6.0.3-cp314-cp314t-win_arm64.whl", hash = "sha256:ebc55a14a21cb14062aa4162f906cd962b28e2e9ea38f9b4391244cd8de4ae0b"}, + {file = "pyyaml-6.0.3-cp39-cp39-macosx_10_13_x86_64.whl", hash = "sha256:b865addae83924361678b652338317d1bd7e79b1f4596f96b96c77a5a34b34da"}, + {file = "pyyaml-6.0.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c3355370a2c156cffb25e876646f149d5d68f5e0a3ce86a5084dd0b64a994917"}, + {file = "pyyaml-6.0.3-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3c5677e12444c15717b902a5798264fa7909e41153cdf9ef7ad571b704a63dd9"}, + {file = "pyyaml-6.0.3-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5ed875a24292240029e4483f9d4a4b8a1ae08843b9c54f43fcc11e404532a8a5"}, + {file = "pyyaml-6.0.3-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0150219816b6a1fa26fb4699fb7daa9caf09eb1999f3b70fb6e786805e80375a"}, + {file = 
"pyyaml-6.0.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:fa160448684b4e94d80416c0fa4aac48967a969efe22931448d853ada8baf926"}, + {file = "pyyaml-6.0.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:27c0abcb4a5dac13684a37f76e701e054692a9b2d3064b70f5e4eb54810553d7"}, + {file = "pyyaml-6.0.3-cp39-cp39-win32.whl", hash = "sha256:1ebe39cb5fc479422b83de611d14e2c0d3bb2a18bbcb01f229ab3cfbd8fee7a0"}, + {file = "pyyaml-6.0.3-cp39-cp39-win_amd64.whl", hash = "sha256:2e71d11abed7344e42a8849600193d15b6def118602c4c176f748e4583246007"}, + {file = "pyyaml-6.0.3.tar.gz", hash = "sha256:d76623373421df22fb4cf8817020cbb7ef15c725b9d5e45f17e189bfc384190f"}, +] + +[[package]] +name = "regex" +version = "2026.1.15" +description = "Alternative regular expression module, to replace re." +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "regex-2026.1.15-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:4e3dd93c8f9abe8aa4b6c652016da9a3afa190df5ad822907efe6b206c09896e"}, + {file = "regex-2026.1.15-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:97499ff7862e868b1977107873dd1a06e151467129159a6ffd07b66706ba3a9f"}, + {file = "regex-2026.1.15-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0bda75ebcac38d884240914c6c43d8ab5fb82e74cde6da94b43b17c411aa4c2b"}, + {file = "regex-2026.1.15-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7dcc02368585334f5bc81fc73a2a6a0bbade60e7d83da21cead622faf408f32c"}, + {file = "regex-2026.1.15-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:693b465171707bbe882a7a05de5e866f33c76aa449750bee94a8d90463533cc9"}, + {file = "regex-2026.1.15-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:b0d190e6f013ea938623a58706d1469a62103fb2a241ce2873a9906e0386582c"}, + {file = "regex-2026.1.15-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", 
hash = "sha256:5ff818702440a5878a81886f127b80127f5d50563753a28211482867f8318106"}, + {file = "regex-2026.1.15-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f052d1be37ef35a54e394de66136e30fa1191fab64f71fc06ac7bc98c9a84618"}, + {file = "regex-2026.1.15-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:6bfc31a37fd1592f0c4fc4bfc674b5c42e52efe45b4b7a6a14f334cca4bcebe4"}, + {file = "regex-2026.1.15-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:3d6ce5ae80066b319ae3bc62fd55a557c9491baa5efd0d355f0de08c4ba54e79"}, + {file = "regex-2026.1.15-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:1704d204bd42b6bb80167df0e4554f35c255b579ba99616def38f69e14a5ccb9"}, + {file = "regex-2026.1.15-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:e3174a5ed4171570dc8318afada56373aa9289eb6dc0d96cceb48e7358b0e220"}, + {file = "regex-2026.1.15-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:87adf5bd6d72e3e17c9cb59ac4096b1faaf84b7eb3037a5ffa61c4b4370f0f13"}, + {file = "regex-2026.1.15-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:e85dc94595f4d766bd7d872a9de5ede1ca8d3063f3bdf1e2c725f5eb411159e3"}, + {file = "regex-2026.1.15-cp310-cp310-win32.whl", hash = "sha256:21ca32c28c30d5d65fc9886ff576fc9b59bbca08933e844fa2363e530f4c8218"}, + {file = "regex-2026.1.15-cp310-cp310-win_amd64.whl", hash = "sha256:3038a62fc7d6e5547b8915a3d927a0fbeef84cdbe0b1deb8c99bbd4a8961b52a"}, + {file = "regex-2026.1.15-cp310-cp310-win_arm64.whl", hash = "sha256:505831646c945e3e63552cc1b1b9b514f0e93232972a2d5bedbcc32f15bc82e3"}, + {file = "regex-2026.1.15-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:1ae6020fb311f68d753b7efa9d4b9a5d47a5d6466ea0d5e3b5a471a960ea6e4a"}, + {file = "regex-2026.1.15-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:eddf73f41225942c1f994914742afa53dc0d01a6e20fe14b878a1b1edc74151f"}, + {file = "regex-2026.1.15-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:1e8cd52557603f5c66a548f69421310886b28b7066853089e1a71ee710e1cdc1"}, + {file = "regex-2026.1.15-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5170907244b14303edc5978f522f16c974f32d3aa92109fabc2af52411c9433b"}, + {file = "regex-2026.1.15-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:2748c1ec0663580b4510bd89941a31560b4b439a0b428b49472a3d9944d11cd8"}, + {file = "regex-2026.1.15-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:2f2775843ca49360508d080eaa87f94fa248e2c946bbcd963bb3aae14f333413"}, + {file = "regex-2026.1.15-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d9ea2604370efc9a174c1b5dcc81784fb040044232150f7f33756049edfc9026"}, + {file = "regex-2026.1.15-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:0dcd31594264029b57bf16f37fd7248a70b3b764ed9e0839a8f271b2d22c0785"}, + {file = "regex-2026.1.15-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c08c1f3e34338256732bd6938747daa3c0d5b251e04b6e43b5813e94d503076e"}, + {file = "regex-2026.1.15-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:e43a55f378df1e7a4fa3547c88d9a5a9b7113f653a66821bcea4718fe6c58763"}, + {file = "regex-2026.1.15-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:f82110ab962a541737bd0ce87978d4c658f06e7591ba899192e2712a517badbb"}, + {file = "regex-2026.1.15-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:27618391db7bdaf87ac6c92b31e8f0dfb83a9de0075855152b720140bda177a2"}, + {file = "regex-2026.1.15-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:bfb0d6be01fbae8d6655c8ca21b3b72458606c4aec9bbc932db758d47aba6db1"}, + {file = "regex-2026.1.15-cp311-cp311-win32.whl", hash = "sha256:b10e42a6de0e32559a92f2f8dc908478cc0fa02838d7dbe764c44dca3fa13569"}, + {file = "regex-2026.1.15-cp311-cp311-win_amd64.whl", hash = 
"sha256:e9bf3f0bbdb56633c07d7116ae60a576f846efdd86a8848f8d62b749e1209ca7"}, + {file = "regex-2026.1.15-cp311-cp311-win_arm64.whl", hash = "sha256:41aef6f953283291c4e4e6850607bd71502be67779586a61472beacb315c97ec"}, + {file = "regex-2026.1.15-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:4c8fcc5793dde01641a35905d6731ee1548f02b956815f8f1cab89e515a5bdf1"}, + {file = "regex-2026.1.15-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:bfd876041a956e6a90ad7cdb3f6a630c07d491280bfeed4544053cd434901681"}, + {file = "regex-2026.1.15-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:9250d087bc92b7d4899ccd5539a1b2334e44eee85d848c4c1aef8e221d3f8c8f"}, + {file = "regex-2026.1.15-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c8a154cf6537ebbc110e24dabe53095e714245c272da9c1be05734bdad4a61aa"}, + {file = "regex-2026.1.15-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:8050ba2e3ea1d8731a549e83c18d2f0999fbc99a5f6bd06b4c91449f55291804"}, + {file = "regex-2026.1.15-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:0bf065240704cb8951cc04972cf107063917022511273e0969bdb34fc173456c"}, + {file = "regex-2026.1.15-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c32bef3e7aeee75746748643667668ef941d28b003bfc89994ecf09a10f7a1b5"}, + {file = "regex-2026.1.15-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:d5eaa4a4c5b1906bd0d2508d68927f15b81821f85092e06f1a34a4254b0e1af3"}, + {file = "regex-2026.1.15-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:86c1077a3cc60d453d4084d5b9649065f3bf1184e22992bd322e1f081d3117fb"}, + {file = "regex-2026.1.15-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:2b091aefc05c78d286657cd4db95f2e6313375ff65dcf085e42e4c04d9c8d410"}, + {file = "regex-2026.1.15-cp312-cp312-musllinux_1_2_riscv64.whl", hash = 
"sha256:57e7d17f59f9ebfa9667e6e5a1c0127b96b87cb9cede8335482451ed00788ba4"}, + {file = "regex-2026.1.15-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:c6c4dcdfff2c08509faa15d36ba7e5ef5fcfab25f1e8f85a0c8f45bc3a30725d"}, + {file = "regex-2026.1.15-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:cf8ff04c642716a7f2048713ddc6278c5fd41faa3b9cab12607c7abecd012c22"}, + {file = "regex-2026.1.15-cp312-cp312-win32.whl", hash = "sha256:82345326b1d8d56afbe41d881fdf62f1926d7264b2fc1537f99ae5da9aad7913"}, + {file = "regex-2026.1.15-cp312-cp312-win_amd64.whl", hash = "sha256:4def140aa6156bc64ee9912383d4038f3fdd18fee03a6f222abd4de6357ce42a"}, + {file = "regex-2026.1.15-cp312-cp312-win_arm64.whl", hash = "sha256:c6c565d9a6e1a8d783c1948937ffc377dd5771e83bd56de8317c450a954d2056"}, + {file = "regex-2026.1.15-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:e69d0deeb977ffe7ed3d2e4439360089f9c3f217ada608f0f88ebd67afb6385e"}, + {file = "regex-2026.1.15-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:3601ffb5375de85a16f407854d11cca8fe3f5febbe3ac78fb2866bb220c74d10"}, + {file = "regex-2026.1.15-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:4c5ef43b5c2d4114eb8ea424bb8c9cec01d5d17f242af88b2448f5ee81caadbc"}, + {file = "regex-2026.1.15-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:968c14d4f03e10b2fd960f1d5168c1f0ac969381d3c1fcc973bc45fb06346599"}, + {file = "regex-2026.1.15-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:56a5595d0f892f214609c9f76b41b7428bed439d98dc961efafdd1354d42baae"}, + {file = "regex-2026.1.15-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:0bf650f26087363434c4e560011f8e4e738f6f3e029b85d4904c50135b86cfa5"}, + {file = "regex-2026.1.15-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:18388a62989c72ac24de75f1449d0fb0b04dfccd0a1a7c1c43af5eb503d890f6"}, + {file = "regex-2026.1.15-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:6d220a2517f5893f55daac983bfa9fe998a7dbcaee4f5d27a88500f8b7873788"}, + {file = "regex-2026.1.15-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:c9c08c2fbc6120e70abff5d7f28ffb4d969e14294fb2143b4b5c7d20e46d1714"}, + {file = "regex-2026.1.15-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:7ef7d5d4bd49ec7364315167a4134a015f61e8266c6d446fc116a9ac4456e10d"}, + {file = "regex-2026.1.15-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:6e42844ad64194fa08d5ccb75fe6a459b9b08e6d7296bd704460168d58a388f3"}, + {file = "regex-2026.1.15-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:cfecdaa4b19f9ca534746eb3b55a5195d5c95b88cac32a205e981ec0a22b7d31"}, + {file = "regex-2026.1.15-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:08df9722d9b87834a3d701f3fca570b2be115654dbfd30179f30ab2f39d606d3"}, + {file = "regex-2026.1.15-cp313-cp313-win32.whl", hash = "sha256:d426616dae0967ca225ab12c22274eb816558f2f99ccb4a1d52ca92e8baf180f"}, + {file = "regex-2026.1.15-cp313-cp313-win_amd64.whl", hash = "sha256:febd38857b09867d3ed3f4f1af7d241c5c50362e25ef43034995b77a50df494e"}, + {file = "regex-2026.1.15-cp313-cp313-win_arm64.whl", hash = "sha256:8e32f7896f83774f91499d239e24cebfadbc07639c1494bb7213983842348337"}, + {file = "regex-2026.1.15-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:ec94c04149b6a7b8120f9f44565722c7ae31b7a6d2275569d2eefa76b83da3be"}, + {file = "regex-2026.1.15-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:40c86d8046915bb9aeb15d3f3f15b6fd500b8ea4485b30e1bbc799dab3fe29f8"}, + {file = "regex-2026.1.15-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:726ea4e727aba21643205edad8f2187ec682d3305d790f73b7a51c7587b64bdd"}, + {file = "regex-2026.1.15-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = 
"sha256:1cb740d044aff31898804e7bf1181cc72c03d11dfd19932b9911ffc19a79070a"}, + {file = "regex-2026.1.15-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:05d75a668e9ea16f832390d22131fe1e8acc8389a694c8febc3e340b0f810b93"}, + {file = "regex-2026.1.15-cp313-cp313t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d991483606f3dbec93287b9f35596f41aa2e92b7c2ebbb935b63f409e243c9af"}, + {file = "regex-2026.1.15-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:194312a14819d3e44628a44ed6fea6898fdbecb0550089d84c403475138d0a09"}, + {file = "regex-2026.1.15-cp313-cp313t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:fe2fda4110a3d0bc163c2e0664be44657431440722c5c5315c65155cab92f9e5"}, + {file = "regex-2026.1.15-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:124dc36c85d34ef2d9164da41a53c1c8c122cfb1f6e1ec377a1f27ee81deb794"}, + {file = "regex-2026.1.15-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:a1774cd1981cd212506a23a14dba7fdeaee259f5deba2df6229966d9911e767a"}, + {file = "regex-2026.1.15-cp313-cp313t-musllinux_1_2_riscv64.whl", hash = "sha256:b5f7d8d2867152cdb625e72a530d2ccb48a3d199159144cbdd63870882fb6f80"}, + {file = "regex-2026.1.15-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:492534a0ab925d1db998defc3c302dae3616a2fc3fe2e08db1472348f096ddf2"}, + {file = "regex-2026.1.15-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:c661fc820cfb33e166bf2450d3dadbda47c8d8981898adb9b6fe24e5e582ba60"}, + {file = "regex-2026.1.15-cp313-cp313t-win32.whl", hash = "sha256:99ad739c3686085e614bf77a508e26954ff1b8f14da0e3765ff7abbf7799f952"}, + {file = "regex-2026.1.15-cp313-cp313t-win_amd64.whl", hash = "sha256:32655d17905e7ff8ba5c764c43cb124e34a9245e45b83c22e81041e1071aee10"}, + {file = "regex-2026.1.15-cp313-cp313t-win_arm64.whl", hash = "sha256:b2a13dd6a95e95a489ca242319d18fc02e07ceb28fa9ad146385194d95b3c829"}, + 
{file = "regex-2026.1.15-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:d920392a6b1f353f4aa54328c867fec3320fa50657e25f64abf17af054fc97ac"}, + {file = "regex-2026.1.15-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:b5a28980a926fa810dbbed059547b02783952e2efd9c636412345232ddb87ff6"}, + {file = "regex-2026.1.15-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:621f73a07595d83f28952d7bd1e91e9d1ed7625fb7af0064d3516674ec93a2a2"}, + {file = "regex-2026.1.15-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3d7d92495f47567a9b1669c51fc8d6d809821849063d168121ef801bbc213846"}, + {file = "regex-2026.1.15-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:8dd16fba2758db7a3780a051f245539c4451ca20910f5a5e6ea1c08d06d4a76b"}, + {file = "regex-2026.1.15-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:1e1808471fbe44c1a63e5f577a1d5f02fe5d66031dcbdf12f093ffc1305a858e"}, + {file = "regex-2026.1.15-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0751a26ad39d4f2ade8fe16c59b2bf5cb19eb3d2cd543e709e583d559bd9efde"}, + {file = "regex-2026.1.15-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:0f0c7684c7f9ca241344ff95a1de964f257a5251968484270e91c25a755532c5"}, + {file = "regex-2026.1.15-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:74f45d170a21df41508cb67165456538425185baaf686281fa210d7e729abc34"}, + {file = "regex-2026.1.15-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:f1862739a1ffb50615c0fde6bae6569b5efbe08d98e59ce009f68a336f64da75"}, + {file = "regex-2026.1.15-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:453078802f1b9e2b7303fb79222c054cb18e76f7bdc220f7530fdc85d319f99e"}, + {file = "regex-2026.1.15-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:a30a68e89e5a218b8b23a52292924c1f4b245cb0c68d1cce9aec9bbda6e2c160"}, + {file 
= "regex-2026.1.15-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:9479cae874c81bf610d72b85bb681a94c95722c127b55445285fb0e2c82db8e1"}, + {file = "regex-2026.1.15-cp314-cp314-win32.whl", hash = "sha256:d639a750223132afbfb8f429c60d9d318aeba03281a5f1ab49f877456448dcf1"}, + {file = "regex-2026.1.15-cp314-cp314-win_amd64.whl", hash = "sha256:4161d87f85fa831e31469bfd82c186923070fc970b9de75339b68f0c75b51903"}, + {file = "regex-2026.1.15-cp314-cp314-win_arm64.whl", hash = "sha256:91c5036ebb62663a6b3999bdd2e559fd8456d17e2b485bf509784cd31a8b1705"}, + {file = "regex-2026.1.15-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:ee6854c9000a10938c79238de2379bea30c82e4925a371711af45387df35cab8"}, + {file = "regex-2026.1.15-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:2c2b80399a422348ce5de4fe40c418d6299a0fa2803dd61dc0b1a2f28e280fcf"}, + {file = "regex-2026.1.15-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:dca3582bca82596609959ac39e12b7dad98385b4fefccb1151b937383cec547d"}, + {file = "regex-2026.1.15-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ef71d476caa6692eea743ae5ea23cde3260677f70122c4d258ca952e5c2d4e84"}, + {file = "regex-2026.1.15-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:c243da3436354f4af6c3058a3f81a97d47ea52c9bd874b52fd30274853a1d5df"}, + {file = "regex-2026.1.15-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:8355ad842a7c7e9e5e55653eade3b7d1885ba86f124dd8ab1f722f9be6627434"}, + {file = "regex-2026.1.15-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f192a831d9575271a22d804ff1a5355355723f94f31d9eef25f0d45a152fdc1a"}, + {file = "regex-2026.1.15-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:166551807ec20d47ceaeec380081f843e88c8949780cd42c40f18d16168bed10"}, + {file = 
"regex-2026.1.15-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:f9ca1cbdc0fbfe5e6e6f8221ef2309988db5bcede52443aeaee9a4ad555e0dac"}, + {file = "regex-2026.1.15-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:b30bcbd1e1221783c721483953d9e4f3ab9c5d165aa709693d3f3946747b1aea"}, + {file = "regex-2026.1.15-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:2a8d7b50c34578d0d3bf7ad58cde9652b7d683691876f83aedc002862a35dc5e"}, + {file = "regex-2026.1.15-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:9d787e3310c6a6425eb346be4ff2ccf6eece63017916fd77fe8328c57be83521"}, + {file = "regex-2026.1.15-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:619843841e220adca114118533a574a9cd183ed8a28b85627d2844c500a2b0db"}, + {file = "regex-2026.1.15-cp314-cp314t-win32.whl", hash = "sha256:e90b8db97f6f2c97eb045b51a6b2c5ed69cedd8392459e0642d4199b94fabd7e"}, + {file = "regex-2026.1.15-cp314-cp314t-win_amd64.whl", hash = "sha256:5ef19071f4ac9f0834793af85bd04a920b4407715624e40cb7a0631a11137cdf"}, + {file = "regex-2026.1.15-cp314-cp314t-win_arm64.whl", hash = "sha256:ca89c5e596fc05b015f27561b3793dc2fa0917ea0d7507eebb448efd35274a70"}, + {file = "regex-2026.1.15-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:55b4ea996a8e4458dd7b584a2f89863b1655dd3d17b88b46cbb9becc495a0ec5"}, + {file = "regex-2026.1.15-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7e1e28be779884189cdd57735e997f282b64fd7ccf6e2eef3e16e57d7a34a815"}, + {file = "regex-2026.1.15-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0057de9eaef45783ff69fa94ae9f0fd906d629d0bd4c3217048f46d1daa32e9b"}, + {file = "regex-2026.1.15-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:cc7cd0b2be0f0269283a45c0d8b2c35e149d1319dcb4a43c9c3689fa935c1ee6"}, + {file = "regex-2026.1.15-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:8db052bbd981e1666f09e957f3790ed74080c2229007c1dd67afdbf0b469c48b"}, + {file = 
"regex-2026.1.15-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:343db82cb3712c31ddf720f097ef17c11dab2f67f7a3e7be976c4f82eba4e6df"}, + {file = "regex-2026.1.15-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:55e9d0118d97794367309635df398bdfd7c33b93e2fdfa0b239661cd74b4c14e"}, + {file = "regex-2026.1.15-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:008b185f235acd1e53787333e5690082e4f156c44c87d894f880056089e9bc7c"}, + {file = "regex-2026.1.15-cp39-cp39-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:fd65af65e2aaf9474e468f9e571bd7b189e1df3a61caa59dcbabd0000e4ea839"}, + {file = "regex-2026.1.15-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:f42e68301ff4afee63e365a5fc302b81bb8ba31af625a671d7acb19d10168a8c"}, + {file = "regex-2026.1.15-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:f7792f27d3ee6e0244ea4697d92b825f9a329ab5230a78c1a68bd274e64b5077"}, + {file = "regex-2026.1.15-cp39-cp39-musllinux_1_2_riscv64.whl", hash = "sha256:dbaf3c3c37ef190439981648ccbf0c02ed99ae066087dd117fcb616d80b010a4"}, + {file = "regex-2026.1.15-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:adc97a9077c2696501443d8ad3fa1b4fc6d131fc8fd7dfefd1a723f89071cf0a"}, + {file = "regex-2026.1.15-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:069f56a7bf71d286a6ff932a9e6fb878f151c998ebb2519a9f6d1cee4bffdba3"}, + {file = "regex-2026.1.15-cp39-cp39-win32.whl", hash = "sha256:ea4e6b3566127fda5e007e90a8fd5a4169f0cf0619506ed426db647f19c8454a"}, + {file = "regex-2026.1.15-cp39-cp39-win_amd64.whl", hash = "sha256:cda1ed70d2b264952e88adaa52eea653a33a1b98ac907ae2f86508eb44f65cdc"}, + {file = "regex-2026.1.15-cp39-cp39-win_arm64.whl", hash = "sha256:b325d4714c3c48277bfea1accd94e193ad6ed42b4bad79ad64f3b8f8a31260a5"}, + {file = "regex-2026.1.15.tar.gz", hash = "sha256:164759aa25575cbc0651bef59a0b18353e54300d79ace8084c818ad8ac72b7d5"}, +] + [[package]] name = 
"reportlab" version = "4.4.9" @@ -2378,6 +3984,53 @@ rich = ">=12" dev = ["inline-snapshot (>=0.24)", "jsonschema (>=4)", "mypy (>=1.14.1)", "nodeenv (>=1.9.1)", "packaging (>=25)", "pre-commit (>=3.5)", "pytest (>=8.3.5)", "pytest-cov (>=5)", "rich-codex (>=1.2.11)", "ruff (>=0.12.4)", "typer (>=0.15)", "types-setuptools (>=75.8.0.20250110)"] docs = ["markdown-include (>=0.8.1)", "mike (>=2.1.3)", "mkdocs-github-admonitions-plugin (>=0.1.1)", "mkdocs-glightbox (>=0.4)", "mkdocs-include-markdown-plugin (>=7.1.7) ; python_version >= \"3.9\"", "mkdocs-material-extensions (>=1.3.1)", "mkdocs-material[imaging] (>=9.5.18,<9.6.0)", "mkdocs-redirects (>=1.2.2)", "mkdocs-rss-plugin (>=1.15)", "mkdocs[docs] (>=1.6.1)", "mkdocstrings[python] (>=0.26.1)", "rich-codex (>=1.2.11)", "typer (>=0.15)"] +[[package]] +name = "safetensors" +version = "0.7.0" +description = "" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "safetensors-0.7.0-cp38-abi3-macosx_10_12_x86_64.whl", hash = "sha256:c82f4d474cf725255d9e6acf17252991c3c8aac038d6ef363a4bf8be2f6db517"}, + {file = "safetensors-0.7.0-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:94fd4858284736bb67a897a41608b5b0c2496c9bdb3bf2af1fa3409127f20d57"}, + {file = "safetensors-0.7.0-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e07d91d0c92a31200f25351f4acb2bc6aff7f48094e13ebb1d0fb995b54b6542"}, + {file = "safetensors-0.7.0-cp38-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8469155f4cb518bafb4acf4865e8bb9d6804110d2d9bdcaa78564b9fd841e104"}, + {file = "safetensors-0.7.0-cp38-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:54bef08bf00a2bff599982f6b08e8770e09cc012d7bba00783fc7ea38f1fb37d"}, + {file = "safetensors-0.7.0-cp38-abi3-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:42cb091236206bb2016d245c377ed383aa7f78691748f3bb6ee1bfa51ae2ce6a"}, + {file = 
"safetensors-0.7.0-cp38-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dac7252938f0696ddea46f5e855dd3138444e82236e3be475f54929f0c510d48"}, + {file = "safetensors-0.7.0-cp38-abi3-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1d060c70284127fa805085d8f10fbd0962792aed71879d00864acda69dbab981"}, + {file = "safetensors-0.7.0-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:cdab83a366799fa730f90a4ebb563e494f28e9e92c4819e556152ad55e43591b"}, + {file = "safetensors-0.7.0-cp38-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:672132907fcad9f2aedcb705b2d7b3b93354a2aec1b2f706c4db852abe338f85"}, + {file = "safetensors-0.7.0-cp38-abi3-musllinux_1_2_i686.whl", hash = "sha256:5d72abdb8a4d56d4020713724ba81dac065fedb7f3667151c4a637f1d3fb26c0"}, + {file = "safetensors-0.7.0-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:b0f6d66c1c538d5a94a73aa9ddca8ccc4227e6c9ff555322ea40bdd142391dd4"}, + {file = "safetensors-0.7.0-cp38-abi3-win32.whl", hash = "sha256:c74af94bf3ac15ac4d0f2a7c7b4663a15f8c2ab15ed0fc7531ca61d0835eccba"}, + {file = "safetensors-0.7.0-cp38-abi3-win_amd64.whl", hash = "sha256:d1239932053f56f3456f32eb9625590cc7582e905021f94636202a864d470755"}, + {file = "safetensors-0.7.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f4729811a6640d019a4b7ba8638ee2fd21fa5ca8c7e7bdf0fed62068fcaac737"}, + {file = "safetensors-0.7.0-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:12f49080303fa6bb424b362149a12949dfbbf1e06811a88f2307276b0c131afd"}, + {file = "safetensors-0.7.0-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0071bffba4150c2f46cae1432d31995d77acfd9f8db598b5d1a2ce67e8440ad2"}, + {file = "safetensors-0.7.0-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:473b32699f4200e69801bf5abf93f1a4ecd432a70984df164fc22ccf39c4a6f3"}, + {file = 
"safetensors-0.7.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7b95a3fa7b3abb9b5b0e07668e808364d0d40f6bbbf9ae0faa8b5b210c97b140"}, + {file = "safetensors-0.7.0-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:cfdead2f57330d76aa7234051dadfa7d4eedc0e5a27fd08e6f96714a92b00f09"}, + {file = "safetensors-0.7.0-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dc92bc2db7b45bda4510e4f51c59b00fe80b2d6be88928346e4294ce1c2abe7c"}, + {file = "safetensors-0.7.0-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6999421eb8ba9df4450a16d9184fcb7bef26240b9f98e95401f17af6c2210b71"}, + {file = "safetensors-0.7.0.tar.gz", hash = "sha256:07663963b67e8bd9f0b8ad15bb9163606cd27cc5a1b96235a50d8369803b96b0"}, +] + +[package.extras] +all = ["safetensors[jax]", "safetensors[numpy]", "safetensors[paddlepaddle]", "safetensors[pinned-tf]", "safetensors[quality]", "safetensors[testing]", "safetensors[torch]"] +dev = ["safetensors[all]"] +jax = ["flax (>=0.6.3)", "jax (>=0.3.25)", "jaxlib (>=0.3.25)", "safetensors[numpy]"] +mlx = ["mlx (>=0.0.9)"] +numpy = ["numpy (>=1.21.6)"] +paddlepaddle = ["paddlepaddle (>=2.4.1)", "safetensors[numpy]"] +pinned-tf = ["safetensors[numpy]", "tensorflow (==2.18.0)"] +quality = ["ruff"] +tensorflow = ["safetensors[numpy]", "tensorflow (>=2.11.0)"] +testing = ["h5py (>=3.7.0)", "huggingface-hub (>=0.12.1)", "hypothesis (>=6.70.2)", "pytest (>=7.2.0)", "pytest-benchmark (>=4.0.0)", "safetensors[numpy]", "setuptools-rust (>=1.5.2)"] +testingfree = ["huggingface-hub (>=0.12.1)", "hypothesis (>=6.70.2)", "pytest (>=7.2.0)", "pytest-benchmark (>=4.0.0)", "safetensors[numpy]", "setuptools-rust (>=1.5.2)"] +torch = ["packaging", "safetensors[numpy]", "torch (>=1.10)"] + [[package]] name = "scikit-learn" version = "1.8.0" @@ -2519,6 +4172,28 @@ dev = ["click (<8.3.0)", "cython-lint (>=0.12.2)", "mypy (==1.10.0)", "pycodesty doc = 
["intersphinx_registry", "jupyterlite-pyodide-kernel", "jupyterlite-sphinx (>=0.19.1)", "jupytext", "linkify-it-py", "matplotlib (>=3.5)", "myst-nb (>=1.2.0)", "numpydoc", "pooch", "pydata-sphinx-theme (>=0.15.2)", "sphinx (>=5.0.0,<8.2.0)", "sphinx-copybutton", "sphinx-design (>=0.4.0)", "tabulate"] test = ["Cython", "array-api-strict (>=2.3.1)", "asv", "gmpy2", "hypothesis (>=6.30)", "meson", "mpmath", "ninja ; sys_platform != \"emscripten\"", "pooch", "pytest (>=8.0.0)", "pytest-cov", "pytest-timeout", "pytest-xdist", "scikit-umfpack", "threadpoolctl"] +[[package]] +name = "seaborn" +version = "0.13.2" +description = "Statistical data visualization" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "seaborn-0.13.2-py3-none-any.whl", hash = "sha256:636f8336facf092165e27924f223d3c62ca560b1f2bb5dff7ab7fad265361987"}, + {file = "seaborn-0.13.2.tar.gz", hash = "sha256:93e60a40988f4d65e9f4885df477e2fdaff6b73a9ded434c1ab356dd57eefff7"}, +] + +[package.dependencies] +matplotlib = ">=3.4,<3.6.1 || >3.6.1" +numpy = ">=1.20,<1.24.0 || >1.24.0" +pandas = ">=1.2" + +[package.extras] +dev = ["flake8", "flit", "mypy", "pandas-stubs", "pre-commit", "pytest", "pytest-cov", "pytest-xdist"] +docs = ["ipykernel", "nbconvert", "numpydoc", "pydata_sphinx_theme (==0.10.0rc2)", "pyyaml", "sphinx (<6.0.0)", "sphinx-copybutton", "sphinx-design", "sphinx-issues"] +stats = ["scipy (>=1.7)", "statsmodels (>=0.12)"] + [[package]] name = "setuptools" version = "80.10.2" @@ -2540,6 +4215,18 @@ enabler = ["pytest-enabler (>=2.2)"] test = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21) ; python_version >= \"3.9\" and sys_platform != \"cygwin\"", "jaraco.envs (>=2.2)", "jaraco.path (>=3.7.2)", "jaraco.test (>=5.5)", "packaging (>=24.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.*)", "pytest-home (>=0.5)", "pytest-perf ; sys_platform != \"cygwin\"", "pytest-subprocess", 
"pytest-timeout", "pytest-xdist (>=3)", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel (>=0.44.0)"] type = ["importlib_metadata (>=7.0.2) ; python_version < \"3.10\"", "jaraco.develop (>=7.21) ; sys_platform != \"cygwin\"", "mypy (==1.14.*)", "pytest-mypy"] +[[package]] +name = "shellingham" +version = "1.5.4" +description = "Tool to Detect Surrounding Shell" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "shellingham-1.5.4-py2.py3-none-any.whl", hash = "sha256:7ecfff8f2fd72616f7481040475a65b2bf8af90a56c89140852d1120324e8686"}, + {file = "shellingham-1.5.4.tar.gz", hash = "sha256:8dbca0739d487e5bd35ab3ca4b36e11c4078f3a234bfce294b0a0291363404de"}, +] + [[package]] name = "simplejson" version = "3.20.2" @@ -2684,6 +4371,56 @@ files = [ {file = "soupsieve-2.8.3.tar.gz", hash = "sha256:3267f1eeea4251fb42728b6dfb746edc9acaffc4a45b27e19450b676586e8349"}, ] +[[package]] +name = "stack-data" +version = "0.6.3" +description = "Extract data from python stack frames and tracebacks for informative displays" +optional = false +python-versions = "*" +groups = ["main"] +files = [ + {file = "stack_data-0.6.3-py3-none-any.whl", hash = "sha256:d5558e0c25a4cb0853cddad3d77da9891a08cb85dd9f9f91b9f8cd66e511e695"}, + {file = "stack_data-0.6.3.tar.gz", hash = "sha256:836a778de4fec4dcd1dcd89ed8abff8a221f58308462e1c4aa2a3cf30148f0b9"}, +] + +[package.dependencies] +asttokens = ">=2.1.0" +executing = ">=1.2.0" +pure-eval = "*" + +[package.extras] +tests = ["cython", "littleutils", "pygments", "pytest", "typeguard"] + +[[package]] +name = "structlog" +version = "25.5.0" +description = "Structured Logging for Python" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "structlog-25.5.0-py3-none-any.whl", hash = "sha256:a8453e9b9e636ec59bd9e79bbd4a72f025981b3ba0f5837aebf48f02f37a7f9f"}, + {file = "structlog-25.5.0.tar.gz", hash = "sha256:098522a3bebed9153d4570c6d0288abf80a031dfdb2048d59a49e9dc2190fc98"}, +] + 
+[[package]] +name = "sympy" +version = "1.14.0" +description = "Computer algebra system (CAS) in Python" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "sympy-1.14.0-py3-none-any.whl", hash = "sha256:e091cc3e99d2141a0ba2847328f5479b05d94a6635cb96148ccb3f34671bd8f5"}, + {file = "sympy-1.14.0.tar.gz", hash = "sha256:d3d3fe8df1e5a0b42f0e7bdf50541697dbe7d23746e894990c030e2b05e72517"}, +] + +[package.dependencies] +mpmath = ">=1.1.0,<1.4" + +[package.extras] +dev = ["hypothesis (>=6.70.0)", "pytest (>=7.1.0)"] + [[package]] name = "ta" version = "0.11.0" @@ -2859,6 +4596,166 @@ files = [ {file = "threadpoolctl-3.6.0.tar.gz", hash = "sha256:8ab8b4aa3491d812b623328249fab5302a68d2d71745c8a4c719a2fcaba9f44e"}, ] +[[package]] +name = "tokenizers" +version = "0.22.2" +description = "" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "tokenizers-0.22.2-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:544dd704ae7238755d790de45ba8da072e9af3eea688f698b137915ae959281c"}, + {file = "tokenizers-0.22.2-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:1e418a55456beedca4621dbab65a318981467a2b188e982a23e117f115ce5001"}, + {file = "tokenizers-0.22.2-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2249487018adec45d6e3554c71d46eb39fa8ea67156c640f7513eb26f318cec7"}, + {file = "tokenizers-0.22.2-cp39-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:25b85325d0815e86e0bac263506dd114578953b7b53d7de09a6485e4a160a7dd"}, + {file = "tokenizers-0.22.2-cp39-abi3-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bfb88f22a209ff7b40a576d5324bf8286b519d7358663db21d6246fb17eea2d5"}, + {file = "tokenizers-0.22.2-cp39-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1c774b1276f71e1ef716e5486f21e76333464f47bece56bbd554485982a9e03e"}, + {file = "tokenizers-0.22.2-cp39-abi3-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:df6c4265b289083bf710dff49bc51ef252f9d5be33a45ee2bed151114a56207b"}, + {file = "tokenizers-0.22.2-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:369cc9fc8cc10cb24143873a0d95438bb8ee257bb80c71989e3ee290e8d72c67"}, + {file = "tokenizers-0.22.2-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:29c30b83d8dcd061078b05ae0cb94d3c710555fbb44861139f9f83dcca3dc3e4"}, + {file = "tokenizers-0.22.2-cp39-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:37ae80a28c1d3265bb1f22464c856bd23c02a05bb211e56d0c5301a435be6c1a"}, + {file = "tokenizers-0.22.2-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:791135ee325f2336f498590eb2f11dc5c295232f288e75c99a36c5dbce63088a"}, + {file = "tokenizers-0.22.2-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:38337540fbbddff8e999d59970f3c6f35a82de10053206a7562f1ea02d046fa5"}, + {file = "tokenizers-0.22.2-cp39-abi3-win32.whl", hash = "sha256:a6bf3f88c554a2b653af81f3204491c818ae2ac6fbc09e76ef4773351292bc92"}, + {file = "tokenizers-0.22.2-cp39-abi3-win_amd64.whl", hash = "sha256:c9ea31edff2968b44a88f97d784c2f16dc0729b8b143ed004699ebca91f05c48"}, + {file = "tokenizers-0.22.2-cp39-abi3-win_arm64.whl", hash = "sha256:9ce725d22864a1e965217204946f830c37876eee3b2ba6fc6255e8e903d5fcbc"}, + {file = "tokenizers-0.22.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:753d47ebd4542742ef9261d9da92cd545b2cacbb48349a1225466745bb866ec4"}, + {file = "tokenizers-0.22.2-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e10bf9113d209be7cd046d40fbabbaf3278ff6d18eb4da4c500443185dc1896c"}, + {file = "tokenizers-0.22.2-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:64d94e84f6660764e64e7e0b22baa72f6cd942279fdbb21d46abd70d179f0195"}, + {file = "tokenizers-0.22.2-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f01a9c019878532f98927d2bacb79bbb404b43d3437455522a00a30718cdedb5"}, + {file = 
"tokenizers-0.22.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:319f659ee992222f04e58f84cbf407cfa66a65fe3a8de44e8ad2bc53e7d99012"}, + {file = "tokenizers-0.22.2-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1e50f8554d504f617d9e9d6e4c2c2884a12b388a97c5c77f0bc6cf4cd032feee"}, + {file = "tokenizers-0.22.2-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1a62ba2c5faa2dd175aaeed7b15abf18d20266189fb3406c5d0550dd34dd5f37"}, + {file = "tokenizers-0.22.2-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:143b999bdc46d10febb15cbffb4207ddd1f410e2c755857b5a0797961bbdc113"}, + {file = "tokenizers-0.22.2.tar.gz", hash = "sha256:473b83b915e547aa366d1eee11806deaf419e17be16310ac0a14077f1e28f917"}, +] + +[package.dependencies] +huggingface-hub = ">=0.16.4,<2.0" + +[package.extras] +dev = ["tokenizers[testing]"] +docs = ["setuptools-rust", "sphinx", "sphinx-rtd-theme"] +testing = ["datasets", "numpy", "pytest", "pytest-asyncio", "requests", "ruff", "ty"] + +[[package]] +name = "torch" +version = "2.10.0" +description = "Tensors and Dynamic neural networks in Python with strong GPU acceleration" +optional = false +python-versions = ">=3.10" +groups = ["main"] +files = [ + {file = "torch-2.10.0-2-cp310-none-macosx_11_0_arm64.whl", hash = "sha256:2b980edd8d7c0a68c4e951ee1856334a43193f98730d97408fbd148c1a933313"}, + {file = "torch-2.10.0-2-cp311-none-macosx_11_0_arm64.whl", hash = "sha256:418997cb02d0a0f1497cf6a09f63166f9f5df9f3e16c8a716ab76a72127c714f"}, + {file = "torch-2.10.0-2-cp312-none-macosx_11_0_arm64.whl", hash = "sha256:13ec4add8c3faaed8d13e0574f5cd4a323c11655546f91fbe6afa77b57423574"}, + {file = "torch-2.10.0-2-cp313-none-macosx_11_0_arm64.whl", hash = "sha256:e521c9f030a3774ed770a9c011751fb47c4d12029a3d6522116e48431f2ff89e"}, + {file = "torch-2.10.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = 
"sha256:5276fa790a666ee8becaffff8acb711922252521b28fbce5db7db5cf9cb2026d"}, + {file = "torch-2.10.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:aaf663927bcd490ae971469a624c322202a2a1e68936eb952535ca4cd3b90444"}, + {file = "torch-2.10.0-cp310-cp310-win_amd64.whl", hash = "sha256:a4be6a2a190b32ff5c8002a0977a25ea60e64f7ba46b1be37093c141d9c49aeb"}, + {file = "torch-2.10.0-cp310-none-macosx_11_0_arm64.whl", hash = "sha256:35e407430795c8d3edb07a1d711c41cc1f9eaddc8b2f1cc0a165a6767a8fb73d"}, + {file = "torch-2.10.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:3282d9febd1e4e476630a099692b44fdc214ee9bf8ee5377732d9d9dfe5712e4"}, + {file = "torch-2.10.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:a2f9edd8dbc99f62bc4dfb78af7bf89499bca3d753423ac1b4e06592e467b763"}, + {file = "torch-2.10.0-cp311-cp311-win_amd64.whl", hash = "sha256:29b7009dba4b7a1c960260fc8ac85022c784250af43af9fb0ebafc9883782ebd"}, + {file = "torch-2.10.0-cp311-none-macosx_11_0_arm64.whl", hash = "sha256:b7bd80f3477b830dd166c707c5b0b82a898e7b16f59a7d9d42778dd058272e8b"}, + {file = "torch-2.10.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:5fd4117d89ffd47e3dcc71e71a22efac24828ad781c7e46aaaf56bf7f2796acf"}, + {file = "torch-2.10.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:787124e7db3b379d4f1ed54dd12ae7c741c16a4d29b49c0226a89bea50923ffb"}, + {file = "torch-2.10.0-cp312-cp312-win_amd64.whl", hash = "sha256:2c66c61f44c5f903046cc696d088e21062644cbe541c7f1c4eaae88b2ad23547"}, + {file = "torch-2.10.0-cp312-none-macosx_11_0_arm64.whl", hash = "sha256:6d3707a61863d1c4d6ebba7be4ca320f42b869ee657e9b2c21c736bf17000294"}, + {file = "torch-2.10.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:5c4d217b14741e40776dd7074d9006fd28b8a97ef5654db959d8635b2fe5f29b"}, + {file = "torch-2.10.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:6b71486353fce0f9714ca0c9ef1c850a2ae766b409808acd58e9678a3edb7738"}, + {file = "torch-2.10.0-cp313-cp313-win_amd64.whl", hash = 
"sha256:c2ee399c644dc92ef7bc0d4f7e74b5360c37cdbe7c5ba11318dda49ffac2bc57"}, + {file = "torch-2.10.0-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:3202429f58309b9fa96a614885eace4b7995729f44beb54d3e4a47773649d382"}, + {file = "torch-2.10.0-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:aae1b29cd68e50a9397f5ee897b9c24742e9e306f88a807a27d617f07adb3bd8"}, + {file = "torch-2.10.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:6021db85958db2f07ec94e1bc77212721ba4920c12a18dc552d2ae36a3eb163f"}, + {file = "torch-2.10.0-cp313-cp313t-win_amd64.whl", hash = "sha256:ff43db38af76fda183156153983c9a096fc4c78d0cd1e07b14a2314c7f01c2c8"}, + {file = "torch-2.10.0-cp313-none-macosx_11_0_arm64.whl", hash = "sha256:cdf2a523d699b70d613243211ecaac14fe9c5df8a0b0a9c02add60fb2a413e0f"}, + {file = "torch-2.10.0-cp314-cp314-macosx_14_0_arm64.whl", hash = "sha256:bf0d9ff448b0218e0433aeb198805192346c4fd659c852370d5cc245f602a06a"}, + {file = "torch-2.10.0-cp314-cp314-manylinux_2_28_aarch64.whl", hash = "sha256:233aed0659a2503b831d8a67e9da66a62c996204c0bba4f4c442ccc0c68a3f60"}, + {file = "torch-2.10.0-cp314-cp314-manylinux_2_28_x86_64.whl", hash = "sha256:682497e16bdfa6efeec8cde66531bc8d1fbbbb4d8788ec6173c089ed3cc2bfe5"}, + {file = "torch-2.10.0-cp314-cp314-win_amd64.whl", hash = "sha256:6528f13d2a8593a1a412ea07a99812495bec07e9224c28b2a25c0a30c7da025c"}, + {file = "torch-2.10.0-cp314-cp314t-macosx_14_0_arm64.whl", hash = "sha256:f5ab4ba32383061be0fb74bda772d470140a12c1c3b58a0cfbf3dae94d164c28"}, + {file = "torch-2.10.0-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:716b01a176c2a5659c98f6b01bf868244abdd896526f1c692712ab36dbaf9b63"}, + {file = "torch-2.10.0-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:d8f5912ba938233f86361e891789595ff35ca4b4e2ac8fe3670895e5976731d6"}, + {file = "torch-2.10.0-cp314-cp314t-win_amd64.whl", hash = "sha256:71283a373f0ee2c89e0f0d5f446039bdabe8dbc3c9ccf35f0f784908b0acd185"}, +] + +[package.dependencies] +cuda-bindings = {version 
= "12.9.4", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +filelock = "*" +fsspec = ">=0.8.5" +jinja2 = "*" +networkx = ">=2.5.1" +nvidia-cublas-cu12 = {version = "12.8.4.1", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cuda-cupti-cu12 = {version = "12.8.90", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cuda-nvrtc-cu12 = {version = "12.8.93", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cuda-runtime-cu12 = {version = "12.8.90", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cudnn-cu12 = {version = "9.10.2.21", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cufft-cu12 = {version = "11.3.3.83", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cufile-cu12 = {version = "1.13.1.3", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-curand-cu12 = {version = "10.3.9.90", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cusolver-cu12 = {version = "11.7.3.90", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cusparse-cu12 = {version = "12.5.8.93", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cusparselt-cu12 = {version = "0.7.1", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-nccl-cu12 = {version = "2.27.5", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-nvjitlink-cu12 = {version = "12.8.93", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-nvshmem-cu12 = {version = "3.4.5", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-nvtx-cu12 = {version = "12.8.90", markers = "platform_system == \"Linux\" and platform_machine == 
\"x86_64\""} +setuptools = {version = "*", markers = "python_version >= \"3.12\""} +sympy = ">=1.13.3" +triton = {version = "3.6.0", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +typing-extensions = ">=4.10.0" + +[package.extras] +opt-einsum = ["opt-einsum (>=3.3)"] +optree = ["optree (>=0.13.0)"] +pyyaml = ["pyyaml"] + +[[package]] +name = "tornado" +version = "6.5.4" +description = "Tornado is a Python web framework and asynchronous networking library, originally developed at FriendFeed." +optional = false +python-versions = ">=3.9" +groups = ["main"] +markers = "sys_platform != \"emscripten\"" +files = [ + {file = "tornado-6.5.4-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:d6241c1a16b1c9e4cc28148b1cda97dd1c6cb4fb7068ac1bedc610768dff0ba9"}, + {file = "tornado-6.5.4-cp39-abi3-macosx_10_9_x86_64.whl", hash = "sha256:2d50f63dda1d2cac3ae1fa23d254e16b5e38153758470e9956cbc3d813d40843"}, + {file = "tornado-6.5.4-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d1cf66105dc6acb5af613c054955b8137e34a03698aa53272dbda4afe252be17"}, + {file = "tornado-6.5.4-cp39-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:50ff0a58b0dc97939d29da29cd624da010e7f804746621c78d14b80238669335"}, + {file = "tornado-6.5.4-cp39-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e5fb5e04efa54cf0baabdd10061eb4148e0be137166146fff835745f59ab9f7f"}, + {file = "tornado-6.5.4-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:9c86b1643b33a4cd415f8d0fe53045f913bf07b4a3ef646b735a6a86047dda84"}, + {file = "tornado-6.5.4-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:6eb82872335a53dd063a4f10917b3efd28270b56a33db69009606a0312660a6f"}, + {file = "tornado-6.5.4-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:6076d5dda368c9328ff41ab5d9dd3608e695e8225d1cd0fd1e006f05da3635a8"}, + {file = "tornado-6.5.4-cp39-abi3-win32.whl", hash = 
"sha256:1768110f2411d5cd281bac0a090f707223ce77fd110424361092859e089b38d1"}, + {file = "tornado-6.5.4-cp39-abi3-win_amd64.whl", hash = "sha256:fa07d31e0cd85c60713f2b995da613588aa03e1303d75705dca6af8babc18ddc"}, + {file = "tornado-6.5.4-cp39-abi3-win_arm64.whl", hash = "sha256:053e6e16701eb6cbe641f308f4c1a9541f91b6261991160391bfc342e8a551a1"}, + {file = "tornado-6.5.4.tar.gz", hash = "sha256:a22fa9047405d03260b483980635f0b041989d8bcc9a313f8fe18b411d84b1d7"}, +] + +[[package]] +name = "tqdm" +version = "4.67.3" +description = "Fast, Extensible Progress Meter" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "tqdm-4.67.3-py3-none-any.whl", hash = "sha256:ee1e4c0e59148062281c49d80b25b67771a127c85fc9676d3be5f243206826bf"}, + {file = "tqdm-4.67.3.tar.gz", hash = "sha256:7d825f03f89244ef73f1d4ce193cb1774a8179fd96f31d7e1dcde62092b960bb"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "platform_system == \"Windows\""} + +[package.extras] +dev = ["nbval", "pytest (>=6)", "pytest-asyncio (>=0.24)", "pytest-cov", "pytest-timeout"] +discord = ["requests"] +notebook = ["ipywidgets (>=6)"] +slack = ["slack-sdk"] +telegram = ["requests"] + [[package]] name = "tradingview-ta" version = "3.3.0" @@ -2874,6 +4771,139 @@ files = [ [package.dependencies] requests = "*" +[[package]] +name = "traitlets" +version = "5.14.3" +description = "Traitlets Python configuration system" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "traitlets-5.14.3-py3-none-any.whl", hash = "sha256:b74e89e397b1ed28cc831db7aea759ba6640cb3de13090ca145426688ff1ac4f"}, + {file = "traitlets-5.14.3.tar.gz", hash = "sha256:9ed0579d3502c94b4b3732ac120375cda96f923114522847de4b3bb98b96b6b7"}, +] + +[package.extras] +docs = ["myst-parser", "pydata-sphinx-theme", "sphinx"] +test = ["argcomplete (>=3.0.3)", "mypy (>=1.7.0)", "pre-commit", "pytest (>=7.0,<8.2)", "pytest-mock", "pytest-mypy-testing"] + +[[package]] +name = 
"transformers" +version = "5.2.0" +description = "Transformers: the model-definition framework for state-of-the-art machine learning models in text, vision, audio, and multimodal models, for both inference and training." +optional = false +python-versions = ">=3.10.0" +groups = ["main"] +files = [ + {file = "transformers-5.2.0-py3-none-any.whl", hash = "sha256:9ecaf243dc45bee11a7d93f8caf03746accc0cb069181bbf4ad8566c53e854b4"}, + {file = "transformers-5.2.0.tar.gz", hash = "sha256:0088b8b46ccc9eff1a1dca72b5d618a5ee3b1befc3e418c9512b35dea9f9a650"}, +] + +[package.dependencies] +huggingface-hub = ">=1.3.0,<2.0" +numpy = ">=1.17" +packaging = ">=20.0" +pyyaml = ">=5.1" +regex = "!=2019.12.17" +safetensors = ">=0.4.3" +tokenizers = ">=0.22.0,<=0.23.0" +tqdm = ">=4.27" +typer-slim = "*" + +[package.extras] +accelerate = ["accelerate (>=1.1.0)"] +all = ["Pillow (>=10.0.1,<=15.0)", "accelerate (>=1.1.0)", "av", "blobfile", "jinja2 (>=3.1.0)", "jmespath (>=1.0.1)", "kernels (>=0.10.2,<0.11)", "librosa", "mistral-common[image] (>=1.8.8)", "num2words", "phonemizer", "protobuf", "pyctcdecode (>=0.4.0)", "sentencepiece (>=0.1.91,!=0.1.92)", "tiktoken", "timm (>=1.0.23)", "torch (>=2.4)", "torchaudio", "torchvision"] +audio = ["librosa", "phonemizer", "pyctcdecode (>=0.4.0)", "torchaudio"] +benchmark = ["optimum-benchmark (>=0.3.0)"] +chat-template = ["jinja2 (>=3.1.0)", "jmespath (>=1.0.1)"] +codecarbon = ["codecarbon (>=2.8.1)"] +deepspeed = ["accelerate (>=1.1.0)", "deepspeed (>=0.9.3)"] +deepspeed-testing = ["GitPython (<3.1.19)", "accelerate (>=1.1.0)", "accelerate (>=1.1.0)", "beautifulsoup4", "datasets (>=2.15.0)", "datasets (>=2.15.0)", "deepspeed (>=0.9.3)", "dill (<0.3.5)", "evaluate (>=0.4.6)", "faiss-cpu", "fastapi", "filelock", "libcst", "mistral-common[image] (>=1.8.8)", "nltk (<=3.8.1)", "openai (>=1.98.0)", "optuna", "parameterized (>=0.9)", "protobuf", "protobuf", "psutil", "pydantic (>=2)", "pytest (>=7.2.0,<9.0.0)", "pytest-asyncio (>=1.2.0)", "pytest-env", 
"pytest-order", "pytest-random-order", "pytest-rerunfailures (<16.0)", "pytest-rich", "pytest-timeout", "pytest-xdist", "rich", "rich", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.14.10)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "sentencepiece (>=0.1.91,!=0.1.92)", "sentencepiece (>=0.1.91,!=0.1.92)", "starlette", "tensorboard", "timeout-decorator", "torch (>=2.4)", "urllib3 (<2.0.0)", "uvicorn"] +dev = ["GitPython (<3.1.19)", "Pillow (>=10.0.1,<=15.0)", "accelerate (>=1.1.0)", "accelerate (>=1.1.0)", "av", "beautifulsoup4", "blobfile", "datasets (>=2.15.0)", "datasets (>=2.15.0)", "dill (<0.3.5)", "evaluate (>=0.4.6)", "faiss-cpu", "fastapi", "filelock", "fugashi (>=1.0)", "ipadic (>=1.0.0,<2.0)", "jinja2 (>=3.1.0)", "jmespath (>=1.0.1)", "kernels (>=0.10.2,<0.11)", "libcst", "librosa", "mistral-common[image] (>=1.8.8)", "mistral-common[image] (>=1.8.8)", "nltk (<=3.8.1)", "num2words", "openai (>=1.98.0)", "parameterized (>=0.9)", "phonemizer", "protobuf", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pydantic (>=2)", "pytest (>=7.2.0,<9.0.0)", "pytest-asyncio (>=1.2.0)", "pytest-env", "pytest-order", "pytest-random-order", "pytest-rerunfailures (<16.0)", "pytest-rich", "pytest-timeout", "pytest-xdist", "rhoknp (>=1.1.0,<1.3.1)", "rich", "rich", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.14.10)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "sentencepiece (>=0.1.91,!=0.1.92)", "starlette", "sudachidict_core (>=20220729)", "sudachipy (>=0.6.6)", "tensorboard", "tiktoken", "timeout-decorator", "timm (>=1.0.23)", "torch (>=2.4)", "torch (>=2.4)", "torchaudio", "torchvision", "unidic (>=1.0.2)", "unidic_lite (>=1.0.7)", "urllib3 (<2.0.0)", "uvicorn"] +integrations = ["codecarbon (>=2.8.1)", "kernels (>=0.10.2,<0.11)", "optuna", "ray[tune] (>=2.7.0)"] +ja = ["fugashi (>=1.0)", "ipadic (>=1.0.0,<2.0)", "rhoknp (>=1.1.0,<1.3.1)", "sudachidict_core (>=20220729)", 
"sudachipy (>=0.6.6)", "unidic (>=1.0.2)", "unidic_lite (>=1.0.7)"] +kernels = ["kernels (>=0.10.2,<0.11)"] +mistral-common = ["mistral-common[image] (>=1.8.8)"] +num2words = ["num2words"] +open-telemetry = ["opentelemetry-api", "opentelemetry-exporter-otlp", "opentelemetry-sdk"] +optuna = ["optuna"] +quality = ["GitPython (<3.1.19)", "datasets (>=2.15.0)", "libcst", "rich", "ruff (==0.14.10)", "urllib3 (<2.0.0)"] +ray = ["ray[tune] (>=2.7.0)"] +retrieval = ["datasets (>=2.15.0)", "faiss-cpu"] +sagemaker = ["sagemaker (>=2.31.0)"] +sentencepiece = ["protobuf", "sentencepiece (>=0.1.91,!=0.1.92)"] +serving = ["accelerate (>=1.1.0)", "fastapi", "openai (>=1.98.0)", "pydantic (>=2)", "rich", "starlette", "torch (>=2.4)", "uvicorn"] +sklearn = ["scikit-learn"] +testing = ["GitPython (<3.1.19)", "accelerate (>=1.1.0)", "beautifulsoup4", "datasets (>=2.15.0)", "datasets (>=2.15.0)", "dill (<0.3.5)", "evaluate (>=0.4.6)", "faiss-cpu", "fastapi", "filelock", "libcst", "mistral-common[image] (>=1.8.8)", "nltk (<=3.8.1)", "openai (>=1.98.0)", "parameterized (>=0.9)", "protobuf", "psutil", "pydantic (>=2)", "pytest (>=7.2.0,<9.0.0)", "pytest-asyncio (>=1.2.0)", "pytest-env", "pytest-order", "pytest-random-order", "pytest-rerunfailures (<16.0)", "pytest-rich", "pytest-timeout", "pytest-xdist", "rich", "rich", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.14.10)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "sentencepiece (>=0.1.91,!=0.1.92)", "starlette", "tensorboard", "timeout-decorator", "torch (>=2.4)", "urllib3 (<2.0.0)", "uvicorn"] +tiktoken = ["blobfile", "tiktoken"] +timm = ["timm (>=1.0.23)"] +torch = ["accelerate (>=1.1.0)", "torch (>=2.4)"] +video = ["av"] +vision = ["Pillow (>=10.0.1,<=15.0)", "torchvision"] + +[[package]] +name = "triton" +version = "3.6.0" +description = "A language and compiler for custom Deep Learning operations" +optional = false +python-versions = "<3.15,>=3.10" +groups = ["main"] +markers = "platform_system == 
\"Linux\" and platform_machine == \"x86_64\"" +files = [ + {file = "triton-3.6.0-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6c723cfb12f6842a0ae94ac307dba7e7a44741d720a40cf0e270ed4a4e3be781"}, + {file = "triton-3.6.0-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a6550fae429e0667e397e5de64b332d1e5695b73650ee75a6146e2e902770bea"}, + {file = "triton-3.6.0-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:49df5ef37379c0c2b5c0012286f80174fcf0e073e5ade1ca9a86c36814553651"}, + {file = "triton-3.6.0-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e8e323d608e3a9bfcc2d9efcc90ceefb764a82b99dea12a86d643c72539ad5d3"}, + {file = "triton-3.6.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:374f52c11a711fd062b4bfbb201fd9ac0a5febd28a96fb41b4a0f51dde3157f4"}, + {file = "triton-3.6.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:74caf5e34b66d9f3a429af689c1c7128daba1d8208df60e81106b115c00d6fca"}, + {file = "triton-3.6.0-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:448e02fe6dc898e9e5aa89cf0ee5c371e99df5aa5e8ad976a80b93334f3494fd"}, + {file = "triton-3.6.0-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:10c7f76c6e72d2ef08df639e3d0d30729112f47a56b0c81672edc05ee5116ac9"}, + {file = "triton-3.6.0-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1722e172d34e32abc3eb7711d0025bb69d7959ebea84e3b7f7a341cd7ed694d6"}, + {file = "triton-3.6.0-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d002e07d7180fd65e622134fbd980c9a3d4211fb85224b56a0a0efbd422ab72f"}, + {file = "triton-3.6.0-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ef5523241e7d1abca00f1d240949eebdd7c673b005edbbce0aca95b8191f1d43"}, + {file = 
"triton-3.6.0-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a17a5d5985f0ac494ed8a8e54568f092f7057ef60e1b0fa09d3fd1512064e803"}, + {file = "triton-3.6.0-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0b3a97e8ed304dfa9bd23bb41ca04cdf6b2e617d5e782a8653d616037a5d537d"}, + {file = "triton-3.6.0-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:46bd1c1af4b6704e554cad2eeb3b0a6513a980d470ccfa63189737340c7746a7"}, +] + +[package.extras] +build = ["cmake (>=3.20,<4.0)", "lit"] +tests = ["autopep8", "isort", "llnl-hatchet", "numpy", "pytest", "pytest-forked", "pytest-xdist", "scipy (>=1.7.1)"] +tutorials = ["matplotlib", "pandas", "tabulate"] + +[[package]] +name = "typer" +version = "0.23.2" +description = "Typer, build great CLIs. Easy to code. Based on Python type hints." +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "typer-0.23.2-py3-none-any.whl", hash = "sha256:e9c8dc380f82450b3c851a9b9d5a0edf95d1d6456ae70c517d8b06a50c7a9978"}, + {file = "typer-0.23.2.tar.gz", hash = "sha256:a99706a08e54f1aef8bb6a8611503808188a4092808e86addff1828a208af0de"}, +] + +[package.dependencies] +annotated-doc = ">=0.0.2" +click = {version = ">=8.2.1", markers = "python_version >= \"3.10\""} +rich = ">=12.3.0" +shellingham = ">=1.3.0" + +[[package]] +name = "typer-slim" +version = "0.23.2" +description = "Typer, build great CLIs. Easy to code. Based on Python type hints." 
+optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "typer_slim-0.23.2-py3-none-any.whl", hash = "sha256:2bc3f67ac58fe40763b414c3c65f6fcc92c6b81393d0d89339663aa69252c688"}, + {file = "typer_slim-0.23.2.tar.gz", hash = "sha256:19714179f4717a891650d8d5d062e990a0a1ed23e8d6e0f502f5a800802b3cdf"}, +] + +[package.dependencies] +typer = ">=0.23.2" + [[package]] name = "typing-extensions" version = "4.15.0" @@ -2886,6 +4916,21 @@ files = [ {file = "typing_extensions-4.15.0.tar.gz", hash = "sha256:0cea48d173cc12fa28ecabc3b837ea3cf6f38c6d1136f85cbaaf598984861466"}, ] +[[package]] +name = "typing-inspection" +version = "0.4.2" +description = "Runtime typing introspection tools" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "typing_inspection-0.4.2-py3-none-any.whl", hash = "sha256:4ed1cacbdc298c220f1bd249ed5287caa16f34d44ef4e9c3d0cbad5b521545e7"}, + {file = "typing_inspection-0.4.2.tar.gz", hash = "sha256:ba561c48a67c5958007083d386c3295464928b01faa735ab8547c5692e87f464"}, +] + +[package.dependencies] +typing-extensions = ">=4.12.0" + [[package]] name = "tzdata" version = "2025.3" @@ -2916,6 +4961,18 @@ h2 = ["h2 (>=4,<5)"] socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] zstd = ["backports-zstd (>=1.0.0) ; python_version < \"3.14\""] +[[package]] +name = "wcwidth" +version = "0.6.0" +description = "Measures the displayed width of unicode strings in a terminal" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "wcwidth-0.6.0-py3-none-any.whl", hash = "sha256:1a3a1e510b553315f8e146c54764f4fb6264ffad731b3d78088cdb1478ffbdad"}, + {file = "wcwidth-0.6.0.tar.gz", hash = "sha256:cdc4e4262d6ef9a1a57e018384cbeb1208d8abbc64176027e2c2455c81313159"}, +] + [[package]] name = "websockets" version = "16.0" @@ -3110,6 +5167,18 @@ files = [ [package.extras] dev = ["pytest", "setuptools"] +[[package]] +name = "xyzservices" +version = "2025.11.0" +description = "Source of XYZ tiles providers" 
+optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "xyzservices-2025.11.0-py3-none-any.whl", hash = "sha256:de66a7599a8d6dad63980b77defd1d8f5a5a9cb5fc8774ea1c6e89ca7c2a3d2f"}, + {file = "xyzservices-2025.11.0.tar.gz", hash = "sha256:2fc72b49502b25023fd71e8f532fb4beddbbf0aa124d90ea25dba44f545e17ce"}, +] + [[package]] name = "yfinance" version = "0.2.66" @@ -3141,9 +5210,9 @@ nospam = ["requests_cache (>=1.0)", "requests_ratelimiter (>=0.3.1)"] repair = ["scipy (>=1.6.3)"] [extras] -dev = ["pytest", "pytest-cov"] +dev = ["pytest", "pytest-cov", "pytest-mock"] [metadata] lock-version = "2.1" python-versions = ">=3.12,<3.14" -content-hash = "a5b2092345e005931701651e4f0143a50cef72cf8b91e4088f78b3fe5d0d7649" +content-hash = "376c3be13c416a0bf9130739c8026a695257fcb68632b6b414638ecf4186a3d0" diff --git a/pyproject.toml b/pyproject.toml index e5634d4..20095a1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -42,14 +42,33 @@ dependencies = [ # Utilities "multitasking>=0.0.11", "reportlab (>=4.4.9,<5.0.0)", + + # Phase 1: Foundation + "pydantic-settings>=2.0.0", + "structlog>=24.0.0", + "backtesting>=0.3.3", + "pyfolio-reloaded>=0.9.5", + "pydantic (>=2.12.5,<3.0.0)", + "transformers (>=5.2.0,<6.0.0)", + "torch (>=2.10.0,<3.0.0)", + "finnhub-python (>=2.4.27,<3.0.0)", + "polygon-api-client (>=1.16.3,<2.0.0)", + "hmmlearn (>=0.3.3,<0.4.0)", ] [project.optional-dependencies] dev = [ "pytest>=8.0.0", "pytest-cov>=4.0.0", + "pytest-mock>=3.10.0", ] [build-system] requires = ["poetry-core>=2.0.0,<3.0.0"] build-backend = "poetry.core.masonry.api" + +[dependency-groups] +dev = [ + "pytest-cov (>=7.0.0,<8.0.0)", + "pytest-mock (>=3.15.1,<4.0.0)" +] diff --git a/pytest.ini b/pytest.ini new file mode 100644 index 0000000..a499866 --- /dev/null +++ b/pytest.ini @@ -0,0 +1,5 @@ +[pytest] +testpaths = tests +pythonpath = . 
src +addopts = --cov=src --cov-report=term-missing +norecursedirs = .git .venv diff --git a/requirements.txt b/requirements.txt deleted file mode 100755 index 2cc4f9f..0000000 --- a/requirements.txt +++ /dev/null @@ -1,140 +0,0 @@ -# Legacy requirements file. Use `poetry install` instead. -absl-py==2.1.0 -astunparse==1.6.3 -attrs==21.2.0 -Automat==20.2.0 -Babel==2.8.0 -bcrypt==3.2.0 -beautifulsoup4==4.13.3 -blinker==1.4 -cachetools==5.5.1 -certifi==2020.6.20 -chardet==4.0.0 -cloudpickle==3.1.1 -colorama==0.4.4 -command-not-found==0.3 -configobj==5.0.6 -constantly==15.1.0 -contourpy==1.3.1 -cryptography==3.4.8 -cupshelpers==1.0 -cycler==0.12.1 -dbus-python==1.2.18 -decorator==5.1.1 -distro==1.7.0 -distro-info==1.1+ubuntu0.2 -dm-tree==0.1.9 -flatbuffers==25.2.10 -fonttools==4.56.0 -frozendict==2.4.6 -fuse-python==1.0.2 -gast==0.6.0 -google-auth==2.38.0 -google-auth-oauthlib==1.0.0 -google-pasta==0.2.0 -gpg==1.16.0 -grpcio==1.70.0 -h5py==3.12.1 -html5lib==1.1 -httplib2==0.20.2 -hyperlink==21.0.0 -idna==3.3 -importlib-metadata==4.6.4 -incremental==21.3.0 -jeepney==0.7.1 -Jinja2==3.0.3 -jsonpatch==1.32 -jsonpointer==2.0 -jsonschema==3.2.0 -kaleido==0.2.1 -keras==2.14.0 -keyring==23.5.0 -kiwisolver==1.4.8 -launchpadlib==1.10.16 -lazr.restfulclient==0.14.4 -lazr.uri==1.0.6 -libclang==18.1.1 -libcomps==0.1.15 -louis==3.20.0 -lxml==5.3.1 -Markdown==3.7 -MarkupSafe==3.0.2 -matplotlib==3.10.0 -ml-dtypes==0.2.0 -more-itertools==8.10.0 -multitasking==0.0.11 -narwhals==1.26.0 -netifaces==0.11.0 -numpy==1.26.4 -oauthlib==3.2.0 -opt_einsum==3.4.0 -packaging==21.3 -pandas==2.2.3 -pandas-ta==0.3.14b0 -peewee==3.17.9 -pexpect==4.8.0 -pillow==11.1.0 -platformdirs==4.3.6 -plotly==6.0.0 -protobuf==4.25.6 -ptyprocess==0.7.0 -pyasn1==0.4.8 -pyasn1-modules==0.2.1 -pycairo==1.20.1 -pycups==2.0.1 -PyGObject==3.42.1 -PyHamcrest==2.0.2 -PyJWT==2.3.0 -pylibacl==0.6.0 -pyOpenSSL==21.0.0 -pyparsing==2.4.7 -PyQt5==5.15.6 -PyQt5-sip==12.9.1 -pyrsistent==0.18.1 -pyserial==3.5 
-python-apt==2.4.0+ubuntu4 -python-dateutil==2.9.0.post0 -python-debian==0.1.43+ubuntu1.1 -python-magic==0.4.24 -pytz==2025.1 -pyxattr==0.7.2 -pyxdg==0.27 -PyYAML==5.4.1 -requests==2.32.3 -requests-oauthlib==2.0.0 -rpm==4.17.0 -rsa==4.9 -scipy==1.15.1 -SecretStorage==3.3.1 -service-identity==18.1.0 -six==1.16.0 -sos==4.7.2 -soupsieve==2.6 -ssh-import-id==5.11 -systemd-python==234 -tensorboard==2.14.1 -tensorboard-data-server==0.7.2 -tensorflow==2.14.0 -tensorflow-estimator==2.14.0 -tensorflow-io-gcs-filesystem==0.37.1 -tensorflow-probability==0.22.0 -termcolor==2.5.0 -tornado==6.1 -tradingview-ta==3.3.0 -Twisted==22.1.0 -typing_extensions==4.5.0 -tzdata==2025.1 -ubuntu-drivers-common==0.0.0 -ubuntu-pro-client==8001 -ufw==0.36.1 -unattended-upgrades==0.1 -urllib3==1.26.5 -wadllib==1.3.6 -webencodings==0.5.1 -Werkzeug==3.1.3 -wrapt==1.14.1 -xdg==5 -xkit==0.0.0 -yfinance==0.2.52 -zipp==1.0.0 -zope.interface==5.4.0 diff --git a/src/classes/Add_indicators.py b/src/classes/Add_indicators.py index 2bedaba..49f2b4f 100644 --- a/src/classes/Add_indicators.py +++ b/src/classes/Add_indicators.py @@ -26,9 +26,23 @@ def add_rsi_indicator(df): return df +def add_adx_indicator(df): + from ta.trend import ADXIndicator + adx = ADXIndicator(high=df["High"], low=df["Low"], close=df["Adj Close"], window=14) + df["ADX"] = adx.adx() + return df + +def add_atr_indicator(df): + from ta.volatility import AverageTrueRange + atr = AverageTrueRange(high=df["High"], low=df["Low"], close=df["Adj Close"], window=14) + df["ATR"] = atr.average_true_range() + return df + def add_indicators(df): df = add_sma_indicator(df) df = add_macd_indicator(df) df = add_rsi_indicator(df) + df = add_adx_indicator(df) + df = add_atr_indicator(df) return df \ No newline at end of file diff --git a/src/classes/DatabaseManager.py b/src/classes/DatabaseManager.py index 5051a69..fbd1b4a 100644 --- a/src/classes/DatabaseManager.py +++ b/src/classes/DatabaseManager.py @@ -65,6 +65,11 @@ def insert_price_rows(conn: 
sqlite3.Connection, symbol: str, df: pd.DataFrame) - df.index.name = "date" df.reset_index(inplace=True) df.columns = [c.lower().replace(" ", "_") for c in df.columns] + + # Ensure date is string for sqlite + if "date" in df.columns: + df["date"] = df["date"].astype(str) + df["symbol"] = symbol records = df[ [ diff --git a/src/classes/Download.py b/src/classes/Download.py index 3369a5a..64c7265 100755 --- a/src/classes/Download.py +++ b/src/classes/Download.py @@ -29,6 +29,8 @@ get_company_info, ) from classes.Tools import ProgressBar, save_dict_with_timestamp +import structlog +from exceptions import DataFetchError, ConfigurationError # Try to import Rich progress components try: @@ -38,7 +40,7 @@ RICH_AVAILABLE = False -logger = logging.getLogger(__name__) +logger = structlog.get_logger() def _handle_start_end_dates(start, end): @@ -78,7 +80,7 @@ def load_cache(file_prefix: str, source_dir: str = ".") -> Optional[Dict]: if data: return data - print(f"Cache not found for {file_prefix}") + logger.info("Cache not found", file_prefix=file_prefix) return {} @@ -88,9 +90,13 @@ def download( start: Union[str, int] = None, end: Union[str, int] = None, interval: str = "1d", + db_path: str = None, use_rich_progress: bool = True, + provider: str = "yfinance", + api_key: str = None, ) -> Dict[str, Union[List[str], Dict[str, pd.DataFrame], Dict[str, str]]]: + """ Download historical data for tickers with Rich progress support. 
@@ -142,15 +148,19 @@ def progress_callback(ticker: str, completed: int, total: int): elif not progress: # Fallback to simple progress if completed % 50 == 0 or completed == total: - print(f"Downloaded {completed}/{total} stocks...") + logger.info("Download progress", completed=completed, total=total) # Create fetcher with retry logic fetcher = StockFetcher( max_retries=3, retry_delays=[1, 2, 4], progress_callback=progress_callback, + verbose=False, + provider_name=provider, + api_key=api_key, ) + # Fetch batch results = fetcher.fetch_batch( @@ -175,9 +185,23 @@ def progress_callback(ticker: str, completed: int, total: int): for ticker, result in results.items(): if result.success and result.price_data is not None: # Validate price data - validation = validator.validate_price_data(result.price_data, ticker) - - if validation.valid: + # Use strict validators from src.classes.data.validators + try: + # Basic validation (NaNs, rows) + validation = validator.validate_price_data(result.price_data, ticker) + if not validation.valid: + logger.warning(f"Basic validation failed for {ticker}: {validation.errors}") + continue + + # Strict validation (Sanity, schema) + from classes.data.validators import validate_data_quality, repair_data + + # Proactively repair data + result.price_data = repair_data(result.price_data, ticker) + + result.price_data = validate_data_quality(result.price_data, ticker) + + # If we get here, data is valid price_data[ticker] = result.price_data if result.company_info: company_info[ticker] = result.company_info @@ -185,8 +209,9 @@ def progress_callback(ticker: str, completed: int, total: int): # Save to database if path provided if db_path: _save_to_db(db_path, ticker, result.price_data, result.company_info) - else: - logger.warning(f"Invalid data for {ticker}: {validation.errors}") + + except Exception as e: + logger.warning(f"Validation failed for {ticker}: {e}") else: logger.warning(f"Failed to fetch {ticker}: {result.error}") @@ -288,8 +313,12 
@@ def load_data( file_prefix: str = "", data_dir: str = "", db_path: str = None, + use_rich_progress: bool = True, + provider: str = "yfinance", + api_key: str = None, ) -> Dict: + """ Load historical data from cache or download if not available. @@ -307,7 +336,7 @@ def load_data( """ # Try loading from cache if cache: - print("\nLoading historical data...") + logger.info("Loading historical data") cache_manager = CacheManager(cache_dir=data_dir) data = cache_manager.get(file_prefix) if data: @@ -320,12 +349,20 @@ def load_data( with open(symbols_file_path, "r") as f: symbols = f.readline().split(" ") else: - print("No symbols information to download data. Exit script.") - sys.exit() + raise ConfigurationError("No symbols information to download data (symbols_list.txt missing)") # Download data - print("\nDownloading historical data...") - data = download(market, symbols, db_path=db_path, use_rich_progress=use_rich_progress) + logger.info("Downloading historical data") + + data = download( + market, + symbols, + db_path=db_path, + use_rich_progress=use_rich_progress, + provider=provider, + api_key=api_key + ) + # Merge with database data if available if db_path: @@ -453,8 +490,10 @@ def load_volatile_data( tickers = volatile_data.columns.get_level_values(0)[::2].tolist() if missing_tickers: - print( - f"\nRemoving {len(missing_tickers)} symbols due to incomplete data: {missing_tickers[:5]}..." 
+ logger.warning( + "Removing symbols due to incomplete data", + count=len(missing_tickers), + examples=missing_tickers[:5] ) currencies = [ diff --git a/src/classes/Volatile.py b/src/classes/Volatile.py index 73fe6cd..1d79e85 100755 --- a/src/classes/Volatile.py +++ b/src/classes/Volatile.py @@ -10,6 +10,9 @@ from classes.Tools import convert_currency, extract_hierarchical_info from classes.Plotting import * from classes.Models import * +import structlog + +logger = structlog.get_logger() def softplus(x: np.array) -> np.array: @@ -302,15 +305,15 @@ def load_data(cache, symbols: list, market: str): FileNotFoundError: If cache is enabled and "data.pickle" does not exist. """ if cache and os.path.exists("data.pickle"): - print("\nLoading last year of data...") + logger.info("Loading last year of data") with open("data.pickle", "rb") as handle: data = pickle.load(handle) - print("Data has been saved to {}/{}.".format(os.getcwd(), "data.pickle")) + logger.info("Data loaded from cache", path=os.path.abspath("data.pickle")) else: if symbols is None: with open("symbols_list.txt", "r") as my_file: symbols = my_file.readlines()[0].split(" ") - print("\nDownloading last year of data...") + logger.info("Downloading last year of data") data = download(market, symbols) return data @@ -354,7 +357,7 @@ def volatile( initial_params = pickle.load(handle) if num_stocks > 1: - print("\nTraining a model that discovers correlations...") + logger.info("Training correlation model") # order of the polynomial order = 52 @@ -369,15 +372,15 @@ def volatile( phi_m, psi_m, phi_s, psi_s, phi_i, psi_i, phi, psi = train_msis_mcs( logp, info, num_steps=50000 ) + + logger.info("Correlation training completed") - print("Training completed.") - - print("\nEstimate top matches...") + logger.info("Estimating top matches") matches = estimate_matches(tickers, phi.numpy(), info["tt"]) + + logger.info("Top matches estimation completed") - print("Top matches estimation completed.") - - print("\nTraining a 
model that estimates and predicts trends...") + logger.info("Training trend prediction model") # how many days to look ahead when comparing the current price against a prediction horizon = 5 # order of the polynomial @@ -402,7 +405,7 @@ def volatile( phi_m, psi_m, phi_s, psi_s, phi_i, psi_i, phi, psi = params[:8] extra_params = params[8:] if len(params) > 8 else () - print("Training completed.") + logger.info("Training completed") if args.save_model: with open(args.save_model, "wb") as handle: pickle.dump([p.numpy() for p in params], handle) @@ -567,6 +570,8 @@ def volatile( # Save the DataFrame to a CSV file volatile_df.to_csv(tab_name, index=False) - print(f"\nThe prediction table printed above has been saved to {tab_name}.") + volatile_df.to_csv(tab_name, index=False) + + logger.info("Prediction table saved", path=tab_name) return volatile_df diff --git a/src/classes/analysis/VolatileConfig.py b/src/classes/analysis/VolatileConfig.py index f7bb51c..33a0373 100644 --- a/src/classes/analysis/VolatileConfig.py +++ b/src/classes/analysis/VolatileConfig.py @@ -8,6 +8,8 @@ from typing import Dict, Optional +from config.settings import settings + @dataclass class RatingThresholds: """Thresholds for stock rating based on z-scores.""" @@ -29,13 +31,13 @@ def to_dict(self) -> Dict[str, float]: @dataclass class TrainingConfig: """Configuration for model training.""" - learning_rate: float = 0.01 - correlation_steps: int = 50000 # Steps for correlation model - trend_steps: int = 10000 # Steps for trend model + learning_rate: float = settings.learning_rate + correlation_steps: int = settings.correlation_steps # Steps for correlation model + trend_steps: int = settings.trend_steps # Steps for trend model # Polynomial orders - order_correlation: int = 52 # High-frequency patterns - order_trend: int = 2 # Quadratic trend fitting + order_correlation: int = settings.correlation_order # High-frequency patterns + order_trend: int = settings.model_order # Quadratic trend fitting 
@dataclass diff --git a/src/classes/analysis/regime.py b/src/classes/analysis/regime.py new file mode 100644 index 0000000..531b8c6 --- /dev/null +++ b/src/classes/analysis/regime.py @@ -0,0 +1,102 @@ +import logging +import numpy as np +import pandas as pd +from hmmlearn.hmm import GaussianHMM + +logger = logging.getLogger(__name__) + + +class RegimeDetector: + """ + Detects market regimes (Bull, Bear, Sideways) using Hidden Markov Models. + Based on log returns and volatility. + """ + + def __init__(self, n_components: int = 3, n_iter: int = 100): + """ + Initialize the RegimeDetector. + + Args: + n_components: Number of hidden states (default: 3 for Bull, Bear, Sideways) + n_iter: Max iterations for HMM training + """ + self.n_components = n_components + self.model = GaussianHMM( + n_components=n_components, + covariance_type="full", + n_iter=n_iter, + random_state=42 + ) + self.state_map = {} # Map hidden state index to label + + def _compute_features(self, df: pd.DataFrame) -> pd.DataFrame: + """ + Compute features returning a DataFrame with NaNs dropped. + """ + data = df.copy() + data["log_ret"] = np.log(data["Close"] / data["Close"].shift(1)) + data["volatility"] = data["log_ret"].rolling(window=20).std() + return data.dropna() + + def prepare_features(self, df: pd.DataFrame) -> np.ndarray: + """ + Prepare features for HMM training. + """ + data = self._compute_features(df) + if data.empty: + raise ValueError("Not enough data to compute features (need > 20 days)") + return np.column_stack([data["log_ret"], data["volatility"]]) + + def fit(self, df: pd.DataFrame): + """ + Train the HMM on historical data. 
+ """ + try: + X = self.prepare_features(df) + self.model.fit(X) + + # Map states to labels based on mean returns: 0=Bear, 2=Bull + means = self.model.means_[:, 0] + sorted_indices = np.argsort(means) + + self.state_map = { + sorted_indices[0]: "Bear", + sorted_indices[-1]: "Bull" + } + if self.n_components == 3: + self.state_map[sorted_indices[1]] = "Sideways" + + logger.info(f"Regime model fitted. State map: {self.state_map}") + + except Exception as e: + logger.error(f"Failed to fit regime model: {e}") + raise + + def predict(self, df: pd.DataFrame) -> pd.DataFrame: + """ + Predict regimes for the input DataFrame. + """ + if not self.state_map: + raise RuntimeError("Model not fitted. Call fit() first.") + + try: + valid_data = self._compute_features(df) + if valid_data.empty: + return df + + X = np.column_stack([valid_data["log_ret"], valid_data["volatility"]]) + hidden_states = self.model.predict(X) + + # Create a localized series for alignment + regime_series = pd.Series(hidden_states, index=valid_data.index, name="Regime_State") + + # Join back to original DF + result_df = df.copy() + result_df["Regime_State"] = regime_series + result_df["Regime"] = result_df["Regime_State"].map(self.state_map) + + return result_df + + except Exception as e: + logger.error(f"Failed to predict regimes: {e}") + return df diff --git a/src/classes/backtesting/__init__.py b/src/classes/backtesting/__init__.py new file mode 100644 index 0000000..0d77d1a --- /dev/null +++ b/src/classes/backtesting/__init__.py @@ -0,0 +1,3 @@ +from .adapter import ScreenerSignalAdapter +from .engine import BacktestEngine, ProjectAlphaStrategy +from .performance import BacktestPerformance diff --git a/src/classes/backtesting/adapter.py b/src/classes/backtesting/adapter.py new file mode 100644 index 0000000..40862c4 --- /dev/null +++ b/src/classes/backtesting/adapter.py @@ -0,0 +1,161 @@ +import pandas as pd +import numpy as np +from typing import List, Optional +import structlog + +from 
class ScreenerSignalAdapter:
    """
    Adapts a BaseScreener to generate historical signals for backtesting.

    Walks forward through the data to simulate point-in-time screening, so
    each bar's signal only uses information available up to that bar
    (no lookahead bias).
    """

    # Warm-up bars before the iterative screener is first invoked; typical
    # screeners use 20-50 bar rolling windows and need this much history.
    MIN_LOOKBACK = 50

    def __init__(self, screener):
        # `screener` is expected to implement the BaseScreener interface
        # (a `screen(ticker, df)` method returning a ScreenerResult).
        self.screener = screener

    def compute_signals(self, df: pd.DataFrame, ticker: str) -> pd.Series:
        """
        Generate buy/sell signals (+1/-1/0) for the given dataframe.

        If the screener exposes its own ``screen_vectorized`` method it is
        used directly (fast path). Otherwise the screener's point-in-time
        ``screen()`` is run on an expanding window ending at each bar —
        slow, but faithful to the live screening behavior.

        Args:
            df: OHLCV DataFrame with DateTimeIndex.
            ticker: Ticker symbol.

        Returns:
            pd.Series: Signal series (+1=Buy, -1=Sell, 0=Hold).
        """
        # Fast path: screener supplies a vectorized signal series itself.
        if hasattr(self.screener, "screen_vectorized"):
            return self.screener.screen_vectorized(df)

        signals = pd.Series(0, index=df.index, dtype=int)

        def map_signal(result) -> int:
            # Map a ScreenerResult onto the +1/-1/0 convention.
            if result.is_bullish:
                return 1
            if result.is_bearish:
                return -1
            return 0

        for i in range(self.MIN_LOOKBACK, len(df)):
            # Expanding point-in-time window ending at the current bar.
            window = df.iloc[: i + 1]
            try:
                result = self.screener.screen(ticker, window)
                signals.iloc[i] = map_signal(result)
            except Exception:
                # Screener failures on a short/partial window are expected
                # early in the series; leave the bar as Hold (0).
                continue

        return signals

    def compute_signal_vectorized_breakout(self, df: pd.DataFrame) -> pd.Series:
        """
        Vectorized breakout signals for fast backtesting.

        Long when: close above its 20-day SMA, volume above its 20-day
        average, and a green candle (Close > Open).
        """
        window = 20
        sma_close = df['Close'].rolling(window=window).mean()
        sma_volume = df['Volume'].rolling(window=window).mean()

        long_condition = (
            (df['Close'] > sma_close)
            & (df['Volume'] > sma_volume)
            & (df['Close'] > df['Open'])
        )

        signals = pd.Series(0, index=df.index)
        signals[long_condition] = 1
        return signals

    def compute_signal_vectorized_trend(self, df: pd.DataFrame) -> pd.Series:
        """
        Vectorized trend signals using an SMA50 > SMA200 regime proxy
        (stand-in for the TrendlineScreener, which is hard to vectorize).

        No -1 is emitted: trend-following here does not imply shorting;
        exits are left to the strategy.
        """
        sma_fast = df['Close'].rolling(window=50).mean()
        sma_slow = df['Close'].rolling(window=200).mean()

        signals = pd.Series(0, index=df.index)
        signals[sma_fast > sma_slow] = 1
        return signals
+ """ + sma50 = df['Close'].rolling(window=50).mean() + sma200 = df['Close'].rolling(window=200).mean() + + long_condition = (sma50 > sma200) + + signals = pd.Series(0, index=df.index) + signals[long_condition] = 1 + # signals[~long_condition] = -1 # Trend following doesn't necessarily mean short + return signals + diff --git a/src/classes/backtesting/engine.py b/src/classes/backtesting/engine.py new file mode 100644 index 0000000..abb45c6 --- /dev/null +++ b/src/classes/backtesting/engine.py @@ -0,0 +1,116 @@ +import pandas as pd +import numpy as np +from backtesting import Strategy, Backtest +from backtesting.lib import crossover +import structlog +from typing import Type + +from classes.backtesting.adapter import ScreenerSignalAdapter +from classes.risk.risk_manager import RiskManager +from classes.risk.transaction_costs import TransactionCosts +from classes.screeners.base import BaseScreener +from classes.screeners.breakout import BreakoutScreener +from classes.screeners.trendline import TrendlineScreener +from config.settings import settings + +logger = structlog.get_logger() + +class ProjectAlphaStrategy(Strategy): + """ + Base strategy for Project Alpha backtesting. + Integrates point-in-time signals from adapter with RiskManager logic. + """ + risk_manager = RiskManager() + transaction_costs = TransactionCosts.us_default() # Default, can be overridden + adapter_class = ScreenerSignalAdapter + screener_class = BreakoutScreener # Default + + def init(self): + # Initialize screener adapter + self.screener = self.screener_class() + self.adapter = self.adapter_class(self.screener) + + # Pre-compute signals using adapter's vectorized methods if possible + # For MVP, we use the simple vectorized proxies defined in adapter + # Pre-compute signals using adapter's logic + # We use a dummy ticker since the strategy runs on a single dataframe context + # and Screeners often just need the dataframe. 
+ ticker = "BACKTEST" + self.signal = self.I(self.adapter.compute_signals, self.data.df, ticker) + + # Pre-compute ATR for dynamic stop-loss + # Using simple ATR proxy or pandas-ta if available. + # Backtesting.py's self.I requires an array-like result. + # We can implement a simple ATR helper here. + high = self.data.High + low = self.data.Low + close = self.data.Close + self.atr = self.I(self._compute_atr, high, low, close, settings.atr_period) + + def _compute_atr(self, high, low, close, period=14): + high = pd.Series(high) + low = pd.Series(low) + close = pd.Series(close) + return self._pandas_ta_atr(high, low, close, period) + + def _pandas_ta_atr(self, high, low, close, length=14): + # Manual ATR calculation if numpy arrays are passed or pandas-ta not available in scope + # TR = max(high-low, abs(high-prev_close), abs(low-prev_close)) + tr1 = high - low + tr2 = (high - close.shift()).abs() + tr3 = (low - close.shift()).abs() + tr = pd.DataFrame({'tr1': tr1, 'tr2': tr2, 'tr3': tr3}).max(axis=1) + return tr.rolling(window=length).mean().bfill().fillna(0).values + + def next(self): + price = self.data.Close[-1] + + # Entry Logic + if self.signal[-1] == 1 and not self.position: + # Calculate Stop Loss + stop_price = self.risk_manager.calculate_stop_loss(price, self.atr[-1], "long") + + # Calculate Position Size + # Equity is available via self.equity + # We assume single position for this strategy test context + size = self.risk_manager.calculate_position_size(self.equity, price, stop_price) + + if size > 0: + # Place Buy Order with Stop Loss + self.buy(size=size, sl=stop_price) + + # Exit Logic + elif self.signal[-1] == -1 and self.position: + self.position.close() + +class BacktestEngine: + """ + Engine to run backtests for a specific ticker and strategy. 
+ """ + def __init__(self, data: pd.DataFrame, initial_capital: float = 10000, commission: float = 0.0): + self.data = data + self.initial_capital = initial_capital + self.commission = commission + + def run(self, strategy_class: Type[Strategy] = ProjectAlphaStrategy, screener_cls: Type[BaseScreener] = BreakoutScreener): + """ + Run the backtest. + """ + # Configure strategy class with specific screener + # Using a subclass to avoid modifying base class state if reused + # Capture closure variable explicitly + target_screener = screener_cls + + class ConfiguredStrategy(strategy_class): + screener_class = target_screener + + bt = Backtest( + self.data, + ConfiguredStrategy, + cash=self.initial_capital, + commission=self.commission, + exclusive_orders=True + ) + + stats = bt.run() + return bt, stats diff --git a/src/classes/backtesting/performance.py b/src/classes/backtesting/performance.py new file mode 100644 index 0000000..a57028e --- /dev/null +++ b/src/classes/backtesting/performance.py @@ -0,0 +1,105 @@ +from dataclasses import dataclass +from typing import Dict, Any, Optional +import pandas as pd +import structlog +import os + +from classes.backtesting.engine import BacktestEngine +from backtesting import Backtest + +logger = structlog.get_logger() + +@dataclass +class BacktestResult: + ticker: str + strategy: str + start_date: str + end_date: str + duration_days: int + exposure_time_pct: float + equity_final: float + equity_peak: float + return_pct: float + buy_hold_return_pct: float + return_ann_pct: float + volatility_ann_pct: float + sharpe_ratio: float + sortino_ratio: float + calmar_ratio: float + max_drawdown_pct: float + avg_drawdown_pct: float + max_drawdown_duration_days: int + avg_drawdown_duration_days: int + trade_count: int + win_rate_pct: float + best_trade_pct: float + worst_trade_pct: float + avg_trade_pct: float + max_trade_duration_days: int + avg_trade_duration_days: int + profit_factor: float + expectancy_pct: float + sqn: float + +class 
class BacktestPerformance:
    """
    Utilities for analyzing and reporting backtest results.
    """

    @staticmethod
    def extract_metrics(stats: pd.Series, ticker: str, strategy_name: str) -> BacktestResult:
        """
        Convert backtesting.py stats Series into a structured dataclass.

        Missing or NaN/NaT entries fall back to 0 so a partially populated
        stats Series never raises.
        """
        def get(key, default=0.0):
            # Safely read a stat; treat NaN/NaT as missing.
            val = stats.get(key, default)
            return val if pd.notnull(val) else default

        def days(key) -> int:
            # Duration stats are Timedelta-like; anything else maps to 0.
            # (Hoisted helper — the original called get() twice per key.)
            val = get(key)
            return int(val.days) if hasattr(val, 'days') else 0

        return BacktestResult(
            ticker=ticker,
            strategy=strategy_name,
            start_date=str(get('Start')),
            end_date=str(get('End')),
            duration_days=days('Duration'),
            exposure_time_pct=get('Exposure Time [%]'),
            equity_final=get('Equity Final [$]'),
            equity_peak=get('Equity Peak [$]'),
            return_pct=get('Return [%]'),
            buy_hold_return_pct=get('Buy & Hold Return [%]'),
            return_ann_pct=get('Return (Ann.) [%]'),
            volatility_ann_pct=get('Volatility (Ann.) [%]'),
            sharpe_ratio=get('Sharpe Ratio'),
            sortino_ratio=get('Sortino Ratio'),
            calmar_ratio=get('Calmar Ratio'),
            max_drawdown_pct=get('Max. Drawdown [%]'),
            avg_drawdown_pct=get('Avg. Drawdown [%]'),
            max_drawdown_duration_days=days('Max. Drawdown Duration'),
            avg_drawdown_duration_days=days('Avg. Drawdown Duration'),
            trade_count=int(get('# Trades')),
            win_rate_pct=get('Win Rate [%]'),
            best_trade_pct=get('Best Trade [%]'),
            worst_trade_pct=get('Worst Trade [%]'),
            avg_trade_pct=get('Avg. Trade [%]'),
            max_trade_duration_days=days('Max. Trade Duration'),
            avg_trade_duration_days=days('Avg. Trade Duration'),
            profit_factor=get('Profit Factor'),
            expectancy_pct=get('Expectancy [%]'),
            sqn=get('SQN'),
        )

    @staticmethod
    def generate_report(bt: Backtest, filename: str = "backtest_report.html", open_browser: bool = False):
        """
        Generate an interactive HTML report for a completed backtest.

        Args:
            bt: A Backtest instance that has already been run.
            filename: Output path; parent directories are created as needed.
            open_browser: Whether to open the report after writing it.
        """
        try:
            # Ensure directory exists
            output_dir = os.path.dirname(filename)
            if output_dir:
                os.makedirs(output_dir, exist_ok=True)

            bt.plot(filename=filename, open_browser=open_browser)
            # Bug fix: the previous message logged the literal "(unknown)"
            # instead of the actual output path.
            logger.info(f"Backtest report saved to {filename}")
        except Exception as e:
            logger.error(f"Failed to generate backtest report: {e}")
+ """ + self.data = data + self.train_period = timedelta(days=train_period_days) + self.test_period = timedelta(days=test_period_days) + self.initial_capital = initial_capital + + # Ensure DatetimeIndex + if not isinstance(self.data.index, pd.DatetimeIndex): + self.data.index = pd.to_datetime(self.data.index) + + # Results storage + self.results = [] + + + def generate_windows(self) -> Generator[Tuple[pd.DataFrame, pd.DataFrame, Tuple[pd.Timestamp, pd.Timestamp]], None, None]: + """ + Yields (train_df, test_df, (test_start, test_end)) tuples. + Strategy: Anchored Expanding Window. + - Start is fixed at data.index[0] + - Train End moves forward by test_period + - Test Start = Train End + - Test End = Test Start + test_period + """ + if self.data.empty: + return + + start_date = self.data.index[0] + max_date = self.data.index[-1] + + # Initial window end + current_train_end = start_date + self.train_period + + window_idx = 1 + + while current_train_end < max_date: + test_start = current_train_end + test_end = test_start + self.test_period + + if test_end > max_date: + test_end = max_date + + # Slice data + train_mask = (self.data.index >= start_date) & (self.data.index < current_train_end) + test_mask = (self.data.index >= test_start) & (self.data.index <= test_end) + + train_df = self.data.loc[train_mask] + test_df = self.data.loc[test_mask] + + # Ensure enough data + if len(test_df) > 5: # Minimal checks + yield train_df, test_df, (test_start, test_end) + + # Stop if we reached the end + if test_end == max_date: + break + + # Expand window + current_train_end = test_end + window_idx += 1 + + def validate(self, screener_cls: Type[BaseScreener]) -> List[Dict]: + """ + Run the validation loop. + + Args: + screener_cls: The screener class to test. + + Returns: + List of result dictionaries for each window. 
+ """ + logger.info("Starting Walk-Forward Validation", + screener=screener_cls.__name__, + train_days=self.train_period.days, + test_days=self.test_period.days) + + self.results = [] + + for i, (train_df, test_df, (test_start, test_end)) in enumerate(self.generate_windows()): + logger.info(f"Processing Window {i+1}", + test_start=test_start.date(), + test_end=test_end.date(), + train_rows=len(train_df), + test_rows=len(test_df)) + + # Run In-Sample (IS) Backtest + # Note: For strict WFV we would Optimize params here. + # Current engine doesn't support optimization yet, so we just run to get IS baseline. + engine_is = BacktestEngine(train_df, self.initial_capital) + _, stats_is = engine_is.run(ProjectAlphaStrategy, screener_cls) + + # Run Out-of-Sample (OOS) Backtest + engine_oos = BacktestEngine(test_df, self.initial_capital) + _, stats_oos = engine_oos.run(ProjectAlphaStrategy, screener_cls) + + window_result = { + "window": i + 1, + "test_start": test_start, + "test_end": test_end, + # Metrics + "IS_Return": stats_is["Return [%]"], + "IS_Sharpe": stats_is["Sharpe Ratio"], + "IS_Drawdown": stats_is["Max. Drawdown [%]"], + "OOS_Return": stats_oos["Return [%]"], + "OOS_Sharpe": stats_oos["Sharpe Ratio"], + "OOS_Drawdown": stats_oos["Max. Drawdown [%]"], + # Ratios (OOS / IS) - strictly speaking signs matter, so simple ratio might be misleading if negative + # We'll compute them in summary or reporting + } + self.results.append(window_result) + + return self.results + + def get_summary(self) -> pd.DataFrame: + """ + Return a summary DataFrame of the results. + And checks for overfitting. + """ + if not self.results: + return pd.DataFrame() + + df = pd.DataFrame(self.results) + + # Calculate degradation + # 1.0 means OOS matched IS. < 0.5 suggests overfitting. + # Handle division by zero or negative sharpes gracefully? 
+ # For Sharpe: + df["Sharpe_Degradation"] = df["OOS_Sharpe"] / df["IS_Sharpe"].replace(0, np.nan) + + # Determine performance + df["Overfit_Warning"] = df["Sharpe_Degradation"] < 0.5 + + # Overall aggregates + mean_oos_sharpe = df["OOS_Sharpe"].mean() + logger.info("Validation Complete", mean_oos_sharpe=mean_oos_sharpe) + + return df diff --git a/src/classes/filters/__init__.py b/src/classes/filters/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/classes/filters/fundamental_filter.py b/src/classes/filters/fundamental_filter.py new file mode 100644 index 0000000..bce8a5c --- /dev/null +++ b/src/classes/filters/fundamental_filter.py @@ -0,0 +1,80 @@ +import structlog +from typing import Optional, Dict, Any +from functools import lru_cache +from config.settings import settings + +logger = structlog.get_logger() + +class FundamentalFilter: + """ + Filters stocks based on fundamental financial metrics. + Uses Finnhub API for data, with graceful fallback if key is missing. + """ + + def __init__(self, api_key: Optional[str] = None): + self.api_key = api_key or settings.finnhub_api_key + self.client = None + + if self.api_key: + try: + import finnhub + self.client = finnhub.Client(api_key=self.api_key) + except ImportError: + logger.warning("finnhub-python not installed. Fundamental filtering disabled.") + else: + logger.info("No Finnhub API key provided. Fundamental filtering will be skipped.") + + @lru_cache(maxsize=100) + def check_health(self, ticker: str) -> Dict[str, Any]: + """ + Check fundamental health of a ticker. 
+ + Returns: + Dict containing 'passed' (bool) and 'reason' (str) + """ + if not self.client: + return {"passed": True, "reason": "No API access", "details": {}} + + try: + # Fetch basic financials + metrics = self.client.company_basic_financials(ticker, 'all')['metric'] + + reasons = [] + + # Rule 1: Debt/Equity < 200% + total_debt_equity = metrics.get('totalDebtToEquity') + if total_debt_equity and total_debt_equity > 200: + reasons.append(f"High Debt/Equity: {total_debt_equity}%") + + # Rule 2: P/E Ratio (Value check, but loose for growth) + # Avoid extremely high P/E or negative P/E + pe_ttm = metrics.get('peTTM') + if pe_ttm: + if pe_ttm < 0: + reasons.append(f"Negative P/E: {pe_ttm}") + elif pe_ttm > 100: # Very loose cap + reasons.append(f"Extremely High P/E: {pe_ttm}") + + # Rule 3: ROE > 0 (Profitability) + roe = metrics.get('roeTTM') + if roe and roe < 0: + reasons.append(f"Negative ROE: {roe}%") + + # Rule 4: Revenue Growth > 0 (Growth) + rev_growth = metrics.get('revenueGrowthTTMYoy') + if rev_growth and rev_growth < 0: + reasons.append(f"Negative Revenue Growth: {rev_growth}%") + + if reasons: + return { + "passed": False, + "reason": "; ".join(reasons), + "details": metrics + } + + return {"passed": True, "reason": "Fundamentals OK", "details": metrics} + + except Exception as e: + logger.error("Fundamental check failed", ticker=ticker, error=str(e)) + # Fallback to passing if check fails, to not block trading on API hiccup + return {"passed": True, "reason": "Check Error", "details": {"error": str(e)}} diff --git a/src/classes/filters/sentiment_filter.py b/src/classes/filters/sentiment_filter.py new file mode 100644 index 0000000..23605dd --- /dev/null +++ b/src/classes/filters/sentiment_filter.py @@ -0,0 +1,81 @@ +import structlog +from typing import List, Dict, Any, Optional +from transformers import pipeline + +logger = structlog.get_logger() + +class SentimentFilter: + """ + Filters stocks based on news sentiment using FinBERT. 
+ """ + + _classifier = None # Singleton model instance + + def __init__(self, model_name: str = "ProsusAI/finbert"): + self.model_name = model_name + + def _load_model(self): + """Lazy load the model if not already loaded.""" + if SentimentFilter._classifier is None: + try: + logger.info(f"Loading sentiment model: {self.model_name}") + SentimentFilter._classifier = pipeline("sentiment-analysis", model=self.model_name) + except Exception as e: + logger.error(f"Failed to load sentiment model: {e}") + SentimentFilter._classifier = None + + @property + def classifier(self): + if SentimentFilter._classifier is None: + self._load_model() + return SentimentFilter._classifier + + def analyze_sentiment(self, headlines: List[str]) -> Dict[str, Any]: + """ + Analyze sentiment of a list of headlines. + + Returns: + Dict with 'score' (-1 to +1) and 'label' (positive/negative/neutral) + """ + if not self.classifier or not headlines: + return {"score": 0.0, "label": "neutral", "details": []} + + try: + results = self.classifier(headlines) + + # Calculate aggregate score + # positive=1, negative=-1, neutral=0 + total_score = 0.0 + details = [] + + for headline, res in zip(headlines, results): + label = res['label'] + score = res['score'] + + val = 0.0 + if label == 'positive': + val = score + elif label == 'negative': + val = -score + # neutral is 0.0 + + total_score += val + details.append({"headline": headline, "label": label, "score": round(score, 4)}) + + avg_score = total_score / len(headlines) if headlines else 0.0 + + final_label = "neutral" + if avg_score > 0.15: # Lowered threshold slightly + final_label = "positive" + elif avg_score < -0.15: + final_label = "negative" + + return { + "score": round(avg_score, 2), + "label": final_label, + "details": details + } + + except Exception as e: + logger.error(f"Sentiment analysis failed: {e}") + return {"score": 0.0, "label": "error", "details": str(e)} diff --git a/src/classes/output/charts.py b/src/classes/output/charts.py 
index 8bb5c57..9db93db 100644 --- a/src/classes/output/charts.py +++ b/src/classes/output/charts.py @@ -437,6 +437,7 @@ def create_batch_charts( output_dir: str, batch_size: int = 100, send_email_flag: bool = True, + analysis_metadata: Dict[str, Any] = None, ) -> List[str]: """ Create charts for multiple symbols in batches. @@ -512,15 +513,36 @@ def create_batch_charts( prev_close = df["Close"].iloc[-2] change_pct = ((last_close - prev_close) / prev_close) * 100 - # Get Sector (if available in data["sectors"]) + # Get Sector (if available in data["sectors"] or data["company_info"]) sector = data.get("sectors", {}).get(symbol, "N/A") - summary_data.append({ + # Fallback to company_info if sector is missing or N/A + if sector == "N/A" and "company_info" in data: + info = data["company_info"].get(symbol, {}) + # Check for 'sector' or 'Sector' keys + sector = info.get("sector", info.get("Sector", "N/A")) + + # Handle potential list or dict values (some providers return complex objects) + if isinstance(sector, list) and sector: + sector = sector[0] + elif isinstance(sector, dict): + sector = "N/A" + + item = { "symbol": symbol, "price": f"{last_close:.2f}", "change": change_pct, "sector": sector - }) + } + + # Add metadata if available + if analysis_metadata and symbol in analysis_metadata: + meta = analysis_metadata[symbol] + # Copy all metadata keys into the item + for k, v in meta.items(): + item[k] = v + + summary_data.append(item) # Generate PNGs for ALL symbols for PDF report chart_files = [] @@ -552,6 +574,17 @@ def create_batch_charts( # Use top 5 for inline embedding, attach PDF for full view embedded_charts = chart_files[:5] + # Determine extra columns from summary_data keys for the HTML table + base_keys = {"symbol", "price", "change", "sector"} + all_keys = set() + for item in summary_data: + all_keys.update(item.keys()) + extra_columns = list(all_keys - base_keys) + + # Preferred order for readability in the email + preferred_order = ["Score", "Signal", 
"Growth", "Vol", "Regime"] + extra_columns.sort(key=lambda x: preferred_order.index(x) if x in preferred_order else 999) + server.send_stock_report_email( subject=f"{screener_name} - {market.upper()} Report (Batch {batch_idx + 1})", market=market, @@ -559,7 +592,8 @@ def create_batch_charts( summary_data=summary_data, charts=embedded_charts, pdf_path=pdf_path, - mock=False # Set to False in production + mock=False, + extra_columns=extra_columns ) except FileNotFoundError: @@ -620,10 +654,17 @@ def _create_full_chart( ) # Row 1: Price with SMAs - for col in ["Close", "SMA_10", "SMA_30", "SMA_50", "SMA_200"]: + # Row 1: Price with SMAs (Only 50 and 200 for clarity) + for col in ["Close", "SMA_50", "SMA_200"]: if col in data.columns: + # Style override for specific lines + line_style = dict(width=1.5) + if col == "SMA_50": line_style["color"] = "orange" + if col == "SMA_200": line_style["color"] = "blue" + if col == "Close": line_style["color"] = "black" + fig.add_trace( - go.Scatter(x=data.index, y=data[col], name=col), + go.Scatter(x=data.index, y=data[col], name=col, line=line_style), row=1, col=1 ) @@ -714,15 +755,12 @@ def _add_donchian_to_chart(fig: go.Figure, data: pd.DataFrame, window: int = 20) don_mid = (don_high + don_low) / 2 fig.add_trace( - go.Scatter(x=data.index, y=don_high, name="Don High", line=dict(color="red", width=2, dash="dash")), + go.Scatter(x=data.index, y=don_high, name="Don High", line=dict(color="gray", width=1, dash="dot"), opacity=0.5), row=1, col=1 ) + # Mid band removed for clarity fig.add_trace( - go.Scatter(x=data.index, y=don_mid, name="Don Mid", line=dict(color="blue", width=2, dash="dash")), - row=1, col=1 - ) - fig.add_trace( - go.Scatter(x=data.index, y=don_low, name="Don Low", line=dict(color="purple", width=2, dash="dash")), + go.Scatter(x=data.index, y=don_low, name="Don Low", line=dict(color="gray", width=1, dash="dot"), opacity=0.5), row=1, col=1 ) @@ -777,24 +815,77 @@ def _generate_pdf_report( if summary_data: 
elements.append(Paragraph("Market Summary (Top 30)", styles["Heading2"])) - # Prepare data for table - table_data = [["Symbol", "Price", "Change %", "Sector"]] + # Determine columns dynamically + base_keys = ["symbol", "price", "change", "sector"] + # Map internal keys to display names + header_map = { + "symbol": "Symbol", + "price": "Price", + "change": "Change %", + "sector": "Sector" + } + + # Find all unique keys across items + all_keys = set() + for item in summary_data: + all_keys.update(item.keys()) + + # Identify extra columns + extra_keys = list(all_keys - set(base_keys)) + # Sort extra columns: Score, Signal, Growth, Vol, Rate, Regime come first if present + preferred_order = ["Score", "Signal", "Growth", "Vol", "Rate", "Regime"] + extra_keys.sort(key=lambda x: preferred_order.index(x) if x in preferred_order else 999) + + # Final Column List (internal keys) + final_keys = ["symbol", "price", "change", "sector"] + extra_keys + + # Prepare Header Row + header_row = [header_map.get(k, k) for k in final_keys] + table_data = [header_row] + + # Prepare Data Rows for item in summary_data[:30]: - change_str = f"{item['change']:.2f}%" - if item['change'] > 0: change_str = "+" + change_str - table_data.append([ - item['symbol'], - item['price'], - change_str, - item.get('sector', 'N/A') - ]) + row = [] + for k in final_keys: + val = item.get(k, "") + # Format specific keys + if k == "change": + try: + val_f = float(val) + change_str = f"{val_f:.2f}%" + if val_f > 0: change_str = "+" + change_str + val = change_str + except: pass + elif k == "sector": + if val == "N/A": val = "" # Cleaner + + row.append(str(val)) + table_data.append(row) - t = Table(table_data, colWidths=[1.5*inch, 1*inch, 1*inch, 2.5*inch]) + # Dynamic Column Widths + # Base widths + col_widths = [] + for k in final_keys: + if k == "symbol": col_widths.append(1.2*inch) + elif k == "price": col_widths.append(0.8*inch) + elif k == "change": col_widths.append(0.8*inch) + elif k == "sector": 
col_widths.append(1.5*inch) # Reduced sector width to fit others + else: col_widths.append(0.8*inch) # Default width for extras + + # Adjust if total width exceeds page width (approx 7.5 inch available) + total_width = sum(col_widths) + max_width = 7.5 * inch + if total_width > max_width: + scale = max_width / total_width + col_widths = [w * scale for w in col_widths] + + t = Table(table_data, colWidths=col_widths) t.setStyle(TableStyle([ ('BACKGROUND', (0, 0), (-1, 0), colors.HexColor("#3498db")), ('TEXTCOLOR', (0, 0), (-1, 0), colors.whitesmoke), ('ALIGN', (0, 0), (-1, -1), 'CENTER'), ('FONTNAME', (0, 0), (-1, 0), 'Helvetica-Bold'), + ('FONTSIZE', (0, 0), (-1, -1), 8), # Reduced font size for more columns ('BOTTOMPADDING', (0, 0), (-1, 0), 12), ('BACKGROUND', (0, 1), (-1, -1), colors.beige), ('GRID', (0, 0), (-1, -1), 1, colors.black), diff --git a/src/classes/output/email.py b/src/classes/output/email.py index e850049..c5b3712 100644 --- a/src/classes/output/email.py +++ b/src/classes/output/email.py @@ -162,6 +162,7 @@ def send_stock_report_email( pdf_path: Optional[str] = None, recipients: Optional[List[str]] = None, mock: bool = True, + extra_columns: Optional[List[str]] = None, ) -> bool: """ Send a formatted stock report email with summary table and embedded charts. 
@dataclass
class OrderValidation:
    """Outcome of a pre-trade risk check."""
    valid: bool
    reason: Optional[str] = None


class RiskManager:
    """
    Manages trading risk through position sizing, stop-loss calculation,
    and exposure limits. All limits are sourced from global settings.
    """

    def __init__(self):
        self.risk_per_trade = settings.risk_per_trade
        self.atr_multiplier = settings.atr_multiplier
        self.max_positions = settings.max_positions
        self.max_portfolio_exposure = settings.max_portfolio_exposure
        self.trailing_stop = settings.trailing_stop

    def calculate_stop_loss(self, entry_price: float, atr: float, direction: Literal["long", "short"] = "long") -> float:
        """
        Calculate an ATR-based stop-loss price.

        Args:
            entry_price: Execution price.
            atr: Average True Range value.
            direction: 'long' or 'short'.

        Returns:
            Stop-loss price (below entry for longs, above for shorts).
        """
        offset = atr * self.atr_multiplier
        return entry_price - offset if direction == "long" else entry_price + offset

    def calculate_position_size(self, account_size: float, entry_price: float, stop_loss: float) -> int:
        """
        Calculate the share count that risks `risk_per_trade` of equity.

        Shares = floor((Account * Risk%) / |Entry - Stop|)

        Args:
            account_size: Total account equity.
            entry_price: Execution price.
            stop_loss: Stop protection price.

        Returns:
            Number of shares (0 when the stop equals the entry).
        """
        per_share_risk = abs(entry_price - stop_loss)
        if per_share_risk == 0:
            return 0
        return math.floor(account_size * self.risk_per_trade / per_share_risk)

    def validate_order(self, current_positions: int, current_exposure: float, order_value: float, account_size: float) -> OrderValidation:
        """
        Check whether a new order fits within position and exposure limits.

        Args:
            current_positions: Number of currently open positions.
            current_exposure: Total value of current positions.
            order_value: Value of the new order (Price * Quantity).
            account_size: Total account equity.

        Returns:
            OrderValidation object.
        """
        if current_positions >= self.max_positions:
            return OrderValidation(False, f"Max positions reached ({self.max_positions})")

        projected_exposure = (current_exposure + order_value) / account_size
        if projected_exposure > self.max_portfolio_exposure:
            return OrderValidation(False, f"Max portfolio exposure exceeded ({projected_exposure:.2%} > {self.max_portfolio_exposure:.2%})")

        return OrderValidation(True)
+ """ + total_value = price * quantity + variable_bps = self.slippage_bps + (self.spread_bps / 2) + variable_cost = total_value * (variable_bps / 10000) + return self.commission_per_trade + variable_cost + + @classmethod + def us_default(cls): + """Default costs for US markets (zero commission, standard liquidity).""" + return cls(commission_per_trade=0.0, slippage_bps=5.0, spread_bps=3.0) + + @classmethod + def india_default(cls): + """Default costs for Indian markets (brokerage fees, lower liquidity).""" + return cls(commission_per_trade=20.0, slippage_bps=10.0, spread_bps=5.0) diff --git a/src/classes/screeners/breakout.py b/src/classes/screeners/breakout.py index 2205b81..3c0b125 100644 --- a/src/classes/screeners/breakout.py +++ b/src/classes/screeners/breakout.py @@ -7,6 +7,7 @@ import numpy as np import pandas as pd +from config.settings import settings from .base import BaseScreener, ScreenerResult, Signal @@ -26,10 +27,12 @@ class BreakoutScreener(BaseScreener): def __init__( self, lookback_days: int = 5, - min_avg_volume: int = 100000, - oc_threshold: float = 100.0, - volume_threshold: float = 50.0, - selling_pressure_max: float = 0.40, + min_avg_volume: int = settings.min_volume, + oc_threshold: float = settings.breakout_oc_threshold, + volume_threshold: float = settings.breakout_volume_threshold, + selling_pressure_max: float = settings.breakout_selling_pressure_max, + min_adx: float = settings.breakout_adx_min, + atr_expansion_factor: float = settings.breakout_atr_expansion_factor, ): """ Initialize breakout screener. 
@@ -40,12 +43,16 @@ def __init__( oc_threshold: Min % above 20-day O-C average volume_threshold: Min % above 20-day volume average selling_pressure_max: Max upper wick to body ratio + min_adx: Minimum ADX for trend strength + atr_expansion_factor: Min ATR expansion vs 20-day average """ self.lookback_days = lookback_days self.min_avg_volume = min_avg_volume self.oc_threshold = oc_threshold self.volume_threshold = volume_threshold self.selling_pressure_max = selling_pressure_max + self.min_adx = min_adx + self.atr_expansion_factor = atr_expansion_factor def screen(self, ticker: str, data: pd.DataFrame) -> ScreenerResult: """ @@ -79,11 +86,20 @@ def screen(self, ticker: str, data: pd.DataFrame) -> ScreenerResult: 100 * (df["O_to_C"] - df["OC_20D_Mean"]) / df["OC_20D_Mean"].abs().replace(0, 1) ) df["MaxOC_Prev10"] = df["O_to_C"].rolling(10).max() + # Volume and Confirmation Metrics df["Volume_20D_Mean"] = df["Volume"].rolling(20).mean() df["Volume_perc_from_20D_Mean"] = ( 100 * (df["Volume"] - df["Volume_20D_Mean"]) / df["Volume_20D_Mean"].replace(0, 1) ) + # Check for confirmation indicators (ADX/ATR) + # These should have been added by add_indicators() + has_adx = "ADX" in df.columns + has_atr = "ATR" in df.columns + + if has_atr: + df["ATR_20D_Mean"] = df["ATR"].rolling(20).mean() + # Get last N candles for screening latest = df.tail(self.lookback_days) @@ -96,6 +112,14 @@ def screen(self, ticker: str, data: pd.DataFrame) -> ScreenerResult: (latest["Volume_perc_from_20D_Mean"] >= self.volume_threshold) ) + # Add Confirmation Filters + if has_adx: + condition &= (latest["ADX"] >= self.min_adx) + + if has_atr: + # ATR Expansion check + condition &= (latest["ATR"] >= latest["ATR_20D_Mean"] * self.atr_expansion_factor) + breakouts = latest[condition] avg_volume = latest["Volume"].mean() @@ -107,16 +131,23 @@ def screen(self, ticker: str, data: pd.DataFrame) -> ScreenerResult: vol_strength = min(latest_breakout["Volume_perc_from_20D_Mean"] / 100, 1.0) confidence = 
from typing import Dict, Optional
from dataclasses import dataclass
from classes.screeners.base import ScreenerResult, Signal
from config.settings import settings


@dataclass
class ConsensusResult:
    """Aggregated multi-screener verdict for a single ticker."""
    ticker: str
    score: float                # 0.0 to 1.0
    signals: Dict[str, float]   # per-source contribution scores
    primary_signal: Signal
    recommendation: str


class ConsensusEngine:
    """
    Aggregates signals from multiple screeners and filters to produce a single
    confidence score in [0.0, 1.0].
    """

    def __init__(self, weights: Optional[Dict[str, float]] = None):
        """
        Args:
            weights: Optional mapping of source name -> weight. Falls back to
                settings.consensus_weights. Weights are normalized to sum to 1.0
                (a copy is taken so the settings default is never mutated).
        """
        self.weights = weights or settings.consensus_weights.copy()
        total_weight = sum(self.weights.values())
        if total_weight > 0:
            self.weights = {k: v / total_weight for k, v in self.weights.items()}

    def calculate_score(self, ticker: str, screener_results: Dict[str, ScreenerResult],
                        filter_results: Optional[Dict[str, float]] = None) -> ConsensusResult:
        """
        Calculate the consensus score for a single ticker.

        Args:
            ticker: Ticker symbol.
            screener_results: Dict mapping screener name to ScreenerResult.
            filter_results: Optional dict of filter scores (0.0 to 1.0).

        Returns:
            ConsensusResult with the weighted score, per-source contributions,
            and a BUY/HOLD recommendation.
        """
        total_score = 0.0
        details = {}

        for name, weight in self.weights.items():
            score = 0.0

            if name in screener_results:
                res = screener_results[name]
                # Only BUY signals contribute positively; SELL and HOLD
                # contribute 0 so they cannot push the score above threshold.
                # (Fix: previous version assigned -confidence for SELL and then
                # immediately overwrote it with 0.0 — dead code removed.)
                if res.signal == Signal.BUY:
                    score = res.confidence
            elif filter_results and name in filter_results:
                score = filter_results[name]

            total_score += score * weight
            details[name] = round(score, 2)

        # Synergy bonus: breakout and trend agreeing is a stronger setup than
        # either alone, so nudge the score up (capped at 1.0).
        if "breakout" in details and details["breakout"] > 0 and \
           "trend" in details and details["trend"] > 0:
            synergy_bonus = 0.1
            total_score = min(1.0, total_score + synergy_bonus)
            details["synergy_bonus"] = synergy_bonus

        primary_signal = Signal.HOLD
        rec = "HOLD"
        if total_score >= 0.7:
            primary_signal = Signal.BUY
            rec = "STRONG BUY"
        elif total_score >= 0.5:
            primary_signal = Signal.BUY
            rec = "BUY"

        return ConsensusResult(
            ticker=ticker,
            score=round(total_score, 2),
            signals=details,
            primary_signal=primary_signal,
            recommendation=rec
        )
analysis" - def __init__(self, lookback_days: int = 5): + def __init__(self, lookback_days: int = settings.trend_lookback_days): """ Initialize trendline screener. @@ -68,9 +69,13 @@ def screen(self, ticker: str, data: pd.DataFrame) -> ScreenerResult: trend_data = trend_data.fillna(0) trend_data = trend_data.replace([np.inf, -np.inf], 0) + # Normalize prices to percentage change from start of period + # This fixes the bias towards high-priced stocks + y_raw = trend_data["Close"].values + y = (y_raw / y_raw[0]) * 100 + # Calculate slope using linear regression - x = trend_data.index.values - y = trend_data["Close"].values + x = np.arange(len(y)) # Use 0, 1, 2... for consistent X-axis scale if len(x) < 2: return ScreenerResult( diff --git a/src/config/defaults.yaml b/src/config/defaults.yaml new file mode 100644 index 0000000..3970711 --- /dev/null +++ b/src/config/defaults.yaml @@ -0,0 +1,26 @@ +# Default Configuration +# These values are loaded if not overridden by environment variables or CLI arguments. 
+ +market: "us" +data_dir: "data" +cache_ttl_hours: 24 + +model: + order: 2 + correlation_order: 52 + learning_rate: 0.01 + trend_steps: 10000 + correlation_steps: 50000 + +screeners: + min_volume: 100000 + breakout: + selling_pressure_max: 0.40 + oc_threshold: 1.0 + volume_threshold: 0.5 + trend_lookback_days: 20 + +risk: + risk_per_trade: 0.01 + atr_multiplier: 2.0 + max_positions: 10 diff --git a/src/config/settings.py b/src/config/settings.py new file mode 100644 index 0000000..f16ba80 --- /dev/null +++ b/src/config/settings.py @@ -0,0 +1,63 @@ +from pydantic_settings import BaseSettings, SettingsConfigDict +from pydantic import Field +from pathlib import Path +from typing import Literal, Optional + +class Settings(BaseSettings): + # Data + market: Literal["us", "india"] = "us" + data_dir: Path = Path("data") + cache_ttl_hours: int = 24 + data_provider: Literal["yfinance", "polygon"] = "yfinance" + + + # Model + model_order: int = 2 + correlation_order: int = 52 + learning_rate: float = 0.01 + trend_steps: int = 10000 + correlation_steps: int = 50000 + + # Screeners + min_volume: int = 100_000 + breakout_selling_pressure_max: float = 0.40 + breakout_oc_threshold: float = 1.0 + breakout_volume_threshold: float = 0.5 + trend_lookback_days: int = 20 + breakout_adx_min: float = 20.0 # Minimum ADX for trend strength + breakout_atr_expansion_factor: float = 1.5 # Current ATR vs 20-day average + + # Consensus + consensus_weights: dict = { + "breakout": 0.4, + "trend": 0.3, + "volatility": 0.1, + "fundamental": 0.1, + "sentiment": 0.1 + } + + # Risk + risk_per_trade: float = 0.01 + atr_multiplier: float = 2.0 + atr_period: int = 14 + max_positions: int = 10 + max_portfolio_exposure: float = 1.0 # 1.0 = 100% exposure allowed + trailing_stop: bool = True + + # Email + smtp_host: Optional[str] = None + smtp_port: int = 587 + + # API Keys + finnhub_api_key: Optional[str] = None + polygon_api_key: Optional[str] = None + fmp_api_key: Optional[str] = None + + model_config = 
SettingsConfigDict( + env_file=".env", + env_prefix="PA_", + env_file_encoding="utf-8", + extra="ignore" + ) + +settings = Settings() diff --git a/src/exceptions.py b/src/exceptions.py new file mode 100644 index 0000000..71faf8e --- /dev/null +++ b/src/exceptions.py @@ -0,0 +1,17 @@ +class ProjectAlphaError(Exception): + """Base exception for all application errors.""" + +class DataFetchError(ProjectAlphaError): + """Failed to download market data.""" + +class ScreenerError(ProjectAlphaError): + """Screener execution failed.""" + +class ModelTrainingError(ProjectAlphaError): + """Model failed to converge.""" + +class ConfigurationError(ProjectAlphaError): + """Invalid configuration.""" + +class DataValidationError(ProjectAlphaError): + """Input data failed validation.""" diff --git a/src/logging_config.py b/src/logging_config.py new file mode 100644 index 0000000..5b96894 --- /dev/null +++ b/src/logging_config.py @@ -0,0 +1,36 @@ +import structlog +import logging +import sys + +def configure_logging(level: str = "INFO", json_output: bool = False): + """ + Configure structured logging for the application. 
+ + Args: + level: transform string level (INFO, DEBUG) to logging constant + json_output: if True, output JSON, otherwise colorful console output + """ + + # Map string level to logging constant + log_level = getattr(logging, level.upper(), logging.INFO) + + processors = [ + structlog.contextvars.merge_contextvars, + structlog.processors.add_log_level, + structlog.processors.TimeStamper(fmt="iso"), + structlog.processors.StackInfoRenderer(), + structlog.dev.set_exc_info, + structlog.processors.format_exc_info, + ] + + if json_output: + processors.append(structlog.processors.JSONRenderer()) + else: + processors.append(structlog.dev.ConsoleRenderer(colors=True)) + + structlog.configure( + processors=processors, + logger_factory=structlog.PrintLoggerFactory(), + wrapper_class=structlog.make_filtering_bound_logger(log_level), + cache_logger_on_first_use=True, + ) diff --git a/src/project_alpha.py b/src/project_alpha.py index 290df41..6898974 100644 --- a/src/project_alpha.py +++ b/src/project_alpha.py @@ -20,6 +20,9 @@ """ import os +import structlog +from datetime import datetime +import pandas as pd os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2" os.environ["OMP_NUM_THREADS"] = "4" @@ -61,6 +64,14 @@ "name": "Model Options", "options": ["--load-model", "--save-model"], }, + { + "name": "Risk Management", + "options": ["--risk-per-trade", "--atr-multiplier", "--max-positions"], + }, + { + "name": "Backtesting", + "options": ["--backtest", "--initial-capital", "--benchmark"], + }, { "name": "Additional Features", "options": ["--value"], @@ -71,6 +82,17 @@ from classes.Download import load_data, load_volatile_data from classes.Volatile import volatile from classes.screeners import BreakoutScreener, TrendlineScreener +from classes.screeners.consensus import ConsensusEngine +from classes.filters.fundamental_filter import FundamentalFilter +from classes.filters.sentiment_filter import SentimentFilter +from classes.backtesting.engine import BacktestEngine, ProjectAlphaStrategy 
+from classes.backtesting.performance import BacktestPerformance +from classes.backtesting.walk_forward import WalkForwardValidator +from classes.data.news_fetcher import NewsFetcher + +from classes.analysis.regime import RegimeDetector + +from concurrent.futures import ThreadPoolExecutor, as_completed from classes.output import ( create_batch_charts, console, print_banner, print_section, print_success, print_error, @@ -79,6 +101,8 @@ ) import classes.IndexListFetcher as Index import classes.Tools as tools +from config.settings import settings +from logging_config import configure_logging # Available screeners @@ -102,7 +126,7 @@ def validate_screeners(ctx, param, value): @click.option( "-m", "--market", type=click.Choice(["us", "india"], case_sensitive=False), - default="us", + default=settings.market, show_default=True, help="Market to analyze. **us** = S&P 500, **india** = NSE 500", ) @@ -187,6 +211,18 @@ def validate_screeners(ctx, param, value): default=False, help="Minimal output (errors only)", ) +@click.option( + "--json-logs", + is_flag=True, + default=False, + help="Output logs in JSON format", +) +@click.option( + "--log-level", + type=click.Choice(["DEBUG", "INFO", "WARNING", "ERROR"], case_sensitive=False), + default="INFO", + help="Set log level", +) @click.option( "--no-banner", is_flag=True, @@ -226,10 +262,111 @@ def validate_screeners(ctx, param, value): default=False, help="Include value stocks from external screener sources (India only)", ) +@click.option( + "--fundamental/--no-fundamental", + default=False, + help="Enable fundamental analysis filtering (requires Finnhub API Key)", +) +@click.option( + "--sentiment/--no-sentiment", + default=False, + help="Enable sentiment analysis filtering (uses FinBERT)", +) +@click.option( + "--consensus/--no-consensus", + default=False, + help="Enable consensus scoring (runs all screeners and aggregates signals)", +) +@click.option( + "--risk-per-trade", + type=float, + default=settings.risk_per_trade, + 
help="Risk per trade (decimal, e.g., 0.01 for 1%)", +) +@click.option( + "--atr-multiplier", + type=float, + default=settings.atr_multiplier, + help="ATR multiplier for stop-loss calculation", +) +@click.option( + "--max-positions", + type=int, + default=settings.max_positions, + help="Maximum number of concurrent open positions", +) +@click.option( + "--backtest", + is_flag=True, + default=False, + help="Run backtest on selected symbols/market", +) +@click.option( + "--initial-capital", + type=float, + default=10000.0, + help="Initial capital for backtest", +) +@click.option( + "--benchmark", + type=str, + default="SPY", + help="Benchmark symbol for comparison", +) +@click.option( + "--data-provider", + type=click.Choice(["yfinance", "polygon"], case_sensitive=False), + default=settings.data_provider, + show_default=True, + help="Data provider for historical data", +) +@click.option( + "--polygon-api-key", + type=str, + default=settings.polygon_api_key, + envvar="PA_POLYGON_API_KEY", + help="API key for Polygon.io (overrides settings)", +) +@click.option( + "--regime-detection", + is_flag=True, + default=False, + help="Enable market regime detection (Bull/Bear/Sideways)", +) +@click.option( + "--regime-index", + type=str, + default="SPY", + help="Index symbol for regime detection (default: SPY)", +) +@click.option( + "--walk-forward", + is_flag=True, + default=False, + help="Run Walk-Forward Validation instead of single backtest", +) +@click.option( + "--wf-train-months", + type=int, + default=12, + help="Training window size in months for Walk-Forward Validation", +) +@click.option( + "--wf-test-months", + type=int, + default=3, + help="Testing window size in months for Walk-Forward Validation", +) +@click.option("--strict", is_flag=True, help="Enable stricter filtering for high-quality signals only") @click.version_option(version="0.1.0", prog_name="Project Alpha") def cli(market, symbols, screeners, rank, top, min_price, max_price, output_format, - save_table, 
no_plots, plot_losses, verbose, quiet, no_banner, cache, - db_path, load_model, save_model, value): + save_table, no_plots, plot_losses, verbose, quiet, json_logs, log_level, no_banner, cache, + db_path, load_model, save_model, value, fundamental, sentiment, consensus, risk_per_trade, atr_multiplier, max_positions, + backtest, initial_capital, benchmark, data_provider, polygon_api_key, regime_detection, regime_index, + walk_forward, wf_train_months, wf_test_months, strict): + + + """ ๐Ÿš€ **Project Alpha** - Your Day-to-Day Trading Companion @@ -243,9 +380,15 @@ def cli(market, symbols, screeners, rank, top, min_price, max_price, output_form **Quick Start:** - $ python project_alpha.py --market us - $ python project_alpha.py --market india --screeners volatility,trend """ + # Configure logging + if verbose: + log_level = "DEBUG" + elif quiet: + log_level = "WARNING" + + configure_logging(level=log_level, json_output=json_logs) + # Create args namespace for backward compatibility class Args: pass @@ -270,6 +413,27 @@ class Args: args.load_model = load_model args.save_model = save_model args.value = value + args.fundamental = fundamental + args.sentiment = sentiment + args.consensus = consensus + args.risk_per_trade = risk_per_trade + args.atr_multiplier = atr_multiplier + args.max_positions = max_positions + args.backtest = backtest + args.initial_capital = initial_capital + args.benchmark = benchmark + args.data_provider = data_provider + args.polygon_api_key = polygon_api_key + args.regime_detection = regime_detection + args.regime_index = regime_index + args.walk_forward = walk_forward + args.wf_train_months = wf_train_months + args.wf_test_months = wf_test_months + args.strict = strict + args.settings = settings + + + # Run main with enhanced args run_screening(args) @@ -277,7 +441,7 @@ class Args: def screener_value_charts(cache, market: str, index: str, symbols: list, db_path: str = None): """Generate value stock charts for a given market and symbols.""" - 
def apply_filters(symbols, args, filter_cache=None):
    """
    Apply additional filters (Fundamental, Sentiment) to a list of symbols.

    Args:
        symbols: List of ticker symbols.
        args: CLI arguments namespace (reads .fundamental, .sentiment, .quiet).
        filter_cache: Optional dict to store filter scores
            {ticker: {filter_name: score}} for later consensus scoring.

    Returns:
        List of symbols that passed all enabled filters (a copy; the input
        list is never mutated).
    """
    if not symbols:
        return []

    filtered_symbols = symbols.copy()

    # Fundamental Analysis
    if args.fundamental:
        if not args.quiet:
            print_info(f"Running fundamental analysis on {len(filtered_symbols)} symbols...")

        fundamental_filter = FundamentalFilter()
        passed_fundamental = []

        with create_download_progress() as progress:
            task = progress.add_task("[cyan]Checking Fundamentals...", total=len(filtered_symbols))

            for ticker in filtered_symbols:
                result = fundamental_filter.check_health(ticker)

                # Cache result if container provided: passed = 1.0, failed = 0.0
                if filter_cache is not None:
                    if ticker not in filter_cache:
                        filter_cache[ticker] = {}
                    filter_cache[ticker]["fundamental"] = 1.0 if result["passed"] else 0.0

                if result["passed"]:
                    passed_fundamental.append(ticker)
                progress.advance(task)

        if not args.quiet:
            print_success(f"Fundamental filter: {len(passed_fundamental)}/{len(filtered_symbols)} passed")
        filtered_symbols = passed_fundamental

    # Sentiment Analysis
    if args.sentiment and filtered_symbols:
        if not args.quiet:
            print_info(f"Running sentiment analysis on {len(filtered_symbols)} symbols...")

        # Fix: SentimentFilter and passed_sentiment were previously created
        # twice back-to-back; instantiate each exactly once.
        sentiment_filter = SentimentFilter()
        news_fetcher = NewsFetcher()
        passed_sentiment = []

        def process_sentiment(ticker):
            """Fetch headlines and score them; returns (ticker, result-or-None)."""
            try:
                headlines = news_fetcher.fetch_headlines(ticker)
                if not headlines:
                    return ticker, None
                return ticker, sentiment_filter.analyze_sentiment(headlines)
            except Exception:
                # Best-effort: any fetch/analysis failure is treated as "no data".
                return ticker, None

        with create_download_progress() as progress:
            task = progress.add_task("[cyan]Checking Sentiment...", total=len(filtered_symbols))

            with ThreadPoolExecutor(max_workers=5) as executor:
                futures = {executor.submit(process_sentiment, t): t for t in filtered_symbols}

                for future in as_completed(futures):
                    ticker, result = future.result()

                    if result:
                        if filter_cache is not None:
                            if ticker not in filter_cache:
                                filter_cache[ticker] = {}

                            # Map label/confidence to a 0..1 score centered at 0.5.
                            sent_score = 0.5
                            if result["label"] == "positive":
                                sent_score = 0.5 + (result["score"] * 0.5)
                            elif result["label"] == "negative":
                                sent_score = 0.5 - (result["score"] * 0.5)

                            filter_cache[ticker]["sentiment"] = sent_score

                        if result["label"] != "negative":
                            passed_sentiment.append(ticker)
                    else:
                        # Fallback for no news or error: pass the ticker through.
                        passed_sentiment.append(ticker)

                    progress.advance(task)

        if not args.quiet:
            print_success(f"Sentiment filter: {len(passed_sentiment)}/{len(filtered_symbols)} passed")
        filtered_symbols = passed_sentiment

    return filtered_symbols
"Unknown" # Cleanup report directories - tools.cleanup_directory_files("data/processed_data") + tools.cleanup_directory_files(os.path.join(settings.data_dir, "processed_data")) if not args.quiet: print_success("Cleaned up previous report directories") @@ -319,6 +589,45 @@ def run_screening(args): if not args.quiet: print_section("Loading Market Data", "๐Ÿ“ฅ") + # Market Regime Detection + if args.regime_detection: + if not args.quiet: + print_info(f"Detecting market regime using {args.regime_index}...") + + try: + # Load index data + regime_data = load_data( + cache, + [args.regime_index], + market, + f"regime_{args.regime_index}", + os.path.join(settings.data_dir, "historic_data", "regime"), + db_path=db_path, + provider=args.data_provider, + api_key=args.polygon_api_key + ) + + if args.regime_index in regime_data["price_data"]: + index_df = regime_data["price_data"][args.regime_index] + + detector = RegimeDetector() + detector.fit(index_df) + + # Predict (get last state) + result = detector.predict(index_df) + current_regime = result["Regime"].iloc[-1] + + if not args.quiet: + color = "green" if current_regime == "Bull" else "red" if current_regime == "Bear" else "yellow" + print_info(f"Current Market Regime: [{color}]{current_regime}[/{color}]") + + else: + print_warning(f"Could not load data for {args.regime_index}, skipping regime detection.") + + except Exception as e: + print_error(f"Regime detection failed: {e}") + + if market == "india": index, symbols = Index.nse_500() screener_dur = 3 @@ -334,11 +643,30 @@ def run_screening(args): print_info(f"Loading S&P 500 index ({len(symbols)} stocks)") # Create data directories - data_dir = f"data/historic_data/{market}" + data_dir = os.path.join(settings.data_dir, "historic_data", market) os.makedirs(data_dir, exist_ok=True) file_prefix = f"{index}_data" - data = load_data(cache, symbols, market, file_prefix, data_dir, db_path=db_path) + + # Optimization: If specific symbols requested, only load/download those + if 
args.symbols and len(args.symbols) > 0: + symbols = args.symbols + # Use a different prefix to avoid overwriting the full market cache + file_prefix = f"{index}_custom_{len(symbols)}_data" + if not args.quiet: + print_info(f"Analyzing {len(symbols)} specific symbols: {', '.join(symbols[:5])}...") + + data = load_data( + cache, + symbols, + market, + file_prefix, + data_dir, + db_path=db_path, + provider=args.data_provider, + api_key=args.polygon_api_key + ) + total_stocks = len(data.get("tickers", [])) if not args.quiet: @@ -354,6 +682,152 @@ def run_screening(args): # Determine which screeners to run run_all = "all" in screeners_to_run + + # Track results for consensus engine + from collections import defaultdict + all_screener_results = defaultdict(dict) + global_filter_results = {} # Cache for filter scores {ticker: {filter_name: score}} + + # Backtesting Mode + if getattr(args, 'backtest', False) or getattr(args, 'walk_forward', False): + if not args.quiet: + print_section("Backtesting Mode", "๐Ÿงช") + + backtest_results = [] + + # Determine which screener to backtest + screener_cls = BreakoutScreener + if "trend" in screeners_to_run and "breakout" not in screeners_to_run: + screener_cls = TrendlineScreener + + tickers = data.get("tickers", []) + if args.symbols: + tickers = args.symbols + + # Limit tickers if --top is set + if args.top: + tickers = tickers[:args.top] + + # Walk-Forward Validation + if getattr(args, 'walk_forward', False): + if not args.quiet: + print_info(f"Running Walk-Forward Validation on {len(tickers)} symbols...") + print_info(f"Window: Train {args.wf_train_months}m / Test {args.wf_test_months}m") + + for ticker in tickers: # Iterate tickers manually as WFV is per-ticker usually + if ticker not in data["price_data"]: + continue + + # Get data for ticker + df = data["price_data"][ticker] + if df.empty: + continue + + try: + validator = WalkForwardValidator( + df, + train_period_days=args.wf_train_months*30, + 
test_period_days=args.wf_test_months*30, + initial_capital=args.initial_capital + ) + + results = validator.validate(screener_cls) + summary = validator.get_summary() + + if not args.quiet: + print_success(f"WFV Complete for {ticker}") + print(summary) # Simple print for now + + except Exception as e: + print_error(f"WFV failed for {ticker}: {e}") + + return # Stop after WFV for now + + + if not args.quiet: + print_info(f"Backtesting {len(tickers)} symbols with {screener_cls.__name__}...") + + + # Create progress bar + with create_download_progress() as progress: + task = progress.add_task("[cyan]Running Backtests...", total=len(tickers)) + + logger = structlog.get_logger() + + for ticker in tickers: + try: + df = data.get("price_data", {}).get(ticker) + if df is None or len(df) < 100: + progress.advance(task) + continue + + # Ensure index is DatetimeIndex + if not isinstance(df.index, pd.DatetimeIndex): + df.index = pd.to_datetime(df.index) + + # Ensure numeric columns + cols = ['Open', 'High', 'Low', 'Close', 'Volume'] + for col in cols: + if col in df.columns: + df[col] = pd.to_numeric(df[col], errors='coerce') + + engine = BacktestEngine(df, initial_capital=args.initial_capital) + bt, stats = engine.run(strategy_class=ProjectAlphaStrategy, screener_cls=screener_cls) + + metrics = BacktestPerformance.extract_metrics(stats, ticker, screener_cls.__name__) + backtest_results.append(metrics) + + except Exception as e: + logger.error(f"Backtest failed for {ticker}: {e}") + + progress.advance(task) + + # Display results summary + if backtest_results: + # Sort by Return % + backtest_results.sort(key=lambda x: x.return_pct or -999, reverse=True) + + # Print table + from rich.table import Table + table = Table(title=f"Backtest Results ({screener_cls.__name__})") + table.add_column("Ticker", style="cyan") + table.add_column("Return %", style="green") + table.add_column("Sharpe", style="magenta") + table.add_column("Max DD %", style="red") + table.add_column("Trades", 
style="blue") + table.add_column("Win Rate %", style="yellow") + + for res in backtest_results[:20]: # Show top 20 + table.add_row( + res.ticker, + f"{res.return_pct:.2f}%", + f"{res.sharpe_ratio:.2f}", + f"{res.max_drawdown_pct:.2f}%", + str(res.trade_count), + f"{res.win_rate_pct:.2f}%" + ) + + console.print(table) + + # Save comprehensive CSV + timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") + csv_path = os.path.join(settings.data_dir, "backtests", f"backtest_summary_{timestamp}.csv") + os.makedirs(os.path.dirname(csv_path), exist_ok=True) + + # Convert list of dataclasses to DataFrame + # import pandas as pd <-- Removed + # Handle potential missing attributes or serialization issues + try: + # Use vars() only if it's a dataclass instance + results_data = [vars(r) for r in backtest_results] + results_df = pd.DataFrame(results_data) + results_df.to_csv(csv_path, index=False) + print_success(f"Backtest summary saved to {csv_path}") + except Exception as e: + logger.error(f"Failed to save CSV: {e}") + + # Exit after backtest + return # Volatility Screening if run_all or "volatility" in screeners_to_run: @@ -363,6 +837,18 @@ def run_screening(args): volatile_data = load_volatile_data(market, data) volatile_df = volatile(args, volatile_data) + + # Prepare metadata for emails + vol_meta = {} + if not volatile_df.empty: + for _, row in volatile_df.iterrows(): + vol_meta[row["SYMBOL"]] = { + "Growth": f"{row['GROWTH']:.4f}", + "Vol": f"{row['VOLATILITY']:.4f}", + "Rate": row["RATE"], + "Regime": current_regime + } + volatile_symbols_top = volatile_df["SYMBOL"].head(200).tolist() volatile_symbols_bottom = volatile_df["SYMBOL"].tail(200).tolist() @@ -375,8 +861,11 @@ def run_screening(args): # 2. VALUE (Mean-reversion): BELOW TREND or HIGHLY BELOW TREND (undervalued) # 3. 
BREAKOUT: Low VOLATILITY + ALONG TREND (consolidating, ready to break) + trend_threshold = 0.003 if args.strict else 0.001 + breakout_vol_threshold = 0.10 if args.strict else 0.15 + trend_candidates = volatile_df[ - (volatile_df["GROWTH"] > 0.001) & + (volatile_df["GROWTH"] > trend_threshold) & (volatile_df["VOLATILITY"] > 0.10) ]["SYMBOL"].head(50).tolist() @@ -386,7 +875,7 @@ def run_screening(args): breakout_candidates = volatile_df[ (volatile_df["RATE"] == "ALONG TREND") & - (volatile_df["VOLATILITY"] < 0.15) & + (volatile_df["VOLATILITY"] < breakout_vol_threshold) & (volatile_df["GROWTH"].abs() < 0.001) ]["SYMBOL"].head(50).tolist() @@ -397,24 +886,24 @@ def run_screening(args): if not args.no_plots: # Trend/Momentum trading candidates if trend_candidates: - trend_dir = "data/processed_data/volatile_trend_trading" + trend_dir = os.path.join(settings.data_dir, "processed_data", "volatile_trend_trading") if not args.quiet: print_info(f"Generating {len(trend_candidates)} Trend/Momentum charts...") - create_batch_charts("Trend Trading", market, trend_candidates, data, trend_dir) + create_batch_charts("Trend Trading", market, trend_candidates, data, trend_dir, analysis_metadata=vol_meta) # Value/Mean-reversion trading candidates if value_candidates: - value_dir = "data/processed_data/volatile_value_trading" + value_dir = os.path.join(settings.data_dir, "processed_data", "volatile_value_trading") if not args.quiet: print_info(f"Generating {len(value_candidates)} Value/Undervalued charts...") - create_batch_charts("Value Trading", market, value_candidates, data, value_dir) + create_batch_charts("Value Trading", market, value_candidates, data, value_dir, analysis_metadata=vol_meta) # Breakout trading candidates if breakout_candidates: - breakout_dir = "data/processed_data/volatile_breakout_trading" + breakout_dir = os.path.join(settings.data_dir, "processed_data", "volatile_breakout_trading") if not args.quiet: print_info(f"Generating {len(breakout_candidates)} 
Breakout charts...") - create_batch_charts("Breakout Trading", market, breakout_candidates, data, breakout_dir) + create_batch_charts("Breakout Trading", market, breakout_candidates, data, breakout_dir, analysis_metadata=vol_meta) results["Volatility"] = len(volatile_symbols_top) results["Trend_Candidates"] = len(trend_candidates) @@ -429,15 +918,23 @@ def run_screening(args): if not args.quiet: print_section("Breakout Screening", "๐Ÿš€") - breakout_screener_out_dir = "data/processed_data/screener_breakout" + breakout_screener_out_dir = os.path.join(settings.data_dir, "processed_data", "screener_breakout") tickers_to_screen = volatile_symbols_bottom if volatile_symbols_bottom else data.get("tickers", []) # Use new modular screener breakout_screener = BreakoutScreener() price_data = data.get("price_data", {}) batch_result = breakout_screener.screen_batch(tickers_to_screen, price_data) + if args.consensus: + for res in batch_result.results: + if res.signal: # Store all signals + all_screener_results[res.ticker]["breakout"] = res + breakout_screener_out_symbols = [r.ticker for r in batch_result.buys] + # Apply Base Filters (Fundamental, Sentiment) + breakout_screener_out_symbols = apply_filters(breakout_screener_out_symbols, args, filter_cache=global_filter_results) + # Apply --top limit if specified if hasattr(args, 'top') and args.top and len(breakout_screener_out_symbols) > args.top: breakout_screener_out_symbols = breakout_screener_out_symbols[:args.top] @@ -459,8 +956,8 @@ def run_screening(args): if not args.quiet: print_section("Trend Screening", "๐Ÿ“ˆ") - trend_screener_out_dir = "data/processed_data/screener_trend" - trend_screener_history = "data/processed_data/screener_trend_history" + trend_screener_out_dir = os.path.join(settings.data_dir, "processed_data", "screener_trend") + trend_screener_history = os.path.join(settings.data_dir, "processed_data", "screener_trend_history") tickers_to_screen = volatile_symbols_top if volatile_symbols_top else 
data.get("tickers", [])[:200] @@ -468,7 +965,22 @@ def run_screening(args): trend_screener = TrendlineScreener(lookback_days=screener_dur) price_data = data.get("price_data", {}) batch_result = trend_screener.screen_batch(tickers_to_screen, price_data) - trend_screener_out_symbols = [r.ticker for r in batch_result.buys] + if args.consensus: + for res in batch_result.results: + if res.signal: + all_screener_results[res.ticker]["trend"] = res + + trend_screener_out_symbols = [] + for r in batch_result.buys: + # Strict Mode: Only Strong Up (Angle > 60) + if args.strict: + if r.details.get("trend") == "Strong Up": + trend_screener_out_symbols.append(r.ticker) + else: + trend_screener_out_symbols.append(r.ticker) + + # Apply Base Filters (Fundamental, Sentiment) + trend_screener_out_symbols = apply_filters(trend_screener_out_symbols, args, filter_cache=global_filter_results) # Apply --top limit if specified if hasattr(args, 'top') and args.top and len(trend_screener_out_symbols) > args.top: @@ -495,6 +1007,102 @@ def run_screening(args): print_warning("No trending stocks found") results["Trend"] = len(trend_screener_out_symbols) + # Consensus Engine + if args.consensus and all_screener_results: + if not args.quiet: + print_section("Consensus Analysis", "๐Ÿง ") + + consensus_engine = ConsensusEngine() + consensus_results = [] + + # Iterate over all tickers that appeared in any screener + all_tickers = list(all_screener_results.keys()) + + for ticker in all_tickers: + screener_res = all_screener_results[ticker] + # Only consider if there's at least one BUY signal + if not any(r.signal == 1 for r in screener_res.values()): + continue + + # Optional: Calculate filter scores + filter_scores = {} + # Retrieve cached scores if available + if ticker in global_filter_results: + filter_scores = global_filter_results[ticker] + + # Note: We rely on apply_filters having populated the cache. 
+ # If a ticker wasn't in the buy list of breakout/trend filters, it might not have been filtered. + # However, here we iterate 'all_screener_results', which contains ALL signals. + # But apply_filters is only run on the BUY candidates of each screener. + # If a ticker had a SELL signal, it wouldn't have gone through apply_filters. + # That's acceptable for now as we only care about consensus on potential BUYs. + + # If filters are enabled but cache missing (e.g. ticker didn't pass initial screener cut but we want consensus?) + # Currently apply_filters runs on the output of screeners. + # So if a ticker is here, it implies it was returned by at least one screener. + + # Calculate score + c_res = consensus_engine.calculate_score(ticker, screener_res, filter_scores) + + if c_res.score >= 0.5: # Filter weak consensus + consensus_results.append(c_res) + + # Sort by score + consensus_results.sort(key=lambda x: x.score, reverse=True) + + if consensus_results: + results["Consensus"] = len(consensus_results) + + from rich.table import Table + table = Table(title="Top Consensus Picks") + table.add_column("Ticker", style="cyan") + table.add_column("Score", style="magenta") + table.add_column("Rec", style="green") + table.add_column("Signals", style="yellow") + + for res in consensus_results[:20]: + signals_str = ", ".join([f"{k}:{v}" for k,v in res.signals.items()]) + table.add_row( + res.ticker, + f"{res.score:.2f}", + res.recommendation, + signals_str + ) + console.print(table) + + if not args.quiet: + print_success(f"Identified {len(consensus_results)} consensus candidates") + + # Send Email for Consensus + if not args.no_plots: + consensus_dir = os.path.join(settings.data_dir, "processed_data", "consensus_picks") + if not args.quiet: + print_info(f"Generating Consensus charts...") + + # Prepare metadata + consensus_meta = {} + for res in consensus_results: + meta = { + "Score": f"{res.score:.2f}", + "Signal": res.recommendation + } + # Add Volatility metrics if 
available + if 'vol_meta' in locals() and res.ticker in vol_meta: + meta.update(vol_meta[res.ticker]) + consensus_meta[res.ticker] = meta + + create_batch_charts( + "Consensus Picks", + market, + [r.ticker for r in consensus_results[:50]], + data, + consensus_dir, + analysis_metadata=consensus_meta + ) + else: + if not args.quiet: + print_warning("No strong consensus found") + # Summary if not args.quiet: screeners_run = [s for s in ["Volatility", "Breakout", "Trend"] if s in results] @@ -505,7 +1113,7 @@ def run_screening(args): results=results ) - console.print("\n[dim]Reports saved to data/processed_data/[/dim]") + console.print(f"\n[dim]Reports saved to {os.path.join(settings.data_dir, 'processed_data')}[/dim]") console.print("[bold green]โœจ Screening complete![/bold green]\n") diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 0000000..d81a14f --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,23 @@ +import pytest +import pandas as pd +from tests.fixtures import sample_data + +@pytest.fixture +def uptrend_data(): + """Returns a DataFrame with a 60-day uptrend.""" + return sample_data.make_uptrend() + +@pytest.fixture +def downtrend_data(): + """Returns a DataFrame with a 60-day downtrend.""" + return sample_data.make_downtrend() + +@pytest.fixture +def sideways_data(): + """Returns a DataFrame with 60 days of sideways price action.""" + return sample_data.make_sideways() + +@pytest.fixture +def breakout_data(): + """Returns a DataFrame with a consolidation followed by a breakout.""" + return sample_data.make_breakout() diff --git a/tests/fixtures/__init__.py b/tests/fixtures/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/fixtures/sample_data.py b/tests/fixtures/sample_data.py new file mode 100644 index 0000000..ed89b10 --- /dev/null +++ b/tests/fixtures/sample_data.py @@ -0,0 +1,92 @@ +import pandas as pd +import numpy as np +from datetime import datetime, timedelta + +def make_ohlcv(days=60, 
start_price=100.0, trend=0.0, volatility=0.02, start_date="2023-01-01"): + """ + Generate synthetic OHLCV data. + + Args: + days: Number of trading days + start_price: Starting price + trend: Daily drift (e.g., 0.001 for 0.1% daily up) + volatility: Daily volatility (sigma) + start_date: Start date string (YYYY-MM-DD) + """ + dates = pd.date_range(start=start_date, periods=days, freq="B") # Business days + + close_prices = [start_price] + for _ in range(1, days): + change = np.random.normal(trend, volatility) + new_price = close_prices[-1] * (1 + change) + close_prices.append(new_price) + + data = [] + for date, close in zip(dates, close_prices): + # Generate H/L/O around Close + high = close * (1 + abs(np.random.normal(0, volatility/2))) + low = close * (1 - abs(np.random.normal(0, volatility/2))) + open_ = (high + low) / 2 + np.random.normal(0, volatility/4) + volume = int(np.random.normal(1000000, 200000)) + + data.append({ + "Date": date, + "Open": open_, + "High": high, + "Low": low, + "Close": close, + "Adj Close": close, + "Volume": max(volume, 1000) + }) + + df = pd.DataFrame(data) + df.set_index("Date", inplace=True) + return df + +def make_uptrend(days=60, start_price=100, daily_return=0.005): + """Generate synthetic uptrend data.""" + return make_ohlcv(days=days, start_price=start_price, trend=daily_return, volatility=0.01) + +def make_downtrend(days=60, start_price=100, daily_return=-0.005): + """Generate synthetic downtrend data.""" + return make_ohlcv(days=days, start_price=start_price, trend=daily_return, volatility=0.01) + +def make_sideways(days=60, start_price=100, volatility=0.01): + """Generate synthetic sideways data.""" + return make_ohlcv(days=days, start_price=start_price, trend=0.0, volatility=volatility) + +def make_breakout(days=60, consolidation_days=40, breakout_magnitude=0.05): + """ + Generate consolidation followed by a breakout. 
+ First part: sideways + Second part: sharp move up + """ + consolidation = make_sideways(days=consolidation_days, start_price=100, volatility=0.005) + + last_price = consolidation["Close"].iloc[-1] + last_date = consolidation.index[-1] + + # Breakout candle + breakout_date = last_date + timedelta(days=1) + breakout_open = last_price + breakout_close = last_price * (1 + breakout_magnitude) + breakout_high = breakout_close * 1.01 + breakout_low = breakout_open * 0.99 + breakout_vol = consolidation["Volume"].mean() * 3 # High volume + + breakout_row = pd.DataFrame([{ + "Open": breakout_open, + "High": breakout_high, + "Low": breakout_low, + "Close": breakout_close, + "Adj Close": breakout_close, + "Volume": int(breakout_vol) + }], index=[breakout_date]) + breakout_row.index.name = "Date" + + # Post-breakout continuation + continuation = make_uptrend(days=days-consolidation_days-1, start_price=breakout_close, daily_return=0.002) + # Adjust dates + continuation.index = pd.date_range(start=breakout_date + timedelta(days=1), periods=len(continuation), freq="B") + + return pd.concat([consolidation, breakout_row, continuation]) diff --git a/tests/integration/test_download_module.py b/tests/integration/test_download_module.py new file mode 100644 index 0000000..60d51fb --- /dev/null +++ b/tests/integration/test_download_module.py @@ -0,0 +1,92 @@ +import pytest +import pandas as pd +from unittest.mock import MagicMock, patch +from src.classes.data.DataFetcher import StockFetcher, FetchResult +from src.classes.Download import download + +@pytest.fixture +def sample_price_data(): + dates = pd.date_range("2023-01-01", periods=50) + # Distinct values to survive drop_duplicates + prices = [100.0 + i for i in range(50)] + df = pd.DataFrame({ + "Open": prices, + "High": [p + 5 for p in prices], + "Low": [p - 5 for p in prices], + "Close": [p + 2 for p in prices], + "Volume": [10000 + i*100 for i in range(50)] + }, index=dates) + return df + +def 
test_fetch_one_success(sample_price_data): + """Test successful data fetch.""" + fetcher = StockFetcher(verbose=True) + fetcher.provider = MagicMock() + fetcher.provider.fetch_data.return_value = sample_price_data + fetcher.provider.get_company_info.return_value = {"sector": "Technology"} + + result = fetcher.fetch_one("TEST", "us") + + assert result.success is True + assert result.ticker == "TEST" + assert result.price_data is not None + assert len(result.price_data) == 50 + assert result.company_info == {"sector": "Technology"} + assert result.retries == 0 + +def test_fetch_one_retry_success(sample_price_data): + """Test fetch succeeds after retry.""" + fetcher = StockFetcher(max_retries=2, retry_delays=[0.01, 0.01]) + fetcher.provider = MagicMock() + # First call raises Exception, second returns data + fetcher.provider.fetch_data.side_effect = [Exception("Network Error"), sample_price_data] + fetcher.provider.get_company_info.return_value = {} + + result = fetcher.fetch_one("TEST", "us") + + assert result.success is True + assert result.retries == 1 + assert result.price_data is not None + +def test_fetch_one_retry_exhaustion(): + """Test fetch fails after max retries.""" + fetcher = StockFetcher(max_retries=2, retry_delays=[0.01, 0.01]) + fetcher.provider = MagicMock() + fetcher.provider.fetch_data.side_effect = Exception("Persistent Error") + + result = fetcher.fetch_one("TEST", "us") + + assert result.success is False + assert result.retries == 2 + assert "Persistent Error" in result.error + +def test_download_integration_batch(sample_price_data): + """Test high-level download function with batch processing.""" + tickers = ["AAPL", "GOOGL"] + + # Mock the provider at the StockFetcher level + with patch("src.classes.Download.StockFetcher") as MockFetcherClass: + mock_fetcher = MockFetcherClass.return_value + + # Mock fetch_batch to return FetchResult objects + mock_fetcher.fetch_batch.return_value = { + "AAPL": FetchResult( + ticker="AAPL", + 
price_data=sample_price_data.copy(), + company_info={"sector": "Technology"}, + success=True, + ), + "GOOGL": FetchResult( + ticker="GOOGL", + price_data=sample_price_data.copy(), + company_info={"sector": "Communication"}, + success=True, + ), + } + + with patch("src.classes.Download._save_to_db"): + results = download(market="us", tickers=tickers, use_rich_progress=False) + + assert len(results) > 0 + assert "AAPL" in results["price_data"] + assert "GOOGL" in results["price_data"] diff --git a/tests/integration/test_pipeline.py b/tests/integration/test_pipeline.py new file mode 100644 index 0000000..ac1a77b --- /dev/null +++ b/tests/integration/test_pipeline.py @@ -0,0 +1,77 @@ +import pytest +import pandas as pd +from unittest.mock import MagicMock, patch +from src.project_alpha import run_screening + +@pytest.fixture +def mock_args(): + args = MagicMock() + # Default args + args.market = "us" + args.symbols = None + args.cache = False + args.no_plots = True + args.rank = "growth" + args.backtest = False + args.walk_forward = False + args.quiet = True + args.regime_detection = False + args.consensus = False + args.fundamental = False + args.sentiment = False + args.save_table = False + args.top = None + args.min_price = None + args.max_price = None + args.load_model = None + args.save_model = None + args.screeners = "all" + args.format = "table" + args.verbose = False + args.json_logs = False + args.log_level = "INFO" + return args + +@pytest.fixture +def sample_market_data(): + # Create valid data structure for load_data return + dates = pd.date_range("2023-01-01", periods=30) + prices = [100 + i for i in range(30)] + df = pd.DataFrame({ + "Close": prices, + "Open": prices, + "High": prices, + "Low": prices, + "Volume": [1000]*30, + "Adj Close": prices + }, index=dates) + + return { + "tickers": ["TEST"], + "price_data": {"TEST": df}, + "company_info": {"TEST": {"sector": "Tech", "industry": "Software"}} + } + +def test_pipeline_execution(mock_args, 
sample_market_data): + """Test full pipeline execution with mocked data.""" + + # load_data is imported directly in project_alpha.py + with patch("src.project_alpha.load_data", return_value=sample_market_data) as mock_load: + with patch("src.project_alpha.settings") as mock_settings: + # Setup settings to run a specific screener + mock_settings.screeners = ["trendline"] + mock_settings.trend_lookback_days = 20 + + # Mock console to avoid clutter + with patch("src.project_alpha.console") as mock_console: + # tools is imported as 'tools', so we patch tools.save_dict_with_timestamp + # checking if tools is imported or specific function + # The code has 'import classes.Tools as tools' + with patch("src.project_alpha.tools.save_dict_with_timestamp") as mock_save: + + # Run the function + run_screening(mock_args) + + # Assertions โ€” load_data should be called once + # (exact kwargs depend on mock_args and settings) + assert mock_load.call_count == 1 diff --git a/tests/test_charts_email_logic.py b/tests/test_charts_email_logic.py index c25a419..53c2d57 100644 --- a/tests/test_charts_email_logic.py +++ b/tests/test_charts_email_logic.py @@ -23,6 +23,10 @@ }, index=pd.to_datetime(["2023-01-01", "2023-01-02"])) } data = {"price_data": price_data, "sectors": {"TEST1": "Tech", "TEST2": "Finance"}} +metadata = { + "TEST1": {"Growth": "0.5%", "Vol": "0.05", "Score": "9.5", "Signal": "Strong Buy"}, + "TEST2": {"Growth": "-0.1%", "Vol": "0.15", "Score": "3.0", "Signal": "Hold"} +} output_dir = "tests/test_charts_output" # Ensure config exists for the test to attempt sending @@ -38,7 +42,8 @@ data=data, output_dir=output_dir, batch_size=10, - send_email_flag=True + send_email_flag=True, + analysis_metadata=metadata ) print("create_batch_charts completed successfully!") diff --git a/tests/test_charts_output_batch_0/1_TEST1.svg b/tests/test_charts_output_batch_0/1_TEST1.svg index 9a1f166..b99719e 100644 --- a/tests/test_charts_output_batch_0/1_TEST1.svg +++ 
b/tests/test_charts_output_batch_0/1_TEST1.svg @@ -1 +1 @@ -101101.2101.4101.6101.81020500100015002000LegendCloseDon HighDon MidDon LowTEST1 - N_A N_A N_APriceVolume \ No newline at end of file +101101.2101.4101.6101.81020500100015002000LegendCloseDon HighDon LowTEST1 - N_A N_A N_APriceVolume \ No newline at end of file diff --git a/tests/test_charts_output_batch_0/2_TEST2.svg b/tests/test_charts_output_batch_0/2_TEST2.svg index 37a1046..79f4d82 100644 --- a/tests/test_charts_output_batch_0/2_TEST2.svg +++ b/tests/test_charts_output_batch_0/2_TEST2.svg @@ -1 +1 @@ -5151.251.451.651.8520200400600LegendCloseDon HighDon MidDon LowTEST2 - N_A N_A N_APriceVolume \ No newline at end of file +5151.251.451.651.8520200400600LegendCloseDon HighDon LowTEST2 - N_A N_A N_APriceVolume \ No newline at end of file diff --git a/tests/test_data_layer.py b/tests/test_data_layer.py index 25f22d8..d99f3f6 100644 --- a/tests/test_data_layer.py +++ b/tests/test_data_layer.py @@ -233,24 +233,20 @@ def test_unformat_ticker(self): result = self.fetcher._unformat_ticker("AAPL", "us") self.assertEqual(result, "AAPL") - @patch("classes.data.DataFetcher.yf.Ticker") - def test_fetch_one_success(self, mock_ticker): - """Test successful fetch with mocked yfinance.""" + def test_fetch_one_success(self): + """Test successful fetch with mocked provider.""" # Create mock data - mock_history = pd.DataFrame({ + mock_price_df = pd.DataFrame({ "Open": [100, 101], "High": [105, 106], "Low": [99, 100], "Close": [104, 105], "Volume": [1000000, 1100000], - "Dividends": [0, 0], - "Stock Splits": [0, 0], }, index=pd.date_range("2024-01-01", periods=2)) - mock_ticker_instance = MagicMock() - mock_ticker_instance.history.return_value = mock_history - mock_ticker_instance.info = {"sector": "Technology"} - mock_ticker.return_value = mock_ticker_instance + self.fetcher.provider = MagicMock() + self.fetcher.provider.fetch_data.return_value = mock_price_df + self.fetcher.provider.get_company_info.return_value = 
{"sector": "Technology"} result = self.fetcher.fetch_one("AAPL", "us", "2024-01-01", "2024-01-02") @@ -259,27 +255,22 @@ def test_fetch_one_success(self, mock_ticker): self.assertIsNotNone(result.price_data) self.assertEqual(result.company_info["sector"], "Technology") - @patch("classes.data.DataFetcher.yf.Ticker") - def test_fetch_one_empty_data(self, mock_ticker): + def test_fetch_one_empty_data(self): """Test fetch handling empty data.""" - mock_ticker_instance = MagicMock() - mock_ticker_instance.history.return_value = pd.DataFrame() # Empty - mock_ticker.return_value = mock_ticker_instance + self.fetcher.provider = MagicMock() + self.fetcher.provider.fetch_data.return_value = pd.DataFrame() # Empty result = self.fetcher.fetch_one("INVALID", "us") self.assertFalse(result.success) self.assertIn("No data", result.error) - @patch("classes.data.DataFetcher.yf.Ticker") - def test_fetch_one_retry_on_error(self, mock_ticker): + def test_fetch_one_retry_on_error(self): """Test retry logic on fetch error.""" - mock_ticker_instance = MagicMock() - mock_ticker_instance.history.side_effect = Exception("Network error") - mock_ticker.return_value = mock_ticker_instance - - # Use short delays for testing fetcher = StockFetcher(max_retries=2, retry_delays=[0.01, 0.02]) + fetcher.provider = MagicMock() + fetcher.provider.fetch_data.side_effect = Exception("Network error") + result = fetcher.fetch_one("AAPL", "us") self.assertFalse(result.success) diff --git a/tests/unit/test_backtesting.py b/tests/unit/test_backtesting.py new file mode 100644 index 0000000..67e7e64 --- /dev/null +++ b/tests/unit/test_backtesting.py @@ -0,0 +1,75 @@ +import pytest +import pandas as pd +import numpy as np +from datetime import datetime +import os +from unittest.mock import MagicMock + +from src.classes.backtesting.engine import BacktestEngine, ProjectAlphaStrategy +from src.classes.screeners.breakout import BreakoutScreener +from src.classes.screeners.trendline import TrendlineScreener +from 
src.classes.backtesting.performance import BacktestPerformance + +# Mock Data Fixture +@pytest.fixture +def mock_ohlcv_data(): + dates = pd.date_range(start="2023-01-01", periods=100, freq='D') + df = pd.DataFrame({ + 'Open': np.linspace(100, 200, 100), + 'High': np.linspace(105, 205, 100), + 'Low': np.linspace(95, 195, 100), + 'Close': np.linspace(100, 200, 100), # Exact uptrend + 'Volume': np.random.randint(1000, 2000, 100) + }, index=dates) + return df + +def test_backtest_engine_run(mock_ohlcv_data): + # Test execution with Breakout screener logic + engine = BacktestEngine(mock_ohlcv_data, initial_capital=10000) + bt, stats = engine.run(strategy_class=ProjectAlphaStrategy, screener_cls=BreakoutScreener) + + assert stats is not None + assert 'Start' in stats + assert 'End' in stats + assert 'Return [%]' in stats + +def test_backtest_performance_metrics(mock_ohlcv_data): + engine = BacktestEngine(mock_ohlcv_data) + bt, stats = engine.run(strategy_class=ProjectAlphaStrategy, screener_cls=BreakoutScreener) + + result = BacktestPerformance.extract_metrics(stats, "MOCK", "Breakout") + + assert result.ticker == "MOCK" + assert result.strategy == "Breakout" + assert isinstance(result.return_pct, float) + assert isinstance(result.sharpe_ratio, float) + +def test_vectorized_signals_uptrend(mock_ohlcv_data): + # In uptrend mock data, Close > SMA20 should trigger signals + # We test via the strategy indirect execution or checking adapter logic directly + from src.classes.backtesting.adapter import ScreenerSignalAdapter + + adapter = ScreenerSignalAdapter(BreakoutScreener()) + # Breakout logic requires Close > SMA20 AND Volume > VolSMA20 etc. + # Our mock data is perfectly linear uptrend so Close > SMA20 is true after window + # Volume is random though. 
+ + # Let's adjust volume to be increasing to force signal + mock_ohlcv_data['Volume'] = np.linspace(1000, 2000, 100) + + signals = adapter.compute_signal_vectorized_breakout(mock_ohlcv_data) + + # Should be 0 for first 20 days (NaN SMA), then 1 because Close > SMA and Vol > SMA + assert signals.iloc[0] == 0 + + # Ensure Day 30 meets all conditions: + # 1. Close > SMA20 (Already true due to uptrend) + # 2. Volume > SMA20_Volume (We set increasing volume, so latest > average) + # 3. Close > Open (Green candle) + # In our mock data: Open=130.3, Close=130.3 -> Doji. This might fail Close > Open check. + # Let's force a green candle on day 30 + mock_ohlcv_data.iloc[30, mock_ohlcv_data.columns.get_loc('Close')] = 135 + mock_ohlcv_data.iloc[30, mock_ohlcv_data.columns.get_loc('Open')] = 130 + + signals = adapter.compute_signal_vectorized_breakout(mock_ohlcv_data) + assert signals.iloc[30] == 1 # Check a point well into trend diff --git a/tests/unit/test_breakout_confirmation.py b/tests/unit/test_breakout_confirmation.py new file mode 100644 index 0000000..6fab94c --- /dev/null +++ b/tests/unit/test_breakout_confirmation.py @@ -0,0 +1,105 @@ +import pytest +import pandas as pd +import numpy as np +from src.classes.screeners.breakout import BreakoutScreener +from src.classes.screeners.base import Signal + +@pytest.fixture +def breakout_data(): + """Create synthetic data with a breakout pattern.""" + dates = pd.date_range(start="2023-01-01", periods=50, freq="B") + data = pd.DataFrame({ + "Open": np.linspace(100, 105, 50), + "High": np.linspace(101, 106, 50), + "Low": np.linspace(99, 104, 50), + "Close": np.linspace(100.5, 105.5, 50), + "Volume": np.full(50, 1000000), + "Adj Close": np.linspace(100.5, 105.5, 50) + }, index=dates) + + # Create valid breakout on the last day + # 1. Bullish candle: Close > Open + # 2. Max body in 10 days: Large body + # 3. Low selling pressure: High ~= Close + # 4. OC > threshold: Large move + # 5. 
Volume > threshold: High volume + + idx = -1 + data.iloc[idx, data.columns.get_loc("Open")] = 100.0 + data.iloc[idx, data.columns.get_loc("Close")] = 110.0 # +10% move + data.iloc[idx, data.columns.get_loc("High")] = 110.1 # Tiny wick + data.iloc[idx, data.columns.get_loc("Low")] = 99.0 + data.iloc[idx, data.columns.get_loc("Volume")] = 2000000 # 2x avg volume + + return data + +@pytest.fixture +def screener(): + return BreakoutScreener( + min_adx=20.0, + atr_expansion_factor=1.5 + ) + +def test_breakout_confirmed(screener, breakout_data): + """Test valid breakout with strong ADX and expanding ATR.""" + df = breakout_data.copy() + + # Add ADX > 20 + df["ADX"] = 25.0 + + # Add ATR expansion + # Mean ATR = 1.0, Current ATR = 2.0 ( > 1.5x) + df["ATR"] = 1.0 + df.iloc[-1, df.columns.get_loc("ATR")] = 2.0 + + result = screener.screen("TEST", df) + + assert result.signal == Signal.BUY + assert result.details["breakout_count"] >= 1 + assert result.details["adx"] == 25.0 + # Mean of 19 ones and 1 two is 1.05. 
Expansion = 2.0 / 1.05 ~= 1.90 + assert result.details["atr_expansion"] == pytest.approx(1.9, rel=0.1) + +def test_breakout_rejected_low_adx(screener, breakout_data): + """Test breakout rejected due to low ADX (weak trend).""" + df = breakout_data.copy() + + # Weak trend + df["ADX"] = 15.0 + + # Valid ATR + df["ATR"] = 1.0 + df.iloc[-1, df.columns.get_loc("ATR")] = 2.0 + + result = screener.screen("TEST", df) + + # Should hold because ADX < 20 + assert result.signal == Signal.HOLD + assert "No breakout pattern found" in result.details.get("reason", "") + +def test_breakout_rejected_low_atr_expansion(screener, breakout_data): + """Test breakout rejected due to lack of volatility expansion.""" + df = breakout_data.copy() + + # Strong trend + df["ADX"] = 25.0 + + # No ATR expansion + df["ATR"] = 1.0 + df.iloc[-1, df.columns.get_loc("ATR")] = 1.2 # < 1.5x + + result = screener.screen("TEST", df) + + # Should hold because ATR expansion < 1.5 + assert result.signal == Signal.HOLD + +def test_missing_indicators_fallback(screener, breakout_data): + """Test fallback when ADX/ATR columns are missing (should skip checks).""" + df = breakout_data.copy() + # No ADX or ATR columns + + result = screener.screen("TEST", df) + + # Should BUY because indicators are missing, so checks are skipped + assert result.signal == Signal.BUY + assert "adx" not in result.details diff --git a/tests/unit/test_config.py b/tests/unit/test_config.py new file mode 100644 index 0000000..9543e1d --- /dev/null +++ b/tests/unit/test_config.py @@ -0,0 +1,32 @@ +import pytest +from src.config.settings import Settings + +def test_settings_defaults(): + """Verify default values are loaded correctly.""" + settings = Settings() + assert settings.market == "us" + assert settings.risk_per_trade == 0.01 + assert settings.atr_multiplier == 2.0 + assert settings.smtp_port == 587 + +def test_settings_env_override(monkeypatch): + """Verify environment variables override defaults.""" + monkeypatch.setenv("PA_MARKET", 
"india") + monkeypatch.setenv("PA_RISK_PER_TRADE", "0.02") + monkeypatch.setenv("PA_ATR_MULTIPLIER", "3.0") + + settings = Settings() + assert settings.market == "india" + assert settings.risk_per_trade == 0.02 + assert settings.atr_multiplier == 3.0 + +def test_settings_type_coercion(monkeypatch): + """Verify string env vars are coerced to correct types.""" + monkeypatch.setenv("PA_MAX_POSITIONS", "20") + monkeypatch.setenv("PA_LEARNING_RATE", "0.05") + + settings = Settings() + assert settings.max_positions == 20 + assert isinstance(settings.max_positions, int) + assert settings.learning_rate == 0.05 + assert isinstance(settings.learning_rate, float) diff --git a/tests/unit/test_consensus.py b/tests/unit/test_consensus.py new file mode 100644 index 0000000..2e04bcd --- /dev/null +++ b/tests/unit/test_consensus.py @@ -0,0 +1,66 @@ +import pytest +from classes.screeners.consensus import ConsensusEngine, ConsensusResult +from classes.screeners.base import ScreenerResult, Signal + +@pytest.fixture +def engine(): + return ConsensusEngine() + +def test_consensus_score_calculation(engine): + """Test basic weighted score calculation.""" + # Breakout (0.4) + Trend (0.3) + Volatility (0.1) + Fund (0.1) + Sent (0.1) + # Total = 1.0 + + results = { + "breakout": ScreenerResult(ticker="TEST", signal=Signal.BUY, confidence=0.8), + "trend": ScreenerResult(ticker="TEST", signal=Signal.BUY, confidence=0.6), + } + + # breakout: 0.8 * 0.4 = 0.32 + # trend: 0.6 * 0.3 = 0.18 + # Synergy bonus apply? 
Yes, both > 0 + # Base = 0.50 + 0.1 bonus = 0.60 + + consensus = engine.calculate_score("TEST", results) + + assert consensus.score == 0.60 + assert consensus.primary_signal == Signal.BUY + assert consensus.recommendation == "BUY" + +def test_consensus_synergy_boost(engine): + """Test that having both Breakout and Trend boosts the score.""" + results = { + "breakout": ScreenerResult(ticker="TEST", signal=Signal.BUY, confidence=0.5), # 0.2 + "trend": ScreenerResult(ticker="TEST", signal=Signal.BUY, confidence=0.5), # 0.15 + } + # Base = 0.35 + 0.1 bonus = 0.45 + + consensus = engine.calculate_score("TEST", results) + assert consensus.signals["synergy_bonus"] == 0.1 + assert consensus.score == 0.45 + +def test_consensus_single_signal(engine): + """Test score with only one signal.""" + results = { + "breakout": ScreenerResult(ticker="TEST", signal=Signal.BUY, confidence=0.9), # 0.36 + } + # Base = 0.36, No bonus + + consensus = engine.calculate_score("TEST", results) + assert "synergy_bonus" not in consensus.signals + assert consensus.score == 0.36 + assert consensus.primary_signal == Signal.HOLD # Score < 0.5 + +def test_consensus_with_filters(engine): + """Test consensus with fundamental/sentiment filters.""" + results = { + "breakout": ScreenerResult(ticker="TEST", signal=Signal.BUY, confidence=0.8), # 0.32 + } + filters = { + "fundamental": 1.0, # 0.1 + "sentiment": 0.5 # 0.05 + } + # Base = 0.32 + 0.1 + 0.05 = 0.47 + + consensus = engine.calculate_score("TEST", results, filters) + assert consensus.score == 0.47 diff --git a/tests/unit/test_data_repair.py b/tests/unit/test_data_repair.py new file mode 100644 index 0000000..76ca01c --- /dev/null +++ b/tests/unit/test_data_repair.py @@ -0,0 +1,50 @@ +import pytest +import pandas as pd +import numpy as np +from classes.data.validators import repair_data + +def test_repair_negative_volume(): + df = pd.DataFrame({ + "Volume": [100, -50, 200, -10], + "Open": [10, 11, 12, 13], + "High": [10, 11, 12, 13], + "Low": [10, 
11, 12, 13], + "Close": [10, 11, 12, 13] + }) + df_repaired = repair_data(df, "TEST") + assert df_repaired["Volume"].min() >= 0 + assert df_repaired["Volume"].iloc[1] == 0 + assert df_repaired["Volume"].iloc[3] == 0 + +def test_repair_missing_prices(): + df = pd.DataFrame({ + "Close": [10.0, np.nan, 12.0, 13.0], + "Open": [10.0, 11.0, 12.0, 13.0], + "High": [10.0, 11.0, 12.0, 13.0], + "Low": [10.0, 11.0, 12.0, 13.0], + "Volume": [100, 100, 100, 100] + }) + # Add datetime index for interpolation + df.index = pd.date_range("2023-01-01", periods=4) + + df_repaired = repair_data(df, "TEST") + assert not df_repaired["Close"].isnull().any() + # Interpolated value should be 11.0 + assert df_repaired["Close"].iloc[1] == 11.0 + +def test_repair_drop_unrepairable(): + df = pd.DataFrame({ + "Close": [10.0, np.nan, np.nan, np.nan, np.nan, 15.0], # Too many NaNs > limit=3 + "Open": [10.0, 10.0, 10.0, 10.0, 10.0, 15.0], + "High": [10.0, 10.0, 10.0, 10.0, 10.0, 15.0], + "Low": [10.0, 10.0, 10.0, 10.0, 10.0, 15.0], + "Volume": [100, 100, 100, 100, 100, 100] + }) + df.index = pd.date_range("2023-01-01", periods=6) + + df_repaired = repair_data(df, "TEST") + # Should drop the NaNs if interpolation failed or limit exceeded? + # limit=3 means max 3 consecutive NaNs are filled. + # Here we have 4. So some might remain and then be dropped. 
+ assert not df_repaired.isnull().any().any() + assert len(df_repaired) < 6 diff --git a/tests/unit/test_database_manager.py b/tests/unit/test_database_manager.py new file mode 100644 index 0000000..b543325 --- /dev/null +++ b/tests/unit/test_database_manager.py @@ -0,0 +1,82 @@ +import pytest +import sqlite3 +import pandas as pd +import json +from src.classes import DatabaseManager + +@pytest.fixture +def db_connection(): + """Create an in-memory database connection for testing.""" + conn = sqlite3.connect(":memory:") + DatabaseManager.create_tables(conn) + yield conn + conn.close() + +def test_create_tables(db_connection): + """Test table creation.""" + cursor = db_connection.cursor() + + # Check if price_data table exists + cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='price_data'") + assert cursor.fetchone() is not None + + # Check if company_info table exists + cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='company_info'") + assert cursor.fetchone() is not None + +def test_insert_and_get_price_rows(db_connection): + """Test inserting and retrieving price data.""" + symbol = "TEST" + dates = pd.date_range("2023-01-01", periods=5) + data = { + "Open": [100.0] * 5, + "High": [105.0] * 5, + "Low": [95.0] * 5, + "Close": [102.0] * 5, + "Adj Close": [102.0] * 5, + "Volume": [1000] * 5, + "Dividends": [0.0] * 5, + "Stock Splits": [0.0] * 5 + } + df = pd.DataFrame(data, index=dates) + + DatabaseManager.insert_price_rows(db_connection, symbol, df) + + # Verify data retrieval + retrieved_df = DatabaseManager.get_price_dataframe(db_connection, symbol) + assert not retrieved_df.empty + assert len(retrieved_df) == 5 + assert "Close" in retrieved_df.columns + assert retrieved_df.iloc[0]["Close"] == 102.0 + +def test_insert_and_get_company_info(db_connection): + """Test inserting and retrieving company info.""" + symbol = "TEST" + info = {"sector": "Technology", "industry": "Software"} + + 
DatabaseManager.insert_company_info(db_connection, symbol, info) + + retrieved_info = DatabaseManager.get_company_info(db_connection, symbol) + assert retrieved_info == info + +def test_get_last_date(db_connection): + """Test retrieving last date.""" + symbol = "TEST" + dates = pd.date_range("2023-01-01", periods=2) + data = { + "Open": [100.0] * 2, + "High": [105.0] * 2, + "Low": [95.0] * 2, + "Close": [102.0] * 2, + "Adj Close": [102.0] * 2, + "Volume": [1000] * 2, + "Dividends": [0.0] * 2, + "Stock Splits": [0.0] * 2 + } + df = pd.DataFrame(data, index=dates) + DatabaseManager.insert_price_rows(db_connection, symbol, df) + + last_date = DatabaseManager.get_last_date(db_connection, symbol) + # Sqlite might store as string without time if we converted it, or with time. + # Just check the date part. + assert str(last_date).startswith("2023-01-02") diff --git a/tests/unit/test_download.py b/tests/unit/test_download.py new file mode 100644 index 0000000..d6f1760 --- /dev/null +++ b/tests/unit/test_download.py @@ -0,0 +1,50 @@ +import pytest +from unittest.mock import MagicMock, patch +import pandas as pd +from src.classes import Download +from requests.models import Response + +@pytest.fixture +def mock_response(): + resp = Response() + resp.status_code = 200 + resp._content = 
b'{"chart":{"result":[{"meta":{"currency":"USD","symbol":"AAPL","exchangeName":"NMS","instrumentType":"EQUITY","firstTradeDate":345479400,"regularMarketTime":1616184000,"gmtoffset":-14400,"timezone":"EDT","exchangeTimezoneName":"America/New_York","regularMarketPrice":119.99,"chartPreviousClose":120.53,"priceHint":2,"currentTradingPeriod":{"pre":{"timezone":"EDT","end":1616164200,"start":1616140800,"gmtoffset":-14400},"regular":{"timezone":"EDT","end":1616187600,"start":1616164200,"gmtoffset":-14400},"post":{"timezone":"EDT","end":1616202000,"start":1616187600,"gmtoffset":-14400}},"dataGranularity":"1d","range":"","validRanges":["1d","5d","1mo","3mo","6mo","1y","2y","5y","10y","ytd","max"]},"timestamp":[1616164200],"indicators":{"quote":[{"low":[119.0],"open":[120.0],"high":[121.0],"volume":[1000],"close":[119.9]}],"adjclose":[{"adjclose":[119.9]}]}}],"error":null}}' + return resp + +def test_download_one_success(mock_response): + with patch("requests.get", return_value=mock_response) as mock_get: + data = Download._download_one("AAPL", 1616164200, 1616187600) + assert data["chart"]["result"][0]["meta"]["symbol"] == "AAPL" + mock_get.assert_called_once() + +def test_parse_quotes(): + data = { + "timestamp": [1616164200, 1616250600], + "indicators": { + "quote": [{ + "open": [100.0, 101.0], + "high": [105.0, 106.0], + "low": [95.0, 96.0], + "close": [102.0, 103.0], + "volume": [1000, 1200] + }], + "adjclose": [{ + "adjclose": [102.0, 103.0] + }] + } + } + + df = Download._parse_quotes(data) + assert not df.empty + assert len(df) == 2 + assert "Adj Close" in df.columns + assert "Volume" in df.columns + # Ensure correct index type (pandas converts to datetime) + assert isinstance(df.index, pd.Index) + +def test_handle_start_end_dates(): + start, end = Download._handle_start_end_dates("2023-01-01", "2023-01-31") + assert isinstance(start, int) + assert isinstance(end, int) + assert start < end + diff --git a/tests/unit/test_filters.py b/tests/unit/test_filters.py new 
file mode 100644 index 0000000..0827865 --- /dev/null +++ b/tests/unit/test_filters.py @@ -0,0 +1,102 @@ +import pytest +from unittest.mock import MagicMock, patch +from src.classes.filters.fundamental_filter import FundamentalFilter +from src.classes.filters.sentiment_filter import SentimentFilter + +# --- FundamentalFilter Tests --- + +def test_fundamental_filter_no_key(): + """Test safe fallback when no API key is provided.""" + f = FundamentalFilter(api_key=None) + result = f.check_health("TEST") + assert result["passed"] is True + assert result["reason"] == "No API access" + +@patch("finnhub.Client") +def test_fundamental_filter_pass(mock_client_cls): + """Test passing fundamental check.""" + mock_client = MagicMock() + mock_client.company_basic_financials.return_value = { + "metric": { + "totalDebtToEquity": 50.0, # Healthy + "netProfitMarginTTM": 10.0 + } + } + mock_client_cls.return_value = mock_client + + f = FundamentalFilter(api_key="fake_key") + result = f.check_health("AAPL") + + assert result["passed"] is True + assert result["reason"] == "Fundamentals OK" + +@patch("finnhub.Client") +def test_fundamental_filter_fail(mock_client_cls): + """Test failing fundamental check (High Debt).""" + mock_client = MagicMock() + mock_client.company_basic_financials.return_value = { + "metric": { + "totalDebtToEquity": 300.0, # > 200 threshold + "netProfitMarginTTM": 5.0 + } + } + mock_client_cls.return_value = mock_client + + f = FundamentalFilter(api_key="fake_key") + result = f.check_health("BAD_STOCK") + + assert result["passed"] is False + assert "High Debt" in result["reason"] + +# --- SentimentFilter Tests --- + +@patch("src.classes.filters.sentiment_filter.pipeline") +def test_sentiment_filter_positive(mock_pipeline): + """Test positive sentiment aggregation.""" + mock_classifier = MagicMock() + mock_classifier.return_value = [ + {"label": "positive", "score": 0.9}, + {"label": "neutral", "score": 0.5}, # 0.0 value + ] + mock_pipeline.return_value = 
mock_classifier + + SentimentFilter._classifier = None # Reset singleton + s = SentimentFilter() + headlines = ["Great earnings", "Meeting tomorrow"] + result = s.analyze_sentiment(headlines) + + # Avg: (0.9 + 0.0) / 2 = 0.45 > 0.15 + assert result["label"] == "positive" + assert result["score"] == 0.45 + +@patch("src.classes.filters.sentiment_filter.pipeline") +def test_sentiment_filter_negative(mock_pipeline): + """Test negative sentiment aggregation.""" + mock_classifier = MagicMock() + mock_classifier.return_value = [ + {"label": "negative", "score": 0.8}, + {"label": "negative", "score": 0.4}, + ] + mock_pipeline.return_value = mock_classifier + + SentimentFilter._classifier = None # Reset singleton + s = SentimentFilter() + headlines = ["Lawsuit filed", "CEO resigns"] + result = s.analyze_sentiment(headlines) + + # Avg: (-0.8 + -0.4) / 2 = -0.6 < -0.15 + assert result["label"] == "negative" + assert result["score"] == -0.6 + +@patch("src.classes.filters.sentiment_filter.pipeline") +def test_sentiment_filter_error_handling(mock_pipeline): + """Test robust error handling.""" + mock_pipeline.side_effect = Exception("Model failed") + + SentimentFilter._classifier = None # Reset singleton + s = SentimentFilter() + assert s.classifier is None + + result = s.analyze_sentiment(["Some headline"]) + assert result["label"] == "neutral" + assert result["score"] == 0.0 diff --git a/tests/unit/test_models.py b/tests/unit/test_models.py new file mode 100644 index 0000000..7020ac7 --- /dev/null +++ b/tests/unit/test_models.py @@ -0,0 +1,40 @@ +import pytest +import numpy as np +import tensorflow as tf +from src.classes import Models + +# Only run if TensorFlow is installed +try: + import tensorflow_probability as tfp + TF_AVAILABLE = True +except ImportError: + TF_AVAILABLE = False + +@pytest.mark.skipif(not TF_AVAILABLE, reason="TensorFlow Probability not installed") +def test_s_model_creation(): + """Test creation of s_model Distribution.""" + # order_scale has len 1 -> 
order = 0. + # phi shape: (num_stocks, order+1) -> (2, 1) + # tt shape: (order+1, T) -> (1, 10). tensordot axes=1 sums dim 1 of phi and dim 0 of tt. + info = { + "tt": np.random.randn(1, 10).astype(np.float32), # Corrected shape (1, 10) + "order_scale": np.array([1.0], dtype=np.float32), + "num_stocks": 2 + } + + model = Models.s_model(info) + assert model is not None + # Check if we can sample + sample = model.sample() + assert len(sample) == 3 # phi, psi, y + +@pytest.mark.skipif(not TF_AVAILABLE, reason="TensorFlow Probability not installed") +def test_conj_lin_model(): + """Test conjugate linear model logic.""" + # Create random log-price data: (2 stocks, 10 days) + logp = np.random.randn(2, 10) + + preds = Models.conj_lin_model(logp) + + # Should return predictions for T-1 steps + assert preds.shape == (2, 9) diff --git a/tests/unit/test_provider_chain.py b/tests/unit/test_provider_chain.py new file mode 100644 index 0000000..b349442 --- /dev/null +++ b/tests/unit/test_provider_chain.py @@ -0,0 +1,203 @@ +import pytest +from unittest.mock import patch, MagicMock +from datetime import datetime +import pandas as pd +from datetime import datetime +import pandas as pd +from unittest.mock import patch, MagicMock +from src.classes.data.provider_chain import YFinanceProvider, PolygonProvider + +class TestYFinanceProvider: + @pytest.fixture + def provider(self): + return YFinanceProvider() + + @pytest.fixture + def mock_yf_download(self): + with patch('yfinance.download') as mock: + yield mock + + def test_fetch_data_success(self, provider, mock_yf_download): + # Setup mock data + mock_df = pd.DataFrame({ + 'Open': [100.0, 101.0], + 'High': [105.0, 106.0], + 'Low': [99.0, 100.0], + 'Close': [102.0, 103.0], + 'Volume': [1000, 1100], + 'Adj Close': [102.0, 103.0] # Should be ignored or handled + }, index=pd.to_datetime(['2023-01-01', '2023-01-02'])) + + mock_yf_download.return_value = mock_df + + start = datetime(2023, 1, 1) + end = datetime(2023, 1, 3) + result = 
provider.fetch_data('AAPL', start, end) + + # Assertions + assert not result.empty + assert list(result.columns) == ['Open', 'High', 'Low', 'Close', 'Volume'] + assert len(result) == 2 + assert result.index.freq is None # or whatever pandas infers + + mock_yf_download.assert_called_once_with( + 'AAPL', start=start, end=end, progress=False, auto_adjust=False + ) + + def test_fetch_data_returns_empty(self, provider, mock_yf_download): + mock_yf_download.return_value = pd.DataFrame() + + start = datetime(2023, 1, 1) + end = datetime(2023, 1, 2) + result = provider.fetch_data('INVALID', start, end) + + assert result.empty + + def test_fetch_data_handles_multiindex_columns(self, provider, mock_yf_download): + # yfinance sometimes returns MultiIndex columns (Price, Ticker) + mock_df = pd.DataFrame({ + ('Open', 'AAPL'): [100.0], + ('High', 'AAPL'): [105.0], + ('Low', 'AAPL'): [99.0], + ('Close', 'AAPL'): [102.0], + ('Volume', 'AAPL'): [1000] + }, index=pd.to_datetime(['2023-01-01'])) + + mock_yf_download.return_value = mock_df + + start = datetime(2023, 1, 1) + end = datetime(2023, 1, 2) + result = provider.fetch_data('AAPL', start, end) + + assert not result.empty + assert list(result.columns) == ['Open', 'High', 'Low', 'Close', 'Volume'] + + def test_fetch_data_missing_columns(self, provider, mock_yf_download): + # Case where returned data is missing required columns + mock_df = pd.DataFrame({ + 'Open': [100.0], + # Missing High, Low, Close, Volume + }, index=pd.to_datetime(['2023-01-01'])) + + mock_yf_download.return_value = mock_df + + + with pytest.raises(KeyError): + provider.fetch_data('AAPL', datetime(2023, 1, 1), datetime(2023, 1, 2)) + + def test_check_health_success(self, provider): + # Mock yf.Ticker(...).history(...) 
+ with patch('yfinance.Ticker') as mock_ticker_cls: + mock_ticker = mock_ticker_cls.return_value + # Non-empty dataframe check + mock_ticker.history.return_value = pd.DataFrame({'Close': [100]}) + + assert provider.check_health() is True + mock_ticker.history.assert_called_with(period="1d") + + def test_check_health_failure(self, provider): + with patch('yfinance.Ticker') as mock_ticker_cls: + mock_ticker = mock_ticker_cls.return_value + # Empty dataframe check + mock_ticker.history.return_value = pd.DataFrame() + + assert provider.check_health() is False + + # Exception check + with patch('yfinance.Ticker') as mock_ticker_cls: + mock_ticker_cls.side_effect = Exception("API error") + assert provider.check_health() is False + + def test_get_company_info(self, provider): + with patch('yfinance.Ticker') as mock_ticker_cls: + mock_info = {"sector": "Technology", "symbol": "AAPL"} + mock_ticker_cls.return_value.info = mock_info + + info = provider.get_company_info("AAPL") + assert info == mock_info + mock_ticker_cls.assert_called_with("AAPL") + + def test_get_company_info_failure(self, provider): + with patch('yfinance.Ticker') as mock_ticker_cls: + mock_ticker_cls.side_effect = Exception("API Error") + assert provider.get_company_info("AAPL") == {} + + + +class TestPolygonProvider: + @pytest.fixture + def mock_client(self): + with patch('src.classes.data.provider_chain.RESTClient') as mock: + yield mock.return_value + + @pytest.fixture + def provider(self, mock_client): + return PolygonProvider(api_key="fake_key") + + def test_fetch_data_success(self, provider, mock_client): + # Mock Agg objects + agg1 = MagicMock(timestamp=1672531200000, open=100, high=105, low=99, close=102, volume=1000) + agg2 = MagicMock(timestamp=1672617600000, open=101, high=106, low=100, close=103, volume=1100) + + mock_client.list_aggs.return_value = [agg1, agg2] + + start = datetime(2023, 1, 1) + end = datetime(2023, 1, 3) + result = provider.fetch_data('AAPL', start, end) + + assert not 
result.empty + assert len(result) == 2 + assert list(result.columns) == ['Open', 'High', 'Low', 'Close', 'Volume'] + assert result.index[0] == datetime.fromtimestamp(1672531200) + + mock_client.list_aggs.assert_called_once() + args, kwargs = mock_client.list_aggs.call_args + assert args[0] == 'AAPL' + assert kwargs['limit'] == 50000 + + def test_fetch_data_empty(self, provider, mock_client): + mock_client.list_aggs.return_value = [] + + start = datetime(2023, 1, 1) + end = datetime(2023, 1, 2) + result = provider.fetch_data('AAPL', start, end) + + assert result.empty + assert isinstance(result, pd.DataFrame) + + def test_check_health_success(self, provider, mock_client): + # Mock get_last_trade returning something valid + mock_client.get_last_trade.return_value = MagicMock() + + assert provider.check_health() is True + mock_client.get_last_trade.assert_called_with(ticker="SPY") + + def test_check_health_failure(self, provider, mock_client): + # Mock failure (returns None or raises) + mock_client.get_last_trade.return_value = None + assert provider.check_health() is False + + # Exception + mock_client.get_last_trade.side_effect = Exception("API Error") + assert provider.check_health() is False + + def test_get_company_info(self, provider, mock_client): + # Mock details object + mock_details = MagicMock() + mock_details.sic_description = "Technology" + mock_details.market_cap = 1000000 + + mock_client.get_ticker_details.return_value = mock_details + + info = provider.get_company_info("AAPL") + assert info["sector"] == "Technology" + assert info["marketCap"] == 1000000 + + mock_client.get_ticker_details.assert_called_with("AAPL") + + def test_get_company_info_failure(self, provider, mock_client): + mock_client.get_ticker_details.side_effect = Exception("API Error") + assert provider.get_company_info("AAPL") == {} + + + diff --git a/tests/unit/test_regime.py b/tests/unit/test_regime.py new file mode 100644 index 0000000..ced8a2f --- /dev/null +++ 
b/tests/unit/test_regime.py @@ -0,0 +1,122 @@ +import pandas as pd +import numpy as np +import pytest +from unittest.mock import MagicMock, patch +from classes.analysis.regime import RegimeDetector + + +@pytest.fixture +def sample_data(): + """Create synthetic price data with clear trends.""" + dates = pd.date_range(start="2023-01-01", periods=100, freq="D") + + # Create 3 segments: Bull, Bear, Sideways + # Bull: +1% daily + bull = np.cumprod(np.ones(30) * 1.01) + + # Sideways: Oscillation + sideways = np.ones(40) * bull[-1] + np.sin(np.arange(40)) * 2 + + # Bear: -1% daily + bear = np.cumprod(np.ones(30) * 0.99) * sideways[-1] + + prices = np.concatenate([bull, sideways, bear]) + return pd.DataFrame({"Close": prices}, index=dates) + + +class TestRegimeDetector: + + def test_initialization(self): + detector = RegimeDetector(n_components=3) + assert detector.n_components == 3 + assert detector.state_map == {} + + def test_prepare_features(self, sample_data): + detector = RegimeDetector() + features = detector.prepare_features(sample_data) + + # Original 100 points + # log returns needs 1 shift -> 99 + # rolling 20 needs 19 more -> 80 valid points + # actually rolling is inclusive but first 19 are NaN + # so 100 - 1 - 19 = 80 ? + # computed on log_ret which has 99 points. + # log_ret[0] is NaN. + # rolling starts at index 0 of log_ret? no. + # Let's check logic: + # log_ret has first NaN. + # rolling(20) on log_ret... first 20 items will include that NaN? + # Actually standard Pandas rolling behaviour. 
+ + # Expected shape: (n_samples, 2) + assert features.shape[1] == 2 + assert len(features) > 0 + + def test_fit_logic(self, sample_data): + detector = RegimeDetector() + + # Use a mock for GaussianHMM to avoid complex fitting in unit test + # but we also want to test the mapping logic which depends on means_ + + # Let's try fitting real small data, HMM is fast enough for 100 points + detector.fit(sample_data) + + assert len(detector.state_map) == 3 + assert "Bull" in detector.state_map.values() + assert "Bear" in detector.state_map.values() + assert "Sideways" in detector.state_map.values() + + def test_mapping_correctness(self): + """Verify that highest return state is mapped to Bull.""" + detector = RegimeDetector() + + # Mock the model and its attributes + detector.model.fit = MagicMock() + # 3 states with returns: -0.01 (Bear), 0.0 (Sideways), 0.01 (Bull) + # means_ shape is (n_components, n_features) + detector.model.means_ = np.array([ + [-0.01, 0.005], # State 0: Negative return + [0.01, 0.005], # State 1: Positive return + [0.00, 0.002] # State 2: Zero return + ]) + + # Manually trigger the mapping logic by calling fit with mocked helper + # Since fit calls prepare_features and then model.fit, we can't easily partially mock + # unless we extract mapping logic or mock everything. + # Let's just mock prepare_features to return dummy X + + with patch.object(detector, 'prepare_features', return_value=np.zeros((10, 2))): + detector.fit(pd.DataFrame({'Close': range(25)})) + + # State 1 has highest return (0.01) -> Bull + # State 0 has lowest return (-0.01) -> Bear + # State 2 is middle -> Sideways + + assert detector.state_map[1] == "Bull" + assert detector.state_map[0] == "Bear" + assert detector.state_map[2] == "Sideways" + + def test_predict_alignment(self, sample_data): + detector = RegimeDetector() + detector.fit(sample_data) + + result = detector.predict(sample_data) + + # Result should have same index as input? 
+ # Our logic returns df with new columns, but NaNs at start might be handled + # `prepare_features` drops NaNs. + # If we just return the tail, alignment is preserved for those dates. + # The rows with NaNs (first 20) should have NaN in Regime columns or be missing? + + # Current logic: `df.copy()` then assign `regime_series` which is indexed by valid dates. + # So rows 0-19 should have NaN in Regime_State/Regime. + + assert len(result) == len(sample_data) + assert "Regime" in result.columns + + # First 20 rows should be NaN because of rolling window + assert pd.isna(result["Regime"].iloc[0]) + assert pd.isna(result["Regime"].iloc[18]) + + # Later rows should be populated + assert not pd.isna(result["Regime"].iloc[-1]) diff --git a/tests/unit/test_risk_manager.py b/tests/unit/test_risk_manager.py new file mode 100644 index 0000000..526cdad --- /dev/null +++ b/tests/unit/test_risk_manager.py @@ -0,0 +1,111 @@ +import pytest +import math +from unittest.mock import patch, MagicMock +from src.classes.risk.risk_manager import RiskManager, OrderValidation + +# Mock settings to ensure consistent test environment +@pytest.fixture +def mock_settings(): + with patch("src.classes.risk.risk_manager.settings") as mock: + mock.risk_per_trade = 0.01 + mock.atr_multiplier = 2.0 + mock.max_positions = 5 + mock.max_portfolio_exposure = 1.0 + mock.trailing_stop = True + yield mock + +@pytest.fixture +def risk_manager(mock_settings): + # settings are imported in risk_manager, so patching it effectively controls defaults + # Re-instantiating inside test ensures fresh values + return RiskManager() + +def test_calculate_stop_loss_long(risk_manager): + """Test stop loss calculation for long positions.""" + entry_price = 100.0 + atr = 2.0 + # Long SL = Entry - (ATR * Multiplier) = 100 - (2 * 2) = 96 + # Note: risk_manager reads settings on init + sl = risk_manager.calculate_stop_loss(entry_price, atr, "long") + assert sl == 96.0 + +def test_calculate_stop_loss_short(risk_manager): + 
"""Test stop loss calculation for short positions.""" + entry_price = 100.0 + atr = 2.0 + # Short SL = Entry + (ATR * Multiplier) = 100 + (2 * 2) = 104 + sl = risk_manager.calculate_stop_loss(entry_price, atr, "short") + assert sl == 104.0 + +def test_calculate_position_size(risk_manager): + """Test position sizing calculation.""" + account_size = 10000.0 + entry_price = 100.0 + stop_loss = 95.0 # Risk per share = 5.0 + + # Risk Amount = 10000 * 0.01 = 100.0 + # Shares = floor(100 / 5) = 20 + shares = risk_manager.calculate_position_size(account_size, entry_price, stop_loss) + assert shares == 20 + +def test_calculate_position_size_zero_risk(risk_manager): + """Test position sizing when risk per share is zero (or very close).""" + account_size = 10000.0 + entry_price = 100.0 + stop_loss = 100.0 # Risk per share = 0 + + shares = risk_manager.calculate_position_size(account_size, entry_price, stop_loss) + assert shares == 0 + +def test_calculate_position_size_small_risk(risk_manager): + """Test position sizing with very tight stop.""" + account_size = 10000.0 + entry_price = 100.0 + stop_loss = 99.9 # Risk per share = 0.1 + + # Risk Amount = 100.0 + # Shares = floor(100 / 0.1) = 1000 + shares = risk_manager.calculate_position_size(account_size, entry_price, stop_loss) + assert shares == 1000 + +def test_validate_order_success(risk_manager): + """Test valid order validation.""" + validation = risk_manager.validate_order( + current_positions=2, + current_exposure=5000, + order_value=1000, + account_size=10000 + ) + assert validation.valid is True + assert validation.reason is None + +def test_validate_order_max_positions(mock_settings): + """Test validation fails when max positions reached.""" + mock_settings.max_positions = 2 + # Re-init to pick up new setting + rm = RiskManager() + + # Current 2 (max 2) -> Fail + validation = rm.validate_order( + current_positions=2, + current_exposure=5000, + order_value=1000, + account_size=10000 + ) + assert validation.valid is 
False + assert "Max positions reached" in validation.reason + +def test_validate_order_max_exposure(mock_settings): + """Test validation fails when max exposure exceeded.""" + mock_settings.max_portfolio_exposure = 0.5 # 50% max + rm = RiskManager() + + # Current 4000 + Order 2000 = 6000 (60%) > 50% + validation = rm.validate_order( + current_positions=2, + current_exposure=4000, + order_value=2000, + account_size=10000 + ) + assert validation.valid is False + assert "Max portfolio exposure exceeded" in validation.reason diff --git a/tests/unit/test_screeners.py b/tests/unit/test_screeners.py new file mode 100644 index 0000000..0d742ae --- /dev/null +++ b/tests/unit/test_screeners.py @@ -0,0 +1,45 @@ +import pytest +import pandas as pd +from src.classes.screeners.breakout import BreakoutScreener +from src.classes.screeners.trendline import TrendlineScreener + +def test_breakout_screener_uptrend(uptrend_data): + """Test breakout screener on standard uptrend data (should not trigger breakout, just trend).""" + screener = BreakoutScreener() + result = screener.screen("TEST", uptrend_data) + + # Simple uptrend might likely be categorized as "ALONG TREND" or "GROWTH" depending on thresholds + # The run method returns a ScreenerResult object + assert result is not None + +def test_trendline_screener(): + """Test trendline screener identifying an uptrend.""" + # Create data with steep slope to ensure angle > 30 degrees + # Slope of 1.0 gives 45 degrees -> Weak Up (BUY) + prices = [100 + i for i in range(30)] # Slope 1.0 + data = pd.DataFrame({ + "Close": prices, + "Open": prices, + "High": prices, + "Low": prices, + "Volume": [1000]*30 + }) + + screener = TrendlineScreener(lookback_days=20) + result = screener.screen("TEST", data) + + assert result.signal.name == "BUY" or result.signal.value == "BUY" + assert result.confidence > 0 + +def test_breakout_screener_consolidation(breakout_data): + """Test breakout detection.""" + # This data is designed to have a breakout + # 
However, our fixture logic might need verifying if it actually triggers the specific 'BreakoutScreener' logic + screener = BreakoutScreener() + # Mock settings if needed + + # For now, just ensuring it runs without error and returns a result structure + result = screener.screen("TEST", breakout_data) + assert result.ticker == "TEST" + assert result.signal.name in ["BUY", "SELL", "HOLD", "STRONG_BUY", "STRONG_SELL"] + diff --git a/tests/unit/test_transaction_costs.py b/tests/unit/test_transaction_costs.py new file mode 100644 index 0000000..983585a --- /dev/null +++ b/tests/unit/test_transaction_costs.py @@ -0,0 +1,26 @@ +import pytest +from src.classes.risk.transaction_costs import TransactionCosts + +def test_calculate_cost_zero_commission(): + # Commission=0, Slippage=5bps, Spread=3bps -> Variable=6.5bps + tc = TransactionCosts(commission_per_trade=0.0, slippage_bps=5.0, spread_bps=3.0) + price = 100.0 + quantity = 100 + expected_variable = (100 * 100) * (6.5 / 10000) # 10000 * 0.00065 = 6.5 + assert tc.calculate_cost(price, quantity) == pytest.approx(6.5) + +def test_calculate_cost_with_commission(): + # Commission=20, Slippage=10bps, Spread=5bps -> Variable=12.5bps + tc = TransactionCosts(commission_per_trade=20.0, slippage_bps=10.0, spread_bps=5.0) + price = 500.0 + quantity = 10 + total_value = 5000.0 + expected_variable = total_value * (12.5 / 10000) # 5000 * 0.00125 = 6.25 + assert tc.calculate_cost(price, quantity) == pytest.approx(20.0 + 6.25) + +def test_defaults(): + us = TransactionCosts.us_default() + assert us.commission_per_trade == 0.0 + + ind = TransactionCosts.india_default() + assert ind.commission_per_trade == 20.0 diff --git a/tests/unit/test_walk_forward.py b/tests/unit/test_walk_forward.py new file mode 100644 index 0000000..5b7781d --- /dev/null +++ b/tests/unit/test_walk_forward.py @@ -0,0 +1,94 @@ +import pandas as pd +import numpy as np +import pytest +from unittest.mock import MagicMock, patch +from classes.backtesting.walk_forward 
import WalkForwardValidator +from classes.screeners.base import BaseScreener + +@pytest.fixture +def sample_data(): + dates = pd.date_range(start="2020-01-01", end="2022-01-01", freq="D") + df = pd.DataFrame({ + "Open": 100, + "High": 105, + "Low": 95, + "Close": 100, + "Volume": 1000, + "Dividends": 0.0, + "Stock Splits": 0.0 + }, index=dates) + return df + +class MockScreener(BaseScreener): + def run(self, data): + return pd.Series(0, index=data.index) # Neutral signal + +class TestWalkForwardValidator: + + def test_initialization(self, sample_data): + validator = WalkForwardValidator(sample_data, train_period_days=365, test_period_days=90) + assert validator.train_period.days == 365 + assert validator.test_period.days == 90 + + def test_generate_windows(self, sample_data): + # 2 years of data (~730 days) + # Train 365, Test 90 + # Window 1: Train [0, 365), Test [365, 455) + # Window 2: Train [0, 455), Test [455, 545) + # ... + + validator = WalkForwardValidator(sample_data, train_period_days=365, test_period_days=90) + windows = list(validator.generate_windows()) + + assert len(windows) > 0 + + # Check first window + w1_train, w1_test, (t1_start, t1_end) = windows[0] + assert len(w1_train) >= 365 # Approx due to freq + # Test start should be train end + assert t1_start == w1_train.index[-1] + pd.Timedelta(days=1) + # Or reasonably close depending on slicing logic. + # Logic: train < current_train_end, test >= test_start (which is current_train_end) + # So test starts exactly where train ends (exclusive). 
+ + # Check expansion + w2_train, w2_test, _ = windows[1] + assert len(w2_train) > len(w1_train) # Expanding window + + def test_validate_flow(self, sample_data): + validator = WalkForwardValidator(sample_data, train_period_days=300, test_period_days=50) + + # Mock BacktestEngine to avoid running actual backtests (slow/complex) + with patch("classes.backtesting.walk_forward.BacktestEngine") as MockEngine: + # Setup mock stats + mock_stats = { + "Return [%]": 10.0, + "Sharpe Ratio": 1.5, + "Max. Drawdown [%]": 5.0 + } + MockEngine.return_value.run.return_value = (None, mock_stats) + + results = validator.validate(MockScreener) + + assert len(results) > 0 + assert "IS_Sharpe" in results[0] + assert "OOS_Sharpe" in results[0] + assert results[0]["IS_Sharpe"] == 1.5 + + def test_get_summary(self, sample_data): + validator = WalkForwardValidator(sample_data) + validator.results = [ + {"IS_Sharpe": 2.0, "OOS_Sharpe": 1.0}, # Deg = 0.5 + {"IS_Sharpe": 2.0, "OOS_Sharpe": 0.5}, # Deg = 0.25 (Overfit) + ] + + summary = validator.get_summary() + assert not summary.empty + assert "Sharpe_Degradation" in summary.columns + assert "Overfit_Warning" in summary.columns + + assert summary.iloc[0]["Overfit_Warning"] == False # 0.5 is threshold, maybe inclusive? + # Logic: degradation < 0.5 -> True + # 0.5 < 0.5 is False + + assert summary.iloc[1]["Overfit_Warning"] == True