Big refactoring
Some checks failed
Deploy / lint (push) Failing after 21s
Deploy / test (push) Has been skipped
Deploy / deploy (push) Has been skipped

This commit is contained in:
Dennis Thiessen
2026-03-03 15:20:18 +01:00
parent 181cfe6588
commit 0a011d4ce9
55 changed files with 6898 additions and 544 deletions

View File

@@ -13,14 +13,27 @@ ALPACA_API_SECRET=
GEMINI_API_KEY=
GEMINI_MODEL=gemini-2.0-flash
# Sentiment Provider — OpenAI
OPENAI_API_KEY=
OPENAI_MODEL=gpt-4o-mini
OPENAI_SENTIMENT_BATCH_SIZE=5
# Fundamentals Provider — Financial Modeling Prep
FMP_API_KEY=
# Fundamentals Provider — Finnhub (optional fallback)
FINNHUB_API_KEY=
# Fundamentals Provider — Alpha Vantage (optional fallback)
ALPHA_VANTAGE_API_KEY=
# Scheduled Jobs
DATA_COLLECTOR_FREQUENCY=daily
SENTIMENT_POLL_INTERVAL_MINUTES=30
FUNDAMENTAL_FETCH_FREQUENCY=daily
RR_SCAN_FREQUENCY=daily
FUNDAMENTAL_RATE_LIMIT_RETRIES=3
FUNDAMENTAL_RATE_LIMIT_BACKOFF_SECONDS=15
# Scoring Defaults
DEFAULT_WATCHLIST_AUTO_SIZE=10

View File

@@ -40,6 +40,9 @@ jobs:
- uses: actions/setup-python@v5
with:
python-version: "3.12"
- uses: actions/setup-node@v4
with:
node-version: "20"
- run: pip install -e ".[dev]"
- run: alembic upgrade head
env:
@@ -47,6 +50,15 @@ jobs:
- run: pytest --tb=short
env:
DATABASE_URL: postgresql+asyncpg://test_user:test_pass@localhost:5432/test_db
- run: |
cd frontend
npm ci
if node -e "require.resolve('vitest/package.json')" >/dev/null 2>&1; then
npm test
else
echo "vitest not configured; skipping frontend tests"
fi
npm run build
deploy:
needs: test
@@ -65,4 +77,8 @@ jobs:
source .venv/bin/activate
pip install -e .
alembic upgrade head
cd frontend
npm ci
npm run build
cd ..
sudo systemctl restart stock-data-backend

View File

@@ -0,0 +1,27 @@
{
"enabled": true,
"name": "Code Quality Analyzer",
"description": "Analyzes modified source code files for potential improvements including code smells, design patterns, best practices, readability, maintainability, and performance optimizations",
"version": "1",
"when": {
"type": "fileEdited",
"patterns": [
"*.py",
"*.ts",
"*.tsx",
"*.js",
"*.jsx",
"*.java",
"*.go",
"*.rs",
"*.cpp",
"*.c",
"*.h",
"*.cs"
]
},
"then": {
"type": "askAgent",
"prompt": "Analyze the modified code for potential improvements. Check for: 1) Code smells and anti-patterns, 2) Opportunities to apply design patterns, 3) Best practices violations, 4) Readability improvements, 5) Maintainability concerns, 6) Performance optimization opportunities. Provide specific, actionable suggestions while ensuring functionality remains intact."
}
}

View File

@@ -0,0 +1,19 @@
{
"enabled": true,
"name": "Update Docs on Code Change",
"description": "Monitors Python source files and prompts agent to update README.md or docs folder when code changes are saved",
"version": "1",
"when": {
"type": "fileEdited",
"patterns": [
"*.py",
"requirements.txt",
"pyproject.toml",
"alembic.ini"
]
},
"then": {
"type": "askAgent",
"prompt": "A source file was just modified. Review the changes and update the documentation in README.md to reflect any new features, API changes, configuration updates, or important implementation details. Keep the documentation clear, accurate, and up-to-date."
}
}

View File

@@ -1,80 +1,5 @@
{
"mcpServers": {
"context7": {
"gallery": true,
"command": "npx",
"args": [
"-y",
"@upstash/context7-mcp@latest"
],
"env": {
"HTTP_PROXY": "http://aproxy.corproot.net:8080",
"HTTPS_PROXY": "http://aproxy.corproot.net:8080"
},
"type": "stdio"
},
"aws.mcp": {
"command": "uvx",
"timeout": 100000,
"transport": "stdio",
"args": [
"mcp-proxy-for-aws@latest",
"https://aws-mcp.us-east-1.api.aws/mcp"
],
"env": {
"AWS_PROFILE": "409330224121_sc-ps-standard-admin",
"AWS_REGION": "eu-central-2",
"HTTP_PROXY": "http://aproxy.corproot.net:8080",
"HTTPS_PROXY": "http://aproxy.corproot.net:8080",
"SSL_CERT_FILE": "/Users/taathde3/combined-ca-bundle.pem",
"REQUESTS_CA_BUNDLE": "/Users/taathde3/combined-ca-bundle.pem"
},
"disabled": false,
"autoApprove": []
},
"aws.eks.mcp": {
"command": "uvx",
"timeout": 100000,
"transport": "stdio",
"args": [
"mcp-proxy-for-aws@latest",
"https://eks-mcp.eu-central-1.api.aws/mcp",
"--service",
"eks-mcp"
],
"env": {
"AWS_PROFILE": "409330224121_sc-ps-standard-admin",
"AWS_REGION": "eu-central-2",
"HTTP_PROXY": "http://aproxy.corproot.net:8080",
"HTTPS_PROXY": "http://aproxy.corproot.net:8080",
"SSL_CERT_FILE": "/Users/taathde3/combined-ca-bundle.pem",
"REQUESTS_CA_BUNDLE": "/Users/taathde3/combined-ca-bundle.pem"
},
"disabled": false,
"autoApprove": []
},
"aws.ecs.mcp": {
"command": "uvx",
"timeout": 100000,
"transport": "stdio",
"args": [
"mcp-proxy-for-aws@latest",
"https://ecs-mcp.us-east-1.api.aws/mcp",
"--service",
"ecs-mcp"
],
"env": {
"AWS_PROFILE": "409330224121_sc-ps-standard-admin",
"AWS_REGION": "eu-central-2",
"HTTP_PROXY": "http://aproxy.corproot.net:8080",
"HTTPS_PROXY": "http://aproxy.corproot.net:8080",
"SSL_CERT_FILE": "/Users/taathde3/combined-ca-bundle.pem",
"REQUESTS_CA_BUNDLE": "/Users/taathde3/combined-ca-bundle.pem"
},
"disabled": false,
"autoApprove": []
},
"iaws.support.agent": {
"command": "uvx",
"args": [

View File

@@ -0,0 +1 @@
{"specId": "71b6e4c6-56fa-4d43-b1ca-c4a89e8c8b5e", "workflowType": "requirements-first", "specType": "feature"}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,222 @@
# Requirements Document
## Introduction
The Intelligent Trade Recommendation System enhances the Signal Dashboard platform by providing clear, actionable trading recommendations with confidence scoring, multiple price targets, and probability estimates. The system addresses critical gaps in the current trade setup generation: contradictory signal detection, single-target limitations, and lack of directional guidance for non-professional traders.
The system analyzes multi-dimensional signals (sentiment, technical, momentum, S/R positioning) to recommend both LONG and SHORT directions with confidence scores, identifies multiple S/R-based price targets with probability estimates, and detects signal conflicts to prevent contradictory recommendations.
## Glossary
- **Trade_Recommendation_Engine**: The core system component that analyzes signals and generates directional recommendations with confidence scores
- **Direction_Analyzer**: Component that evaluates LONG vs SHORT direction based on signal alignment
- **Target_Generator**: Component that identifies multiple S/R levels as price targets
- **Probability_Estimator**: Component that calculates likelihood of reaching each target
- **Signal_Conflict_Detector**: Component that identifies contradictions between sentiment, technical, and momentum signals
- **Recommendation_Summary**: User-facing output containing recommended action, confidence, reasoning, and risk level
- **S/R_Level**: Support/Resistance level with strength score and price
- **Signal_Alignment**: Degree of agreement between sentiment, technical, momentum, and fundamental dimensions
- **Confidence_Score**: Percentage (0-100%) indicating likelihood of success for a directional recommendation
- **Target_Probability**: Percentage likelihood of price reaching a specific target level
- **ATR**: Average True Range, volatility measure used for stop-loss calculation
- **R:R_Ratio**: Risk-to-Reward ratio comparing potential profit to potential loss
- **Composite_Score**: Weighted aggregate score (0-100) from all dimensions
- **Dimension_Score**: Individual score for technical, sr_quality, sentiment, fundamental, or momentum dimension
## Requirements
### Requirement 1: Bidirectional Trade Setup Generation
**User Story:** As a trader, I want to see both LONG and SHORT trade setups for each ticker, so that I can evaluate opportunities in both directions regardless of market conditions.
#### Acceptance Criteria
1. WHEN the Trade_Recommendation_Engine analyzes a ticker, THE Trade_Recommendation_Engine SHALL generate both a LONG setup and a SHORT setup
2. THE Trade_Recommendation_Engine SHALL calculate separate entry prices, stop losses, and targets for each direction
3. WHEN generating a LONG setup, THE Trade_Recommendation_Engine SHALL use resistance levels as targets and support levels for stop-loss calculation
4. WHEN generating a SHORT setup, THE Trade_Recommendation_Engine SHALL use support levels as targets and resistance levels for stop-loss calculation
5. THE Trade_Recommendation_Engine SHALL store both setups in the TradeSetup model with distinct direction fields
### Requirement 2: Direction Confidence Scoring
**User Story:** As a non-professional trader, I want to see confidence scores for LONG vs SHORT directions, so that I can understand which direction has higher probability of success.
#### Acceptance Criteria
1. THE Direction_Analyzer SHALL calculate a confidence score (0-100%) for the LONG direction
2. THE Direction_Analyzer SHALL calculate a confidence score (0-100%) for the SHORT direction
3. WHEN sentiment is bullish AND technical score is above 60 AND momentum score is above 60, THE Direction_Analyzer SHALL assign LONG confidence above 70%
4. WHEN sentiment is bearish AND technical score is below 40 AND momentum score is below 40, THE Direction_Analyzer SHALL assign SHORT confidence above 70%
5. WHEN signal dimensions contradict each other, THE Direction_Analyzer SHALL reduce confidence scores for both directions below 60%
6. THE Direction_Analyzer SHALL store confidence scores in the TradeSetup model for each direction
### Requirement 3: Multiple Price Target Identification
**User Story:** As a trader, I want multiple price targets at different S/R levels, so that I can implement staged profit-taking and proper risk management.
#### Acceptance Criteria
1. WHEN generating targets for a LONG setup, THE Target_Generator SHALL identify 3 to 5 resistance levels above the entry price
2. WHEN generating targets for a SHORT setup, THE Target_Generator SHALL identify 3 to 5 support levels below the entry price
3. THE Target_Generator SHALL classify targets as Conservative (nearest), Moderate (mid-range), or Aggressive (furthest)
4. THE Target_Generator SHALL calculate the R:R ratio for each target level
5. WHEN fewer than 3 S/R levels exist in the target direction, THE Target_Generator SHALL use the available levels and flag the setup as having limited targets
6. THE Target_Generator SHALL order targets by distance from entry price
### Requirement 4: Target Probability Estimation
**User Story:** As a trader, I want to know the probability of reaching each price target, so that I can set realistic expectations and plan my exits.
#### Acceptance Criteria
1. THE Probability_Estimator SHALL calculate a target probability percentage (0-100%) for each price target
2. WHEN calculating probability, THE Probability_Estimator SHALL consider S/R level strength score (higher strength increases probability)
3. WHEN calculating probability, THE Probability_Estimator SHALL consider distance from entry (closer targets receive higher probability)
4. WHEN calculating probability, THE Probability_Estimator SHALL consider signal alignment (aligned signals increase probability by 10-20%)
5. WHEN calculating probability, THE Probability_Estimator SHALL consider ATR (higher volatility increases probability for distant targets)
6. THE Probability_Estimator SHALL assign Conservative targets probability above 60%
7. THE Probability_Estimator SHALL assign Moderate targets probability between 40% and 70%
8. THE Probability_Estimator SHALL assign Aggressive targets probability below 50%
### Requirement 5: Signal Conflict Detection
**User Story:** As a trader, I want to be warned when signals contradict each other, so that I can avoid high-risk trades with mixed indicators.
#### Acceptance Criteria
1. THE Signal_Conflict_Detector SHALL compare sentiment classification (bearish/neutral/bullish) with technical score direction
2. WHEN sentiment is bearish AND technical score is above 60, THE Signal_Conflict_Detector SHALL flag a sentiment-technical conflict
3. WHEN sentiment is bullish AND technical score is below 40, THE Signal_Conflict_Detector SHALL flag a sentiment-technical conflict
4. THE Signal_Conflict_Detector SHALL compare momentum score with technical score
5. WHEN momentum score and technical score differ by more than 30 points, THE Signal_Conflict_Detector SHALL flag a momentum-technical conflict
6. THE Signal_Conflict_Detector SHALL store conflict flags in the TradeSetup model
7. WHEN conflicts are detected, THE Signal_Conflict_Detector SHALL reduce confidence scores by 15-25%
### Requirement 6: Recommendation Summary Generation
**User Story:** As a non-professional trader, I want a clear recommendation summary with action, confidence, and reasoning, so that I can make informed trading decisions without analyzing raw data.
#### Acceptance Criteria
1. THE Recommendation_Summary SHALL include a recommended action field with values: "LONG (High Confidence)", "LONG (Moderate Confidence)", "SHORT (High Confidence)", "SHORT (Moderate Confidence)", or "NEUTRAL (Conflicting Signals)"
2. WHEN LONG confidence is above 70% AND LONG confidence exceeds SHORT confidence by 20%, THE Recommendation_Summary SHALL recommend "LONG (High Confidence)"
3. WHEN SHORT confidence is above 70% AND SHORT confidence exceeds LONG confidence by 20%, THE Recommendation_Summary SHALL recommend "SHORT (High Confidence)"
4. WHEN confidence scores differ by less than 20%, THE Recommendation_Summary SHALL recommend "NEUTRAL (Conflicting Signals)"
5. THE Recommendation_Summary SHALL include reasoning text explaining the recommendation based on signal alignment
6. THE Recommendation_Summary SHALL include a risk level assessment: Low (all signals aligned), Medium (minor conflicts), or High (major conflicts)
7. THE Recommendation_Summary SHALL display the composite score alongside the recommendation
### Requirement 7: Trade Setup API Enhancement
**User Story:** As a frontend developer, I want the trade setup API to return enhanced recommendation data, so that I can display confidence scores, multiple targets, and probabilities in the UI.
#### Acceptance Criteria
1. WHEN the trade setup API endpoint is called for a ticker, THE API SHALL return both LONG and SHORT setups
2. THE API SHALL include confidence_score field for each setup
3. THE API SHALL include a targets array with 3-5 target objects for each setup
4. WHEN returning target objects, THE API SHALL include price, distance_from_entry, rr_ratio, probability, and classification (Conservative/Moderate/Aggressive) fields
5. THE API SHALL include conflict_flags array listing detected signal conflicts
6. THE API SHALL include recommendation_summary object with action, reasoning, and risk_level fields
7. THE API SHALL return setups ordered by confidence score (highest first)
### Requirement 8: Historical S/R Strength Integration
**User Story:** As a system, I want to use historical S/R level strength in probability calculations, so that targets at stronger levels receive higher probability estimates.
#### Acceptance Criteria
1. THE Probability_Estimator SHALL retrieve the strength score from the SRLevel model for each target
2. WHEN an S/R level has strength score above 80, THE Probability_Estimator SHALL increase target probability by 10-15%
3. WHEN an S/R level has strength score below 40, THE Probability_Estimator SHALL decrease target probability by 10-15%
4. THE Probability_Estimator SHALL normalize strength scores to a 0-1 scale before applying to probability calculation
5. WHEN an S/R level has been tested multiple times historically, THE Probability_Estimator SHALL increase its weight in probability calculation
### Requirement 9: Volatility-Adjusted Target Selection
**User Story:** As a trader, I want target selection to account for volatility, so that targets are realistic given the ticker's typical price movement.
#### Acceptance Criteria
1. THE Target_Generator SHALL retrieve the current ATR value for the ticker
2. WHEN ATR is high (above 5% of current price), THE Target_Generator SHALL include more distant S/R levels as valid targets
3. WHEN ATR is low (below 2% of current price), THE Target_Generator SHALL limit targets to S/R levels within 3x ATR distance
4. THE Target_Generator SHALL calculate target distance as a multiple of ATR
5. THE Target_Generator SHALL exclude S/R levels that are less than 1x ATR from entry price
### Requirement 10: Recommendation Persistence and History
**User Story:** As a trader, I want to track how recommendations change over time, so that I can evaluate the system's accuracy and learn from past recommendations.
#### Acceptance Criteria
1. THE Trade_Recommendation_Engine SHALL store each generated recommendation with a timestamp
2. THE Trade_Recommendation_Engine SHALL preserve previous recommendations when generating new ones
3. THE API SHALL provide an endpoint to retrieve recommendation history for a ticker
4. WHEN retrieving history, THE API SHALL return recommendations ordered by timestamp (newest first)
5. THE API SHALL include actual_outcome field indicating whether targets were reached (to be updated post-trade)
### Requirement 11: Frontend Recommendation Display
**User Story:** As a trader, I want to see recommendations clearly displayed in the ticker detail page, so that I can quickly understand the suggested action and targets.
#### Acceptance Criteria
1. THE Ticker_Detail_Page SHALL display the recommendation summary prominently at the top
2. THE Ticker_Detail_Page SHALL show LONG and SHORT setups side-by-side with confidence scores
3. THE Ticker_Detail_Page SHALL display targets in a table with columns: Classification, Price, Distance, R:R, Probability
4. WHEN signal conflicts exist, THE Ticker_Detail_Page SHALL display a warning badge with conflict details
5. THE Ticker_Detail_Page SHALL highlight the recommended direction with visual emphasis (border, background color)
6. THE Ticker_Detail_Page SHALL display risk level with color coding: green (Low), yellow (Medium), red (High)
### Requirement 12: Scanner Integration with Recommendations
**User Story:** As a trader, I want the scanner to show recommended direction and confidence, so that I can quickly filter for high-confidence opportunities.
#### Acceptance Criteria
1. THE Scanner_Page SHALL display a "Recommended Action" column showing the recommended direction and confidence level
2. THE Scanner_Page SHALL allow filtering by recommended action (LONG High, LONG Moderate, SHORT High, SHORT Moderate, NEUTRAL)
3. THE Scanner_Page SHALL allow filtering by minimum confidence score
4. THE Scanner_Page SHALL display the highest-probability target for each setup in the table
5. WHEN a user clicks a setup row, THE Scanner_Page SHALL navigate to the ticker detail page with the recommendation expanded
### Requirement 13: Admin Configuration for Recommendation Thresholds
**User Story:** As an admin, I want to configure confidence score thresholds and probability calculation weights, so that I can tune the recommendation system based on market conditions.
#### Acceptance Criteria
1. THE Admin_Settings_Page SHALL provide inputs for high confidence threshold (default: 70%)
2. THE Admin_Settings_Page SHALL provide inputs for moderate confidence threshold (default: 50%)
3. THE Admin_Settings_Page SHALL provide inputs for signal alignment weight in probability calculation (default: 15%)
4. THE Admin_Settings_Page SHALL provide inputs for S/R strength weight in probability calculation (default: 20%)
5. THE Admin_Settings_Page SHALL provide inputs for distance penalty factor in probability calculation (default: 0.1)
6. WHEN admin saves settings, THE Settings_Service SHALL update the configuration in the Settings model
7. THE Trade_Recommendation_Engine SHALL retrieve current thresholds from Settings before generating recommendations
### Requirement 14: Recommendation Calculation Performance
**User Story:** As a system, I want recommendation generation to complete within acceptable time limits, so that users receive timely updates without delays.
#### Acceptance Criteria
1. WHEN generating recommendations for a single ticker, THE Trade_Recommendation_Engine SHALL complete within 500 milliseconds
2. WHEN the scheduled job generates recommendations for all tickers, THE Trade_Recommendation_Engine SHALL process at least 10 tickers per second
3. THE Trade_Recommendation_Engine SHALL use database query optimization to retrieve all required dimension scores in a single query
4. THE Trade_Recommendation_Engine SHALL cache S/R levels for each ticker to avoid repeated database queries
5. WHEN recommendation generation fails for a ticker, THE Trade_Recommendation_Engine SHALL log the error and continue processing remaining tickers
### Requirement 15: Recommendation Data Model Extension
**User Story:** As a developer, I want the TradeSetup model extended to store recommendation data, so that all recommendation information persists in the database.
#### Acceptance Criteria
1. THE TradeSetup model SHALL include a confidence_score field (Float, 0-100)
2. THE TradeSetup model SHALL include a targets field (JSON array of target objects)
3. THE TradeSetup model SHALL include a conflict_flags field (JSON array of strings)
4. THE TradeSetup model SHALL include a recommended_action field (String: LONG_HIGH, LONG_MODERATE, SHORT_HIGH, SHORT_MODERATE, NEUTRAL)
5. THE TradeSetup model SHALL include a reasoning field (Text)
6. THE TradeSetup model SHALL include a risk_level field (String: Low, Medium, High)
7. THE TradeSetup model SHALL maintain backward compatibility with existing entry_price, stop_loss, target, and rr_ratio fields for the primary target

30
.kiro/steering/product.md Normal file
View File

@@ -0,0 +1,30 @@
# Product Overview
Signal Dashboard is an investing-signal platform for NASDAQ stocks that surfaces optimal trading opportunities through multi-dimensional scoring.
## Core Philosophy
Don't predict price. Find the path of least resistance, key support/resistance zones, and asymmetric risk:reward setups.
## Key Features
- Multi-dimensional scoring engine (technical, S/R quality, sentiment, fundamental, momentum)
- Risk:Reward scanner with ATR-based stops (default 3:1 threshold)
- Support/Resistance detection with strength scoring and merge-within-tolerance
- Sentiment analysis with time-decay weighted scoring (Gemini 2.0 Flash with search grounding)
- Auto-populated watchlist (top-10 by composite score) + manual entries (cap: 20)
- Interactive candlestick chart with S/R overlays
- JWT auth with admin role and user access control
- Scheduled jobs: OHLCV collection, sentiment polling, fundamentals fetch, R:R scanning
## Data Providers
- Alpaca: OHLCV price data
- Gemini 2.0 Flash: Sentiment analysis via search grounding
- Financial Modeling Prep: Fundamental data (P/E, revenue growth, earnings surprise, market cap)
## User Roles
- Admin: Full access including user management, job control, data cleanup, system settings
- User: Access to watchlist, scanner, rankings, ticker details (when has_access=true)
- Registration: Configurable via admin settings

View File

@@ -0,0 +1,87 @@
# Project Structure
## Backend Architecture
```
app/
├── main.py # FastAPI app, lifespan, router registration
├── config.py # Pydantic settings from .env
├── database.py # Async SQLAlchemy engine + session factory
├── dependencies.py # DI: DB session, auth guards (require_access, require_admin)
├── exceptions.py # Custom exception hierarchy (ValidationError, NotFoundError, etc.)
├── middleware.py # Global error handler → JSON envelope
├── cache.py # LRU cache with per-ticker invalidation
├── scheduler.py # APScheduler job definitions
├── models/ # SQLAlchemy ORM models
├── schemas/ # Pydantic request/response schemas
├── services/ # Business logic layer
├── providers/ # External data provider integrations
└── routers/ # FastAPI route handlers
```
## Frontend Architecture
```
frontend/src/
├── App.tsx # Route definitions
├── main.tsx # React entry point
├── api/ # Axios API client modules (one per resource)
├── components/
│ ├── admin/ # User table, job controls, settings, data cleanup
│ ├── auth/ # Protected route wrapper
│ ├── charts/ # Canvas candlestick chart
│ ├── layout/ # App shell, sidebar, mobile nav
│ ├── rankings/ # Rankings table, weights form
│ ├── scanner/ # Trade table
│ ├── ticker/ # Sentiment panel, fundamentals, indicators, S/R overlay
│ ├── ui/ # Badge, toast, skeleton, score card, confirm dialog
│ └── watchlist/ # Watchlist table, add ticker form
├── hooks/ # React Query hooks (one per resource)
├── lib/ # Types, formatting utilities
├── pages/ # Page components (Login, Register, Watchlist, Ticker, Scanner, Rankings, Admin)
├── stores/ # Zustand auth store
└── styles/ # Global CSS with glassmorphism classes
```
## Key Patterns
### Backend
- **Layered architecture**: Router → Service → Model
- **Dependency injection**: FastAPI Depends() for DB session and auth
- **Exception handling**: Custom exceptions caught by global middleware, returned as JSON envelope
- **API envelope**: All responses wrapped in `{ status: "success"|"error", data: any, error?: string }`
- **Cascade deletes**: Ticker deletion cascades to all related data (OHLCV, sentiment, fundamentals, S/R, scores, trades, watchlist)
- **Async everywhere**: All DB operations use async/await with asyncpg
### Frontend
- **API client**: Axios interceptors for JWT injection and envelope unwrapping
- **Server state**: TanStack React Query with query keys per resource
- **Client state**: Zustand for auth (token, user, login/logout)
- **Error handling**: ApiError class, toast notifications for mutations
- **Protected routes**: ProtectedRoute wrapper checks auth, redirects to /login
- **Glassmorphism**: Frosted glass panels, gradient text, ambient glow, mesh gradient background
## Database Models
All models inherit from `Base` (SQLAlchemy declarative base):
- `Ticker`: Registry of tracked symbols (cascade delete parent)
- `OHLCVRecord`: Price data (open, high, low, close, volume)
- `SentimentScore`: Sentiment analysis results with time-decay
- `FundamentalData`: P/E, revenue growth, earnings surprise, market cap
- `SRLevel`: Support/Resistance levels with strength scoring
- `DimensionScore`: Individual dimension scores (technical, sr_quality, sentiment, fundamental, momentum)
- `CompositeScore`: Weighted composite score
- `TradeSetup`: Detected R:R setups (long/short, entry, stop, target)
- `WatchlistEntry`: User watchlist entries (auto/manual)
- `User`: Auth and access control
- `Settings`: System-wide configuration
## Testing
- Backend tests: `tests/unit/` and `tests/property/`
- Frontend tests: `frontend/src/**/*.test.tsx`
- Fixtures in `tests/conftest.py`
- Hypothesis strategies for property-based testing

86
.kiro/steering/tech.md Normal file
View File

@@ -0,0 +1,86 @@
# Tech Stack
## Backend
- Python 3.12+
- FastAPI with Uvicorn
- SQLAlchemy 2.0 (async) with asyncpg
- PostgreSQL database
- Alembic for migrations
- APScheduler for scheduled jobs
- JWT auth (python-jose, passlib with bcrypt)
- Pydantic for validation and settings
## Frontend
- React 18 with TypeScript
- Vite 5 (build tool)
- TanStack React Query v5 (server state)
- Zustand (client state, auth)
- React Router v6 (SPA routing)
- Axios with JWT interceptor
- Tailwind CSS 3 with custom glassmorphism design system
- Canvas 2D for candlestick charts
## Testing
- Backend: pytest, pytest-asyncio, Hypothesis (property-based testing)
- Frontend: Vitest
- Test database: In-memory SQLite (no PostgreSQL needed for tests)
## Common Commands
### Backend
```bash
# Setup
python -m venv .venv
source .venv/bin/activate
pip install -e ".[dev]"
# Database
createdb stock_data_backend
alembic upgrade head
# Run
uvicorn app.main:app --reload --host 0.0.0.0 --port 8000
# Test
pytest tests/ -v
```
### Frontend
```bash
cd frontend
# Setup
npm install
# Run dev server (proxies /api/v1/ to backend)
npm run dev
# Build
npm run build
# Test
npm test # Single run
npm run test:watch # Watch mode
```
## Environment Variables
Required in `.env`:
- `DATABASE_URL`: PostgreSQL connection string (postgresql+asyncpg://...)
- `JWT_SECRET`: Random secret for JWT signing
- `ALPACA_API_KEY`, `ALPACA_API_SECRET`: For OHLCV data
- `GEMINI_API_KEY`: For sentiment analysis
- `FMP_API_KEY`: For fundamental data
See `.env.example` for full list with defaults.
## API Documentation
- Swagger UI: http://localhost:8000/docs
- ReDoc: http://localhost:8000/redoc
- All endpoints under `/api/v1/`

View File

@@ -17,19 +17,20 @@ Investing-signal platform for NASDAQ stocks. Surfaces the best trading opportuni
| Charts | Canvas 2D candlestick chart with S/R overlays |
| Routing | React Router v6 (SPA) |
| HTTP | Axios with JWT interceptor |
| Data providers | Alpaca (OHLCV), Gemini 2.0 Flash (sentiment via search grounding), Financial Modeling Prep (fundamentals) |
| Data providers | Alpaca (OHLCV), OpenAI (sentiment, optional micro-batch), Fundamentals chain: FMP → Finnhub → Alpha Vantage |
## Features
### Backend
- Ticker registry with full cascade delete
- Universe bootstrap for `sp500`, `nasdaq100`, `nasdaq_all` via admin endpoint
- OHLCV price storage with upsert and validation
- Technical indicators: ADX, EMA, RSI, ATR, Volume Profile, Pivot Points, EMA Cross
- Support/Resistance detection with strength scoring and merge-within-tolerance
- Sentiment analysis with time-decay weighted scoring
- Fundamental data tracking (P/E, revenue growth, earnings surprise, market cap)
- 5-dimension scoring engine (technical, S/R quality, sentiment, fundamental, momentum) with configurable weights
- Risk:Reward scanner — long and short setups, ATR-based stops, configurable R:R threshold (default 3:1)
- Risk:Reward scanner — long and short setups, ATR-based stops, configurable R:R threshold (default 1.5:1)
- Auto-populated watchlist (top-10 by composite score) + manual entries (cap: 20)
- JWT auth with admin role, configurable registration, user access control
- Scheduled jobs with enable/disable control and status monitoring
@@ -79,7 +80,7 @@ All under `/api/v1/`. Interactive docs at `/docs` (Swagger) and `/redoc`.
| Scores | `GET /scores/{symbol}`, `GET /rankings`, `PUT /scores/weights` |
| Trades | `GET /trades` |
| Watchlist | `GET /watchlist`, `POST /watchlist/{symbol}`, `DELETE /watchlist/{symbol}` |
| Admin | `GET /admin/users`, `PUT /admin/users/{id}/role`, `PUT /admin/users/{id}/access`, `DELETE /admin/data/{symbol}`, `POST /admin/jobs/{name}/trigger`, `PUT /admin/jobs/{name}/toggle`, `GET /admin/jobs`, `GET /admin/settings`, `PUT /admin/settings` |
| Admin | `GET /admin/users`, `POST /admin/users`, `PUT /admin/users/{id}/access`, `PUT /admin/users/{id}/password`, `PUT /admin/settings/registration`, `GET /admin/settings`, `PUT /admin/settings/{key}`, `GET/PUT /admin/settings/recommendations`, `GET/PUT /admin/settings/ticker-universe`, `POST /admin/tickers/bootstrap`, `POST /admin/data/cleanup`, `GET /admin/jobs`, `POST /admin/jobs/{name}/trigger`, `PUT /admin/jobs/{name}/toggle`, `GET /admin/pipeline/readiness` |
## Development Setup
@@ -157,11 +158,18 @@ Configure in `.env` (copy from `.env.example`):
| `ALPACA_API_SECRET` | For OHLCV | — | Alpaca Markets API secret |
| `GEMINI_API_KEY` | For sentiment | — | Google Gemini API key |
| `GEMINI_MODEL` | No | `gemini-2.0-flash` | Gemini model name |
| `FMP_API_KEY` | For fundamentals | — | Financial Modeling Prep API key |
| `OPENAI_API_KEY` | For sentiment (OpenAI path) | — | OpenAI API key |
| `OPENAI_MODEL` | No | `gpt-4o-mini` | OpenAI model name |
| `OPENAI_SENTIMENT_BATCH_SIZE` | No | `5` | Micro-batch size for sentiment collector |
| `FMP_API_KEY` | Optional (fundamentals) | — | Financial Modeling Prep API key (first provider in chain) |
| `FINNHUB_API_KEY` | Optional (fundamentals) | — | Finnhub API key (fallback provider) |
| `ALPHA_VANTAGE_API_KEY` | Optional (fundamentals) | — | Alpha Vantage API key (fallback provider) |
| `DATA_COLLECTOR_FREQUENCY` | No | `daily` | OHLCV collection schedule |
| `SENTIMENT_POLL_INTERVAL_MINUTES` | No | `30` | Sentiment polling interval |
| `FUNDAMENTAL_FETCH_FREQUENCY` | No | `daily` | Fundamentals fetch schedule |
| `RR_SCAN_FREQUENCY` | No | `daily` | R:R scanner schedule |
| `FUNDAMENTAL_RATE_LIMIT_RETRIES` | No | `3` | Retries per ticker on fundamentals rate-limit |
| `FUNDAMENTAL_RATE_LIMIT_BACKOFF_SECONDS` | No | `15` | Base backoff seconds for fundamentals retry (exponential) |
| `DEFAULT_WATCHLIST_AUTO_SIZE` | No | `10` | Auto-watchlist size |
| `DEFAULT_RR_THRESHOLD` | No | `3.0` | Minimum R:R ratio for setups |
| `DB_POOL_SIZE` | No | `5` | Database connection pool size |

View File

@@ -0,0 +1,59 @@
"""add recommendation fields to trade_setups
Revision ID: 003
Revises: 002
Create Date: 2026-03-03 00:00:00.000000
"""
from typing import Sequence, Union
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision: str = "003"
down_revision: Union[str, None] = "002"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Add nullable recommendation columns to trade_setups.

    Every column is nullable, so the migration is safe on already-populated
    tables (no backfill or table rewrite required). The columns are declared
    in one spec table so this function and downgrade() cannot drift apart.
    """
    # (column name, SQLAlchemy type) for every column introduced by revision 003.
    new_columns = (
        ("confidence_score", sa.Float()),
        ("targets_json", sa.Text()),
        ("conflict_flags_json", sa.Text()),
        ("recommended_action", sa.String(length=20)),
        ("reasoning", sa.Text()),
        ("risk_level", sa.String(length=10)),
        ("actual_outcome", sa.String(length=20)),
    )
    for name, column_type in new_columns:
        op.add_column("trade_setups", sa.Column(name, column_type, nullable=True))
def downgrade() -> None:
    """Drop the recommendation columns added by revision 003.

    Columns are dropped in reverse order of their creation in upgrade();
    for plain DROP COLUMN this is cosmetic but keeps the pair symmetrical.
    """
    for name in (
        "actual_outcome",
        "risk_level",
        "reasoning",
        "recommended_action",
        "conflict_flags_json",
        "targets_json",
        "confidence_score",
    ):
        op.drop_column("trade_setups", name)

View File

@@ -22,15 +22,24 @@ class Settings(BaseSettings):
# Sentiment Provider — OpenAI
openai_api_key: str = ""
openai_model: str = "gpt-4o-mini"
openai_sentiment_batch_size: int = 5
# Fundamentals Provider — Financial Modeling Prep
fmp_api_key: str = ""
# Fundamentals Provider — Finnhub (optional fallback)
finnhub_api_key: str = ""
# Fundamentals Provider — Alpha Vantage (optional fallback)
alpha_vantage_api_key: str = ""
# Scheduled Jobs
data_collector_frequency: str = "daily"
sentiment_poll_interval_minutes: int = 30
fundamental_fetch_frequency: str = "daily"
rr_scan_frequency: str = "daily"
fundamental_rate_limit_retries: int = 3
fundamental_rate_limit_backoff_seconds: int = 15
# Scoring Defaults
default_watchlist_auto_size: int = 10

View File

@@ -1,6 +1,8 @@
from datetime import datetime
from sqlalchemy import DateTime, Float, ForeignKey, String
import json
from sqlalchemy import DateTime, Float, ForeignKey, String, Text
from sqlalchemy.orm import Mapped, mapped_column, relationship
from app.database import Base
@@ -23,4 +25,34 @@ class TradeSetup(Base):
DateTime(timezone=True), nullable=False
)
confidence_score: Mapped[float | None] = mapped_column(Float, nullable=True)
targets_json: Mapped[str | None] = mapped_column(Text, nullable=True)
conflict_flags_json: Mapped[str | None] = mapped_column(Text, nullable=True)
recommended_action: Mapped[str | None] = mapped_column(String(20), nullable=True)
reasoning: Mapped[str | None] = mapped_column(Text, nullable=True)
risk_level: Mapped[str | None] = mapped_column(String(10), nullable=True)
actual_outcome: Mapped[str | None] = mapped_column(String(20), nullable=True)
ticker = relationship("Ticker", back_populates="trade_setups")
@property
def targets(self) -> list[dict]:
    """Price targets decoded from ``targets_json``; [] when absent or invalid."""
    raw = self.targets_json
    if not raw:
        return []
    try:
        decoded = json.loads(raw)
    except (TypeError, ValueError):
        return []
    # Anything other than a JSON array is treated as no targets.
    if isinstance(decoded, list):
        return decoded
    return []
@property
def conflict_flags(self) -> list[str]:
    """Conflict flags decoded from ``conflict_flags_json``; [] when absent or invalid."""
    raw = self.conflict_flags_json
    if not raw:
        return []
    try:
        decoded = json.loads(raw)
    except (TypeError, ValueError):
        return []
    # Coerce each element to str; non-array payloads yield no flags.
    return [str(flag) for flag in decoded] if isinstance(decoded, list) else []

View File

@@ -0,0 +1,253 @@
"""Chained fundamentals provider with fallback adapters.
Order:
1) FMP (if configured)
2) Finnhub (if configured)
3) Alpha Vantage (if configured)
"""
from __future__ import annotations
import logging
import os
from datetime import datetime, timezone
from pathlib import Path
import httpx
from app.config import settings
from app.exceptions import ProviderError, RateLimitError
from app.providers.fmp import FMPFundamentalProvider
from app.providers.protocol import FundamentalData, FundamentalProvider
logger = logging.getLogger(__name__)
# Resolve the CA bundle for outbound HTTPS requests: honour SSL_CERT_FILE
# when it names an existing file, otherwise fall back to True so httpx uses
# its default trust store.
_CA_BUNDLE = os.environ.get("SSL_CERT_FILE", "")
if not _CA_BUNDLE or not Path(_CA_BUNDLE).exists():
    _CA_BUNDLE_PATH: str | bool = True
else:
    _CA_BUNDLE_PATH = _CA_BUNDLE
def _safe_float(value: object) -> float | None:
if value is None:
return None
try:
return float(value)
except (TypeError, ValueError):
return None
class FinnhubFundamentalProvider:
    """Fundamentals provider backed by Finnhub free endpoints.

    Fetches profile2, stock/metric, and stock/earnings for a ticker and maps
    them onto the shared FundamentalData shape. Fields the payloads do not
    supply are reported via ``unavailable_fields`` rather than raising.
    """

    def __init__(self, api_key: str) -> None:
        """Store credentials; an empty key is a configuration error."""
        if not api_key:
            raise ProviderError("Finnhub API key is required")
        self._api_key = api_key
        self._base_url = "https://finnhub.io/api/v1"

    @staticmethod
    def _first_float(metrics: dict, *keys: str) -> float | None:
        """Return the first metric under *keys* that converts to a float.

        Uses an explicit None check so a legitimate 0.0 value is kept; the
        previous ``metrics.get(a) or metrics.get(b)`` fallback treated 0 as
        missing and silently skipped it.
        """
        for key in keys:
            value = _safe_float(metrics.get(key))
            if value is not None:
                return value
        return None

    async def fetch_fundamentals(self, ticker: str) -> FundamentalData:
        """Fetch fundamentals for *ticker* from Finnhub.

        Raises:
            RateLimitError: on HTTP 429 from any of the three endpoints.
            ProviderError: on 401/403 (access denied) or any other non-200.
        """
        async with httpx.AsyncClient(timeout=30.0, verify=_CA_BUNDLE_PATH) as client:
            profile_resp = await client.get(
                f"{self._base_url}/stock/profile2",
                params={"symbol": ticker, "token": self._api_key},
            )
            metric_resp = await client.get(
                f"{self._base_url}/stock/metric",
                params={"symbol": ticker, "metric": "all", "token": self._api_key},
            )
            earnings_resp = await client.get(
                f"{self._base_url}/stock/earnings",
                params={"symbol": ticker, "limit": 1, "token": self._api_key},
            )
        # Responses are fully buffered by httpx, so they can be inspected
        # after the client context has closed.
        for resp, endpoint in (
            (profile_resp, "profile2"),
            (metric_resp, "stock/metric"),
            (earnings_resp, "stock/earnings"),
        ):
            if resp.status_code == 429:
                raise RateLimitError(f"Finnhub rate limit hit for {ticker} ({endpoint})")
            if resp.status_code in (401, 403):
                raise ProviderError(f"Finnhub access denied for {ticker} ({endpoint}): HTTP {resp.status_code}")
            if resp.status_code != 200:
                raise ProviderError(f"Finnhub error for {ticker} ({endpoint}): HTTP {resp.status_code}")
        profile_payload = profile_resp.json() if profile_resp.text else {}
        metric_payload = metric_resp.json() if metric_resp.text else {}
        earnings_payload = earnings_resp.json() if earnings_resp.text else []

        metrics = metric_payload.get("metric", {}) if isinstance(metric_payload, dict) else {}
        market_cap = _safe_float((profile_payload or {}).get("marketCapitalization"))
        pe_ratio = self._first_float(metrics, "peTTM", "peNormalizedAnnual")
        revenue_growth = self._first_float(metrics, "revenueGrowthTTMYoy", "revenueGrowth5Y")
        earnings_surprise = None
        if isinstance(earnings_payload, list) and earnings_payload:
            first = earnings_payload[0] if isinstance(earnings_payload[0], dict) else {}
            earnings_surprise = _safe_float(first.get("surprisePercent"))

        # Record which canonical fields the provider could not supply.
        field_values = {
            "pe_ratio": pe_ratio,
            "revenue_growth": revenue_growth,
            "earnings_surprise": earnings_surprise,
            "market_cap": market_cap,
        }
        unavailable = {
            name: "not available from provider payload"
            for name, value in field_values.items()
            if value is None
        }
        return FundamentalData(
            ticker=ticker,
            pe_ratio=pe_ratio,
            revenue_growth=revenue_growth,
            earnings_surprise=earnings_surprise,
            market_cap=market_cap,
            fetched_at=datetime.now(timezone.utc),
            unavailable_fields=unavailable,
        )
class AlphaVantageFundamentalProvider:
    """Fundamentals provider backed by Alpha Vantage free endpoints.

    Combines OVERVIEW (P/E, market cap), EARNINGS (latest surprise), and
    INCOME_STATEMENT (YoY revenue growth) into one FundamentalData snapshot.
    """

    def __init__(self, api_key: str) -> None:
        """Remember the API key; an empty key is a configuration error."""
        if not api_key:
            raise ProviderError("Alpha Vantage API key is required")
        self._api_key = api_key
        self._base_url = "https://www.alphavantage.co/query"

    async def fetch_fundamentals(self, ticker: str) -> FundamentalData:
        """Fetch fundamentals for *ticker* from Alpha Vantage."""
        responses: dict[str, httpx.Response] = {}
        async with httpx.AsyncClient(timeout=30.0, verify=_CA_BUNDLE_PATH) as client:
            # One GET per function, issued sequentially in a fixed order.
            for function in ("OVERVIEW", "EARNINGS", "INCOME_STATEMENT"):
                responses[function] = await client.get(
                    self._base_url,
                    params={"function": function, "symbol": ticker, "apikey": self._api_key},
                )
        for endpoint, resp in responses.items():
            if resp.status_code == 429:
                raise RateLimitError(f"Alpha Vantage rate limit hit for {ticker} ({endpoint})")
            if resp.status_code != 200:
                raise ProviderError(f"Alpha Vantage error for {ticker} ({endpoint}): HTTP {resp.status_code}")
        overview = responses["OVERVIEW"].json() if responses["OVERVIEW"].text else {}
        earnings = responses["EARNINGS"].json() if responses["EARNINGS"].text else {}
        income = responses["INCOME_STATEMENT"].json() if responses["INCOME_STATEMENT"].text else {}

        # Alpha Vantage signals blocking/throttling inside the OVERVIEW body.
        if isinstance(overview, dict) and overview.get("Information"):
            raise ProviderError(f"Alpha Vantage unavailable for {ticker}: {overview.get('Information')}")
        if isinstance(overview, dict) and overview.get("Note"):
            raise RateLimitError(f"Alpha Vantage rate limit for {ticker}: {overview.get('Note')}")

        pe_ratio = _safe_float((overview or {}).get("PERatio"))
        market_cap = _safe_float((overview or {}).get("MarketCapitalization"))

        earnings_surprise = None
        quarterly = earnings.get("quarterlyEarnings", []) if isinstance(earnings, dict) else []
        if isinstance(quarterly, list) and quarterly:
            latest_quarter = quarterly[0] if isinstance(quarterly[0], dict) else {}
            earnings_surprise = _safe_float(latest_quarter.get("surprisePercentage"))

        revenue_growth = None
        annual = income.get("annualReports", []) if isinstance(income, dict) else []
        if isinstance(annual, list) and len(annual) >= 2:
            newest = _safe_float((annual[0] or {}).get("totalRevenue"))
            previous = _safe_float((annual[1] or {}).get("totalRevenue"))
            if newest is not None and previous not in (None, 0):
                revenue_growth = ((newest - previous) / abs(previous)) * 100.0

        field_values = {
            "pe_ratio": pe_ratio,
            "revenue_growth": revenue_growth,
            "earnings_surprise": earnings_surprise,
            "market_cap": market_cap,
        }
        unavailable = {
            name: "not available from provider payload"
            for name, value in field_values.items()
            if value is None
        }
        return FundamentalData(
            ticker=ticker,
            pe_ratio=pe_ratio,
            revenue_growth=revenue_growth,
            earnings_surprise=earnings_surprise,
            market_cap=market_cap,
            fetched_at=datetime.now(timezone.utc),
            unavailable_fields=unavailable,
        )
class ChainedFundamentalProvider:
    """Try multiple fundamental providers in order until one succeeds."""

    def __init__(self, providers: list[tuple[str, FundamentalProvider]]) -> None:
        """Keep the ordered (name, provider) pairs; an empty chain is an error."""
        if not providers:
            raise ProviderError("No fundamental providers configured")
        self._providers = providers

    async def fetch_fundamentals(self, ticker: str) -> FundamentalData:
        """Return data from the first provider yielding any usable metric.

        A provider that raises, or that returns all-None metrics, is skipped
        and its failure recorded; if the whole chain is exhausted, a
        ProviderError summarising the first few attempts is raised.
        """
        failures: list[str] = []
        for name, provider in self._providers:
            try:
                result = await provider.fetch_fundamentals(ticker)
                metric_values = (
                    result.pe_ratio,
                    result.revenue_growth,
                    result.earnings_surprise,
                    result.market_cap,
                )
                if all(value is None for value in metric_values):
                    failures.append(f"{name}: no usable metrics returned")
                    continue
                # Tag the winning provider in the unavailable-fields map.
                annotated = dict(result.unavailable_fields)
                annotated["provider"] = name
                return FundamentalData(
                    ticker=result.ticker,
                    pe_ratio=result.pe_ratio,
                    revenue_growth=result.revenue_growth,
                    earnings_surprise=result.earnings_surprise,
                    market_cap=result.market_cap,
                    fetched_at=result.fetched_at,
                    unavailable_fields=annotated,
                )
            except Exception as exc:
                failures.append(f"{name}: {type(exc).__name__}: {exc}")
        attempts = "; ".join(failures[:6]) if failures else "no provider attempts"
        raise ProviderError(f"All fundamentals providers failed for {ticker}. Attempts: {attempts}")
def build_fundamental_provider_chain() -> FundamentalProvider:
    """Assemble the FMP → Finnhub → Alpha Vantage fallback chain.

    Only providers whose API key is configured are included; raises
    ProviderError when no key at all is set.
    """
    candidates = (
        ("fmp", settings.fmp_api_key, FMPFundamentalProvider),
        ("finnhub", settings.finnhub_api_key, FinnhubFundamentalProvider),
        ("alpha_vantage", settings.alpha_vantage_api_key, AlphaVantageFundamentalProvider),
    )
    chain: list[tuple[str, FundamentalProvider]] = [
        (name, factory(key)) for name, key, factory in candidates if key
    ]
    if not chain:
        raise ProviderError(
            "No fundamentals provider configured. Set one of FMP_API_KEY, FINNHUB_API_KEY, ALPHA_VANTAGE_API_KEY"
        )
    logger.info("Fundamentals provider chain configured: %s", [name for name, _ in chain])
    return ChainedFundamentalProvider(chain)

View File

@@ -33,6 +33,24 @@ Rules:
- reasoning should cite specific recent news or events you found
"""
# Prompt template for one-call batch sentiment: {tickers_csv} is substituted
# with a comma-separated list of symbols, and the model must reply with a bare
# JSON array holding exactly one object per requested ticker.
_SENTIMENT_BATCH_PROMPT = """\
Search the web for the LATEST news, analyst opinions, and market developments \
about each stock ticker from the past 24-48 hours.
Tickers:
{tickers_csv}
Respond ONLY with a JSON array (no markdown, no extra text), one object per ticker:
[{{"ticker":"AAPL","classification":"bullish|bearish|neutral","confidence":0-100,"reasoning":"brief explanation"}}]
Rules:
- Include every ticker exactly once
- ticker must be uppercase symbol
- classification must be exactly one of: bullish, bearish, neutral
- confidence must be an integer from 0 to 100
- reasoning should cite specific recent news or events you found
"""
# Closed set of classification labels accepted back from the model.
VALID_CLASSIFICATIONS = {"bullish", "bearish", "neutral"}
@@ -49,17 +67,8 @@ class OpenAISentimentProvider:
self._client = AsyncOpenAI(api_key=api_key, http_client=http_client)
self._model = model
async def fetch_sentiment(self, ticker: str) -> SentimentData:
"""Use the Responses API with web_search_preview to get live sentiment."""
try:
response = await self._client.responses.create(
model=self._model,
tools=[{"type": "web_search_preview"}],
instructions="You are a financial sentiment analyst. Always respond with valid JSON only, no markdown fences.",
input=_SENTIMENT_PROMPT.format(ticker=ticker),
)
# Extract text from the ResponseOutputMessage in the output
@staticmethod
def _extract_raw_text(response: object, ticker_context: str) -> str:
raw_text = ""
for item in response.output:
if item.type == "message" and item.content:
@@ -71,22 +80,18 @@ class OpenAISentimentProvider:
break
if not raw_text:
raise ProviderError(f"No text output from OpenAI for {ticker}")
raise ProviderError(f"No text output from OpenAI for {ticker_context}")
raw_text = raw_text.strip()
logger.debug("OpenAI raw response for %s: %s", ticker, raw_text)
# Strip markdown fences if present
clean = raw_text
clean = raw_text.strip()
if clean.startswith("```"):
clean = clean.split("\n", 1)[1] if "\n" in clean else clean[3:]
if clean.endswith("```"):
clean = clean[:-3]
clean = clean.strip()
return clean.strip()
parsed = json.loads(clean)
classification = parsed.get("classification", "").lower()
@staticmethod
def _normalize_single_result(parsed: dict, ticker: str, citations: list[dict[str, str]]) -> SentimentData:
classification = str(parsed.get("classification", "")).lower()
if classification not in VALID_CLASSIFICATIONS:
raise ProviderError(
f"Invalid classification '{classification}' from OpenAI for {ticker}"
@@ -94,11 +99,39 @@ class OpenAISentimentProvider:
confidence = int(parsed.get("confidence", 50))
confidence = max(0, min(100, confidence))
reasoning = str(parsed.get("reasoning", ""))
reasoning = parsed.get("reasoning", "")
if reasoning:
logger.info("OpenAI sentiment for %s: %s (confidence=%d) — %s",
ticker, classification, confidence, reasoning)
logger.info(
"OpenAI sentiment for %s: %s (confidence=%d) — %s",
ticker,
classification,
confidence,
reasoning,
)
return SentimentData(
ticker=ticker,
classification=classification,
confidence=confidence,
source="openai",
timestamp=datetime.now(timezone.utc),
reasoning=reasoning,
citations=citations,
)
async def fetch_sentiment(self, ticker: str) -> SentimentData:
"""Use the Responses API with web_search_preview to get live sentiment."""
try:
response = await self._client.responses.create(
model=self._model,
tools=[{"type": "web_search_preview"}],
instructions="You are a financial sentiment analyst. Always respond with valid JSON only, no markdown fences.",
input=_SENTIMENT_PROMPT.format(ticker=ticker),
)
clean = self._extract_raw_text(response, ticker)
logger.debug("OpenAI raw response for %s: %s", ticker, clean)
parsed = json.loads(clean)
# Extract url_citation annotations from response output
citations: list[dict[str, str]] = []
@@ -112,19 +145,10 @@ class OpenAISentimentProvider:
"url": getattr(annotation, "url", ""),
"title": getattr(annotation, "title", ""),
})
return SentimentData(
ticker=ticker,
classification=classification,
confidence=confidence,
source="openai",
timestamp=datetime.now(timezone.utc),
reasoning=reasoning,
citations=citations,
)
return self._normalize_single_result(parsed, ticker, citations)
except json.JSONDecodeError as exc:
logger.error("Failed to parse OpenAI JSON for %s: %s — raw: %s", ticker, exc, raw_text)
logger.error("Failed to parse OpenAI JSON for %s: %s", ticker, exc)
raise ProviderError(f"Invalid JSON from OpenAI for {ticker}") from exc
except ProviderError:
raise
@@ -134,3 +158,49 @@ class OpenAISentimentProvider:
raise RateLimitError(f"OpenAI rate limit hit for {ticker}") from exc
logger.error("OpenAI provider error for %s: %s", ticker, exc)
raise ProviderError(f"OpenAI provider error for {ticker}: {exc}") from exc
async def fetch_sentiment_batch(self, tickers: list[str]) -> dict[str, SentimentData]:
    """Fetch sentiment for multiple tickers in one OpenAI request.

    Returns a map keyed by uppercase ticker symbol; malformed or unrequested
    rows in the model's reply are silently skipped.
    """
    symbols = [sym.strip().upper() for sym in tickers if sym and sym.strip()]
    if not symbols:
        return {}
    ticker_context = ",".join(symbols)
    try:
        response = await self._client.responses.create(
            model=self._model,
            tools=[{"type": "web_search_preview"}],
            instructions="You are a financial sentiment analyst. Always respond with valid JSON only, no markdown fences.",
            input=_SENTIMENT_BATCH_PROMPT.format(tickers_csv=", ".join(symbols)),
        )
        clean = self._extract_raw_text(response, ticker_context)
        logger.debug("OpenAI batch raw response for %s: %s", ticker_context, clean)
        parsed = json.loads(clean)
        if not isinstance(parsed, list):
            raise ProviderError("Batch sentiment response must be a JSON array")
        wanted = set(symbols)
        results: dict[str, SentimentData] = {}
        for entry in parsed:
            if not isinstance(entry, dict):
                continue
            symbol = str(entry.get("ticker", "")).strip().upper()
            if symbol not in wanted:
                continue
            try:
                # Batch responses carry no per-ticker citations.
                results[symbol] = self._normalize_single_result(entry, symbol, citations=[])
            except Exception:
                continue
        return results
    except json.JSONDecodeError as exc:
        raise ProviderError(f"Invalid batch JSON from OpenAI for {ticker_context}") from exc
    except ProviderError:
        raise
    except Exception as exc:
        msg = str(exc).lower()
        if "429" in msg or "rate" in msg or "quota" in msg:
            raise RateLimitError(f"OpenAI rate limit hit for batch {ticker_context}") from exc
        raise ProviderError(f"OpenAI batch provider error for {ticker_context}: {exc}") from exc

View File

@@ -3,7 +3,7 @@
All endpoints require admin role.
"""
from fastapi import APIRouter, Depends
from fastapi import APIRouter, Depends, Query
from sqlalchemy.ext.asyncio import AsyncSession
from app.dependencies import get_db, require_admin
@@ -12,13 +12,16 @@ from app.schemas.admin import (
CreateUserRequest,
DataCleanupRequest,
JobToggle,
RecommendationConfigUpdate,
PasswordReset,
RegistrationToggle,
SystemSettingUpdate,
TickerUniverseUpdate,
UserManagement,
)
from app.schemas.common import APIEnvelope
from app.services import admin_service
from app.services import ticker_universe_service
router = APIRouter(tags=["admin"])
@@ -123,6 +126,47 @@ async def list_settings(
)
@router.get("/admin/settings/recommendations", response_model=APIEnvelope)
async def get_recommendation_settings(
    _admin: User = Depends(require_admin),
    db: AsyncSession = Depends(get_db),
):
    """Return the current recommendation configuration (admin only)."""
    config = await admin_service.get_recommendation_config(db)
    return APIEnvelope(status="success", data=config)
@router.put("/admin/settings/recommendations", response_model=APIEnvelope)
async def update_recommendation_settings(
    body: RecommendationConfigUpdate,
    _admin: User = Depends(require_admin),
    db: AsyncSession = Depends(get_db),
):
    """Partially update the recommendation configuration (admin only).

    Only fields explicitly present on the request body are applied
    (``exclude_unset=True``); the updated configuration is returned.
    """
    updated = await admin_service.update_recommendation_config(
        db,
        body.model_dump(exclude_unset=True),
    )
    return APIEnvelope(status="success", data=updated)
@router.get("/admin/settings/ticker-universe", response_model=APIEnvelope)
async def get_ticker_universe_setting(
    _admin: User = Depends(require_admin),
    db: AsyncSession = Depends(get_db),
):
    """Return the default ticker-universe setting (admin only)."""
    data = await admin_service.get_ticker_universe_default(db)
    return APIEnvelope(status="success", data=data)
@router.put("/admin/settings/ticker-universe", response_model=APIEnvelope)
async def update_ticker_universe_setting(
    body: TickerUniverseUpdate,
    _admin: User = Depends(require_admin),
    db: AsyncSession = Depends(get_db),
):
    """Set the default ticker universe (admin only)."""
    data = await admin_service.update_ticker_universe_default(db, body.universe)
    return APIEnvelope(status="success", data=data)
@router.put("/admin/settings/{key}", response_model=APIEnvelope)
async def update_setting(
key: str,
@@ -138,6 +182,21 @@ async def update_setting(
)
@router.post("/admin/tickers/bootstrap", response_model=APIEnvelope)
async def bootstrap_tickers(
    # FastAPI rejects any value outside the pattern before the handler runs.
    universe: str = Query("sp500", pattern="^(sp500|nasdaq100|nasdaq_all)$"),
    prune_missing: bool = Query(False),
    _admin: User = Depends(require_admin),
    db: AsyncSession = Depends(get_db),
):
    """Seed the ticker table from a named index universe (admin only).

    ``prune_missing`` is forwarded to the service — presumably it removes
    tickers no longer in the chosen universe; confirm against
    ticker_universe_service.bootstrap_universe.
    """
    result = await ticker_universe_service.bootstrap_universe(
        db,
        universe,
        prune_missing=prune_missing,
    )
    return APIEnvelope(status="success", data=result)
# ---------------------------------------------------------------------------
# Data cleanup
# ---------------------------------------------------------------------------
@@ -167,6 +226,15 @@ async def list_jobs(
return APIEnvelope(status="success", data=jobs)
@router.get("/admin/pipeline/readiness", response_model=APIEnvelope)
async def get_pipeline_readiness(
    _admin: User = Depends(require_admin),
    db: AsyncSession = Depends(get_db),
):
    """Return the data-pipeline readiness report (admin only)."""
    data = await admin_service.get_pipeline_readiness(db)
    return APIEnvelope(status="success", data=data)
@router.post("/admin/jobs/{job_name}/trigger", response_model=APIEnvelope)
async def trigger_job(
job_name: str,

View File

@@ -18,10 +18,17 @@ from app.dependencies import get_db, require_access
from app.exceptions import ProviderError
from app.models.user import User
from app.providers.alpaca import AlpacaOHLCVProvider
from app.providers.fmp import FMPFundamentalProvider
from app.providers.fundamentals_chain import build_fundamental_provider_chain
from app.providers.openai_sentiment import OpenAISentimentProvider
from app.services.rr_scanner_service import scan_ticker
from app.schemas.common import APIEnvelope
from app.services import fundamental_service, ingestion_service, sentiment_service
from app.services import (
fundamental_service,
ingestion_service,
scoring_service,
sentiment_service,
sr_service,
)
logger = logging.getLogger(__name__)
@@ -99,10 +106,10 @@ async def fetch_symbol(
}
# --- Fundamentals ---
if settings.fmp_api_key:
if settings.fmp_api_key or settings.finnhub_api_key or settings.alpha_vantage_api_key:
try:
fmp_provider = FMPFundamentalProvider(settings.fmp_api_key)
fdata = await fmp_provider.fetch_fundamentals(symbol_upper)
fundamentals_provider = build_fundamental_provider_chain()
fdata = await fundamentals_provider.fetch_fundamentals(symbol_upper)
await fundamental_service.store_fundamental(
db,
symbol=symbol_upper,
@@ -119,9 +126,50 @@ async def fetch_symbol(
else:
sources["fundamentals"] = {
"status": "skipped",
"message": "FMP API key not configured",
"message": "No fundamentals provider key configured",
}
# --- Derived pipeline: S/R levels ---
try:
levels = await sr_service.recalculate_sr_levels(db, symbol_upper)
sources["sr_levels"] = {
"status": "ok",
"count": len(levels),
"message": None,
}
except Exception as exc:
logger.error("S/R recalc failed for %s: %s", symbol_upper, exc)
sources["sr_levels"] = {"status": "error", "message": str(exc)}
# --- Derived pipeline: scores ---
try:
score_payload = await scoring_service.get_score(db, symbol_upper)
sources["scores"] = {
"status": "ok",
"composite_score": score_payload.get("composite_score"),
"missing_dimensions": score_payload.get("missing_dimensions", []),
"message": None,
}
except Exception as exc:
logger.error("Score recompute failed for %s: %s", symbol_upper, exc)
sources["scores"] = {"status": "error", "message": str(exc)}
# --- Derived pipeline: scanner ---
try:
setups = await scan_ticker(
db,
symbol_upper,
rr_threshold=settings.default_rr_threshold,
)
sources["scanner"] = {
"status": "ok",
"setups_found": len(setups),
"message": None,
}
except Exception as exc:
logger.error("Scanner run failed for %s: %s", symbol_upper, exc)
sources["scanner"] = {"status": "error", "message": str(exc)}
# Always return success — per-source breakdown tells the full story
return APIEnvelope(
status="success",

View File

@@ -5,8 +5,8 @@ from sqlalchemy.ext.asyncio import AsyncSession
from app.dependencies import get_db, require_access
from app.schemas.common import APIEnvelope
from app.schemas.trade_setup import TradeSetupResponse
from app.services.rr_scanner_service import get_trade_setups
from app.schemas.trade_setup import RecommendationSummaryResponse, TradeSetupResponse
from app.services.rr_scanner_service import get_trade_setup_history, get_trade_setups
router = APIRouter(tags=["trades"])
@@ -16,13 +16,73 @@ async def list_trade_setups(
direction: str | None = Query(
None, description="Filter by direction: long or short"
),
min_confidence: float | None = Query(
None, ge=0, le=100, description="Minimum confidence score"
),
recommended_action: str | None = Query(
None,
description="Filter by action: LONG_HIGH, LONG_MODERATE, SHORT_HIGH, SHORT_MODERATE, NEUTRAL",
),
_user=Depends(require_access),
db: AsyncSession = Depends(get_db),
) -> APIEnvelope:
"""Get all trade setups sorted by R:R desc, secondary composite desc.
"""Get latest trade setups with recommendation data."""
rows = await get_trade_setups(
db,
direction=direction,
min_confidence=min_confidence,
recommended_action=recommended_action,
)
data = []
for row in rows:
summary = RecommendationSummaryResponse(
action=row.get("recommended_action") or "NEUTRAL",
reasoning=row.get("reasoning"),
risk_level=row.get("risk_level"),
composite_score=row["composite_score"],
)
payload = {**row, "recommendation_summary": summary}
data.append(TradeSetupResponse(**payload).model_dump(mode="json"))
Optional direction filter (long/short).
"""
rows = await get_trade_setups(db, direction=direction)
data = [TradeSetupResponse(**r).model_dump(mode="json") for r in rows]
return APIEnvelope(status="success", data=data)
@router.get("/trades/{symbol}", response_model=APIEnvelope)
async def get_ticker_trade_setups(
    symbol: str,
    _user=Depends(require_access),
    db: AsyncSession = Depends(get_db),
) -> APIEnvelope:
    """Return the latest trade setups for one ticker, each with its recommendation summary."""
    rows = await get_trade_setups(db, symbol=symbol)
    data: list[dict] = []
    for setup in rows:
        recommendation = RecommendationSummaryResponse(
            action=setup.get("recommended_action") or "NEUTRAL",
            reasoning=setup.get("reasoning"),
            risk_level=setup.get("risk_level"),
            composite_score=setup["composite_score"],
        )
        enriched = {**setup, "recommendation_summary": recommendation}
        data.append(TradeSetupResponse(**enriched).model_dump(mode="json"))
    return APIEnvelope(status="success", data=data)
@router.get("/trades/{symbol}/history", response_model=APIEnvelope)
async def get_ticker_trade_history(
    symbol: str,
    _user=Depends(require_access),
    db: AsyncSession = Depends(get_db),
) -> APIEnvelope:
    """Return the full trade-setup history for one ticker, each row with its recommendation summary."""
    rows = await get_trade_setup_history(db, symbol=symbol)

    def _serialize(row: dict) -> dict:
        # Attach the recommendation summary before shaping the response model.
        summary = RecommendationSummaryResponse(
            action=row.get("recommended_action") or "NEUTRAL",
            reasoning=row.get("reasoning"),
            risk_level=row.get("risk_level"),
            composite_score=row["composite_score"],
        )
        payload = {**row, "recommendation_summary": summary}
        return TradeSetupResponse(**payload).model_dump(mode="json")

    return APIEnvelope(status="success", data=[_serialize(row) for row in rows])

View File

@@ -15,21 +15,27 @@ from __future__ import annotations
import json
import logging
from datetime import date, timedelta
import asyncio
from datetime import date, datetime, timedelta, timezone
from apscheduler.schedulers.asyncio import AsyncIOScheduler
from sqlalchemy import select
from sqlalchemy import case, func, select
from sqlalchemy.ext.asyncio import AsyncSession
from app.config import settings
from app.database import async_session_factory
from app.models.fundamental import FundamentalData
from app.models.ohlcv import OHLCVRecord
from app.models.settings import SystemSetting
from app.models.sentiment import SentimentScore
from app.models.ticker import Ticker
from app.providers.alpaca import AlpacaOHLCVProvider
from app.providers.fmp import FMPFundamentalProvider
from app.providers.fundamentals_chain import build_fundamental_provider_chain
from app.providers.openai_sentiment import OpenAISentimentProvider
from app.providers.protocol import SentimentData
from app.services import fundamental_service, ingestion_service, sentiment_service
from app.services.rr_scanner_service import scan_all_tickers
from app.services.ticker_universe_service import bootstrap_universe
logger = logging.getLogger(__name__)
@@ -43,6 +49,64 @@ _last_successful: dict[str, str | None] = {
"fundamental_collector": None,
}
_job_runtime: dict[str, dict[str, object]] = {
"data_collector": {
"running": False,
"status": "idle",
"processed": 0,
"total": None,
"progress_pct": None,
"current_ticker": None,
"started_at": None,
"finished_at": None,
"message": None,
},
"sentiment_collector": {
"running": False,
"status": "idle",
"processed": 0,
"total": None,
"progress_pct": None,
"current_ticker": None,
"started_at": None,
"finished_at": None,
"message": None,
},
"fundamental_collector": {
"running": False,
"status": "idle",
"processed": 0,
"total": None,
"progress_pct": None,
"current_ticker": None,
"started_at": None,
"finished_at": None,
"message": None,
},
"rr_scanner": {
"running": False,
"status": "idle",
"processed": 0,
"total": None,
"progress_pct": None,
"current_ticker": None,
"started_at": None,
"finished_at": None,
"message": None,
},
"ticker_universe_sync": {
"running": False,
"status": "idle",
"processed": 0,
"total": None,
"progress_pct": None,
"current_ticker": None,
"started_at": None,
"finished_at": None,
"message": None,
},
}
# ---------------------------------------------------------------------------
# Helpers
@@ -62,6 +126,71 @@ def _log_job_error(job_name: str, ticker: str, error: Exception) -> None:
)
def _runtime_start(job_name: str, total: int | None = None, message: str | None = None) -> None:
    """Reset a job's runtime record to a fresh 'running' state."""
    started = datetime.now(timezone.utc).isoformat()
    has_total = bool(total and total > 0)
    _job_runtime[job_name] = {
        "running": True,
        "status": "running",
        "processed": 0,
        # Progress is only meaningful when a positive total is known.
        "progress_pct": 0.0 if has_total else None,
        "total": total,
        "current_ticker": None,
        "started_at": started,
        "finished_at": None,
        "message": message,
    }
def _runtime_progress(
    job_name: str,
    processed: int,
    total: int | None,
    current_ticker: str | None = None,
    message: str | None = None,
) -> None:
    """Merge progress counters into a running job's runtime record."""
    if total and total > 0:
        pct: float | None = round((processed / total) * 100.0, 1)
    else:
        pct = None
    state = _job_runtime.get(job_name, {})
    state.update(
        running=True,
        status="running",
        processed=processed,
        total=total,
        progress_pct=pct,
        current_ticker=current_ticker,
        message=message,
    )
    _job_runtime[job_name] = state
def _runtime_finish(
    job_name: str,
    status: str,
    processed: int,
    total: int | None,
    message: str | None = None,
) -> None:
    """Mark a job's runtime record as finished with a terminal status."""
    state = _job_runtime.get(job_name, {})
    completed = bool(total) and processed >= total
    state.update(
        running=False,
        status=status,
        processed=processed,
        total=total,
        # Snap to 100% only when every item was processed; otherwise keep
        # whatever progress was last reported.
        progress_pct=100.0 if completed else state.get("progress_pct"),
        current_ticker=None,
        finished_at=datetime.now(timezone.utc).isoformat(),
        message=message,
    )
    _job_runtime[job_name] = state
def get_job_runtime_snapshot(job_name: str | None = None) -> dict[str, dict[str, object]] | dict[str, object]:
    """Return a shallow copy of one job's runtime state, or of all jobs.

    Copies keep callers from mutating the live in-memory state.
    """
    if job_name is None:
        return {name: dict(state) for name, state in _job_runtime.items()}
    return dict(_job_runtime.get(job_name, {}))
async def _is_job_enabled(db: AsyncSession, job_name: str) -> bool:
"""Check SystemSetting for job enabled state. Defaults to True."""
key = f"job_{job_name}_enabled"
@@ -80,6 +209,61 @@ async def _get_all_tickers(db: AsyncSession) -> list[str]:
return list(result.scalars().all())
async def _get_ohlcv_priority_tickers(db: AsyncSession) -> list[str]:
"""Return symbols prioritized for OHLCV collection.
Priority:
1) Tickers with no OHLCV bars
2) Tickers with data, oldest latest OHLCV date first
3) Alphabetical tiebreaker
"""
latest_date = func.max(OHLCVRecord.date)
missing_first = case((latest_date.is_(None), 0), else_=1)
result = await db.execute(
select(Ticker.symbol)
.outerjoin(OHLCVRecord, OHLCVRecord.ticker_id == Ticker.id)
.group_by(Ticker.id, Ticker.symbol)
.order_by(missing_first.asc(), latest_date.asc(), Ticker.symbol.asc())
)
return list(result.scalars().all())
async def _get_sentiment_priority_tickers(db: AsyncSession) -> list[str]:
"""Return symbols prioritized for sentiment collection.
Priority:
1) Tickers with no sentiment records
2) Tickers with records, oldest latest sentiment timestamp first
3) Alphabetical tiebreaker
"""
latest_ts = func.max(SentimentScore.timestamp)
missing_first = case((latest_ts.is_(None), 0), else_=1)
result = await db.execute(
select(Ticker.symbol)
.outerjoin(SentimentScore, SentimentScore.ticker_id == Ticker.id)
.group_by(Ticker.id, Ticker.symbol)
.order_by(missing_first.asc(), latest_ts.asc(), Ticker.symbol.asc())
)
return list(result.scalars().all())
async def _get_fundamental_priority_tickers(db: AsyncSession) -> list[str]:
"""Return symbols prioritized for fundamentals refresh.
Priority:
1) Tickers with no fundamentals snapshot yet
2) Tickers with existing fundamentals, oldest fetched_at first
3) Alphabetical tiebreaker
"""
missing_first = case((FundamentalData.fetched_at.is_(None), 0), else_=1)
result = await db.execute(
select(Ticker.symbol)
.outerjoin(FundamentalData, FundamentalData.ticker_id == Ticker.id)
.order_by(missing_first.asc(), FundamentalData.fetched_at.asc(), Ticker.symbol.asc())
)
return list(result.scalars().all())
def _resume_tickers(symbols: list[str], job_name: str) -> list[str]:
"""Reorder tickers to resume after the last successful one (rate-limit resume).
@@ -94,6 +278,11 @@ def _resume_tickers(symbols: list[str], job_name: str) -> list[str]:
return symbols[idx + 1:] + symbols[:idx + 1]
def _chunked(symbols: list[str], chunk_size: int) -> list[list[str]]:
size = max(1, chunk_size)
return [symbols[i:i + size] for i in range(0, len(symbols), size)]
# ---------------------------------------------------------------------------
# Job: Data Collector (OHLCV)
# ---------------------------------------------------------------------------
@@ -104,46 +293,57 @@ async def collect_ohlcv() -> None:
Uses AlpacaOHLCVProvider. Processes each ticker independently.
On rate limit, records last successful ticker for resume.
Start date is resolved by ingestion progress:
- existing ticker: resume from last_ingested_date + 1
- new ticker: backfill ~1 year by default
"""
job_name = "data_collector"
logger.info(json.dumps({"event": "job_start", "job": job_name}))
_runtime_start(job_name)
processed = 0
total: int | None = None
try:
async with async_session_factory() as db:
if not await _is_job_enabled(db, job_name):
logger.info(json.dumps({"event": "job_skipped", "job": job_name, "reason": "disabled"}))
_runtime_finish(job_name, "skipped", processed=0, total=0, message="Disabled")
return
symbols = await _get_all_tickers(db)
symbols = await _get_ohlcv_priority_tickers(db)
if not symbols:
logger.info(json.dumps({"event": "job_complete", "job": job_name, "tickers": 0}))
_runtime_finish(job_name, "completed", processed=0, total=0, message="No tickers")
return
# Reorder for rate-limit resume
symbols = _resume_tickers(symbols, job_name)
total = len(symbols)
_runtime_progress(job_name, processed=0, total=total)
# Build provider (skip if keys not configured)
if not settings.alpaca_api_key or not settings.alpaca_api_secret:
logger.warning(json.dumps({"event": "job_skipped", "job": job_name, "reason": "alpaca keys not configured"}))
_runtime_finish(job_name, "skipped", processed=0, total=total, message="Alpaca keys not configured")
return
try:
provider = AlpacaOHLCVProvider(settings.alpaca_api_key, settings.alpaca_api_secret)
except Exception as exc:
logger.error(json.dumps({"event": "job_error", "job": job_name, "error_type": type(exc).__name__, "message": str(exc)}))
_runtime_finish(job_name, "error", processed=0, total=total, message=str(exc))
return
end_date = date.today()
start_date = end_date - timedelta(days=5) # Fetch last 5 days to catch up
processed = 0
for symbol in symbols:
_runtime_progress(job_name, processed=processed, total=total, current_ticker=symbol)
async with async_session_factory() as db:
try:
result = await ingestion_service.fetch_and_ingest(
db, provider, symbol, start_date=start_date, end_date=end_date,
db, provider, symbol, start_date=None, end_date=end_date,
)
_last_successful[job_name] = symbol
processed += 1
_runtime_progress(job_name, processed=processed, total=total, current_ticker=symbol)
logger.info(json.dumps({
"event": "ticker_collected",
"job": job_name,
@@ -159,6 +359,7 @@ async def collect_ohlcv() -> None:
"ticker": symbol,
"processed": processed,
}))
_runtime_finish(job_name, "rate_limited", processed=processed, total=total, message=f"Rate limited at {symbol}")
return
except Exception as exc:
_log_job_error(job_name, symbol, exc)
@@ -166,6 +367,10 @@ async def collect_ohlcv() -> None:
# Reset resume pointer on full completion
_last_successful[job_name] = None
logger.info(json.dumps({"event": "job_complete", "job": job_name, "tickers": processed}))
_runtime_finish(job_name, "completed", processed=processed, total=total, message=f"Processed {processed} tickers")
except Exception as exc:
logger.error(json.dumps({"event": "job_error", "job": job_name, "error_type": type(exc).__name__, "message": str(exc)}))
_runtime_finish(job_name, "error", processed=processed, total=total, message=str(exc))
# ---------------------------------------------------------------------------
@@ -181,35 +386,90 @@ async def collect_sentiment() -> None:
"""
job_name = "sentiment_collector"
logger.info(json.dumps({"event": "job_start", "job": job_name}))
_runtime_start(job_name)
processed = 0
total: int | None = None
try:
async with async_session_factory() as db:
if not await _is_job_enabled(db, job_name):
logger.info(json.dumps({"event": "job_skipped", "job": job_name, "reason": "disabled"}))
_runtime_finish(job_name, "skipped", processed=0, total=0, message="Disabled")
return
symbols = await _get_all_tickers(db)
symbols = await _get_sentiment_priority_tickers(db)
if not symbols:
logger.info(json.dumps({"event": "job_complete", "job": job_name, "tickers": 0}))
_runtime_finish(job_name, "completed", processed=0, total=0, message="No tickers")
return
symbols = _resume_tickers(symbols, job_name)
total = len(symbols)
_runtime_progress(job_name, processed=0, total=total)
if not settings.openai_api_key:
logger.warning(json.dumps({"event": "job_skipped", "job": job_name, "reason": "openai key not configured"}))
_runtime_finish(job_name, "skipped", processed=0, total=total, message="OpenAI key not configured")
return
try:
provider = OpenAISentimentProvider(settings.openai_api_key, settings.openai_model)
except Exception as exc:
logger.error(json.dumps({"event": "job_error", "job": job_name, "error_type": type(exc).__name__, "message": str(exc)}))
_runtime_finish(job_name, "error", processed=0, total=total, message=str(exc))
return
processed = 0
batch_size = max(1, settings.openai_sentiment_batch_size)
batches = _chunked(symbols, batch_size)
for symbol in symbols:
async with async_session_factory() as db:
for batch in batches:
current_hint = batch[0] if len(batch) == 1 else f"{batch[0]} (+{len(batch) - 1})"
_runtime_progress(job_name, processed=processed, total=total, current_ticker=current_hint)
batch_results: dict[str, SentimentData] = {}
if len(batch) > 1 and hasattr(provider, "fetch_sentiment_batch"):
try:
batch_results = await provider.fetch_sentiment_batch(batch)
except Exception as exc:
msg = str(exc).lower()
if "rate" in msg or "quota" in msg or "429" in msg:
logger.warning(json.dumps({
"event": "rate_limited",
"job": job_name,
"ticker": batch[0],
"processed": processed,
}))
_runtime_finish(job_name, "rate_limited", processed=processed, total=total, message=f"Rate limited at {batch[0]}")
return
logger.warning(json.dumps({
"event": "batch_fallback",
"job": job_name,
"batch": batch,
"reason": str(exc),
}))
for symbol in batch:
_runtime_progress(job_name, processed=processed, total=total, current_ticker=symbol)
data = batch_results.get(symbol) if batch_results else None
if data is None:
try:
data = await provider.fetch_sentiment(symbol)
except Exception as exc:
msg = str(exc).lower()
if "rate" in msg or "quota" in msg or "429" in msg:
logger.warning(json.dumps({
"event": "rate_limited",
"job": job_name,
"ticker": symbol,
"processed": processed,
}))
_runtime_finish(job_name, "rate_limited", processed=processed, total=total, message=f"Rate limited at {symbol}")
return
_log_job_error(job_name, symbol, exc)
continue
async with async_session_factory() as db:
try:
await sentiment_service.store_sentiment(
db,
symbol=symbol,
@@ -222,6 +482,7 @@ async def collect_sentiment() -> None:
)
_last_successful[job_name] = symbol
processed += 1
_runtime_progress(job_name, processed=processed, total=total, current_ticker=symbol)
logger.info(json.dumps({
"event": "ticker_collected",
"job": job_name,
@@ -230,19 +491,14 @@ async def collect_sentiment() -> None:
"confidence": data.confidence,
}))
except Exception as exc:
msg = str(exc).lower()
if "rate" in msg or "quota" in msg or "429" in msg:
logger.warning(json.dumps({
"event": "rate_limited",
"job": job_name,
"ticker": symbol,
"processed": processed,
}))
return
_log_job_error(job_name, symbol, exc)
_last_successful[job_name] = None
logger.info(json.dumps({"event": "job_complete", "job": job_name, "tickers": processed}))
_runtime_finish(job_name, "completed", processed=processed, total=total, message=f"Processed {processed} tickers")
except Exception as exc:
logger.error(json.dumps({"event": "job_error", "job": job_name, "error_type": type(exc).__name__, "message": str(exc)}))
_runtime_finish(job_name, "error", processed=processed, total=total, message=str(exc))
# ---------------------------------------------------------------------------
@@ -258,35 +514,48 @@ async def collect_fundamentals() -> None:
"""
job_name = "fundamental_collector"
logger.info(json.dumps({"event": "job_start", "job": job_name}))
_runtime_start(job_name)
processed = 0
total: int | None = None
try:
async with async_session_factory() as db:
if not await _is_job_enabled(db, job_name):
logger.info(json.dumps({"event": "job_skipped", "job": job_name, "reason": "disabled"}))
_runtime_finish(job_name, "skipped", processed=0, total=0, message="Disabled")
return
symbols = await _get_all_tickers(db)
symbols = await _get_fundamental_priority_tickers(db)
if not symbols:
logger.info(json.dumps({"event": "job_complete", "job": job_name, "tickers": 0}))
_runtime_finish(job_name, "completed", processed=0, total=0, message="No tickers")
return
symbols = _resume_tickers(symbols, job_name)
total = len(symbols)
_runtime_progress(job_name, processed=0, total=total)
if not settings.fmp_api_key:
logger.warning(json.dumps({"event": "job_skipped", "job": job_name, "reason": "fmp key not configured"}))
if not (settings.fmp_api_key or settings.finnhub_api_key or settings.alpha_vantage_api_key):
logger.warning(json.dumps({"event": "job_skipped", "job": job_name, "reason": "no fundamentals provider keys configured"}))
_runtime_finish(job_name, "skipped", processed=0, total=total, message="No fundamentals provider keys configured")
return
try:
provider = FMPFundamentalProvider(settings.fmp_api_key)
provider = build_fundamental_provider_chain()
except Exception as exc:
logger.error(json.dumps({"event": "job_error", "job": job_name, "error_type": type(exc).__name__, "message": str(exc)}))
_runtime_finish(job_name, "error", processed=0, total=total, message=str(exc))
return
processed = 0
max_retries = max(0, settings.fundamental_rate_limit_retries)
base_backoff = max(1, settings.fundamental_rate_limit_backoff_seconds)
for symbol in symbols:
async with async_session_factory() as db:
_runtime_progress(job_name, processed=processed, total=total, current_ticker=symbol)
attempt = 0
while True:
try:
data = await provider.fetch_fundamentals(symbol)
async with async_session_factory() as db:
await fundamental_service.store_fundamental(
db,
symbol=symbol,
@@ -298,25 +567,61 @@ async def collect_fundamentals() -> None:
)
_last_successful[job_name] = symbol
processed += 1
_runtime_progress(job_name, processed=processed, total=total, current_ticker=symbol)
logger.info(json.dumps({
"event": "ticker_collected",
"job": job_name,
"ticker": symbol,
}))
break
except Exception as exc:
msg = str(exc).lower()
if "rate" in msg or "429" in msg:
if attempt < max_retries:
wait_seconds = base_backoff * (2 ** attempt)
attempt += 1
logger.warning(json.dumps({
"event": "rate_limited_retry",
"job": job_name,
"ticker": symbol,
"attempt": attempt,
"max_retries": max_retries,
"wait_seconds": wait_seconds,
"processed": processed,
}))
_runtime_progress(
job_name,
processed=processed,
total=total,
current_ticker=symbol,
message=f"Rate-limited at {symbol}; retry {attempt}/{max_retries} in {wait_seconds}s",
)
await asyncio.sleep(wait_seconds)
continue
logger.warning(json.dumps({
"event": "rate_limited",
"job": job_name,
"ticker": symbol,
"processed": processed,
}))
_runtime_finish(
job_name,
"rate_limited",
processed=processed,
total=total,
message=f"Rate limited at {symbol} after {attempt} retries",
)
return
_log_job_error(job_name, symbol, exc)
break
_last_successful[job_name] = None
logger.info(json.dumps({"event": "job_complete", "job": job_name, "tickers": processed}))
_runtime_finish(job_name, "completed", processed=processed, total=total, message=f"Processed {processed} tickers")
except Exception as exc:
logger.error(json.dumps({"event": "job_error", "job": job_name, "error_type": type(exc).__name__, "message": str(exc)}))
_runtime_finish(job_name, "error", processed=processed, total=total, message=str(exc))
# ---------------------------------------------------------------------------
@@ -332,22 +637,84 @@ async def scan_rr() -> None:
"""
job_name = "rr_scanner"
logger.info(json.dumps({"event": "job_start", "job": job_name}))
_runtime_start(job_name)
processed = 0
total: int | None = None
try:
async with async_session_factory() as db:
if not await _is_job_enabled(db, job_name):
logger.info(json.dumps({"event": "job_skipped", "job": job_name, "reason": "disabled"}))
_runtime_finish(job_name, "skipped", processed=0, total=0, message="Disabled")
return
symbols = await _get_all_tickers(db)
total = len(symbols)
_runtime_progress(job_name, processed=0, total=total)
try:
setups = await scan_all_tickers(
db, rr_threshold=settings.default_rr_threshold,
)
processed = total or 0
_runtime_finish(job_name, "completed", processed=processed, total=total, message=f"Found {len(setups)} setups")
logger.info(json.dumps({
"event": "job_complete",
"job": job_name,
"setups_found": len(setups),
}))
except Exception as exc:
_runtime_finish(job_name, "error", processed=processed, total=total, message=str(exc))
logger.error(json.dumps({
"event": "job_error",
"job": job_name,
"error_type": type(exc).__name__,
"message": str(exc),
}))
except Exception as exc:
logger.error(json.dumps({"event": "job_error", "job": job_name, "error_type": type(exc).__name__, "message": str(exc)}))
_runtime_finish(job_name, "error", processed=processed, total=total, message=str(exc))
# ---------------------------------------------------------------------------
# Job: Ticker Universe Sync
# ---------------------------------------------------------------------------
async def sync_ticker_universe() -> None:
"""Sync tracked tickers from configured default universe.
Setting key: ticker_universe_default (sp500 | nasdaq100 | nasdaq_all)
"""
job_name = "ticker_universe_sync"
logger.info(json.dumps({"event": "job_start", "job": job_name}))
_runtime_start(job_name, total=1)
try:
async with async_session_factory() as db:
if not await _is_job_enabled(db, job_name):
logger.info(json.dumps({"event": "job_skipped", "job": job_name, "reason": "disabled"}))
_runtime_finish(job_name, "skipped", processed=0, total=1, message="Disabled")
return
result = await db.execute(
select(SystemSetting).where(SystemSetting.key == "ticker_universe_default")
)
setting = result.scalar_one_or_none()
universe = (setting.value if setting else "sp500").strip().lower()
async with async_session_factory() as db:
summary = await bootstrap_universe(db, universe, prune_missing=False)
_runtime_progress(job_name, processed=1, total=1)
_runtime_finish(job_name, "completed", processed=1, total=1, message=f"Synced {universe}")
logger.info(json.dumps({
"event": "job_complete",
"job": job_name,
"universe": universe,
"summary": summary,
}))
except Exception as exc:
_runtime_finish(job_name, "error", processed=0, total=1, message=str(exc))
logger.error(json.dumps({
"event": "job_error",
"job": job_name,
@@ -427,6 +794,16 @@ def configure_scheduler() -> None:
replace_existing=True,
)
# Universe Sync — nightly
scheduler.add_job(
sync_ticker_universe,
"interval",
hours=24,
id="ticker_universe_sync",
name="Ticker Universe Sync",
replace_existing=True,
)
logger.info(
json.dumps({
"event": "scheduler_configured",
@@ -435,6 +812,7 @@ def configure_scheduler() -> None:
"sentiment_collector": {"minutes": settings.sentiment_poll_interval_minutes},
"fundamental_collector": fund_interval,
"rr_scanner": rr_interval,
"ticker_universe_sync": {"hours": 24},
},
})
)

View File

@@ -1,5 +1,7 @@
"""Admin request/response schemas."""
from typing import Literal
from pydantic import BaseModel, Field
@@ -39,3 +41,18 @@ class DataCleanupRequest(BaseModel):
class JobToggle(BaseModel):
"""Schema for enabling/disabling a scheduled job."""
enabled: bool
class RecommendationConfigUpdate(BaseModel):
high_confidence_threshold: float | None = Field(default=None, ge=0, le=100)
moderate_confidence_threshold: float | None = Field(default=None, ge=0, le=100)
confidence_diff_threshold: float | None = Field(default=None, ge=0, le=100)
signal_alignment_weight: float | None = Field(default=None, ge=0, le=1)
sr_strength_weight: float | None = Field(default=None, ge=0, le=1)
distance_penalty_factor: float | None = Field(default=None, ge=0, le=1)
momentum_technical_divergence_threshold: float | None = Field(default=None, ge=0, le=100)
fundamental_technical_divergence_threshold: float | None = Field(default=None, ge=0, le=100)
class TickerUniverseUpdate(BaseModel):
universe: Literal["sp500", "nasdaq100", "nasdaq_all"]

View File

@@ -4,7 +4,25 @@ from __future__ import annotations
from datetime import datetime
from pydantic import BaseModel
from pydantic import BaseModel, Field
class TradeTargetResponse(BaseModel):
price: float
distance_from_entry: float
distance_atr_multiple: float
rr_ratio: float
probability: float
classification: str
sr_level_id: int
sr_strength: float
class RecommendationSummaryResponse(BaseModel):
action: str
reasoning: str | None
risk_level: str | None
composite_score: float
class TradeSetupResponse(BaseModel):
@@ -19,3 +37,11 @@ class TradeSetupResponse(BaseModel):
rr_ratio: float
composite_score: float
detected_at: datetime
confidence_score: float | None = None
targets: list[TradeTargetResponse] = Field(default_factory=list)
conflict_flags: list[str] = Field(default_factory=list)
recommended_action: str | None = None
reasoning: str | None = None
risk_level: str | None = None
actual_outcome: str | None = None
recommendation_summary: RecommendationSummaryResponse | None = None

View File

@@ -3,16 +3,34 @@
from datetime import datetime, timedelta, timezone
from passlib.hash import bcrypt
from sqlalchemy import delete, select
from sqlalchemy import delete, func, select
from sqlalchemy.ext.asyncio import AsyncSession
from app.exceptions import DuplicateError, NotFoundError, ValidationError
from app.models.fundamental import FundamentalData
from app.models.ohlcv import OHLCVRecord
from app.models.score import CompositeScore, DimensionScore
from app.models.sentiment import SentimentScore
from app.models.sr_level import SRLevel
from app.models.settings import SystemSetting
from app.models.ticker import Ticker
from app.models.trade_setup import TradeSetup
from app.models.user import User
RECOMMENDATION_CONFIG_DEFAULTS: dict[str, float] = {
"recommendation_high_confidence_threshold": 70.0,
"recommendation_moderate_confidence_threshold": 50.0,
"recommendation_confidence_diff_threshold": 20.0,
"recommendation_signal_alignment_weight": 0.15,
"recommendation_sr_strength_weight": 0.20,
"recommendation_distance_penalty_factor": 0.10,
"recommendation_momentum_technical_divergence_threshold": 30.0,
"recommendation_fundamental_technical_divergence_threshold": 40.0,
}
DEFAULT_TICKER_UNIVERSE = "sp500"
SUPPORTED_TICKER_UNIVERSES = {"sp500", "nasdaq100", "nasdaq_all"}
# ---------------------------------------------------------------------------
# User management
@@ -125,6 +143,67 @@ async def update_setting(db: AsyncSession, key: str, value: str) -> SystemSettin
return setting
def _recommendation_public_to_storage_key(key: str) -> str:
return f"recommendation_{key}"
async def get_recommendation_config(db: AsyncSession) -> dict[str, float]:
result = await db.execute(
select(SystemSetting).where(SystemSetting.key.like("recommendation_%"))
)
rows = result.scalars().all()
config = dict(RECOMMENDATION_CONFIG_DEFAULTS)
for row in rows:
try:
config[row.key] = float(row.value)
except (TypeError, ValueError):
continue
return {
"high_confidence_threshold": config["recommendation_high_confidence_threshold"],
"moderate_confidence_threshold": config["recommendation_moderate_confidence_threshold"],
"confidence_diff_threshold": config["recommendation_confidence_diff_threshold"],
"signal_alignment_weight": config["recommendation_signal_alignment_weight"],
"sr_strength_weight": config["recommendation_sr_strength_weight"],
"distance_penalty_factor": config["recommendation_distance_penalty_factor"],
"momentum_technical_divergence_threshold": config["recommendation_momentum_technical_divergence_threshold"],
"fundamental_technical_divergence_threshold": config["recommendation_fundamental_technical_divergence_threshold"],
}
async def update_recommendation_config(
db: AsyncSession,
payload: dict[str, float],
) -> dict[str, float]:
for public_key, public_value in payload.items():
storage_key = _recommendation_public_to_storage_key(public_key)
await update_setting(db, storage_key, str(public_value))
return await get_recommendation_config(db)
async def get_ticker_universe_default(db: AsyncSession) -> dict[str, str]:
result = await db.execute(
select(SystemSetting).where(SystemSetting.key == "ticker_universe_default")
)
setting = result.scalar_one_or_none()
universe = setting.value if setting else DEFAULT_TICKER_UNIVERSE
if universe not in SUPPORTED_TICKER_UNIVERSES:
universe = DEFAULT_TICKER_UNIVERSE
return {"universe": universe}
async def update_ticker_universe_default(db: AsyncSession, universe: str) -> dict[str, str]:
normalised = universe.strip().lower()
if normalised not in SUPPORTED_TICKER_UNIVERSES:
supported = ", ".join(sorted(SUPPORTED_TICKER_UNIVERSES))
raise ValidationError(f"Unsupported ticker universe '{universe}'. Supported: {supported}")
await update_setting(db, "ticker_universe_default", normalised)
return {"universe": normalised}
# ---------------------------------------------------------------------------
# Data cleanup
# ---------------------------------------------------------------------------
@@ -160,23 +239,181 @@ async def cleanup_data(db: AsyncSession, older_than_days: int) -> dict[str, int]
return counts
async def get_pipeline_readiness(db: AsyncSession) -> list[dict]:
"""Return per-ticker readiness snapshot for ingestion/scoring/scanner pipeline."""
tickers_result = await db.execute(select(Ticker).order_by(Ticker.symbol.asc()))
tickers = list(tickers_result.scalars().all())
if not tickers:
return []
ticker_ids = [ticker.id for ticker in tickers]
ohlcv_stats_result = await db.execute(
select(
OHLCVRecord.ticker_id,
func.count(OHLCVRecord.id),
func.max(OHLCVRecord.date),
)
.where(OHLCVRecord.ticker_id.in_(ticker_ids))
.group_by(OHLCVRecord.ticker_id)
)
ohlcv_stats = {
ticker_id: {
"bars": int(count or 0),
"last_date": max_date.isoformat() if max_date else None,
}
for ticker_id, count, max_date in ohlcv_stats_result.all()
}
dim_rows_result = await db.execute(
select(DimensionScore).where(DimensionScore.ticker_id.in_(ticker_ids))
)
dim_map_by_ticker: dict[int, dict[str, tuple[float | None, bool]]] = {}
for row in dim_rows_result.scalars().all():
dim_map_by_ticker.setdefault(row.ticker_id, {})[row.dimension] = (row.score, row.is_stale)
sr_counts_result = await db.execute(
select(SRLevel.ticker_id, func.count(SRLevel.id))
.where(SRLevel.ticker_id.in_(ticker_ids))
.group_by(SRLevel.ticker_id)
)
sr_counts = {ticker_id: int(count or 0) for ticker_id, count in sr_counts_result.all()}
sentiment_stats_result = await db.execute(
select(
SentimentScore.ticker_id,
func.count(SentimentScore.id),
func.max(SentimentScore.timestamp),
)
.where(SentimentScore.ticker_id.in_(ticker_ids))
.group_by(SentimentScore.ticker_id)
)
sentiment_stats = {
ticker_id: {
"count": int(count or 0),
"last_at": max_ts.isoformat() if max_ts else None,
}
for ticker_id, count, max_ts in sentiment_stats_result.all()
}
fundamentals_result = await db.execute(
select(FundamentalData.ticker_id, FundamentalData.fetched_at)
.where(FundamentalData.ticker_id.in_(ticker_ids))
)
fundamentals_map = {
ticker_id: fetched_at.isoformat() if fetched_at else None
for ticker_id, fetched_at in fundamentals_result.all()
}
composites_result = await db.execute(
select(CompositeScore.ticker_id, CompositeScore.is_stale)
.where(CompositeScore.ticker_id.in_(ticker_ids))
)
composites_map = {
ticker_id: is_stale
for ticker_id, is_stale in composites_result.all()
}
setup_counts_result = await db.execute(
select(TradeSetup.ticker_id, func.count(TradeSetup.id))
.where(TradeSetup.ticker_id.in_(ticker_ids))
.group_by(TradeSetup.ticker_id)
)
setup_counts = {ticker_id: int(count or 0) for ticker_id, count in setup_counts_result.all()}
readiness: list[dict] = []
for ticker in tickers:
ohlcv = ohlcv_stats.get(ticker.id, {"bars": 0, "last_date": None})
ohlcv_bars = int(ohlcv["bars"])
ohlcv_last_date = ohlcv["last_date"]
dim_map = dim_map_by_ticker.get(ticker.id, {})
sr_count = int(sr_counts.get(ticker.id, 0))
sentiment = sentiment_stats.get(ticker.id, {"count": 0, "last_at": None})
sentiment_count = int(sentiment["count"])
sentiment_last_at = sentiment["last_at"]
fundamentals_fetched_at = fundamentals_map.get(ticker.id)
has_fundamentals = ticker.id in fundamentals_map
has_composite = ticker.id in composites_map
composite_stale = composites_map.get(ticker.id)
setup_count = int(setup_counts.get(ticker.id, 0))
missing_reasons: list[str] = []
if ohlcv_bars < 30:
missing_reasons.append("insufficient_ohlcv_bars(<30)")
if "technical" not in dim_map or dim_map["technical"][0] is None:
missing_reasons.append("missing_technical")
if "momentum" not in dim_map or dim_map["momentum"][0] is None:
missing_reasons.append("missing_momentum")
if "sr_quality" not in dim_map or dim_map["sr_quality"][0] is None:
missing_reasons.append("missing_sr_quality")
if sentiment_count == 0:
missing_reasons.append("missing_sentiment")
if not has_fundamentals:
missing_reasons.append("missing_fundamentals")
if not has_composite:
missing_reasons.append("missing_composite")
if setup_count == 0:
missing_reasons.append("missing_trade_setup")
readiness.append(
{
"symbol": ticker.symbol,
"ohlcv_bars": ohlcv_bars,
"ohlcv_last_date": ohlcv_last_date,
"dimensions": {
"technical": dim_map.get("technical", (None, True))[0],
"sr_quality": dim_map.get("sr_quality", (None, True))[0],
"sentiment": dim_map.get("sentiment", (None, True))[0],
"fundamental": dim_map.get("fundamental", (None, True))[0],
"momentum": dim_map.get("momentum", (None, True))[0],
},
"sentiment_count": sentiment_count,
"sentiment_last_at": sentiment_last_at,
"has_fundamentals": has_fundamentals,
"fundamentals_fetched_at": fundamentals_fetched_at,
"sr_level_count": sr_count,
"has_composite": has_composite,
"composite_stale": composite_stale,
"trade_setup_count": setup_count,
"missing_reasons": missing_reasons,
"ready_for_scanner": ohlcv_bars >= 15 and sr_count > 0,
}
)
return readiness
# ---------------------------------------------------------------------------
# Job control (placeholder — scheduler is Task 12.1)
# ---------------------------------------------------------------------------
VALID_JOB_NAMES = {"data_collector", "sentiment_collector", "fundamental_collector", "rr_scanner"}
VALID_JOB_NAMES = {
"data_collector",
"sentiment_collector",
"fundamental_collector",
"rr_scanner",
"ticker_universe_sync",
}
JOB_LABELS = {
"data_collector": "Data Collector (OHLCV)",
"sentiment_collector": "Sentiment Collector",
"fundamental_collector": "Fundamental Collector",
"rr_scanner": "R:R Scanner",
"ticker_universe_sync": "Ticker Universe Sync",
}
async def list_jobs(db: AsyncSession) -> list[dict]:
"""Return status of all scheduled jobs."""
from app.scheduler import scheduler
from app.scheduler import get_job_runtime_snapshot, scheduler
jobs_out = []
for name in sorted(VALID_JOB_NAMES):
@@ -194,12 +431,23 @@ async def list_jobs(db: AsyncSession) -> list[dict]:
if job and job.next_run_time:
next_run = job.next_run_time.isoformat()
runtime = get_job_runtime_snapshot(name)
jobs_out.append({
"name": name,
"label": JOB_LABELS.get(name, name),
"enabled": enabled,
"next_run_at": next_run,
"registered": job is not None,
"running": bool(runtime.get("running", False)),
"runtime_status": runtime.get("status"),
"runtime_processed": runtime.get("processed"),
"runtime_total": runtime.get("total"),
"runtime_progress_pct": runtime.get("progress_pct"),
"runtime_current_ticker": runtime.get("current_ticker"),
"runtime_started_at": runtime.get("started_at"),
"runtime_finished_at": runtime.get("finished_at"),
"runtime_message": runtime.get("message"),
})
return jobs_out
@@ -213,7 +461,26 @@ async def trigger_job(db: AsyncSession, job_name: str) -> dict[str, str]:
if job_name not in VALID_JOB_NAMES:
raise ValidationError(f"Unknown job: {job_name}. Valid jobs: {', '.join(sorted(VALID_JOB_NAMES))}")
from app.scheduler import scheduler
from app.scheduler import get_job_runtime_snapshot, scheduler
runtime_target = get_job_runtime_snapshot(job_name)
if runtime_target.get("running"):
return {
"job": job_name,
"status": "busy",
"message": f"Job '{job_name}' is already running",
}
all_runtime = get_job_runtime_snapshot()
for running_name, runtime in all_runtime.items():
if running_name == job_name:
continue
if runtime.get("running"):
return {
"job": job_name,
"status": "blocked",
"message": f"Cannot trigger '{job_name}' while '{running_name}' is running",
}
job = scheduler.get_job(job_name)
if job is None:

View File

@@ -9,10 +9,11 @@ import logging
from dataclasses import dataclass
from datetime import date, timedelta
from sqlalchemy import select
from sqlalchemy import func, select
from sqlalchemy.ext.asyncio import AsyncSession
from app.exceptions import NotFoundError, ProviderError, RateLimitError
from app.models.ohlcv import OHLCVRecord
from app.models.settings import IngestionProgress
from app.models.ticker import Ticker
from app.providers.protocol import MarketDataProvider
@@ -50,6 +51,13 @@ async def _get_progress(db: AsyncSession, ticker_id: int) -> IngestionProgress |
return result.scalar_one_or_none()
async def _get_ohlcv_bar_count(db: AsyncSession, ticker_id: int) -> int:
result = await db.execute(
select(func.count()).select_from(OHLCVRecord).where(OHLCVRecord.ticker_id == ticker_id)
)
return int(result.scalar() or 0)
async def _update_progress(
db: AsyncSession, ticker_id: int, last_date: date
) -> None:
@@ -84,10 +92,17 @@ async def fetch_and_ingest(
if end_date is None:
end_date = date.today()
# Resolve start_date: use progress resume or default to 1 year ago
# Resolve start_date: use progress resume or default to 1 year ago.
# If we have too little history, force a one-year backfill even if
# ingestion progress exists (upsert makes this safe and idempotent).
if start_date is None:
progress = await _get_progress(db, ticker.id)
if progress is not None:
bar_count = await _get_ohlcv_bar_count(db, ticker.id)
minimum_backfill_bars = 200
if bar_count < minimum_backfill_bars:
start_date = end_date - timedelta(days=365)
elif progress is not None:
start_date = progress.last_ingested_date + timedelta(days=1)
else:
start_date = end_date - timedelta(days=365)

View File

@@ -0,0 +1,499 @@
from __future__ import annotations
import json
import logging
from typing import Any
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession
from app.models.settings import SystemSetting
from app.models.sr_level import SRLevel
from app.models.ticker import Ticker
from app.models.trade_setup import TradeSetup
logger = logging.getLogger(__name__)
# Default tuning knobs for the recommendation engine. Each key may be
# overridden by a SystemSetting row with the same key (values parsed as
# floats by get_recommendation_config); invalid overrides are ignored.
DEFAULT_RECOMMENDATION_CONFIG: dict[str, float] = {
    "recommendation_high_confidence_threshold": 70.0,
    "recommendation_moderate_confidence_threshold": 50.0,
    "recommendation_confidence_diff_threshold": 20.0,
    "recommendation_signal_alignment_weight": 0.15,
    "recommendation_sr_strength_weight": 0.20,
    "recommendation_distance_penalty_factor": 0.10,
    "recommendation_momentum_technical_divergence_threshold": 30.0,
    "recommendation_fundamental_technical_divergence_threshold": 40.0,
}
def _clamp(value: float, low: float, high: float) -> float:
return max(low, min(high, value))
def _sentiment_value(sentiment_classification: str | None) -> str | None:
if sentiment_classification is None:
return None
return sentiment_classification.strip().lower()
def check_signal_alignment(
    direction: str,
    dimension_scores: dict[str, float],
    sentiment_classification: str | None,
) -> tuple[bool, str]:
    """Return (aligned, message) for *direction*.

    Alignment requires at least two of: technical, momentum, and sentiment
    voting in the trade's favour (scores >60 / <40, sentiment label match).
    Missing dimension scores default to the neutral 50.0.
    """
    tech = float(dimension_scores.get("technical", 50.0))
    mom = float(dimension_scores.get("momentum", 50.0))
    label = None
    if sentiment_classification is not None:
        label = sentiment_classification.strip().lower()

    if direction == "long":
        votes = (tech > 60, mom > 60, label == "bullish")
        if sum(votes) >= 2:
            return True, "Technical, momentum, and/or sentiment align with LONG direction."
        return False, "Signals are mixed for LONG direction."

    votes = (tech < 40, mom < 40, label == "bearish")
    if sum(votes) >= 2:
        return True, "Technical, momentum, and/or sentiment align with SHORT direction."
    return False, "Signals are mixed for SHORT direction."
class SignalConflictDetector:
    """Flags contradictions between scoring dimensions and sentiment."""

    def detect_conflicts(
        self,
        dimension_scores: dict[str, float],
        sentiment_classification: str | None,
        config: dict[str, float] | None = None,
    ) -> list[str]:
        """Return human-readable conflict descriptions (empty when consistent).

        Divergence thresholds come from *config* (falling back to
        DEFAULT_RECOMMENDATION_CONFIG when no config is supplied).
        """
        cfg = config or DEFAULT_RECOMMENDATION_CONFIG
        tech = float(dimension_scores.get("technical", 50.0))
        mom = float(dimension_scores.get("momentum", 50.0))
        fund = float(dimension_scores.get("fundamental", 50.0))
        label = None if sentiment_classification is None else sentiment_classification.strip().lower()
        mt_limit = float(cfg.get("recommendation_momentum_technical_divergence_threshold", 30.0))
        ft_limit = float(cfg.get("recommendation_fundamental_technical_divergence_threshold", 40.0))

        flags: list[str] = []
        if label == "bearish" and tech > 60:
            flags.append(
                f"sentiment-technical: Bearish sentiment conflicts with bullish technical ({tech:.0f})"
            )
        if label == "bullish" and tech < 40:
            flags.append(
                f"sentiment-technical: Bullish sentiment conflicts with bearish technical ({tech:.0f})"
            )
        momentum_gap = abs(mom - tech)
        if momentum_gap > mt_limit:
            flags.append(
                "momentum-technical: "
                f"Momentum ({mom:.0f}) diverges from technical ({tech:.0f}) by {momentum_gap:.0f} points"
            )
        if label == "bearish" and mom > 60:
            flags.append(
                f"sentiment-momentum: Bearish sentiment conflicts with momentum ({mom:.0f})"
            )
        if label == "bullish" and mom < 40:
            flags.append(
                f"sentiment-momentum: Bullish sentiment conflicts with momentum ({mom:.0f})"
            )
        fundamental_gap = abs(fund - tech)
        if fundamental_gap > ft_limit:
            flags.append(
                "fundamental-technical: "
                f"Fundamental ({fund:.0f}) diverges significantly from technical ({tech:.0f})"
            )
        return flags
class DirectionAnalyzer:
    """Scores how confidently the available signals support a trade direction."""

    def calculate_confidence(
        self,
        direction: str,
        dimension_scores: dict[str, float],
        sentiment_classification: str | None,
        conflicts: list[str] | None = None,
    ) -> float:
        """Return a 0-100 confidence for *direction*.

        Starts from a neutral 50 and adds tiered bonuses for supporting
        technical/momentum/sentiment/fundamental readings, then subtracts a
        fixed penalty per detected conflict flag.
        """
        tech = float(dimension_scores.get("technical", 50.0))
        mom = float(dimension_scores.get("momentum", 50.0))
        fund = float(dimension_scores.get("fundamental", 50.0))
        label = None if sentiment_classification is None else sentiment_classification.strip().lower()

        score = 50.0
        if direction == "long":
            if tech > 70:
                score += 25.0
            elif tech > 60:
                score += 15.0
            if mom > 70:
                score += 20.0
            elif mom > 60:
                score += 15.0
            if label == "bullish":
                score += 15.0
            elif label == "neutral":
                score += 5.0
            if fund > 60:
                score += 10.0
        else:
            if tech < 30:
                score += 25.0
            elif tech < 40:
                score += 15.0
            if mom < 30:
                score += 20.0
            elif mom < 40:
                score += 15.0
            if label == "bearish":
                score += 15.0
            elif label == "neutral":
                score += 5.0
            if fund < 40:
                score += 10.0

        # First matching marker wins per conflict, mirroring the tiered penalties.
        penalty_table = (
            ("sentiment-technical", 20.0),
            ("momentum-technical", 15.0),
            ("sentiment-momentum", 20.0),
            ("fundamental-technical", 10.0),
        )
        for conflict in conflicts or []:
            for marker, penalty in penalty_table:
                if marker in conflict:
                    score -= penalty
                    break
        return max(0.0, min(100.0, score))
class TargetGenerator:
    """Builds ranked price targets from stored support/resistance levels."""

    def generate_targets(
        self,
        direction: str,
        entry_price: float,
        stop_loss: float,
        sr_levels: list[SRLevel],
        atr_value: float,
    ) -> list[dict[str, Any]]:
        """Return up to five candidate targets, ordered by distance from entry.

        Candidates must sit on the profitable side of the entry, at least
        1 ATR away, and inside a volatility-relative distance cap. The top
        five by quality (R:R, level strength, proximity) are kept and then
        labelled Conservative/Moderate/Aggressive by distance rank.
        Returns [] when ATR or risk is non-positive or nothing qualifies.
        """
        if atr_value <= 0:
            return []
        risk = abs(entry_price - stop_loss)
        if risk <= 0:
            return []

        # Volatility-relative cap on how far away a target may sit (in ATRs).
        atr_fraction = atr_value / entry_price if entry_price > 0 else 0.0
        cap: float | None = None
        if atr_fraction > 0.05:
            cap = 10.0
        elif atr_fraction < 0.02:
            cap = 3.0

        scored: list[dict[str, Any]] = []
        for level in sr_levels:
            if direction == "long":
                usable = level.type == "resistance" and level.price_level > entry_price
            else:
                usable = level.type == "support" and level.price_level < entry_price
            if not usable:
                continue
            gap = abs(level.price_level - entry_price)
            gap_in_atr = gap / atr_value
            if gap_in_atr < 1.0:
                continue
            if cap is not None and gap_in_atr > cap:
                continue
            rr_ratio = abs(level.price_level - entry_price) / risk
            quality = (
                0.35 * min(rr_ratio / 10.0, 1.0)
                + 0.35 * (max(0, min(100, level.strength)) / 100.0)
                + 0.30 * (1.0 - min(gap / entry_price, 1.0))
            )
            scored.append(
                {
                    "price": float(level.price_level),
                    "distance_from_entry": float(gap),
                    "distance_atr_multiple": float(gap_in_atr),
                    "rr_ratio": float(rr_ratio),
                    "classification": "Moderate",
                    "sr_level_id": int(level.id),
                    "sr_strength": float(level.strength),
                    "quality": float(quality),
                }
            )

        # Keep the best five by quality, then present them nearest-first.
        scored.sort(key=lambda item: item["quality"], reverse=True)
        chosen = scored[:5]
        chosen.sort(key=lambda item: item["distance_from_entry"])
        if not chosen:
            return []

        total = len(chosen)
        for position, target in enumerate(chosen):
            if total <= 2:
                target["classification"] = "Conservative" if position == 0 else "Aggressive"
            elif position <= 1:
                target["classification"] = "Conservative"
            elif position >= total - 2:
                target["classification"] = "Aggressive"
            else:
                target["classification"] = "Moderate"
            target.pop("quality", None)
        return chosen
class ProbabilityEstimator:
    """Estimates the probability that a generated target will be reached."""

    def estimate_probability(
        self,
        target: dict[str, Any],
        dimension_scores: dict[str, float],
        sentiment_classification: str | None,
        direction: str,
        config: dict[str, float],
    ) -> float:
        """Return a probability (percent, 2 decimals) for *target*.

        Combines a tier base rate with S/R-strength, signal-alignment,
        volatility, and distance adjustments (weights from *config*), clamps
        to [10, 90], then forces each tier into its advertised band.
        """
        tier = str(target.get("classification", "Moderate"))
        level_strength = float(target.get("sr_strength", 50.0))
        atr_gap = float(target.get("distance_atr_multiple", 1.0))

        base_by_tier = {"Conservative": 70.0, "Aggressive": 40.0}
        base = base_by_tier.get(tier, 55.0)

        if level_strength >= 80:
            strength_bonus = 15.0
        elif level_strength >= 60:
            strength_bonus = 10.0
        elif level_strength >= 40:
            strength_bonus = 5.0
        else:
            strength_bonus = -10.0

        tech = float(dimension_scores.get("technical", 50.0))
        mom = float(dimension_scores.get("momentum", 50.0))
        label = None if sentiment_classification is None else sentiment_classification.strip().lower()

        alignment = 0.0
        if direction == "long":
            if tech > 60 and (label == "bullish" or mom > 60):
                alignment = 15.0
            elif tech < 40 or (label == "bearish" and mom < 40):
                alignment = -15.0
        else:
            if tech < 40 and (label == "bearish" or mom < 40):
                alignment = 15.0
            elif tech > 60 or (label == "bullish" and mom > 60):
                alignment = -15.0

        # NOTE(review): both the far (>5 ATR) and near (<2 ATR) branches add
        # the same +5 — if the far branch was meant as a penalty this should
        # be -5. Preserved as-is pending confirmation.
        volatility = 0.0
        if atr_gap > 5:
            volatility = 5.0
        elif atr_gap < 2:
            volatility = 5.0

        signal_weight = float(config.get("recommendation_signal_alignment_weight", 0.15))
        sr_weight = float(config.get("recommendation_sr_strength_weight", 0.20))
        distance_penalty = float(config.get("recommendation_distance_penalty_factor", 0.10))

        probability = (
            base
            + strength_bonus * (sr_weight / 0.20)
            + alignment * (signal_weight / 0.15)
            + volatility
            - distance_penalty * max(atr_gap - 1.0, 0.0) * 2.0
        )
        probability = max(10.0, min(90.0, probability))

        # Keep each tier inside its advertised probability band.
        if tier == "Conservative":
            probability = max(probability, 61.0)
        elif tier == "Moderate":
            probability = max(40.0, min(70.0, probability))
        elif tier == "Aggressive":
            probability = min(probability, 49.0)
        return round(probability, 2)
# Module-level singleton instances: the analyzer classes hold no per-call
# state, so one shared instance of each is reused by enhance_trade_setup.
signal_conflict_detector = SignalConflictDetector()
direction_analyzer = DirectionAnalyzer()
target_generator = TargetGenerator()
probability_estimator = ProbabilityEstimator()
async def get_recommendation_config(db: AsyncSession) -> dict[str, float]:
    """Load recommendation_* settings from the DB on top of the defaults.

    Stored values that cannot be parsed as floats are logged and skipped,
    leaving the default for that key in place.
    """
    merged: dict[str, float] = dict(DEFAULT_RECOMMENDATION_CONFIG)
    result = await db.execute(
        select(SystemSetting).where(SystemSetting.key.like("recommendation_%"))
    )
    for setting in result.scalars().all():
        try:
            merged[setting.key] = float(setting.value)
        except (TypeError, ValueError):
            logger.warning("Invalid recommendation setting value for %s: %s", setting.key, setting.value)
    return merged
def _risk_level_from_conflicts(conflicts: list[str]) -> str:
if not conflicts:
return "Low"
severe = [c for c in conflicts if "sentiment-technical" in c or "sentiment-momentum" in c]
if len(severe) >= 2 or len(conflicts) >= 3:
return "High"
return "Medium"
def _choose_recommended_action(
long_confidence: float,
short_confidence: float,
config: dict[str, float],
) -> str:
high = float(config.get("recommendation_high_confidence_threshold", 70.0))
moderate = float(config.get("recommendation_moderate_confidence_threshold", 50.0))
diff = float(config.get("recommendation_confidence_diff_threshold", 20.0))
if long_confidence >= high and (long_confidence - short_confidence) >= diff:
return "LONG_HIGH"
if short_confidence >= high and (short_confidence - long_confidence) >= diff:
return "SHORT_HIGH"
if long_confidence >= moderate and (long_confidence - short_confidence) >= diff:
return "LONG_MODERATE"
if short_confidence >= moderate and (short_confidence - long_confidence) >= diff:
return "SHORT_MODERATE"
return "NEUTRAL"
def _build_reasoning(
    direction: str,
    confidence: float,
    conflicts: list[str],
    dimension_scores: dict[str, float],
    sentiment_classification: str | None,
    action: str,
) -> str:
    """Compose a short human-readable justification for the recommendation."""
    aligned, alignment_text = check_signal_alignment(
        direction,
        dimension_scores,
        sentiment_classification,
    )
    label = _sentiment_value(sentiment_classification) or "unknown"
    tech = float(dimension_scores.get("technical", 50.0))
    mom = float(dimension_scores.get("momentum", 50.0))
    summary = (
        f"{direction.upper()} confidence {confidence:.1f}% with "
        f"{'aligned' if aligned else 'mixed'} signals "
        f"(technical={tech:.0f}, momentum={mom:.0f}, sentiment={label})."
    )
    if conflicts:
        return (
            f"{summary} {alignment_text} Detected {len(conflicts)} conflict(s), "
            f"so recommendation is risk-adjusted. Action={action}."
        )
    return f"{summary} {alignment_text} No major conflicts detected. Action={action}."
async def enhance_trade_setup(
    db: AsyncSession,
    ticker: Ticker,
    setup: TradeSetup,
    dimension_scores: dict[str, float],
    sr_levels: list[SRLevel],
    sentiment_classification: str | None,
    atr_value: float,
) -> TradeSetup:
    """Enrich a raw TradeSetup in place with recommendation metadata.

    Loads the recommendation config, detects signal conflicts, computes long
    and short confidence, generates probability-scored price targets, then
    writes confidence_score, targets_json, conflict_flags_json,
    recommended_action, reasoning, and risk_level onto *setup* and returns it.

    NOTE(review): the `ticker` parameter is currently unused in this body —
    confirm whether it is kept for interface stability or can be dropped.
    """
    config = await get_recommendation_config(db)
    conflicts = signal_conflict_detector.detect_conflicts(
        dimension_scores=dimension_scores,
        sentiment_classification=sentiment_classification,
        config=config,
    )
    # Confidence is computed for BOTH directions: the setup's own direction
    # drives the stored confidence, while the pair feeds the action chooser.
    long_confidence = direction_analyzer.calculate_confidence(
        direction="long",
        dimension_scores=dimension_scores,
        sentiment_classification=sentiment_classification,
        conflicts=conflicts,
    )
    short_confidence = direction_analyzer.calculate_confidence(
        direction="short",
        dimension_scores=dimension_scores,
        sentiment_classification=sentiment_classification,
        conflicts=conflicts,
    )
    direction = setup.direction.lower()
    confidence = long_confidence if direction == "long" else short_confidence
    targets = target_generator.generate_targets(
        direction=direction,
        entry_price=setup.entry_price,
        stop_loss=setup.stop_loss,
        sr_levels=sr_levels,
        atr_value=atr_value,
    )
    # Annotate each target dict with an estimated hit probability.
    for target in targets:
        target["probability"] = probability_estimator.estimate_probability(
            target=target,
            dimension_scores=dimension_scores,
            sentiment_classification=sentiment_classification,
            direction=direction,
            config=config,
        )
    # A thin target set is itself a risk signal; append (not mutate) so the
    # list already serialized by detect_conflicts callers is untouched.
    if len(targets) < 3:
        conflicts = [*conflicts, "target-availability: Fewer than 3 valid S/R targets available"]
    action = _choose_recommended_action(long_confidence, short_confidence, config)
    risk_level = _risk_level_from_conflicts(conflicts)
    setup.confidence_score = round(confidence, 2)
    setup.targets_json = json.dumps(targets)
    setup.conflict_flags_json = json.dumps(conflicts)
    setup.recommended_action = action
    setup.reasoning = _build_reasoning(
        direction=direction,
        confidence=confidence,
        conflicts=conflicts,
        dimension_scores=dimension_scores,
        sentiment_classification=sentiment_classification,
        action=action,
    )
    setup.risk_level = risk_level
    return setup

View File

@@ -3,24 +3,27 @@
Scans tracked tickers for asymmetric risk-reward trade setups.
Long: target = nearest SR above, stop = entry - ATR × multiplier.
Short: target = nearest SR below, stop = entry + ATR × multiplier.
Filters by configurable R:R threshold (default 3:1).
Filters by configurable R:R threshold (default 1.5).
"""
from __future__ import annotations
import json
import logging
from datetime import datetime, timezone
from sqlalchemy import delete, select
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession
from app.exceptions import NotFoundError
from app.models.score import CompositeScore
from app.models.score import CompositeScore, DimensionScore
from app.models.sentiment import SentimentScore
from app.models.sr_level import SRLevel
from app.models.ticker import Ticker
from app.models.trade_setup import TradeSetup
from app.services.indicator_service import _extract_ohlcv, compute_atr
from app.services.price_service import query_ohlcv
from app.services.recommendation_service import enhance_trade_setup
logger = logging.getLogger(__name__)
@@ -45,70 +48,63 @@ def _compute_quality_score(
w_proximity: float = 0.30,
rr_cap: float = 10.0,
) -> float:
"""Compute a quality score for a candidate S/R level.
Combines normalized R:R ratio, level strength, and proximity to entry
into a single 01 score using configurable weights.
"""
"""Compute a quality score for a candidate S/R level."""
norm_rr = min(rr / rr_cap, 1.0)
norm_strength = strength / 100.0
norm_proximity = 1.0 - min(distance / entry_price, 1.0)
return w_rr * norm_rr + w_strength * norm_strength + w_proximity * norm_proximity
async def _get_dimension_scores(db: AsyncSession, ticker_id: int) -> dict[str, float]:
result = await db.execute(
select(DimensionScore).where(DimensionScore.ticker_id == ticker_id)
)
rows = result.scalars().all()
return {row.dimension: float(row.score) for row in rows}
async def _get_latest_sentiment(db: AsyncSession, ticker_id: int) -> str | None:
result = await db.execute(
select(SentimentScore)
.where(SentimentScore.ticker_id == ticker_id)
.order_by(SentimentScore.timestamp.desc())
.limit(1)
)
row = result.scalar_one_or_none()
return row.classification if row else None
async def scan_ticker(
db: AsyncSession,
symbol: str,
rr_threshold: float = 1.5,
atr_multiplier: float = 1.5,
) -> list[TradeSetup]:
"""Scan a single ticker for trade setups meeting the R:R threshold.
1. Fetch OHLCV data and compute ATR.
2. Fetch SR levels.
3. Compute long and short setups.
4. Filter by R:R threshold.
5. Delete old setups for this ticker and persist new ones.
Returns list of persisted TradeSetup models.
"""
"""Scan a single ticker for trade setups meeting the R:R threshold."""
ticker = await _get_ticker(db, symbol)
# Fetch OHLCV
records = await query_ohlcv(db, symbol)
if not records or len(records) < 15:
logger.info(
"Skipping %s: insufficient OHLCV data (%d bars, need 15+)",
symbol, len(records),
)
# Clear any stale setups
await db.execute(
delete(TradeSetup).where(TradeSetup.ticker_id == ticker.id)
)
return []
_, highs, lows, closes, _ = _extract_ohlcv(records)
entry_price = closes[-1]
# Compute ATR
try:
atr_result = compute_atr(highs, lows, closes)
atr_value = atr_result["atr"]
except Exception:
logger.info("Skipping %s: cannot compute ATR", symbol)
await db.execute(
delete(TradeSetup).where(TradeSetup.ticker_id == ticker.id)
)
return []
if atr_value <= 0:
logger.info("Skipping %s: ATR is zero or negative", symbol)
await db.execute(
delete(TradeSetup).where(TradeSetup.ticker_id == ticker.id)
)
return []
# Fetch SR levels from DB (already computed by sr_service)
sr_result = await db.execute(
select(SRLevel).where(SRLevel.ticker_id == ticker.id)
)
@@ -116,9 +112,6 @@ async def scan_ticker(
if not sr_levels:
logger.info("Skipping %s: no SR levels available", symbol)
await db.execute(
delete(TradeSetup).where(TradeSetup.ticker_id == ticker.id)
)
return []
levels_above = sorted(
@@ -131,18 +124,18 @@ async def scan_ticker(
reverse=True,
)
# Get composite score for this ticker
comp_result = await db.execute(
select(CompositeScore).where(CompositeScore.ticker_id == ticker.id)
)
comp = comp_result.scalar_one_or_none()
composite_score = comp.score if comp else 0.0
dimension_scores = await _get_dimension_scores(db, ticker.id)
sentiment_classification = await _get_latest_sentiment(db, ticker.id)
now = datetime.now(timezone.utc)
setups: list[TradeSetup] = []
# Long setup: target = nearest SR above, stop = entry - ATR × multiplier
# Check all resistance levels above and pick the one with the best quality score
if levels_above:
stop = entry_price - (atr_value * atr_multiplier)
risk = entry_price - stop
@@ -152,15 +145,18 @@ async def scan_ticker(
best_candidate_target = 0.0
for lv in levels_above:
reward = lv.price_level - entry_price
if reward > 0:
if reward <= 0:
continue
rr = reward / risk
if rr >= rr_threshold:
if rr < rr_threshold:
continue
distance = lv.price_level - entry_price
quality = _compute_quality_score(rr, lv.strength, distance, entry_price)
if quality > best_quality:
best_quality = quality
best_candidate_rr = rr
best_candidate_target = lv.price_level
if best_candidate_rr > 0:
setups.append(TradeSetup(
ticker_id=ticker.id,
@@ -173,8 +169,6 @@ async def scan_ticker(
detected_at=now,
))
# Short setup: target = nearest SR below, stop = entry + ATR × multiplier
# Check all support levels below and pick the one with the best quality score
if levels_below:
stop = entry_price + (atr_value * atr_multiplier)
risk = stop - entry_price
@@ -184,15 +178,18 @@ async def scan_ticker(
best_candidate_target = 0.0
for lv in levels_below:
reward = entry_price - lv.price_level
if reward > 0:
if reward <= 0:
continue
rr = reward / risk
if rr >= rr_threshold:
if rr < rr_threshold:
continue
distance = entry_price - lv.price_level
quality = _compute_quality_score(rr, lv.strength, distance, entry_price)
if quality > best_quality:
best_quality = quality
best_candidate_rr = rr
best_candidate_target = lv.price_level
if best_candidate_rr > 0:
setups.append(TradeSetup(
ticker_id=ticker.id,
@@ -205,20 +202,32 @@ async def scan_ticker(
detected_at=now,
))
# Delete old setups for this ticker, persist new ones
await db.execute(
delete(TradeSetup).where(TradeSetup.ticker_id == ticker.id)
)
enhanced_setups: list[TradeSetup] = []
for setup in setups:
try:
enhanced = await enhance_trade_setup(
db=db,
ticker=ticker,
setup=setup,
dimension_scores=dimension_scores,
sr_levels=sr_levels,
sentiment_classification=sentiment_classification,
atr_value=atr_value,
)
enhanced_setups.append(enhanced)
except Exception:
logger.exception("Error enhancing setup for %s (%s)", ticker.symbol, setup.direction)
enhanced_setups.append(setup)
for setup in enhanced_setups:
db.add(setup)
await db.commit()
# Refresh to get IDs
for s in setups:
for s in enhanced_setups:
await db.refresh(s)
return setups
return enhanced_setups
async def scan_all_tickers(
@@ -226,11 +235,7 @@ async def scan_all_tickers(
rr_threshold: float = 1.5,
atr_multiplier: float = 1.5,
) -> list[TradeSetup]:
"""Scan all tracked tickers for trade setups.
Processes each ticker independently — one failure doesn't stop others.
Returns all setups found across all tickers.
"""
"""Scan all tracked tickers for trade setups."""
result = await db.execute(select(Ticker).order_by(Ticker.symbol))
tickers = list(result.scalars().all())
@@ -250,29 +255,86 @@ async def scan_all_tickers(
async def get_trade_setups(
db: AsyncSession,
direction: str | None = None,
min_confidence: float | None = None,
recommended_action: str | None = None,
symbol: str | None = None,
) -> list[dict]:
"""Get all stored trade setups, optionally filtered by direction.
Returns dicts sorted by R:R desc, secondary composite desc.
Each dict includes the ticker symbol.
"""
"""Get latest stored trade setups, optionally filtered."""
stmt = (
select(TradeSetup, Ticker.symbol)
.join(Ticker, TradeSetup.ticker_id == Ticker.id)
)
if direction is not None:
stmt = stmt.where(TradeSetup.direction == direction.lower())
if symbol is not None:
stmt = stmt.where(Ticker.symbol == symbol.strip().upper())
if min_confidence is not None:
stmt = stmt.where(TradeSetup.confidence_score >= min_confidence)
if recommended_action is not None:
stmt = stmt.where(TradeSetup.recommended_action == recommended_action)
stmt = stmt.order_by(
TradeSetup.rr_ratio.desc(),
TradeSetup.composite_score.desc(),
)
stmt = stmt.order_by(TradeSetup.detected_at.desc(), TradeSetup.id.desc())
result = await db.execute(stmt)
rows = result.all()
return [
{
latest_by_key: dict[tuple[str, str], tuple[TradeSetup, str]] = {}
for setup, ticker_symbol in rows:
dedupe_key = (ticker_symbol, setup.direction)
if dedupe_key not in latest_by_key:
latest_by_key[dedupe_key] = (setup, ticker_symbol)
latest_rows = list(latest_by_key.values())
latest_rows.sort(
key=lambda row: (
row[0].confidence_score if row[0].confidence_score is not None else -1.0,
row[0].rr_ratio,
row[0].composite_score,
),
reverse=True,
)
return [_trade_setup_to_dict(setup, ticker_symbol) for setup, ticker_symbol in latest_rows]
async def get_trade_setup_history(
db: AsyncSession,
symbol: str,
) -> list[dict]:
"""Get full recommendation history for a symbol (newest first)."""
stmt = (
select(TradeSetup, Ticker.symbol)
.join(Ticker, TradeSetup.ticker_id == Ticker.id)
.where(Ticker.symbol == symbol.strip().upper())
.order_by(TradeSetup.detected_at.desc(), TradeSetup.id.desc())
)
result = await db.execute(stmt)
rows = result.all()
return [_trade_setup_to_dict(setup, ticker_symbol) for setup, ticker_symbol in rows]
def _trade_setup_to_dict(setup: TradeSetup, symbol: str) -> dict:
targets: list[dict] = []
conflicts: list[str] = []
if setup.targets_json:
try:
parsed_targets = json.loads(setup.targets_json)
if isinstance(parsed_targets, list):
targets = parsed_targets
except (TypeError, ValueError):
targets = []
if setup.conflict_flags_json:
try:
parsed_conflicts = json.loads(setup.conflict_flags_json)
if isinstance(parsed_conflicts, list):
conflicts = [str(item) for item in parsed_conflicts]
except (TypeError, ValueError):
conflicts = []
return {
"id": setup.id,
"symbol": symbol,
"direction": setup.direction,
@@ -282,6 +344,11 @@ async def get_trade_setups(
"rr_ratio": setup.rr_ratio,
"composite_score": setup.composite_score,
"detected_at": setup.detected_at,
"confidence_score": setup.confidence_score,
"targets": targets,
"conflict_flags": conflicts,
"recommended_action": setup.recommended_action,
"reasoning": setup.reasoning,
"risk_level": setup.risk_level,
"actual_outcome": setup.actual_outcome,
}
for setup, symbol in rows
]

View File

@@ -0,0 +1,405 @@
"""Ticker universe discovery and bootstrap service.
Provides a minimal, provider-backed way to populate tracked tickers from
well-known universes (S&P 500, NASDAQ-100, NASDAQ All).
"""
from __future__ import annotations
import json
import logging
import os
import re
from collections.abc import Iterable
from datetime import datetime, timezone
from pathlib import Path
import httpx
from sqlalchemy import delete, select
from sqlalchemy.ext.asyncio import AsyncSession
from app.config import settings
from app.exceptions import ProviderError, ValidationError
from app.models.settings import SystemSetting
from app.models.ticker import Ticker
logger = logging.getLogger(__name__)
SUPPORTED_UNIVERSES = {"sp500", "nasdaq100", "nasdaq_all"}
_SYMBOL_PATTERN = re.compile(r"^[A-Z0-9-]{1,10}$")
_SEED_UNIVERSES: dict[str, list[str]] = {
"sp500": [
"AAPL", "MSFT", "NVDA", "AMZN", "META", "GOOGL", "GOOG", "BRK-B", "TSLA", "JPM",
"V", "MA", "UNH", "XOM", "LLY", "AVGO", "COST", "PG", "JNJ", "HD", "MRK", "BAC",
"ABBV", "PEP", "KO", "ADBE", "NFLX", "CRM", "CSCO", "WMT", "AMD", "TMO", "MCD",
"ORCL", "ACN", "CVX", "LIN", "DHR", "ABT", "QCOM", "TXN", "PM", "DIS", "INTU",
],
"nasdaq100": [
"AAPL", "MSFT", "NVDA", "AMZN", "META", "GOOGL", "GOOG", "TSLA", "AVGO", "COST",
"NFLX", "ADBE", "CSCO", "AMD", "INTU", "QCOM", "AMGN", "TXN", "INTC", "BKNG", "GILD",
"ISRG", "MDLZ", "ADP", "LRCX", "ADI", "PANW", "SNPS", "CDNS", "KLAC", "MELI", "MU",
"SBUX", "CSX", "REGN", "VRTX", "MAR", "MNST", "CTAS", "ASML", "PYPL", "AMAT", "NXPI",
],
"nasdaq_all": [
"AAPL", "MSFT", "NVDA", "AMZN", "META", "GOOGL", "TSLA", "AMD", "INTC", "QCOM", "CSCO",
"ADBE", "NFLX", "PYPL", "AMAT", "MU", "SBUX", "GILD", "INTU", "BKNG", "ADP", "CTAS",
"PANW", "SNPS", "CDNS", "LRCX", "KLAC", "MELI", "ASML", "REGN", "VRTX", "MDLZ", "AMGN",
],
}
# Resolve the CA bundle used for outbound HTTPS requests: honour SSL_CERT_FILE
# when it points at an existing file, otherwise fall back to the HTTP client's
# default trust store (verify=True).
_CA_BUNDLE = os.environ.get("SSL_CERT_FILE", "")
if not _CA_BUNDLE or not Path(_CA_BUNDLE).exists():
    _CA_BUNDLE_PATH: str | bool = True
else:
    _CA_BUNDLE_PATH = _CA_BUNDLE
def _validate_universe(universe: str) -> str:
    """Return the canonical (stripped, lowercase) universe name.

    Raises ValidationError when the name is not in SUPPORTED_UNIVERSES.
    """
    cleaned = universe.strip().lower()
    if cleaned in SUPPORTED_UNIVERSES:
        return cleaned
    supported = ", ".join(sorted(SUPPORTED_UNIVERSES))
    raise ValidationError(f"Unsupported universe '{universe}'. Supported: {supported}")
def _normalise_symbols(symbols: Iterable[str]) -> list[str]:
deduped: set[str] = set()
for raw_symbol in symbols:
symbol = raw_symbol.strip().upper().replace(".", "-")
if not symbol:
continue
if _SYMBOL_PATTERN.fullmatch(symbol) is None:
continue
deduped.add(symbol)
return sorted(deduped)
def _extract_symbols_from_fmp_payload(payload: object) -> list[str]:
if not isinstance(payload, list):
return []
symbols: list[str] = []
for item in payload:
if not isinstance(item, dict):
continue
candidate = item.get("symbol") or item.get("ticker")
if isinstance(candidate, str):
symbols.append(candidate)
return symbols
async def _try_fmp_urls(
    client: httpx.AsyncClient,
    urls: list[str],
) -> tuple[list[str], list[str]]:
    """Try candidate FMP URLs in order; return (symbols, failure messages).

    Stops at the first URL yielding a non-empty symbol list. Failure
    descriptions (keyed by the URL without its query string, so the API key
    is not logged) are accumulated for diagnostics.
    """
    attempts_failed: list[str] = []
    for url in urls:
        endpoint = url.split("?")[0]
        try:
            response = await client.get(url)
        except httpx.HTTPError as exc:
            attempts_failed.append(f"{endpoint}: network error ({type(exc).__name__}: {exc})")
            continue
        if response.status_code != 200:
            attempts_failed.append(f"{endpoint}: HTTP {response.status_code}")
            continue
        try:
            payload = response.json()
        except ValueError:
            attempts_failed.append(f"{endpoint}: invalid JSON payload")
            continue
        extracted = _extract_symbols_from_fmp_payload(payload)
        if extracted:
            return extracted, attempts_failed
        attempts_failed.append(f"{endpoint}: empty/unsupported payload")
    return [], attempts_failed
async def _fetch_universe_symbols_from_fmp(universe: str) -> list[str]:
    """Fetch constituent symbols for *universe* from Financial Modeling Prep.

    Tries the "stable" API endpoints first, then the legacy /api/v3
    endpoints, returning the first non-empty symbol list. Raises
    ValidationError when no API key is configured and ProviderError when
    every candidate URL fails (with a summary of up to six attempt failures).
    """
    if not settings.fmp_api_key:
        raise ValidationError(
            "FMP API key is required for universe bootstrap (set FMP_API_KEY)"
        )
    api_key = settings.fmp_api_key
    stable_base = "https://financialmodelingprep.com/stable"
    legacy_base = "https://financialmodelingprep.com/api/v3"
    # Multiple spellings per universe cover endpoint-name variations.
    stable_candidates: dict[str, list[str]] = {
        "sp500": [
            f"{stable_base}/sp500-constituent?apikey={api_key}",
            f"{stable_base}/sp500-constituents?apikey={api_key}",
        ],
        "nasdaq100": [
            f"{stable_base}/nasdaq-100-constituent?apikey={api_key}",
            f"{stable_base}/nasdaq100-constituent?apikey={api_key}",
            f"{stable_base}/nasdaq-100-constituents?apikey={api_key}",
        ],
        "nasdaq_all": [
            f"{stable_base}/stock-screener?exchange=NASDAQ&isEtf=false&limit=10000&apikey={api_key}",
            f"{stable_base}/available-traded/list?apikey={api_key}",
        ],
    }
    # NOTE(review): some legacy candidates omit the apikey query parameter —
    # confirm whether that is intentional (unauthenticated fallback) or a bug.
    legacy_candidates: dict[str, list[str]] = {
        "sp500": [
            f"{legacy_base}/sp500_constituent?apikey={api_key}",
            f"{legacy_base}/sp500_constituent",
        ],
        "nasdaq100": [
            f"{legacy_base}/nasdaq_constituent?apikey={api_key}",
            f"{legacy_base}/nasdaq_constituent",
        ],
        "nasdaq_all": [
            f"{legacy_base}/stock-screener?exchange=NASDAQ&isEtf=false&limit=10000&apikey={api_key}",
        ],
    }
    failures: list[str] = []
    async with httpx.AsyncClient(timeout=30.0, verify=_CA_BUNDLE_PATH) as client:
        stable_symbols, stable_failures = await _try_fmp_urls(client, stable_candidates[universe])
        failures.extend(stable_failures)
        if stable_symbols:
            return stable_symbols
        legacy_symbols, legacy_failures = await _try_fmp_urls(client, legacy_candidates[universe])
        failures.extend(legacy_failures)
        if legacy_symbols:
            return legacy_symbols
    if failures:
        # Cap the summary at six attempts to keep the error message readable.
        reason = "; ".join(failures[:6])
        logger.warning("FMP universe fetch failed for %s: %s", universe, reason)
        raise ProviderError(
            f"Failed to fetch universe symbols from FMP for '{universe}'. Attempts: {reason}"
        )
    raise ProviderError(f"Failed to fetch universe symbols from FMP for '{universe}'")
async def _fetch_html_symbols(
    client: httpx.AsyncClient,
    url: str,
    pattern: str,
) -> tuple[list[str], str | None]:
    """Scrape ticker symbols from an HTML page using regex *pattern*.

    Returns (symbols, None) on success or ([], error description) when the
    request fails, the status is not 200, or nothing matches.
    """
    try:
        response = await client.get(url)
    except httpx.HTTPError as exc:
        return [], f"{url}: network error ({type(exc).__name__}: {exc})"
    if response.status_code != 200:
        return [], f"{url}: HTTP {response.status_code}"
    found = re.findall(pattern, response.text, flags=re.IGNORECASE)
    if found:
        return list(found), None
    return [], f"{url}: no symbols parsed"
async def _fetch_nasdaq_trader_symbols(
    client: httpx.AsyncClient,
) -> tuple[list[str], str | None]:
    """Download the NASDAQ-listed symbol directory from Nasdaq Trader.

    Skips the header row and the trailing "File Creation Time" footer, and
    excludes test issues. Returns (symbols, None) on success or
    ([], error description) on failure.
    """
    url = "https://www.nasdaqtrader.com/dynamic/SymDir/nasdaqlisted.txt"
    try:
        response = await client.get(url)
    except httpx.HTTPError as exc:
        return [], f"{url}: network error ({type(exc).__name__}: {exc})"
    if response.status_code != 200:
        return [], f"{url}: HTTP {response.status_code}"
    symbols: list[str] = []
    for line in response.text.splitlines():
        if not line or line.startswith("Symbol|") or line.startswith("File Creation Time"):
            continue
        parts = line.split("|")
        if not parts:
            continue
        symbol = parts[0].strip()
        # nasdaqlisted.txt layout: Symbol|Security Name|Market Category|
        # Test Issue|Financial Status|Round Lot Size|ETF|NextShares.
        # "Test Issue" is index 3 — the previous code read index 6, which is
        # the ETF flag, so it dropped ETFs and kept test issues.
        test_issue = parts[3].strip() if len(parts) > 3 else "N"
        if test_issue == "Y":
            continue
        symbols.append(symbol)
    if not symbols:
        return [], f"{url}: no symbols parsed"
    return symbols, None
async def _fetch_universe_symbols_from_public(universe: str) -> tuple[list[str], list[str], str | None]:
    """Fetch universe symbols from free public sources.

    Wikipedia constituent tables cover ``sp500`` and ``nasdaq100``; the NASDAQ
    trader symbol directory covers ``nasdaq_all``.

    Returns ``(symbols, failure_messages, source_label)``. On failure the
    symbol list is empty and the source label is ``None``.
    """
    errors: list[str] = []
    wiki_pattern = r"<td>\s*<a[^>]*>([A-Z.]{1,10})</a>\s*</td>"
    # Universe -> (Wikipedia page URL, cache source label).
    wiki_pages = {
        "sp500": ("https://en.wikipedia.org/wiki/List_of_S%26P_500_companies", "wikipedia_sp500"),
        "nasdaq100": ("https://en.wikipedia.org/wiki/Nasdaq-100", "wikipedia_nasdaq100"),
    }
    async with httpx.AsyncClient(timeout=30.0, verify=_CA_BUNDLE_PATH) as client:
        if universe in wiki_pages:
            page_url, source_label = wiki_pages[universe]
            symbols, failure = await _fetch_html_symbols(client, page_url, wiki_pattern)
            if failure is None:
                return symbols, errors, source_label
            errors.append(failure)
        elif universe == "nasdaq_all":
            symbols, failure = await _fetch_nasdaq_trader_symbols(client)
            if failure is None:
                return symbols, errors, "nasdaq_trader"
            errors.append(failure)
    return [], errors, None
async def _read_cached_symbols(db: AsyncSession, universe: str) -> list[str]:
    """Load the cached symbol snapshot for *universe* from SystemSetting.

    Returns a normalised symbol list, or ``[]`` when no cache row exists or
    the stored payload is unreadable. Accepts both the current dict payload
    (``{"symbols": [...]}``) and a legacy bare-list payload.
    """
    cache_key = f"ticker_universe_cache_{universe}"
    lookup = await db.execute(select(SystemSetting).where(SystemSetting.key == cache_key))
    record = lookup.scalar_one_or_none()
    if record is None:
        return []
    try:
        decoded = json.loads(record.value)
    except (TypeError, ValueError):
        return []
    if isinstance(decoded, dict):
        raw_symbols = decoded.get("symbols", [])
    elif isinstance(decoded, list):
        raw_symbols = decoded
    else:
        raw_symbols = []
    if not isinstance(raw_symbols, list):
        return []
    return _normalise_symbols([str(entry) for entry in raw_symbols])
async def _write_cached_symbols(
    db: AsyncSession,
    universe: str,
    symbols: list[str],
    source: str,
) -> None:
    """Persist a snapshot of *symbols* for *universe* into SystemSetting.

    Stores a JSON payload with the symbol list, the source label, and a UTC
    timestamp; creates the setting row on first write, updates it afterwards.
    """
    cache_key = f"ticker_universe_cache_{universe}"
    serialised = json.dumps(
        {
            "symbols": symbols,
            "source": source,
            "updated_at": datetime.now(timezone.utc).isoformat(),
        }
    )
    lookup = await db.execute(select(SystemSetting).where(SystemSetting.key == cache_key))
    existing = lookup.scalar_one_or_none()
    if existing is not None:
        existing.value = serialised
    else:
        db.add(SystemSetting(key=cache_key, value=serialised))
    await db.commit()
async def fetch_universe_symbols(db: AsyncSession, universe: str) -> list[str]:
    """Fetch and normalise symbols for a supported universe with fallbacks.

    Fallback order:
    1) Free public sources (Wikipedia/NASDAQ trader)
    2) FMP endpoints (if available)
    3) Cached snapshot in SystemSetting
    4) Built-in seed symbols

    Successful live fetches (1 and 2) refresh the cache snapshot. Raises
    ProviderError when every tier comes back empty.
    """
    target = _validate_universe(universe)
    attempt_errors: list[str] = []

    # Tier 1: free public sources.
    raw_public, public_errors, source_label = await _fetch_universe_symbols_from_public(target)
    attempt_errors.extend(public_errors)
    public = _normalise_symbols(raw_public)
    if public:
        await _write_cached_symbols(db, target, public, source_label or "public")
        return public

    # Tier 2: FMP endpoints.
    try:
        fmp = _normalise_symbols(await _fetch_universe_symbols_from_fmp(target))
        if fmp:
            await _write_cached_symbols(db, target, fmp, "fmp")
            return fmp
    except (ProviderError, ValidationError) as exc:
        attempt_errors.append(str(exc))

    # Tier 3: last known-good snapshot.
    cached = await _read_cached_symbols(db, target)
    if cached:
        logger.warning(
            "Using cached universe symbols for %s because live fetch failed: %s",
            target,
            "; ".join(attempt_errors[:3]),
        )
        return cached

    # Tier 4: built-in seeds.
    seeds = _normalise_symbols(_SEED_UNIVERSES.get(target, []))
    if seeds:
        logger.warning(
            "Using built-in seed symbols for %s because live/cache fetch failed: %s",
            target,
            "; ".join(attempt_errors[:3]),
        )
        return seeds

    reason = "; ".join(attempt_errors[:6]) if attempt_errors else "no provider returned symbols"
    raise ProviderError(f"Universe '{target}' returned no valid symbols. Attempts: {reason}")
async def bootstrap_universe(
    db: AsyncSession,
    universe: str,
    *,
    prune_missing: bool = False,
) -> dict[str, int | str]:
    """Upsert ticker universe into tracked tickers.

    Fetches the target universe, inserts any symbols not yet tracked, and —
    only when *prune_missing* is set — deletes tracked symbols that left the
    universe. Returns summary counts for added/existing/deleted symbols.
    """
    target = _validate_universe(universe)
    universe_symbols = await fetch_universe_symbols(db, target)
    desired = set(universe_symbols)

    tracked_rows = await db.execute(select(Ticker.symbol))
    tracked = set(tracked_rows.scalars().all())

    new_symbols = sorted(desired - tracked)
    stale_symbols = sorted(tracked - desired) if prune_missing else []

    for new_symbol in new_symbols:
        db.add(Ticker(symbol=new_symbol))

    removed = 0
    if stale_symbols:
        outcome = await db.execute(delete(Ticker).where(Ticker.symbol.in_(stale_symbols)))
        removed = int(outcome.rowcount or 0)

    await db.commit()
    return {
        "universe": target,
        "total_universe_symbols": len(universe_symbols),
        "added": len(new_symbols),
        "already_tracked": len(desired & tracked),
        "deleted": removed,
    }

View File

@@ -1,5 +1,13 @@
import apiClient from './client';
import type { AdminUser, SystemSetting } from '../lib/types';
import type {
AdminUser,
PipelineReadiness,
RecommendationConfig,
SystemSetting,
TickerUniverse,
TickerUniverseBootstrapResult,
TickerUniverseSetting,
} from '../lib/types';
// Users
export function listUsers() {
@@ -48,6 +56,41 @@ export function updateRegistration(enabled: boolean) {
.then((r) => r.data);
}
export function getRecommendationSettings() {
return apiClient
.get<RecommendationConfig>('admin/settings/recommendations')
.then((r) => r.data);
}
export function updateRecommendationSettings(payload: Partial<RecommendationConfig>) {
return apiClient
.put<RecommendationConfig>('admin/settings/recommendations', payload)
.then((r) => r.data);
}
export function getTickerUniverseSetting() {
return apiClient
.get<TickerUniverseSetting>('admin/settings/ticker-universe')
.then((r) => r.data);
}
export function updateTickerUniverseSetting(universe: TickerUniverse) {
return apiClient
.put<TickerUniverseSetting>('admin/settings/ticker-universe', { universe })
.then((r) => r.data);
}
export function bootstrapTickers(universe: TickerUniverse, pruneMissing: boolean) {
return apiClient
.post<TickerUniverseBootstrapResult>('admin/tickers/bootstrap', null, {
params: {
universe,
prune_missing: pruneMissing,
},
})
.then((r) => r.data);
}
// Jobs
export interface JobStatus {
name: string;
@@ -55,12 +98,31 @@ export interface JobStatus {
enabled: boolean;
next_run_at: string | null;
registered: boolean;
running?: boolean;
runtime_status?: string | null;
runtime_processed?: number | null;
runtime_total?: number | null;
runtime_progress_pct?: number | null;
runtime_current_ticker?: string | null;
runtime_started_at?: string | null;
runtime_finished_at?: string | null;
runtime_message?: string | null;
}
export interface TriggerJobResponse {
job: string;
status: 'triggered' | 'busy' | 'blocked' | 'not_found';
message: string;
}
export function listJobs() {
return apiClient.get<JobStatus[]>('admin/jobs').then((r) => r.data);
}
export function getPipelineReadiness() {
return apiClient.get<PipelineReadiness[]>('admin/pipeline/readiness').then((r) => r.data);
}
export function toggleJob(jobName: string, enabled: boolean) {
return apiClient
.put<{ message: string }>(`admin/jobs/${jobName}/toggle`, { enabled })
@@ -69,7 +131,7 @@ export function toggleJob(jobName: string, enabled: boolean) {
export function triggerJob(jobName: string) {
return apiClient
.post<{ message: string }>(`admin/jobs/${jobName}/trigger`)
.post<TriggerJobResponse>(`admin/jobs/${jobName}/trigger`)
.then((r) => r.data);
}

View File

@@ -1,7 +1,20 @@
import apiClient from './client';
export interface IngestionSourceResult {
status: 'ok' | 'error' | 'skipped';
message?: string | null;
records?: number;
classification?: string;
confidence?: number;
}
export interface FetchDataResult {
symbol: string;
sources: Record<string, IngestionSourceResult>;
}
export function fetchData(symbol: string) {
return apiClient
.post<{ message: string }>(`ingestion/fetch/${symbol}`)
.post<FetchDataResult>(`ingestion/fetch/${symbol}`)
.then((r) => r.data);
}

View File

@@ -1,6 +1,20 @@
import apiClient from './client';
import type { TradeSetup } from '../lib/types';
export function list() {
return apiClient.get<TradeSetup[]>('trades').then((r) => r.data);
export interface TradeListParams {
direction?: 'long' | 'short';
min_confidence?: number;
recommended_action?: 'LONG_HIGH' | 'LONG_MODERATE' | 'SHORT_HIGH' | 'SHORT_MODERATE' | 'NEUTRAL';
}
export function list(params?: TradeListParams) {
return apiClient.get<TradeSetup[]>('trades', { params }).then((r) => r.data);
}
export function bySymbol(symbol: string) {
return apiClient.get<TradeSetup[]>(`trades/${symbol.toUpperCase()}`).then((r) => r.data);
}
export function history(symbol: string) {
return apiClient.get<TradeSetup[]>(`trades/${symbol.toUpperCase()}/history`).then((r) => r.data);
}

View File

@@ -17,11 +17,79 @@ export function JobControls() {
const { data: jobs, isLoading } = useJobs();
const toggleJob = useToggleJob();
const triggerJob = useTriggerJob();
const anyJobRunning = (jobs ?? []).some((job) => job.running);
const runningJob = jobs?.find((job) => job.running);
const pausedJob = jobs?.find((job) => !job.running && job.runtime_status === 'rate_limited');
const runningJobLabel = runningJob?.label;
if (isLoading) return <SkeletonTable rows={4} cols={3} />;
return (
<div className="space-y-3">
{runningJob && (
<div className="rounded-xl border border-blue-400/30 bg-blue-500/10 px-4 py-3">
<div className="flex flex-wrap items-center justify-between gap-3">
<div>
<div className="text-xs font-semibold text-blue-300">
Active job: {runningJob.label}
</div>
<div className="mt-0.5 text-[11px] text-blue-100/80">
Manual triggers are blocked until this run finishes.
</div>
</div>
<div className="text-[11px] text-blue-200">
{runningJob.runtime_processed ?? 0}
{typeof runningJob.runtime_total === 'number'
? ` / ${runningJob.runtime_total}`
: ''}
</div>
</div>
<div className="mt-2 h-1.5 w-full rounded-full bg-slate-700/80 overflow-hidden">
<div
className="h-full bg-blue-400 transition-all duration-500"
style={{
width: `${
typeof runningJob.runtime_progress_pct === 'number'
? Math.max(5, Math.min(100, runningJob.runtime_progress_pct))
: 30
}%`,
}}
/>
</div>
{runningJob.runtime_current_ticker && (
<div className="mt-1 text-[11px] text-blue-100/80">
Current: {runningJob.runtime_current_ticker}
</div>
)}
{runningJob.runtime_message && (
<div className="mt-1 text-[11px] text-blue-100/80">
{runningJob.runtime_message}
</div>
)}
</div>
)}
{!runningJob && pausedJob && (
<div className="rounded-xl border border-amber-400/30 bg-amber-500/10 px-4 py-3">
<div className="flex flex-wrap items-center justify-between gap-3">
<div>
<div className="text-xs font-semibold text-amber-300">
Last run paused: {pausedJob.label}
</div>
<div className="mt-0.5 text-[11px] text-amber-100/90">
{pausedJob.runtime_message || 'Rate limit hit. The collector stopped early and will resume from last progress on the next run.'}
</div>
</div>
<div className="text-[11px] text-amber-200">
{pausedJob.runtime_processed ?? 0}
{typeof pausedJob.runtime_total === 'number'
? ` / ${pausedJob.runtime_total}`
: ''}
</div>
</div>
</div>
)}
{jobs?.map((job) => (
<div key={job.name} className="glass p-4 glass-hover">
<div className="flex flex-wrap items-center justify-between gap-4">
@@ -29,7 +97,9 @@ export function JobControls() {
{/* Status dot */}
<span
className={`inline-block h-2.5 w-2.5 rounded-full shrink-0 ${
job.enabled
job.running
? 'bg-blue-400 shadow-lg shadow-blue-400/40'
: job.enabled
? 'bg-emerald-400 shadow-lg shadow-emerald-400/40'
: 'bg-gray-500'
}`}
@@ -37,8 +107,28 @@ export function JobControls() {
<div>
<span className="text-sm font-medium text-gray-200">{job.label}</span>
<div className="flex items-center gap-3 mt-0.5">
<span className={`text-[11px] font-medium ${job.enabled ? 'text-emerald-400' : 'text-gray-500'}`}>
{job.enabled ? 'Active' : 'Inactive'}
<span
className={`text-[11px] font-medium ${
job.running
? 'text-blue-300'
: job.runtime_status === 'rate_limited'
? 'text-amber-300'
: job.runtime_status === 'error'
? 'text-red-300'
: job.enabled
? 'text-emerald-400'
: 'text-gray-500'
}`}
>
{job.running
? 'Running'
: job.runtime_status === 'rate_limited'
? 'Paused (rate-limited)'
: job.runtime_status === 'error'
? 'Last run error'
: job.enabled
? 'Active'
: 'Inactive'}
</span>
{job.enabled && job.next_run_at && (
<span className="text-[11px] text-gray-500">
@@ -49,6 +139,35 @@ export function JobControls() {
<span className="text-[11px] text-red-400">Not registered</span>
)}
</div>
{job.running && (
<div className="mt-2 space-y-1.5">
<div className="flex items-center justify-between text-[11px] text-gray-400">
<span>
{job.runtime_processed ?? 0}
{typeof job.runtime_total === 'number' ? ` / ${job.runtime_total}` : ''}
{' '}processed
</span>
{typeof job.runtime_progress_pct === 'number' && (
<span>{Math.max(0, Math.min(100, job.runtime_progress_pct)).toFixed(0)}%</span>
)}
</div>
<div className="h-1.5 w-56 rounded-full bg-slate-700/80 overflow-hidden">
<div
className="h-full bg-blue-400 transition-all duration-500"
style={{
width: `${
typeof job.runtime_progress_pct === 'number'
? Math.max(5, Math.min(100, job.runtime_progress_pct))
: 30
}%`,
}}
/>
</div>
{job.runtime_current_ticker && (
<div className="text-[11px] text-gray-500">Current: {job.runtime_current_ticker}</div>
)}
</div>
)}
</div>
</div>
@@ -68,13 +187,26 @@ export function JobControls() {
<button
type="button"
onClick={() => triggerJob.mutate(job.name)}
disabled={triggerJob.isPending || !job.enabled}
disabled={triggerJob.isPending || !job.enabled || anyJobRunning}
className="btn-gradient px-3 py-1.5 text-xs disabled:opacity-50 disabled:cursor-not-allowed"
>
<span>{triggerJob.isPending ? 'Triggering…' : 'Trigger Now'}</span>
<span>
{job.running
? 'Running…'
: triggerJob.isPending
? 'Triggering…'
: anyJobRunning
? 'Blocked'
: 'Trigger Now'}
</span>
</button>
</div>
</div>
{anyJobRunning && !job.running && (
<div className="mt-2 text-[11px] text-gray-500">
Manual trigger blocked while {runningJobLabel ?? 'another job'} is running.
</div>
)}
</div>
))}
</div>

View File

@@ -0,0 +1,105 @@
import { useQueryClient } from '@tanstack/react-query';
import { usePipelineReadiness } from '../../hooks/useAdmin';
import { useFetchSymbolData } from '../../hooks/useFetchSymbolData';
import { formatDateTime } from '../../lib/format';
import { SkeletonTable } from '../ui/Skeleton';

// Colour-coded dimension score badge: emerald for >= 60, amber for >= 40,
// red below. A null score renders an empty placeholder span.
function scoreBadge(score: number | null) {
  if (score === null) return <span className="text-[11px] text-gray-500"></span>;
  const cls = score >= 60 ? 'text-emerald-400' : score >= 40 ? 'text-amber-400' : 'text-red-400';
  return <span className={`text-[11px] font-medium ${cls}`}>{score.toFixed(0)}</span>;
}

// Admin table listing every tracked ticker with its data-pipeline completeness:
// OHLCV coverage, per-dimension scores, S/R level count, scanner readiness and
// the reasons a ticker is blocked. Each row offers a manual "Fetch Data" action.
export function PipelineReadinessPanel() {
  const queryClient = useQueryClient();
  const { data, isLoading, isError, error, isFetching } = usePipelineReadiness();
  // On-demand single-symbol ingestion; option semantics are defined in
  // useFetchSymbolData (presumably wires up readiness invalidation — confirm there).
  const fetchMutation = useFetchSymbolData({
    includeSymbolPrefix: true,
    invalidatePipelineReadiness: true,
  });
  if (isLoading) return <SkeletonTable rows={6} cols={6} />;
  if (isError) return <p className="text-sm text-red-400">{(error as Error)?.message || 'Failed to load pipeline readiness'}</p>;
  const rows = data ?? [];
  return (
    <div className="glass p-4 space-y-3">
      <div className="flex items-center justify-between gap-3">
        <div>
          <h3 className="text-sm font-semibold text-gray-200">Pipeline Readiness</h3>
          <p className="text-xs text-gray-500">Shows why tickers may be missing in scanner/rankings and what is incomplete.</p>
        </div>
        {/* Manual refresh: invalidating the query key forces a refetch. */}
        <button
          type="button"
          className="rounded border border-white/[0.12] px-3 py-1.5 text-xs text-gray-300 hover:text-white disabled:opacity-50"
          onClick={() => queryClient.invalidateQueries({ queryKey: ['admin', 'pipeline-readiness'] })}
          disabled={isFetching}
        >
          {isFetching ? 'Refreshing…' : 'Refresh'}
        </button>
      </div>
      {rows.length === 0 ? (
        <p className="text-sm text-gray-500">No tickers available.</p>
      ) : (
        <div className="overflow-x-auto">
          <table className="w-full text-left text-xs">
            <thead>
              <tr className="border-b border-white/[0.08] text-gray-500 uppercase tracking-wider">
                <th className="px-2 py-2">Symbol</th>
                <th className="px-2 py-2">OHLCV</th>
                <th className="px-2 py-2">Dims</th>
                <th className="px-2 py-2">S/R</th>
                <th className="px-2 py-2">Scanner</th>
                <th className="px-2 py-2">Missing Reasons</th>
                <th className="px-2 py-2">Action</th>
              </tr>
            </thead>
            <tbody>
              {rows.map((row) => (
                <tr key={row.symbol} className="border-b border-white/[0.05] align-top">
                  <td className="px-2 py-2 font-medium text-gray-200">{row.symbol}</td>
                  <td className="px-2 py-2 text-gray-300">
                    <div>{row.ohlcv_bars} bars</div>
                    <div className="text-[11px] text-gray-500">{row.ohlcv_last_date ? formatDateTime(row.ohlcv_last_date) : '—'}</div>
                  </td>
                  <td className="px-2 py-2 text-gray-300">
                    {/* Five dimension scores; the legend line below maps the order:
                        Technical, S/R quality, Sentiment, Fundamental, Momentum. */}
                    <div className="grid grid-cols-5 gap-1">
                      {scoreBadge(row.dimensions.technical)}
                      {scoreBadge(row.dimensions.sr_quality)}
                      {scoreBadge(row.dimensions.sentiment)}
                      {scoreBadge(row.dimensions.fundamental)}
                      {scoreBadge(row.dimensions.momentum)}
                    </div>
                    <div className="mt-1 text-[10px] text-gray-500">T SR S F M</div>
                  </td>
                  <td className="px-2 py-2 text-gray-300">{row.sr_level_count}</td>
                  <td className="px-2 py-2">
                    <span className={`inline-block rounded px-2 py-0.5 text-[11px] ${row.ready_for_scanner ? 'bg-emerald-500/20 text-emerald-300' : 'bg-amber-500/20 text-amber-300'}`}>
                      {row.ready_for_scanner ? 'Ready' : 'Blocked'}
                    </span>
                    <div className="mt-1 text-[11px] text-gray-500">setups: {row.trade_setup_count}</div>
                  </td>
                  <td className="px-2 py-2 text-[11px] text-amber-300">
                    {row.missing_reasons.length ? row.missing_reasons.join(', ') : <span className="text-emerald-300">none</span>}
                  </td>
                  <td className="px-2 py-2">
                    {/* Show "Fetching…" only on the row whose symbol matches the in-flight mutation. */}
                    <button
                      type="button"
                      className="rounded border border-white/[0.12] px-2.5 py-1 text-[11px] text-gray-300 hover:text-white disabled:opacity-50"
                      onClick={() => fetchMutation.mutate(row.symbol)}
                      disabled={fetchMutation.isPending}
                    >
                      {fetchMutation.isPending && fetchMutation.variables === row.symbol ? 'Fetching…' : 'Fetch Data'}
                    </button>
                  </td>
                </tr>
              ))}
            </tbody>
          </table>
        </div>
      )}
    </div>
  );
}

View File

@@ -0,0 +1,101 @@
import { useEffect, useState } from 'react';
import type { RecommendationConfig } from '../../lib/types';
import { useRecommendationSettings, useUpdateRecommendationSettings } from '../../hooks/useAdmin';
import { SkeletonTable } from '../ui/Skeleton';

// Initial form values, also written back by "Reset to Defaults".
// NOTE(review): presumably mirrors the server-side defaults — confirm against the API.
const DEFAULTS: RecommendationConfig = {
  high_confidence_threshold: 70,
  moderate_confidence_threshold: 50,
  confidence_diff_threshold: 20,
  signal_alignment_weight: 0.15,
  sr_strength_weight: 0.2,
  distance_penalty_factor: 0.1,
  momentum_technical_divergence_threshold: 30,
  fundamental_technical_divergence_threshold: 40,
};

// Labelled numeric input used for every configuration field below.
function NumberInput({
  label,
  value,
  min,
  max,
  step,
  onChange,
}: {
  label: string;
  value: number;
  min: number;
  max: number;
  step?: number;
  onChange: (v: number) => void;
}) {
  return (
    <label className="block space-y-1">
      <span className="text-xs text-gray-400">{label}</span>
      <input
        type="number"
        min={min}
        max={max}
        step={step}
        value={value}
        onChange={(e) => onChange(Number(e.target.value))}
        className="w-full input-glass px-3 py-2 text-sm"
      />
    </label>
  );
}

// Admin form for tuning recommendation thresholds and weights. Loads the
// current config, edits it locally, and persists via the update mutation;
// "Reset to Defaults" writes DEFAULTS both to local state and to the server.
export function RecommendationSettings() {
  const { data, isLoading, isError, error } = useRecommendationSettings();
  const update = useUpdateRecommendationSettings();
  const [form, setForm] = useState<RecommendationConfig>(DEFAULTS);
  // Replace the local form whenever fresh server data arrives.
  useEffect(() => {
    if (data) setForm(data);
  }, [data]);
  const setField = (field: keyof RecommendationConfig, value: number) => {
    setForm((prev) => ({ ...prev, [field]: value }));
  };
  const onSave = () => {
    update.mutate(form as unknown as Record<string, number>);
  };
  const onReset = () => {
    setForm(DEFAULTS);
    update.mutate(DEFAULTS as unknown as Record<string, number>);
  };
  if (isLoading) return <SkeletonTable rows={6} cols={2} />;
  if (isError) return <p className="text-sm text-red-400">{(error as Error)?.message || 'Failed to load recommendation settings'}</p>;
  return (
    <div className="glass p-5 space-y-4">
      <h3 className="text-sm font-semibold text-gray-200">Recommendation Configuration</h3>
      <div className="grid gap-4 md:grid-cols-3">
        <NumberInput label="High Confidence Threshold (%)" value={form.high_confidence_threshold} min={0} max={100} onChange={(v) => setField('high_confidence_threshold', v)} />
        <NumberInput label="Moderate Confidence Threshold (%)" value={form.moderate_confidence_threshold} min={0} max={100} onChange={(v) => setField('moderate_confidence_threshold', v)} />
        <NumberInput label="Confidence Difference Threshold (%)" value={form.confidence_diff_threshold} min={0} max={100} onChange={(v) => setField('confidence_diff_threshold', v)} />
        <NumberInput label="Signal Alignment Weight" value={form.signal_alignment_weight} min={0} max={1} step={0.01} onChange={(v) => setField('signal_alignment_weight', v)} />
        <NumberInput label="S/R Strength Weight" value={form.sr_strength_weight} min={0} max={1} step={0.01} onChange={(v) => setField('sr_strength_weight', v)} />
        <NumberInput label="Distance Penalty Factor" value={form.distance_penalty_factor} min={0} max={1} step={0.01} onChange={(v) => setField('distance_penalty_factor', v)} />
        <NumberInput label="Momentum-Technical Divergence Threshold" value={form.momentum_technical_divergence_threshold} min={0} max={100} onChange={(v) => setField('momentum_technical_divergence_threshold', v)} />
        <NumberInput label="Fundamental-Technical Divergence Threshold" value={form.fundamental_technical_divergence_threshold} min={0} max={100} onChange={(v) => setField('fundamental_technical_divergence_threshold', v)} />
      </div>
      <div className="flex items-center gap-2">
        <button className="btn-gradient px-4 py-2 text-sm" onClick={onSave} disabled={update.isPending}>
          {update.isPending ? 'Saving…' : 'Save Configuration'}
        </button>
        <button className="px-4 py-2 text-sm rounded border border-white/[0.1] text-gray-300 hover:text-white" onClick={onReset} disabled={update.isPending}>
          Reset to Defaults
        </button>
      </div>
    </div>
  );
}

View File

@@ -0,0 +1,97 @@
import { useEffect, useState } from 'react';
import {
  useBootstrapTickers,
  useTickerUniverseSetting,
  useUpdateTickerUniverseSetting,
} from '../../hooks/useAdmin';
import type { TickerUniverse } from '../../lib/types';

// Universe choices exposed in the dropdown; values match the backend identifiers.
const UNIVERSE_OPTIONS: Array<{ value: TickerUniverse; label: string }> = [
  { value: 'sp500', label: 'S&P 500' },
  { value: 'nasdaq100', label: 'NASDAQ 100' },
  { value: 'nasdaq_all', label: 'NASDAQ All' },
];

// Admin panel for ticker auto-discovery: pick a universe, optionally save it
// as the default, and trigger a bootstrap run (with optional pruning of
// symbols no longer in the universe).
export function TickerUniverseBootstrap() {
  const { data, isLoading, isError, error } = useTickerUniverseSetting();
  const updateDefault = useUpdateTickerUniverseSetting();
  const bootstrap = useBootstrapTickers();
  const [universe, setUniverse] = useState<TickerUniverse>('sp500');
  const [pruneMissing, setPruneMissing] = useState(false);
  // Adopt the server-side default universe once it loads.
  useEffect(() => {
    if (data?.universe) {
      setUniverse(data.universe);
    }
  }, [data]);
  const onSaveDefault = () => {
    updateDefault.mutate(universe);
  };
  const onBootstrap = () => {
    bootstrap.mutate({ universe, pruneMissing });
  };
  return (
    <div className="glass p-5 space-y-4">
      <h3 className="text-sm font-semibold text-gray-200">Ticker Universe Discovery</h3>
      <p className="text-xs text-gray-500">
        Auto-discover tickers from a predefined universe and keep your registry updated.
      </p>
      {isError && (
        <p className="text-sm text-red-400">
          {(error as Error)?.message || 'Failed to load ticker universe setting'}
        </p>
      )}
      <div className="grid gap-4 md:grid-cols-3">
        <label className="block space-y-1 md:col-span-2">
          <span className="text-xs text-gray-400">Default Universe</span>
          <select
            value={universe}
            onChange={(e) => setUniverse(e.target.value as TickerUniverse)}
            className="w-full input-glass px-3 py-2 text-sm"
            disabled={isLoading || updateDefault.isPending || bootstrap.isPending}
          >
            {UNIVERSE_OPTIONS.map((option) => (
              <option key={option.value} value={option.value}>
                {option.label}
              </option>
            ))}
          </select>
        </label>
        <label className="flex items-end gap-2 pb-2">
          <input
            type="checkbox"
            checked={pruneMissing}
            onChange={(e) => setPruneMissing(e.target.checked)}
            disabled={bootstrap.isPending}
            className="h-4 w-4 rounded border-white/20 bg-transparent"
          />
          <span className="text-xs text-gray-400">Prune removed symbols</span>
        </label>
      </div>
      <div className="flex flex-wrap gap-2">
        <button
          className="btn-gradient px-4 py-2 text-sm disabled:opacity-50"
          onClick={onSaveDefault}
          disabled={isLoading || updateDefault.isPending || bootstrap.isPending}
        >
          {updateDefault.isPending ? 'Saving…' : 'Save Default Universe'}
        </button>
        <button
          className="px-4 py-2 text-sm rounded border border-white/[0.1] text-gray-300 hover:text-white disabled:opacity-50"
          onClick={onBootstrap}
          disabled={isLoading || updateDefault.isPending || bootstrap.isPending}
        >
          {bootstrap.isPending ? 'Bootstrapping…' : 'Bootstrap Now'}
        </button>
      </div>
    </div>
  );
}

View File

@@ -1,8 +1,9 @@
import { Link } from 'react-router-dom';
import type { TradeSetup } from '../../lib/types';
import { formatPrice, formatPercent, formatDateTime } from '../../lib/format';
import { recommendationActionDirection, recommendationActionLabel } from '../../lib/recommendation';
export type SortColumn = 'symbol' | 'direction' | 'entry_price' | 'stop_loss' | 'target' | 'risk_amount' | 'reward_amount' | 'rr_ratio' | 'stop_pct' | 'target_pct' | 'composite_score' | 'detected_at';
export type SortColumn = 'symbol' | 'direction' | 'recommended_action' | 'confidence_score' | 'entry_price' | 'stop_loss' | 'target' | 'best_target_probability' | 'risk_amount' | 'reward_amount' | 'rr_ratio' | 'stop_pct' | 'target_pct' | 'risk_level' | 'composite_score' | 'detected_at';
export type SortDirection = 'asc' | 'desc';
interface TradeTableProps {
@@ -14,15 +15,19 @@ interface TradeTableProps {
const columns: { key: SortColumn; label: string }[] = [
{ key: 'symbol', label: 'Symbol' },
{ key: 'recommended_action', label: 'Recommended Action' },
{ key: 'confidence_score', label: 'Confidence' },
{ key: 'direction', label: 'Direction' },
{ key: 'entry_price', label: 'Entry' },
{ key: 'stop_loss', label: 'Stop Loss' },
{ key: 'target', label: 'Target' },
{ key: 'best_target_probability', label: 'Best Target' },
{ key: 'risk_amount', label: 'Risk $' },
{ key: 'reward_amount', label: 'Reward $' },
{ key: 'rr_ratio', label: 'R:R' },
{ key: 'stop_pct', label: '% to Stop' },
{ key: 'target_pct', label: '% to Target' },
{ key: 'risk_level', label: 'Risk' },
{ key: 'composite_score', label: 'Score' },
{ key: 'detected_at', label: 'Detected' },
];
@@ -53,6 +58,19 @@ function sortIndicator(column: SortColumn, active: SortColumn, dir: SortDirectio
return dir === 'asc' ? ' ▲' : ' ▼';
}
function riskLevelClass(riskLevel: TradeSetup['risk_level']) {
if (riskLevel === 'Low') return 'text-emerald-400';
if (riskLevel === 'Medium') return 'text-amber-400';
if (riskLevel === 'High') return 'text-red-400';
return 'text-gray-400';
}
function bestTargetText(trade: TradeSetup) {
if (!trade.targets || trade.targets.length === 0) return '—';
const best = [...trade.targets].sort((a, b) => b.probability - a.probability)[0];
return `${formatPrice(best.price)} (${best.probability.toFixed(0)}%)`;
}
export function TradeTable({ trades, sortColumn, sortDirection, onSort }: TradeTableProps) {
if (trades.length === 0) {
return <p className="py-8 text-center text-sm text-gray-500">No trade setups match the current filters.</p>;
@@ -84,6 +102,17 @@ export function TradeTable({ trades, sortColumn, sortDirection, onSort }: TradeT
{trade.symbol}
</Link>
</td>
<td className="px-4 py-3.5">
<div className="space-y-0.5">
<span className="text-xs font-semibold text-indigo-300">{recommendationActionLabel(trade.recommended_action)}</span>
{recommendationActionDirection(trade.recommended_action) !== 'neutral' && recommendationActionDirection(trade.recommended_action) !== trade.direction && (
<div className="text-[10px] text-amber-400">Alternative setup (not preferred)</div>
)}
</div>
</td>
<td className="px-4 py-3.5">
<span className="font-mono text-gray-200">{trade.confidence_score === null ? '—' : `${trade.confidence_score.toFixed(1)}%`}</span>
</td>
<td className="px-4 py-3.5">
<span className={trade.direction === 'long' ? 'font-medium text-emerald-400' : 'font-medium text-red-400'}>
{trade.direction}
@@ -92,11 +121,13 @@ export function TradeTable({ trades, sortColumn, sortDirection, onSort }: TradeT
<td className="px-4 py-3.5 font-mono text-gray-200">{formatPrice(trade.entry_price)}</td>
<td className="px-4 py-3.5 font-mono text-gray-200">{formatPrice(trade.stop_loss)}</td>
<td className="px-4 py-3.5 font-mono text-gray-200">{formatPrice(trade.target)}</td>
<td className="px-4 py-3.5 font-mono text-gray-200">{bestTargetText(trade)}</td>
<td className="px-4 py-3.5 font-mono text-gray-200">{formatPrice(analysis.risk_amount)}</td>
<td className="px-4 py-3.5 font-mono text-gray-200">{formatPrice(analysis.reward_amount)}</td>
<td className={`px-4 py-3.5 font-mono font-semibold ${rrColorClass(trade.rr_ratio)}`}>{trade.rr_ratio.toFixed(2)}</td>
<td className="px-4 py-3.5 font-mono text-gray-200">{formatPercent(analysis.stop_pct)}</td>
<td className="px-4 py-3.5 font-mono text-gray-200">{formatPercent(analysis.target_pct)}</td>
<td className={`px-4 py-3.5 font-semibold ${riskLevelClass(trade.risk_level)}`}>{trade.risk_level ?? '—'}</td>
<td className="px-4 py-3.5">
<span className={`font-semibold ${trade.composite_score > 70 ? 'text-emerald-400' : trade.composite_score >= 40 ? 'text-amber-400' : 'text-red-400'}`}>
{Math.round(trade.composite_score)}

View File

@@ -0,0 +1,168 @@
import type { TradeSetup } from '../../lib/types';
import { formatPrice, formatPercent } from '../../lib/format';
import { recommendationActionDirection, recommendationActionLabel } from '../../lib/recommendation';
interface RecommendationPanelProps {
symbol: string;
longSetup?: TradeSetup;
shortSetup?: TradeSetup;
}
function riskClass(risk: TradeSetup['risk_level']) {
if (risk === 'Low') return 'text-emerald-400';
if (risk === 'Medium') return 'text-amber-400';
if (risk === 'High') return 'text-red-400';
return 'text-gray-400';
}
function isRecommended(setup: TradeSetup | undefined, action: TradeSetup['recommended_action'] | undefined) {
if (!setup || !action) return false;
if (setup.direction === 'long') return action.startsWith('LONG');
return action.startsWith('SHORT');
}
function TargetTable({ setup }: { setup: TradeSetup }) {
if (!setup.targets || setup.targets.length === 0) {
return <p className="text-xs text-gray-500">No target probabilities available.</p>;
}
return (
<div className="overflow-x-auto">
<table className="w-full text-xs">
<thead>
<tr className="text-left text-gray-500 border-b border-white/[0.06]">
<th className="py-2 pr-3">Classification</th>
<th className="py-2 pr-3">Price</th>
<th className="py-2 pr-3">Distance</th>
<th className="py-2 pr-3">R:R</th>
<th className="py-2">Probability</th>
</tr>
</thead>
<tbody>
{setup.targets.map((target) => (
<tr key={`${setup.id}-${target.sr_level_id}-${target.price}`} className="border-b border-white/[0.04]">
<td className="py-2 pr-3 text-gray-300">{target.classification}</td>
<td className="py-2 pr-3 font-mono text-gray-200">{formatPrice(target.price)}</td>
<td className="py-2 pr-3 font-mono text-gray-200">{formatPercent((target.distance_from_entry / setup.entry_price) * 100)}</td>
<td className="py-2 pr-3 font-mono text-gray-200">{target.rr_ratio.toFixed(2)}</td>
<td className="py-2 font-mono text-gray-200">{target.probability.toFixed(1)}%</td>
</tr>
))}
</tbody>
</table>
</div>
);
}
/**
 * Card for a single directional trade setup (long or short).
 * Highlights the card with an emerald border when the ticker-level action
 * agrees with this setup's direction; otherwise dims it and, when the bias
 * is non-neutral, labels it as the alternative side.
 */
function SetupCard({ setup, action }: { setup?: TradeSetup; action?: TradeSetup['recommended_action'] }) {
  // No setup was generated for this direction.
  if (!setup) {
    return (
      <div className="glass-sm p-4 text-xs text-gray-500">
        Setup unavailable for this direction.
      </div>
    );
  }
  const recommended = isRecommended(setup, action);
  return (
    <div
      data-direction={setup.direction}
      className={`glass-sm p-4 space-y-3 ${recommended ? 'border border-emerald-500/40' : 'opacity-80'}`}
    >
      <div className="flex items-center justify-between">
        <h4 className={`text-sm font-semibold ${setup.direction === 'long' ? 'text-emerald-400' : 'text-red-400'}`}>
          {setup.direction.toUpperCase()}
        </h4>
        {/* Setup confidence; em dash when the score is missing. */}
        <span className="text-xs text-gray-300">{setup.confidence_score?.toFixed(1) ?? '—'}%</span>
      </div>
      {/* Warn when this card runs against the prevailing ticker bias. */}
      {!recommended && recommendationActionDirection(action ?? null) !== 'neutral' && (
        <p className="text-[11px] text-amber-400">Alternative setup (ticker bias currently favors the opposite direction).</p>
      )}
      <div className="grid grid-cols-2 gap-2 text-xs">
        <div className="text-gray-500">Entry</div><div className="font-mono text-gray-200">{formatPrice(setup.entry_price)}</div>
        <div className="text-gray-500">Stop</div><div className="font-mono text-gray-200">{formatPrice(setup.stop_loss)}</div>
        <div className="text-gray-500">Primary Target</div><div className="font-mono text-gray-200">{formatPrice(setup.target)}</div>
        <div className="text-gray-500">R:R</div><div className="font-mono text-gray-200">{setup.rr_ratio.toFixed(2)}</div>
      </div>
      <TargetTable setup={setup} />
      {/* Conflict flags are surfaced as a compact amber banner. */}
      {setup.conflict_flags.length > 0 && (
        <div className="rounded border border-amber-500/30 bg-amber-500/10 p-2 text-[11px] text-amber-300">
          {setup.conflict_flags.join(' • ')}
        </div>
      )}
    </div>
  );
}
/**
 * Ticker-level recommendation section: overall action, risk level,
 * composite score, optional reasoning text, and the long/short setup cards.
 * The setup matching the recommended direction is shown first with the
 * opposite side collapsed under an "Alternative scenario" disclosure;
 * with a neutral bias both cards are shown side by side.
 * Renders nothing when no setup exists for either direction.
 */
export function RecommendationPanel({ symbol, longSetup, shortSetup }: RecommendationPanelProps) {
  // Prefer the long setup's summary, falling back to the short side's.
  const summary = longSetup?.recommendation_summary ?? shortSetup?.recommendation_summary;
  const action = (summary?.action ?? 'NEUTRAL') as TradeSetup['recommended_action'];
  const preferredDirection = recommendationActionDirection(action);
  const preferredSetup =
    preferredDirection === 'long'
      ? longSetup
      : preferredDirection === 'short'
        ? shortSetup
        : undefined;
  const alternativeSetup =
    preferredDirection === 'long'
      ? shortSetup
      : preferredDirection === 'short'
        ? longSetup
        : undefined;
  // Nothing to show without at least one directional setup.
  if (!longSetup && !shortSetup) {
    return null;
  }
  return (
    <section>
      <h2 className="mb-3 text-xs font-medium uppercase tracking-widest text-gray-500">Recommendation</h2>
      <div className="glass p-5 space-y-4">
        <div className="flex flex-wrap items-center gap-4">
          <span className="text-sm font-semibold text-indigo-300">{recommendationActionLabel(action)}</span>
          <span className={`text-sm font-semibold ${riskClass(summary?.risk_level ?? null)}`}>
            Risk: {summary?.risk_level ?? '—'}
          </span>
          <span className="text-sm text-gray-300">Composite: {summary?.composite_score?.toFixed(1) ?? '—'}</span>
          <span className="text-xs text-gray-500">{symbol.toUpperCase()}</span>
        </div>
        <p className="text-xs text-gray-500">Recommended Action is the ticker-level bias. The preferred setup is shown first; the opposite side is available under Alternative scenario.</p>
        {summary?.reasoning && (
          <p className="text-sm text-gray-300">{summary.reasoning}</p>
        )}
        {preferredDirection !== 'neutral' && preferredSetup ? (
          <div className="space-y-3">
            <SetupCard setup={preferredSetup} action={action} />
            {alternativeSetup && (
              <details className="glass-sm p-3">
                <summary className="cursor-pointer text-xs font-medium text-gray-300">
                  Alternative scenario ({alternativeSetup.direction.toUpperCase()})
                </summary>
                <div className="mt-3">
                  <SetupCard setup={alternativeSetup} action={action} />
                </div>
              </details>
            )}
          </div>
        ) : (
          <div className="grid gap-4 lg:grid-cols-2">
            <SetupCard setup={longSetup} action={action} />
            <SetupCard setup={shortSetup} action={action} />
          </div>
        )}
      </div>
    </section>
  );
}

View File

@@ -1,6 +1,7 @@
import { useMutation, useQuery, useQueryClient } from '@tanstack/react-query';
import * as adminApi from '../api/admin';
import { useToast } from '../components/ui/Toast';
import type { TickerUniverse } from '../lib/types';
// ── Users ──
@@ -89,13 +90,93 @@ export function useUpdateSetting() {
});
}
/** Fetch the admin-configurable recommendation settings. */
export function useRecommendationSettings() {
  return useQuery({
    queryKey: ['admin', 'recommendation-settings'],
    queryFn: () => adminApi.getRecommendationSettings(),
  });
}
/**
 * Mutation to persist recommendation settings.
 * On success, invalidates the settings query and toasts; on error, toasts
 * the failure message.
 */
export function useUpdateRecommendationSettings() {
  const qc = useQueryClient();
  const { addToast } = useToast();
  return useMutation({
    mutationFn: (payload: Record<string, number>) =>
      adminApi.updateRecommendationSettings(payload),
    onSuccess: () => {
      qc.invalidateQueries({ queryKey: ['admin', 'recommendation-settings'] });
      addToast('success', 'Recommendation settings updated');
    },
    onError: (error: Error) => {
      addToast('error', error.message || 'Failed to update recommendation settings');
    },
  });
}
/** Fetch the default ticker-universe setting. */
export function useTickerUniverseSetting() {
  return useQuery({
    queryKey: ['admin', 'ticker-universe'],
    queryFn: () => adminApi.getTickerUniverseSetting(),
  });
}
/**
 * Mutation to change the default ticker universe.
 * Invalidates the universe query and toasts the outcome.
 */
export function useUpdateTickerUniverseSetting() {
  const qc = useQueryClient();
  const { addToast } = useToast();
  return useMutation({
    mutationFn: (universe: TickerUniverse) => adminApi.updateTickerUniverseSetting(universe),
    onSuccess: () => {
      qc.invalidateQueries({ queryKey: ['admin', 'ticker-universe'] });
      addToast('success', 'Default ticker universe updated');
    },
    onError: (error: Error) => {
      addToast('error', error.message || 'Failed to update default ticker universe');
    },
  });
}
/**
 * Mutation to bootstrap the tracked-ticker list from a universe, optionally
 * pruning symbols no longer in that universe. On success it refreshes the
 * ticker and universe queries and toasts the added/existing/deleted counts.
 */
export function useBootstrapTickers() {
  const qc = useQueryClient();
  const { addToast } = useToast();
  return useMutation({
    mutationFn: ({ universe, pruneMissing }: { universe: TickerUniverse; pruneMissing: boolean }) =>
      adminApi.bootstrapTickers(universe, pruneMissing),
    onSuccess: (result) => {
      qc.invalidateQueries({ queryKey: ['tickers'] });
      qc.invalidateQueries({ queryKey: ['admin', 'ticker-universe'] });
      addToast(
        'success',
        `Bootstrap done: +${result.added}, existing ${result.already_tracked}, deleted ${result.deleted}`,
      );
    },
    onError: (error: Error) => {
      addToast('error', error.message || 'Failed to bootstrap tickers');
    },
  });
}
// ── Jobs ──
/**
 * Poll scheduled-job statuses.
 * Polls every 15s while idle, tightening to 2s whenever any job reports
 * `running` so progress is reflected promptly.
 */
export function useJobs() {
  return useQuery({
    queryKey: ['admin', 'jobs'],
    queryFn: () => adminApi.listJobs(),
    // Fix: removed the stale static `refetchInterval: 15_000,` line that
    // duplicated this key — object literals may not repeat a property
    // (TS1117), and the later function form was overriding it anyway.
    refetchInterval: (query) => {
      const jobs = (query.state.data ?? []) as adminApi.JobStatus[];
      const hasRunning = jobs.some((job) => job.running);
      return hasRunning ? 2_000 : 15_000;
    },
  });
}
/** Poll per-symbol pipeline readiness every 20 seconds. */
export function usePipelineReadiness() {
  return useQuery({
    queryKey: ['admin', 'pipeline-readiness'],
    queryFn: () => adminApi.getPipelineReadiness(),
    refetchInterval: 20_000,
  });
}
@@ -121,9 +202,13 @@ export function useTriggerJob() {
return useMutation({
mutationFn: (jobName: string) => adminApi.triggerJob(jobName),
onSuccess: () => {
onSuccess: (result) => {
qc.invalidateQueries({ queryKey: ['admin', 'jobs'] });
addToast('success', 'Job triggered successfully');
if (result.status === 'triggered') {
addToast('success', result.message || 'Job triggered successfully');
return;
}
addToast('info', result.message || 'Job could not be triggered');
},
onError: (error: Error) => {
addToast('error', error.message || 'Failed to trigger job');

View File

@@ -0,0 +1,42 @@
import { useMutation, useQueryClient } from '@tanstack/react-query';
import { fetchData, type FetchDataResult } from '../api/ingestion';
import { useToast } from '../components/ui/Toast';
import { summarizeIngestionResult } from '../lib/ingestionStatus';
// Options for useFetchSymbolData.
interface UseFetchSymbolDataOptions {
  // Prefix toast messages with the upper-cased symbol.
  includeSymbolPrefix?: boolean;
  // Also invalidate the admin pipeline-readiness query after a fetch.
  invalidatePipelineReadiness?: boolean;
}
/**
 * Mutation hook that triggers data ingestion for one symbol and refreshes
 * dependent queries on completion.
 *
 * On success: toasts a per-source summary (type derived from the ingestion
 * result) and invalidates the symbol's OHLCV, sentiment, fundamentals,
 * S/R-level and score queries; optionally also pipeline readiness.
 * On error: toasts the failure message.
 */
export function useFetchSymbolData(options: UseFetchSymbolDataOptions = {}) {
  const { includeSymbolPrefix = false, invalidatePipelineReadiness = false } = options;
  const queryClient = useQueryClient();
  const { addToast } = useToast();
  return useMutation({
    mutationFn: (symbol: string) => fetchData(symbol),
    onSuccess: (result: FetchDataResult, symbol: string) => {
      const normalized = symbol.toUpperCase();
      const summary = summarizeIngestionResult(result, normalized);
      const toastMessage = includeSymbolPrefix
        ? `${normalized}: ${summary.message}`
        : summary.message;
      addToast(summary.toastType, toastMessage);
      // NOTE(review): queries are keyed on the symbol exactly as passed in,
      // not the normalized form — callers should use a consistent casing.
      queryClient.invalidateQueries({ queryKey: ['ohlcv', symbol] });
      queryClient.invalidateQueries({ queryKey: ['sentiment', symbol] });
      queryClient.invalidateQueries({ queryKey: ['fundamentals', symbol] });
      queryClient.invalidateQueries({ queryKey: ['sr-levels', symbol] });
      queryClient.invalidateQueries({ queryKey: ['scores', symbol] });
      if (invalidatePipelineReadiness) {
        queryClient.invalidateQueries({ queryKey: ['admin', 'pipeline-readiness'] });
      }
    },
    onError: (err: Error, symbol: string) => {
      const normalized = symbol.toUpperCase();
      const prefix = includeSymbolPrefix ? `${normalized}: ` : '';
      addToast('error', `${prefix}${err.message || 'Failed to fetch data'}`);
    },
  });
}

View File

@@ -38,8 +38,8 @@ export function useTickerDetail(symbol: string) {
});
const trades = useQuery({
queryKey: ['trades'],
queryFn: () => tradesApi.list(),
queryKey: ['trades', symbol],
queryFn: () => tradesApi.bySymbol(symbol),
enabled: !!symbol,
});

View File

@@ -0,0 +1,42 @@
import type { FetchDataResult, IngestionSourceResult } from '../api/ingestion';
// Toast severity used for ingestion summaries.
export type IngestionToastType = 'success' | 'error' | 'info';

/** Toast type plus message summarizing a per-source ingestion result. */
export interface IngestionStatusSummary {
  toastType: IngestionToastType;
  message: string;
}
/**
 * Summarize a per-source ingestion result into a toast type and message.
 *
 * Without a `sources` breakdown, returns a generic success using the
 * fallback label. Otherwise each source contributes one fragment
 * (" · "-joined): bare label for ok, "skipped (reason)" for skips, and the
 * error message for failures. Any error makes the toast an error; any skip
 * (without errors) makes it info; otherwise success.
 */
export function summarizeIngestionResult(
  result: FetchDataResult | null | undefined,
  fallbackLabel: string,
): IngestionStatusSummary {
  const sources = result?.sources;
  if (!sources) {
    return {
      toastType: 'success',
      message: `Data fetched for ${fallbackLabel}`,
    };
  }
  const fragments: string[] = [];
  let sawError = false;
  let sawSkip = false;
  for (const [name, info] of Object.entries(sources) as [string, IngestionSourceResult][]) {
    // Capitalize the source name for display.
    const label = name.charAt(0).toUpperCase() + name.slice(1);
    if (info.status === 'ok') {
      fragments.push(`${label}`);
    } else if (info.status === 'skipped') {
      sawSkip = true;
      fragments.push(`${label}: skipped${info.message ? ` (${info.message})` : ''}`);
    } else {
      sawError = true;
      fragments.push(`${label}${info.message ? `: ${info.message}` : ''}`);
    }
  }
  const toastType: IngestionToastType = sawError ? 'error' : sawSkip ? 'info' : 'success';
  return {
    toastType,
    message: fragments.join(' · '),
  };
}

View File

@@ -0,0 +1,46 @@
import type { TradeSetup } from './types';
// Non-null ticker-level recommended action codes.
export type RecommendationAction = NonNullable<TradeSetup['recommended_action']>;

/** Human-readable display label for each recommended action. */
export const RECOMMENDATION_ACTION_LABELS: Record<RecommendationAction, string> = {
  LONG_HIGH: 'LONG (High Confidence)',
  LONG_MODERATE: 'LONG (Moderate Confidence)',
  SHORT_HIGH: 'SHORT (High Confidence)',
  SHORT_MODERATE: 'SHORT (Moderate Confidence)',
  NEUTRAL: 'NEUTRAL (Conflicting Signals)',
};
/** Ordered glossary entries explaining each recommended action (rendered on the scanner page). */
export const RECOMMENDATION_ACTION_GLOSSARY: Array<{ action: RecommendationAction; description: string }> = [
  {
    action: 'LONG_HIGH',
    description: 'Ticker bias favors LONG strongly. LONG confidence is above the high threshold and clearly above SHORT.',
  },
  {
    action: 'LONG_MODERATE',
    description: 'Ticker bias favors LONG, but with moderate conviction.',
  },
  {
    action: 'SHORT_HIGH',
    description: 'Ticker bias favors SHORT strongly. SHORT confidence is above the high threshold and clearly above LONG.',
  },
  {
    action: 'SHORT_MODERATE',
    description: 'Ticker bias favors SHORT, but with moderate conviction.',
  },
  {
    action: 'NEUTRAL',
    description: 'No strong directional edge. Signals are mixed or confidence gap is too small.',
  },
];
/**
 * Display label for a recommended action.
 * Null and unrecognized actions both fall back to the NEUTRAL label.
 */
export function recommendationActionLabel(action: TradeSetup['recommended_action']): string {
  const key = action ?? 'NEUTRAL';
  return RECOMMENDATION_ACTION_LABELS[key] ?? RECOMMENDATION_ACTION_LABELS.NEUTRAL;
}
/**
 * Reduce a recommended action to its trade direction.
 * LONG_* maps to 'long', SHORT_* to 'short'; null, NEUTRAL, and anything
 * unrecognized map to 'neutral'.
 */
export function recommendationActionDirection(action: TradeSetup['recommended_action']): 'long' | 'short' | 'neutral' {
  if (action?.startsWith('LONG')) return 'long';
  if (action?.startsWith('SHORT')) return 'short';
  return 'neutral';
}

View File

@@ -121,6 +121,32 @@ export interface TradeSetup {
rr_ratio: number;
composite_score: number;
detected_at: string;
confidence_score: number | null;
targets: TradeTarget[];
conflict_flags: string[];
recommended_action: 'LONG_HIGH' | 'LONG_MODERATE' | 'SHORT_HIGH' | 'SHORT_MODERATE' | 'NEUTRAL' | null;
reasoning: string | null;
risk_level: 'Low' | 'Medium' | 'High' | null;
actual_outcome: string | null;
recommendation_summary?: RecommendationSummary;
}
/** A candidate price target attached to a trade setup, backed by an S/R level. */
export interface TradeTarget {
  price: number;
  // Price distance from the setup's entry (rendered as % of entry price in the UI).
  distance_from_entry: number;
  // Same distance as an ATR multiple — presumably; confirm against backend.
  distance_atr_multiple: number;
  rr_ratio: number;
  // Probability of reaching this target, in percent (rendered with a % suffix).
  probability: number;
  classification: 'Conservative' | 'Moderate' | 'Aggressive';
  // Backing support/resistance level and its strength score.
  sr_level_id: number;
  sr_strength: number;
}
/** Ticker-level recommendation summary carried on a trade setup. */
export interface RecommendationSummary {
  // Recommended action code (e.g. 'LONG_HIGH', 'NEUTRAL'); cast to
  // TradeSetup['recommended_action'] by the UI.
  action: string;
  reasoning: string | null;
  risk_level: 'Low' | 'Medium' | 'High' | null;
  composite_score: number;
}
// S/R Levels
@@ -214,3 +240,51 @@ export interface SystemSetting {
value: string;
updated_at: string | null;
}
/** Tunable thresholds and weights for the recommendation engine (admin-editable). */
export interface RecommendationConfig {
  high_confidence_threshold: number;
  moderate_confidence_threshold: number;
  confidence_diff_threshold: number;
  signal_alignment_weight: number;
  sr_strength_weight: number;
  distance_penalty_factor: number;
  momentum_technical_divergence_threshold: number;
  fundamental_technical_divergence_threshold: number;
}
// Supported ticker universes for bootstrapping the tracked-ticker list.
export type TickerUniverse = 'sp500' | 'nasdaq100' | 'nasdaq_all';

/** Admin setting wrapper for the default ticker universe. */
export interface TickerUniverseSetting {
  universe: TickerUniverse;
}

/** Counts returned by the ticker-universe bootstrap operation. */
export interface TickerUniverseBootstrapResult {
  universe: TickerUniverse;
  total_universe_symbols: number;
  added: number;
  already_tracked: number;
  deleted: number;
}
/** Per-symbol readiness snapshot for the scoring/scanner pipeline. */
export interface PipelineReadiness {
  symbol: string;
  ohlcv_bars: number;
  ohlcv_last_date: string | null;
  // Per-dimension scores; null presumably means not yet computed — confirm with backend.
  dimensions: {
    technical: number | null;
    sr_quality: number | null;
    sentiment: number | null;
    fundamental: number | null;
    momentum: number | null;
  };
  sentiment_count: number;
  sentiment_last_at: string | null;
  has_fundamentals: boolean;
  fundamentals_fetched_at: string | null;
  sr_level_count: number;
  has_composite: boolean;
  composite_stale: boolean | null;
  trade_setup_count: number;
  // Reasons the symbol is not yet ready for the scanner.
  missing_reasons: string[];
  ready_for_scanner: boolean;
}

View File

@@ -1,8 +1,11 @@
import { useState } from 'react';
import { DataCleanup } from '../components/admin/DataCleanup';
import { JobControls } from '../components/admin/JobControls';
import { PipelineReadinessPanel } from '../components/admin/PipelineReadinessPanel';
import { RecommendationSettings } from '../components/admin/RecommendationSettings';
import { SettingsForm } from '../components/admin/SettingsForm';
import { TickerManagement } from '../components/admin/TickerManagement';
import { TickerUniverseBootstrap } from '../components/admin/TickerUniverseBootstrap';
import { UserTable } from '../components/admin/UserTable';
const tabs = ['Users', 'Tickers', 'Settings', 'Jobs', 'Cleanup'] as const;
@@ -39,8 +42,19 @@ export default function AdminPage() {
<div className="animate-fade-in">
{activeTab === 'Users' && <UserTable />}
{activeTab === 'Tickers' && <TickerManagement />}
{activeTab === 'Settings' && <SettingsForm />}
{activeTab === 'Jobs' && <JobControls />}
{activeTab === 'Settings' && (
<div className="space-y-4">
<TickerUniverseBootstrap />
<RecommendationSettings />
<SettingsForm />
</div>
)}
{activeTab === 'Jobs' && (
<div className="space-y-4">
<PipelineReadinessPanel />
<JobControls />
</div>
)}
{activeTab === 'Cleanup' && <DataCleanup />}
</div>
</div>

View File

@@ -6,17 +6,23 @@ import { SkeletonTable } from '../components/ui/Skeleton';
import { useToast } from '../components/ui/Toast';
import { triggerJob } from '../api/admin';
import type { TradeSetup } from '../lib/types';
import { RECOMMENDATION_ACTION_GLOSSARY, RECOMMENDATION_ACTION_LABELS } from '../lib/recommendation';
// Scanner filter controls: trade direction and recommended-action filters.
type DirectionFilter = 'both' | 'long' | 'short';
type ActionFilter = 'all' | 'LONG_HIGH' | 'LONG_MODERATE' | 'SHORT_HIGH' | 'SHORT_MODERATE' | 'NEUTRAL';
/**
 * Apply the scanner's filter controls to the trade-setup list.
 * A setup passes when its R:R meets the minimum, its direction matches
 * (unless 'both'), its confidence meets the minimum (a missing score counts
 * as 0; a minimum of 0 disables the check), and its recommended action
 * matches (unless 'all').
 */
function filterTrades(
  trades: TradeSetup[],
  minRR: number,
  direction: DirectionFilter,
  minConfidence: number,
  action: ActionFilter,
): TradeSetup[] {
  const passes = (t: TradeSetup): boolean =>
    t.rr_ratio >= minRR &&
    (direction === 'both' || t.direction === direction) &&
    (minConfidence <= 0 || (t.confidence_score ?? 0) >= minConfidence) &&
    (action === 'all' || t.recommended_action === action);
  return trades.filter(passes);
}
@@ -28,6 +34,14 @@ function getComputedValue(trade: TradeSetup, column: SortColumn): number {
case 'reward_amount': return analysis.reward_amount;
case 'stop_pct': return analysis.stop_pct;
case 'target_pct': return analysis.target_pct;
case 'confidence_score': return trade.confidence_score ?? -1;
case 'best_target_probability':
return trade.targets?.length ? Math.max(...trade.targets.map((t) => t.probability)) : -1;
case 'risk_level':
if (trade.risk_level === 'Low') return 1;
if (trade.risk_level === 'Medium') return 2;
if (trade.risk_level === 'High') return 3;
return 0;
default: return 0;
}
}
@@ -46,6 +60,9 @@ function sortTrades(
case 'direction':
cmp = a.direction.localeCompare(b.direction);
break;
case 'recommended_action':
cmp = (a.recommended_action ?? '').localeCompare(b.recommended_action ?? '');
break;
case 'detected_at':
cmp = new Date(a.detected_at).getTime() - new Date(b.detected_at).getTime();
break;
@@ -53,6 +70,9 @@ function sortTrades(
case 'reward_amount':
case 'stop_pct':
case 'target_pct':
case 'confidence_score':
case 'best_target_probability':
case 'risk_level':
cmp = getComputedValue(a, column) - getComputedValue(b, column);
break;
case 'entry_price':
@@ -75,6 +95,8 @@ export default function ScannerPage() {
const [minRR, setMinRR] = useState(0);
const [directionFilter, setDirectionFilter] = useState<DirectionFilter>('both');
const [minConfidence, setMinConfidence] = useState(0);
const [actionFilter, setActionFilter] = useState<ActionFilter>('all');
const [sortColumn, setSortColumn] = useState<SortColumn>('rr_ratio');
const [sortDirection, setSortDirection] = useState<SortDirection>('desc');
@@ -100,9 +122,9 @@ export default function ScannerPage() {
const processed = useMemo(() => {
if (!trades) return [];
const filtered = filterTrades(trades, minRR, directionFilter);
const filtered = filterTrades(trades, minRR, directionFilter, minConfidence, actionFilter);
return sortTrades(filtered, sortColumn, sortDirection);
}, [trades, minRR, directionFilter, sortColumn, sortDirection]);
}, [trades, minRR, directionFilter, minConfidence, actionFilter, sortColumn, sortDirection]);
return (
<div className="space-y-6">
@@ -160,6 +182,51 @@ export default function ScannerPage() {
<option value="short">Short</option>
</select>
</div>
<div>
<label htmlFor="min-confidence" className="mb-1 block text-xs text-gray-400">
Min Confidence
</label>
<input
id="min-confidence"
type="number"
min={0}
max={100}
step={1}
value={minConfidence}
onChange={(e) => setMinConfidence(Number(e.target.value) || 0)}
className="w-24 rounded border border-gray-700 bg-gray-800 px-3 py-1.5 text-sm text-gray-200 focus:border-blue-500 focus:outline-none transition-colors duration-150"
/>
</div>
<div>
<label htmlFor="action" className="mb-1 block text-xs text-gray-400">
Recommended Action
</label>
<select
id="action"
value={actionFilter}
onChange={(e) => setActionFilter(e.target.value as ActionFilter)}
className="rounded border border-gray-700 bg-gray-800 px-3 py-1.5 text-sm text-gray-200 focus:border-blue-500 focus:outline-none transition-colors duration-150"
>
<option value="all">All</option>
<option value="LONG_HIGH">LONG_HIGH</option>
<option value="LONG_MODERATE">LONG_MODERATE</option>
<option value="SHORT_HIGH">SHORT_HIGH</option>
<option value="SHORT_MODERATE">SHORT_MODERATE</option>
<option value="NEUTRAL">NEUTRAL</option>
</select>
</div>
</div>
<div className="rounded-lg border border-white/[0.08] bg-white/[0.02] px-4 py-3">
<p className="text-xs uppercase tracking-wider text-gray-500 mb-2">Recommended Action Glossary (Ticker-Level Bias)</p>
<div className="grid gap-1 md:grid-cols-2">
{RECOMMENDATION_ACTION_GLOSSARY.map((item) => (
<p key={item.action} className="text-xs text-gray-300">
<span className="font-semibold text-indigo-300">{RECOMMENDATION_ACTION_LABELS[item.action]}:</span>{' '}
{item.description}
</p>
))}
</div>
</div>
{/* Content */}

View File

@@ -1,15 +1,14 @@
import { useMemo, useEffect } from 'react';
import { useParams } from 'react-router-dom';
import { useMutation, useQueryClient } from '@tanstack/react-query';
import { useTickerDetail } from '../hooks/useTickerDetail';
import { useFetchSymbolData } from '../hooks/useFetchSymbolData';
import { CandlestickChart } from '../components/charts/CandlestickChart';
import { ScoreCard } from '../components/ui/ScoreCard';
import { SkeletonCard } from '../components/ui/Skeleton';
import { SentimentPanel } from '../components/ticker/SentimentPanel';
import { FundamentalsPanel } from '../components/ticker/FundamentalsPanel';
import { IndicatorSelector } from '../components/ticker/IndicatorSelector';
import { useToast } from '../components/ui/Toast';
import { fetchData } from '../api/ingestion';
import { RecommendationPanel } from '../components/ticker/RecommendationPanel';
import { formatPrice } from '../lib/format';
import type { TradeSetup } from '../lib/types';
@@ -67,43 +66,7 @@ function DataFreshnessBar({ items }: { items: DataStatusItem[] }) {
export default function TickerDetailPage() {
const { symbol = '' } = useParams<{ symbol: string }>();
const { ohlcv, scores, srLevels, sentiment, fundamentals, trades } = useTickerDetail(symbol);
const queryClient = useQueryClient();
const { addToast } = useToast();
const ingestion = useMutation({
mutationFn: () => fetchData(symbol),
onSuccess: (result: any) => {
// Show per-source status breakdown
const sources = result?.sources;
if (sources) {
const parts: string[] = [];
for (const [name, info] of Object.entries(sources) as [string, any][]) {
const label = name.charAt(0).toUpperCase() + name.slice(1);
if (info.status === 'ok') {
parts.push(`${label}`);
} else if (info.status === 'skipped') {
parts.push(`${label}: skipped (${info.message})`);
} else {
parts.push(`${label} ✗: ${info.message}`);
}
}
const hasError = Object.values(sources).some((s: any) => s.status === 'error');
const hasSkip = Object.values(sources).some((s: any) => s.status === 'skipped');
const toastType = hasError ? 'error' : hasSkip ? 'info' : 'success';
addToast(toastType, parts.join(' · '));
} else {
addToast('success', `Data fetched for ${symbol.toUpperCase()}`);
}
queryClient.invalidateQueries({ queryKey: ['ohlcv', symbol] });
queryClient.invalidateQueries({ queryKey: ['sentiment', symbol] });
queryClient.invalidateQueries({ queryKey: ['fundamentals', symbol] });
queryClient.invalidateQueries({ queryKey: ['sr-levels', symbol] });
queryClient.invalidateQueries({ queryKey: ['scores', symbol] });
},
onError: (err: Error) => {
addToast('error', err.message || 'Failed to fetch data');
},
});
const ingestion = useFetchSymbolData();
const dataStatus: DataStatusItem[] = useMemo(() => [
{
@@ -140,18 +103,28 @@ export default function TickerDetailPage() {
}
}, [trades.error]);
// Pick the latest trade setup for the current symbol
const tradeSetup: TradeSetup | undefined = useMemo(() => {
if (trades.error || !trades.data) return undefined;
const matching = trades.data.filter(
(t) => t.symbol.toUpperCase() === symbol.toUpperCase(),
);
if (matching.length === 0) return undefined;
return matching.reduce((latest, t) =>
new Date(t.detected_at) > new Date(latest.detected_at) ? t : latest,
);
const setupsForSymbol: TradeSetup[] = useMemo(() => {
if (trades.error || !trades.data) return [];
return trades.data.filter((t) => t.symbol.toUpperCase() === symbol.toUpperCase());
}, [trades.data, trades.error, symbol]);
const longSetup = useMemo(
() => setupsForSymbol?.find((s) => s.direction === 'long'),
[setupsForSymbol],
);
const shortSetup = useMemo(
() => setupsForSymbol?.find((s) => s.direction === 'short'),
[setupsForSymbol],
);
// Use the highest-confidence setup for chart overlay fallback.
const tradeSetup: TradeSetup | undefined = useMemo(() => {
const candidates = [longSetup, shortSetup].filter(Boolean) as TradeSetup[];
if (candidates.length === 0) return undefined;
return candidates.sort((a, b) => (b.confidence_score ?? 0) - (a.confidence_score ?? 0))[0];
}, [longSetup, shortSetup]);
// Sort visible S/R levels by strength for the table (only levels within chart zones)
const sortedLevels = useMemo(() => {
if (!srLevels.data?.visible_levels) return [];
@@ -167,7 +140,7 @@ export default function TickerDetailPage() {
<p className="text-sm text-gray-500 mt-0.5">Ticker Detail</p>
</div>
<button
onClick={() => ingestion.mutate()}
onClick={() => ingestion.mutate(symbol)}
disabled={ingestion.isPending}
className="btn-gradient inline-flex items-center gap-2 px-5 py-2.5 text-sm disabled:opacity-60 disabled:cursor-not-allowed"
>
@@ -184,6 +157,8 @@ export default function TickerDetailPage() {
{/* Data freshness bar */}
<DataFreshnessBar items={dataStatus} />
<RecommendationPanel symbol={symbol} longSetup={longSetup} shortSetup={shortSetup} />
{/* Chart Section */}
<section>
<h2 className="mb-3 text-xs font-medium uppercase tracking-widest text-gray-500">Price Chart</h2>
@@ -204,39 +179,6 @@ export default function TickerDetailPage() {
)}
</section>
{/* Trade Setup Summary Card */}
{tradeSetup && (
<section>
<h2 className="mb-3 text-xs font-medium uppercase tracking-widest text-gray-500">Trade Setup</h2>
<div className="glass p-5">
<div className="flex flex-wrap items-center gap-6">
<div className="flex items-center gap-2">
<span className="text-xs text-gray-500">Direction</span>
<span className={`text-sm font-semibold ${tradeSetup.direction === 'long' ? 'text-emerald-400' : 'text-red-400'}`}>
{tradeSetup.direction.toUpperCase()}
</span>
</div>
<div className="flex items-center gap-2">
<span className="text-xs text-gray-500">Entry</span>
<span className="text-sm font-mono text-blue-300">{formatPrice(tradeSetup.entry_price)}</span>
</div>
<div className="flex items-center gap-2">
<span className="text-xs text-gray-500">Stop</span>
<span className="text-sm font-mono text-red-400">{formatPrice(tradeSetup.stop_loss)}</span>
</div>
<div className="flex items-center gap-2">
<span className="text-xs text-gray-500">Target</span>
<span className="text-sm font-mono text-emerald-400">{formatPrice(tradeSetup.target)}</span>
</div>
<div className="flex items-center gap-2">
<span className="text-xs text-gray-500">R:R</span>
<span className="text-sm font-semibold text-gray-200">{tradeSetup.rr_ratio.toFixed(2)}</span>
</div>
</div>
</div>
</section>
)}
{/* Scores + Side Panels */}
<div className="grid gap-6 lg:grid-cols-3">
<section>

View File

@@ -1,10 +1,52 @@
import { useMemo, useState } from 'react';
import { useWatchlist } from '../hooks/useWatchlist';
import { WatchlistTable } from '../components/watchlist/WatchlistTable';
import { AddTickerForm } from '../components/watchlist/AddTickerForm';
import { SkeletonTable } from '../components/ui/Skeleton';
import type { WatchlistEntry } from '../lib/types';
type SortMode = 'name_asc' | 'name_desc' | 'score_desc' | 'score_asc';
/**
 * Return a sorted copy of the watchlist (input is never mutated).
 * Name modes sort alphabetically by symbol. Score modes sort by composite
 * score with symbol as the tiebreaker; entries without a score always sort
 * last in either score direction.
 */
function sortEntries(entries: WatchlistEntry[], mode: SortMode): WatchlistEntry[] {
  const copy = [...entries];
  const bySymbol = (a: WatchlistEntry, b: WatchlistEntry) => a.symbol.localeCompare(b.symbol);
  switch (mode) {
    case 'name_asc':
      copy.sort(bySymbol);
      break;
    case 'name_desc':
      copy.sort((a, b) => b.symbol.localeCompare(a.symbol));
      break;
    case 'score_desc':
      // Missing scores become -Infinity so they land at the bottom.
      copy.sort((a, b) => {
        const left = a.composite_score ?? Number.NEGATIVE_INFINITY;
        const right = b.composite_score ?? Number.NEGATIVE_INFINITY;
        return left === right ? bySymbol(a, b) : right - left;
      });
      break;
    default:
      // 'score_asc': missing scores become +Infinity so they still sort last.
      copy.sort((a, b) => {
        const left = a.composite_score ?? Number.POSITIVE_INFINITY;
        const right = b.composite_score ?? Number.POSITIVE_INFINITY;
        return left === right ? bySymbol(a, b) : left - right;
      });
  }
  return copy;
}
export default function WatchlistPage() {
const { data, isLoading, isError, error } = useWatchlist();
const [sortMode, setSortMode] = useState<SortMode>('score_desc');
const sortedEntries = useMemo(
() => (data ? sortEntries(data, sortMode) : []),
[data, sortMode],
);
return (
<div className="space-y-6 animate-slide-up">
@@ -24,7 +66,27 @@ export default function WatchlistPage() {
</div>
)}
{data && <WatchlistTable entries={data} />}
{data && (
<div className="space-y-3">
<div className="flex justify-end">
<label className="flex items-center gap-2 text-xs text-gray-400">
<span>Sort by</span>
<select
value={sortMode}
onChange={(event) => setSortMode(event.target.value as SortMode)}
className="rounded-lg border border-white/10 bg-white/[0.03] px-2 py-1.5 text-xs text-gray-200 outline-none focus:border-blue-500/40"
>
<option value="score_desc">Score (high low)</option>
<option value="score_asc">Score (low high)</option>
<option value="name_asc">Name (A Z)</option>
<option value="name_desc">Name (Z A)</option>
</select>
</label>
</div>
<WatchlistTable entries={sortedEntries} />
</div>
)}
</div>
);
}

View File

@@ -1 +1 @@
{"root":["./src/app.tsx","./src/main.tsx","./src/vite-env.d.ts","./src/api/admin.ts","./src/api/auth.ts","./src/api/client.ts","./src/api/fundamentals.ts","./src/api/health.ts","./src/api/indicators.ts","./src/api/ingestion.ts","./src/api/ohlcv.ts","./src/api/scores.ts","./src/api/sentiment.ts","./src/api/sr-levels.ts","./src/api/tickers.ts","./src/api/trades.ts","./src/api/watchlist.ts","./src/components/admin/datacleanup.tsx","./src/components/admin/jobcontrols.tsx","./src/components/admin/settingsform.tsx","./src/components/admin/tickermanagement.tsx","./src/components/admin/usertable.tsx","./src/components/auth/protectedroute.tsx","./src/components/charts/candlestickchart.tsx","./src/components/layout/appshell.tsx","./src/components/layout/mobilenav.tsx","./src/components/layout/sidebar.tsx","./src/components/rankings/rankingstable.tsx","./src/components/rankings/weightsform.tsx","./src/components/scanner/tradetable.tsx","./src/components/ticker/dimensionbreakdownpanel.tsx","./src/components/ticker/fundamentalspanel.tsx","./src/components/ticker/indicatorselector.tsx","./src/components/ticker/sroverlay.tsx","./src/components/ticker/sentimentpanel.tsx","./src/components/ui/badge.tsx","./src/components/ui/confirmdialog.tsx","./src/components/ui/scorecard.tsx","./src/components/ui/skeleton.tsx","./src/components/ui/toast.tsx","./src/components/watchlist/addtickerform.tsx","./src/components/watchlist/watchlisttable.tsx","./src/hooks/useadmin.ts","./src/hooks/useauth.ts","./src/hooks/usescores.ts","./src/hooks/usetickerdetail.ts","./src/hooks/usetickers.ts","./src/hooks/usetrades.ts","./src/hooks/usewatchlist.ts","./src/lib/format.ts","./src/lib/types.ts","./src/pages/adminpage.tsx","./src/pages/loginpage.tsx","./src/pages/rankingspage.tsx","./src/pages/registerpage.tsx","./src/pages/scannerpage.tsx","./src/pages/tickerdetailpage.tsx","./src/pages/watchlistpage.tsx","./src/stores/authstore.ts"],"version":"5.6.3"}
{"root":["./src/app.tsx","./src/main.tsx","./src/vite-env.d.ts","./src/api/admin.ts","./src/api/auth.ts","./src/api/client.ts","./src/api/fundamentals.ts","./src/api/health.ts","./src/api/indicators.ts","./src/api/ingestion.ts","./src/api/ohlcv.ts","./src/api/scores.ts","./src/api/sentiment.ts","./src/api/sr-levels.ts","./src/api/tickers.ts","./src/api/trades.ts","./src/api/watchlist.ts","./src/components/admin/datacleanup.tsx","./src/components/admin/jobcontrols.tsx","./src/components/admin/pipelinereadinesspanel.tsx","./src/components/admin/recommendationsettings.tsx","./src/components/admin/settingsform.tsx","./src/components/admin/tickermanagement.tsx","./src/components/admin/tickeruniversebootstrap.tsx","./src/components/admin/usertable.tsx","./src/components/auth/protectedroute.tsx","./src/components/charts/candlestickchart.tsx","./src/components/layout/appshell.tsx","./src/components/layout/mobilenav.tsx","./src/components/layout/sidebar.tsx","./src/components/rankings/rankingstable.tsx","./src/components/rankings/weightsform.tsx","./src/components/scanner/tradetable.tsx","./src/components/ticker/dimensionbreakdownpanel.tsx","./src/components/ticker/fundamentalspanel.tsx","./src/components/ticker/indicatorselector.tsx","./src/components/ticker/recommendationpanel.tsx","./src/components/ticker/sroverlay.tsx","./src/components/ticker/sentimentpanel.tsx","./src/components/ui/badge.tsx","./src/components/ui/confirmdialog.tsx","./src/components/ui/scorecard.tsx","./src/components/ui/skeleton.tsx","./src/components/ui/toast.tsx","./src/components/watchlist/addtickerform.tsx","./src/components/watchlist/watchlisttable.tsx","./src/hooks/useadmin.ts","./src/hooks/useauth.ts","./src/hooks/usefetchsymboldata.ts","./src/hooks/usescores.ts","./src/hooks/usetickerdetail.ts","./src/hooks/usetickers.ts","./src/hooks/usetrades.ts","./src/hooks/usewatchlist.ts","./src/lib/format.ts","./src/lib/ingestionstatus.ts","./src/lib/recommendation.ts","./src/lib/types.ts","./src/pages/
adminpage.tsx","./src/pages/loginpage.tsx","./src/pages/rankingspage.tsx","./src/pages/registerpage.tsx","./src/pages/scannerpage.tsx","./src/pages/tickerdetailpage.tsx","./src/pages/watchlistpage.tsx","./src/stores/authstore.ts"],"version":"5.6.3"}

View File

@@ -0,0 +1,104 @@
from __future__ import annotations
from hypothesis import given, settings, strategies as st
from app.services.recommendation_service import direction_analyzer, probability_estimator
@settings(max_examples=100, deadline=None)
@given(
    technical=st.floats(min_value=0, max_value=100),
    momentum=st.floats(min_value=0, max_value=100),
    fundamental=st.floats(min_value=0, max_value=100),
    sentiment=st.sampled_from(["bearish", "neutral", "bullish", None]),
)
def test_property_confidence_bounds(technical, momentum, fundamental, sentiment):
    """Feature: intelligent-trade-recommendations, Property 3: Confidence Score Bounds."""
    dimension_scores = {
        "technical": technical,
        "momentum": momentum,
        "fundamental": fundamental,
    }
    # Confidence must stay within [0, 100] for both trade directions,
    # regardless of the score combination or sentiment label.
    for direction in ("long", "short"):
        confidence = direction_analyzer.calculate_confidence(
            direction, dimension_scores, sentiment, conflicts=[]
        )
        assert 0 <= confidence <= 100
@settings(max_examples=100, deadline=None)
@given(
    strength_low=st.floats(min_value=0, max_value=50),
    strength_high=st.floats(min_value=50, max_value=100),
)
def test_property_strength_monotonic_probability(strength_low, strength_high):
    """Feature: intelligent-trade-recommendations, Property 11: S/R Strength Monotonicity."""
    config = {
        "recommendation_signal_alignment_weight": 0.15,
        "recommendation_sr_strength_weight": 0.20,
        "recommendation_distance_penalty_factor": 0.10,
    }
    scores = {"technical": 65.0, "momentum": 65.0}

    def probability_at(strength):
        # Same target in every respect except the S/R strength under test.
        target = {
            "classification": "Moderate",
            "distance_atr_multiple": 3.0,
            "sr_strength": strength,
        }
        return probability_estimator.estimate_probability(
            target, scores, "bullish", "long", config
        )

    # A stronger S/R level must never lower the estimated probability.
    assert probability_at(strength_high) >= probability_at(strength_low)
@settings(max_examples=100, deadline=None)
@given(
    near_distance=st.floats(min_value=1.0, max_value=3.0),
    far_distance=st.floats(min_value=3.1, max_value=8.0),
)
def test_property_distance_probability_relationship(near_distance, far_distance):
    """Feature: intelligent-trade-recommendations, Property 12: Distance Probability Relationship."""
    config = {
        "recommendation_signal_alignment_weight": 0.15,
        "recommendation_sr_strength_weight": 0.20,
        "recommendation_distance_penalty_factor": 0.10,
    }
    scores = {"technical": 65.0, "momentum": 65.0}

    def probability_for(classification, distance):
        target = {
            "classification": classification,
            "sr_strength": 60,
            "distance_atr_multiple": distance,
        }
        return probability_estimator.estimate_probability(
            target, scores, "bullish", "long", config
        )

    near_prob = probability_for("Conservative", near_distance)
    far_prob = probability_for("Aggressive", far_distance)
    # Nearer targets must carry probability at least as high as farther ones.
    assert near_prob >= far_prob

View File

@@ -0,0 +1,72 @@
"""Unit tests for chained fundamentals provider fallback behavior."""
from __future__ import annotations
from datetime import datetime, timezone
import pytest
from app.exceptions import ProviderError
from app.providers.fundamentals_chain import ChainedFundamentalProvider
from app.providers.protocol import FundamentalData
class _FailProvider:
    """Test double that simulates an upstream provider outage."""

    def __init__(self, message: str) -> None:
        self._failure_reason = message

    async def fetch_fundamentals(self, ticker: str) -> FundamentalData:
        # Always fail; the ticker is embedded for easier failure diagnosis.
        raise ProviderError(f"{self._failure_reason} ({ticker})")
class _DataProvider:
    """Test double that serves a fresh copy of canned fundamental data."""

    def __init__(self, data: FundamentalData) -> None:
        self._template = data

    async def fetch_fundamentals(self, ticker: str) -> FundamentalData:
        # Build a new FundamentalData keyed to the requested ticker while
        # copying every other field from the template, so tests can assert
        # against known values.
        template = self._template
        return FundamentalData(
            ticker=ticker,
            pe_ratio=template.pe_ratio,
            revenue_growth=template.revenue_growth,
            earnings_surprise=template.earnings_surprise,
            market_cap=template.market_cap,
            fetched_at=template.fetched_at,
            unavailable_fields=template.unavailable_fields,
        )
@pytest.mark.asyncio
async def test_chained_provider_uses_fallback_provider_on_primary_failure():
    """A failing primary provider is transparently replaced by the fallback."""
    canned = FundamentalData(
        ticker="AAPL",
        pe_ratio=25.0,
        revenue_growth=None,
        earnings_surprise=None,
        market_cap=1_000_000.0,
        fetched_at=datetime.now(timezone.utc),
        unavailable_fields={},
    )
    chain = ChainedFundamentalProvider([
        ("primary", _FailProvider("primary down")),
        ("fallback", _DataProvider(canned)),
    ])

    result = await chain.fetch_fundamentals("AAPL")

    assert result.pe_ratio == 25.0
    assert result.market_cap == 1_000_000.0
    # The chain records which provider actually served the data.
    assert result.unavailable_fields.get("provider") == "fallback"
@pytest.mark.asyncio
async def test_chained_provider_raises_when_all_providers_fail():
    """When every provider in the chain errors, a summary ProviderError surfaces."""
    chain = ChainedFundamentalProvider([
        ("p1", _FailProvider("p1 failed")),
        ("p2", _FailProvider("p2 failed")),
    ])

    with pytest.raises(ProviderError) as exc_info:
        await chain.fetch_fundamentals("MSFT")

    assert "All fundamentals providers failed" in str(exc_info.value)

View File

@@ -8,6 +8,7 @@ from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from app.exceptions import ProviderError
from app.providers.openai_sentiment import OpenAISentimentProvider
@@ -160,3 +161,42 @@ class TestCitationsExtraction:
assert result.citations == []
assert result.reasoning == "Quiet day"
class TestBatchSentiment:
    """Tests for OpenAISentimentProvider batch JSON parsing behavior."""

    @pytest.mark.asyncio
    async def test_batch_sentiment_parses_multiple_tickers(self, provider):
        # Two well-formed rows should yield one result per returned ticker.
        payload = (
            '[{"ticker":"AAPL","classification":"bullish","confidence":81,"reasoning":"Positive earnings"},'
            '{"ticker":"MSFT","classification":"neutral","confidence":52,"reasoning":"Mixed guidance"}]'
        )
        provider._client.responses.create = AsyncMock(return_value=_build_response(payload))

        results = await provider.fetch_sentiment_batch(["AAPL", "MSFT"])

        assert set(results.keys()) == {"AAPL", "MSFT"}
        assert results["AAPL"].classification == "bullish"
        assert results["MSFT"].classification == "neutral"

    @pytest.mark.asyncio
    async def test_batch_sentiment_skips_invalid_rows(self, provider):
        # A row with an unrecognized classification is dropped, not fatal.
        payload = (
            '[{"ticker":"AAPL","classification":"bullish","confidence":81,"reasoning":"Positive earnings"},'
            '{"ticker":"TSLA","classification":"invalid","confidence":95,"reasoning":"Bad shape"}]'
        )
        provider._client.responses.create = AsyncMock(return_value=_build_response(payload))

        results = await provider.fetch_sentiment_batch(["AAPL", "MSFT"])

        assert set(results.keys()) == {"AAPL"}

    @pytest.mark.asyncio
    async def test_batch_sentiment_requires_array_json(self, provider):
        # A top-level JSON object (not an array) must raise ProviderError.
        payload = '{"ticker":"AAPL","classification":"bullish","confidence":81,"reasoning":"Positive earnings"}'
        provider._client.responses.create = AsyncMock(return_value=_build_response(payload))

        with pytest.raises(ProviderError):
            await provider.fetch_sentiment_batch(["AAPL", "MSFT"])

View File

@@ -0,0 +1,130 @@
from __future__ import annotations
from dataclasses import dataclass
from app.services.recommendation_service import (
direction_analyzer,
probability_estimator,
signal_conflict_detector,
target_generator,
)
@dataclass
class _SRLevelStub:
    """Minimal stand-in for an S/R level record, as consumed by target_generator."""

    # Identifier of the level row.
    id: int
    # Price at which the support/resistance level sits.
    price_level: float
    # Level kind; tests use "support" or "resistance".
    type: str
    # Level strength value; tests use values in the 60-80 range.
    strength: int
def test_high_confidence_long_example():
    """Strong technical/momentum scores plus bullish sentiment give high long confidence."""
    confidence = direction_analyzer.calculate_confidence(
        direction="long",
        dimension_scores={
            "technical": 75.0,
            "momentum": 68.0,
            "fundamental": 55.0,
        },
        sentiment_classification="bullish",
        conflicts=[],
    )
    assert confidence > 70.0
def test_high_confidence_short_example():
    """Weak technical/momentum scores plus bearish sentiment give high short confidence."""
    confidence = direction_analyzer.calculate_confidence(
        direction="short",
        dimension_scores={
            "technical": 30.0,
            "momentum": 35.0,
            "fundamental": 45.0,
        },
        sentiment_classification="bearish",
        conflicts=[],
    )
    assert confidence > 70.0
def test_detects_sentiment_technical_conflict():
    """Bearish sentiment against a strong technical score is flagged as a conflict."""
    detected = signal_conflict_detector.detect_conflicts(
        dimension_scores={"technical": 72.0, "momentum": 55.0, "fundamental": 50.0},
        sentiment_classification="bearish",
    )
    assert any("sentiment-technical" in item for item in detected)
def test_generate_targets_respects_direction_and_order():
    """Long targets sit above entry and come back ordered nearest-first."""
    entry = 100.0
    levels = [
        _SRLevelStub(id=1, price_level=110.0, type="resistance", strength=80),
        _SRLevelStub(id=2, price_level=115.0, type="resistance", strength=70),
        _SRLevelStub(id=3, price_level=120.0, type="resistance", strength=60),
        _SRLevelStub(id=4, price_level=95.0, type="support", strength=75),
    ]

    targets = target_generator.generate_targets(
        direction="long",
        entry_price=entry,
        stop_loss=96.0,
        sr_levels=levels,  # type: ignore[arg-type]
        atr_value=2.0,
    )

    assert len(targets) >= 1
    # Every long target must lie above the entry price.
    assert all(target["price"] > entry for target in targets)
    # Targets are sorted by increasing distance from entry.
    distances = [target["distance_from_entry"] for target in targets]
    assert distances == sorted(distances)
def test_probability_ranges_by_classification():
    """Conservative/Moderate/Aggressive targets land in distinct probability bands."""
    config = {
        "recommendation_signal_alignment_weight": 0.15,
        "recommendation_sr_strength_weight": 0.20,
        "recommendation_distance_penalty_factor": 0.10,
    }
    dimension_scores = {"technical": 70.0, "momentum": 70.0}

    def estimate(classification, sr_strength, distance):
        # All three calls share scores/sentiment/direction/config; only the
        # target itself varies by classification, strength, and distance.
        return probability_estimator.estimate_probability(
            {
                "classification": classification,
                "sr_strength": sr_strength,
                "distance_atr_multiple": distance,
            },
            dimension_scores,
            "bullish",
            "long",
            config,
        )

    conservative = estimate("Conservative", 80, 1.5)
    moderate = estimate("Moderate", 60, 3.0)
    aggressive = estimate("Aggressive", 40, 6.0)

    assert conservative > 60
    assert 40 <= moderate <= 70
    assert aggressive < 50

View File

@@ -228,23 +228,23 @@ async def test_scan_ticker_full_flow_quality_selection_and_persistence(
)
# -- Assert: database persistence --
# Old dummy setup should be gone, only the 2 new setups should exist
# History is preserved: old setup remains, 2 new setups are appended
db_result = await scan_session.execute(
select(TradeSetup).where(TradeSetup.ticker_id == ticker.id)
)
persisted = list(db_result.scalars().all())
assert len(persisted) == 2, (
f"Expected 2 persisted setups (old deleted), got {len(persisted)}"
assert len(persisted) == 3, (
f"Expected 3 persisted setups (1 old + 2 new), got {len(persisted)}"
)
persisted_directions = sorted(s.direction for s in persisted)
assert persisted_directions == ["long", "short"], (
f"Expected ['long', 'short'] persisted, got {persisted_directions}"
assert persisted_directions == ["long", "long", "short"], (
f"Expected ['long', 'long', 'short'] persisted, got {persisted_directions}"
)
# Verify persisted records match returned setups
persisted_long = [s for s in persisted if s.direction == "long"][0]
persisted_short = [s for s in persisted if s.direction == "short"][0]
# Verify latest persisted records match returned setups
persisted_long = max((s for s in persisted if s.direction == "long"), key=lambda s: s.id)
persisted_short = max((s for s in persisted if s.direction == "short"), key=lambda s: s.id)
assert persisted_long.target == long_setup.target
assert persisted_long.rr_ratio == long_setup.rr_ratio

View File

@@ -68,7 +68,7 @@ class TestResumeTickers:
class TestConfigureScheduler:
def test_configure_adds_four_jobs(self):
def test_configure_adds_five_jobs(self):
# Remove any existing jobs first
scheduler.remove_all_jobs()
configure_scheduler()
@@ -79,6 +79,7 @@ class TestConfigureScheduler:
"sentiment_collector",
"fundamental_collector",
"rr_scanner",
"ticker_universe_sync",
}
def test_configure_is_idempotent(self):
@@ -92,4 +93,5 @@ class TestConfigureScheduler:
"fundamental_collector",
"rr_scanner",
"sentiment_collector",
"ticker_universe_sync",
])

View File

@@ -0,0 +1,123 @@
"""Unit tests for ticker_universe_service bootstrap logic."""
from __future__ import annotations
import json
from collections.abc import AsyncGenerator
import pytest
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession, async_sessionmaker, create_async_engine
from app.database import Base
from app.exceptions import ProviderError
from app.models.settings import SystemSetting
from app.models.ticker import Ticker
from app.services import ticker_universe_service
# In-memory SQLite database shared by every test in this module; tables are
# created and dropped around each test by the autouse fixture below.
_engine = create_async_engine("sqlite+aiosqlite://", echo=False)
# Session factory bound to the in-memory engine. expire_on_commit=False keeps
# ORM attributes readable after commit without extra refresh queries.
_session_factory = async_sessionmaker(_engine, class_=AsyncSession, expire_on_commit=False)
@pytest.fixture(autouse=True)
async def _setup_tables() -> AsyncGenerator[None, None]:
    """Create all ORM tables before each test and drop them afterwards."""
    async with _engine.begin() as connection:
        await connection.run_sync(Base.metadata.create_all)
    yield
    async with _engine.begin() as connection:
        await connection.run_sync(Base.metadata.drop_all)
@pytest.fixture
async def session() -> AsyncGenerator[AsyncSession, None]:
    """Yield a fresh AsyncSession bound to the in-memory test engine."""
    async with _session_factory() as db_session:
        yield db_session
@pytest.mark.asyncio
async def test_bootstrap_universe_adds_missing_symbols(session: AsyncSession, monkeypatch: pytest.MonkeyPatch):
    """Bootstrap inserts universe symbols that are not yet tracked."""
    session.add(Ticker(symbol="AAPL"))
    await session.commit()

    async def _stub_fetch(_db: AsyncSession, _universe: str) -> list[str]:
        return ["AAPL", "MSFT", "NVDA"]

    monkeypatch.setattr(ticker_universe_service, "fetch_universe_symbols", _stub_fetch)

    summary = await ticker_universe_service.bootstrap_universe(session, "sp500")

    assert summary["added"] == 2
    assert summary["already_tracked"] == 1
    assert summary["deleted"] == 0
    rows = await session.execute(select(Ticker.symbol).order_by(Ticker.symbol.asc()))
    assert list(rows.scalars().all()) == ["AAPL", "MSFT", "NVDA"]
@pytest.mark.asyncio
async def test_bootstrap_universe_prunes_missing_symbols(session: AsyncSession, monkeypatch: pytest.MonkeyPatch):
    """With prune_missing=True, tracked symbols absent from the universe are removed."""
    session.add_all([Ticker(symbol="AAPL"), Ticker(symbol="MSFT"), Ticker(symbol="TSLA")])
    await session.commit()

    async def _stub_fetch(_db: AsyncSession, _universe: str) -> list[str]:
        return ["AAPL", "MSFT"]

    monkeypatch.setattr(ticker_universe_service, "fetch_universe_symbols", _stub_fetch)

    summary = await ticker_universe_service.bootstrap_universe(
        session,
        "sp500",
        prune_missing=True,
    )

    assert summary["added"] == 0
    assert summary["already_tracked"] == 2
    assert summary["deleted"] == 1
    rows = await session.execute(select(Ticker.symbol).order_by(Ticker.symbol.asc()))
    assert list(rows.scalars().all()) == ["AAPL", "MSFT"]
@pytest.mark.asyncio
async def test_fetch_universe_symbols_uses_cached_snapshot_when_live_sources_fail(
    session: AsyncSession,
    monkeypatch: pytest.MonkeyPatch,
):
    """When both live sources fail, symbols come from the cached DB snapshot."""
    session.add(
        SystemSetting(
            key="ticker_universe_cache_sp500",
            value=json.dumps({"symbols": ["AAPL", "MSFT"], "source": "test"}),
        )
    )
    await session.commit()

    async def _public_down(_universe: str):
        return [], ["public failed"], None

    async def _fmp_down(_universe: str):
        raise ProviderError("fmp failed")

    monkeypatch.setattr(ticker_universe_service, "_fetch_universe_symbols_from_public", _public_down)
    monkeypatch.setattr(ticker_universe_service, "_fetch_universe_symbols_from_fmp", _fmp_down)

    symbols = await ticker_universe_service.fetch_universe_symbols(session, "sp500")

    assert symbols == ["AAPL", "MSFT"]
@pytest.mark.asyncio
async def test_fetch_universe_symbols_uses_seed_when_live_and_cache_fail(
    session: AsyncSession,
    monkeypatch: pytest.MonkeyPatch,
):
    """With no live data and no cached snapshot, the built-in seed list is used."""
    async def _public_down(_universe: str):
        return [], ["public failed"], None

    async def _fmp_down(_universe: str):
        raise ProviderError("fmp failed")

    monkeypatch.setattr(ticker_universe_service, "_fetch_universe_symbols_from_public", _public_down)
    monkeypatch.setattr(ticker_universe_service, "_fetch_universe_symbols_from_fmp", _fmp_down)

    symbols = await ticker_universe_service.fetch_universe_symbols(session, "sp500")

    # The seed universe should at minimum contain well-known large caps.
    assert "AAPL" in symbols
    assert len(symbols) > 10