diff --git a/.env.example b/.env.example index e6cee4c..ccc4a0e 100644 --- a/.env.example +++ b/.env.example @@ -13,14 +13,27 @@ ALPACA_API_SECRET= GEMINI_API_KEY= GEMINI_MODEL=gemini-2.0-flash +# Sentiment Provider — OpenAI +OPENAI_API_KEY= +OPENAI_MODEL=gpt-4o-mini +OPENAI_SENTIMENT_BATCH_SIZE=5 + # Fundamentals Provider — Financial Modeling Prep FMP_API_KEY= +# Fundamentals Provider — Finnhub (optional fallback) +FINNHUB_API_KEY= + +# Fundamentals Provider — Alpha Vantage (optional fallback) +ALPHA_VANTAGE_API_KEY= + # Scheduled Jobs DATA_COLLECTOR_FREQUENCY=daily SENTIMENT_POLL_INTERVAL_MINUTES=30 FUNDAMENTAL_FETCH_FREQUENCY=daily RR_SCAN_FREQUENCY=daily +FUNDAMENTAL_RATE_LIMIT_RETRIES=3 +FUNDAMENTAL_RATE_LIMIT_BACKOFF_SECONDS=15 # Scoring Defaults DEFAULT_WATCHLIST_AUTO_SIZE=10 diff --git a/.gitea/workflows/deploy.yml b/.gitea/workflows/deploy.yml index ac51152..3d0c326 100644 --- a/.gitea/workflows/deploy.yml +++ b/.gitea/workflows/deploy.yml @@ -40,6 +40,9 @@ jobs: - uses: actions/setup-python@v5 with: python-version: "3.12" + - uses: actions/setup-node@v4 + with: + node-version: "20" - run: pip install -e ".[dev]" - run: alembic upgrade head env: @@ -47,6 +50,15 @@ jobs: - run: pytest --tb=short env: DATABASE_URL: postgresql+asyncpg://test_user:test_pass@localhost:5432/test_db + - run: | + cd frontend + npm ci + if node -e "require.resolve('vitest/package.json')" >/dev/null 2>&1; then + npm test + else + echo "vitest not configured; skipping frontend tests" + fi + npm run build deploy: needs: test @@ -65,4 +77,8 @@ jobs: source .venv/bin/activate pip install -e . alembic upgrade head + cd frontend + npm ci + npm run build + cd .. 
sudo systemctl restart stock-data-backend diff --git a/.kiro/hooks/code-quality-analyzer.kiro.hook b/.kiro/hooks/code-quality-analyzer.kiro.hook new file mode 100644 index 0000000..2888151 --- /dev/null +++ b/.kiro/hooks/code-quality-analyzer.kiro.hook @@ -0,0 +1,27 @@ +{ + "enabled": true, + "name": "Code Quality Analyzer", + "description": "Analyzes modified source code files for potential improvements including code smells, design patterns, best practices, readability, maintainability, and performance optimizations", + "version": "1", + "when": { + "type": "fileEdited", + "patterns": [ + "*.py", + "*.ts", + "*.tsx", + "*.js", + "*.jsx", + "*.java", + "*.go", + "*.rs", + "*.cpp", + "*.c", + "*.h", + "*.cs" + ] + }, + "then": { + "type": "askAgent", + "prompt": "Analyze the modified code for potential improvements. Check for: 1) Code smells and anti-patterns, 2) Opportunities to apply design patterns, 3) Best practices violations, 4) Readability improvements, 5) Maintainability concerns, 6) Performance optimization opportunities. Provide specific, actionable suggestions while ensuring functionality remains intact." + } +} \ No newline at end of file diff --git a/.kiro/hooks/update-docs-on-change.kiro.hook b/.kiro/hooks/update-docs-on-change.kiro.hook new file mode 100644 index 0000000..cbb4200 --- /dev/null +++ b/.kiro/hooks/update-docs-on-change.kiro.hook @@ -0,0 +1,19 @@ +{ + "enabled": true, + "name": "Update Docs on Code Change", + "description": "Monitors Python source files and prompts agent to update README.md or docs folder when code changes are saved", + "version": "1", + "when": { + "type": "fileEdited", + "patterns": [ + "*.py", + "requirements.txt", + "pyproject.toml", + "alembic.ini" + ] + }, + "then": { + "type": "askAgent", + "prompt": "A source file was just modified. Review the changes and update the documentation in README.md to reflect any new features, API changes, configuration updates, or important implementation details. 
Keep the documentation clear, accurate, and up-to-date." + } +} \ No newline at end of file diff --git a/.kiro/settings/mcp.json b/.kiro/settings/mcp.json index 991f52d..1f4957f 100644 --- a/.kiro/settings/mcp.json +++ b/.kiro/settings/mcp.json @@ -1,80 +1,5 @@ { "mcpServers": { - "context7": { - "gallery": true, - "command": "npx", - "args": [ - "-y", - "@upstash/context7-mcp@latest" - ], - "env": { - "HTTP_PROXY": "http://aproxy.corproot.net:8080", - "HTTPS_PROXY": "http://aproxy.corproot.net:8080" - }, - "type": "stdio" - }, - "aws.mcp": { - "command": "uvx", - "timeout": 100000, - "transport": "stdio", - "args": [ - "mcp-proxy-for-aws@latest", - "https://aws-mcp.us-east-1.api.aws/mcp" - ], - "env": { - "AWS_PROFILE": "409330224121_sc-ps-standard-admin", - "AWS_REGION": "eu-central-2", - "HTTP_PROXY": "http://aproxy.corproot.net:8080", - "HTTPS_PROXY": "http://aproxy.corproot.net:8080", - "SSL_CERT_FILE": "/Users/taathde3/combined-ca-bundle.pem", - "REQUESTS_CA_BUNDLE": "/Users/taathde3/combined-ca-bundle.pem" - - }, - "disabled": false, - "autoApprove": [] - }, - "aws.eks.mcp": { - "command": "uvx", - "timeout": 100000, - "transport": "stdio", - "args": [ - "mcp-proxy-for-aws@latest", - "https://eks-mcp.eu-central-1.api.aws/mcp", - "--service", - "eks-mcp" - ], - "env": { - "AWS_PROFILE": "409330224121_sc-ps-standard-admin", - "AWS_REGION": "eu-central-2", - "HTTP_PROXY": "http://aproxy.corproot.net:8080", - "HTTPS_PROXY": "http://aproxy.corproot.net:8080", - "SSL_CERT_FILE": "/Users/taathde3/combined-ca-bundle.pem", - "REQUESTS_CA_BUNDLE": "/Users/taathde3/combined-ca-bundle.pem" - }, - "disabled": false, - "autoApprove": [] - }, - "aws.ecs.mcp": { - "command": "uvx", - "timeout": 100000, - "transport": "stdio", - "args": [ - "mcp-proxy-for-aws@latest", - "https://ecs-mcp.us-east-1.api.aws/mcp", - "--service", - "ecs-mcp" - ], - "env": { - "AWS_PROFILE": "409330224121_sc-ps-standard-admin", - "AWS_REGION": "eu-central-2", - "HTTP_PROXY": 
"http://aproxy.corproot.net:8080", - "HTTPS_PROXY": "http://aproxy.corproot.net:8080", - "SSL_CERT_FILE": "/Users/taathde3/combined-ca-bundle.pem", - "REQUESTS_CA_BUNDLE": "/Users/taathde3/combined-ca-bundle.pem" - }, - "disabled": false, - "autoApprove": [] - }, "iaws.support.agent": { "command": "uvx", "args": [ diff --git a/.kiro/specs/intelligent-trade-recommendations/.config.kiro b/.kiro/specs/intelligent-trade-recommendations/.config.kiro new file mode 100644 index 0000000..9f9ba88 --- /dev/null +++ b/.kiro/specs/intelligent-trade-recommendations/.config.kiro @@ -0,0 +1 @@ +{"specId": "71b6e4c6-56fa-4d43-b1ca-c4a89e8c8b5e", "workflowType": "requirements-first", "specType": "feature"} diff --git a/.kiro/specs/intelligent-trade-recommendations/design.md b/.kiro/specs/intelligent-trade-recommendations/design.md new file mode 100644 index 0000000..3e76603 --- /dev/null +++ b/.kiro/specs/intelligent-trade-recommendations/design.md @@ -0,0 +1,2079 @@ +# Design Document: Intelligent Trade Recommendation System + +## Overview + +The Intelligent Trade Recommendation System enhances the Signal Dashboard's trade setup generation by providing bidirectional analysis (LONG and SHORT), confidence scoring, multiple price targets with probability estimates, and signal conflict detection. This system transforms raw multi-dimensional signals into actionable trading recommendations suitable for non-professional traders. 
+ +### Goals + +- Generate both LONG and SHORT trade setups for every ticker with independent confidence scores +- Identify 3-5 price targets at S/R levels with probability estimates for staged profit-taking +- Detect and flag contradictions between sentiment, technical, momentum, and fundamental signals +- Provide clear recommendation summaries with action, reasoning, and risk level +- Maintain performance targets: 500ms per ticker, 10 tickers/second batch processing + +### Non-Goals + +- Real-time trade execution or order management +- Backtesting or historical performance tracking (deferred to future phase) +- Machine learning-based prediction models +- Integration with external trading platforms + +### Key Design Decisions + +1. **Extend TradeSetup model** rather than create new tables to maintain backward compatibility +2. **Synchronous recommendation generation** during R:R scanner job (no separate scheduled job) +3. **Quality-score based target selection** combining R:R ratio, S/R strength, and proximity +4. **Rule-based confidence scoring** using dimension score thresholds and alignment checks +5. 
**JSON fields for flexible data** (targets array, conflict flags) to avoid complex schema changes + + + +## Architecture + +### System Context + +```mermaid +graph TB + subgraph "Existing System" + RR[R:R Scanner Service] + SCORE[Scoring Service] + SR[S/R Service] + IND[Indicator Service] + SENT[Sentiment Service] + FUND[Fundamental Service] + end + + subgraph "New Components" + REC[Recommendation Engine] + DIR[Direction Analyzer] + TGT[Target Generator] + PROB[Probability Estimator] + CONF[Signal Conflict Detector] + end + + subgraph "Data Layer" + TS[(TradeSetup Model)] + DS[(DimensionScore)] + SRL[(SRLevel)] + SENT_M[(SentimentScore)] + end + + RR --> REC + SCORE --> REC + SR --> REC + + REC --> DIR + REC --> TGT + REC --> PROB + REC --> CONF + + DIR --> TS + TGT --> TS + PROB --> TS + CONF --> TS + + DS --> DIR + DS --> CONF + SRL --> TGT + SRL --> PROB + SENT_M --> CONF +``` + +### Integration Strategy + +The recommendation system integrates into the existing R:R scanner workflow: + +1. **Trigger Point**: `rr_scanner_service.scan_ticker()` generates base LONG/SHORT setups +2. **Enhancement Phase**: New `recommendation_service.enhance_trade_setup()` enriches each setup +3. **Persistence**: Extended TradeSetup model stores all recommendation data +4. **API Layer**: Existing `/api/v1/trades` endpoints return enhanced data + +This approach ensures: +- Zero breaking changes to existing scanner logic +- Backward compatibility with current TradeSetup consumers +- Single transaction for setup generation and enhancement +- No additional scheduled jobs required + + + +## Components and Interfaces + +### Recommendation Engine (recommendation_service.py) + +**Responsibility**: Orchestrate the recommendation generation process for a trade setup. 
+ +**Interface**: +```python +async def enhance_trade_setup( + db: AsyncSession, + ticker: Ticker, + setup: TradeSetup, + dimension_scores: dict[str, float], + sr_levels: list[SRLevel], + sentiment_classification: str | None, + atr_value: float, +) -> TradeSetup: + """Enhance a base trade setup with recommendation data. + + Args: + db: Database session + ticker: Ticker model instance + setup: Base TradeSetup with direction, entry, stop, target, rr_ratio + dimension_scores: Dict of dimension -> score (technical, sentiment, momentum, etc.) + sr_levels: All S/R levels for the ticker + sentiment_classification: Latest sentiment (bearish/neutral/bullish) + atr_value: Current ATR for volatility adjustment + + Returns: + Enhanced TradeSetup with confidence_score, targets, conflict_flags, etc. + """ +``` + +**Algorithm**: +1. Call `direction_analyzer.calculate_confidence()` to get confidence score +2. Call `target_generator.generate_targets()` to get 3-5 targets +3. Call `probability_estimator.estimate_probabilities()` for each target +4. Call `signal_conflict_detector.detect_conflicts()` to identify contradictions +5. Generate recommendation summary based on confidence and conflicts +6. Update setup model with all recommendation data +7. Return enhanced setup + +**Dependencies**: All four sub-components, SystemSetting for thresholds + + + +### Direction Analyzer + +**Responsibility**: Calculate confidence scores for LONG and SHORT directions based on signal alignment. + +**Interface**: +```python +def calculate_confidence( + direction: str, + dimension_scores: dict[str, float], + sentiment_classification: str | None, +) -> float: + """Calculate confidence score (0-100%) for a trade direction. 
+ + Args: + direction: "long" or "short" + dimension_scores: Dict with keys: technical, sentiment, momentum, fundamental + sentiment_classification: "bearish", "neutral", "bullish", or None + + Returns: + Confidence score 0-100% + """ +``` + +**Algorithm**: +``` +Base confidence = 50.0 + +For LONG direction: + - If technical > 60: add 15 points + - If technical > 70: add additional 10 points + - If momentum > 60: add 15 points + - If sentiment is "bullish": add 15 points + - If fundamental > 60: add 10 points + +For SHORT direction: + - If technical < 40: add 15 points + - If technical < 30: add additional 10 points + - If momentum < 40: add 15 points + - If sentiment is "bearish": add 15 points + - If fundamental < 40: add 10 points + +Clamp result to [0, 100] +``` + +**Rationale**: Rule-based scoring provides transparency and predictability. Weights favor technical and momentum (15 points each) as they reflect price action, with sentiment and fundamentals as supporting factors. + + + +### Target Generator + +**Responsibility**: Identify 3-5 price targets at S/R levels with classification and R:R calculation. + +**Interface**: +```python +def generate_targets( + direction: str, + entry_price: float, + stop_loss: float, + sr_levels: list[SRLevel], + atr_value: float, +) -> list[dict]: + """Generate multiple price targets for a trade setup. + + Args: + direction: "long" or "short" + entry_price: Entry price for the trade + stop_loss: Stop loss price + sr_levels: All S/R levels for the ticker + atr_value: Current ATR value + + Returns: + List of target dicts with keys: + - price: Target price level + - distance_from_entry: Absolute distance + - distance_atr_multiple: Distance as multiple of ATR + - rr_ratio: Risk-reward ratio for this target + - classification: "Conservative", "Moderate", or "Aggressive" + - sr_level_id: ID of the S/R level used + - sr_strength: Strength score of the S/R level + """ +``` + +**Algorithm**: +``` +1. 
Filter S/R levels by direction: + - LONG: resistance levels above entry (type="resistance", price > entry) + - SHORT: support levels below entry (type="support", price < entry) + +2. Apply volatility filter: + - Exclude levels within 1x ATR of entry (too close) + - If ATR > 5% of price: include levels up to 10x ATR + - If ATR < 2% of price: limit to levels within 3x ATR + +3. Calculate quality score for each candidate: + quality = 0.35 * norm_rr + 0.35 * norm_strength + 0.30 * norm_proximity + where: + - norm_rr = min(rr_ratio / 10.0, 1.0) + - norm_strength = strength / 100.0 + - norm_proximity = 1.0 - min(distance / entry, 1.0) + +4. Sort candidates by quality score descending + +5. Select top 3-5 targets: + - Take top 5 if available + - Minimum 3 required (flag setup if fewer) + +6. Classify targets by distance: + - Conservative: nearest 1-2 targets + - Aggressive: furthest 1-2 targets + - Moderate: middle targets + +7. Calculate R:R ratio for each target: + risk = abs(entry_price - stop_loss) + reward = abs(target_price - entry_price) + rr_ratio = reward / risk +``` + +**Rationale**: Quality-based selection ensures targets balance multiple factors. ATR-based filtering adapts to volatility. Classification helps traders plan staged exits. + + + +### Probability Estimator + +**Responsibility**: Calculate probability (0-100%) of reaching each price target. + +**Interface**: +```python +def estimate_probability( + target: dict, + dimension_scores: dict[str, float], + sentiment_classification: str | None, + direction: str, + config: dict, +) -> float: + """Estimate probability of reaching a price target. + + Args: + target: Target dict from generate_targets() + dimension_scores: Current dimension scores + sentiment_classification: Latest sentiment + direction: "long" or "short" + config: Configuration dict with weights from SystemSetting + + Returns: + Probability percentage 0-100% + """ +``` + +**Algorithm**: +``` +Base probability calculation: + +1. 
Distance factor (40% weight): + - Conservative targets (nearest): base = 70% + - Moderate targets (middle): base = 55% + - Aggressive targets (furthest): base = 40% + +2. S/R strength factor (30% weight): + - strength >= 80: add 15% + - strength 60-79: add 10% + - strength 40-59: add 5% + - strength < 40: subtract 10% + +3. Signal alignment factor (20% weight): + - Check if signals support direction: + * LONG: technical > 60 AND (sentiment bullish OR momentum > 60) + * SHORT: technical < 40 AND (sentiment bearish OR momentum < 40) + - If aligned: add 15% + - If conflicted: subtract 15% + +4. Volatility factor (10% weight): + - If distance_atr_multiple > 5: add 5% (high volatility favors distant targets) + - If distance_atr_multiple < 2: add 5% (low volatility favors near targets) + +Final probability = base + strength_adj + alignment_adj + volatility_adj +Clamp to [10, 90] (never 0% or 100% to reflect uncertainty) +``` + +**Configuration Parameters** (stored in SystemSetting): +- `signal_alignment_weight`: Default 0.15 (15%) +- `sr_strength_weight`: Default 0.20 (20%) +- `distance_penalty_factor`: Default 0.10 (10%) + +**Rationale**: Multi-factor approach balances distance (primary), S/R quality (secondary), and signal confirmation (tertiary). Clamping to [10, 90] acknowledges market uncertainty. + + + +### Signal Conflict Detector + +**Responsibility**: Identify contradictions between sentiment, technical, momentum, and fundamental signals. + +**Interface**: +```python +def detect_conflicts( + dimension_scores: dict[str, float], + sentiment_classification: str | None, +) -> list[str]: + """Detect signal conflicts across dimensions. 
+ + Args: + dimension_scores: Dict with technical, sentiment, momentum, fundamental scores + sentiment_classification: "bearish", "neutral", "bullish", or None + + Returns: + List of conflict descriptions, e.g.: + - "sentiment-technical: Bearish sentiment conflicts with bullish technical (72)" + - "momentum-technical: Momentum (35) diverges from technical (68) by 33 points" + """ +``` + +**Algorithm**: +``` +Conflicts detected: + +1. Sentiment-Technical conflict: + - If sentiment="bearish" AND technical > 60: flag conflict + - If sentiment="bullish" AND technical < 40: flag conflict + - Message: "sentiment-technical: {sentiment} sentiment conflicts with {direction} technical ({score})" + +2. Momentum-Technical divergence: + - If abs(momentum - technical) > 30: flag conflict + - Message: "momentum-technical: Momentum ({momentum}) diverges from technical ({technical}) by {diff} points" + +3. Sentiment-Momentum conflict: + - If sentiment="bearish" AND momentum > 60: flag conflict + - If sentiment="bullish" AND momentum < 40: flag conflict + - Message: "sentiment-momentum: {sentiment} sentiment conflicts with momentum ({score})" + +4. Fundamental-Technical divergence (informational only): + - If abs(fundamental - technical) > 40: flag as "weak conflict" + - Message: "fundamental-technical: Fundamental ({fund}) diverges significantly from technical ({tech})" + +Return list of all detected conflicts +``` + +**Impact on Confidence**: +- Each conflict reduces confidence by 15-25%: + - Sentiment-Technical: -20% + - Momentum-Technical: -15% + - Sentiment-Momentum: -20% + - Fundamental-Technical: -10% (weaker impact) +- Applied in `direction_analyzer.calculate_confidence()` after base calculation + +**Rationale**: Conflicts indicate uncertainty and increase risk. Sentiment-technical conflicts are most serious as they represent narrative vs. price action divergence. 
+ + + +## Data Models + +### Extended TradeSetup Model + +**New Fields**: +```python +class TradeSetup(Base): + __tablename__ = "trade_setups" + + # Existing fields (unchanged) + id: Mapped[int] = mapped_column(primary_key=True) + ticker_id: Mapped[int] = mapped_column(ForeignKey("tickers.id", ondelete="CASCADE")) + direction: Mapped[str] = mapped_column(String(10), nullable=False) + entry_price: Mapped[float] = mapped_column(Float, nullable=False) + stop_loss: Mapped[float] = mapped_column(Float, nullable=False) + target: Mapped[float] = mapped_column(Float, nullable=False) # Primary target + rr_ratio: Mapped[float] = mapped_column(Float, nullable=False) # Primary R:R + composite_score: Mapped[float] = mapped_column(Float, nullable=False) + detected_at: Mapped[datetime] = mapped_column(DateTime(timezone=True)) + + # NEW: Recommendation fields + confidence_score: Mapped[float | None] = mapped_column(Float, nullable=True) + targets_json: Mapped[str | None] = mapped_column(Text, nullable=True) + conflict_flags_json: Mapped[str | None] = mapped_column(Text, nullable=True) + recommended_action: Mapped[str | None] = mapped_column(String(20), nullable=True) + reasoning: Mapped[str | None] = mapped_column(Text, nullable=True) + risk_level: Mapped[str | None] = mapped_column(String(10), nullable=True) +``` + +**Field Descriptions**: + +- `confidence_score`: Float 0-100, confidence in this direction +- `targets_json`: JSON array of target objects (see schema below) +- `conflict_flags_json`: JSON array of conflict strings +- `recommended_action`: Enum-like string: "LONG_HIGH", "LONG_MODERATE", "SHORT_HIGH", "SHORT_MODERATE", "NEUTRAL" +- `reasoning`: Human-readable explanation of recommendation +- `risk_level`: "Low", "Medium", or "High" based on conflicts + +**Targets JSON Schema**: +```json +[ + { + "price": 150.25, + "distance_from_entry": 5.25, + "distance_atr_multiple": 2.5, + "rr_ratio": 3.5, + "probability": 65.0, + "classification": "Conservative", + "sr_level_id": 
42, + "sr_strength": 75 + }, + ... +] +``` + +**Conflict Flags JSON Schema**: +```json +[ + "sentiment-technical: Bearish sentiment conflicts with bullish technical (72)", + "momentum-technical: Momentum (35) diverges from technical (68) by 33 points" +] +``` + +**Backward Compatibility**: +- All new fields are nullable +- Existing `target` and `rr_ratio` fields remain as primary target data +- Old consumers can ignore new fields +- New consumers use `targets_json` for full target list + + + +### SystemSetting Extensions + +**New Configuration Keys**: + +```python +# Recommendation thresholds +"recommendation_high_confidence_threshold": 70.0 # % for "High Confidence" +"recommendation_moderate_confidence_threshold": 50.0 # % for "Moderate Confidence" +"recommendation_confidence_diff_threshold": 20.0 # % difference for directional recommendation + +# Probability calculation weights +"recommendation_signal_alignment_weight": 0.15 # 15% +"recommendation_sr_strength_weight": 0.20 # 20% +"recommendation_distance_penalty_factor": 0.10 # 10% + +# Conflict detection thresholds +"recommendation_momentum_technical_divergence_threshold": 30.0 # points +"recommendation_fundamental_technical_divergence_threshold": 40.0 # points +``` + +**Access Pattern**: +```python +async def get_recommendation_config(db: AsyncSession) -> dict: + """Load all recommendation configuration from SystemSetting.""" + # Query all keys starting with "recommendation_" + # Return dict with defaults for missing keys +``` + +### No New Tables Required + +The design intentionally avoids new tables to minimize schema complexity: +- TradeSetup extensions handle all recommendation data +- JSON fields provide flexibility for evolving data structures +- SystemSetting stores configuration +- Existing relationships (Ticker → TradeSetup) remain unchanged + + + +## Algorithm Design + +### Confidence Scoring Formula + +**Detailed Implementation**: + +```python +def calculate_confidence( + direction: str, + 
dimension_scores: dict[str, float], + sentiment_classification: str | None, + conflicts: list[str], +) -> float: + """Calculate confidence score with conflict penalties.""" + + base = 50.0 + technical = dimension_scores.get("technical", 50.0) + momentum = dimension_scores.get("momentum", 50.0) + fundamental = dimension_scores.get("fundamental", 50.0) + + if direction == "long": + # Technical contribution + if technical > 70: + base += 25.0 + elif technical > 60: + base += 15.0 + + # Momentum contribution + if momentum > 70: + base += 20.0 + elif momentum > 60: + base += 15.0 + + # Sentiment contribution + if sentiment_classification == "bullish": + base += 15.0 + elif sentiment_classification == "neutral": + base += 5.0 + + # Fundamental contribution + if fundamental > 60: + base += 10.0 + + elif direction == "short": + # Technical contribution + if technical < 30: + base += 25.0 + elif technical < 40: + base += 15.0 + + # Momentum contribution + if momentum < 30: + base += 20.0 + elif momentum < 40: + base += 15.0 + + # Sentiment contribution + if sentiment_classification == "bearish": + base += 15.0 + elif sentiment_classification == "neutral": + base += 5.0 + + # Fundamental contribution + if fundamental < 40: + base += 10.0 + + # Apply conflict penalties + for conflict in conflicts: + if "sentiment-technical" in conflict: + base -= 20.0 + elif "momentum-technical" in conflict: + base -= 15.0 + elif "sentiment-momentum" in conflict: + base -= 20.0 + elif "fundamental-technical" in conflict: + base -= 10.0 + + return max(0.0, min(100.0, base)) +``` + +**Scoring Breakdown**: +- Base: 50 points (neutral starting point) +- Technical: up to 25 points (most important - reflects price action) +- Momentum: up to 20 points (confirms trend strength) +- Sentiment: up to 15 points (narrative support) +- Fundamental: up to 10 points (value support) +- Maximum possible: 120 points before conflicts +- Conflicts: -10 to -20 points each + + + +### Probability Calculation Formula 
+ +**Detailed Implementation**: + +```python +def estimate_probability( + target: dict, + dimension_scores: dict[str, float], + sentiment_classification: str | None, + direction: str, + config: dict, +) -> float: + """Estimate probability of reaching a price target.""" + + # 1. Base probability from classification (40% weight) + classification = target["classification"] + if classification == "Conservative": + base_prob = 70.0 + elif classification == "Moderate": + base_prob = 55.0 + else: # Aggressive + base_prob = 40.0 + + # 2. S/R strength adjustment (30% weight) + strength = target["sr_strength"] + if strength >= 80: + strength_adj = 15.0 + elif strength >= 60: + strength_adj = 10.0 + elif strength >= 40: + strength_adj = 5.0 + else: + strength_adj = -10.0 + + # 3. Signal alignment adjustment (20% weight) + technical = dimension_scores.get("technical", 50.0) + momentum = dimension_scores.get("momentum", 50.0) + + alignment_adj = 0.0 + if direction == "long": + if technical > 60 and (sentiment_classification == "bullish" or momentum > 60): + alignment_adj = 15.0 + elif technical < 40 or (sentiment_classification == "bearish" and momentum < 40): + alignment_adj = -15.0 + elif direction == "short": + if technical < 40 and (sentiment_classification == "bearish" or momentum < 40): + alignment_adj = 15.0 + elif technical > 60 or (sentiment_classification == "bullish" and momentum > 60): + alignment_adj = -15.0 + + # 4. 
Volatility adjustment (10% weight) + atr_multiple = target["distance_atr_multiple"] + volatility_adj = 0.0 + if atr_multiple > 5: + volatility_adj = 5.0 # High volatility favors distant targets + elif atr_multiple < 2: + volatility_adj = 5.0 # Low volatility favors near targets + + # Combine all factors + probability = base_prob + strength_adj + alignment_adj + volatility_adj + + # Clamp to [10, 90] to reflect uncertainty + return max(10.0, min(90.0, probability)) +``` + +**Probability Ranges by Classification**: +- Conservative: 60-90% (typically 70-85%) +- Moderate: 40-70% (typically 50-65%) +- Aggressive: 10-50% (typically 30-45%) + + + +### Signal Alignment Logic + +**Implementation**: + +```python +def check_signal_alignment( + direction: str, + dimension_scores: dict[str, float], + sentiment_classification: str | None, +) -> tuple[bool, str]: + """Check if signals align with the trade direction. + + Returns: + (is_aligned, description) + """ + technical = dimension_scores.get("technical", 50.0) + momentum = dimension_scores.get("momentum", 50.0) + + if direction == "long": + tech_bullish = technical > 60 + momentum_bullish = momentum > 60 + sentiment_bullish = sentiment_classification == "bullish" + + # Need at least 2 of 3 signals aligned + aligned_count = sum([tech_bullish, momentum_bullish, sentiment_bullish]) + + if aligned_count >= 2: + return True, f"Signals aligned for LONG: technical={technical:.0f}, momentum={momentum:.0f}, sentiment={sentiment_classification}" + else: + return False, f"Mixed signals for LONG: technical={technical:.0f}, momentum={momentum:.0f}, sentiment={sentiment_classification}" + + elif direction == "short": + tech_bearish = technical < 40 + momentum_bearish = momentum < 40 + sentiment_bearish = sentiment_classification == "bearish" + + # Need at least 2 of 3 signals aligned + aligned_count = sum([tech_bearish, momentum_bearish, sentiment_bearish]) + + if aligned_count >= 2: + return True, f"Signals aligned for SHORT: 
technical={technical:.0f}, momentum={momentum:.0f}, sentiment={sentiment_classification}" + else: + return False, f"Mixed signals for SHORT: technical={technical:.0f}, momentum={momentum:.0f}, sentiment={sentiment_classification}" + + return False, "Unknown direction" +``` + +**Alignment Criteria**: +- LONG: At least 2 of [technical > 60, momentum > 60, sentiment bullish] +- SHORT: At least 2 of [technical < 40, momentum < 40, sentiment bearish] +- Fundamental score is informational but not required for alignment + + + +## API Design + +### Enhanced Trade Setup Endpoints + +**GET /api/v1/trades** + +Returns all trade setups with recommendation data. + +**Query Parameters**: +- `direction`: Optional filter ("long" or "short") +- `min_confidence`: Optional minimum confidence score (0-100) +- `recommended_action`: Optional filter ("LONG_HIGH", "LONG_MODERATE", "SHORT_HIGH", "SHORT_MODERATE", "NEUTRAL") + +**Response Schema**: +```json +{ + "status": "success", + "data": [ + { + "id": 1, + "symbol": "AAPL", + "direction": "long", + "entry_price": 145.00, + "stop_loss": 142.50, + "target": 150.00, + "rr_ratio": 2.0, + "composite_score": 75.5, + "detected_at": "2024-01-15T10:30:00Z", + + "confidence_score": 72.5, + "recommended_action": "LONG_HIGH", + "reasoning": "Strong technical (75) and bullish sentiment align with upward momentum (68). 
No major conflicts detected.", + "risk_level": "Low", + + "targets": [ + { + "price": 147.50, + "distance_from_entry": 2.50, + "distance_atr_multiple": 1.5, + "rr_ratio": 1.0, + "probability": 75.0, + "classification": "Conservative", + "sr_level_id": 42, + "sr_strength": 80 + }, + { + "price": 150.00, + "distance_from_entry": 5.00, + "distance_atr_multiple": 3.0, + "rr_ratio": 2.0, + "probability": 60.0, + "classification": "Moderate", + "sr_level_id": 43, + "sr_strength": 70 + }, + { + "price": 155.00, + "distance_from_entry": 10.00, + "distance_atr_multiple": 6.0, + "rr_ratio": 4.0, + "probability": 35.0, + "classification": "Aggressive", + "sr_level_id": 44, + "sr_strength": 60 + } + ], + + "conflict_flags": [] + } + ] +} +``` + +**GET /api/v1/trades/{symbol}** + +Returns trade setups for a specific ticker (both LONG and SHORT if available). + +**Response**: Same schema as above, filtered by symbol. + + + +### Admin Configuration Endpoints + +**GET /api/v1/admin/settings/recommendations** + +Get current recommendation configuration. + +**Response**: +```json +{ + "status": "success", + "data": { + "high_confidence_threshold": 70.0, + "moderate_confidence_threshold": 50.0, + "confidence_diff_threshold": 20.0, + "signal_alignment_weight": 0.15, + "sr_strength_weight": 0.20, + "distance_penalty_factor": 0.10, + "momentum_technical_divergence_threshold": 30.0, + "fundamental_technical_divergence_threshold": 40.0 + } +} +``` + +**PUT /api/v1/admin/settings/recommendations** + +Update recommendation configuration. + +**Request Body**: +```json +{ + "high_confidence_threshold": 75.0, + "signal_alignment_weight": 0.20 +} +``` + +**Response**: Updated configuration object. 
+ +**Validation**: +- All thresholds must be 0-100 +- All weights must be 0-1 +- Returns 400 error for invalid values + + + +## Frontend Components + +### Ticker Detail Page Enhancement + +**Location**: `frontend/src/components/ticker/RecommendationPanel.tsx` + +**Component Structure**: +```tsx +interface RecommendationPanelProps { + symbol: string; + longSetup?: TradeSetup; + shortSetup?: TradeSetup; +} + +export function RecommendationPanel({ symbol, longSetup, shortSetup }: RecommendationPanelProps) { + // Display recommendation summary at top + // Show LONG and SHORT setups side-by-side + // Highlight recommended direction + // Display targets table for each direction + // Show conflict warnings if present +} +``` + +**Visual Design**: +- Recommendation summary card at top with large action text and confidence badge +- Two-column layout: LONG setup on left, SHORT setup on right +- Recommended direction has green border and subtle glow +- Non-recommended direction has muted opacity +- Risk level badge: green (Low), yellow (Medium), red (High) +- Targets table with sortable columns +- Conflict warnings in amber alert box + +**Data Flow**: +```tsx +// In TickerDetailPage.tsx +const { data: tradeSetups } = useTradeSetups(symbol); + +const longSetup = tradeSetups?.find(s => s.direction === 'long'); +const shortSetup = tradeSetups?.find(s => s.direction === 'short'); + + +``` + + + +### Scanner Page Enhancement + +**Location**: `frontend/src/components/scanner/TradeTable.tsx` + +**New Columns**: +- Recommended Action (with badge) +- Confidence Score (with progress bar) +- Best Target (highest probability target) +- Risk Level (with color-coded badge) + +**Filtering Controls**: +```tsx +interface ScannerFilters { + direction?: 'long' | 'short'; + minConfidence?: number; + recommendedAction?: 'LONG_HIGH' | 'LONG_MODERATE' | 'SHORT_HIGH' | 'SHORT_MODERATE' | 'NEUTRAL'; + riskLevel?: 'Low' | 'Medium' | 'High'; +} +``` + +**Table Enhancement**: +```tsx + + + + Symbol + 
Recommended Action + Confidence + Entry + Stop + Best Target + R:R + Risk Level + Composite + + + + {setups.map(setup => ( + navigate(`/ticker/${setup.symbol}`)} + className="cursor-pointer hover:bg-white/5" + > + {setup.symbol} + + + + + + + {/* ... other cells ... */} + + ))} + +
+``` + +**Sorting**: +- Default: Confidence score descending +- Secondary: R:R ratio descending +- Allow sorting by any column + + + +### Admin Settings Page Enhancement + +**Location**: `frontend/src/components/admin/RecommendationSettings.tsx` + +**Form Fields**: +```tsx +
+
+

Confidence Thresholds

+ + + +
+ +
+

Probability Calculation Weights

+ + + +
+ +
+

Conflict Detection Thresholds

+ + +
+ + + +
+``` + +**Validation**: +- Client-side validation before submission +- Toast notification on success/error +- Confirmation dialog for reset to defaults + + + +## Database Schema Changes + +### Migration Strategy + +**Alembic Migration**: `alembic revision -m "add_recommendation_fields_to_trade_setup"` + +**Migration Script**: +```python +"""add_recommendation_fields_to_trade_setup + +Revision ID: abc123def456 +Revises: previous_revision +Create Date: 2024-01-15 10:00:00.000000 +""" +from alembic import op +import sqlalchemy as sa + +def upgrade(): + # Add new columns to trade_setups table + op.add_column('trade_setups', + sa.Column('confidence_score', sa.Float(), nullable=True)) + op.add_column('trade_setups', + sa.Column('targets_json', sa.Text(), nullable=True)) + op.add_column('trade_setups', + sa.Column('conflict_flags_json', sa.Text(), nullable=True)) + op.add_column('trade_setups', + sa.Column('recommended_action', sa.String(20), nullable=True)) + op.add_column('trade_setups', + sa.Column('reasoning', sa.Text(), nullable=True)) + op.add_column('trade_setups', + sa.Column('risk_level', sa.String(10), nullable=True)) + +def downgrade(): + # Remove columns if rolling back + op.drop_column('trade_setups', 'risk_level') + op.drop_column('trade_setups', 'reasoning') + op.drop_column('trade_setups', 'recommended_action') + op.drop_column('trade_setups', 'conflict_flags_json') + op.drop_column('trade_setups', 'targets_json') + op.drop_column('trade_setups', 'confidence_score') +``` + +**Deployment Steps**: +1. Run migration: `alembic upgrade head` +2. Deploy new backend code with recommendation_service +3. Trigger R:R scanner job to populate recommendation data +4. Deploy frontend with new components +5. 
Verify data in admin panel + +**Rollback Plan**: +- New fields are nullable, so old code continues to work +- If issues arise, run `alembic downgrade -1` +- Frontend gracefully handles missing recommendation fields + + + +## Integration Points + +### Integration with R:R Scanner Service + +**Modified `rr_scanner_service.scan_ticker()`**: + +```python +async def scan_ticker( + db: AsyncSession, + symbol: str, + rr_threshold: float = 1.5, + atr_multiplier: float = 1.5, +) -> list[TradeSetup]: + """Scan a single ticker for trade setups with recommendations.""" + + # ... existing code to generate base setups ... + + # NEW: Fetch data needed for recommendations + dimension_scores = await _get_dimension_scores(db, ticker.id) + sentiment_classification = await _get_latest_sentiment(db, ticker.id) + + # NEW: Enhance each setup with recommendations + from app.services.recommendation_service import enhance_trade_setup + + enhanced_setups = [] + for setup in setups: + enhanced = await enhance_trade_setup( + db=db, + ticker=ticker, + setup=setup, + dimension_scores=dimension_scores, + sr_levels=sr_levels, + sentiment_classification=sentiment_classification, + atr_value=atr_value, + ) + enhanced_setups.append(enhanced) + + # Delete old setups and persist enhanced ones + await db.execute( + delete(TradeSetup).where(TradeSetup.ticker_id == ticker.id) + ) + for setup in enhanced_setups: + db.add(setup) + + await db.commit() + + for s in enhanced_setups: + await db.refresh(s) + + return enhanced_setups +``` + +**Helper Functions**: +```python +async def _get_dimension_scores(db: AsyncSession, ticker_id: int) -> dict[str, float]: + """Fetch all dimension scores for a ticker.""" + result = await db.execute( + select(DimensionScore).where(DimensionScore.ticker_id == ticker_id) + ) + scores = {ds.dimension: ds.score for ds in result.scalars().all()} + return scores + +async def _get_latest_sentiment(db: AsyncSession, ticker_id: int) -> str | None: + """Fetch the most recent sentiment 
classification.""" + result = await db.execute( + select(SentimentScore) + .where(SentimentScore.ticker_id == ticker_id) + .order_by(SentimentScore.timestamp.desc()) + .limit(1) + ) + sentiment = result.scalar_one_or_none() + return sentiment.classification if sentiment else None +``` + + + +### Integration with Scoring Service + +**Data Dependencies**: +- `DimensionScore` table: technical, sentiment, momentum, fundamental scores +- Accessed via `_get_dimension_scores()` helper +- No modifications to scoring_service required + +**Staleness Handling**: +- Recommendation generation uses current dimension scores +- If dimension scores are stale, recommendations reflect that uncertainty +- Consider triggering score recomputation before R:R scan in scheduler + +### Integration with S/R Service + +**Data Dependencies**: +- `SRLevel` table: price_level, type, strength, detection_method +- Already fetched in `rr_scanner_service.scan_ticker()` +- No modifications to sr_service required + +**Usage**: +- Target generation filters S/R levels by type and direction +- Probability estimation uses strength scores +- Quality scoring combines strength with R:R and proximity + +### Integration with Sentiment Service + +**Data Dependencies**: +- `SentimentScore` table: classification, confidence, timestamp +- Accessed via `_get_latest_sentiment()` helper +- No modifications to sentiment_service required + +**Usage**: +- Conflict detection compares sentiment with technical/momentum +- Confidence scoring adds/subtracts points based on sentiment alignment +- Probability estimation adjusts for sentiment support + +### Integration with Indicator Service + +**Data Dependencies**: +- ATR calculation already performed in rr_scanner_service +- No additional calls needed + +**Usage**: +- Target generation uses ATR for volatility filtering +- Probability estimation uses distance_atr_multiple + + + +## Correctness Properties + +A property is a characteristic or behavior that should hold true 
across all valid executions of a system—essentially, a formal statement about what the system should do. Properties serve as the bridge between human-readable specifications and machine-verifiable correctness guarantees. + +### Property Reflection + +After analyzing all acceptance criteria, I identified several areas of redundancy: + +**Redundancy Group 1: Confidence Score Range Validation** +- Properties 2.1 and 2.2 both test that confidence scores are 0-100% +- **Resolution**: Combine into single property testing both LONG and SHORT + +**Redundancy Group 2: Target Count and Direction** +- Properties 3.1 and 3.2 both test target count (3-5) and direction relationship +- **Resolution**: Combine into single property covering both directions + +**Redundancy Group 3: Probability Range by Classification** +- Properties 4.6, 4.7, 4.8 all test probability ranges for different classifications +- **Resolution**: Combine into single property testing all classifications + +**Redundancy Group 4: Schema Validation** +- Properties 15.1-15.6 all test individual field existence +- **Resolution**: Combine into single property testing complete schema + +**Redundancy Group 5: API Response Schema** +- Properties 7.2-7.6 all test individual response fields +- **Resolution**: Combine into single property testing complete response schema + +**Redundancy Group 6: S/R Strength Impact** +- Properties 8.2 and 8.3 both test strength score impact on probability +- **Resolution**: Combine into single monotonicity property + +After reflection, 60+ criteria reduce to 38 unique properties. + + + +### Property 1: Bidirectional Setup Generation + +For any ticker with sufficient OHLCV data and S/R levels, the recommendation engine shall generate exactly two trade setups: one LONG and one SHORT, each with distinct direction fields. 
+ +**Validates: Requirements 1.1, 1.5** + +### Property 2: Direction-Appropriate S/R Level Usage + +For any LONG setup, all targets shall be resistance levels above entry price. For any SHORT setup, all targets shall be support levels below entry price. + +**Validates: Requirements 1.3, 1.4** + +### Property 3: Confidence Score Bounds + +For any trade setup (LONG or SHORT), the confidence score shall be within the range [0, 100]. + +**Validates: Requirements 2.1, 2.2** + +### Property 4: Conflict Impact on Confidence + +For any trade setup, when signal conflicts are detected, the confidence score shall be reduced compared to the same setup without conflicts. + +**Validates: Requirements 2.5, 5.7** + +### Property 5: Confidence Persistence + +For any generated trade setup, the confidence_score field shall be populated in the TradeSetup model. + +**Validates: Requirements 2.6** + +### Property 6: Target Count and Direction + +For any trade setup, the targets array shall contain 3 to 5 targets, all positioned in the correct direction relative to entry (above for LONG, below for SHORT). + +**Validates: Requirements 3.1, 3.2, 7.3** + +### Property 7: Target Classification Correctness + +For any targets array, targets shall be classified such that Conservative targets are nearest to entry, Aggressive targets are furthest, and Moderate targets are in between, based on distance ordering. + +**Validates: Requirements 3.3** + +### Property 8: R:R Ratio Calculation + +For any target in a trade setup, the R:R ratio shall equal (abs(target_price - entry_price)) / (abs(entry_price - stop_loss)). + +**Validates: Requirements 3.4** + +### Property 9: Target Distance Ordering + +For any targets array, targets shall be ordered by increasing distance from entry price. + +**Validates: Requirements 3.6** + +### Property 10: Probability Bounds + +For any target, the probability percentage shall be within the range [10, 90] to reflect market uncertainty. 
+ +**Validates: Requirements 4.1** + +### Property 11: S/R Strength Monotonicity + +For any two targets at the same distance with different S/R strength scores, the target with higher strength shall have equal or higher probability. + +**Validates: Requirements 4.2, 8.2, 8.3** + +### Property 12: Distance Probability Relationship + +For any two targets with the same S/R strength, the target closer to entry shall have higher probability than the target further from entry. + +**Validates: Requirements 4.3** + +### Property 13: Signal Alignment Impact + +For any target, when signals are aligned with the trade direction, the probability shall be higher than when signals are not aligned, all other factors being equal. + +**Validates: Requirements 4.4** + +### Property 14: Probability Classification Ranges + +For any Conservative target, probability shall be above 60%. For any Moderate target, probability shall be between 40% and 70%. For any Aggressive target, probability shall be below 50%. + +**Validates: Requirements 4.6, 4.7, 4.8** + +### Property 15: Sentiment-Technical Conflict Detection + +For any ticker, when sentiment is bearish and technical score is above 60, OR when sentiment is bullish and technical score is below 40, a sentiment-technical conflict shall be flagged. + +**Validates: Requirements 5.1, 5.2, 5.3** + +### Property 16: Momentum-Technical Divergence Detection + +For any ticker, when the absolute difference between momentum score and technical score exceeds 30 points, a momentum-technical conflict shall be flagged. + +**Validates: Requirements 5.4, 5.5** + +### Property 17: Conflict Persistence + +For any trade setup with detected conflicts, the conflict_flags array shall contain descriptions of all detected conflicts. + +**Validates: Requirements 5.6** + +### Property 18: Recommended Action Validity + +For any trade setup, the recommended_action field shall be one of: "LONG_HIGH", "LONG_MODERATE", "SHORT_HIGH", "SHORT_MODERATE", or "NEUTRAL". 
+ +**Validates: Requirements 6.1** + +### Property 19: Reasoning Presence + +For any trade setup, the reasoning field shall be populated with non-empty text explaining the recommendation. + +**Validates: Requirements 6.5** + +### Property 20: Risk Level Validity + +For any trade setup, the risk_level field shall be one of: "Low", "Medium", or "High". + +**Validates: Requirements 6.6** + +### Property 21: Composite Score Inclusion + +For any trade setup, the composite_score field shall be populated with the ticker's composite score. + +**Validates: Requirements 6.7** + +### Property 22: API Bidirectional Response + +For any ticker with trade setups, the API endpoint shall return both LONG and SHORT setups. + +**Validates: Requirements 7.1** + +### Property 23: API Response Schema Completeness + +For any trade setup returned by the API, the response shall include: confidence_score, targets array, conflict_flags array, recommended_action, reasoning, risk_level, and composite_score fields. + +**Validates: Requirements 7.2, 7.3, 7.5, 7.6** + +### Property 24: API Target Object Schema + +For any target object in the API response, it shall include: price, distance_from_entry, rr_ratio, probability, and classification fields. + +**Validates: Requirements 7.4** + +### Property 25: API Response Ordering + +For any API response containing multiple setups, setups shall be ordered by confidence score in descending order. + +**Validates: Requirements 7.7** + +### Property 26: S/R Strength Retrieval + +For any target generated, the S/R strength score shall be correctly retrieved from the SRLevel model and included in the target object. + +**Validates: Requirements 8.1** + +### Property 27: Strength Score Normalization + +For any S/R strength score used in probability calculation, it shall be normalized to the range [0, 1] before application. 
+ +**Validates: Requirements 8.4** + +### Property 28: ATR Retrieval + +For any ticker being analyzed, the current ATR value shall be retrieved and used in target generation. + +**Validates: Requirements 9.1** + +### Property 29: High Volatility Target Inclusion + +For any ticker where ATR exceeds 5% of current price, the target generator shall include S/R levels up to 10x ATR distance as valid targets. + +**Validates: Requirements 9.2** + +### Property 30: Low Volatility Target Restriction + +For any ticker where ATR is below 2% of current price, the target generator shall limit targets to S/R levels within 3x ATR distance. + +**Validates: Requirements 9.3** + +### Property 31: ATR Multiple Calculation + +For any target, the distance_atr_multiple field shall equal (abs(target_price - entry_price)) / ATR. + +**Validates: Requirements 9.4** + +### Property 32: Minimum Distance Filter + +For any generated targets, no target shall be closer than 1x ATR from the entry price. + +**Validates: Requirements 9.5** + +### Property 33: Timestamp Presence + +For any generated trade setup, the detected_at field shall be populated with a timestamp. + +**Validates: Requirements 10.1** + +### Property 34: Single Ticker Performance + +For any single ticker recommendation generation, the operation shall complete within 500 milliseconds. + +**Validates: Requirements 14.1** + +### Property 35: Batch Processing Resilience + +For any batch of tickers, if recommendation generation fails for one ticker, the engine shall continue processing remaining tickers without stopping. + +**Validates: Requirements 14.5** + +### Property 36: Dimension Score Query Efficiency + +For any ticker recommendation generation, all required dimension scores shall be retrieved in a single database query. 
+ +**Validates: Requirements 14.3** + +### Property 37: TradeSetup Model Schema + +The TradeSetup model shall include all required fields: confidence_score (Float), targets_json (Text), conflict_flags_json (Text), recommended_action (String), reasoning (Text), and risk_level (String). + +**Validates: Requirements 15.1, 15.2, 15.3, 15.4, 15.5, 15.6** + +### Property 38: Backward Compatibility + +For any trade setup, the existing fields (entry_price, stop_loss, target, rr_ratio) shall remain populated with the primary target data for backward compatibility. + +**Validates: Requirements 15.7** + + + +## Error Handling + +### Service-Level Error Handling + +**recommendation_service.py**: +```python +async def enhance_trade_setup(...) -> TradeSetup: + """Enhance trade setup with error handling.""" + try: + # Calculate confidence + confidence = direction_analyzer.calculate_confidence(...) + + # Generate targets + targets = target_generator.generate_targets(...) + + # Estimate probabilities + for target in targets: + target["probability"] = probability_estimator.estimate_probability(...) + + # Detect conflicts + conflicts = signal_conflict_detector.detect_conflicts(...) + + # Generate recommendation summary + recommendation = _generate_recommendation_summary(...) 
+ + # Update setup model + setup.confidence_score = confidence + setup.targets_json = json.dumps(targets) + setup.conflict_flags_json = json.dumps(conflicts) + setup.recommended_action = recommendation["action"] + setup.reasoning = recommendation["reasoning"] + setup.risk_level = recommendation["risk_level"] + + return setup + + except Exception as e: + logger.exception(f"Error enhancing trade setup for {ticker.symbol}: {e}") + # Return setup with minimal recommendation data + setup.confidence_score = None + setup.reasoning = f"Recommendation generation failed: {str(e)}" + setup.risk_level = "High" + return setup +``` + +**Graceful Degradation**: +- If recommendation enhancement fails, return base setup without recommendation data +- Log error for debugging but don't fail the entire scan +- Set risk_level to "High" to warn users of incomplete analysis + + + +### API-Level Error Handling + +**Trade Setup Endpoints**: +```python +@router.get("/trades") +async def get_trade_setups( + direction: str | None = None, + min_confidence: float | None = None, + recommended_action: str | None = None, + db: AsyncSession = Depends(get_db), +): + """Get trade setups with validation.""" + try: + # Validate parameters + if min_confidence is not None and not (0 <= min_confidence <= 100): + raise ValidationError("min_confidence must be between 0 and 100") + + if recommended_action is not None: + valid_actions = ["LONG_HIGH", "LONG_MODERATE", "SHORT_HIGH", "SHORT_MODERATE", "NEUTRAL"] + if recommended_action not in valid_actions: + raise ValidationError(f"recommended_action must be one of: {', '.join(valid_actions)}") + + # Fetch and filter setups + setups = await rr_scanner_service.get_trade_setups(db, direction) + + # Apply filters + if min_confidence is not None: + setups = [s for s in setups if s.get("confidence_score", 0) >= min_confidence] + + if recommended_action is not None: + setups = [s for s in setups if s.get("recommended_action") == recommended_action] + + return 
{"status": "success", "data": setups} + + except ValidationError as e: + raise HTTPException(status_code=400, detail=str(e)) + except Exception as e: + logger.exception(f"Error fetching trade setups: {e}") + raise HTTPException(status_code=500, detail="Internal server error") +``` + +**Admin Configuration Endpoints**: +```python +@router.put("/admin/settings/recommendations") +async def update_recommendation_config( + config: RecommendationConfigUpdate, + db: AsyncSession = Depends(get_db), + _: User = Depends(require_admin), +): + """Update recommendation configuration with validation.""" + try: + # Validate thresholds (0-100) + for key in ["high_confidence_threshold", "moderate_confidence_threshold", "confidence_diff_threshold"]: + if hasattr(config, key): + value = getattr(config, key) + if value is not None and not (0 <= value <= 100): + raise ValidationError(f"{key} must be between 0 and 100") + + # Validate weights (0-1) + for key in ["signal_alignment_weight", "sr_strength_weight", "distance_penalty_factor"]: + if hasattr(config, key): + value = getattr(config, key) + if value is not None and not (0 <= value <= 1): + raise ValidationError(f"{key} must be between 0 and 1") + + # Update settings + updated = await settings_service.update_recommendation_config(db, config.dict(exclude_unset=True)) + + return {"status": "success", "data": updated} + + except ValidationError as e: + raise HTTPException(status_code=400, detail=str(e)) + except Exception as e: + logger.exception(f"Error updating recommendation config: {e}") + raise HTTPException(status_code=500, detail="Internal server error") +``` + +### Data Validation + +**JSON Field Validation**: +- Validate targets_json structure before parsing +- Handle malformed JSON gracefully +- Provide default empty arrays for missing data + +**Null Handling**: +- All new TradeSetup fields are nullable +- Frontend checks for null before rendering +- API returns null fields explicitly (not omitted) + + + +## Testing Strategy 
+ +### Dual Testing Approach + +This feature requires both unit tests and property-based tests for comprehensive coverage: + +**Unit Tests**: Verify specific examples, edge cases, and integration points +**Property Tests**: Verify universal properties across all inputs using randomization + +Together, these approaches ensure both concrete correctness (unit tests) and general correctness (property tests). + +### Property-Based Testing + +**Framework**: Hypothesis (Python property-based testing library) + +**Configuration**: Each property test shall run minimum 100 iterations to ensure comprehensive input coverage. + +**Test Organization**: `tests/property/test_recommendation_properties.py` + +**Example Property Test**: +```python +from hypothesis import given, strategies as st +import pytest + +@given( + technical=st.floats(min_value=0, max_value=100), + momentum=st.floats(min_value=0, max_value=100), + sentiment=st.sampled_from(["bearish", "neutral", "bullish", None]), +) +@pytest.mark.property +def test_confidence_score_bounds(technical, momentum, sentiment): + """Feature: intelligent-trade-recommendations, Property 3: Confidence Score Bounds + + For any trade setup (LONG or SHORT), the confidence score shall be + within the range [0, 100]. 
+ """ + from app.services.recommendation_service import direction_analyzer + + dimension_scores = { + "technical": technical, + "momentum": momentum, + "fundamental": 50.0, + } + + # Test LONG direction + long_confidence = direction_analyzer.calculate_confidence( + direction="long", + dimension_scores=dimension_scores, + sentiment_classification=sentiment, + conflicts=[], + ) + assert 0 <= long_confidence <= 100, f"LONG confidence {long_confidence} out of bounds" + + # Test SHORT direction + short_confidence = direction_analyzer.calculate_confidence( + direction="short", + dimension_scores=dimension_scores, + sentiment_classification=sentiment, + conflicts=[], + ) + assert 0 <= short_confidence <= 100, f"SHORT confidence {short_confidence} out of bounds" +``` + +**Property Test Tags**: Each test includes a comment with format: +```python +"""Feature: intelligent-trade-recommendations, Property {N}: {Property Title} + +{Property description from design document} +""" +``` + + + +### Unit Testing + +**Test Organization**: `tests/unit/test_recommendation_service.py` + +**Unit Test Focus**: +- Specific examples from requirements (e.g., bullish sentiment + high technical = high LONG confidence) +- Edge cases (e.g., fewer than 3 S/R levels available) +- Integration points (e.g., R:R scanner calling recommendation service) +- Error conditions (e.g., missing dimension scores, malformed data) + +**Example Unit Tests**: + +```python +import pytest +from app.services.recommendation_service import direction_analyzer + +def test_high_confidence_long_example(): + """Feature: intelligent-trade-recommendations, Requirement 2.3 + + WHEN sentiment is bullish AND technical score is above 60 AND momentum + score is above 60, THE Direction_Analyzer SHALL assign LONG confidence + above 70%. 
+ """ + dimension_scores = { + "technical": 75.0, + "momentum": 68.0, + "fundamental": 50.0, + } + + confidence = direction_analyzer.calculate_confidence( + direction="long", + dimension_scores=dimension_scores, + sentiment_classification="bullish", + conflicts=[], + ) + + assert confidence > 70.0, f"Expected LONG confidence > 70%, got {confidence}" + + +def test_high_confidence_short_example(): + """Feature: intelligent-trade-recommendations, Requirement 2.4 + + WHEN sentiment is bearish AND technical score is below 40 AND momentum + score is below 40, THE Direction_Analyzer SHALL assign SHORT confidence + above 70%. + """ + dimension_scores = { + "technical": 32.0, + "momentum": 35.0, + "fundamental": 50.0, + } + + confidence = direction_analyzer.calculate_confidence( + direction="short", + dimension_scores=dimension_scores, + sentiment_classification="bearish", + conflicts=[], + ) + + assert confidence > 70.0, f"Expected SHORT confidence > 70%, got {confidence}" + + +def test_limited_targets_edge_case(): + """Feature: intelligent-trade-recommendations, Requirement 3.5 + + WHEN fewer than 3 S/R levels exist in the target direction, THE + Target_Generator SHALL use the available levels and flag the setup + as having limited targets. 
+ """ + from app.services.recommendation_service import target_generator + + # Only 2 resistance levels available + sr_levels = [ + SRLevel(price_level=150.0, type="resistance", strength=70), + SRLevel(price_level=155.0, type="resistance", strength=65), + ] + + targets = target_generator.generate_targets( + direction="long", + entry_price=145.0, + stop_loss=142.0, + sr_levels=sr_levels, + atr_value=2.0, + ) + + assert len(targets) == 2, f"Expected 2 targets, got {len(targets)}" + # Check for limited targets flag in reasoning or metadata +``` + +**Test Fixtures** (`tests/conftest.py`): +```python +@pytest.fixture +def sample_dimension_scores(): + """Sample dimension scores for testing.""" + return { + "technical": 65.0, + "sr_quality": 70.0, + "sentiment": 60.0, + "fundamental": 55.0, + "momentum": 62.0, + } + +@pytest.fixture +def sample_sr_levels(): + """Sample S/R levels for testing.""" + return [ + SRLevel(id=1, price_level=140.0, type="support", strength=75), + SRLevel(id=2, price_level=145.0, type="support", strength=80), + SRLevel(id=3, price_level=155.0, type="resistance", strength=70), + SRLevel(id=4, price_level=160.0, type="resistance", strength=65), + SRLevel(id=5, price_level=165.0, type="resistance", strength=60), + ] +``` + + + +### Frontend Testing + +**Framework**: Vitest with React Testing Library + +**Test Organization**: `frontend/src/components/**/*.test.tsx` + +**Component Tests**: + +```typescript +import { describe, it, expect } from 'vitest'; +import { render, screen } from '@testing-library/react'; +import { RecommendationPanel } from './RecommendationPanel'; + +describe('RecommendationPanel', () => { + it('displays LONG and SHORT setups side-by-side', () => { + const longSetup = { + direction: 'long', + confidence_score: 75.0, + recommended_action: 'LONG_HIGH', + // ... other fields + }; + + const shortSetup = { + direction: 'short', + confidence_score: 45.0, + recommended_action: 'SHORT_MODERATE', + // ... 
other fields + }; + + render( + + ); + + expect(screen.getByText(/LONG/i)).toBeInTheDocument(); + expect(screen.getByText(/SHORT/i)).toBeInTheDocument(); + expect(screen.getByText('75.0')).toBeInTheDocument(); + expect(screen.getByText('45.0')).toBeInTheDocument(); + }); + + it('highlights recommended direction with visual emphasis', () => { + const longSetup = { + direction: 'long', + confidence_score: 75.0, + recommended_action: 'LONG_HIGH', + // ... other fields + }; + + const shortSetup = { + direction: 'short', + confidence_score: 45.0, + recommended_action: 'SHORT_MODERATE', + // ... other fields + }; + + const { container } = render( + + ); + + // LONG should have recommended styling + const longCard = container.querySelector('[data-direction="long"]'); + expect(longCard).toHaveClass('border-green-500'); + + // SHORT should have muted styling + const shortCard = container.querySelector('[data-direction="short"]'); + expect(shortCard).toHaveClass('opacity-60'); + }); + + it('displays conflict warnings when present', () => { + const setupWithConflicts = { + direction: 'long', + confidence_score: 55.0, + conflict_flags: [ + 'sentiment-technical: Bearish sentiment conflicts with bullish technical (72)', + ], + // ... 
other fields + }; + + render( + + ); + + expect(screen.getByText(/Bearish sentiment conflicts/i)).toBeInTheDocument(); + }); +}); +``` + +### Integration Testing + +**End-to-End Flow Test**: +```python +@pytest.mark.asyncio +async def test_recommendation_generation_e2e(db_session, sample_ticker): + """Test complete recommendation generation flow.""" + from app.services.rr_scanner_service import scan_ticker + + # Setup: Ensure ticker has all required data + # - OHLCV records + # - Dimension scores + # - S/R levels + # - Sentiment scores + + # Execute: Run scanner with recommendation enhancement + setups = await scan_ticker( + db=db_session, + symbol=sample_ticker.symbol, + rr_threshold=1.5, + atr_multiplier=1.5, + ) + + # Verify: Both LONG and SHORT setups generated + assert len(setups) == 2 + long_setup = next(s for s in setups if s.direction == "long") + short_setup = next(s for s in setups if s.direction == "short") + + # Verify: Recommendation fields populated + assert long_setup.confidence_score is not None + assert long_setup.targets_json is not None + assert long_setup.recommended_action is not None + assert long_setup.reasoning is not None + assert long_setup.risk_level is not None + + # Verify: Targets structure + targets = json.loads(long_setup.targets_json) + assert 3 <= len(targets) <= 5 + for target in targets: + assert "price" in target + assert "probability" in target + assert "classification" in target +``` + +### Performance Testing + +**Benchmark Tests**: +```python +import time +import pytest + +@pytest.mark.benchmark +@pytest.mark.asyncio +async def test_single_ticker_performance(db_session, sample_ticker): + """Feature: intelligent-trade-recommendations, Property 34 + + For any single ticker recommendation generation, the operation shall + complete within 500 milliseconds. 
+ """ + from app.services.rr_scanner_service import scan_ticker + + start = time.time() + await scan_ticker(db=db_session, symbol=sample_ticker.symbol) + elapsed = (time.time() - start) * 1000 # Convert to ms + + assert elapsed < 500, f"Recommendation generation took {elapsed}ms, expected < 500ms" + + +@pytest.mark.benchmark +@pytest.mark.asyncio +async def test_batch_processing_throughput(db_session, sample_tickers): + """Feature: intelligent-trade-recommendations, Property 14.2 + + WHEN the scheduled job generates recommendations for all tickers, THE + Trade_Recommendation_Engine SHALL process at least 10 tickers per second. + """ + from app.services.rr_scanner_service import scan_all_tickers + + start = time.time() + await scan_all_tickers(db=db_session) + elapsed = time.time() - start + + throughput = len(sample_tickers) / elapsed + assert throughput >= 10, f"Throughput {throughput:.2f} tickers/sec, expected >= 10" +``` + +### Test Coverage Goals + +- **Unit Tests**: 80%+ code coverage for recommendation_service +- **Property Tests**: 100% coverage of all 38 correctness properties +- **Integration Tests**: Complete E2E flow from scanner to API response +- **Frontend Tests**: 70%+ coverage for recommendation components +- **Performance Tests**: Verify both single-ticker and batch performance targets + + + +## Implementation Roadmap + +### Phase 1: Backend Core (Week 1) +1. Create Alembic migration for TradeSetup model extensions +2. Implement direction_analyzer module with confidence calculation +3. Implement signal_conflict_detector module +4. Write unit tests for confidence scoring and conflict detection + +### Phase 2: Target Generation (Week 1-2) +1. Implement target_generator module with quality scoring +2. Implement probability_estimator module +3. Add volatility-based filtering logic +4. Write unit tests and property tests for target generation + +### Phase 3: Integration (Week 2) +1. Create recommendation_service orchestrator +2. 
Integrate with rr_scanner_service +3. Add SystemSetting configuration support +4. Write integration tests for complete flow + +### Phase 4: API Layer (Week 2-3) +1. Extend trade setup endpoints with filtering +2. Create admin configuration endpoints +3. Update Pydantic schemas for responses +4. Write API tests + +### Phase 5: Frontend (Week 3) +1. Create RecommendationPanel component +2. Enhance Scanner table with new columns +3. Add RecommendationSettings admin page +4. Write component tests + +### Phase 6: Testing & Optimization (Week 3-4) +1. Implement all 38 property-based tests +2. Run performance benchmarks +3. Optimize database queries if needed +4. Complete integration testing + +### Phase 7: Deployment (Week 4) +1. Run migration on staging database +2. Deploy backend to staging +3. Deploy frontend to staging +4. User acceptance testing +5. Production deployment + +## Success Metrics + +- **Correctness**: All 38 properties pass with 100 iterations +- **Performance**: Single ticker < 500ms, batch >= 10 tickers/sec +- **Coverage**: 80%+ backend unit test coverage, 70%+ frontend coverage +- **User Adoption**: 80%+ of users interact with recommendation features within first month +- **Accuracy**: Track recommendation outcomes for future validation (Phase 2 feature) + diff --git a/.kiro/specs/intelligent-trade-recommendations/requirements.md b/.kiro/specs/intelligent-trade-recommendations/requirements.md new file mode 100644 index 0000000..4fd9811 --- /dev/null +++ b/.kiro/specs/intelligent-trade-recommendations/requirements.md @@ -0,0 +1,222 @@ +# Requirements Document + +## Introduction + +The Intelligent Trade Recommendation System enhances the Signal Dashboard platform by providing clear, actionable trading recommendations with confidence scoring, multiple price targets, and probability estimates. 
The system addresses critical gaps in the current trade setup generation: contradictory signal detection, single-target limitations, and lack of directional guidance for non-professional traders. + +The system analyzes multi-dimensional signals (sentiment, technical, momentum, S/R positioning) to recommend both LONG and SHORT directions with confidence scores, identifies multiple S/R-based price targets with probability estimates, and detects signal conflicts to prevent contradictory recommendations. + +## Glossary + +- **Trade_Recommendation_Engine**: The core system component that analyzes signals and generates directional recommendations with confidence scores +- **Direction_Analyzer**: Component that evaluates LONG vs SHORT direction based on signal alignment +- **Target_Generator**: Component that identifies multiple S/R levels as price targets +- **Probability_Estimator**: Component that calculates likelihood of reaching each target +- **Signal_Conflict_Detector**: Component that identifies contradictions between sentiment, technical, and momentum signals +- **Recommendation_Summary**: User-facing output containing recommended action, confidence, reasoning, and risk level +- **S/R_Level**: Support/Resistance level with strength score and price +- **Signal_Alignment**: Degree of agreement between sentiment, technical, momentum, and fundamental dimensions +- **Confidence_Score**: Percentage (0-100%) indicating likelihood of success for a directional recommendation +- **Target_Probability**: Percentage likelihood of price reaching a specific target level +- **ATR**: Average True Range, volatility measure used for stop-loss calculation +- **R:R_Ratio**: Risk-to-Reward ratio comparing potential profit to potential loss +- **Composite_Score**: Weighted aggregate score (0-100) from all dimensions +- **Dimension_Score**: Individual score for technical, sr_quality, sentiment, fundamental, or momentum dimension + +## Requirements + +### Requirement 1: Bidirectional 
Trade Setup Generation + +**User Story:** As a trader, I want to see both LONG and SHORT trade setups for each ticker, so that I can evaluate opportunities in both directions regardless of market conditions. + +#### Acceptance Criteria + +1. WHEN the Trade_Recommendation_Engine analyzes a ticker, THE Trade_Recommendation_Engine SHALL generate both a LONG setup and a SHORT setup +2. THE Trade_Recommendation_Engine SHALL calculate separate entry prices, stop losses, and targets for each direction +3. WHEN generating a LONG setup, THE Trade_Recommendation_Engine SHALL use resistance levels as targets and support levels for stop-loss calculation +4. WHEN generating a SHORT setup, THE Trade_Recommendation_Engine SHALL use support levels as targets and resistance levels for stop-loss calculation +5. THE Trade_Recommendation_Engine SHALL store both setups in the TradeSetup model with distinct direction fields + +### Requirement 2: Direction Confidence Scoring + +**User Story:** As a non-professional trader, I want to see confidence scores for LONG vs SHORT directions, so that I can understand which direction has higher probability of success. + +#### Acceptance Criteria + +1. THE Direction_Analyzer SHALL calculate a confidence score (0-100%) for the LONG direction +2. THE Direction_Analyzer SHALL calculate a confidence score (0-100%) for the SHORT direction +3. WHEN sentiment is bullish AND technical score is above 60 AND momentum score is above 60, THE Direction_Analyzer SHALL assign LONG confidence above 70% +4. WHEN sentiment is bearish AND technical score is below 40 AND momentum score is below 40, THE Direction_Analyzer SHALL assign SHORT confidence above 70% +5. WHEN signal dimensions contradict each other, THE Direction_Analyzer SHALL reduce confidence scores for both directions below 60% +6. 
THE Direction_Analyzer SHALL store confidence scores in the TradeSetup model for each direction + +### Requirement 3: Multiple Price Target Identification + +**User Story:** As a trader, I want multiple price targets at different S/R levels, so that I can implement staged profit-taking and proper risk management. + +#### Acceptance Criteria + +1. WHEN generating targets for a LONG setup, THE Target_Generator SHALL identify 3 to 5 resistance levels above the entry price +2. WHEN generating targets for a SHORT setup, THE Target_Generator SHALL identify 3 to 5 support levels below the entry price +3. THE Target_Generator SHALL classify targets as Conservative (nearest), Moderate (mid-range), or Aggressive (furthest) +4. THE Target_Generator SHALL calculate the R:R ratio for each target level +5. WHEN fewer than 3 S/R levels exist in the target direction, THE Target_Generator SHALL use the available levels and flag the setup as having limited targets +6. THE Target_Generator SHALL order targets by distance from entry price + +### Requirement 4: Target Probability Estimation + +**User Story:** As a trader, I want to know the probability of reaching each price target, so that I can set realistic expectations and plan my exits. + +#### Acceptance Criteria + +1. THE Probability_Estimator SHALL calculate a target probability percentage (0-100%) for each price target +2. WHEN calculating probability, THE Probability_Estimator SHALL consider S/R level strength score (higher strength increases probability) +3. WHEN calculating probability, THE Probability_Estimator SHALL consider distance from entry (closer targets receive higher probability) +4. WHEN calculating probability, THE Probability_Estimator SHALL consider signal alignment (aligned signals increase probability by 10-20%) +5. WHEN calculating probability, THE Probability_Estimator SHALL consider ATR (higher volatility increases probability for distant targets) +6. 
THE Probability_Estimator SHALL assign Conservative targets probability above 60%
+7. THE Probability_Estimator SHALL assign Moderate targets probability between 40% and 70%
+8. THE Probability_Estimator SHALL assign Aggressive targets probability below 50%
+
+### Requirement 5: Signal Conflict Detection
+
+**User Story:** As a trader, I want to be warned when signals contradict each other, so that I can avoid high-risk trades with mixed indicators.
+
+#### Acceptance Criteria
+
+1. THE Signal_Conflict_Detector SHALL compare sentiment classification (bearish/neutral/bullish) with technical score direction
+2. WHEN sentiment is bearish AND technical score is above 60, THE Signal_Conflict_Detector SHALL flag a sentiment-technical conflict
+3. WHEN sentiment is bullish AND technical score is below 40, THE Signal_Conflict_Detector SHALL flag a sentiment-technical conflict
+4. THE Signal_Conflict_Detector SHALL compare momentum score with technical score
+5. WHEN momentum score and technical score differ by more than 30 points, THE Signal_Conflict_Detector SHALL flag a momentum-technical conflict
+6. THE Signal_Conflict_Detector SHALL store conflict flags in the TradeSetup model
+7. WHEN conflicts are detected, THE Signal_Conflict_Detector SHALL reduce confidence scores by 15-25 percentage points, such that the resulting scores satisfy Requirement 2 criterion 5 (both directions below 60%)
+
+### Requirement 6: Recommendation Summary Generation
+
+**User Story:** As a non-professional trader, I want a clear recommendation summary with action, confidence, and reasoning, so that I can make informed trading decisions without analyzing raw data.
+
+#### Acceptance Criteria
+
+1. THE Recommendation_Summary SHALL include a recommended action field with values: "LONG (High Confidence)", "LONG (Moderate Confidence)", "SHORT (High Confidence)", "SHORT (Moderate Confidence)", or "NEUTRAL (Conflicting Signals)"
+2. WHEN LONG confidence is above 70% AND LONG confidence exceeds SHORT confidence by 20%, THE Recommendation_Summary SHALL recommend "LONG (High Confidence)"
+3. 
WHEN SHORT confidence is above 70% AND SHORT confidence exceeds LONG confidence by 20%, THE Recommendation_Summary SHALL recommend "SHORT (High Confidence)"
+4. WHEN confidence scores differ by less than 20%, THE Recommendation_Summary SHALL recommend "NEUTRAL (Conflicting Signals)"; WHEN the higher confidence score is between 50% and 70% AND exceeds the other direction's score by at least 20%, THE Recommendation_Summary SHALL recommend the corresponding "(Moderate Confidence)" action for that direction
+5. THE Recommendation_Summary SHALL include reasoning text explaining the recommendation based on signal alignment
+6. THE Recommendation_Summary SHALL include a risk level assessment: Low (all signals aligned), Medium (minor conflicts), or High (major conflicts)
+7. THE Recommendation_Summary SHALL display the composite score alongside the recommendation
+
+### Requirement 7: Trade Setup API Enhancement
+
+**User Story:** As a frontend developer, I want the trade setup API to return enhanced recommendation data, so that I can display confidence scores, multiple targets, and probabilities in the UI.
+
+#### Acceptance Criteria
+
+1. WHEN the trade setup API endpoint is called for a ticker, THE API SHALL return both LONG and SHORT setups
+2. THE API SHALL include confidence_score field for each setup
+3. THE API SHALL include a targets array with 3-5 target objects for each setup
+4. WHEN returning target objects, THE API SHALL include price, distance_from_entry, rr_ratio, probability, and classification (Conservative/Moderate/Aggressive) fields
+5. THE API SHALL include conflict_flags array listing detected signal conflicts
+6. THE API SHALL include recommendation_summary object with action, reasoning, and risk_level fields
+7. THE API SHALL return setups ordered by confidence score (highest first)
+
+### Requirement 8: Historical S/R Strength Integration
+
+**User Story:** As a system, I want to use historical S/R level strength in probability calculations, so that targets at stronger levels receive higher probability estimates.
+
+#### Acceptance Criteria
+
+1. THE Probability_Estimator SHALL retrieve the strength score from the SRLevel model for each target
+2. 
WHEN an S/R level has strength score above 80, THE Probability_Estimator SHALL increase target probability by 10-15% +3. WHEN an S/R level has strength score below 40, THE Probability_Estimator SHALL decrease target probability by 10-15% +4. THE Probability_Estimator SHALL normalize strength scores to a 0-1 scale before applying to probability calculation +5. WHEN an S/R level has been tested multiple times historically, THE Probability_Estimator SHALL increase its weight in probability calculation + +### Requirement 9: Volatility-Adjusted Target Selection + +**User Story:** As a trader, I want target selection to account for volatility, so that targets are realistic given the ticker's typical price movement. + +#### Acceptance Criteria + +1. THE Target_Generator SHALL retrieve the current ATR value for the ticker +2. WHEN ATR is high (above 5% of current price), THE Target_Generator SHALL include more distant S/R levels as valid targets +3. WHEN ATR is low (below 2% of current price), THE Target_Generator SHALL limit targets to S/R levels within 3x ATR distance +4. THE Target_Generator SHALL calculate target distance as a multiple of ATR +5. THE Target_Generator SHALL exclude S/R levels that are less than 1x ATR from entry price + +### Requirement 10: Recommendation Persistence and History + +**User Story:** As a trader, I want to track how recommendations change over time, so that I can evaluate the system's accuracy and learn from past recommendations. + +#### Acceptance Criteria + +1. THE Trade_Recommendation_Engine SHALL store each generated recommendation with a timestamp +2. THE Trade_Recommendation_Engine SHALL preserve previous recommendations when generating new ones +3. THE API SHALL provide an endpoint to retrieve recommendation history for a ticker +4. WHEN retrieving history, THE API SHALL return recommendations ordered by timestamp (newest first) +5. 
THE API SHALL include actual_outcome field indicating whether targets were reached (to be updated post-trade) + +### Requirement 11: Frontend Recommendation Display + +**User Story:** As a trader, I want to see recommendations clearly displayed in the ticker detail page, so that I can quickly understand the suggested action and targets. + +#### Acceptance Criteria + +1. THE Ticker_Detail_Page SHALL display the recommendation summary prominently at the top +2. THE Ticker_Detail_Page SHALL show LONG and SHORT setups side-by-side with confidence scores +3. THE Ticker_Detail_Page SHALL display targets in a table with columns: Classification, Price, Distance, R:R, Probability +4. WHEN signal conflicts exist, THE Ticker_Detail_Page SHALL display a warning badge with conflict details +5. THE Ticker_Detail_Page SHALL highlight the recommended direction with visual emphasis (border, background color) +6. THE Ticker_Detail_Page SHALL display risk level with color coding: green (Low), yellow (Medium), red (High) + +### Requirement 12: Scanner Integration with Recommendations + +**User Story:** As a trader, I want the scanner to show recommended direction and confidence, so that I can quickly filter for high-confidence opportunities. + +#### Acceptance Criteria + +1. THE Scanner_Page SHALL display a "Recommended Action" column showing the recommended direction and confidence level +2. THE Scanner_Page SHALL allow filtering by recommended action (LONG High, LONG Moderate, SHORT High, SHORT Moderate, NEUTRAL) +3. THE Scanner_Page SHALL allow filtering by minimum confidence score +4. THE Scanner_Page SHALL display the highest-probability target for each setup in the table +5. 
WHEN a user clicks a setup row, THE Scanner_Page SHALL navigate to the ticker detail page with the recommendation expanded + +### Requirement 13: Admin Configuration for Recommendation Thresholds + +**User Story:** As an admin, I want to configure confidence score thresholds and probability calculation weights, so that I can tune the recommendation system based on market conditions. + +#### Acceptance Criteria + +1. THE Admin_Settings_Page SHALL provide inputs for high confidence threshold (default: 70%) +2. THE Admin_Settings_Page SHALL provide inputs for moderate confidence threshold (default: 50%) +3. THE Admin_Settings_Page SHALL provide inputs for signal alignment weight in probability calculation (default: 15%) +4. THE Admin_Settings_Page SHALL provide inputs for S/R strength weight in probability calculation (default: 20%) +5. THE Admin_Settings_Page SHALL provide inputs for distance penalty factor in probability calculation (default: 0.1) +6. WHEN admin saves settings, THE Settings_Service SHALL update the configuration in the Settings model +7. THE Trade_Recommendation_Engine SHALL retrieve current thresholds from Settings before generating recommendations + +### Requirement 14: Recommendation Calculation Performance + +**User Story:** As a system, I want recommendation generation to complete within acceptable time limits, so that users receive timely updates without delays. + +#### Acceptance Criteria + +1. WHEN generating recommendations for a single ticker, THE Trade_Recommendation_Engine SHALL complete within 500 milliseconds +2. WHEN the scheduled job generates recommendations for all tickers, THE Trade_Recommendation_Engine SHALL process at least 10 tickers per second +3. THE Trade_Recommendation_Engine SHALL use database query optimization to retrieve all required dimension scores in a single query +4. THE Trade_Recommendation_Engine SHALL cache S/R levels for each ticker to avoid repeated database queries +5. 
WHEN recommendation generation fails for a ticker, THE Trade_Recommendation_Engine SHALL log the error and continue processing remaining tickers + +### Requirement 15: Recommendation Data Model Extension + +**User Story:** As a developer, I want the TradeSetup model extended to store recommendation data, so that all recommendation information persists in the database. + +#### Acceptance Criteria + +1. THE TradeSetup model SHALL include a confidence_score field (Float, 0-100) +2. THE TradeSetup model SHALL include a targets field (JSON array of target objects) +3. THE TradeSetup model SHALL include a conflict_flags field (JSON array of strings) +4. THE TradeSetup model SHALL include a recommended_action field (String: LONG_HIGH, LONG_MODERATE, SHORT_HIGH, SHORT_MODERATE, NEUTRAL) +5. THE TradeSetup model SHALL include a reasoning field (Text) +6. THE TradeSetup model SHALL include a risk_level field (String: Low, Medium, High) +7. THE TradeSetup model SHALL maintain backward compatibility with existing entry_price, stop_loss, target, and rr_ratio fields for the primary target diff --git a/.kiro/steering/product.md b/.kiro/steering/product.md new file mode 100644 index 0000000..747042d --- /dev/null +++ b/.kiro/steering/product.md @@ -0,0 +1,30 @@ +# Product Overview + +Signal Dashboard is an investing-signal platform for NASDAQ stocks that surfaces optimal trading opportunities through multi-dimensional scoring. + +## Core Philosophy + +Don't predict price. Find the path of least resistance, key support/resistance zones, and asymmetric risk:reward setups. 
+ +## Key Features + +- Multi-dimensional scoring engine (technical, S/R quality, sentiment, fundamental, momentum) +- Risk:Reward scanner with ATR-based stops (default 3:1 threshold) +- Support/Resistance detection with strength scoring and merge-within-tolerance +- Sentiment analysis with time-decay weighted scoring (Gemini 2.0 Flash with search grounding) +- Auto-populated watchlist (top-10 by composite score) + manual entries (cap: 20) +- Interactive candlestick chart with S/R overlays +- JWT auth with admin role and user access control +- Scheduled jobs: OHLCV collection, sentiment polling, fundamentals fetch, R:R scanning + +## Data Providers + +- Alpaca: OHLCV price data +- Gemini 2.0 Flash: Sentiment analysis via search grounding +- Financial Modeling Prep: Fundamental data (P/E, revenue growth, earnings surprise, market cap) + +## User Roles + +- Admin: Full access including user management, job control, data cleanup, system settings +- User: Access to watchlist, scanner, rankings, ticker details (when has_access=true) +- Registration: Configurable via admin settings diff --git a/.kiro/steering/structure.md b/.kiro/steering/structure.md new file mode 100644 index 0000000..c00ec7e --- /dev/null +++ b/.kiro/steering/structure.md @@ -0,0 +1,87 @@ +# Project Structure + +## Backend Architecture + +``` +app/ +├── main.py # FastAPI app, lifespan, router registration +├── config.py # Pydantic settings from .env +├── database.py # Async SQLAlchemy engine + session factory +├── dependencies.py # DI: DB session, auth guards (require_access, require_admin) +├── exceptions.py # Custom exception hierarchy (ValidationError, NotFoundError, etc.) 
+├── middleware.py # Global error handler → JSON envelope +├── cache.py # LRU cache with per-ticker invalidation +├── scheduler.py # APScheduler job definitions +├── models/ # SQLAlchemy ORM models +├── schemas/ # Pydantic request/response schemas +├── services/ # Business logic layer +├── providers/ # External data provider integrations +└── routers/ # FastAPI route handlers +``` + +## Frontend Architecture + +``` +frontend/src/ +├── App.tsx # Route definitions +├── main.tsx # React entry point +├── api/ # Axios API client modules (one per resource) +├── components/ +│ ├── admin/ # User table, job controls, settings, data cleanup +│ ├── auth/ # Protected route wrapper +│ ├── charts/ # Canvas candlestick chart +│ ├── layout/ # App shell, sidebar, mobile nav +│ ├── rankings/ # Rankings table, weights form +│ ├── scanner/ # Trade table +│ ├── ticker/ # Sentiment panel, fundamentals, indicators, S/R overlay +│ ├── ui/ # Badge, toast, skeleton, score card, confirm dialog +│ └── watchlist/ # Watchlist table, add ticker form +├── hooks/ # React Query hooks (one per resource) +├── lib/ # Types, formatting utilities +├── pages/ # Page components (Login, Register, Watchlist, Ticker, Scanner, Rankings, Admin) +├── stores/ # Zustand auth store +└── styles/ # Global CSS with glassmorphism classes +``` + +## Key Patterns + +### Backend + +- **Layered architecture**: Router → Service → Model +- **Dependency injection**: FastAPI Depends() for DB session and auth +- **Exception handling**: Custom exceptions caught by global middleware, returned as JSON envelope +- **API envelope**: All responses wrapped in `{ status: "success"|"error", data: any, error?: string }` +- **Cascade deletes**: Ticker deletion cascades to all related data (OHLCV, sentiment, fundamentals, S/R, scores, trades, watchlist) +- **Async everywhere**: All DB operations use async/await with asyncpg + +### Frontend + +- **API client**: Axios interceptors for JWT injection and envelope unwrapping +- **Server 
state**: TanStack React Query with query keys per resource +- **Client state**: Zustand for auth (token, user, login/logout) +- **Error handling**: ApiError class, toast notifications for mutations +- **Protected routes**: ProtectedRoute wrapper checks auth, redirects to /login +- **Glassmorphism**: Frosted glass panels, gradient text, ambient glow, mesh gradient background + +## Database Models + +All models inherit from `Base` (SQLAlchemy declarative base): + +- `Ticker`: Registry of tracked symbols (cascade delete parent) +- `OHLCVRecord`: Price data (open, high, low, close, volume) +- `SentimentScore`: Sentiment analysis results with time-decay +- `FundamentalData`: P/E, revenue growth, earnings surprise, market cap +- `SRLevel`: Support/Resistance levels with strength scoring +- `DimensionScore`: Individual dimension scores (technical, sr_quality, sentiment, fundamental, momentum) +- `CompositeScore`: Weighted composite score +- `TradeSetup`: Detected R:R setups (long/short, entry, stop, target) +- `WatchlistEntry`: User watchlist entries (auto/manual) +- `User`: Auth and access control +- `Settings`: System-wide configuration + +## Testing + +- Backend tests: `tests/unit/` and `tests/property/` +- Frontend tests: `frontend/src/**/*.test.tsx` +- Fixtures in `tests/conftest.py` +- Hypothesis strategies for property-based testing diff --git a/.kiro/steering/tech.md b/.kiro/steering/tech.md new file mode 100644 index 0000000..132eae0 --- /dev/null +++ b/.kiro/steering/tech.md @@ -0,0 +1,86 @@ +# Tech Stack + +## Backend + +- Python 3.12+ +- FastAPI with Uvicorn +- SQLAlchemy 2.0 (async) with asyncpg +- PostgreSQL database +- Alembic for migrations +- APScheduler for scheduled jobs +- JWT auth (python-jose, passlib with bcrypt) +- Pydantic for validation and settings + +## Frontend + +- React 18 with TypeScript +- Vite 5 (build tool) +- TanStack React Query v5 (server state) +- Zustand (client state, auth) +- React Router v6 (SPA routing) +- Axios with JWT 
interceptor +- Tailwind CSS 3 with custom glassmorphism design system +- Canvas 2D for candlestick charts + +## Testing + +- Backend: pytest, pytest-asyncio, Hypothesis (property-based testing) +- Frontend: Vitest +- Test database: In-memory SQLite (no PostgreSQL needed for tests) + +## Common Commands + +### Backend + +```bash +# Setup +python -m venv .venv +source .venv/bin/activate +pip install -e ".[dev]" + +# Database +createdb stock_data_backend +alembic upgrade head + +# Run +uvicorn app.main:app --reload --host 0.0.0.0 --port 8000 + +# Test +pytest tests/ -v +``` + +### Frontend + +```bash +cd frontend + +# Setup +npm install + +# Run dev server (proxies /api/v1/ to backend) +npm run dev + +# Build +npm run build + +# Test +npm test # Single run +npm run test:watch # Watch mode +``` + +## Environment Variables + +Required in `.env`: +- `DATABASE_URL`: PostgreSQL connection string (postgresql+asyncpg://...) +- `JWT_SECRET`: Random secret for JWT signing +- `ALPACA_API_KEY`, `ALPACA_API_SECRET`: For OHLCV data +- `GEMINI_API_KEY`: For sentiment analysis +- `FMP_API_KEY`: For fundamental data + +See `.env.example` for full list with defaults. + +## API Documentation + +- Swagger UI: http://localhost:8000/docs +- ReDoc: http://localhost:8000/redoc +- All endpoints under `/api/v1/` diff --git a/README.md b/README.md index 54c1b43..c136c1a 100644 --- a/README.md +++ b/README.md @@ -17,19 +17,20 @@ Investing-signal platform for NASDAQ stocks. 
Surfaces the best trading opportuni | Charts | Canvas 2D candlestick chart with S/R overlays | | Routing | React Router v6 (SPA) | | HTTP | Axios with JWT interceptor | -| Data providers | Alpaca (OHLCV), Gemini 2.0 Flash (sentiment via search grounding), Financial Modeling Prep (fundamentals) | +| Data providers | Alpaca (OHLCV), OpenAI (sentiment, optional micro-batch), Fundamentals chain: FMP → Finnhub → Alpha Vantage | ## Features ### Backend - Ticker registry with full cascade delete +- Universe bootstrap for `sp500`, `nasdaq100`, `nasdaq_all` via admin endpoint - OHLCV price storage with upsert and validation - Technical indicators: ADX, EMA, RSI, ATR, Volume Profile, Pivot Points, EMA Cross - Support/Resistance detection with strength scoring and merge-within-tolerance - Sentiment analysis with time-decay weighted scoring - Fundamental data tracking (P/E, revenue growth, earnings surprise, market cap) - 5-dimension scoring engine (technical, S/R quality, sentiment, fundamental, momentum) with configurable weights -- Risk:Reward scanner — long and short setups, ATR-based stops, configurable R:R threshold (default 3:1) +- Risk:Reward scanner — long and short setups, ATR-based stops, configurable R:R threshold (default 1.5:1) - Auto-populated watchlist (top-10 by composite score) + manual entries (cap: 20) - JWT auth with admin role, configurable registration, user access control - Scheduled jobs with enable/disable control and status monitoring @@ -79,7 +80,7 @@ All under `/api/v1/`. Interactive docs at `/docs` (Swagger) and `/redoc`. 
| Scores | `GET /scores/{symbol}`, `GET /rankings`, `PUT /scores/weights` | | Trades | `GET /trades` | | Watchlist | `GET /watchlist`, `POST /watchlist/{symbol}`, `DELETE /watchlist/{symbol}` | -| Admin | `GET /admin/users`, `PUT /admin/users/{id}/role`, `PUT /admin/users/{id}/access`, `DELETE /admin/data/{symbol}`, `POST /admin/jobs/{name}/trigger`, `PUT /admin/jobs/{name}/toggle`, `GET /admin/jobs`, `GET /admin/settings`, `PUT /admin/settings` | +| Admin | `GET /admin/users`, `POST /admin/users`, `PUT /admin/users/{id}/access`, `PUT /admin/users/{id}/password`, `PUT /admin/settings/registration`, `GET /admin/settings`, `PUT /admin/settings/{key}`, `GET/PUT /admin/settings/recommendations`, `GET/PUT /admin/settings/ticker-universe`, `POST /admin/tickers/bootstrap`, `POST /admin/data/cleanup`, `GET /admin/jobs`, `POST /admin/jobs/{name}/trigger`, `PUT /admin/jobs/{name}/toggle`, `GET /admin/pipeline/readiness` | ## Development Setup @@ -157,11 +158,18 @@ Configure in `.env` (copy from `.env.example`): | `ALPACA_API_SECRET` | For OHLCV | — | Alpaca Markets API secret | | `GEMINI_API_KEY` | For sentiment | — | Google Gemini API key | | `GEMINI_MODEL` | No | `gemini-2.0-flash` | Gemini model name | -| `FMP_API_KEY` | For fundamentals | — | Financial Modeling Prep API key | +| `OPENAI_API_KEY` | For sentiment (OpenAI path) | — | OpenAI API key | +| `OPENAI_MODEL` | No | `gpt-4o-mini` | OpenAI model name | +| `OPENAI_SENTIMENT_BATCH_SIZE` | No | `5` | Micro-batch size for sentiment collector | +| `FMP_API_KEY` | Optional (fundamentals) | — | Financial Modeling Prep API key (first provider in chain) | +| `FINNHUB_API_KEY` | Optional (fundamentals) | — | Finnhub API key (fallback provider) | +| `ALPHA_VANTAGE_API_KEY` | Optional (fundamentals) | — | Alpha Vantage API key (fallback provider) | | `DATA_COLLECTOR_FREQUENCY` | No | `daily` | OHLCV collection schedule | | `SENTIMENT_POLL_INTERVAL_MINUTES` | No | `30` | Sentiment polling interval | | 
`FUNDAMENTAL_FETCH_FREQUENCY` | No | `daily` | Fundamentals fetch schedule | | `RR_SCAN_FREQUENCY` | No | `daily` | R:R scanner schedule | +| `FUNDAMENTAL_RATE_LIMIT_RETRIES` | No | `3` | Retries per ticker on fundamentals rate-limit | +| `FUNDAMENTAL_RATE_LIMIT_BACKOFF_SECONDS` | No | `15` | Base backoff seconds for fundamentals retry (exponential) | | `DEFAULT_WATCHLIST_AUTO_SIZE` | No | `10` | Auto-watchlist size | | `DEFAULT_RR_THRESHOLD` | No | `3.0` | Minimum R:R ratio for setups | | `DB_POOL_SIZE` | No | `5` | Database connection pool size | diff --git a/alembic/versions/003_add_trade_recommendation_fields.py b/alembic/versions/003_add_trade_recommendation_fields.py new file mode 100644 index 0000000..c664566 --- /dev/null +++ b/alembic/versions/003_add_trade_recommendation_fields.py @@ -0,0 +1,59 @@ +"""add recommendation fields to trade_setups + +Revision ID: 003 +Revises: 002 +Create Date: 2026-03-03 00:00:00.000000 + +""" +from typing import Sequence, Union + +from alembic import op +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. 
+revision: str = "003" +down_revision: Union[str, None] = "002" +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +def upgrade() -> None: + op.add_column( + "trade_setups", + sa.Column("confidence_score", sa.Float(), nullable=True), + ) + op.add_column( + "trade_setups", + sa.Column("targets_json", sa.Text(), nullable=True), + ) + op.add_column( + "trade_setups", + sa.Column("conflict_flags_json", sa.Text(), nullable=True), + ) + op.add_column( + "trade_setups", + sa.Column("recommended_action", sa.String(length=20), nullable=True), + ) + op.add_column( + "trade_setups", + sa.Column("reasoning", sa.Text(), nullable=True), + ) + op.add_column( + "trade_setups", + sa.Column("risk_level", sa.String(length=10), nullable=True), + ) + op.add_column( + "trade_setups", + sa.Column("actual_outcome", sa.String(length=20), nullable=True), + ) + + +def downgrade() -> None: + op.drop_column("trade_setups", "actual_outcome") + op.drop_column("trade_setups", "risk_level") + op.drop_column("trade_setups", "reasoning") + op.drop_column("trade_setups", "recommended_action") + op.drop_column("trade_setups", "conflict_flags_json") + op.drop_column("trade_setups", "targets_json") + op.drop_column("trade_setups", "confidence_score") diff --git a/app/config.py b/app/config.py index 64f855f..21e4c28 100644 --- a/app/config.py +++ b/app/config.py @@ -22,15 +22,24 @@ class Settings(BaseSettings): # Sentiment Provider — OpenAI openai_api_key: str = "" openai_model: str = "gpt-4o-mini" + openai_sentiment_batch_size: int = 5 # Fundamentals Provider — Financial Modeling Prep fmp_api_key: str = "" + # Fundamentals Provider — Finnhub (optional fallback) + finnhub_api_key: str = "" + + # Fundamentals Provider — Alpha Vantage (optional fallback) + alpha_vantage_api_key: str = "" + # Scheduled Jobs data_collector_frequency: str = "daily" sentiment_poll_interval_minutes: int = 30 fundamental_fetch_frequency: str = "daily" rr_scan_frequency: 
str = "daily" + fundamental_rate_limit_retries: int = 3 + fundamental_rate_limit_backoff_seconds: int = 15 # Scoring Defaults default_watchlist_auto_size: int = 10 diff --git a/app/models/trade_setup.py b/app/models/trade_setup.py index 308057d..375761f 100644 --- a/app/models/trade_setup.py +++ b/app/models/trade_setup.py @@ -1,6 +1,8 @@ from datetime import datetime -from sqlalchemy import DateTime, Float, ForeignKey, String +import json + +from sqlalchemy import DateTime, Float, ForeignKey, String, Text from sqlalchemy.orm import Mapped, mapped_column, relationship from app.database import Base @@ -23,4 +25,34 @@ class TradeSetup(Base): DateTime(timezone=True), nullable=False ) + confidence_score: Mapped[float | None] = mapped_column(Float, nullable=True) + targets_json: Mapped[str | None] = mapped_column(Text, nullable=True) + conflict_flags_json: Mapped[str | None] = mapped_column(Text, nullable=True) + recommended_action: Mapped[str | None] = mapped_column(String(20), nullable=True) + reasoning: Mapped[str | None] = mapped_column(Text, nullable=True) + risk_level: Mapped[str | None] = mapped_column(String(10), nullable=True) + actual_outcome: Mapped[str | None] = mapped_column(String(20), nullable=True) + ticker = relationship("Ticker", back_populates="trade_setups") + + @property + def targets(self) -> list[dict]: + if not self.targets_json: + return [] + try: + parsed = json.loads(self.targets_json) + except (TypeError, ValueError): + return [] + return parsed if isinstance(parsed, list) else [] + + @property + def conflict_flags(self) -> list[str]: + if not self.conflict_flags_json: + return [] + try: + parsed = json.loads(self.conflict_flags_json) + except (TypeError, ValueError): + return [] + if not isinstance(parsed, list): + return [] + return [str(item) for item in parsed] diff --git a/app/providers/fundamentals_chain.py b/app/providers/fundamentals_chain.py new file mode 100644 index 0000000..8be9382 --- /dev/null +++ 
b/app/providers/fundamentals_chain.py @@ -0,0 +1,253 @@ +"""Chained fundamentals provider with fallback adapters. + +Order: +1) FMP (if configured) +2) Finnhub (if configured) +3) Alpha Vantage (if configured) +""" + +from __future__ import annotations + +import logging +import os +from datetime import datetime, timezone +from pathlib import Path + +import httpx + +from app.config import settings +from app.exceptions import ProviderError, RateLimitError +from app.providers.fmp import FMPFundamentalProvider +from app.providers.protocol import FundamentalData, FundamentalProvider + +logger = logging.getLogger(__name__) + +_CA_BUNDLE = os.environ.get("SSL_CERT_FILE", "") +if not _CA_BUNDLE or not Path(_CA_BUNDLE).exists(): + _CA_BUNDLE_PATH: str | bool = True +else: + _CA_BUNDLE_PATH = _CA_BUNDLE + + +def _safe_float(value: object) -> float | None: + if value is None: + return None + try: + return float(value) + except (TypeError, ValueError): + return None + + +class FinnhubFundamentalProvider: + """Fundamentals provider backed by Finnhub free endpoints.""" + + def __init__(self, api_key: str) -> None: + if not api_key: + raise ProviderError("Finnhub API key is required") + self._api_key = api_key + self._base_url = "https://finnhub.io/api/v1" + + async def fetch_fundamentals(self, ticker: str) -> FundamentalData: + unavailable: dict[str, str] = {} + + async with httpx.AsyncClient(timeout=30.0, verify=_CA_BUNDLE_PATH) as client: + profile_resp = await client.get( + f"{self._base_url}/stock/profile2", + params={"symbol": ticker, "token": self._api_key}, + ) + metric_resp = await client.get( + f"{self._base_url}/stock/metric", + params={"symbol": ticker, "metric": "all", "token": self._api_key}, + ) + earnings_resp = await client.get( + f"{self._base_url}/stock/earnings", + params={"symbol": ticker, "limit": 1, "token": self._api_key}, + ) + + for resp, endpoint in ( + (profile_resp, "profile2"), + (metric_resp, "stock/metric"), + (earnings_resp, "stock/earnings"), + 
): + if resp.status_code == 429: + raise RateLimitError(f"Finnhub rate limit hit for {ticker} ({endpoint})") + if resp.status_code in (401, 403): + raise ProviderError(f"Finnhub access denied for {ticker} ({endpoint}): HTTP {resp.status_code}") + if resp.status_code != 200: + raise ProviderError(f"Finnhub error for {ticker} ({endpoint}): HTTP {resp.status_code}") + + profile_payload = profile_resp.json() if profile_resp.text else {} + metric_payload = metric_resp.json() if metric_resp.text else {} + earnings_payload = earnings_resp.json() if earnings_resp.text else [] + + metrics = metric_payload.get("metric", {}) if isinstance(metric_payload, dict) else {} + market_cap = _safe_float((profile_payload or {}).get("marketCapitalization")) + pe_ratio = _safe_float(metrics.get("peTTM") or metrics.get("peNormalizedAnnual")) + revenue_growth = _safe_float(metrics.get("revenueGrowthTTMYoy") or metrics.get("revenueGrowth5Y")) + + earnings_surprise = None + if isinstance(earnings_payload, list) and earnings_payload: + first = earnings_payload[0] if isinstance(earnings_payload[0], dict) else {} + earnings_surprise = _safe_float(first.get("surprisePercent")) + + if pe_ratio is None: + unavailable["pe_ratio"] = "not available from provider payload" + if revenue_growth is None: + unavailable["revenue_growth"] = "not available from provider payload" + if earnings_surprise is None: + unavailable["earnings_surprise"] = "not available from provider payload" + if market_cap is None: + unavailable["market_cap"] = "not available from provider payload" + + return FundamentalData( + ticker=ticker, + pe_ratio=pe_ratio, + revenue_growth=revenue_growth, + earnings_surprise=earnings_surprise, + market_cap=market_cap, + fetched_at=datetime.now(timezone.utc), + unavailable_fields=unavailable, + ) + + +class AlphaVantageFundamentalProvider: + """Fundamentals provider backed by Alpha Vantage free endpoints.""" + + def __init__(self, api_key: str) -> None: + if not api_key: + raise 
ProviderError("Alpha Vantage API key is required") + self._api_key = api_key + self._base_url = "https://www.alphavantage.co/query" + + async def fetch_fundamentals(self, ticker: str) -> FundamentalData: + unavailable: dict[str, str] = {} + + async with httpx.AsyncClient(timeout=30.0, verify=_CA_BUNDLE_PATH) as client: + overview_resp = await client.get( + self._base_url, + params={"function": "OVERVIEW", "symbol": ticker, "apikey": self._api_key}, + ) + earnings_resp = await client.get( + self._base_url, + params={"function": "EARNINGS", "symbol": ticker, "apikey": self._api_key}, + ) + income_resp = await client.get( + self._base_url, + params={"function": "INCOME_STATEMENT", "symbol": ticker, "apikey": self._api_key}, + ) + + for resp, endpoint in ( + (overview_resp, "OVERVIEW"), + (earnings_resp, "EARNINGS"), + (income_resp, "INCOME_STATEMENT"), + ): + if resp.status_code == 429: + raise RateLimitError(f"Alpha Vantage rate limit hit for {ticker} ({endpoint})") + if resp.status_code != 200: + raise ProviderError(f"Alpha Vantage error for {ticker} ({endpoint}): HTTP {resp.status_code}") + + overview = overview_resp.json() if overview_resp.text else {} + earnings = earnings_resp.json() if earnings_resp.text else {} + income = income_resp.json() if income_resp.text else {} + + if isinstance(overview, dict) and overview.get("Information"): + raise ProviderError(f"Alpha Vantage unavailable for {ticker}: {overview.get('Information')}") + if isinstance(overview, dict) and overview.get("Note"): + raise RateLimitError(f"Alpha Vantage rate limit for {ticker}: {overview.get('Note')}") + + pe_ratio = _safe_float((overview or {}).get("PERatio")) + market_cap = _safe_float((overview or {}).get("MarketCapitalization")) + + earnings_surprise = None + quarterly = earnings.get("quarterlyEarnings", []) if isinstance(earnings, dict) else [] + if isinstance(quarterly, list) and quarterly: + first = quarterly[0] if isinstance(quarterly[0], dict) else {} + earnings_surprise = 
_safe_float(first.get("surprisePercentage")) + + revenue_growth = None + annual = income.get("annualReports", []) if isinstance(income, dict) else [] + if isinstance(annual, list) and len(annual) >= 2: + curr = _safe_float((annual[0] or {}).get("totalRevenue")) + prev = _safe_float((annual[1] or {}).get("totalRevenue")) + if curr is not None and prev not in (None, 0): + revenue_growth = ((curr - prev) / abs(prev)) * 100.0 + + if pe_ratio is None: + unavailable["pe_ratio"] = "not available from provider payload" + if revenue_growth is None: + unavailable["revenue_growth"] = "not available from provider payload" + if earnings_surprise is None: + unavailable["earnings_surprise"] = "not available from provider payload" + if market_cap is None: + unavailable["market_cap"] = "not available from provider payload" + + return FundamentalData( + ticker=ticker, + pe_ratio=pe_ratio, + revenue_growth=revenue_growth, + earnings_surprise=earnings_surprise, + market_cap=market_cap, + fetched_at=datetime.now(timezone.utc), + unavailable_fields=unavailable, + ) + + +class ChainedFundamentalProvider: + """Try multiple fundamental providers in order until one succeeds.""" + + def __init__(self, providers: list[tuple[str, FundamentalProvider]]) -> None: + if not providers: + raise ProviderError("No fundamental providers configured") + self._providers = providers + + async def fetch_fundamentals(self, ticker: str) -> FundamentalData: + errors: list[str] = [] + + for provider_name, provider in self._providers: + try: + data = await provider.fetch_fundamentals(ticker) + + has_any_metric = any( + value is not None + for value in (data.pe_ratio, data.revenue_growth, data.earnings_surprise, data.market_cap) + ) + if not has_any_metric: + errors.append(f"{provider_name}: no usable metrics returned") + continue + + unavailable = dict(data.unavailable_fields) + unavailable["provider"] = provider_name + + return FundamentalData( + ticker=data.ticker, + pe_ratio=data.pe_ratio, + 
revenue_growth=data.revenue_growth, + earnings_surprise=data.earnings_surprise, + market_cap=data.market_cap, + fetched_at=data.fetched_at, + unavailable_fields=unavailable, + ) + except Exception as exc: + errors.append(f"{provider_name}: {type(exc).__name__}: {exc}") + + attempts = "; ".join(errors[:6]) if errors else "no provider attempts" + raise ProviderError(f"All fundamentals providers failed for {ticker}. Attempts: {attempts}") + + +def build_fundamental_provider_chain() -> FundamentalProvider: + providers: list[tuple[str, FundamentalProvider]] = [] + + if settings.fmp_api_key: + providers.append(("fmp", FMPFundamentalProvider(settings.fmp_api_key))) + if settings.finnhub_api_key: + providers.append(("finnhub", FinnhubFundamentalProvider(settings.finnhub_api_key))) + if settings.alpha_vantage_api_key: + providers.append(("alpha_vantage", AlphaVantageFundamentalProvider(settings.alpha_vantage_api_key))) + + if not providers: + raise ProviderError( + "No fundamentals provider configured. Set one of FMP_API_KEY, FINNHUB_API_KEY, ALPHA_VANTAGE_API_KEY" + ) + + logger.info("Fundamentals provider chain configured: %s", [name for name, _ in providers]) + return ChainedFundamentalProvider(providers) diff --git a/app/providers/openai_sentiment.py b/app/providers/openai_sentiment.py index 4d779d4..99c32d7 100644 --- a/app/providers/openai_sentiment.py +++ b/app/providers/openai_sentiment.py @@ -33,6 +33,24 @@ Rules: - reasoning should cite specific recent news or events you found """ +_SENTIMENT_BATCH_PROMPT = """\ +Search the web for the LATEST news, analyst opinions, and market developments \ +about each stock ticker from the past 24-48 hours. 
+ +Tickers: +{tickers_csv} + +Respond ONLY with a JSON array (no markdown, no extra text), one object per ticker: +[{{"ticker":"AAPL","classification":"bullish|bearish|neutral","confidence":0-100,"reasoning":"brief explanation"}}] + +Rules: +- Include every ticker exactly once +- ticker must be uppercase symbol +- classification must be exactly one of: bullish, bearish, neutral +- confidence must be an integer from 0 to 100 +- reasoning should cite specific recent news or events you found +""" + VALID_CLASSIFICATIONS = {"bullish", "bearish", "neutral"} @@ -49,6 +67,59 @@ class OpenAISentimentProvider: self._client = AsyncOpenAI(api_key=api_key, http_client=http_client) self._model = model + @staticmethod + def _extract_raw_text(response: object, ticker_context: str) -> str: + raw_text = "" + for item in response.output: + if item.type == "message" and item.content: + for block in item.content: + if hasattr(block, "text") and block.text: + raw_text = block.text + break + if raw_text: + break + + if not raw_text: + raise ProviderError(f"No text output from OpenAI for {ticker_context}") + + clean = raw_text.strip() + if clean.startswith("```"): + clean = clean.split("\n", 1)[1] if "\n" in clean else clean[3:] + if clean.endswith("```"): + clean = clean[:-3] + return clean.strip() + + @staticmethod + def _normalize_single_result(parsed: dict, ticker: str, citations: list[dict[str, str]]) -> SentimentData: + classification = str(parsed.get("classification", "")).lower() + if classification not in VALID_CLASSIFICATIONS: + raise ProviderError( + f"Invalid classification '{classification}' from OpenAI for {ticker}" + ) + + confidence = int(parsed.get("confidence", 50)) + confidence = max(0, min(100, confidence)) + reasoning = str(parsed.get("reasoning", "")) + + if reasoning: + logger.info( + "OpenAI sentiment for %s: %s (confidence=%d) — %s", + ticker, + classification, + confidence, + reasoning, + ) + + return SentimentData( + ticker=ticker, + 
classification=classification, + confidence=confidence, + source="openai", + timestamp=datetime.now(timezone.utc), + reasoning=reasoning, + citations=citations, + ) + async def fetch_sentiment(self, ticker: str) -> SentimentData: """Use the Responses API with web_search_preview to get live sentiment.""" try: @@ -58,48 +129,10 @@ class OpenAISentimentProvider: instructions="You are a financial sentiment analyst. Always respond with valid JSON only, no markdown fences.", input=_SENTIMENT_PROMPT.format(ticker=ticker), ) - - # Extract text from the ResponseOutputMessage in the output - raw_text = "" - for item in response.output: - if item.type == "message" and item.content: - for block in item.content: - if hasattr(block, "text") and block.text: - raw_text = block.text - break - if raw_text: - break - - if not raw_text: - raise ProviderError(f"No text output from OpenAI for {ticker}") - - raw_text = raw_text.strip() - logger.debug("OpenAI raw response for %s: %s", ticker, raw_text) - - # Strip markdown fences if present - clean = raw_text - if clean.startswith("```"): - clean = clean.split("\n", 1)[1] if "\n" in clean else clean[3:] - if clean.endswith("```"): - clean = clean[:-3] - clean = clean.strip() - + clean = self._extract_raw_text(response, ticker) + logger.debug("OpenAI raw response for %s: %s", ticker, clean) parsed = json.loads(clean) - classification = parsed.get("classification", "").lower() - if classification not in VALID_CLASSIFICATIONS: - raise ProviderError( - f"Invalid classification '{classification}' from OpenAI for {ticker}" - ) - - confidence = int(parsed.get("confidence", 50)) - confidence = max(0, min(100, confidence)) - - reasoning = parsed.get("reasoning", "") - if reasoning: - logger.info("OpenAI sentiment for %s: %s (confidence=%d) — %s", - ticker, classification, confidence, reasoning) - # Extract url_citation annotations from response output citations: list[dict[str, str]] = [] for item in response.output: @@ -112,19 +145,10 @@ class 
OpenAISentimentProvider: "url": getattr(annotation, "url", ""), "title": getattr(annotation, "title", ""), }) - - return SentimentData( - ticker=ticker, - classification=classification, - confidence=confidence, - source="openai", - timestamp=datetime.now(timezone.utc), - reasoning=reasoning, - citations=citations, - ) + return self._normalize_single_result(parsed, ticker, citations) except json.JSONDecodeError as exc: - logger.error("Failed to parse OpenAI JSON for %s: %s — raw: %s", ticker, exc, raw_text) + logger.error("Failed to parse OpenAI JSON for %s: %s", ticker, exc) raise ProviderError(f"Invalid JSON from OpenAI for {ticker}") from exc except ProviderError: raise @@ -134,3 +158,49 @@ class OpenAISentimentProvider: raise RateLimitError(f"OpenAI rate limit hit for {ticker}") from exc logger.error("OpenAI provider error for %s: %s", ticker, exc) raise ProviderError(f"OpenAI provider error for {ticker}: {exc}") from exc + + async def fetch_sentiment_batch(self, tickers: list[str]) -> dict[str, SentimentData]: + """Fetch sentiment for multiple tickers in one OpenAI request. + + Returns a map keyed by uppercase ticker symbol. Invalid/missing rows are skipped. + """ + normalized = [t.strip().upper() for t in tickers if t and t.strip()] + if not normalized: + return {} + + ticker_context = ",".join(normalized) + try: + response = await self._client.responses.create( + model=self._model, + tools=[{"type": "web_search_preview"}], + instructions="You are a financial sentiment analyst. 
Always respond with valid JSON only, no markdown fences.", + input=_SENTIMENT_BATCH_PROMPT.format(tickers_csv=", ".join(normalized)), + ) + clean = self._extract_raw_text(response, ticker_context) + logger.debug("OpenAI batch raw response for %s: %s", ticker_context, clean) + parsed = json.loads(clean) + if not isinstance(parsed, list): + raise ProviderError("Batch sentiment response must be a JSON array") + + out: dict[str, SentimentData] = {} + requested = set(normalized) + for row in parsed: + if not isinstance(row, dict): + continue + symbol = str(row.get("ticker", "")).strip().upper() + if symbol not in requested: + continue + try: + out[symbol] = self._normalize_single_result(row, symbol, citations=[]) + except Exception: + continue + return out + except json.JSONDecodeError as exc: + raise ProviderError(f"Invalid batch JSON from OpenAI for {ticker_context}") from exc + except ProviderError: + raise + except Exception as exc: + msg = str(exc).lower() + if "429" in msg or "rate" in msg or "quota" in msg: + raise RateLimitError(f"OpenAI rate limit hit for batch {ticker_context}") from exc + raise ProviderError(f"OpenAI batch provider error for {ticker_context}: {exc}") from exc diff --git a/app/routers/admin.py b/app/routers/admin.py index e69f634..f76d31f 100644 --- a/app/routers/admin.py +++ b/app/routers/admin.py @@ -3,7 +3,7 @@ All endpoints require admin role. 
""" -from fastapi import APIRouter, Depends +from fastapi import APIRouter, Depends, Query from sqlalchemy.ext.asyncio import AsyncSession from app.dependencies import get_db, require_admin @@ -12,13 +12,16 @@ from app.schemas.admin import ( CreateUserRequest, DataCleanupRequest, JobToggle, + RecommendationConfigUpdate, PasswordReset, RegistrationToggle, SystemSettingUpdate, + TickerUniverseUpdate, UserManagement, ) from app.schemas.common import APIEnvelope from app.services import admin_service +from app.services import ticker_universe_service router = APIRouter(tags=["admin"]) @@ -123,6 +126,47 @@ async def list_settings( ) +@router.get("/admin/settings/recommendations", response_model=APIEnvelope) +async def get_recommendation_settings( + _admin: User = Depends(require_admin), + db: AsyncSession = Depends(get_db), +): + config = await admin_service.get_recommendation_config(db) + return APIEnvelope(status="success", data=config) + + +@router.put("/admin/settings/recommendations", response_model=APIEnvelope) +async def update_recommendation_settings( + body: RecommendationConfigUpdate, + _admin: User = Depends(require_admin), + db: AsyncSession = Depends(get_db), +): + updated = await admin_service.update_recommendation_config( + db, + body.model_dump(exclude_unset=True), + ) + return APIEnvelope(status="success", data=updated) + + +@router.get("/admin/settings/ticker-universe", response_model=APIEnvelope) +async def get_ticker_universe_setting( + _admin: User = Depends(require_admin), + db: AsyncSession = Depends(get_db), +): + data = await admin_service.get_ticker_universe_default(db) + return APIEnvelope(status="success", data=data) + + +@router.put("/admin/settings/ticker-universe", response_model=APIEnvelope) +async def update_ticker_universe_setting( + body: TickerUniverseUpdate, + _admin: User = Depends(require_admin), + db: AsyncSession = Depends(get_db), +): + data = await admin_service.update_ticker_universe_default(db, body.universe) + return 
APIEnvelope(status="success", data=data) + + @router.put("/admin/settings/{key}", response_model=APIEnvelope) async def update_setting( key: str, @@ -138,6 +182,21 @@ async def update_setting( ) +@router.post("/admin/tickers/bootstrap", response_model=APIEnvelope) +async def bootstrap_tickers( + universe: str = Query("sp500", pattern="^(sp500|nasdaq100|nasdaq_all)$"), + prune_missing: bool = Query(False), + _admin: User = Depends(require_admin), + db: AsyncSession = Depends(get_db), +): + result = await ticker_universe_service.bootstrap_universe( + db, + universe, + prune_missing=prune_missing, + ) + return APIEnvelope(status="success", data=result) + + # --------------------------------------------------------------------------- # Data cleanup # --------------------------------------------------------------------------- @@ -167,6 +226,15 @@ async def list_jobs( return APIEnvelope(status="success", data=jobs) +@router.get("/admin/pipeline/readiness", response_model=APIEnvelope) +async def get_pipeline_readiness( + _admin: User = Depends(require_admin), + db: AsyncSession = Depends(get_db), +): + data = await admin_service.get_pipeline_readiness(db) + return APIEnvelope(status="success", data=data) + + @router.post("/admin/jobs/{job_name}/trigger", response_model=APIEnvelope) async def trigger_job( job_name: str, diff --git a/app/routers/ingestion.py b/app/routers/ingestion.py index c2e8785..2ae79be 100644 --- a/app/routers/ingestion.py +++ b/app/routers/ingestion.py @@ -18,10 +18,17 @@ from app.dependencies import get_db, require_access from app.exceptions import ProviderError from app.models.user import User from app.providers.alpaca import AlpacaOHLCVProvider -from app.providers.fmp import FMPFundamentalProvider +from app.providers.fundamentals_chain import build_fundamental_provider_chain from app.providers.openai_sentiment import OpenAISentimentProvider +from app.services.rr_scanner_service import scan_ticker from app.schemas.common import APIEnvelope -from 
app.services import fundamental_service, ingestion_service, sentiment_service +from app.services import ( + fundamental_service, + ingestion_service, + scoring_service, + sentiment_service, + sr_service, +) logger = logging.getLogger(__name__) @@ -99,10 +106,10 @@ async def fetch_symbol( } # --- Fundamentals --- - if settings.fmp_api_key: + if settings.fmp_api_key or settings.finnhub_api_key or settings.alpha_vantage_api_key: try: - fmp_provider = FMPFundamentalProvider(settings.fmp_api_key) - fdata = await fmp_provider.fetch_fundamentals(symbol_upper) + fundamentals_provider = build_fundamental_provider_chain() + fdata = await fundamentals_provider.fetch_fundamentals(symbol_upper) await fundamental_service.store_fundamental( db, symbol=symbol_upper, @@ -119,9 +126,50 @@ async def fetch_symbol( else: sources["fundamentals"] = { "status": "skipped", - "message": "FMP API key not configured", + "message": "No fundamentals provider key configured", } + # --- Derived pipeline: S/R levels --- + try: + levels = await sr_service.recalculate_sr_levels(db, symbol_upper) + sources["sr_levels"] = { + "status": "ok", + "count": len(levels), + "message": None, + } + except Exception as exc: + logger.error("S/R recalc failed for %s: %s", symbol_upper, exc) + sources["sr_levels"] = {"status": "error", "message": str(exc)} + + # --- Derived pipeline: scores --- + try: + score_payload = await scoring_service.get_score(db, symbol_upper) + sources["scores"] = { + "status": "ok", + "composite_score": score_payload.get("composite_score"), + "missing_dimensions": score_payload.get("missing_dimensions", []), + "message": None, + } + except Exception as exc: + logger.error("Score recompute failed for %s: %s", symbol_upper, exc) + sources["scores"] = {"status": "error", "message": str(exc)} + + # --- Derived pipeline: scanner --- + try: + setups = await scan_ticker( + db, + symbol_upper, + rr_threshold=settings.default_rr_threshold, + ) + sources["scanner"] = { + "status": "ok", + 
"setups_found": len(setups), + "message": None, + } + except Exception as exc: + logger.error("Scanner run failed for %s: %s", symbol_upper, exc) + sources["scanner"] = {"status": "error", "message": str(exc)} + # Always return success — per-source breakdown tells the full story return APIEnvelope( status="success", diff --git a/app/routers/trades.py b/app/routers/trades.py index 5694df7..fd44c87 100644 --- a/app/routers/trades.py +++ b/app/routers/trades.py @@ -5,8 +5,8 @@ from sqlalchemy.ext.asyncio import AsyncSession from app.dependencies import get_db, require_access from app.schemas.common import APIEnvelope -from app.schemas.trade_setup import TradeSetupResponse -from app.services.rr_scanner_service import get_trade_setups +from app.schemas.trade_setup import RecommendationSummaryResponse, TradeSetupResponse +from app.services.rr_scanner_service import get_trade_setup_history, get_trade_setups router = APIRouter(tags=["trades"]) @@ -16,13 +16,73 @@ async def list_trade_setups( direction: str | None = Query( None, description="Filter by direction: long or short" ), + min_confidence: float | None = Query( + None, ge=0, le=100, description="Minimum confidence score" + ), + recommended_action: str | None = Query( + None, + description="Filter by action: LONG_HIGH, LONG_MODERATE, SHORT_HIGH, SHORT_MODERATE, NEUTRAL", + ), _user=Depends(require_access), db: AsyncSession = Depends(get_db), ) -> APIEnvelope: - """Get all trade setups sorted by R:R desc, secondary composite desc. 
+ """Get latest trade setups with recommendation data.""" + rows = await get_trade_setups( + db, + direction=direction, + min_confidence=min_confidence, + recommended_action=recommended_action, + ) + + data = [] + for row in rows: + summary = RecommendationSummaryResponse( + action=row.get("recommended_action") or "NEUTRAL", + reasoning=row.get("reasoning"), + risk_level=row.get("risk_level"), + composite_score=row["composite_score"], + ) + payload = {**row, "recommendation_summary": summary} + data.append(TradeSetupResponse(**payload).model_dump(mode="json")) - Optional direction filter (long/short). - """ - rows = await get_trade_setups(db, direction=direction) - data = [TradeSetupResponse(**r).model_dump(mode="json") for r in rows] + return APIEnvelope(status="success", data=data) + + +@router.get("/trades/{symbol}", response_model=APIEnvelope) +async def get_ticker_trade_setups( + symbol: str, + _user=Depends(require_access), + db: AsyncSession = Depends(get_db), +) -> APIEnvelope: + rows = await get_trade_setups(db, symbol=symbol) + data = [] + for row in rows: + summary = RecommendationSummaryResponse( + action=row.get("recommended_action") or "NEUTRAL", + reasoning=row.get("reasoning"), + risk_level=row.get("risk_level"), + composite_score=row["composite_score"], + ) + payload = {**row, "recommendation_summary": summary} + data.append(TradeSetupResponse(**payload).model_dump(mode="json")) + return APIEnvelope(status="success", data=data) + + +@router.get("/trades/{symbol}/history", response_model=APIEnvelope) +async def get_ticker_trade_history( + symbol: str, + _user=Depends(require_access), + db: AsyncSession = Depends(get_db), +) -> APIEnvelope: + rows = await get_trade_setup_history(db, symbol=symbol) + data = [] + for row in rows: + summary = RecommendationSummaryResponse( + action=row.get("recommended_action") or "NEUTRAL", + reasoning=row.get("reasoning"), + risk_level=row.get("risk_level"), + composite_score=row["composite_score"], + ) + payload = 
{**row, "recommendation_summary": summary} + data.append(TradeSetupResponse(**payload).model_dump(mode="json")) return APIEnvelope(status="success", data=data) diff --git a/app/scheduler.py b/app/scheduler.py index 59ddfb5..c61d34c 100644 --- a/app/scheduler.py +++ b/app/scheduler.py @@ -15,21 +15,27 @@ from __future__ import annotations import json import logging -from datetime import date, timedelta +import asyncio +from datetime import date, datetime, timedelta, timezone from apscheduler.schedulers.asyncio import AsyncIOScheduler -from sqlalchemy import select +from sqlalchemy import case, func, select from sqlalchemy.ext.asyncio import AsyncSession from app.config import settings from app.database import async_session_factory +from app.models.fundamental import FundamentalData +from app.models.ohlcv import OHLCVRecord from app.models.settings import SystemSetting +from app.models.sentiment import SentimentScore from app.models.ticker import Ticker from app.providers.alpaca import AlpacaOHLCVProvider -from app.providers.fmp import FMPFundamentalProvider +from app.providers.fundamentals_chain import build_fundamental_provider_chain from app.providers.openai_sentiment import OpenAISentimentProvider +from app.providers.protocol import SentimentData from app.services import fundamental_service, ingestion_service, sentiment_service from app.services.rr_scanner_service import scan_all_tickers +from app.services.ticker_universe_service import bootstrap_universe logger = logging.getLogger(__name__) @@ -43,6 +49,64 @@ _last_successful: dict[str, str | None] = { "fundamental_collector": None, } +_job_runtime: dict[str, dict[str, object]] = { + "data_collector": { + "running": False, + "status": "idle", + "processed": 0, + "total": None, + "progress_pct": None, + "current_ticker": None, + "started_at": None, + "finished_at": None, + "message": None, + }, + "sentiment_collector": { + "running": False, + "status": "idle", + "processed": 0, + "total": None, + "progress_pct": 
None, + "current_ticker": None, + "started_at": None, + "finished_at": None, + "message": None, + }, + "fundamental_collector": { + "running": False, + "status": "idle", + "processed": 0, + "total": None, + "progress_pct": None, + "current_ticker": None, + "started_at": None, + "finished_at": None, + "message": None, + }, + "rr_scanner": { + "running": False, + "status": "idle", + "processed": 0, + "total": None, + "progress_pct": None, + "current_ticker": None, + "started_at": None, + "finished_at": None, + "message": None, + }, + "ticker_universe_sync": { + "running": False, + "status": "idle", + "processed": 0, + "total": None, + "progress_pct": None, + "current_ticker": None, + "started_at": None, + "finished_at": None, + "message": None, + }, +} + # --------------------------------------------------------------------------- # Helpers @@ -62,6 +126,71 @@ def _log_job_error(job_name: str, ticker: str, error: Exception) -> None: ) +def _runtime_start(job_name: str, total: int | None = None, message: str | None = None) -> None: + now = datetime.now(timezone.utc).isoformat() + _job_runtime[job_name] = { + "running": True, + "status": "running", + "processed": 0, + "total": total, + "progress_pct": 0.0 if total and total > 0 else None, + "current_ticker": None, + "started_at": now, + "finished_at": None, + "message": message, + } + + +def _runtime_progress( + job_name: str, + processed: int, + total: int | None, + current_ticker: str | None = None, + message: str | None = None, +) -> None: + progress_pct: float | None = None + if total and total > 0: + progress_pct = round((processed / total) * 100.0, 1) + runtime = _job_runtime.get(job_name, {}) + runtime.update({ + "running": True, + "status": "running", + "processed": processed, + "total": total, + "progress_pct": progress_pct, + "current_ticker": current_ticker, + "message": message, + }) + _job_runtime[job_name] = runtime + + +def _runtime_finish( + job_name: str, + status: str, + processed: int, + total: int | 
None, + message: str | None = None, +) -> None: + runtime = _job_runtime.get(job_name, {}) + runtime.update({ + "running": False, + "status": status, + "processed": processed, + "total": total, + "progress_pct": 100.0 if total and processed >= total else runtime.get("progress_pct"), + "current_ticker": None, + "finished_at": datetime.now(timezone.utc).isoformat(), + "message": message, + }) + _job_runtime[job_name] = runtime + + +def get_job_runtime_snapshot(job_name: str | None = None) -> dict[str, dict[str, object]] | dict[str, object]: + if job_name is not None: + return dict(_job_runtime.get(job_name, {})) + return {name: dict(meta) for name, meta in _job_runtime.items()} + + async def _is_job_enabled(db: AsyncSession, job_name: str) -> bool: """Check SystemSetting for job enabled state. Defaults to True.""" key = f"job_{job_name}_enabled" @@ -80,6 +209,61 @@ async def _get_all_tickers(db: AsyncSession) -> list[str]: return list(result.scalars().all()) +async def _get_ohlcv_priority_tickers(db: AsyncSession) -> list[str]: + """Return symbols prioritized for OHLCV collection. + + Priority: + 1) Tickers with no OHLCV bars + 2) Tickers with data, oldest latest OHLCV date first + 3) Alphabetical tiebreaker + """ + latest_date = func.max(OHLCVRecord.date) + missing_first = case((latest_date.is_(None), 0), else_=1) + result = await db.execute( + select(Ticker.symbol) + .outerjoin(OHLCVRecord, OHLCVRecord.ticker_id == Ticker.id) + .group_by(Ticker.id, Ticker.symbol) + .order_by(missing_first.asc(), latest_date.asc(), Ticker.symbol.asc()) + ) + return list(result.scalars().all()) + + +async def _get_sentiment_priority_tickers(db: AsyncSession) -> list[str]: + """Return symbols prioritized for sentiment collection. 
+ + Priority: + 1) Tickers with no sentiment records + 2) Tickers with records, oldest latest sentiment timestamp first + 3) Alphabetical tiebreaker + """ + latest_ts = func.max(SentimentScore.timestamp) + missing_first = case((latest_ts.is_(None), 0), else_=1) + result = await db.execute( + select(Ticker.symbol) + .outerjoin(SentimentScore, SentimentScore.ticker_id == Ticker.id) + .group_by(Ticker.id, Ticker.symbol) + .order_by(missing_first.asc(), latest_ts.asc(), Ticker.symbol.asc()) + ) + return list(result.scalars().all()) + + +async def _get_fundamental_priority_tickers(db: AsyncSession) -> list[str]: + """Return symbols prioritized for fundamentals refresh. + + Priority: + 1) Tickers with no fundamentals snapshot yet + 2) Tickers with existing fundamentals, oldest fetched_at first + 3) Alphabetical tiebreaker + """ + missing_first = case((FundamentalData.fetched_at.is_(None), 0), else_=1) + result = await db.execute( + select(Ticker.symbol) + .outerjoin(FundamentalData, FundamentalData.ticker_id == Ticker.id) + .order_by(missing_first.asc(), FundamentalData.fetched_at.asc(), Ticker.symbol.asc()) + ) + return list(result.scalars().all()) + + def _resume_tickers(symbols: list[str], job_name: str) -> list[str]: """Reorder tickers to resume after the last successful one (rate-limit resume). @@ -94,6 +278,11 @@ def _resume_tickers(symbols: list[str], job_name: str) -> list[str]: return symbols[idx + 1:] + symbols[:idx + 1] +def _chunked(symbols: list[str], chunk_size: int) -> list[list[str]]: + size = max(1, chunk_size) + return [symbols[i:i + size] for i in range(0, len(symbols), size)] + + # --------------------------------------------------------------------------- # Job: Data Collector (OHLCV) # --------------------------------------------------------------------------- @@ -104,68 +293,84 @@ async def collect_ohlcv() -> None: Uses AlpacaOHLCVProvider. Processes each ticker independently. On rate limit, records last successful ticker for resume. 
+ Start date is resolved by ingestion progress: + - existing ticker: resume from last_ingested_date + 1 + - new ticker: backfill ~1 year by default """ job_name = "data_collector" logger.info(json.dumps({"event": "job_start", "job": job_name})) - - async with async_session_factory() as db: - if not await _is_job_enabled(db, job_name): - logger.info(json.dumps({"event": "job_skipped", "job": job_name, "reason": "disabled"})) - return - - symbols = await _get_all_tickers(db) - if not symbols: - logger.info(json.dumps({"event": "job_complete", "job": job_name, "tickers": 0})) - return - - # Reorder for rate-limit resume - symbols = _resume_tickers(symbols, job_name) - - # Build provider (skip if keys not configured) - if not settings.alpaca_api_key or not settings.alpaca_api_secret: - logger.warning(json.dumps({"event": "job_skipped", "job": job_name, "reason": "alpaca keys not configured"})) - return + _runtime_start(job_name) + processed = 0 + total: int | None = None try: - provider = AlpacaOHLCVProvider(settings.alpaca_api_key, settings.alpaca_api_secret) - except Exception as exc: - logger.error(json.dumps({"event": "job_error", "job": job_name, "error_type": type(exc).__name__, "message": str(exc)})) - return - - end_date = date.today() - start_date = end_date - timedelta(days=5) # Fetch last 5 days to catch up - processed = 0 - - for symbol in symbols: async with async_session_factory() as db: - try: - result = await ingestion_service.fetch_and_ingest( - db, provider, symbol, start_date=start_date, end_date=end_date, - ) - _last_successful[job_name] = symbol - processed += 1 - logger.info(json.dumps({ - "event": "ticker_collected", - "job": job_name, - "ticker": symbol, - "status": result.status, - "records": result.records_ingested, - })) - if result.status == "partial": - # Rate limited — stop and resume next run - logger.warning(json.dumps({ - "event": "rate_limited", + if not await _is_job_enabled(db, job_name): + logger.info(json.dumps({"event": 
"job_skipped", "job": job_name, "reason": "disabled"})) + _runtime_finish(job_name, "skipped", processed=0, total=0, message="Disabled") + return + + symbols = await _get_ohlcv_priority_tickers(db) + if not symbols: + logger.info(json.dumps({"event": "job_complete", "job": job_name, "tickers": 0})) + _runtime_finish(job_name, "completed", processed=0, total=0, message="No tickers") + return + + total = len(symbols) + _runtime_progress(job_name, processed=0, total=total) + + # Build provider (skip if keys not configured) + if not settings.alpaca_api_key or not settings.alpaca_api_secret: + logger.warning(json.dumps({"event": "job_skipped", "job": job_name, "reason": "alpaca keys not configured"})) + _runtime_finish(job_name, "skipped", processed=0, total=total, message="Alpaca keys not configured") + return + + try: + provider = AlpacaOHLCVProvider(settings.alpaca_api_key, settings.alpaca_api_secret) + except Exception as exc: + logger.error(json.dumps({"event": "job_error", "job": job_name, "error_type": type(exc).__name__, "message": str(exc)})) + _runtime_finish(job_name, "error", processed=0, total=total, message=str(exc)) + return + + end_date = date.today() + + for symbol in symbols: + _runtime_progress(job_name, processed=processed, total=total, current_ticker=symbol) + async with async_session_factory() as db: + try: + result = await ingestion_service.fetch_and_ingest( + db, provider, symbol, start_date=None, end_date=end_date, + ) + _last_successful[job_name] = symbol + processed += 1 + _runtime_progress(job_name, processed=processed, total=total, current_ticker=symbol) + logger.info(json.dumps({ + "event": "ticker_collected", "job": job_name, "ticker": symbol, - "processed": processed, + "status": result.status, + "records": result.records_ingested, })) - return - except Exception as exc: - _log_job_error(job_name, symbol, exc) + if result.status == "partial": + # Rate limited — stop and resume next run + logger.warning(json.dumps({ + "event": 
"rate_limited", + "job": job_name, + "ticker": symbol, + "processed": processed, + })) + _runtime_finish(job_name, "rate_limited", processed=processed, total=total, message=f"Rate limited at {symbol}") + return + except Exception as exc: + _log_job_error(job_name, symbol, exc) - # Reset resume pointer on full completion - _last_successful[job_name] = None - logger.info(json.dumps({"event": "job_complete", "job": job_name, "tickers": processed})) + # Reset resume pointer on full completion + _last_successful[job_name] = None + logger.info(json.dumps({"event": "job_complete", "job": job_name, "tickers": processed})) + _runtime_finish(job_name, "completed", processed=processed, total=total, message=f"Processed {processed} tickers") + except Exception as exc: + logger.error(json.dumps({"event": "job_error", "job": job_name, "error_type": type(exc).__name__, "message": str(exc)})) + _runtime_finish(job_name, "error", processed=processed, total=total, message=str(exc)) # --------------------------------------------------------------------------- @@ -181,68 +386,119 @@ async def collect_sentiment() -> None: """ job_name = "sentiment_collector" logger.info(json.dumps({"event": "job_start", "job": job_name})) - - async with async_session_factory() as db: - if not await _is_job_enabled(db, job_name): - logger.info(json.dumps({"event": "job_skipped", "job": job_name, "reason": "disabled"})) - return - - symbols = await _get_all_tickers(db) - if not symbols: - logger.info(json.dumps({"event": "job_complete", "job": job_name, "tickers": 0})) - return - - symbols = _resume_tickers(symbols, job_name) - - if not settings.openai_api_key: - logger.warning(json.dumps({"event": "job_skipped", "job": job_name, "reason": "openai key not configured"})) - return + _runtime_start(job_name) + processed = 0 + total: int | None = None try: - provider = OpenAISentimentProvider(settings.openai_api_key, settings.openai_model) + async with async_session_factory() as db: + if not await 
_is_job_enabled(db, job_name): + logger.info(json.dumps({"event": "job_skipped", "job": job_name, "reason": "disabled"})) + _runtime_finish(job_name, "skipped", processed=0, total=0, message="Disabled") + return + + symbols = await _get_sentiment_priority_tickers(db) + if not symbols: + logger.info(json.dumps({"event": "job_complete", "job": job_name, "tickers": 0})) + _runtime_finish(job_name, "completed", processed=0, total=0, message="No tickers") + return + + total = len(symbols) + _runtime_progress(job_name, processed=0, total=total) + + if not settings.openai_api_key: + logger.warning(json.dumps({"event": "job_skipped", "job": job_name, "reason": "openai key not configured"})) + _runtime_finish(job_name, "skipped", processed=0, total=total, message="OpenAI key not configured") + return + + try: + provider = OpenAISentimentProvider(settings.openai_api_key, settings.openai_model) + except Exception as exc: + logger.error(json.dumps({"event": "job_error", "job": job_name, "error_type": type(exc).__name__, "message": str(exc)})) + _runtime_finish(job_name, "error", processed=0, total=total, message=str(exc)) + return + + batch_size = max(1, settings.openai_sentiment_batch_size) + batches = _chunked(symbols, batch_size) + + for batch in batches: + current_hint = batch[0] if len(batch) == 1 else f"{batch[0]} (+{len(batch) - 1})" + _runtime_progress(job_name, processed=processed, total=total, current_ticker=current_hint) + + batch_results: dict[str, SentimentData] = {} + if len(batch) > 1 and hasattr(provider, "fetch_sentiment_batch"): + try: + batch_results = await provider.fetch_sentiment_batch(batch) + except Exception as exc: + msg = str(exc).lower() + if "rate" in msg or "quota" in msg or "429" in msg: + logger.warning(json.dumps({ + "event": "rate_limited", + "job": job_name, + "ticker": batch[0], + "processed": processed, + })) + _runtime_finish(job_name, "rate_limited", processed=processed, total=total, message=f"Rate limited at {batch[0]}") + return + 
logger.warning(json.dumps({ + "event": "batch_fallback", + "job": job_name, + "batch": batch, + "reason": str(exc), + })) + + for symbol in batch: + _runtime_progress(job_name, processed=processed, total=total, current_ticker=symbol) + data = batch_results.get(symbol) if batch_results else None + + if data is None: + try: + data = await provider.fetch_sentiment(symbol) + except Exception as exc: + msg = str(exc).lower() + if "rate" in msg or "quota" in msg or "429" in msg: + logger.warning(json.dumps({ + "event": "rate_limited", + "job": job_name, + "ticker": symbol, + "processed": processed, + })) + _runtime_finish(job_name, "rate_limited", processed=processed, total=total, message=f"Rate limited at {symbol}") + return + _log_job_error(job_name, symbol, exc) + continue + + async with async_session_factory() as db: + try: + await sentiment_service.store_sentiment( + db, + symbol=symbol, + classification=data.classification, + confidence=data.confidence, + source=data.source, + timestamp=data.timestamp, + reasoning=data.reasoning, + citations=data.citations, + ) + _last_successful[job_name] = symbol + processed += 1 + _runtime_progress(job_name, processed=processed, total=total, current_ticker=symbol) + logger.info(json.dumps({ + "event": "ticker_collected", + "job": job_name, + "ticker": symbol, + "classification": data.classification, + "confidence": data.confidence, + })) + except Exception as exc: + _log_job_error(job_name, symbol, exc) + + _last_successful[job_name] = None + logger.info(json.dumps({"event": "job_complete", "job": job_name, "tickers": processed})) + _runtime_finish(job_name, "completed", processed=processed, total=total, message=f"Processed {processed} tickers") except Exception as exc: logger.error(json.dumps({"event": "job_error", "job": job_name, "error_type": type(exc).__name__, "message": str(exc)})) - return - - processed = 0 - - for symbol in symbols: - async with async_session_factory() as db: - try: - data = await 
provider.fetch_sentiment(symbol) - await sentiment_service.store_sentiment( - db, - symbol=symbol, - classification=data.classification, - confidence=data.confidence, - source=data.source, - timestamp=data.timestamp, - reasoning=data.reasoning, - citations=data.citations, - ) - _last_successful[job_name] = symbol - processed += 1 - logger.info(json.dumps({ - "event": "ticker_collected", - "job": job_name, - "ticker": symbol, - "classification": data.classification, - "confidence": data.confidence, - })) - except Exception as exc: - msg = str(exc).lower() - if "rate" in msg or "quota" in msg or "429" in msg: - logger.warning(json.dumps({ - "event": "rate_limited", - "job": job_name, - "ticker": symbol, - "processed": processed, - })) - return - _log_job_error(job_name, symbol, exc) - - _last_successful[job_name] = None - logger.info(json.dumps({"event": "job_complete", "job": job_name, "tickers": processed})) + _runtime_finish(job_name, "error", processed=processed, total=total, message=str(exc)) # --------------------------------------------------------------------------- @@ -258,65 +514,114 @@ async def collect_fundamentals() -> None: """ job_name = "fundamental_collector" logger.info(json.dumps({"event": "job_start", "job": job_name})) - - async with async_session_factory() as db: - if not await _is_job_enabled(db, job_name): - logger.info(json.dumps({"event": "job_skipped", "job": job_name, "reason": "disabled"})) - return - - symbols = await _get_all_tickers(db) - if not symbols: - logger.info(json.dumps({"event": "job_complete", "job": job_name, "tickers": 0})) - return - - symbols = _resume_tickers(symbols, job_name) - - if not settings.fmp_api_key: - logger.warning(json.dumps({"event": "job_skipped", "job": job_name, "reason": "fmp key not configured"})) - return + _runtime_start(job_name) + processed = 0 + total: int | None = None try: - provider = FMPFundamentalProvider(settings.fmp_api_key) - except Exception as exc: - logger.error(json.dumps({"event": 
"job_error", "job": job_name, "error_type": type(exc).__name__, "message": str(exc)})) - return - - processed = 0 - - for symbol in symbols: async with async_session_factory() as db: - try: - data = await provider.fetch_fundamentals(symbol) - await fundamental_service.store_fundamental( - db, - symbol=symbol, - pe_ratio=data.pe_ratio, - revenue_growth=data.revenue_growth, - earnings_surprise=data.earnings_surprise, - market_cap=data.market_cap, - unavailable_fields=data.unavailable_fields, - ) - _last_successful[job_name] = symbol - processed += 1 - logger.info(json.dumps({ - "event": "ticker_collected", - "job": job_name, - "ticker": symbol, - })) - except Exception as exc: - msg = str(exc).lower() - if "rate" in msg or "429" in msg: - logger.warning(json.dumps({ - "event": "rate_limited", + if not await _is_job_enabled(db, job_name): + logger.info(json.dumps({"event": "job_skipped", "job": job_name, "reason": "disabled"})) + _runtime_finish(job_name, "skipped", processed=0, total=0, message="Disabled") + return + + symbols = await _get_fundamental_priority_tickers(db) + if not symbols: + logger.info(json.dumps({"event": "job_complete", "job": job_name, "tickers": 0})) + _runtime_finish(job_name, "completed", processed=0, total=0, message="No tickers") + return + + total = len(symbols) + _runtime_progress(job_name, processed=0, total=total) + + if not (settings.fmp_api_key or settings.finnhub_api_key or settings.alpha_vantage_api_key): + logger.warning(json.dumps({"event": "job_skipped", "job": job_name, "reason": "no fundamentals provider keys configured"})) + _runtime_finish(job_name, "skipped", processed=0, total=total, message="No fundamentals provider keys configured") + return + + try: + provider = build_fundamental_provider_chain() + except Exception as exc: + logger.error(json.dumps({"event": "job_error", "job": job_name, "error_type": type(exc).__name__, "message": str(exc)})) + _runtime_finish(job_name, "error", processed=0, total=total, 
message=str(exc)) + return + + max_retries = max(0, settings.fundamental_rate_limit_retries) + base_backoff = max(1, settings.fundamental_rate_limit_backoff_seconds) + + for symbol in symbols: + _runtime_progress(job_name, processed=processed, total=total, current_ticker=symbol) + attempt = 0 + while True: + try: + data = await provider.fetch_fundamentals(symbol) + async with async_session_factory() as db: + await fundamental_service.store_fundamental( + db, + symbol=symbol, + pe_ratio=data.pe_ratio, + revenue_growth=data.revenue_growth, + earnings_surprise=data.earnings_surprise, + market_cap=data.market_cap, + unavailable_fields=data.unavailable_fields, + ) + _last_successful[job_name] = symbol + processed += 1 + _runtime_progress(job_name, processed=processed, total=total, current_ticker=symbol) + logger.info(json.dumps({ + "event": "ticker_collected", "job": job_name, "ticker": symbol, - "processed": processed, })) - return - _log_job_error(job_name, symbol, exc) + break + except Exception as exc: + msg = str(exc).lower() + if "rate" in msg or "429" in msg: + if attempt < max_retries: + wait_seconds = base_backoff * (2 ** attempt) + attempt += 1 + logger.warning(json.dumps({ + "event": "rate_limited_retry", + "job": job_name, + "ticker": symbol, + "attempt": attempt, + "max_retries": max_retries, + "wait_seconds": wait_seconds, + "processed": processed, + })) + _runtime_progress( + job_name, + processed=processed, + total=total, + current_ticker=symbol, + message=f"Rate-limited at {symbol}; retry {attempt}/{max_retries} in {wait_seconds}s", + ) + await asyncio.sleep(wait_seconds) + continue - _last_successful[job_name] = None - logger.info(json.dumps({"event": "job_complete", "job": job_name, "tickers": processed})) + logger.warning(json.dumps({ + "event": "rate_limited", + "job": job_name, + "ticker": symbol, + "processed": processed, + })) + _runtime_finish( + job_name, + "rate_limited", + processed=processed, + total=total, + message=f"Rate limited at 
{symbol} after {attempt} retries", + ) + return + _log_job_error(job_name, symbol, exc) + break + + _last_successful[job_name] = None + logger.info(json.dumps({"event": "job_complete", "job": job_name, "tickers": processed})) + _runtime_finish(job_name, "completed", processed=processed, total=total, message=f"Processed {processed} tickers") + except Exception as exc: + logger.error(json.dumps({"event": "job_error", "job": job_name, "error_type": type(exc).__name__, "message": str(exc)})) + _runtime_finish(job_name, "error", processed=processed, total=total, message=str(exc)) # --------------------------------------------------------------------------- @@ -332,28 +637,90 @@ async def scan_rr() -> None: """ job_name = "rr_scanner" logger.info(json.dumps({"event": "job_start", "job": job_name})) + _runtime_start(job_name) + processed = 0 + total: int | None = None - async with async_session_factory() as db: - if not await _is_job_enabled(db, job_name): - logger.info(json.dumps({"event": "job_skipped", "job": job_name, "reason": "disabled"})) - return + try: + async with async_session_factory() as db: + if not await _is_job_enabled(db, job_name): + logger.info(json.dumps({"event": "job_skipped", "job": job_name, "reason": "disabled"})) + _runtime_finish(job_name, "skipped", processed=0, total=0, message="Disabled") + return - try: - setups = await scan_all_tickers( - db, rr_threshold=settings.default_rr_threshold, + symbols = await _get_all_tickers(db) + total = len(symbols) + _runtime_progress(job_name, processed=0, total=total) + + try: + setups = await scan_all_tickers( + db, rr_threshold=settings.default_rr_threshold, + ) + processed = total or 0 + _runtime_finish(job_name, "completed", processed=processed, total=total, message=f"Found {len(setups)} setups") + logger.info(json.dumps({ + "event": "job_complete", + "job": job_name, + "setups_found": len(setups), + })) + except Exception as exc: + _runtime_finish(job_name, "error", processed=processed, total=total, 
message=str(exc)) + logger.error(json.dumps({ + "event": "job_error", + "job": job_name, + "error_type": type(exc).__name__, + "message": str(exc), + })) + except Exception as exc: + logger.error(json.dumps({"event": "job_error", "job": job_name, "error_type": type(exc).__name__, "message": str(exc)})) + _runtime_finish(job_name, "error", processed=processed, total=total, message=str(exc)) + + +# --------------------------------------------------------------------------- +# Job: Ticker Universe Sync +# --------------------------------------------------------------------------- + + +async def sync_ticker_universe() -> None: + """Sync tracked tickers from configured default universe. + + Setting key: ticker_universe_default (sp500 | nasdaq100 | nasdaq_all) + """ + job_name = "ticker_universe_sync" + logger.info(json.dumps({"event": "job_start", "job": job_name})) + _runtime_start(job_name, total=1) + + try: + async with async_session_factory() as db: + if not await _is_job_enabled(db, job_name): + logger.info(json.dumps({"event": "job_skipped", "job": job_name, "reason": "disabled"})) + _runtime_finish(job_name, "skipped", processed=0, total=1, message="Disabled") + return + + result = await db.execute( + select(SystemSetting).where(SystemSetting.key == "ticker_universe_default") ) - logger.info(json.dumps({ - "event": "job_complete", - "job": job_name, - "setups_found": len(setups), - })) - except Exception as exc: - logger.error(json.dumps({ - "event": "job_error", - "job": job_name, - "error_type": type(exc).__name__, - "message": str(exc), - })) + setting = result.scalar_one_or_none() + universe = (setting.value if setting else "sp500").strip().lower() + + async with async_session_factory() as db: + summary = await bootstrap_universe(db, universe, prune_missing=False) + _runtime_progress(job_name, processed=1, total=1) + _runtime_finish(job_name, "completed", processed=1, total=1, message=f"Synced {universe}") + logger.info(json.dumps({ + "event": "job_complete", 
+ "job": job_name, + "universe": universe, + "summary": summary, + })) + except Exception as exc: + _runtime_finish(job_name, "error", processed=0, total=1, message=str(exc)) + logger.error(json.dumps({ + "event": "job_error", + "job": job_name, + "error_type": type(exc).__name__, + "message": str(exc), + })) # --------------------------------------------------------------------------- @@ -427,6 +794,16 @@ def configure_scheduler() -> None: replace_existing=True, ) + # Universe Sync — nightly + scheduler.add_job( + sync_ticker_universe, + "interval", + hours=24, + id="ticker_universe_sync", + name="Ticker Universe Sync", + replace_existing=True, + ) + logger.info( json.dumps({ "event": "scheduler_configured", @@ -435,6 +812,7 @@ def configure_scheduler() -> None: "sentiment_collector": {"minutes": settings.sentiment_poll_interval_minutes}, "fundamental_collector": fund_interval, "rr_scanner": rr_interval, + "ticker_universe_sync": {"hours": 24}, }, }) ) diff --git a/app/schemas/admin.py b/app/schemas/admin.py index 303253d..6caf6ae 100644 --- a/app/schemas/admin.py +++ b/app/schemas/admin.py @@ -1,5 +1,7 @@ """Admin request/response schemas.""" +from typing import Literal + from pydantic import BaseModel, Field @@ -39,3 +41,18 @@ class DataCleanupRequest(BaseModel): class JobToggle(BaseModel): """Schema for enabling/disabling a scheduled job.""" enabled: bool + + +class RecommendationConfigUpdate(BaseModel): + high_confidence_threshold: float | None = Field(default=None, ge=0, le=100) + moderate_confidence_threshold: float | None = Field(default=None, ge=0, le=100) + confidence_diff_threshold: float | None = Field(default=None, ge=0, le=100) + signal_alignment_weight: float | None = Field(default=None, ge=0, le=1) + sr_strength_weight: float | None = Field(default=None, ge=0, le=1) + distance_penalty_factor: float | None = Field(default=None, ge=0, le=1) + momentum_technical_divergence_threshold: float | None = Field(default=None, ge=0, le=100) + 
fundamental_technical_divergence_threshold: float | None = Field(default=None, ge=0, le=100) + + +class TickerUniverseUpdate(BaseModel): + universe: Literal["sp500", "nasdaq100", "nasdaq_all"] diff --git a/app/schemas/trade_setup.py b/app/schemas/trade_setup.py index e52d747..1d50abd 100644 --- a/app/schemas/trade_setup.py +++ b/app/schemas/trade_setup.py @@ -4,7 +4,25 @@ from __future__ import annotations from datetime import datetime -from pydantic import BaseModel +from pydantic import BaseModel, Field + + +class TradeTargetResponse(BaseModel): + price: float + distance_from_entry: float + distance_atr_multiple: float + rr_ratio: float + probability: float + classification: str + sr_level_id: int + sr_strength: float + + +class RecommendationSummaryResponse(BaseModel): + action: str + reasoning: str | None + risk_level: str | None + composite_score: float class TradeSetupResponse(BaseModel): @@ -19,3 +37,11 @@ class TradeSetupResponse(BaseModel): rr_ratio: float composite_score: float detected_at: datetime + confidence_score: float | None = None + targets: list[TradeTargetResponse] = Field(default_factory=list) + conflict_flags: list[str] = Field(default_factory=list) + recommended_action: str | None = None + reasoning: str | None = None + risk_level: str | None = None + actual_outcome: str | None = None + recommendation_summary: RecommendationSummaryResponse | None = None diff --git a/app/services/admin_service.py b/app/services/admin_service.py index d071267..66eaa31 100644 --- a/app/services/admin_service.py +++ b/app/services/admin_service.py @@ -3,16 +3,34 @@ from datetime import datetime, timedelta, timezone from passlib.hash import bcrypt -from sqlalchemy import delete, select +from sqlalchemy import delete, func, select from sqlalchemy.ext.asyncio import AsyncSession from app.exceptions import DuplicateError, NotFoundError, ValidationError from app.models.fundamental import FundamentalData from app.models.ohlcv import OHLCVRecord +from app.models.score 
import CompositeScore, DimensionScore from app.models.sentiment import SentimentScore +from app.models.sr_level import SRLevel from app.models.settings import SystemSetting +from app.models.ticker import Ticker +from app.models.trade_setup import TradeSetup from app.models.user import User +RECOMMENDATION_CONFIG_DEFAULTS: dict[str, float] = { + "recommendation_high_confidence_threshold": 70.0, + "recommendation_moderate_confidence_threshold": 50.0, + "recommendation_confidence_diff_threshold": 20.0, + "recommendation_signal_alignment_weight": 0.15, + "recommendation_sr_strength_weight": 0.20, + "recommendation_distance_penalty_factor": 0.10, + "recommendation_momentum_technical_divergence_threshold": 30.0, + "recommendation_fundamental_technical_divergence_threshold": 40.0, +} + +DEFAULT_TICKER_UNIVERSE = "sp500" +SUPPORTED_TICKER_UNIVERSES = {"sp500", "nasdaq100", "nasdaq_all"} + # --------------------------------------------------------------------------- # User management @@ -125,6 +143,67 @@ async def update_setting(db: AsyncSession, key: str, value: str) -> SystemSettin return setting +def _recommendation_public_to_storage_key(key: str) -> str: + return f"recommendation_{key}" + + +async def get_recommendation_config(db: AsyncSession) -> dict[str, float]: + result = await db.execute( + select(SystemSetting).where(SystemSetting.key.like("recommendation_%")) + ) + rows = result.scalars().all() + + config = dict(RECOMMENDATION_CONFIG_DEFAULTS) + for row in rows: + try: + config[row.key] = float(row.value) + except (TypeError, ValueError): + continue + + return { + "high_confidence_threshold": config["recommendation_high_confidence_threshold"], + "moderate_confidence_threshold": config["recommendation_moderate_confidence_threshold"], + "confidence_diff_threshold": config["recommendation_confidence_diff_threshold"], + "signal_alignment_weight": config["recommendation_signal_alignment_weight"], + "sr_strength_weight": config["recommendation_sr_strength_weight"], + 
"distance_penalty_factor": config["recommendation_distance_penalty_factor"], + "momentum_technical_divergence_threshold": config["recommendation_momentum_technical_divergence_threshold"], + "fundamental_technical_divergence_threshold": config["recommendation_fundamental_technical_divergence_threshold"], + } + + +async def update_recommendation_config( + db: AsyncSession, + payload: dict[str, float], +) -> dict[str, float]: + for public_key, public_value in payload.items(): + storage_key = _recommendation_public_to_storage_key(public_key) + await update_setting(db, storage_key, str(public_value)) + + return await get_recommendation_config(db) + + +async def get_ticker_universe_default(db: AsyncSession) -> dict[str, str]: + result = await db.execute( + select(SystemSetting).where(SystemSetting.key == "ticker_universe_default") + ) + setting = result.scalar_one_or_none() + universe = setting.value if setting else DEFAULT_TICKER_UNIVERSE + if universe not in SUPPORTED_TICKER_UNIVERSES: + universe = DEFAULT_TICKER_UNIVERSE + return {"universe": universe} + + +async def update_ticker_universe_default(db: AsyncSession, universe: str) -> dict[str, str]: + normalised = universe.strip().lower() + if normalised not in SUPPORTED_TICKER_UNIVERSES: + supported = ", ".join(sorted(SUPPORTED_TICKER_UNIVERSES)) + raise ValidationError(f"Unsupported ticker universe '{universe}'. 
Supported: {supported}") + + await update_setting(db, "ticker_universe_default", normalised) + return {"universe": normalised} + + # --------------------------------------------------------------------------- # Data cleanup # --------------------------------------------------------------------------- @@ -160,23 +239,181 @@ async def cleanup_data(db: AsyncSession, older_than_days: int) -> dict[str, int] return counts +async def get_pipeline_readiness(db: AsyncSession) -> list[dict]: + """Return per-ticker readiness snapshot for ingestion/scoring/scanner pipeline.""" + tickers_result = await db.execute(select(Ticker).order_by(Ticker.symbol.asc())) + tickers = list(tickers_result.scalars().all()) + + if not tickers: + return [] + + ticker_ids = [ticker.id for ticker in tickers] + + ohlcv_stats_result = await db.execute( + select( + OHLCVRecord.ticker_id, + func.count(OHLCVRecord.id), + func.max(OHLCVRecord.date), + ) + .where(OHLCVRecord.ticker_id.in_(ticker_ids)) + .group_by(OHLCVRecord.ticker_id) + ) + ohlcv_stats = { + ticker_id: { + "bars": int(count or 0), + "last_date": max_date.isoformat() if max_date else None, + } + for ticker_id, count, max_date in ohlcv_stats_result.all() + } + + dim_rows_result = await db.execute( + select(DimensionScore).where(DimensionScore.ticker_id.in_(ticker_ids)) + ) + dim_map_by_ticker: dict[int, dict[str, tuple[float | None, bool]]] = {} + for row in dim_rows_result.scalars().all(): + dim_map_by_ticker.setdefault(row.ticker_id, {})[row.dimension] = (row.score, row.is_stale) + + sr_counts_result = await db.execute( + select(SRLevel.ticker_id, func.count(SRLevel.id)) + .where(SRLevel.ticker_id.in_(ticker_ids)) + .group_by(SRLevel.ticker_id) + ) + sr_counts = {ticker_id: int(count or 0) for ticker_id, count in sr_counts_result.all()} + + sentiment_stats_result = await db.execute( + select( + SentimentScore.ticker_id, + func.count(SentimentScore.id), + func.max(SentimentScore.timestamp), + ) + 
.where(SentimentScore.ticker_id.in_(ticker_ids)) + .group_by(SentimentScore.ticker_id) + ) + sentiment_stats = { + ticker_id: { + "count": int(count or 0), + "last_at": max_ts.isoformat() if max_ts else None, + } + for ticker_id, count, max_ts in sentiment_stats_result.all() + } + + fundamentals_result = await db.execute( + select(FundamentalData.ticker_id, FundamentalData.fetched_at) + .where(FundamentalData.ticker_id.in_(ticker_ids)) + ) + fundamentals_map = { + ticker_id: fetched_at.isoformat() if fetched_at else None + for ticker_id, fetched_at in fundamentals_result.all() + } + + composites_result = await db.execute( + select(CompositeScore.ticker_id, CompositeScore.is_stale) + .where(CompositeScore.ticker_id.in_(ticker_ids)) + ) + composites_map = { + ticker_id: is_stale + for ticker_id, is_stale in composites_result.all() + } + + setup_counts_result = await db.execute( + select(TradeSetup.ticker_id, func.count(TradeSetup.id)) + .where(TradeSetup.ticker_id.in_(ticker_ids)) + .group_by(TradeSetup.ticker_id) + ) + setup_counts = {ticker_id: int(count or 0) for ticker_id, count in setup_counts_result.all()} + + readiness: list[dict] = [] + for ticker in tickers: + ohlcv = ohlcv_stats.get(ticker.id, {"bars": 0, "last_date": None}) + ohlcv_bars = int(ohlcv["bars"]) + ohlcv_last_date = ohlcv["last_date"] + + dim_map = dim_map_by_ticker.get(ticker.id, {}) + + sr_count = int(sr_counts.get(ticker.id, 0)) + + sentiment = sentiment_stats.get(ticker.id, {"count": 0, "last_at": None}) + sentiment_count = int(sentiment["count"]) + sentiment_last_at = sentiment["last_at"] + + fundamentals_fetched_at = fundamentals_map.get(ticker.id) + has_fundamentals = ticker.id in fundamentals_map + + has_composite = ticker.id in composites_map + composite_stale = composites_map.get(ticker.id) + + setup_count = int(setup_counts.get(ticker.id, 0)) + + missing_reasons: list[str] = [] + if ohlcv_bars < 30: + missing_reasons.append("insufficient_ohlcv_bars(<30)") + if "technical" not in 
dim_map or dim_map["technical"][0] is None: + missing_reasons.append("missing_technical") + if "momentum" not in dim_map or dim_map["momentum"][0] is None: + missing_reasons.append("missing_momentum") + if "sr_quality" not in dim_map or dim_map["sr_quality"][0] is None: + missing_reasons.append("missing_sr_quality") + if sentiment_count == 0: + missing_reasons.append("missing_sentiment") + if not has_fundamentals: + missing_reasons.append("missing_fundamentals") + if not has_composite: + missing_reasons.append("missing_composite") + if setup_count == 0: + missing_reasons.append("missing_trade_setup") + + readiness.append( + { + "symbol": ticker.symbol, + "ohlcv_bars": ohlcv_bars, + "ohlcv_last_date": ohlcv_last_date, + "dimensions": { + "technical": dim_map.get("technical", (None, True))[0], + "sr_quality": dim_map.get("sr_quality", (None, True))[0], + "sentiment": dim_map.get("sentiment", (None, True))[0], + "fundamental": dim_map.get("fundamental", (None, True))[0], + "momentum": dim_map.get("momentum", (None, True))[0], + }, + "sentiment_count": sentiment_count, + "sentiment_last_at": sentiment_last_at, + "has_fundamentals": has_fundamentals, + "fundamentals_fetched_at": fundamentals_fetched_at, + "sr_level_count": sr_count, + "has_composite": has_composite, + "composite_stale": composite_stale, + "trade_setup_count": setup_count, + "missing_reasons": missing_reasons, + "ready_for_scanner": ohlcv_bars >= 15 and sr_count > 0, + } + ) + + return readiness + + # --------------------------------------------------------------------------- # Job control (placeholder — scheduler is Task 12.1) # --------------------------------------------------------------------------- -VALID_JOB_NAMES = {"data_collector", "sentiment_collector", "fundamental_collector", "rr_scanner"} +VALID_JOB_NAMES = { + "data_collector", + "sentiment_collector", + "fundamental_collector", + "rr_scanner", + "ticker_universe_sync", +} JOB_LABELS = { "data_collector": "Data Collector (OHLCV)", 
"sentiment_collector": "Sentiment Collector", "fundamental_collector": "Fundamental Collector", "rr_scanner": "R:R Scanner", + "ticker_universe_sync": "Ticker Universe Sync", } async def list_jobs(db: AsyncSession) -> list[dict]: """Return status of all scheduled jobs.""" - from app.scheduler import scheduler + from app.scheduler import get_job_runtime_snapshot, scheduler jobs_out = [] for name in sorted(VALID_JOB_NAMES): @@ -194,12 +431,23 @@ async def list_jobs(db: AsyncSession) -> list[dict]: if job and job.next_run_time: next_run = job.next_run_time.isoformat() + runtime = get_job_runtime_snapshot(name) + jobs_out.append({ "name": name, "label": JOB_LABELS.get(name, name), "enabled": enabled, "next_run_at": next_run, "registered": job is not None, + "running": bool(runtime.get("running", False)), + "runtime_status": runtime.get("status"), + "runtime_processed": runtime.get("processed"), + "runtime_total": runtime.get("total"), + "runtime_progress_pct": runtime.get("progress_pct"), + "runtime_current_ticker": runtime.get("current_ticker"), + "runtime_started_at": runtime.get("started_at"), + "runtime_finished_at": runtime.get("finished_at"), + "runtime_message": runtime.get("message"), }) return jobs_out @@ -213,7 +461,26 @@ async def trigger_job(db: AsyncSession, job_name: str) -> dict[str, str]: if job_name not in VALID_JOB_NAMES: raise ValidationError(f"Unknown job: {job_name}. 
Valid jobs: {', '.join(sorted(VALID_JOB_NAMES))}") - from app.scheduler import scheduler + from app.scheduler import get_job_runtime_snapshot, scheduler + + runtime_target = get_job_runtime_snapshot(job_name) + if runtime_target.get("running"): + return { + "job": job_name, + "status": "busy", + "message": f"Job '{job_name}' is already running", + } + + all_runtime = get_job_runtime_snapshot() + for running_name, runtime in all_runtime.items(): + if running_name == job_name: + continue + if runtime.get("running"): + return { + "job": job_name, + "status": "blocked", + "message": f"Cannot trigger '{job_name}' while '{running_name}' is running", + } job = scheduler.get_job(job_name) if job is None: diff --git a/app/services/ingestion_service.py b/app/services/ingestion_service.py index 16c32cd..25dc1df 100644 --- a/app/services/ingestion_service.py +++ b/app/services/ingestion_service.py @@ -9,10 +9,11 @@ import logging from dataclasses import dataclass from datetime import date, timedelta -from sqlalchemy import select +from sqlalchemy import func, select from sqlalchemy.ext.asyncio import AsyncSession from app.exceptions import NotFoundError, ProviderError, RateLimitError +from app.models.ohlcv import OHLCVRecord from app.models.settings import IngestionProgress from app.models.ticker import Ticker from app.providers.protocol import MarketDataProvider @@ -50,6 +51,13 @@ async def _get_progress(db: AsyncSession, ticker_id: int) -> IngestionProgress | return result.scalar_one_or_none() +async def _get_ohlcv_bar_count(db: AsyncSession, ticker_id: int) -> int: + result = await db.execute( + select(func.count()).select_from(OHLCVRecord).where(OHLCVRecord.ticker_id == ticker_id) + ) + return int(result.scalar() or 0) + + async def _update_progress( db: AsyncSession, ticker_id: int, last_date: date ) -> None: @@ -84,10 +92,17 @@ async def fetch_and_ingest( if end_date is None: end_date = date.today() - # Resolve start_date: use progress resume or default to 1 year ago + # 
Resolve start_date: use progress resume or default to 1 year ago. + # If we have too little history, force a one-year backfill even if + # ingestion progress exists (upsert makes this safe and idempotent). if start_date is None: progress = await _get_progress(db, ticker.id) - if progress is not None: + bar_count = await _get_ohlcv_bar_count(db, ticker.id) + minimum_backfill_bars = 200 + + if bar_count < minimum_backfill_bars: + start_date = end_date - timedelta(days=365) + elif progress is not None: start_date = progress.last_ingested_date + timedelta(days=1) else: start_date = end_date - timedelta(days=365) diff --git a/app/services/recommendation_service.py b/app/services/recommendation_service.py new file mode 100644 index 0000000..5068199 --- /dev/null +++ b/app/services/recommendation_service.py @@ -0,0 +1,499 @@ +from __future__ import annotations + +import json +import logging +from typing import Any + +from sqlalchemy import select +from sqlalchemy.ext.asyncio import AsyncSession + +from app.models.settings import SystemSetting +from app.models.sr_level import SRLevel +from app.models.ticker import Ticker +from app.models.trade_setup import TradeSetup + +logger = logging.getLogger(__name__) + + +DEFAULT_RECOMMENDATION_CONFIG: dict[str, float] = { + "recommendation_high_confidence_threshold": 70.0, + "recommendation_moderate_confidence_threshold": 50.0, + "recommendation_confidence_diff_threshold": 20.0, + "recommendation_signal_alignment_weight": 0.15, + "recommendation_sr_strength_weight": 0.20, + "recommendation_distance_penalty_factor": 0.10, + "recommendation_momentum_technical_divergence_threshold": 30.0, + "recommendation_fundamental_technical_divergence_threshold": 40.0, +} + + +def _clamp(value: float, low: float, high: float) -> float: + return max(low, min(high, value)) + + +def _sentiment_value(sentiment_classification: str | None) -> str | None: + if sentiment_classification is None: + return None + return 
sentiment_classification.strip().lower() + + +def check_signal_alignment( + direction: str, + dimension_scores: dict[str, float], + sentiment_classification: str | None, +) -> tuple[bool, str]: + technical = float(dimension_scores.get("technical", 50.0)) + momentum = float(dimension_scores.get("momentum", 50.0)) + sentiment = _sentiment_value(sentiment_classification) + + if direction == "long": + aligned_count = sum([ + technical > 60, + momentum > 60, + sentiment == "bullish", + ]) + if aligned_count >= 2: + return True, "Technical, momentum, and/or sentiment align with LONG direction." + return False, "Signals are mixed for LONG direction." + + aligned_count = sum([ + technical < 40, + momentum < 40, + sentiment == "bearish", + ]) + if aligned_count >= 2: + return True, "Technical, momentum, and/or sentiment align with SHORT direction." + return False, "Signals are mixed for SHORT direction." + + +class SignalConflictDetector: + def detect_conflicts( + self, + dimension_scores: dict[str, float], + sentiment_classification: str | None, + config: dict[str, float] | None = None, + ) -> list[str]: + cfg = config or DEFAULT_RECOMMENDATION_CONFIG + technical = float(dimension_scores.get("technical", 50.0)) + momentum = float(dimension_scores.get("momentum", 50.0)) + fundamental = float(dimension_scores.get("fundamental", 50.0)) + sentiment = _sentiment_value(sentiment_classification) + + mt_threshold = float(cfg.get("recommendation_momentum_technical_divergence_threshold", 30.0)) + ft_threshold = float(cfg.get("recommendation_fundamental_technical_divergence_threshold", 40.0)) + + conflicts: list[str] = [] + + if sentiment == "bearish" and technical > 60: + conflicts.append( + f"sentiment-technical: Bearish sentiment conflicts with bullish technical ({technical:.0f})" + ) + if sentiment == "bullish" and technical < 40: + conflicts.append( + f"sentiment-technical: Bullish sentiment conflicts with bearish technical ({technical:.0f})" + ) + + mt_diff = abs(momentum - 
technical) + if mt_diff > mt_threshold: + conflicts.append( + "momentum-technical: " + f"Momentum ({momentum:.0f}) diverges from technical ({technical:.0f}) by {mt_diff:.0f} points" + ) + + if sentiment == "bearish" and momentum > 60: + conflicts.append( + f"sentiment-momentum: Bearish sentiment conflicts with momentum ({momentum:.0f})" + ) + if sentiment == "bullish" and momentum < 40: + conflicts.append( + f"sentiment-momentum: Bullish sentiment conflicts with momentum ({momentum:.0f})" + ) + + ft_diff = abs(fundamental - technical) + if ft_diff > ft_threshold: + conflicts.append( + "fundamental-technical: " + f"Fundamental ({fundamental:.0f}) diverges significantly from technical ({technical:.0f})" + ) + + return conflicts + + +class DirectionAnalyzer: + def calculate_confidence( + self, + direction: str, + dimension_scores: dict[str, float], + sentiment_classification: str | None, + conflicts: list[str] | None = None, + ) -> float: + confidence = 50.0 + technical = float(dimension_scores.get("technical", 50.0)) + momentum = float(dimension_scores.get("momentum", 50.0)) + fundamental = float(dimension_scores.get("fundamental", 50.0)) + sentiment = _sentiment_value(sentiment_classification) + + if direction == "long": + if technical > 70: + confidence += 25.0 + elif technical > 60: + confidence += 15.0 + + if momentum > 70: + confidence += 20.0 + elif momentum > 60: + confidence += 15.0 + + if sentiment == "bullish": + confidence += 15.0 + elif sentiment == "neutral": + confidence += 5.0 + + if fundamental > 60: + confidence += 10.0 + else: + if technical < 30: + confidence += 25.0 + elif technical < 40: + confidence += 15.0 + + if momentum < 30: + confidence += 20.0 + elif momentum < 40: + confidence += 15.0 + + if sentiment == "bearish": + confidence += 15.0 + elif sentiment == "neutral": + confidence += 5.0 + + if fundamental < 40: + confidence += 10.0 + + for conflict in conflicts or []: + if "sentiment-technical" in conflict: + confidence -= 20.0 + elif 
"momentum-technical" in conflict: + confidence -= 15.0 + elif "sentiment-momentum" in conflict: + confidence -= 20.0 + elif "fundamental-technical" in conflict: + confidence -= 10.0 + + return _clamp(confidence, 0.0, 100.0) + + +class TargetGenerator: + def generate_targets( + self, + direction: str, + entry_price: float, + stop_loss: float, + sr_levels: list[SRLevel], + atr_value: float, + ) -> list[dict[str, Any]]: + if atr_value <= 0: + return [] + + risk = abs(entry_price - stop_loss) + if risk <= 0: + return [] + + candidates: list[dict[str, Any]] = [] + atr_pct = atr_value / entry_price if entry_price > 0 else 0.0 + + max_atr_multiple: float | None = None + if atr_pct > 0.05: + max_atr_multiple = 10.0 + elif atr_pct < 0.02: + max_atr_multiple = 3.0 + + for level in sr_levels: + is_candidate = False + if direction == "long": + is_candidate = level.type == "resistance" and level.price_level > entry_price + else: + is_candidate = level.type == "support" and level.price_level < entry_price + + if not is_candidate: + continue + + distance = abs(level.price_level - entry_price) + distance_atr_multiple = distance / atr_value + + if distance_atr_multiple < 1.0: + continue + if max_atr_multiple is not None and distance_atr_multiple > max_atr_multiple: + continue + + reward = abs(level.price_level - entry_price) + rr_ratio = reward / risk + + norm_rr = min(rr_ratio / 10.0, 1.0) + norm_strength = _clamp(level.strength, 0, 100) / 100.0 + norm_proximity = 1.0 - min(distance / entry_price, 1.0) + quality = 0.35 * norm_rr + 0.35 * norm_strength + 0.30 * norm_proximity + + candidates.append( + { + "price": float(level.price_level), + "distance_from_entry": float(distance), + "distance_atr_multiple": float(distance_atr_multiple), + "rr_ratio": float(rr_ratio), + "classification": "Moderate", + "sr_level_id": int(level.id), + "sr_strength": float(level.strength), + "quality": float(quality), + } + ) + + candidates.sort(key=lambda row: row["quality"], reverse=True) + selected = 
candidates[:5] + selected.sort(key=lambda row: row["distance_from_entry"]) + + if not selected: + return [] + + n = len(selected) + for idx, target in enumerate(selected): + if n <= 2: + target["classification"] = "Conservative" if idx == 0 else "Aggressive" + elif idx <= 1: + target["classification"] = "Conservative" + elif idx >= n - 2: + target["classification"] = "Aggressive" + else: + target["classification"] = "Moderate" + + target.pop("quality", None) + + return selected + + +class ProbabilityEstimator: + def estimate_probability( + self, + target: dict[str, Any], + dimension_scores: dict[str, float], + sentiment_classification: str | None, + direction: str, + config: dict[str, float], + ) -> float: + classification = str(target.get("classification", "Moderate")) + strength = float(target.get("sr_strength", 50.0)) + atr_multiple = float(target.get("distance_atr_multiple", 1.0)) + + if classification == "Conservative": + base_prob = 70.0 + elif classification == "Aggressive": + base_prob = 40.0 + else: + base_prob = 55.0 + + if strength >= 80: + strength_adj = 15.0 + elif strength >= 60: + strength_adj = 10.0 + elif strength >= 40: + strength_adj = 5.0 + else: + strength_adj = -10.0 + + technical = float(dimension_scores.get("technical", 50.0)) + momentum = float(dimension_scores.get("momentum", 50.0)) + sentiment = _sentiment_value(sentiment_classification) + + alignment_adj = 0.0 + if direction == "long": + if technical > 60 and (sentiment == "bullish" or momentum > 60): + alignment_adj = 15.0 + elif technical < 40 or (sentiment == "bearish" and momentum < 40): + alignment_adj = -15.0 + else: + if technical < 40 and (sentiment == "bearish" or momentum < 40): + alignment_adj = 15.0 + elif technical > 60 or (sentiment == "bullish" and momentum > 60): + alignment_adj = -15.0 + + volatility_adj = 0.0 + if atr_multiple > 5: + volatility_adj = 5.0 + elif atr_multiple < 2: + volatility_adj = 5.0 + + signal_weight = 
float(config.get("recommendation_signal_alignment_weight", 0.15)) + sr_weight = float(config.get("recommendation_sr_strength_weight", 0.20)) + distance_penalty = float(config.get("recommendation_distance_penalty_factor", 0.10)) + + scaled_alignment_adj = alignment_adj * (signal_weight / 0.15) + scaled_strength_adj = strength_adj * (sr_weight / 0.20) + distance_adj = -distance_penalty * max(atr_multiple - 1.0, 0.0) * 2.0 + + probability = base_prob + scaled_strength_adj + scaled_alignment_adj + volatility_adj + distance_adj + probability = _clamp(probability, 10.0, 90.0) + + if classification == "Conservative": + probability = max(probability, 61.0) + elif classification == "Moderate": + probability = _clamp(probability, 40.0, 70.0) + elif classification == "Aggressive": + probability = min(probability, 49.0) + + return round(probability, 2) + + +signal_conflict_detector = SignalConflictDetector() +direction_analyzer = DirectionAnalyzer() +target_generator = TargetGenerator() +probability_estimator = ProbabilityEstimator() + + +async def get_recommendation_config(db: AsyncSession) -> dict[str, float]: + result = await db.execute( + select(SystemSetting).where(SystemSetting.key.like("recommendation_%")) + ) + rows = result.scalars().all() + + config: dict[str, float] = dict(DEFAULT_RECOMMENDATION_CONFIG) + for setting in rows: + try: + config[setting.key] = float(setting.value) + except (TypeError, ValueError): + logger.warning("Invalid recommendation setting value for %s: %s", setting.key, setting.value) + + return config + + +def _risk_level_from_conflicts(conflicts: list[str]) -> str: + if not conflicts: + return "Low" + severe = [c for c in conflicts if "sentiment-technical" in c or "sentiment-momentum" in c] + if len(severe) >= 2 or len(conflicts) >= 3: + return "High" + return "Medium" + + +def _choose_recommended_action( + long_confidence: float, + short_confidence: float, + config: dict[str, float], +) -> str: + high = 
float(config.get("recommendation_high_confidence_threshold", 70.0)) + moderate = float(config.get("recommendation_moderate_confidence_threshold", 50.0)) + diff = float(config.get("recommendation_confidence_diff_threshold", 20.0)) + + if long_confidence >= high and (long_confidence - short_confidence) >= diff: + return "LONG_HIGH" + if short_confidence >= high and (short_confidence - long_confidence) >= diff: + return "SHORT_HIGH" + if long_confidence >= moderate and (long_confidence - short_confidence) >= diff: + return "LONG_MODERATE" + if short_confidence >= moderate and (short_confidence - long_confidence) >= diff: + return "SHORT_MODERATE" + return "NEUTRAL" + + +def _build_reasoning( + direction: str, + confidence: float, + conflicts: list[str], + dimension_scores: dict[str, float], + sentiment_classification: str | None, + action: str, +) -> str: + aligned, alignment_text = check_signal_alignment( + direction, + dimension_scores, + sentiment_classification, + ) + sentiment = _sentiment_value(sentiment_classification) or "unknown" + technical = float(dimension_scores.get("technical", 50.0)) + momentum = float(dimension_scores.get("momentum", 50.0)) + + direction_text = direction.upper() + alignment_summary = "aligned" if aligned else "mixed" + base = ( + f"{direction_text} confidence {confidence:.1f}% with {alignment_summary} signals " + f"(technical={technical:.0f}, momentum={momentum:.0f}, sentiment={sentiment})." + ) + + if conflicts: + return ( + f"{base} {alignment_text} Detected {len(conflicts)} conflict(s), " + f"so recommendation is risk-adjusted. Action={action}." + ) + + return f"{base} {alignment_text} No major conflicts detected. Action={action}." 
+ + +async def enhance_trade_setup( + db: AsyncSession, + ticker: Ticker, + setup: TradeSetup, + dimension_scores: dict[str, float], + sr_levels: list[SRLevel], + sentiment_classification: str | None, + atr_value: float, +) -> TradeSetup: + config = await get_recommendation_config(db) + + conflicts = signal_conflict_detector.detect_conflicts( + dimension_scores=dimension_scores, + sentiment_classification=sentiment_classification, + config=config, + ) + + long_confidence = direction_analyzer.calculate_confidence( + direction="long", + dimension_scores=dimension_scores, + sentiment_classification=sentiment_classification, + conflicts=conflicts, + ) + short_confidence = direction_analyzer.calculate_confidence( + direction="short", + dimension_scores=dimension_scores, + sentiment_classification=sentiment_classification, + conflicts=conflicts, + ) + + direction = setup.direction.lower() + confidence = long_confidence if direction == "long" else short_confidence + + targets = target_generator.generate_targets( + direction=direction, + entry_price=setup.entry_price, + stop_loss=setup.stop_loss, + sr_levels=sr_levels, + atr_value=atr_value, + ) + + for target in targets: + target["probability"] = probability_estimator.estimate_probability( + target=target, + dimension_scores=dimension_scores, + sentiment_classification=sentiment_classification, + direction=direction, + config=config, + ) + + if len(targets) < 3: + conflicts = [*conflicts, "target-availability: Fewer than 3 valid S/R targets available"] + + action = _choose_recommended_action(long_confidence, short_confidence, config) + risk_level = _risk_level_from_conflicts(conflicts) + + setup.confidence_score = round(confidence, 2) + setup.targets_json = json.dumps(targets) + setup.conflict_flags_json = json.dumps(conflicts) + setup.recommended_action = action + setup.reasoning = _build_reasoning( + direction=direction, + confidence=confidence, + conflicts=conflicts, + dimension_scores=dimension_scores, + 
sentiment_classification=sentiment_classification, + action=action, + ) + setup.risk_level = risk_level + + return setup diff --git a/app/services/rr_scanner_service.py b/app/services/rr_scanner_service.py index 493032f..1816b37 100644 --- a/app/services/rr_scanner_service.py +++ b/app/services/rr_scanner_service.py @@ -3,24 +3,27 @@ Scans tracked tickers for asymmetric risk-reward trade setups. Long: target = nearest SR above, stop = entry - ATR × multiplier. Short: target = nearest SR below, stop = entry + ATR × multiplier. -Filters by configurable R:R threshold (default 3:1). +Filters by configurable R:R threshold (default 1.5). """ from __future__ import annotations +import json import logging from datetime import datetime, timezone -from sqlalchemy import delete, select +from sqlalchemy import select from sqlalchemy.ext.asyncio import AsyncSession from app.exceptions import NotFoundError -from app.models.score import CompositeScore +from app.models.score import CompositeScore, DimensionScore +from app.models.sentiment import SentimentScore from app.models.sr_level import SRLevel from app.models.ticker import Ticker from app.models.trade_setup import TradeSetup from app.services.indicator_service import _extract_ohlcv, compute_atr from app.services.price_service import query_ohlcv +from app.services.recommendation_service import enhance_trade_setup logger = logging.getLogger(__name__) @@ -45,70 +48,63 @@ def _compute_quality_score( w_proximity: float = 0.30, rr_cap: float = 10.0, ) -> float: - """Compute a quality score for a candidate S/R level. - - Combines normalized R:R ratio, level strength, and proximity to entry - into a single 0–1 score using configurable weights. 
- """ + """Compute a quality score for a candidate S/R level.""" norm_rr = min(rr / rr_cap, 1.0) norm_strength = strength / 100.0 norm_proximity = 1.0 - min(distance / entry_price, 1.0) return w_rr * norm_rr + w_strength * norm_strength + w_proximity * norm_proximity +async def _get_dimension_scores(db: AsyncSession, ticker_id: int) -> dict[str, float]: + result = await db.execute( + select(DimensionScore).where(DimensionScore.ticker_id == ticker_id) + ) + rows = result.scalars().all() + return {row.dimension: float(row.score) for row in rows} + + +async def _get_latest_sentiment(db: AsyncSession, ticker_id: int) -> str | None: + result = await db.execute( + select(SentimentScore) + .where(SentimentScore.ticker_id == ticker_id) + .order_by(SentimentScore.timestamp.desc()) + .limit(1) + ) + row = result.scalar_one_or_none() + return row.classification if row else None + + async def scan_ticker( db: AsyncSession, symbol: str, rr_threshold: float = 1.5, atr_multiplier: float = 1.5, ) -> list[TradeSetup]: - """Scan a single ticker for trade setups meeting the R:R threshold. - - 1. Fetch OHLCV data and compute ATR. - 2. Fetch SR levels. - 3. Compute long and short setups. - 4. Filter by R:R threshold. - 5. Delete old setups for this ticker and persist new ones. - - Returns list of persisted TradeSetup models. 
- """ + """Scan a single ticker for trade setups meeting the R:R threshold.""" ticker = await _get_ticker(db, symbol) - # Fetch OHLCV records = await query_ohlcv(db, symbol) if not records or len(records) < 15: logger.info( "Skipping %s: insufficient OHLCV data (%d bars, need 15+)", symbol, len(records), ) - # Clear any stale setups - await db.execute( - delete(TradeSetup).where(TradeSetup.ticker_id == ticker.id) - ) return [] _, highs, lows, closes, _ = _extract_ohlcv(records) entry_price = closes[-1] - # Compute ATR try: atr_result = compute_atr(highs, lows, closes) atr_value = atr_result["atr"] except Exception: logger.info("Skipping %s: cannot compute ATR", symbol) - await db.execute( - delete(TradeSetup).where(TradeSetup.ticker_id == ticker.id) - ) return [] if atr_value <= 0: logger.info("Skipping %s: ATR is zero or negative", symbol) - await db.execute( - delete(TradeSetup).where(TradeSetup.ticker_id == ticker.id) - ) return [] - # Fetch SR levels from DB (already computed by sr_service) sr_result = await db.execute( select(SRLevel).where(SRLevel.ticker_id == ticker.id) ) @@ -116,9 +112,6 @@ async def scan_ticker( if not sr_levels: logger.info("Skipping %s: no SR levels available", symbol) - await db.execute( - delete(TradeSetup).where(TradeSetup.ticker_id == ticker.id) - ) return [] levels_above = sorted( @@ -131,18 +124,18 @@ async def scan_ticker( reverse=True, ) - # Get composite score for this ticker comp_result = await db.execute( select(CompositeScore).where(CompositeScore.ticker_id == ticker.id) ) comp = comp_result.scalar_one_or_none() composite_score = comp.score if comp else 0.0 + dimension_scores = await _get_dimension_scores(db, ticker.id) + sentiment_classification = await _get_latest_sentiment(db, ticker.id) + now = datetime.now(timezone.utc) setups: list[TradeSetup] = [] - # Long setup: target = nearest SR above, stop = entry - ATR × multiplier - # Check all resistance levels above and pick the one with the best quality score if levels_above: 
stop = entry_price - (atr_value * atr_multiplier) risk = entry_price - stop @@ -152,15 +145,18 @@ async def scan_ticker( best_candidate_target = 0.0 for lv in levels_above: reward = lv.price_level - entry_price - if reward > 0: - rr = reward / risk - if rr >= rr_threshold: - distance = lv.price_level - entry_price - quality = _compute_quality_score(rr, lv.strength, distance, entry_price) - if quality > best_quality: - best_quality = quality - best_candidate_rr = rr - best_candidate_target = lv.price_level + if reward <= 0: + continue + rr = reward / risk + if rr < rr_threshold: + continue + distance = lv.price_level - entry_price + quality = _compute_quality_score(rr, lv.strength, distance, entry_price) + if quality > best_quality: + best_quality = quality + best_candidate_rr = rr + best_candidate_target = lv.price_level + if best_candidate_rr > 0: setups.append(TradeSetup( ticker_id=ticker.id, @@ -173,8 +169,6 @@ async def scan_ticker( detected_at=now, )) - # Short setup: target = nearest SR below, stop = entry + ATR × multiplier - # Check all support levels below and pick the one with the best quality score if levels_below: stop = entry_price + (atr_value * atr_multiplier) risk = stop - entry_price @@ -184,15 +178,18 @@ async def scan_ticker( best_candidate_target = 0.0 for lv in levels_below: reward = entry_price - lv.price_level - if reward > 0: - rr = reward / risk - if rr >= rr_threshold: - distance = entry_price - lv.price_level - quality = _compute_quality_score(rr, lv.strength, distance, entry_price) - if quality > best_quality: - best_quality = quality - best_candidate_rr = rr - best_candidate_target = lv.price_level + if reward <= 0: + continue + rr = reward / risk + if rr < rr_threshold: + continue + distance = entry_price - lv.price_level + quality = _compute_quality_score(rr, lv.strength, distance, entry_price) + if quality > best_quality: + best_quality = quality + best_candidate_rr = rr + best_candidate_target = lv.price_level + if best_candidate_rr 
> 0: setups.append(TradeSetup( ticker_id=ticker.id, @@ -205,20 +202,32 @@ async def scan_ticker( detected_at=now, )) - # Delete old setups for this ticker, persist new ones - await db.execute( - delete(TradeSetup).where(TradeSetup.ticker_id == ticker.id) - ) + enhanced_setups: list[TradeSetup] = [] for setup in setups: + try: + enhanced = await enhance_trade_setup( + db=db, + ticker=ticker, + setup=setup, + dimension_scores=dimension_scores, + sr_levels=sr_levels, + sentiment_classification=sentiment_classification, + atr_value=atr_value, + ) + enhanced_setups.append(enhanced) + except Exception: + logger.exception("Error enhancing setup for %s (%s)", ticker.symbol, setup.direction) + enhanced_setups.append(setup) + + for setup in enhanced_setups: db.add(setup) await db.commit() - # Refresh to get IDs - for s in setups: + for s in enhanced_setups: await db.refresh(s) - return setups + return enhanced_setups async def scan_all_tickers( @@ -226,11 +235,7 @@ async def scan_all_tickers( rr_threshold: float = 1.5, atr_multiplier: float = 1.5, ) -> list[TradeSetup]: - """Scan all tracked tickers for trade setups. - - Processes each ticker independently — one failure doesn't stop others. - Returns all setups found across all tickers. - """ + """Scan all tracked tickers for trade setups.""" result = await db.execute(select(Ticker).order_by(Ticker.symbol)) tickers = list(result.scalars().all()) @@ -250,38 +255,100 @@ async def scan_all_tickers( async def get_trade_setups( db: AsyncSession, direction: str | None = None, + min_confidence: float | None = None, + recommended_action: str | None = None, + symbol: str | None = None, ) -> list[dict]: - """Get all stored trade setups, optionally filtered by direction. - - Returns dicts sorted by R:R desc, secondary composite desc. - Each dict includes the ticker symbol. 
- """ + """Get latest stored trade setups, optionally filtered.""" stmt = ( select(TradeSetup, Ticker.symbol) .join(Ticker, TradeSetup.ticker_id == Ticker.id) ) if direction is not None: stmt = stmt.where(TradeSetup.direction == direction.lower()) + if symbol is not None: + stmt = stmt.where(Ticker.symbol == symbol.strip().upper()) + if min_confidence is not None: + stmt = stmt.where(TradeSetup.confidence_score >= min_confidence) + if recommended_action is not None: + stmt = stmt.where(TradeSetup.recommended_action == recommended_action) - stmt = stmt.order_by( - TradeSetup.rr_ratio.desc(), - TradeSetup.composite_score.desc(), - ) + stmt = stmt.order_by(TradeSetup.detected_at.desc(), TradeSetup.id.desc()) result = await db.execute(stmt) rows = result.all() - return [ - { - "id": setup.id, - "symbol": symbol, - "direction": setup.direction, - "entry_price": setup.entry_price, - "stop_loss": setup.stop_loss, - "target": setup.target, - "rr_ratio": setup.rr_ratio, - "composite_score": setup.composite_score, - "detected_at": setup.detected_at, - } - for setup, symbol in rows - ] + latest_by_key: dict[tuple[str, str], tuple[TradeSetup, str]] = {} + for setup, ticker_symbol in rows: + dedupe_key = (ticker_symbol, setup.direction) + if dedupe_key not in latest_by_key: + latest_by_key[dedupe_key] = (setup, ticker_symbol) + + latest_rows = list(latest_by_key.values()) + latest_rows.sort( + key=lambda row: ( + row[0].confidence_score if row[0].confidence_score is not None else -1.0, + row[0].rr_ratio, + row[0].composite_score, + ), + reverse=True, + ) + + return [_trade_setup_to_dict(setup, ticker_symbol) for setup, ticker_symbol in latest_rows] + + +async def get_trade_setup_history( + db: AsyncSession, + symbol: str, +) -> list[dict]: + """Get full recommendation history for a symbol (newest first).""" + stmt = ( + select(TradeSetup, Ticker.symbol) + .join(Ticker, TradeSetup.ticker_id == Ticker.id) + .where(Ticker.symbol == symbol.strip().upper()) + 
.order_by(TradeSetup.detected_at.desc(), TradeSetup.id.desc()) + ) + result = await db.execute(stmt) + rows = result.all() + + return [_trade_setup_to_dict(setup, ticker_symbol) for setup, ticker_symbol in rows] + + +def _trade_setup_to_dict(setup: TradeSetup, symbol: str) -> dict: + targets: list[dict] = [] + conflicts: list[str] = [] + + if setup.targets_json: + try: + parsed_targets = json.loads(setup.targets_json) + if isinstance(parsed_targets, list): + targets = parsed_targets + except (TypeError, ValueError): + targets = [] + + if setup.conflict_flags_json: + try: + parsed_conflicts = json.loads(setup.conflict_flags_json) + if isinstance(parsed_conflicts, list): + conflicts = [str(item) for item in parsed_conflicts] + except (TypeError, ValueError): + conflicts = [] + + return { + "id": setup.id, + "symbol": symbol, + "direction": setup.direction, + "entry_price": setup.entry_price, + "stop_loss": setup.stop_loss, + "target": setup.target, + "rr_ratio": setup.rr_ratio, + "composite_score": setup.composite_score, + "detected_at": setup.detected_at, + "confidence_score": setup.confidence_score, + "targets": targets, + "conflict_flags": conflicts, + "recommended_action": setup.recommended_action, + "reasoning": setup.reasoning, + "risk_level": setup.risk_level, + "actual_outcome": setup.actual_outcome, + } diff --git a/app/services/ticker_universe_service.py b/app/services/ticker_universe_service.py new file mode 100644 index 0000000..702c87b --- /dev/null +++ b/app/services/ticker_universe_service.py @@ -0,0 +1,405 @@ +"""Ticker universe discovery and bootstrap service. + +Provides a minimal, provider-backed way to populate tracked tickers from +well-known universes (S&P 500, NASDAQ-100, NASDAQ All). 
+""" + +from __future__ import annotations + +import json +import logging +import os +import re +from collections.abc import Iterable +from datetime import datetime, timezone +from pathlib import Path + +import httpx +from sqlalchemy import delete, select +from sqlalchemy.ext.asyncio import AsyncSession + +from app.config import settings +from app.exceptions import ProviderError, ValidationError +from app.models.settings import SystemSetting +from app.models.ticker import Ticker + +logger = logging.getLogger(__name__) + +SUPPORTED_UNIVERSES = {"sp500", "nasdaq100", "nasdaq_all"} +_SYMBOL_PATTERN = re.compile(r"^[A-Z0-9-]{1,10}$") + +_SEED_UNIVERSES: dict[str, list[str]] = { + "sp500": [ + "AAPL", "MSFT", "NVDA", "AMZN", "META", "GOOGL", "GOOG", "BRK-B", "TSLA", "JPM", + "V", "MA", "UNH", "XOM", "LLY", "AVGO", "COST", "PG", "JNJ", "HD", "MRK", "BAC", + "ABBV", "PEP", "KO", "ADBE", "NFLX", "CRM", "CSCO", "WMT", "AMD", "TMO", "MCD", + "ORCL", "ACN", "CVX", "LIN", "DHR", "ABT", "QCOM", "TXN", "PM", "DIS", "INTU", + ], + "nasdaq100": [ + "AAPL", "MSFT", "NVDA", "AMZN", "META", "GOOGL", "GOOG", "TSLA", "AVGO", "COST", + "NFLX", "ADBE", "CSCO", "AMD", "INTU", "QCOM", "AMGN", "TXN", "INTC", "BKNG", "GILD", + "ISRG", "MDLZ", "ADP", "LRCX", "ADI", "PANW", "SNPS", "CDNS", "KLAC", "MELI", "MU", + "SBUX", "CSX", "REGN", "VRTX", "MAR", "MNST", "CTAS", "ASML", "PYPL", "AMAT", "NXPI", + ], + "nasdaq_all": [ + "AAPL", "MSFT", "NVDA", "AMZN", "META", "GOOGL", "TSLA", "AMD", "INTC", "QCOM", "CSCO", + "ADBE", "NFLX", "PYPL", "AMAT", "MU", "SBUX", "GILD", "INTU", "BKNG", "ADP", "CTAS", + "PANW", "SNPS", "CDNS", "LRCX", "KLAC", "MELI", "ASML", "REGN", "VRTX", "MDLZ", "AMGN", + ], +} + +_CA_BUNDLE = os.environ.get("SSL_CERT_FILE", "") +if not _CA_BUNDLE or not Path(_CA_BUNDLE).exists(): + _CA_BUNDLE_PATH: str | bool = True +else: + _CA_BUNDLE_PATH = _CA_BUNDLE + + +def _validate_universe(universe: str) -> str: + normalised = universe.strip().lower() + if normalised not in 
SUPPORTED_UNIVERSES: + supported = ", ".join(sorted(SUPPORTED_UNIVERSES)) + raise ValidationError(f"Unsupported universe '{universe}'. Supported: {supported}") + return normalised + + +def _normalise_symbols(symbols: Iterable[str]) -> list[str]: + deduped: set[str] = set() + for raw_symbol in symbols: + symbol = raw_symbol.strip().upper().replace(".", "-") + if not symbol: + continue + if _SYMBOL_PATTERN.fullmatch(symbol) is None: + continue + deduped.add(symbol) + return sorted(deduped) + + +def _extract_symbols_from_fmp_payload(payload: object) -> list[str]: + if not isinstance(payload, list): + return [] + + symbols: list[str] = [] + for item in payload: + if not isinstance(item, dict): + continue + candidate = item.get("symbol") or item.get("ticker") + if isinstance(candidate, str): + symbols.append(candidate) + return symbols + + +async def _try_fmp_urls( + client: httpx.AsyncClient, + urls: list[str], +) -> tuple[list[str], list[str]]: + failures: list[str] = [] + for url in urls: + endpoint = url.split("?")[0] + try: + response = await client.get(url) + except httpx.HTTPError as exc: + failures.append(f"{endpoint}: network error ({type(exc).__name__}: {exc})") + continue + + if response.status_code != 200: + failures.append(f"{endpoint}: HTTP {response.status_code}") + continue + + try: + payload = response.json() + except ValueError: + failures.append(f"{endpoint}: invalid JSON payload") + continue + + symbols = _extract_symbols_from_fmp_payload(payload) + if symbols: + return symbols, failures + + failures.append(f"{endpoint}: empty/unsupported payload") + + return [], failures + + +async def _fetch_universe_symbols_from_fmp(universe: str) -> list[str]: + if not settings.fmp_api_key: + raise ValidationError( + "FMP API key is required for universe bootstrap (set FMP_API_KEY)" + ) + + api_key = settings.fmp_api_key + stable_base = "https://financialmodelingprep.com/stable" + legacy_base = "https://financialmodelingprep.com/api/v3" + + stable_candidates: 
dict[str, list[str]] = { + "sp500": [ + f"{stable_base}/sp500-constituent?apikey={api_key}", + f"{stable_base}/sp500-constituents?apikey={api_key}", + ], + "nasdaq100": [ + f"{stable_base}/nasdaq-100-constituent?apikey={api_key}", + f"{stable_base}/nasdaq100-constituent?apikey={api_key}", + f"{stable_base}/nasdaq-100-constituents?apikey={api_key}", + ], + "nasdaq_all": [ + f"{stable_base}/stock-screener?exchange=NASDAQ&isEtf=false&limit=10000&apikey={api_key}", + f"{stable_base}/available-traded/list?apikey={api_key}", + ], + } + + legacy_candidates: dict[str, list[str]] = { + "sp500": [ + f"{legacy_base}/sp500_constituent?apikey={api_key}", + f"{legacy_base}/sp500_constituent", + ], + "nasdaq100": [ + f"{legacy_base}/nasdaq_constituent?apikey={api_key}", + f"{legacy_base}/nasdaq_constituent", + ], + "nasdaq_all": [ + f"{legacy_base}/stock-screener?exchange=NASDAQ&isEtf=false&limit=10000&apikey={api_key}", + ], + } + + failures: list[str] = [] + async with httpx.AsyncClient(timeout=30.0, verify=_CA_BUNDLE_PATH) as client: + stable_symbols, stable_failures = await _try_fmp_urls(client, stable_candidates[universe]) + failures.extend(stable_failures) + + if stable_symbols: + return stable_symbols + + legacy_symbols, legacy_failures = await _try_fmp_urls(client, legacy_candidates[universe]) + failures.extend(legacy_failures) + + if legacy_symbols: + return legacy_symbols + + if failures: + reason = "; ".join(failures[:6]) + logger.warning("FMP universe fetch failed for %s: %s", universe, reason) + raise ProviderError( + f"Failed to fetch universe symbols from FMP for '{universe}'. 
Attempts: {reason}" + ) + + raise ProviderError(f"Failed to fetch universe symbols from FMP for '{universe}'") + + +async def _fetch_html_symbols( + client: httpx.AsyncClient, + url: str, + pattern: str, +) -> tuple[list[str], str | None]: + try: + response = await client.get(url) + except httpx.HTTPError as exc: + return [], f"{url}: network error ({type(exc).__name__}: {exc})" + + if response.status_code != 200: + return [], f"{url}: HTTP {response.status_code}" + + matches = re.findall(pattern, response.text, flags=re.IGNORECASE) + if not matches: + return [], f"{url}: no symbols parsed" + return list(matches), None + + +async def _fetch_nasdaq_trader_symbols( + client: httpx.AsyncClient, +) -> tuple[list[str], str | None]: + url = "https://www.nasdaqtrader.com/dynamic/SymDir/nasdaqlisted.txt" + try: + response = await client.get(url) + except httpx.HTTPError as exc: + return [], f"{url}: network error ({type(exc).__name__}: {exc})" + + if response.status_code != 200: + return [], f"{url}: HTTP {response.status_code}" + + symbols: list[str] = [] + for line in response.text.splitlines(): + if not line or line.startswith("Symbol|") or line.startswith("File Creation Time"): + continue + parts = line.split("|") + if not parts: + continue + symbol = parts[0].strip() + test_issue = parts[6].strip() if len(parts) > 6 else "N" + if test_issue == "Y": + continue + symbols.append(symbol) + + if not symbols: + return [], f"{url}: no symbols parsed" + return symbols, None + + +async def _fetch_universe_symbols_from_public(universe: str) -> tuple[list[str], list[str], str | None]: + failures: list[str] = [] + + sp500_url = "https://en.wikipedia.org/wiki/List_of_S%26P_500_companies" + nasdaq100_url = "https://en.wikipedia.org/wiki/Nasdaq-100" + wiki_symbol_pattern = r"\s*]*>([A-Z.]{1,10})\s*" + + async with httpx.AsyncClient(timeout=30.0, verify=_CA_BUNDLE_PATH) as client: + if universe == "sp500": + symbols, error = await _fetch_html_symbols(client, sp500_url, 
wiki_symbol_pattern) + if error: + failures.append(error) + else: + return symbols, failures, "wikipedia_sp500" + + if universe == "nasdaq100": + symbols, error = await _fetch_html_symbols(client, nasdaq100_url, wiki_symbol_pattern) + if error: + failures.append(error) + else: + return symbols, failures, "wikipedia_nasdaq100" + + if universe == "nasdaq_all": + symbols, error = await _fetch_nasdaq_trader_symbols(client) + if error: + failures.append(error) + else: + return symbols, failures, "nasdaq_trader" + + return [], failures, None + + +async def _read_cached_symbols(db: AsyncSession, universe: str) -> list[str]: + key = f"ticker_universe_cache_{universe}" + result = await db.execute(select(SystemSetting).where(SystemSetting.key == key)) + setting = result.scalar_one_or_none() + if setting is None: + return [] + + try: + payload = json.loads(setting.value) + except (TypeError, ValueError): + return [] + + if isinstance(payload, dict): + symbols = payload.get("symbols", []) + elif isinstance(payload, list): + symbols = payload + else: + symbols = [] + + if not isinstance(symbols, list): + return [] + + return _normalise_symbols([str(symbol) for symbol in symbols]) + + +async def _write_cached_symbols( + db: AsyncSession, + universe: str, + symbols: list[str], + source: str, +) -> None: + key = f"ticker_universe_cache_{universe}" + payload = { + "symbols": symbols, + "source": source, + "updated_at": datetime.now(timezone.utc).isoformat(), + } + + result = await db.execute(select(SystemSetting).where(SystemSetting.key == key)) + setting = result.scalar_one_or_none() + value = json.dumps(payload) + + if setting is None: + db.add(SystemSetting(key=key, value=value)) + else: + setting.value = value + + await db.commit() + + +async def fetch_universe_symbols(db: AsyncSession, universe: str) -> list[str]: + """Fetch and normalise symbols for a supported universe with fallbacks. 
+ + Fallback order: + 1) Free public sources (Wikipedia/NASDAQ trader) + 2) FMP endpoints (if available) + 3) Cached snapshot in SystemSetting + 4) Built-in seed symbols + """ + normalised_universe = _validate_universe(universe) + failures: list[str] = [] + + public_symbols, public_failures, public_source = await _fetch_universe_symbols_from_public(normalised_universe) + failures.extend(public_failures) + cleaned_public = _normalise_symbols(public_symbols) + if cleaned_public: + await _write_cached_symbols(db, normalised_universe, cleaned_public, public_source or "public") + return cleaned_public + + try: + fmp_symbols = await _fetch_universe_symbols_from_fmp(normalised_universe) + cleaned_fmp = _normalise_symbols(fmp_symbols) + if cleaned_fmp: + await _write_cached_symbols(db, normalised_universe, cleaned_fmp, "fmp") + return cleaned_fmp + except (ProviderError, ValidationError) as exc: + failures.append(str(exc)) + + cached_symbols = await _read_cached_symbols(db, normalised_universe) + if cached_symbols: + logger.warning( + "Using cached universe symbols for %s because live fetch failed: %s", + normalised_universe, + "; ".join(failures[:3]), + ) + return cached_symbols + + seed_symbols = _normalise_symbols(_SEED_UNIVERSES.get(normalised_universe, [])) + if seed_symbols: + logger.warning( + "Using built-in seed symbols for %s because live/cache fetch failed: %s", + normalised_universe, + "; ".join(failures[:3]), + ) + return seed_symbols + + reason = "; ".join(failures[:6]) if failures else "no provider returned symbols" + raise ProviderError(f"Universe '{normalised_universe}' returned no valid symbols. Attempts: {reason}") + + +async def bootstrap_universe( + db: AsyncSession, + universe: str, + *, + prune_missing: bool = False, +) -> dict[str, int | str]: + """Upsert ticker universe into tracked tickers. + + Returns summary counts for added/existing/deleted symbols. 
+ """ + normalised_universe = _validate_universe(universe) + symbols = await fetch_universe_symbols(db, normalised_universe) + + existing_rows = await db.execute(select(Ticker.symbol)) + existing_symbols = set(existing_rows.scalars().all()) + target_symbols = set(symbols) + + symbols_to_add = sorted(target_symbols - existing_symbols) + symbols_to_delete = sorted(existing_symbols - target_symbols) if prune_missing else [] + + for symbol in symbols_to_add: + db.add(Ticker(symbol=symbol)) + + deleted_count = 0 + if symbols_to_delete: + result = await db.execute(delete(Ticker).where(Ticker.symbol.in_(symbols_to_delete))) + deleted_count = int(result.rowcount or 0) + + await db.commit() + + return { + "universe": normalised_universe, + "total_universe_symbols": len(symbols), + "added": len(symbols_to_add), + "already_tracked": len(target_symbols & existing_symbols), + "deleted": deleted_count, + } diff --git a/frontend/src/api/admin.ts b/frontend/src/api/admin.ts index 45dee1d..5af5dd2 100644 --- a/frontend/src/api/admin.ts +++ b/frontend/src/api/admin.ts @@ -1,5 +1,13 @@ import apiClient from './client'; -import type { AdminUser, SystemSetting } from '../lib/types'; +import type { + AdminUser, + PipelineReadiness, + RecommendationConfig, + SystemSetting, + TickerUniverse, + TickerUniverseBootstrapResult, + TickerUniverseSetting, +} from '../lib/types'; // Users export function listUsers() { @@ -48,6 +56,41 @@ export function updateRegistration(enabled: boolean) { .then((r) => r.data); } +export function getRecommendationSettings() { + return apiClient + .get('admin/settings/recommendations') + .then((r) => r.data); +} + +export function updateRecommendationSettings(payload: Partial) { + return apiClient + .put('admin/settings/recommendations', payload) + .then((r) => r.data); +} + +export function getTickerUniverseSetting() { + return apiClient + .get('admin/settings/ticker-universe') + .then((r) => r.data); +} + +export function updateTickerUniverseSetting(universe: 
TickerUniverse) { + return apiClient + .put('admin/settings/ticker-universe', { universe }) + .then((r) => r.data); +} + +export function bootstrapTickers(universe: TickerUniverse, pruneMissing: boolean) { + return apiClient + .post('admin/tickers/bootstrap', null, { + params: { + universe, + prune_missing: pruneMissing, + }, + }) + .then((r) => r.data); +} + // Jobs export interface JobStatus { name: string; @@ -55,12 +98,31 @@ export interface JobStatus { enabled: boolean; next_run_at: string | null; registered: boolean; + running?: boolean; + runtime_status?: string | null; + runtime_processed?: number | null; + runtime_total?: number | null; + runtime_progress_pct?: number | null; + runtime_current_ticker?: string | null; + runtime_started_at?: string | null; + runtime_finished_at?: string | null; + runtime_message?: string | null; +} + +export interface TriggerJobResponse { + job: string; + status: 'triggered' | 'busy' | 'blocked' | 'not_found'; + message: string; } export function listJobs() { return apiClient.get('admin/jobs').then((r) => r.data); } +export function getPipelineReadiness() { + return apiClient.get('admin/pipeline/readiness').then((r) => r.data); +} + export function toggleJob(jobName: string, enabled: boolean) { return apiClient .put<{ message: string }>(`admin/jobs/${jobName}/toggle`, { enabled }) @@ -69,7 +131,7 @@ export function toggleJob(jobName: string, enabled: boolean) { export function triggerJob(jobName: string) { return apiClient - .post<{ message: string }>(`admin/jobs/${jobName}/trigger`) + .post(`admin/jobs/${jobName}/trigger`) .then((r) => r.data); } diff --git a/frontend/src/api/ingestion.ts b/frontend/src/api/ingestion.ts index 771111f..1878e9e 100644 --- a/frontend/src/api/ingestion.ts +++ b/frontend/src/api/ingestion.ts @@ -1,7 +1,20 @@ import apiClient from './client'; +export interface IngestionSourceResult { + status: 'ok' | 'error' | 'skipped'; + message?: string | null; + records?: number; + classification?: string; + 
confidence?: number; +} + +export interface FetchDataResult { + symbol: string; + sources: Record; +} + export function fetchData(symbol: string) { return apiClient - .post<{ message: string }>(`ingestion/fetch/${symbol}`) + .post(`ingestion/fetch/${symbol}`) .then((r) => r.data); } diff --git a/frontend/src/api/trades.ts b/frontend/src/api/trades.ts index 6afa4de..aa7acd7 100644 --- a/frontend/src/api/trades.ts +++ b/frontend/src/api/trades.ts @@ -1,6 +1,20 @@ import apiClient from './client'; import type { TradeSetup } from '../lib/types'; -export function list() { - return apiClient.get('trades').then((r) => r.data); +export interface TradeListParams { + direction?: 'long' | 'short'; + min_confidence?: number; + recommended_action?: 'LONG_HIGH' | 'LONG_MODERATE' | 'SHORT_HIGH' | 'SHORT_MODERATE' | 'NEUTRAL'; +} + +export function list(params?: TradeListParams) { + return apiClient.get('trades', { params }).then((r) => r.data); +} + +export function bySymbol(symbol: string) { + return apiClient.get(`trades/${symbol.toUpperCase()}`).then((r) => r.data); +} + +export function history(symbol: string) { + return apiClient.get(`trades/${symbol.toUpperCase()}/history`).then((r) => r.data); } diff --git a/frontend/src/components/admin/JobControls.tsx b/frontend/src/components/admin/JobControls.tsx index f07c09c..01a2440 100644 --- a/frontend/src/components/admin/JobControls.tsx +++ b/frontend/src/components/admin/JobControls.tsx @@ -17,11 +17,79 @@ export function JobControls() { const { data: jobs, isLoading } = useJobs(); const toggleJob = useToggleJob(); const triggerJob = useTriggerJob(); + const anyJobRunning = (jobs ?? []).some((job) => job.running); + const runningJob = jobs?.find((job) => job.running); + const pausedJob = jobs?.find((job) => !job.running && job.runtime_status === 'rate_limited'); + const runningJobLabel = runningJob?.label; if (isLoading) return ; return (
+ {runningJob && ( +
+
+
+
+ Active job: {runningJob.label} +
+
+ Manual triggers are blocked until this run finishes. +
+
+
+ {runningJob.runtime_processed ?? 0} + {typeof runningJob.runtime_total === 'number' + ? ` / ${runningJob.runtime_total}` + : ''} +
+
+
+
+
+ {runningJob.runtime_current_ticker && ( +
+ Current: {runningJob.runtime_current_ticker} +
+ )} + {runningJob.runtime_message && ( +
+ {runningJob.runtime_message} +
+ )} +
+ )} + + {!runningJob && pausedJob && ( +
+
+
+
+ Last run paused: {pausedJob.label} +
+
+ {pausedJob.runtime_message || 'Rate limit hit. The collector stopped early and will resume from last progress on the next run.'} +
+
+
+ {pausedJob.runtime_processed ?? 0} + {typeof pausedJob.runtime_total === 'number' + ? ` / ${pausedJob.runtime_total}` + : ''} +
+
+
+ )} + {jobs?.map((job) => (
@@ -29,7 +97,9 @@ export function JobControls() { {/* Status dot */} {job.label}
- - {job.enabled ? 'Active' : 'Inactive'} + + {job.running + ? 'Running' + : job.runtime_status === 'rate_limited' + ? 'Paused (rate-limited)' + : job.runtime_status === 'error' + ? 'Last run error' + : job.enabled + ? 'Active' + : 'Inactive'} {job.enabled && job.next_run_at && ( @@ -49,6 +139,35 @@ export function JobControls() { Not registered )}
+ {job.running && ( +
+
+ + {job.runtime_processed ?? 0} + {typeof job.runtime_total === 'number' ? ` / ${job.runtime_total}` : ''} + {' '}processed + + {typeof job.runtime_progress_pct === 'number' && ( + {Math.max(0, Math.min(100, job.runtime_progress_pct)).toFixed(0)}% + )} +
+
+
+
+ {job.runtime_current_ticker && ( +
Current: {job.runtime_current_ticker}
+ )} +
+ )}
@@ -68,13 +187,26 @@ export function JobControls() {
+ {anyJobRunning && !job.running && ( +
+ Manual trigger blocked while {runningJobLabel ?? 'another job'} is running. +
+ )}
))} diff --git a/frontend/src/components/admin/PipelineReadinessPanel.tsx b/frontend/src/components/admin/PipelineReadinessPanel.tsx new file mode 100644 index 0000000..d562ceb --- /dev/null +++ b/frontend/src/components/admin/PipelineReadinessPanel.tsx @@ -0,0 +1,105 @@ +import { useQueryClient } from '@tanstack/react-query'; +import { usePipelineReadiness } from '../../hooks/useAdmin'; +import { useFetchSymbolData } from '../../hooks/useFetchSymbolData'; +import { formatDateTime } from '../../lib/format'; +import { SkeletonTable } from '../ui/Skeleton'; + +function scoreBadge(score: number | null) { + if (score === null) return ; + const cls = score >= 60 ? 'text-emerald-400' : score >= 40 ? 'text-amber-400' : 'text-red-400'; + return {score.toFixed(0)}; +} + +export function PipelineReadinessPanel() { + const queryClient = useQueryClient(); + const { data, isLoading, isError, error, isFetching } = usePipelineReadiness(); + const fetchMutation = useFetchSymbolData({ + includeSymbolPrefix: true, + invalidatePipelineReadiness: true, + }); + + if (isLoading) return ; + if (isError) return

{(error as Error)?.message || 'Failed to load pipeline readiness'}

; + + const rows = data ?? []; + + return ( +
+
+
+

Pipeline Readiness

+

Shows why tickers may be missing in scanner/rankings and what is incomplete.

+
+ +
+ + {rows.length === 0 ? ( +

No tickers available.

+ ) : ( +
+ + + + + + + + + + + + + + {rows.map((row) => ( + + + + + + + + + + ))} + +
SymbolOHLCVDimsS/RScannerMissing ReasonsAction
{row.symbol} +
{row.ohlcv_bars} bars
+
{row.ohlcv_last_date ? formatDateTime(row.ohlcv_last_date) : '—'}
+
+
+ {scoreBadge(row.dimensions.technical)} + {scoreBadge(row.dimensions.sr_quality)} + {scoreBadge(row.dimensions.sentiment)} + {scoreBadge(row.dimensions.fundamental)} + {scoreBadge(row.dimensions.momentum)} +
+
T SR S F M
+
{row.sr_level_count} + + {row.ready_for_scanner ? 'Ready' : 'Blocked'} + +
setups: {row.trade_setup_count}
+
+ {row.missing_reasons.length ? row.missing_reasons.join(', ') : none} + + +
+
+ )} +
+ ); +} diff --git a/frontend/src/components/admin/RecommendationSettings.tsx b/frontend/src/components/admin/RecommendationSettings.tsx new file mode 100644 index 0000000..ec557d1 --- /dev/null +++ b/frontend/src/components/admin/RecommendationSettings.tsx @@ -0,0 +1,101 @@ +import { useEffect, useState } from 'react'; +import type { RecommendationConfig } from '../../lib/types'; +import { useRecommendationSettings, useUpdateRecommendationSettings } from '../../hooks/useAdmin'; +import { SkeletonTable } from '../ui/Skeleton'; + +const DEFAULTS: RecommendationConfig = { + high_confidence_threshold: 70, + moderate_confidence_threshold: 50, + confidence_diff_threshold: 20, + signal_alignment_weight: 0.15, + sr_strength_weight: 0.2, + distance_penalty_factor: 0.1, + momentum_technical_divergence_threshold: 30, + fundamental_technical_divergence_threshold: 40, +}; + +function NumberInput({ + label, + value, + min, + max, + step, + onChange, +}: { + label: string; + value: number; + min: number; + max: number; + step?: number; + onChange: (v: number) => void; +}) { + return ( + + ); +} + +export function RecommendationSettings() { + const { data, isLoading, isError, error } = useRecommendationSettings(); + const update = useUpdateRecommendationSettings(); + + const [form, setForm] = useState(DEFAULTS); + + useEffect(() => { + if (data) setForm(data); + }, [data]); + + const setField = (field: keyof RecommendationConfig, value: number) => { + setForm((prev) => ({ ...prev, [field]: value })); + }; + + const onSave = () => { + update.mutate(form as unknown as Record); + }; + + const onReset = () => { + setForm(DEFAULTS); + update.mutate(DEFAULTS as unknown as Record); + }; + + if (isLoading) return ; + if (isError) return

{(error as Error)?.message || 'Failed to load recommendation settings'}

; + + return ( +
+

Recommendation Configuration

+ +
+ setField('high_confidence_threshold', v)} /> + setField('moderate_confidence_threshold', v)} /> + setField('confidence_diff_threshold', v)} /> + + setField('signal_alignment_weight', v)} /> + setField('sr_strength_weight', v)} /> + setField('distance_penalty_factor', v)} /> + + setField('momentum_technical_divergence_threshold', v)} /> + setField('fundamental_technical_divergence_threshold', v)} /> +
+ +
+ + +
+
+ ); +} diff --git a/frontend/src/components/admin/TickerUniverseBootstrap.tsx b/frontend/src/components/admin/TickerUniverseBootstrap.tsx new file mode 100644 index 0000000..9838983 --- /dev/null +++ b/frontend/src/components/admin/TickerUniverseBootstrap.tsx @@ -0,0 +1,97 @@ +import { useEffect, useState } from 'react'; +import { + useBootstrapTickers, + useTickerUniverseSetting, + useUpdateTickerUniverseSetting, +} from '../../hooks/useAdmin'; +import type { TickerUniverse } from '../../lib/types'; + +const UNIVERSE_OPTIONS: Array<{ value: TickerUniverse; label: string }> = [ + { value: 'sp500', label: 'S&P 500' }, + { value: 'nasdaq100', label: 'NASDAQ 100' }, + { value: 'nasdaq_all', label: 'NASDAQ All' }, +]; + +export function TickerUniverseBootstrap() { + const { data, isLoading, isError, error } = useTickerUniverseSetting(); + const updateDefault = useUpdateTickerUniverseSetting(); + const bootstrap = useBootstrapTickers(); + + const [universe, setUniverse] = useState('sp500'); + const [pruneMissing, setPruneMissing] = useState(false); + + useEffect(() => { + if (data?.universe) { + setUniverse(data.universe); + } + }, [data]); + + const onSaveDefault = () => { + updateDefault.mutate(universe); + }; + + const onBootstrap = () => { + bootstrap.mutate({ universe, pruneMissing }); + }; + + return ( +
+

Ticker Universe Discovery

+

+ Auto-discover tickers from a predefined universe and keep your registry updated. +

+ + {isError && ( +

+ {(error as Error)?.message || 'Failed to load ticker universe setting'} +

+ )} + +
+ + + +
+ +
+ + +
+
+ ); +} diff --git a/frontend/src/components/scanner/TradeTable.tsx b/frontend/src/components/scanner/TradeTable.tsx index f08c1f3..3fa9765 100644 --- a/frontend/src/components/scanner/TradeTable.tsx +++ b/frontend/src/components/scanner/TradeTable.tsx @@ -1,8 +1,9 @@ import { Link } from 'react-router-dom'; import type { TradeSetup } from '../../lib/types'; import { formatPrice, formatPercent, formatDateTime } from '../../lib/format'; +import { recommendationActionDirection, recommendationActionLabel } from '../../lib/recommendation'; -export type SortColumn = 'symbol' | 'direction' | 'entry_price' | 'stop_loss' | 'target' | 'risk_amount' | 'reward_amount' | 'rr_ratio' | 'stop_pct' | 'target_pct' | 'composite_score' | 'detected_at'; +export type SortColumn = 'symbol' | 'direction' | 'recommended_action' | 'confidence_score' | 'entry_price' | 'stop_loss' | 'target' | 'best_target_probability' | 'risk_amount' | 'reward_amount' | 'rr_ratio' | 'stop_pct' | 'target_pct' | 'risk_level' | 'composite_score' | 'detected_at'; export type SortDirection = 'asc' | 'desc'; interface TradeTableProps { @@ -14,15 +15,19 @@ interface TradeTableProps { const columns: { key: SortColumn; label: string }[] = [ { key: 'symbol', label: 'Symbol' }, + { key: 'recommended_action', label: 'Recommended Action' }, + { key: 'confidence_score', label: 'Confidence' }, { key: 'direction', label: 'Direction' }, { key: 'entry_price', label: 'Entry' }, { key: 'stop_loss', label: 'Stop Loss' }, { key: 'target', label: 'Target' }, + { key: 'best_target_probability', label: 'Best Target' }, { key: 'risk_amount', label: 'Risk $' }, { key: 'reward_amount', label: 'Reward $' }, { key: 'rr_ratio', label: 'R:R' }, { key: 'stop_pct', label: '% to Stop' }, { key: 'target_pct', label: '% to Target' }, + { key: 'risk_level', label: 'Risk' }, { key: 'composite_score', label: 'Score' }, { key: 'detected_at', label: 'Detected' }, ]; @@ -53,6 +58,19 @@ function sortIndicator(column: SortColumn, active: SortColumn, 
dir: SortDirectio return dir === 'asc' ? ' ▲' : ' ▼'; } +function riskLevelClass(riskLevel: TradeSetup['risk_level']) { + if (riskLevel === 'Low') return 'text-emerald-400'; + if (riskLevel === 'Medium') return 'text-amber-400'; + if (riskLevel === 'High') return 'text-red-400'; + return 'text-gray-400'; +} + +function bestTargetText(trade: TradeSetup) { + if (!trade.targets || trade.targets.length === 0) return '—'; + const best = [...trade.targets].sort((a, b) => b.probability - a.probability)[0]; + return `${formatPrice(best.price)} (${best.probability.toFixed(0)}%)`; +} + export function TradeTable({ trades, sortColumn, sortDirection, onSort }: TradeTableProps) { if (trades.length === 0) { return

No trade setups match the current filters.

; @@ -84,6 +102,17 @@ export function TradeTable({ trades, sortColumn, sortDirection, onSort }: TradeT {trade.symbol} + +
+ {recommendationActionLabel(trade.recommended_action)} + {recommendationActionDirection(trade.recommended_action) !== 'neutral' && recommendationActionDirection(trade.recommended_action) !== trade.direction && ( +
Alternative setup (not preferred)
+ )} +
+ + + {trade.confidence_score === null ? '—' : `${trade.confidence_score.toFixed(1)}%`} + {trade.direction} @@ -92,11 +121,13 @@ export function TradeTable({ trades, sortColumn, sortDirection, onSort }: TradeT {formatPrice(trade.entry_price)} {formatPrice(trade.stop_loss)} {formatPrice(trade.target)} + {bestTargetText(trade)} {formatPrice(analysis.risk_amount)} {formatPrice(analysis.reward_amount)} {trade.rr_ratio.toFixed(2)} {formatPercent(analysis.stop_pct)} {formatPercent(analysis.target_pct)} + {trade.risk_level ?? '—'} 70 ? 'text-emerald-400' : trade.composite_score >= 40 ? 'text-amber-400' : 'text-red-400'}`}> {Math.round(trade.composite_score)} diff --git a/frontend/src/components/ticker/RecommendationPanel.tsx b/frontend/src/components/ticker/RecommendationPanel.tsx new file mode 100644 index 0000000..31bfc9c --- /dev/null +++ b/frontend/src/components/ticker/RecommendationPanel.tsx @@ -0,0 +1,168 @@ +import type { TradeSetup } from '../../lib/types'; +import { formatPrice, formatPercent } from '../../lib/format'; +import { recommendationActionDirection, recommendationActionLabel } from '../../lib/recommendation'; + +interface RecommendationPanelProps { + symbol: string; + longSetup?: TradeSetup; + shortSetup?: TradeSetup; +} + +function riskClass(risk: TradeSetup['risk_level']) { + if (risk === 'Low') return 'text-emerald-400'; + if (risk === 'Medium') return 'text-amber-400'; + if (risk === 'High') return 'text-red-400'; + return 'text-gray-400'; +} + +function isRecommended(setup: TradeSetup | undefined, action: TradeSetup['recommended_action'] | undefined) { + if (!setup || !action) return false; + if (setup.direction === 'long') return action.startsWith('LONG'); + return action.startsWith('SHORT'); +} + +function TargetTable({ setup }: { setup: TradeSetup }) { + if (!setup.targets || setup.targets.length === 0) { + return

No target probabilities available.

; + } + + return ( +
+ + + + + + + + + + + + {setup.targets.map((target) => ( + + + + + + + + ))} + +
ClassificationPriceDistanceR:RProbability
{target.classification}{formatPrice(target.price)}{formatPercent((target.distance_from_entry / setup.entry_price) * 100)}{target.rr_ratio.toFixed(2)}{target.probability.toFixed(1)}%
+
+ ); +} + +function SetupCard({ setup, action }: { setup?: TradeSetup; action?: TradeSetup['recommended_action'] }) { + if (!setup) { + return ( +
+ Setup unavailable for this direction. +
+ ); + } + + const recommended = isRecommended(setup, action); + + return ( +
+
+

+ {setup.direction.toUpperCase()} +

+ {setup.confidence_score?.toFixed(1) ?? '—'}% +
+ + {!recommended && recommendationActionDirection(action ?? null) !== 'neutral' && ( +

Alternative setup (ticker bias currently favors the opposite direction).

+ )} + +
+
Entry
{formatPrice(setup.entry_price)}
+
Stop
{formatPrice(setup.stop_loss)}
+
Primary Target
{formatPrice(setup.target)}
+
R:R
{setup.rr_ratio.toFixed(2)}
+
+ + + + {setup.conflict_flags.length > 0 && ( +
+ {setup.conflict_flags.join(' • ')} +
+ )} +
+ ); +} + +export function RecommendationPanel({ symbol, longSetup, shortSetup }: RecommendationPanelProps) { + const summary = longSetup?.recommendation_summary ?? shortSetup?.recommendation_summary; + const action = (summary?.action ?? 'NEUTRAL') as TradeSetup['recommended_action']; + const preferredDirection = recommendationActionDirection(action); + + const preferredSetup = + preferredDirection === 'long' + ? longSetup + : preferredDirection === 'short' + ? shortSetup + : undefined; + + const alternativeSetup = + preferredDirection === 'long' + ? shortSetup + : preferredDirection === 'short' + ? longSetup + : undefined; + + if (!longSetup && !shortSetup) { + return null; + } + + return ( +
+

Recommendation

+
+
+ {recommendationActionLabel(action)} + + Risk: {summary?.risk_level ?? '—'} + + Composite: {summary?.composite_score?.toFixed(1) ?? '—'} + {symbol.toUpperCase()} +
+ +

Recommended Action is the ticker-level bias. The preferred setup is shown first; the opposite side is available under Alternative scenario.

+ + {summary?.reasoning && ( +

{summary.reasoning}

+ )} + + {preferredDirection !== 'neutral' && preferredSetup ? ( +
+ + + {alternativeSetup && ( +
+ + Alternative scenario ({alternativeSetup.direction.toUpperCase()}) + +
+ +
+
+ )} +
+ ) : ( +
+ + +
+ )} +
+
+ ); +} diff --git a/frontend/src/hooks/useAdmin.ts b/frontend/src/hooks/useAdmin.ts index 660d922..4961818 100644 --- a/frontend/src/hooks/useAdmin.ts +++ b/frontend/src/hooks/useAdmin.ts @@ -1,6 +1,7 @@ import { useMutation, useQuery, useQueryClient } from '@tanstack/react-query'; import * as adminApi from '../api/admin'; import { useToast } from '../components/ui/Toast'; +import type { TickerUniverse } from '../lib/types'; // ── Users ── @@ -89,13 +90,93 @@ export function useUpdateSetting() { }); } +export function useRecommendationSettings() { + return useQuery({ + queryKey: ['admin', 'recommendation-settings'], + queryFn: () => adminApi.getRecommendationSettings(), + }); +} + +export function useUpdateRecommendationSettings() { + const qc = useQueryClient(); + const { addToast } = useToast(); + + return useMutation({ + mutationFn: (payload: Record) => + adminApi.updateRecommendationSettings(payload), + onSuccess: () => { + qc.invalidateQueries({ queryKey: ['admin', 'recommendation-settings'] }); + addToast('success', 'Recommendation settings updated'); + }, + onError: (error: Error) => { + addToast('error', error.message || 'Failed to update recommendation settings'); + }, + }); +} + +export function useTickerUniverseSetting() { + return useQuery({ + queryKey: ['admin', 'ticker-universe'], + queryFn: () => adminApi.getTickerUniverseSetting(), + }); +} + +export function useUpdateTickerUniverseSetting() { + const qc = useQueryClient(); + const { addToast } = useToast(); + + return useMutation({ + mutationFn: (universe: TickerUniverse) => adminApi.updateTickerUniverseSetting(universe), + onSuccess: () => { + qc.invalidateQueries({ queryKey: ['admin', 'ticker-universe'] }); + addToast('success', 'Default ticker universe updated'); + }, + onError: (error: Error) => { + addToast('error', error.message || 'Failed to update default ticker universe'); + }, + }); +} + +export function useBootstrapTickers() { + const qc = useQueryClient(); + const { addToast } = 
useToast(); + + return useMutation({ + mutationFn: ({ universe, pruneMissing }: { universe: TickerUniverse; pruneMissing: boolean }) => + adminApi.bootstrapTickers(universe, pruneMissing), + onSuccess: (result) => { + qc.invalidateQueries({ queryKey: ['tickers'] }); + qc.invalidateQueries({ queryKey: ['admin', 'ticker-universe'] }); + addToast( + 'success', + `Bootstrap done: +${result.added}, existing ${result.already_tracked}, deleted ${result.deleted}`, + ); + }, + onError: (error: Error) => { + addToast('error', error.message || 'Failed to bootstrap tickers'); + }, + }); +} + // ── Jobs ── export function useJobs() { return useQuery({ queryKey: ['admin', 'jobs'], queryFn: () => adminApi.listJobs(), - refetchInterval: 15_000, + refetchInterval: (query) => { + const jobs = (query.state.data ?? []) as adminApi.JobStatus[]; + const hasRunning = jobs.some((job) => job.running); + return hasRunning ? 2_000 : 15_000; + }, + }); +} + +export function usePipelineReadiness() { + return useQuery({ + queryKey: ['admin', 'pipeline-readiness'], + queryFn: () => adminApi.getPipelineReadiness(), + refetchInterval: 20_000, }); } @@ -121,9 +202,13 @@ export function useTriggerJob() { return useMutation({ mutationFn: (jobName: string) => adminApi.triggerJob(jobName), - onSuccess: () => { + onSuccess: (result) => { qc.invalidateQueries({ queryKey: ['admin', 'jobs'] }); - addToast('success', 'Job triggered successfully'); + if (result.status === 'triggered') { + addToast('success', result.message || 'Job triggered successfully'); + return; + } + addToast('info', result.message || 'Job could not be triggered'); }, onError: (error: Error) => { addToast('error', error.message || 'Failed to trigger job'); diff --git a/frontend/src/hooks/useFetchSymbolData.ts b/frontend/src/hooks/useFetchSymbolData.ts new file mode 100644 index 0000000..281c330 --- /dev/null +++ b/frontend/src/hooks/useFetchSymbolData.ts @@ -0,0 +1,42 @@ +import { useMutation, useQueryClient } from 
'@tanstack/react-query'; +import { fetchData, type FetchDataResult } from '../api/ingestion'; +import { useToast } from '../components/ui/Toast'; +import { summarizeIngestionResult } from '../lib/ingestionStatus'; + +interface UseFetchSymbolDataOptions { + includeSymbolPrefix?: boolean; + invalidatePipelineReadiness?: boolean; +} + +export function useFetchSymbolData(options: UseFetchSymbolDataOptions = {}) { + const { includeSymbolPrefix = false, invalidatePipelineReadiness = false } = options; + const queryClient = useQueryClient(); + const { addToast } = useToast(); + + return useMutation({ + mutationFn: (symbol: string) => fetchData(symbol), + onSuccess: (result: FetchDataResult, symbol: string) => { + const normalized = symbol.toUpperCase(); + const summary = summarizeIngestionResult(result, normalized); + const toastMessage = includeSymbolPrefix + ? `${normalized}: ${summary.message}` + : summary.message; + addToast(summary.toastType, toastMessage); + + queryClient.invalidateQueries({ queryKey: ['ohlcv', symbol] }); + queryClient.invalidateQueries({ queryKey: ['sentiment', symbol] }); + queryClient.invalidateQueries({ queryKey: ['fundamentals', symbol] }); + queryClient.invalidateQueries({ queryKey: ['sr-levels', symbol] }); + queryClient.invalidateQueries({ queryKey: ['scores', symbol] }); + + if (invalidatePipelineReadiness) { + queryClient.invalidateQueries({ queryKey: ['admin', 'pipeline-readiness'] }); + } + }, + onError: (err: Error, symbol: string) => { + const normalized = symbol.toUpperCase(); + const prefix = includeSymbolPrefix ? 
`${normalized}: ` : ''; + addToast('error', `${prefix}${err.message || 'Failed to fetch data'}`); + }, + }); +} diff --git a/frontend/src/hooks/useTickerDetail.ts b/frontend/src/hooks/useTickerDetail.ts index 4a6ff3d..15abb55 100644 --- a/frontend/src/hooks/useTickerDetail.ts +++ b/frontend/src/hooks/useTickerDetail.ts @@ -38,8 +38,8 @@ export function useTickerDetail(symbol: string) { }); const trades = useQuery({ - queryKey: ['trades'], - queryFn: () => tradesApi.list(), + queryKey: ['trades', symbol], + queryFn: () => tradesApi.bySymbol(symbol), enabled: !!symbol, }); diff --git a/frontend/src/lib/ingestionStatus.ts b/frontend/src/lib/ingestionStatus.ts new file mode 100644 index 0000000..821b913 --- /dev/null +++ b/frontend/src/lib/ingestionStatus.ts @@ -0,0 +1,42 @@ +import type { FetchDataResult, IngestionSourceResult } from '../api/ingestion'; + +export type IngestionToastType = 'success' | 'error' | 'info'; + +export interface IngestionStatusSummary { + toastType: IngestionToastType; + message: string; +} + +export function summarizeIngestionResult( + result: FetchDataResult | null | undefined, + fallbackLabel: string, +): IngestionStatusSummary { + const sources = result?.sources; + if (!sources) { + return { + toastType: 'success', + message: `Data fetched for ${fallbackLabel}`, + }; + } + + const entries = Object.entries(sources) as [string, IngestionSourceResult][]; + const parts = entries.map(([name, info]) => { + const label = name.charAt(0).toUpperCase() + name.slice(1); + if (info.status === 'ok') { + return `${label} ✓`; + } + if (info.status === 'skipped') { + return `${label}: skipped${info.message ? ` (${info.message})` : ''}`; + } + return `${label} ✗${info.message ? `: ${info.message}` : ''}`; + }); + + const hasError = entries.some(([, source]) => source.status === 'error'); + const hasSkip = entries.some(([, source]) => source.status === 'skipped'); + const toastType: IngestionToastType = hasError ? 'error' : hasSkip ? 
'info' : 'success'; + + return { + toastType, + message: parts.join(' · '), + }; +} diff --git a/frontend/src/lib/recommendation.ts b/frontend/src/lib/recommendation.ts new file mode 100644 index 0000000..8fe1038 --- /dev/null +++ b/frontend/src/lib/recommendation.ts @@ -0,0 +1,46 @@ +import type { TradeSetup } from './types'; + +export type RecommendationAction = NonNullable; + +export const RECOMMENDATION_ACTION_LABELS: Record = { + LONG_HIGH: 'LONG (High Confidence)', + LONG_MODERATE: 'LONG (Moderate Confidence)', + SHORT_HIGH: 'SHORT (High Confidence)', + SHORT_MODERATE: 'SHORT (Moderate Confidence)', + NEUTRAL: 'NEUTRAL (Conflicting Signals)', +}; + +export const RECOMMENDATION_ACTION_GLOSSARY: Array<{ action: RecommendationAction; description: string }> = [ + { + action: 'LONG_HIGH', + description: 'Ticker bias favors LONG strongly. LONG confidence is above the high threshold and clearly above SHORT.', + }, + { + action: 'LONG_MODERATE', + description: 'Ticker bias favors LONG, but with moderate conviction.', + }, + { + action: 'SHORT_HIGH', + description: 'Ticker bias favors SHORT strongly. SHORT confidence is above the high threshold and clearly above LONG.', + }, + { + action: 'SHORT_MODERATE', + description: 'Ticker bias favors SHORT, but with moderate conviction.', + }, + { + action: 'NEUTRAL', + description: 'No strong directional edge. Signals are mixed or confidence gap is too small.', + }, +]; + +export function recommendationActionLabel(action: TradeSetup['recommended_action']): string { + if (!action) return RECOMMENDATION_ACTION_LABELS.NEUTRAL; + return RECOMMENDATION_ACTION_LABELS[action] ?? 
RECOMMENDATION_ACTION_LABELS.NEUTRAL; +} + +export function recommendationActionDirection(action: TradeSetup['recommended_action']): 'long' | 'short' | 'neutral' { + if (!action || action === 'NEUTRAL') return 'neutral'; + if (action.startsWith('LONG')) return 'long'; + if (action.startsWith('SHORT')) return 'short'; + return 'neutral'; +} diff --git a/frontend/src/lib/types.ts b/frontend/src/lib/types.ts index 90e4986..115e53c 100644 --- a/frontend/src/lib/types.ts +++ b/frontend/src/lib/types.ts @@ -121,6 +121,32 @@ export interface TradeSetup { rr_ratio: number; composite_score: number; detected_at: string; + confidence_score: number | null; + targets: TradeTarget[]; + conflict_flags: string[]; + recommended_action: 'LONG_HIGH' | 'LONG_MODERATE' | 'SHORT_HIGH' | 'SHORT_MODERATE' | 'NEUTRAL' | null; + reasoning: string | null; + risk_level: 'Low' | 'Medium' | 'High' | null; + actual_outcome: string | null; + recommendation_summary?: RecommendationSummary; +} + +export interface TradeTarget { + price: number; + distance_from_entry: number; + distance_atr_multiple: number; + rr_ratio: number; + probability: number; + classification: 'Conservative' | 'Moderate' | 'Aggressive'; + sr_level_id: number; + sr_strength: number; +} + +export interface RecommendationSummary { + action: string; + reasoning: string | null; + risk_level: 'Low' | 'Medium' | 'High' | null; + composite_score: number; } // S/R Levels @@ -214,3 +240,51 @@ export interface SystemSetting { value: string; updated_at: string | null; } + +export interface RecommendationConfig { + high_confidence_threshold: number; + moderate_confidence_threshold: number; + confidence_diff_threshold: number; + signal_alignment_weight: number; + sr_strength_weight: number; + distance_penalty_factor: number; + momentum_technical_divergence_threshold: number; + fundamental_technical_divergence_threshold: number; +} + +export type TickerUniverse = 'sp500' | 'nasdaq100' | 'nasdaq_all'; + +export interface 
TickerUniverseSetting { + universe: TickerUniverse; +} + +export interface TickerUniverseBootstrapResult { + universe: TickerUniverse; + total_universe_symbols: number; + added: number; + already_tracked: number; + deleted: number; +} + +export interface PipelineReadiness { + symbol: string; + ohlcv_bars: number; + ohlcv_last_date: string | null; + dimensions: { + technical: number | null; + sr_quality: number | null; + sentiment: number | null; + fundamental: number | null; + momentum: number | null; + }; + sentiment_count: number; + sentiment_last_at: string | null; + has_fundamentals: boolean; + fundamentals_fetched_at: string | null; + sr_level_count: number; + has_composite: boolean; + composite_stale: boolean | null; + trade_setup_count: number; + missing_reasons: string[]; + ready_for_scanner: boolean; +} diff --git a/frontend/src/pages/AdminPage.tsx b/frontend/src/pages/AdminPage.tsx index d736d4f..f83f0ab 100644 --- a/frontend/src/pages/AdminPage.tsx +++ b/frontend/src/pages/AdminPage.tsx @@ -1,8 +1,11 @@ import { useState } from 'react'; import { DataCleanup } from '../components/admin/DataCleanup'; import { JobControls } from '../components/admin/JobControls'; +import { PipelineReadinessPanel } from '../components/admin/PipelineReadinessPanel'; +import { RecommendationSettings } from '../components/admin/RecommendationSettings'; import { SettingsForm } from '../components/admin/SettingsForm'; import { TickerManagement } from '../components/admin/TickerManagement'; +import { TickerUniverseBootstrap } from '../components/admin/TickerUniverseBootstrap'; import { UserTable } from '../components/admin/UserTable'; const tabs = ['Users', 'Tickers', 'Settings', 'Jobs', 'Cleanup'] as const; @@ -39,8 +42,19 @@ export default function AdminPage() {
{activeTab === 'Users' && } {activeTab === 'Tickers' && } - {activeTab === 'Settings' && } - {activeTab === 'Jobs' && } + {activeTab === 'Settings' && ( +
+ + + +
+ )} + {activeTab === 'Jobs' && ( +
+ + +
+ )} {activeTab === 'Cleanup' && }
diff --git a/frontend/src/pages/ScannerPage.tsx b/frontend/src/pages/ScannerPage.tsx index a557353..c1296da 100644 --- a/frontend/src/pages/ScannerPage.tsx +++ b/frontend/src/pages/ScannerPage.tsx @@ -6,17 +6,23 @@ import { SkeletonTable } from '../components/ui/Skeleton'; import { useToast } from '../components/ui/Toast'; import { triggerJob } from '../api/admin'; import type { TradeSetup } from '../lib/types'; +import { RECOMMENDATION_ACTION_GLOSSARY, RECOMMENDATION_ACTION_LABELS } from '../lib/recommendation'; type DirectionFilter = 'both' | 'long' | 'short'; +type ActionFilter = 'all' | 'LONG_HIGH' | 'LONG_MODERATE' | 'SHORT_HIGH' | 'SHORT_MODERATE' | 'NEUTRAL'; function filterTrades( trades: TradeSetup[], minRR: number, direction: DirectionFilter, + minConfidence: number, + action: ActionFilter, ): TradeSetup[] { return trades.filter((t) => { if (t.rr_ratio < minRR) return false; if (direction !== 'both' && t.direction !== direction) return false; + if (minConfidence > 0 && (t.confidence_score ?? 0) < minConfidence) return false; + if (action !== 'all' && t.recommended_action !== action) return false; return true; }); } @@ -28,6 +34,14 @@ function getComputedValue(trade: TradeSetup, column: SortColumn): number { case 'reward_amount': return analysis.reward_amount; case 'stop_pct': return analysis.stop_pct; case 'target_pct': return analysis.target_pct; + case 'confidence_score': return trade.confidence_score ?? -1; + case 'best_target_probability': + return trade.targets?.length ? Math.max(...trade.targets.map((t) => t.probability)) : -1; + case 'risk_level': + if (trade.risk_level === 'Low') return 1; + if (trade.risk_level === 'Medium') return 2; + if (trade.risk_level === 'High') return 3; + return 0; default: return 0; } } @@ -46,6 +60,9 @@ function sortTrades( case 'direction': cmp = a.direction.localeCompare(b.direction); break; + case 'recommended_action': + cmp = (a.recommended_action ?? '').localeCompare(b.recommended_action ?? 
''); + break; case 'detected_at': cmp = new Date(a.detected_at).getTime() - new Date(b.detected_at).getTime(); break; @@ -53,6 +70,9 @@ function sortTrades( case 'reward_amount': case 'stop_pct': case 'target_pct': + case 'confidence_score': + case 'best_target_probability': + case 'risk_level': cmp = getComputedValue(a, column) - getComputedValue(b, column); break; case 'entry_price': @@ -75,6 +95,8 @@ export default function ScannerPage() { const [minRR, setMinRR] = useState(0); const [directionFilter, setDirectionFilter] = useState('both'); + const [minConfidence, setMinConfidence] = useState(0); + const [actionFilter, setActionFilter] = useState('all'); const [sortColumn, setSortColumn] = useState('rr_ratio'); const [sortDirection, setSortDirection] = useState('desc'); @@ -100,9 +122,9 @@ export default function ScannerPage() { const processed = useMemo(() => { if (!trades) return []; - const filtered = filterTrades(trades, minRR, directionFilter); + const filtered = filterTrades(trades, minRR, directionFilter, minConfidence, actionFilter); return sortTrades(filtered, sortColumn, sortDirection); - }, [trades, minRR, directionFilter, sortColumn, sortDirection]); + }, [trades, minRR, directionFilter, minConfidence, actionFilter, sortColumn, sortDirection]); return (
@@ -160,6 +182,51 @@ export default function ScannerPage() {
+
+ + setMinConfidence(Number(e.target.value) || 0)} + className="w-24 rounded border border-gray-700 bg-gray-800 px-3 py-1.5 text-sm text-gray-200 focus:border-blue-500 focus:outline-none transition-colors duration-150" + /> +
+
+ + +
+ + +
+

Recommended Action Glossary (Ticker-Level Bias)

+
+ {RECOMMENDATION_ACTION_GLOSSARY.map((item) => ( +

+ {RECOMMENDATION_ACTION_LABELS[item.action]}:{' '} + {item.description} +

+ ))} +
{/* Content */} diff --git a/frontend/src/pages/TickerDetailPage.tsx b/frontend/src/pages/TickerDetailPage.tsx index 1389228..be7f400 100644 --- a/frontend/src/pages/TickerDetailPage.tsx +++ b/frontend/src/pages/TickerDetailPage.tsx @@ -1,15 +1,14 @@ import { useMemo, useEffect } from 'react'; import { useParams } from 'react-router-dom'; -import { useMutation, useQueryClient } from '@tanstack/react-query'; import { useTickerDetail } from '../hooks/useTickerDetail'; +import { useFetchSymbolData } from '../hooks/useFetchSymbolData'; import { CandlestickChart } from '../components/charts/CandlestickChart'; import { ScoreCard } from '../components/ui/ScoreCard'; import { SkeletonCard } from '../components/ui/Skeleton'; import { SentimentPanel } from '../components/ticker/SentimentPanel'; import { FundamentalsPanel } from '../components/ticker/FundamentalsPanel'; import { IndicatorSelector } from '../components/ticker/IndicatorSelector'; -import { useToast } from '../components/ui/Toast'; -import { fetchData } from '../api/ingestion'; +import { RecommendationPanel } from '../components/ticker/RecommendationPanel'; import { formatPrice } from '../lib/format'; import type { TradeSetup } from '../lib/types'; @@ -67,43 +66,7 @@ function DataFreshnessBar({ items }: { items: DataStatusItem[] }) { export default function TickerDetailPage() { const { symbol = '' } = useParams<{ symbol: string }>(); const { ohlcv, scores, srLevels, sentiment, fundamentals, trades } = useTickerDetail(symbol); - const queryClient = useQueryClient(); - const { addToast } = useToast(); - - const ingestion = useMutation({ - mutationFn: () => fetchData(symbol), - onSuccess: (result: any) => { - // Show per-source status breakdown - const sources = result?.sources; - if (sources) { - const parts: string[] = []; - for (const [name, info] of Object.entries(sources) as [string, any][]) { - const label = name.charAt(0).toUpperCase() + name.slice(1); - if (info.status === 'ok') { - parts.push(`${label} 
✓`); - } else if (info.status === 'skipped') { - parts.push(`${label}: skipped (${info.message})`); - } else { - parts.push(`${label} ✗: ${info.message}`); - } - } - const hasError = Object.values(sources).some((s: any) => s.status === 'error'); - const hasSkip = Object.values(sources).some((s: any) => s.status === 'skipped'); - const toastType = hasError ? 'error' : hasSkip ? 'info' : 'success'; - addToast(toastType, parts.join(' · ')); - } else { - addToast('success', `Data fetched for ${symbol.toUpperCase()}`); - } - queryClient.invalidateQueries({ queryKey: ['ohlcv', symbol] }); - queryClient.invalidateQueries({ queryKey: ['sentiment', symbol] }); - queryClient.invalidateQueries({ queryKey: ['fundamentals', symbol] }); - queryClient.invalidateQueries({ queryKey: ['sr-levels', symbol] }); - queryClient.invalidateQueries({ queryKey: ['scores', symbol] }); - }, - onError: (err: Error) => { - addToast('error', err.message || 'Failed to fetch data'); - }, - }); + const ingestion = useFetchSymbolData(); const dataStatus: DataStatusItem[] = useMemo(() => [ { @@ -140,18 +103,28 @@ export default function TickerDetailPage() { } }, [trades.error]); - // Pick the latest trade setup for the current symbol - const tradeSetup: TradeSetup | undefined = useMemo(() => { - if (trades.error || !trades.data) return undefined; - const matching = trades.data.filter( - (t) => t.symbol.toUpperCase() === symbol.toUpperCase(), - ); - if (matching.length === 0) return undefined; - return matching.reduce((latest, t) => - new Date(t.detected_at) > new Date(latest.detected_at) ? 
t : latest, - ); + const setupsForSymbol: TradeSetup[] = useMemo(() => { + if (trades.error || !trades.data) return []; + return trades.data.filter((t) => t.symbol.toUpperCase() === symbol.toUpperCase()); }, [trades.data, trades.error, symbol]); + const longSetup = useMemo( + () => setupsForSymbol?.find((s) => s.direction === 'long'), + [setupsForSymbol], + ); + + const shortSetup = useMemo( + () => setupsForSymbol?.find((s) => s.direction === 'short'), + [setupsForSymbol], + ); + + // Use the highest-confidence setup for chart overlay fallback. + const tradeSetup: TradeSetup | undefined = useMemo(() => { + const candidates = [longSetup, shortSetup].filter(Boolean) as TradeSetup[]; + if (candidates.length === 0) return undefined; + return candidates.sort((a, b) => (b.confidence_score ?? 0) - (a.confidence_score ?? 0))[0]; + }, [longSetup, shortSetup]); + // Sort visible S/R levels by strength for the table (only levels within chart zones) const sortedLevels = useMemo(() => { if (!srLevels.data?.visible_levels) return []; @@ -167,7 +140,7 @@ export default function TickerDetailPage() {

Ticker Detail