major update
Some checks failed
Deploy / lint (push) Failing after 8s
Deploy / test (push) Has been skipped
Deploy / deploy (push) Has been skipped

This commit is contained in:
Dennis Thiessen
2026-02-27 16:08:09 +01:00
parent 61ab24490d
commit 181cfe6588
71 changed files with 7647 additions and 281 deletions

3
.gitignore vendored
View File

@@ -30,3 +30,6 @@ frontend/dist/
# Alembic
alembic/versions/__pycache__/
# Generated SSL bundle
combined-ca-bundle.pem

116
.kiro/settings/mcp.json Normal file
View File

@@ -0,0 +1,116 @@
{
"mcpServers": {
"context7": {
"gallery": true,
"command": "npx",
"args": [
"-y",
"@upstash/context7-mcp@latest"
],
"env": {
"HTTP_PROXY": "http://aproxy.corproot.net:8080",
"HTTPS_PROXY": "http://aproxy.corproot.net:8080"
},
"type": "stdio"
},
"aws.mcp": {
"command": "uvx",
"timeout": 100000,
"transport": "stdio",
"args": [
"mcp-proxy-for-aws@latest",
"https://aws-mcp.us-east-1.api.aws/mcp"
],
"env": {
"AWS_PROFILE": "409330224121_sc-ps-standard-admin",
"AWS_REGION": "eu-central-2",
"HTTP_PROXY": "http://aproxy.corproot.net:8080",
"HTTPS_PROXY": "http://aproxy.corproot.net:8080",
"SSL_CERT_FILE": "/Users/taathde3/combined-ca-bundle.pem",
"REQUESTS_CA_BUNDLE": "/Users/taathde3/combined-ca-bundle.pem"
},
"disabled": false,
"autoApprove": []
},
"aws.eks.mcp": {
"command": "uvx",
"timeout": 100000,
"transport": "stdio",
"args": [
"mcp-proxy-for-aws@latest",
"https://eks-mcp.eu-central-1.api.aws/mcp",
"--service",
"eks-mcp"
],
"env": {
"AWS_PROFILE": "409330224121_sc-ps-standard-admin",
"AWS_REGION": "eu-central-2",
"HTTP_PROXY": "http://aproxy.corproot.net:8080",
"HTTPS_PROXY": "http://aproxy.corproot.net:8080",
"SSL_CERT_FILE": "/Users/taathde3/combined-ca-bundle.pem",
"REQUESTS_CA_BUNDLE": "/Users/taathde3/combined-ca-bundle.pem"
},
"disabled": false,
"autoApprove": []
},
"aws.ecs.mcp": {
"command": "uvx",
"timeout": 100000,
"transport": "stdio",
"args": [
"mcp-proxy-for-aws@latest",
"https://ecs-mcp.us-east-1.api.aws/mcp",
"--service",
"ecs-mcp"
],
"env": {
"AWS_PROFILE": "409330224121_sc-ps-standard-admin",
"AWS_REGION": "eu-central-2",
"HTTP_PROXY": "http://aproxy.corproot.net:8080",
"HTTPS_PROXY": "http://aproxy.corproot.net:8080",
"SSL_CERT_FILE": "/Users/taathde3/combined-ca-bundle.pem",
"REQUESTS_CA_BUNDLE": "/Users/taathde3/combined-ca-bundle.pem"
},
"disabled": false,
"autoApprove": []
},
"iaws.support.agent": {
"command": "uvx",
"args": [
"mcp-proxy-for-aws@latest",
"https://bedrock-agentcore.eu-central-1.amazonaws.com/runtimes/arn%3Aaws%3Abedrock-agentcore%3Aeu-central-1%3A228864602806%3Aruntime%2Fiaws_support_agent-NvMQxHFf9P/invocations?qualifier=DEFAULT",
"--metadata",
"AWS_REGION=eu-central-1"
],
"env": {
"AWS_PROFILE": "409330224121_sc-ps-standard-admin",
"AWS_REGION": "eu-central-2",
"HTTP_PROXY": "http://aproxy.corproot.net:8080",
"HTTPS_PROXY": "http://aproxy.corproot.net:8080",
"SSL_CERT_FILE": "/Users/taathde3/combined-ca-bundle.pem",
"REQUESTS_CA_BUNDLE": "/Users/taathde3/combined-ca-bundle.pem"
},
"disabled": false
},
"iaws.platform.agent": {
"command": "uvx",
"args": [
"mcp-proxy-for-aws@latest",
"https://bedrock-agentcore.eu-central-1.amazonaws.com/runtimes/arn%3Aaws%3Abedrock-agentcore%3Aeu-central-1%3A228864602806%3Aruntime%2Fiaws_platform_agent-jxCudsFEFj/invocations?qualifier=DEFAULT",
"--metadata",
"AWS_REGION=eu-central-1"
],
"env": {
"AWS_PROFILE": "409330224121_sc-ps-standard-admin",
"AWS_REGION": "eu-central-2",
"HTTP_PROXY": "http://aproxy.corproot.net:8080",
"HTTPS_PROXY": "http://aproxy.corproot.net:8080",
"SSL_CERT_FILE": "/Users/taathde3/combined-ca-bundle.pem",
"REQUESTS_CA_BUNDLE": "/Users/taathde3/combined-ca-bundle.pem"
},
"disabled": false
}
},
"inputs": []
}

View File

@@ -0,0 +1 @@
{"specId": "9b39d94f-51e1-42d3-bacc-68eb3961f2b1", "workflowType": "requirements-first", "specType": "feature"}

View File

@@ -0,0 +1,502 @@
# Design Document — Dashboard Enhancements
## Overview
This design covers four enhancements to the TickerDetailPage dashboard:
1. **Sentiment drill-down** — Store OpenAI reasoning text and web search citations in the DB; expose via API; render in an expandable detail section within SentimentPanel.
2. **Fundamentals drill-down** — Track which FMP endpoints returned 402 (paid-plan-required) and surface those reasons in the API and an expandable detail section within FundamentalsPanel.
3. **TradingView-style chart** — Add mouse-wheel zoom, click-drag pan, and a crosshair overlay with price/date labels to the existing canvas-based CandlestickChart.
4. **S/R clustering** — Cluster nearby S/R levels into zones with aggregated strength, filter to top N, and render as shaded rectangles instead of dashed lines.
All changes are additive to existing components and preserve the glassmorphism UI style.
## Architecture
```mermaid
graph TD
subgraph Backend
OAI[OpenAI Responses API] -->|reasoning + annotations| SP[OpenAISentimentProvider]
SP -->|SentimentData + reasoning + citations| SS[sentiment_service]
SS -->|persist| DB[(PostgreSQL)]
FMP[FMP Stable API] -->|402 metadata| FP[FMPFundamentalProvider]
FP -->|FundamentalData + unavailable_fields| FS[fundamental_service]
FS -->|persist| DB
DB -->|query| SRS[sr_service]
SRS -->|cluster_sr_zones| SRS
SRS -->|SRZone list| SRAPI[/sr-levels endpoint/]
end
subgraph Frontend
SRAPI -->|zones JSON| Chart[CandlestickChart]
Chart -->|canvas render| ZR[Zone rectangles + crosshair + zoom/pan]
SS -->|API| SentAPI[/sentiment endpoint/]
SentAPI -->|reasoning + citations| SPan[SentimentPanel]
SPan -->|expand/collapse| DD1[Detail Section]
FS -->|API| FundAPI[/fundamentals endpoint/]
FundAPI -->|unavailable_fields| FPan[FundamentalsPanel]
FPan -->|expand/collapse| DD2[Detail Section]
end
```
The changes touch four layers:
- **Provider layer** — Extract additional data from external API responses (OpenAI annotations, FMP 402 reasons).
- **Service layer** — Store new fields, add zone clustering logic.
- **API/Schema layer** — Extend response schemas with new fields.
- **Frontend components** — Add interactive chart features and expandable detail sections.
## Components and Interfaces
### 1. Sentiment Provider Changes (`app/providers/openai_sentiment.py`)
The `SentimentData` DTO gains two optional fields:
```python
@dataclass(frozen=True, slots=True)
class SentimentData:
ticker: str
classification: str
confidence: int
source: str
timestamp: datetime
reasoning: str = "" # NEW
citations: list[dict[str, str]] = field(default_factory=list) # NEW: [{"url": ..., "title": ...}]
```
The provider already parses `reasoning` from the JSON response but discards it. The change:
- Return `reasoning` from the parsed JSON in the `SentimentData`.
- Iterate `response.output` items looking for `type == "web_search_call"` output items, then extract URL annotations from the subsequent message content blocks that have `annotations` with `type == "url_citation"`. Each annotation yields `{"url": annotation.url, "title": annotation.title}`.
- If no annotations exist, return an empty list (no error).
### 2. Sentiment DB Model Changes (`app/models/sentiment.py`)
Add two columns to `SentimentScore`:
```python
reasoning: Mapped[str] = mapped_column(Text, nullable=False, default="")
citations_json: Mapped[str] = mapped_column(Text, nullable=False, default="[]")
```
Citations are stored as a JSON-encoded string (list of `{url, title}` dicts). This avoids a separate table for a simple list of links.
Alembic migration adds these two columns with defaults so existing rows are unaffected.
### 3. Sentiment Service Changes (`app/services/sentiment_service.py`)
`store_sentiment()` gains `reasoning: str` and `citations: list[dict]` parameters. It serializes citations to JSON and stores both fields.
### 4. Sentiment Schema Changes (`app/schemas/sentiment.py`)
```python
class CitationItem(BaseModel):
url: str
title: str
class SentimentScoreResult(BaseModel):
id: int
classification: Literal["bullish", "bearish", "neutral"]
confidence: int = Field(ge=0, le=100)
source: str
timestamp: datetime
reasoning: str = "" # NEW
citations: list[CitationItem] = [] # NEW
```
### 5. FMP Provider Changes (`app/providers/fmp.py`)
`FundamentalData` DTO gains an `unavailable_fields` dict:
```python
@dataclass(frozen=True, slots=True)
class FundamentalData:
ticker: str
pe_ratio: float | None
revenue_growth: float | None
earnings_surprise: float | None
market_cap: float | None
fetched_at: datetime
unavailable_fields: dict[str, str] = field(default_factory=dict) # NEW: {"pe_ratio": "requires paid plan", ...}
```
In `_fetch_json_optional`, when a 402 is received, the provider records which fields map to that endpoint. The mapping:
- `ratios-ttm` → `pe_ratio`
- `financial-growth` → `revenue_growth`
- `earnings` → `earnings_surprise`
After all fetches, any field that is `None` AND whose endpoint returned 402 gets an entry in `unavailable_fields`.
### 6. Fundamentals DB Model Changes (`app/models/fundamental.py`)
Add one column:
```python
unavailable_fields_json: Mapped[str] = mapped_column(Text, nullable=False, default="{}")
```
Stored as JSON-encoded `{"field_name": "reason"}` dict.
### 7. Fundamentals Schema Changes (`app/schemas/fundamental.py`)
```python
class FundamentalResponse(BaseModel):
symbol: str
pe_ratio: float | None = None
revenue_growth: float | None = None
earnings_surprise: float | None = None
market_cap: float | None = None
fetched_at: datetime | None = None
unavailable_fields: dict[str, str] = {} # NEW
```
### 8. S/R Zone Clustering (`app/services/sr_service.py`)
New function `cluster_sr_zones()`:
```python
def cluster_sr_zones(
levels: list[dict],
current_price: float,
tolerance: float = 0.02, # 2% default clustering tolerance
max_zones: int | None = None,
) -> list[dict]:
"""Cluster nearby S/R levels into zones.
Returns list of zone dicts:
{
"low": float,
"high": float,
"midpoint": float,
"strength": int, # sum of constituent strengths, capped at 100
"type": "support" | "resistance",
"level_count": int,
}
"""
```
Algorithm:
1. Sort levels by `price_level` ascending.
2. Greedy merge: walk sorted levels; if the next level is within `tolerance` (percentage of the current cluster midpoint) of the current cluster, merge it in. Otherwise, start a new cluster.
3. For each cluster: `low` = min price, `high` = max price, `midpoint` = (low + high) / 2, `strength` = sum of constituent strengths capped at 100.
4. Tag each zone as `"support"` if midpoint < current_price, else `"resistance"`.
5. Sort by strength descending.
6. If `max_zones` is set, return only the top N.
### 9. S/R Schema Changes (`app/schemas/sr_level.py`)
```python
class SRZoneResult(BaseModel):
low: float
high: float
midpoint: float
strength: int = Field(ge=0, le=100)
type: Literal["support", "resistance"]
level_count: int
class SRLevelResponse(BaseModel):
symbol: str
levels: list[SRLevelResult]
zones: list[SRZoneResult] = [] # NEW
count: int
```
### 10. S/R Router Changes (`app/routers/sr_levels.py`)
Add `max_zones` query parameter (default 6). After fetching levels, call `cluster_sr_zones()` and include zones in the response.
### 11. CandlestickChart Enhancements (`frontend/src/components/charts/CandlestickChart.tsx`)
State additions:
- `visibleRange: { start: number, end: number }` — indices into the data array for the currently visible window.
- `isPanning: boolean`, `panStartX: number` — for drag-to-pan.
- `crosshair: { x: number, y: number } | null` — cursor position for crosshair rendering.
New event handlers:
- `onWheel` — Adjust `visibleRange` (zoom in = narrow range, zoom out = widen range). Clamp to min 10 bars, max full dataset.
- `onMouseDown` / `onMouseMove` / `onMouseUp` — When zoomed in, click-drag pans the visible range left/right.
- `onMouseMove` (extended) — Track cursor position for crosshair. Draw vertical + horizontal lines and axis labels.
- `onMouseLeave` — Clear crosshair state.
The `draw()` function changes:
- Use `data.slice(visibleRange.start, visibleRange.end)` instead of full `data`.
- After drawing candles, if `crosshair` is set, draw crosshair lines and labels.
- Replace S/R dashed lines with shaded zone rectangles when `zones` prop is provided.
New prop: `zones?: SRZone[]` (from the API response).
### 12. SentimentPanel Drill-Down (`frontend/src/components/ticker/SentimentPanel.tsx`)
Add `useState<boolean>(false)` for expand/collapse. When expanded, render:
- Reasoning text in a `<p>` block.
- Citations as a list of `<a>` links with title and URL.
Toggle button uses a chevron icon below the summary metrics.
### 13. FundamentalsPanel Drill-Down (`frontend/src/components/ticker/FundamentalsPanel.tsx`)
Add `useState<boolean>(false)` for expand/collapse. Changes:
- When a metric is `null` and `unavailable_fields[field_name]` exists, show the reason text (e.g., "Requires paid plan") in amber instead of "—".
- When expanded, show data source name ("FMP"), fetch timestamp, and a list of unavailable fields with reasons.
### 14. Frontend Type Updates (`frontend/src/lib/types.ts`)
```typescript
// Sentiment additions
export interface CitationItem {
url: string;
title: string;
}
export interface SentimentScore {
id: number;
classification: 'bullish' | 'bearish' | 'neutral';
confidence: number;
source: string;
timestamp: string;
reasoning: string; // NEW
citations: CitationItem[]; // NEW
}
// Fundamentals additions
export interface FundamentalResponse {
symbol: string;
pe_ratio: number | null;
revenue_growth: number | null;
earnings_surprise: number | null;
market_cap: number | null;
fetched_at: string | null;
unavailable_fields: Record<string, string>; // NEW
}
// S/R Zone
export interface SRZone {
low: number;
high: number;
midpoint: number;
strength: number;
type: 'support' | 'resistance';
level_count: number;
}
export interface SRLevelResponse {
symbol: string;
levels: SRLevel[];
zones: SRZone[]; // NEW
count: number;
}
```
## Data Models
### Database Schema Changes
#### `sentiment_scores` table — new columns
| Column | Type | Default | Description |
|--------|------|---------|-------------|
| `reasoning` | TEXT | `""` | AI reasoning text from OpenAI response |
| `citations_json` | TEXT | `"[]"` | JSON array of `{url, title}` citation objects |
#### `fundamental_data` table — new column
| Column | Type | Default | Description |
|--------|------|---------|-------------|
| `unavailable_fields_json` | TEXT | `"{}"` | JSON dict of `{field_name: reason}` for missing data |
No new tables are needed. The S/R zones are computed on-the-fly from existing `sr_levels` rows — they are not persisted.
### Alembic Migration
A single migration file adds the three new columns with server defaults so existing rows are populated automatically:
```python
op.add_column('sentiment_scores', sa.Column('reasoning', sa.Text(), server_default='', nullable=False))
op.add_column('sentiment_scores', sa.Column('citations_json', sa.Text(), server_default='[]', nullable=False))
op.add_column('fundamental_data', sa.Column('unavailable_fields_json', sa.Text(), server_default='{}', nullable=False))
```
## Correctness Properties
*A property is a characteristic or behavior that should hold true across all valid executions of a system — essentially, a formal statement about what the system should do. Properties serve as the bridge between human-readable specifications and machine-verifiable correctness guarantees.*
### Property 1: Sentiment reasoning extraction
*For any* valid OpenAI Responses API response containing a JSON body with a `reasoning` field, the `OpenAISentimentProvider.fetch_sentiment()` method should return a `SentimentData` whose `reasoning` field equals the reasoning string from the parsed JSON.
**Validates: Requirements 1.1**
### Property 2: Sentiment citations extraction
*For any* valid OpenAI Responses API response containing zero or more `url_citation` annotations across its output items, the `OpenAISentimentProvider.fetch_sentiment()` method should return a `SentimentData` whose `citations` list contains exactly the URLs and titles from those annotations, in order. When no annotations exist, the citations list should be empty (no error raised).
**Validates: Requirements 1.2, 1.4**
### Property 3: Sentiment data round-trip
*For any* sentiment record with arbitrary reasoning text and citations list, storing it via `store_sentiment()` and then retrieving it via the `/sentiment/{symbol}` API endpoint should return a response where the latest score's `reasoning` and `citations` fields match the originally stored values.
**Validates: Requirements 1.3**
### Property 4: Expanded sentiment detail displays all data
*For any* `SentimentScore` with non-empty reasoning and a list of citations, when the SentimentPanel detail section is expanded, the rendered output should contain the reasoning text and every citation's title and URL as clickable links.
**Validates: Requirements 2.2, 2.3**
### Property 5: Sentiment detail collapse hides content
*For any* SentimentPanel state where the detail section is expanded, collapsing it should result in the reasoning text and citations being hidden from the DOM while the summary metrics (classification, confidence, dimension score, source count) remain visible.
**Validates: Requirements 2.4**
### Property 6: FMP 402 reason recording
*For any* subset of supplementary FMP endpoints (ratios-ttm, financial-growth, earnings) that return HTTP 402, the `FMPFundamentalProvider.fetch_fundamentals()` method should return a `FundamentalData` whose `unavailable_fields` dict contains an entry for each corresponding metric field name with the reason "requires paid plan".
**Validates: Requirements 3.1**
### Property 7: Fundamentals unavailable_fields round-trip
*For any* fundamental data record with an arbitrary `unavailable_fields` dict, storing it via `store_fundamental()` and retrieving it via the `/fundamentals/{symbol}` API endpoint should return a response whose `unavailable_fields` matches the originally stored dict.
**Validates: Requirements 3.2**
### Property 8: Null field display depends on reason existence
*For any* `FundamentalResponse` where a metric field is null, the FundamentalsPanel should display the reason text from `unavailable_fields` (if present for that field) or a dash character "—" (if no reason exists for that field).
**Validates: Requirements 3.3, 3.4**
### Property 9: Fundamentals expanded detail content
*For any* `FundamentalResponse` with a fetch timestamp and unavailable fields, when the FundamentalsPanel detail section is expanded, the rendered output should contain the data source name, the formatted fetch timestamp, and each unavailable field's name and reason.
**Validates: Requirements 4.2**
### Property 10: Zoom adjusts visible range proportionally
*For any* dataset of length N (N ≥ 10) and any current visible range [start, end], applying a positive wheel delta (zoom in) should produce a new range that is strictly narrower (fewer bars), and applying a negative wheel delta (zoom out) should produce a new range that is strictly wider (more bars), unless already at the limit.
**Validates: Requirements 5.1, 5.2, 5.3**
### Property 11: Pan shifts visible range
*For any* dataset and any visible range that does not cover the full dataset, a horizontal drag of Δx pixels should shift the visible range start and end indices by a proportional amount in the corresponding direction, without changing the range width.
**Validates: Requirements 5.4**
### Property 12: Zoom range invariant
*For any* sequence of zoom and pan operations on a dataset of length N, the visible range should always satisfy: `end - start >= 10` AND `end - start <= N` AND `start >= 0` AND `end <= N`.
**Validates: Requirements 5.5**
### Property 13: Coordinate-to-value mapping
*For any* chart configuration with a visible price range [lo, hi] and visible data slice, the `yToPrice` function should map any y-coordinate within the chart area to a price within [lo, hi], and the `xToBarIndex` function should map any x-coordinate within the chart area to a valid index within the visible data slice.
**Validates: Requirements 6.3, 6.4**
### Property 14: Clustering merges nearby levels
*For any* set of S/R levels and a clustering tolerance T, after calling `cluster_sr_zones()`, no two distinct zones should have midpoints within T percent of each other. Equivalently, all input levels that are within T percent of each other must end up in the same zone.
**Validates: Requirements 7.2**
### Property 15: Zone strength is capped sum
*For any* SR zone produced by `cluster_sr_zones()`, the zone's strength should equal `min(100, sum(constituent_level_strengths))`.
**Validates: Requirements 7.3**
### Property 16: Zone type tagging
*For any* SR zone and current price, the zone's type should be `"support"` if the zone midpoint is less than the current price, and `"resistance"` otherwise.
**Validates: Requirements 7.4**
### Property 17: Zone filtering returns top N by strength
*For any* set of SR zones and a limit N, `cluster_sr_zones(..., max_zones=N)` should return at most N zones, and those zones should be the N zones with the highest strength scores from the full unfiltered set.
**Validates: Requirements 8.2**
## Error Handling
### Backend
| Scenario | Handling |
|----------|----------|
| OpenAI response has no `reasoning` field in JSON | Default to empty string `""` — no error |
| OpenAI response has no `url_citation` annotations | Return empty citations list — no error |
| OpenAI response JSON parse failure | Existing `ProviderError` handling unchanged |
| FMP endpoint returns 402 | Record in `unavailable_fields`, return `None` for that metric — no error |
| FMP profile endpoint fails | Existing `ProviderError` propagation unchanged |
| `citations_json` column contains invalid JSON | Catch `json.JSONDecodeError` in schema serialization, default to `[]` |
| `unavailable_fields_json` column contains invalid JSON | Catch `json.JSONDecodeError`, default to `{}` |
| `cluster_sr_zones()` receives empty levels list | Return empty zones list |
| `max_zones` is 0 or negative | Return empty zones list |
### Frontend
| Scenario | Handling |
|----------|----------|
| `reasoning` is empty string | Detail section shows "No reasoning available" placeholder |
| `citations` is empty array | Detail section omits citations subsection |
| `unavailable_fields` is empty object | All null metrics show "—" as before |
| Chart data has fewer than 10 bars | Disable zoom (show all bars, no zoom controls) |
| Wheel event fires rapidly | Debounce zoom recalculation to 1 frame via `requestAnimationFrame` |
| Zone `low` equals `high` (single-level zone) | Render as a thin line (minimum 2px height rectangle) |
## Testing Strategy
### Property-Based Testing
Library: **Hypothesis** (Python backend), **fast-check** (TypeScript frontend)
Each property test runs a minimum of 100 iterations. Each test is tagged with a comment referencing the design property:
```
# Feature: dashboard-enhancements, Property 14: Clustering merges nearby levels
```
Backend property tests (Hypothesis):
- **Property 1**: Generate random JSON strings with reasoning fields → verify extraction.
- **Property 2**: Generate mock OpenAI response objects with 0–10 annotations → verify citations list.
- **Property 3**: Generate random reasoning + citations → store → retrieve via test client → compare.
- **Property 6**: Generate random 402/200 combinations for 3 endpoints → verify unavailable_fields mapping.
- **Property 7**: Generate random unavailable_fields dicts → store → retrieve → compare.
- **Properties 10–12**: Generate random datasets (10–500 bars) and zoom/pan sequences → verify range invariants.
- **Property 13**: Generate random chart dimensions and price ranges → verify coordinate mapping round-trips.
- **Property 14**: Generate random level sets (1–50 levels, prices 1–1000, strengths 1–100) and tolerances (0.5%–5%) → verify that no two resulting zones lie within the tolerance of each other (i.e., no zones that should have been merged remain separate).
- **Property 15**: Generate random level sets → cluster → verify each zone's strength = min(100, sum).
- **Property 16**: Generate random zones and current prices → verify type tagging.
- **Property 17**: Generate random zone sets and limits → verify top-N selection.
Frontend property tests (fast-check):
- **Property 4**: Generate random reasoning strings and citation lists → render SentimentPanel expanded → verify DOM content.
- **Property 5**: Generate random sentiment data → expand then collapse → verify summary visible, detail hidden.
- **Property 8**: Generate random FundamentalResponse with various null/non-null + reason combinations → verify displayed text.
- **Property 9**: Generate random FundamentalResponse → expand → verify source, timestamp, reasons in DOM.
### Unit Tests
Unit tests cover specific examples and edge cases:
- Sentiment provider with a real-shaped OpenAI response fixture (example for 1.1, 1.2).
- Sentiment provider with no annotations (edge case for 1.4).
- FMP provider with all-402 responses (edge case for 3.1).
- FMP provider with mixed 200/402 responses (example for 3.1).
- SentimentPanel default collapsed state (example for 2.5).
- FundamentalsPanel default collapsed state (example for 4.4).
- Chart with exactly 10 bars — zoom in should be blocked (edge case for 5.5).
- Chart with 1 bar — zoom disabled entirely (edge case).
- Crosshair removed on mouse leave (example for 6.5).
- `cluster_sr_zones()` with empty input (edge case).
- `cluster_sr_zones()` with all levels at the same price (edge case).
- `cluster_sr_zones()` with levels exactly at tolerance boundary (edge case).
- Default max_zones = 6 in the dashboard (example for 8.3).
- Zone with single constituent level (edge case — low == high).

View File

@@ -0,0 +1,124 @@
# Requirements Document
## Introduction
This specification covers four dashboard enhancements for the stock signal platform: sentiment drill-down with OpenAI response details, fundamentals drill-down with missing-data transparency, TradingView-style chart improvements (zoom, crosshair), and S/R level clustering into filterable shaded zones. All changes target the existing TickerDetailPage and its child components, preserving the glassmorphism UI style.
## Glossary
- **Dashboard**: The TickerDetailPage in the React frontend that displays ticker data, charts, scores, sentiment, and fundamentals.
- **Sentiment_Panel**: The SentimentPanel component that displays classification, confidence, dimension score, and source count for a ticker.
- **Fundamentals_Panel**: The FundamentalsPanel component that displays P/E Ratio, Revenue Growth, Earnings Surprise, and Market Cap for a ticker.
- **Chart_Component**: The CandlestickChart canvas-based component that renders OHLCV candlesticks and S/R level overlays.
- **SR_Service**: The backend service (sr_service.py) that detects, scores, merges, and tags support/resistance levels from OHLCV data.
- **Sentiment_Provider**: The OpenAISentimentProvider that calls the OpenAI Responses API with web_search_preview to produce sentiment classifications.
- **FMP_Provider**: The FMPFundamentalProvider that fetches fundamental data from Financial Modeling Prep stable endpoints.
- **SR_Zone**: A price range representing a cluster of nearby S/R levels, displayed as a shaded area on the chart instead of individual lines.
- **Detail_Section**: A collapsible/expandable UI region within a panel that reveals additional information on user interaction.
- **Data_Availability_Indicator**: A visual element within the Fundamentals_Panel that communicates which data fields are unavailable and the reason.
- **Crosshair**: A vertical and horizontal line overlay on the Chart_Component that tracks the cursor position and displays corresponding price and date values.
## Requirements
### Requirement 1: Sentiment Detail Storage
**User Story:** As a developer, I want the backend to store the full OpenAI response details (reasoning text, web search citations, and annotations) alongside the sentiment classification, so that the frontend can display drill-down information.
#### Acceptance Criteria
1. WHEN the Sentiment_Provider receives a response from the OpenAI Responses API, THE Sentiment_Provider SHALL extract and return the reasoning text from the parsed JSON response.
2. WHEN the Sentiment_Provider receives a response containing web_search_preview output items, THE Sentiment_Provider SHALL extract and return the list of source URLs and titles from the search result annotations.
3. THE sentiment API endpoint SHALL include the reasoning text and citations list in the response payload for each sentiment score.
4. IF the OpenAI response contains no annotations or citations, THEN THE Sentiment_Provider SHALL return an empty citations list without raising an error.
### Requirement 2: Sentiment Drill-Down UI
**User Story:** As a user, I want to drill into the sentiment analysis to see the AI reasoning and source citations, so that I can evaluate the quality of the sentiment classification.
#### Acceptance Criteria
1. THE Sentiment_Panel SHALL display a clickable expand/collapse toggle below the summary metrics.
2. WHEN the user expands the Detail_Section, THE Sentiment_Panel SHALL display the reasoning text from the latest sentiment score.
3. WHEN the user expands the Detail_Section and citations are available, THE Sentiment_Panel SHALL display each citation as a clickable link showing the source title and URL.
4. WHEN the user collapses the Detail_Section, THE Sentiment_Panel SHALL hide the reasoning and citations without removing the summary metrics.
5. THE Detail_Section SHALL default to the collapsed state on initial render.
### Requirement 3: Fundamentals Data Availability Transparency
**User Story:** As a user, I want to understand why certain fundamental metrics are missing for a ticker, so that I can distinguish between "data not available from provider" and "data not fetched."
#### Acceptance Criteria
1. WHEN the FMP_Provider receives an HTTP 402 response for a supplementary endpoint, THE FMP_Provider SHALL record the endpoint name and the reason "requires paid plan" in the response metadata.
2. THE fundamentals API endpoint SHALL include a field listing which data fields are unavailable and the corresponding reason for each.
3. WHEN a fundamental metric value is null and a corresponding unavailability reason exists, THE Fundamentals_Panel SHALL display the reason text (e.g., "Requires paid plan") instead of a dash character.
4. WHEN a fundamental metric value is null and no unavailability reason exists, THE Fundamentals_Panel SHALL display a dash character as the placeholder.
### Requirement 4: Fundamentals Drill-Down UI
**User Story:** As a user, I want to drill into the fundamentals data to see additional detail and data source information, so that I can better assess the fundamental metrics.
#### Acceptance Criteria
1. THE Fundamentals_Panel SHALL display a clickable expand/collapse toggle below the summary metrics.
2. WHEN the user expands the Detail_Section, THE Fundamentals_Panel SHALL display the data source name, the fetch timestamp, and any unavailability reasons for missing fields.
3. WHEN the user collapses the Detail_Section, THE Fundamentals_Panel SHALL hide the detail information without removing the summary metrics.
4. THE Detail_Section SHALL default to the collapsed state on initial render.
### Requirement 5: Chart Zoom Capability
**User Story:** As a user, I want to zoom in and out on the candlestick chart, so that I can examine specific time periods in detail or see the full price history.
#### Acceptance Criteria
1. WHEN the user scrolls the mouse wheel over the Chart_Component, THE Chart_Component SHALL zoom in or out by adjusting the visible date range.
2. WHEN the user zooms in, THE Chart_Component SHALL increase the candle width and reduce the number of visible bars proportionally.
3. WHEN the user zooms out, THE Chart_Component SHALL decrease the candle width and increase the number of visible bars proportionally.
4. WHEN the chart is zoomed in, THE Chart_Component SHALL allow the user to pan left and right by clicking and dragging horizontally.
5. THE Chart_Component SHALL constrain zoom limits so that the minimum visible range is 10 bars and the maximum visible range is the full dataset length.
6. THE Chart_Component SHALL re-render S/R overlays correctly at every zoom level.
### Requirement 6: Chart Crosshair
**User Story:** As a user, I want a crosshair overlay on the chart that tracks my cursor, so that I can precisely read price and date values at any point.
#### Acceptance Criteria
1. WHEN the user moves the cursor over the Chart_Component, THE Chart_Component SHALL draw a vertical line at the cursor x-position spanning the full chart height.
2. WHEN the user moves the cursor over the Chart_Component, THE Chart_Component SHALL draw a horizontal line at the cursor y-position spanning the full chart width.
3. THE Chart_Component SHALL display the corresponding price value as a label on the y-axis at the horizontal crosshair position.
4. THE Chart_Component SHALL display the corresponding date value as a label on the x-axis at the vertical crosshair position.
5. WHEN the cursor leaves the Chart_Component, THE Chart_Component SHALL remove the crosshair lines and labels.
### Requirement 7: S/R Level Clustering
**User Story:** As a user, I want nearby S/R levels to be clustered into zones, so that the chart is less cluttered and I can focus on the most significant price areas.
#### Acceptance Criteria
1. THE SR_Service SHALL accept a configurable clustering tolerance parameter that defines the maximum price distance (as a percentage) for grouping levels into a single SR_Zone.
2. WHEN two or more S/R levels fall within the clustering tolerance of each other, THE SR_Service SHALL merge those levels into a single SR_Zone with a price range (low bound, high bound) and an aggregated strength score.
3. THE SR_Service SHALL compute the aggregated strength of an SR_Zone as the sum of constituent level strengths, capped at 100.
4. THE SR_Service SHALL tag each SR_Zone as "support" or "resistance" based on the zone midpoint relative to the current price.
### Requirement 8: S/R Zone Filtering
**User Story:** As a user, I want to see only the strongest S/R zones on the chart, so that I can focus on the most significant price areas.
#### Acceptance Criteria
1. THE S/R API endpoint SHALL accept an optional parameter to limit the number of returned zones.
2. WHEN a zone limit is specified, THE SR_Service SHALL return only the zones with the highest aggregated strength scores, up to the specified limit.
3. THE Dashboard SHALL default to displaying a maximum of 6 SR_Zones on the chart.
### Requirement 9: S/R Zone Chart Rendering
**User Story:** As a user, I want S/R zones displayed as shaded areas on the chart instead of individual lines, so that I can visually identify price ranges of significance.
#### Acceptance Criteria
1. THE Chart_Component SHALL render each SR_Zone as a semi-transparent shaded rectangle spanning the zone price range (low bound to high bound) across the full chart width.
2. THE Chart_Component SHALL use green shading for support zones and red shading for resistance zones.
3. THE Chart_Component SHALL display a label for each SR_Zone showing the zone midpoint price and strength score.
4. THE Chart_Component SHALL render SR_Zones behind the candlestick bodies so that candles remain fully visible.
5. WHEN the chart is zoomed, THE Chart_Component SHALL re-render SR_Zones at the correct vertical positions for the current price scale.

View File

@@ -0,0 +1,219 @@
# Implementation Plan: Dashboard Enhancements
## Overview
Incremental implementation of four dashboard enhancements: sentiment drill-down, fundamentals drill-down, chart zoom/crosshair, and S/R zone clustering. Each feature area is built backend-first (model → service → schema → router) then frontend, with tests alongside implementation. All changes are additive to existing components.
## Tasks
- [x] 1. Sentiment drill-down — backend
- [x] 1.1 Add `reasoning` and `citations_json` columns to `SentimentScore` model and create Alembic migration
- Add `reasoning: Mapped[str] = mapped_column(Text, nullable=False, default="")` and `citations_json: Mapped[str] = mapped_column(Text, nullable=False, default="[]")` to `app/models/sentiment.py`
- Create Alembic migration with `server_default` so existing rows are backfilled
- _Requirements: 1.1, 1.2, 1.3_
- [x] 1.2 Update `OpenAISentimentProvider` to extract reasoning and citations from OpenAI response
- Add `reasoning` and `citations` fields to the `SentimentData` dataclass
- Extract `reasoning` from the parsed JSON response body
- Iterate `response.output` items for `url_citation` annotations, collect `{"url": ..., "title": ...}` dicts
- Return empty citations list when no annotations exist (no error)
- _Requirements: 1.1, 1.2, 1.4_
- [x] 1.3 Update `sentiment_service.store_sentiment()` to persist reasoning and citations
- Accept `reasoning` and `citations` parameters
- Serialize citations to JSON string before storing
- _Requirements: 1.3_
- [x] 1.4 Update sentiment schema and router to include reasoning and citations in API response
- Add `CitationItem` model and `reasoning`/`citations` fields to `SentimentScoreResult` in `app/schemas/sentiment.py`
- Deserialize `citations_json` when building the response, catch `JSONDecodeError` and default to `[]`
- _Requirements: 1.3_
- [ ]* 1.5 Write property tests for sentiment reasoning and citations extraction
- **Property 1: Sentiment reasoning extraction** — Generate random JSON with reasoning fields, verify extraction
- **Validates: Requirements 1.1**
- **Property 2: Sentiment citations extraction** — Generate mock OpenAI responses with 0–10 annotations, verify citations list
- **Validates: Requirements 1.2, 1.4**
- [ ]* 1.6 Write property test for sentiment data round-trip
- **Property 3: Sentiment data round-trip** — Generate random reasoning + citations, store, retrieve via test client, compare
- **Validates: Requirements 1.3**
- [x] 2. Sentiment drill-down — frontend
- [x] 2.1 Add `CitationItem`, `reasoning`, and `citations` fields to `SentimentScore` type in `frontend/src/lib/types.ts`
- _Requirements: 1.3, 2.2, 2.3_
- [x] 2.2 Add expandable detail section to `SentimentPanel`
- Add `useState<boolean>(false)` for expand/collapse toggle
- Render chevron toggle button below summary metrics
- When expanded: show reasoning text (or "No reasoning available" placeholder if empty) and citations as clickable `<a>` links
- When collapsed: hide reasoning and citations, keep summary metrics visible
- Default to collapsed state on initial render
- Preserve glassmorphism UI style
- _Requirements: 2.1, 2.2, 2.3, 2.4, 2.5_
- [ ]* 2.3 Write property tests for SentimentPanel drill-down
- **Property 4: Expanded sentiment detail displays all data** — Generate random reasoning and citations, render expanded, verify DOM content
- **Validates: Requirements 2.2, 2.3**
- **Property 5: Sentiment detail collapse hides content** — Expand then collapse, verify summary visible and detail hidden
- **Validates: Requirements 2.4**
- [x] 3. Checkpoint — Sentiment drill-down complete
- Ensure all tests pass, ask the user if questions arise.
- [x] 4. Fundamentals drill-down — backend
- [x] 4.1 Add `unavailable_fields_json` column to fundamental model and create Alembic migration
- Add `unavailable_fields_json: Mapped[str] = mapped_column(Text, nullable=False, default="{}")` to `app/models/fundamental.py`
- Add column to the same Alembic migration as sentiment columns (or a new one if migration 1.1 is already applied), with `server_default='{}'`
- _Requirements: 3.1, 3.2_
- [x] 4.2 Update `FMPFundamentalProvider` to record 402 reasons in `unavailable_fields`
- Add `unavailable_fields` field to `FundamentalData` dataclass
- In `_fetch_json_optional`, when HTTP 402 is received, map endpoint to field name: `ratios-ttm``pe_ratio`, `financial-growth``revenue_growth`, `earnings``earnings_surprise`
- Record `"requires paid plan"` as the reason for each affected field
- _Requirements: 3.1_
- [x] 4.3 Update `fundamental_service` to persist `unavailable_fields`
- Serialize `unavailable_fields` dict to JSON string before storing
- _Requirements: 3.2_
- [x] 4.4 Update fundamentals schema and router to include `unavailable_fields` in API response
- Add `unavailable_fields: dict[str, str] = {}` to `FundamentalResponse` in `app/schemas/fundamental.py`
- Deserialize `unavailable_fields_json` when building the response, catch `JSONDecodeError` and default to `{}`
- _Requirements: 3.2_
- [ ]* 4.5 Write property tests for FMP 402 reason recording and round-trip
- **Property 6: FMP 402 reason recording** — Generate random 402/200 combinations for 3 endpoints, verify unavailable_fields mapping
- **Validates: Requirements 3.1**
- **Property 7: Fundamentals unavailable_fields round-trip** — Generate random dicts, store, retrieve, compare
- **Validates: Requirements 3.2**
- [x] 5. Fundamentals drill-down — frontend
- [x] 5.1 Add `unavailable_fields` to `FundamentalResponse` type in `frontend/src/lib/types.ts`
- _Requirements: 3.2, 3.3_
- [x] 5.2 Update `FundamentalsPanel` to show unavailability reasons and expandable detail section
- When a metric is null and `unavailable_fields[field_name]` exists, display reason text in amber instead of "—"
- When a metric is null and no reason exists, display "—"
- Add expand/collapse toggle below summary metrics (default collapsed)
- When expanded: show data source name ("FMP"), fetch timestamp, and list of unavailable fields with reasons
- When collapsed: hide detail, keep summary metrics visible
- Preserve glassmorphism UI style
- _Requirements: 3.3, 3.4, 4.1, 4.2, 4.3, 4.4_
- [ ]* 5.3 Write property tests for FundamentalsPanel display logic
- **Property 8: Null field display depends on reason existence** — Generate random FundamentalResponse with various null/reason combos, verify displayed text
- **Validates: Requirements 3.3, 3.4**
- **Property 9: Fundamentals expanded detail content** — Generate random response, expand, verify source/timestamp/reasons in DOM
- **Validates: Requirements 4.2**
- [x] 6. Checkpoint — Fundamentals drill-down complete
- Ensure all tests pass, ask the user if questions arise.
- [x] 7. S/R zone clustering — backend
- [x] 7.1 Implement `cluster_sr_zones()` function in `app/services/sr_service.py`
- Sort levels by price ascending
- Greedy merge: walk sorted levels, merge if within tolerance % of current cluster midpoint
- Compute zone: low, high, midpoint, strength (sum capped at 100), level_count
- Tag zone type: "support" if midpoint < current_price, else "resistance"
- Sort by strength descending
- If `max_zones` set, return top N; if 0 or negative, return empty list
- Handle empty input by returning empty list
- _Requirements: 7.1, 7.2, 7.3, 7.4, 8.2_
- [x] 7.2 Add `SRZoneResult` schema and update `SRLevelResponse` in `app/schemas/sr_level.py`
- Add `SRZoneResult` model with `low`, `high`, `midpoint`, `strength`, `type`, `level_count`
- Add `zones: list[SRZoneResult] = []` to `SRLevelResponse`
- _Requirements: 7.2, 9.1_
- [x] 7.3 Update S/R router to accept `max_zones` parameter and return zones
- Add `max_zones: int = 6` query parameter to the S/R levels endpoint
- Call `cluster_sr_zones()` with fetched levels and current price
- Include zones in the response
- _Requirements: 8.1, 8.3_
- [ ]* 7.4 Write property tests for S/R zone clustering
- **Property 14: Clustering merges nearby levels** — Generate random level sets and tolerances, verify no two zones have midpoints within tolerance
- **Validates: Requirements 7.2**
- **Property 15: Zone strength is capped sum** — Generate random level sets, cluster, verify strength = min(100, sum)
- **Validates: Requirements 7.3**
- **Property 16: Zone type tagging** — Generate random zones and current prices, verify support/resistance tagging
- **Validates: Requirements 7.4**
- **Property 17: Zone filtering returns top N by strength** — Generate random zone sets and limits, verify top-N selection
- **Validates: Requirements 8.2**
- [x] 8. Checkpoint — S/R clustering backend complete
- Ensure all tests pass, ask the user if questions arise.
- [x] 9. Chart enhancements — zoom and pan
- [x] 9.1 Add `SRZone` and `SRLevelResponse.zones` types to `frontend/src/lib/types.ts`
- _Requirements: 9.1_
- [x] 9.2 Implement zoom (mouse wheel) on `CandlestickChart`
- Add `visibleRange: { start: number, end: number }` state initialized to full dataset
- Add `onWheel` handler: positive delta narrows range (zoom in), negative widens (zoom out)
- Clamp visible range to min 10 bars, max full dataset length
- Disable zoom if dataset has fewer than 10 bars
- Slice data by `visibleRange` for rendering
- Debounce zoom via `requestAnimationFrame`
- _Requirements: 5.1, 5.2, 5.3, 5.5_
- [x] 9.3 Implement pan (click-drag) on `CandlestickChart`
- Add `isPanning` and `panStartX` state
- `onMouseDown` starts pan, `onMouseMove` shifts visible range proportionally, `onMouseUp` ends pan
- Pan only active when zoomed in (visible range < full dataset)
- Clamp range to dataset bounds
- _Requirements: 5.4_
- [x] 9.4 Implement crosshair overlay on `CandlestickChart`
- Add `crosshair: { x: number, y: number } | null` state
- `onMouseMove` updates crosshair position
- Draw vertical line at cursor x spanning full chart height
- Draw horizontal line at cursor y spanning full chart width
- Display price label on y-axis at horizontal line position
- Display date label on x-axis at vertical line position
- `onMouseLeave` clears crosshair
- _Requirements: 6.1, 6.2, 6.3, 6.4, 6.5_
- [ ]* 9.5 Write property tests for chart zoom/pan invariants
- **Property 10: Zoom adjusts visible range proportionally** — Generate random datasets and wheel deltas, verify range narrows/widens
- **Validates: Requirements 5.1, 5.2, 5.3**
- **Property 11: Pan shifts visible range** — Generate random ranges and drag deltas, verify shift without width change
- **Validates: Requirements 5.4**
- **Property 12: Zoom range invariant** — Generate random zoom/pan sequences, verify range bounds always valid
- **Validates: Requirements 5.5**
- **Property 13: Coordinate-to-value mapping** — Generate random chart configs, verify yToPrice and xToBarIndex mappings
- **Validates: Requirements 6.3, 6.4**
- [x] 10. S/R zone rendering on chart
- [x] 10.1 Update `CandlestickChart` to accept `zones` prop and render shaded zone rectangles
- Accept `zones?: SRZone[]` prop
- Render each zone as a semi-transparent rectangle spanning low→high price range across full chart width
- Use green shading (rgba) for support zones, red shading for resistance zones
- Draw zones behind candlestick bodies (render zones first, then candles)
- Display label with midpoint price and strength score for each zone
- Re-render zones correctly at every zoom level using current price scale
- _Requirements: 9.1, 9.2, 9.3, 9.4, 9.5_
- [x] 10.2 Update `SROverlay` and `TickerDetailPage` to pass zones to `CandlestickChart`
- Update `useTickerDetail` hook or `SROverlay` to extract zones from the S/R API response
- Pass zones array to `CandlestickChart` component
- Default to max 6 zones (handled by backend `max_zones=6` default)
- _Requirements: 8.3, 9.1_
- [x] 10.3 Ensure S/R overlays re-render correctly at all zoom levels
- Verify zone rectangles reposition when zoom/pan changes the visible price scale
- Handle single-level zones (low == high) as thin 2px-height rectangles
- _Requirements: 5.6, 9.5_
- [x] 11. Final checkpoint — All features integrated
- Ensure all tests pass, ask the user if questions arise.
## Notes
- Tasks marked with `*` are optional and can be skipped for faster MVP
- Each task references specific requirements for traceability
- Checkpoints ensure incremental validation after each feature area
- Property tests validate universal correctness properties from the design document
- The Alembic migration for sentiment and fundamentals columns should ideally be a single migration file
- S/R zones are computed on-the-fly (not persisted), so no additional migration is needed for zones

View File

@@ -0,0 +1 @@
{"specId": "997fa90b-08bc-4b72-b099-ecc0ad611b06", "workflowType": "requirements-first", "specType": "bugfix"}

View File

@@ -0,0 +1,39 @@
# Bugfix Requirements Document
## Introduction
The R:R scanner's `scan_ticker` function selects trade setup targets by picking whichever S/R level yields the highest R:R ratio. Because R:R = reward / risk and risk is fixed (ATR-based stop), this always favors the most distant S/R level. The result is unrealistic trade setups targeting far-away levels that price is unlikely to reach. The scanner should instead select the highest-quality target by balancing R:R ratio with level strength and proximity to current price.
## Bug Analysis
### Current Behavior (Defect)
1.1 WHEN scanning for long setups THEN the system iterates all resistance levels above entry price and selects the one with the maximum R:R ratio, which is always the most distant level since risk is fixed
1.2 WHEN scanning for short setups THEN the system iterates all support levels below entry price and selects the one with the maximum R:R ratio, which is always the most distant level since risk is fixed
1.3 WHEN multiple S/R levels exist at varying distances with different strength values THEN the system ignores the `strength` field entirely and selects based solely on R:R magnitude
1.4 WHEN a weak, distant S/R level exists alongside a strong, nearby S/R level THEN the system selects the weak distant level because it produces a higher R:R ratio, resulting in an unrealistic trade setup
### Expected Behavior (Correct)
2.1 WHEN scanning for long setups THEN the system SHALL compute a quality score for each candidate resistance level that factors in R:R ratio, S/R level strength, and proximity to entry price, and select the level with the highest quality score
2.2 WHEN scanning for short setups THEN the system SHALL compute a quality score for each candidate support level that factors in R:R ratio, S/R level strength, and proximity to entry price, and select the level with the highest quality score
2.3 WHEN multiple S/R levels exist at varying distances with different strength values THEN the system SHALL weight stronger levels higher in the quality score, favoring targets that price is more likely to reach
2.4 WHEN a weak, distant S/R level exists alongside a strong, nearby S/R level THEN the system SHALL prefer the strong nearby level unless the distant level's combined quality score (considering its lower proximity and strength factors) still exceeds the nearby level's score
### Unchanged Behavior (Regression Prevention)
3.1 WHEN no S/R levels exist above entry price for longs (or below for shorts) THEN the system SHALL CONTINUE TO produce no setup for that direction
3.2 WHEN no candidate level meets the R:R threshold THEN the system SHALL CONTINUE TO produce no setup for that direction
3.3 WHEN only one S/R level exists in the target direction THEN the system SHALL CONTINUE TO evaluate it against the R:R threshold and produce a setup if it qualifies
3.4 WHEN scanning all tickers THEN the system SHALL CONTINUE TO process each ticker independently and persist results to the database
3.5 WHEN fetching stored trade setups THEN the system SHALL CONTINUE TO return them sorted by R:R ratio descending with composite score as secondary sort

View File

@@ -0,0 +1,209 @@
# R:R Scanner Target Quality Bugfix Design
## Overview
The `scan_ticker` function in `app/services/rr_scanner_service.py` selects trade setup targets by iterating candidate S/R levels and picking the one with the highest R:R ratio. Because risk is fixed (ATR × multiplier), R:R is a monotonically increasing function of distance from entry price. This means the scanner always selects the most distant S/R level, producing unrealistic trade setups.
The fix replaces the `max(rr)` selection with a quality score that balances three factors: R:R ratio, S/R level strength (0–100), and proximity to current price. The quality score is computed as a weighted sum of normalized components, and the candidate with the highest quality score is selected as the target.
## Glossary
- **Bug_Condition (C)**: Multiple candidate S/R levels exist in the target direction, and the current code selects the most distant one purely because it has the highest R:R ratio, ignoring strength and proximity
- **Property (P)**: The scanner should select the candidate with the highest quality score (a weighted combination of R:R ratio, strength, and proximity) rather than the highest raw R:R ratio
- **Preservation**: All behavior for single-candidate scenarios, no-candidate scenarios, R:R threshold filtering, database persistence, and `get_trade_setups` sorting must remain unchanged
- **scan_ticker**: The function in `app/services/rr_scanner_service.py` that scans a single ticker for long and short trade setups
- **SRLevel.strength**: An integer 0–100 representing how many times price has touched this level relative to total bars (computed by `sr_service._strength_from_touches`)
- **quality_score**: New scoring metric: `w_rr * norm_rr + w_strength * norm_strength + w_proximity * norm_proximity`
## Bug Details
### Fault Condition
The bug manifests when multiple S/R levels exist in the target direction (above entry for longs, below entry for shorts) and the scanner selects the most distant level because it has the highest R:R ratio, even though a closer, stronger level would be a more realistic target.
**Formal Specification:**
```
FUNCTION isBugCondition(input)
INPUT: input of type {entry_price, risk, candidate_levels: list[{price_level, strength}]}
OUTPUT: boolean
candidates := [lv for lv in candidate_levels where reward(lv) / risk >= rr_threshold]
IF len(candidates) < 2 THEN RETURN false
max_rr_level := argmax(candidates, key=lambda lv: reward(lv) / risk)
max_quality_level := argmax(candidates, key=lambda lv: quality_score(lv, entry_price, risk))
RETURN max_rr_level != max_quality_level
END FUNCTION
```
### Examples
- **Long, 2 resistance levels**: Entry=100, ATR-stop=97 (risk=3). Level A: price=103, strength=80 (R:R=1.0). Level B: price=115, strength=10 (R:R=5.0). Current code picks B (highest R:R). Expected: picks A (strong, nearby, realistic).
- **Long, 3 resistance levels**: Entry=50, risk=2. Level A: price=53, strength=90 (R:R=1.5). Level B: price=58, strength=40 (R:R=4.0). Level C: price=70, strength=5 (R:R=10.0). Current code picks C. Expected: picks A or B depending on quality weights.
- **Short, 2 support levels**: Entry=200, risk=5. Level A: price=192, strength=70 (R:R=1.6). Level B: price=170, strength=15 (R:R=6.0). Current code picks B. Expected: picks A.
- **Single candidate (no bug)**: Entry=100, risk=3. Only Level A: price=106, strength=50 (R:R=2.0). Both old and new code select A — no divergence.
## Expected Behavior
### Preservation Requirements
**Unchanged Behaviors:**
- When no S/R levels exist in the target direction, no setup is produced for that direction
- When no candidate level meets the R:R threshold, no setup is produced
- When only one S/R level exists in the target direction, it is evaluated against the R:R threshold and used if it qualifies
- `scan_all_tickers` processes each ticker independently; one failure does not stop others
- `get_trade_setups` returns results sorted by R:R ratio descending with composite score as secondary sort
- Database persistence: old setups are deleted and new ones inserted per ticker
- ATR computation, OHLCV fetching, and stop-loss calculation remain unchanged
- The TradeSetup model fields and their rounding (4 decimal places) remain unchanged
**Scope:**
All inputs where only zero or one candidate S/R levels exist in the target direction are completely unaffected by this fix. The fix only changes the selection logic when multiple qualifying candidates exist.
## Hypothesized Root Cause
Based on the bug description, the root cause is straightforward:
1. **Selection by max R:R only**: The inner loop in `scan_ticker` tracks `best_rr` and `best_target`, selecting whichever level produces the highest `rr = reward / risk`. Since `risk` is constant (ATR-based), `rr` is proportional to distance. The code has no mechanism to factor in `SRLevel.strength` or proximity.
2. **No quality scoring exists**: The `SRLevel.strength` field (0–100) is available in the database and loaded by the query, but the selection loop never reads it. There is no quality score computation anywhere in the codebase.
3. **No proximity normalization**: Distance from entry is used only to compute reward, never as a penalty. Closer levels are always disadvantaged.
## Correctness Properties
Property 1: Fault Condition - Quality Score Selection Replaces Max R:R
_For any_ input where multiple candidate S/R levels exist in the target direction and meet the R:R threshold, the fixed `scan_ticker` function SHALL select the candidate with the highest quality score (weighted combination of normalized R:R, normalized strength, and normalized proximity) rather than the candidate with the highest raw R:R ratio.
**Validates: Requirements 2.1, 2.2, 2.3, 2.4**
Property 2: Preservation - Single/Zero Candidate Behavior Unchanged
_For any_ input where zero or one candidate S/R levels exist in the target direction, the fixed `scan_ticker` function SHALL produce the same result as the original function, preserving the existing filtering, persistence, and output behavior.
**Validates: Requirements 3.1, 3.2, 3.3, 3.4, 3.5**
## Fix Implementation
### Changes Required
Assuming our root cause analysis is correct:
**File**: `app/services/rr_scanner_service.py`
**Function**: `scan_ticker`
**Specific Changes**:
1. **Add `_compute_quality_score` helper function**: A new module-level function that computes the quality score for a candidate S/R level given entry price, risk, and configurable weights.
```python
def _compute_quality_score(
rr: float,
strength: int,
distance: float,
entry_price: float,
*,
w_rr: float = 0.35,
w_strength: float = 0.35,
w_proximity: float = 0.30,
rr_cap: float = 10.0,
) -> float:
norm_rr = min(rr / rr_cap, 1.0)
norm_strength = strength / 100.0
norm_proximity = 1.0 - min(distance / entry_price, 1.0)
return w_rr * norm_rr + w_strength * norm_strength + w_proximity * norm_proximity
```
- `norm_rr`: R:R capped at `rr_cap` (default 10) and divided to get a 0–1 range
- `norm_strength`: Strength divided by 100 (already a 0–100 integer)
- `norm_proximity`: `1 - (distance / entry_price)`, so closer levels score higher
- Default weights: 0.35 R:R, 0.35 strength, 0.30 proximity (sum = 1.0)
2. **Replace long setup selection loop**: Instead of tracking `best_rr` / `best_target`, iterate candidates, compute quality score for each, and track `best_quality` / `best_candidate`. Still filter by `rr >= rr_threshold` before scoring. Store the selected level's R:R in the TradeSetup (not the quality score — R:R remains the reported metric).
3. **Replace short setup selection loop**: Same change as longs but for levels below entry.
4. **Pass `SRLevel` object through selection**: The loop already has access to `lv.strength` from the query. No additional DB queries needed.
5. **No changes to `get_trade_setups`**: Sorting by `rr_ratio` descending remains. The `rr_ratio` stored in TradeSetup is the actual R:R of the selected level, not the quality score.
## Testing Strategy
### Validation Approach
The testing strategy follows a two-phase approach: first, surface counterexamples that demonstrate the bug on unfixed code, then verify the fix works correctly and preserves existing behavior.
### Exploratory Fault Condition Checking
**Goal**: Surface counterexamples that demonstrate the bug BEFORE implementing the fix. Confirm or refute the root cause analysis. If we refute, we will need to re-hypothesize.
**Test Plan**: Create mock scenarios with multiple S/R levels of varying strength and distance. Run `scan_ticker` on unfixed code and assert that the selected target is NOT the most distant level. These tests will fail on unfixed code, confirming the bug.
**Test Cases**:
1. **Long with strong-near vs weak-far**: Entry=100, risk=3. Near level (103, strength=80) vs far level (115, strength=10). Assert selected target != 115 (will fail on unfixed code)
2. **Short with strong-near vs weak-far**: Entry=200, risk=5. Near level (192, strength=70) vs far level (170, strength=15). Assert selected target != 170 (will fail on unfixed code)
3. **Three candidates with varying profiles**: Entry=50, risk=2. Three levels at different distances/strengths. Assert selection is not purely distance-based (will fail on unfixed code)
**Expected Counterexamples**:
- The unfixed code always selects the most distant level regardless of strength
- Root cause confirmed: selection loop only tracks `best_rr` which is proportional to distance
### Fix Checking
**Goal**: Verify that for all inputs where the bug condition holds, the fixed function produces the expected behavior.
**Pseudocode:**
```
FOR ALL input WHERE isBugCondition(input) DO
result := scan_ticker_fixed(input)
selected_level := result.target
ASSERT selected_level == argmax(candidates, key=quality_score)
ASSERT quality_score(selected_level) >= quality_score(any_other_candidate)
END FOR
```
### Preservation Checking
**Goal**: Verify that for all inputs where the bug condition does NOT hold, the fixed function produces the same result as the original function.
**Pseudocode:**
```
FOR ALL input WHERE NOT isBugCondition(input) DO
ASSERT scan_ticker_original(input) == scan_ticker_fixed(input)
END FOR
```
**Testing Approach**: Property-based testing is recommended for preservation checking because:
- It generates many test cases automatically across the input domain
- It catches edge cases that manual unit tests might miss
- It provides strong guarantees that behavior is unchanged for all non-buggy inputs
**Test Plan**: Observe behavior on UNFIXED code first for zero-candidate and single-candidate scenarios, then write property-based tests capturing that behavior.
**Test Cases**:
1. **Zero candidates preservation**: Generate random tickers with no S/R levels in target direction. Verify no setup is produced (same as original).
2. **Single candidate preservation**: Generate random tickers with exactly one qualifying S/R level. Verify same setup is produced as original.
3. **Below-threshold preservation**: Generate random tickers where all candidates have R:R below threshold. Verify no setup is produced.
4. **Database persistence preservation**: Verify old setups are deleted and new ones inserted identically.
### Unit Tests
- Test `_compute_quality_score` with known inputs and verify output matches expected formula
- Test that quality score components are properly normalized to the 0–1 range
- Test that `rr_cap` correctly caps the R:R normalization
- Test edge cases: strength=0, strength=100, distance=0, single candidate
### Property-Based Tests
- Generate random sets of S/R levels with varying strengths and distances; verify the selected target always has the highest quality score among candidates
- Generate random single-candidate scenarios; verify output matches what the original function would produce
- Generate random inputs with all candidates below R:R threshold; verify no setup is produced
### Integration Tests
- Test full `scan_ticker` flow with mocked DB containing multiple S/R levels of varying quality
- Test `scan_all_tickers` still processes each ticker independently
- Test that `get_trade_setups` returns correct sorting after fix

View File

@@ -0,0 +1,35 @@
# Tasks
## 1. Add quality score helper function
- [x] 1.1 Create `_compute_quality_score(rr, strength, distance, entry_price, *, w_rr=0.35, w_strength=0.35, w_proximity=0.30, rr_cap=10.0) -> float` function in `app/services/rr_scanner_service.py` that computes a weighted sum of normalized R:R, normalized strength, and normalized proximity
- [x] 1.2 Implement normalization: `norm_rr = min(rr / rr_cap, 1.0)`, `norm_strength = strength / 100.0`, `norm_proximity = 1.0 - min(distance / entry_price, 1.0)`
- [x] 1.3 Return `w_rr * norm_rr + w_strength * norm_strength + w_proximity * norm_proximity`
## 2. Replace long setup selection logic
- [x] 2.1 In `scan_ticker`, replace the long setup loop that tracks `best_rr` / `best_target` with a loop that computes `quality_score` for each candidate via `_compute_quality_score` and tracks `best_quality` / `best_candidate_rr` / `best_candidate_target`
- [x] 2.2 Keep the `rr >= rr_threshold` filter — only candidates meeting the threshold are scored
- [x] 2.3 Store the selected candidate's actual R:R ratio (not the quality score) in `TradeSetup.rr_ratio`
## 3. Replace short setup selection logic
- [x] 3.1 Apply the same quality-score selection change to the short setup loop, mirroring the long setup changes
- [x] 3.2 Ensure distance is computed as `entry_price - lv.price_level` for short candidates
## 4. Write unit tests for `_compute_quality_score`
- [x] 4.1 Create `tests/unit/test_rr_scanner_quality_score.py` with tests for known inputs verifying the formula output
- [x] 4.2 Test edge cases: strength=0, strength=100, distance=0, rr at cap, rr above cap
- [x] 4.3 Test that all normalized components stay in the 0–1 range
## 5. Write exploratory bug-condition tests (run on unfixed code to confirm bug)
- [x] 5.1 [PBT-exploration] Create `tests/unit/test_rr_scanner_bug_exploration.py` with a property test that generates multiple S/R levels with varying strengths and distances, calls `scan_ticker`, and asserts the selected target is NOT always the most distant level — expected to FAIL on unfixed code, confirming the bug
## 6. Write fix-checking tests
- [x] 6.1 [PBT-fix] Create `tests/unit/test_rr_scanner_fix_check.py` with a property test that generates multiple candidate S/R levels meeting the R:R threshold, calls `scan_ticker` on fixed code, and asserts the selected target has the highest quality score among all candidates
## 7. Write preservation tests
- [x] 7.1 [PBT-preservation] Create `tests/unit/test_rr_scanner_preservation.py` with a property test that generates zero-candidate and single-candidate scenarios and asserts the fixed function produces the same output as the original (no setup for zero candidates, same setup for single candidate)
- [x] 7.2 Add unit test verifying that when no S/R levels exist, no setup is produced (unchanged)
- [x] 7.3 Add unit test verifying that when only one candidate meets threshold, it is selected (unchanged)
- [x] 7.4 Add unit test verifying `get_trade_setups` sorting is unchanged (R:R desc, composite desc)
## 8. Integration test
- [x] 8.1 Add integration test in `tests/unit/test_rr_scanner_integration.py` that mocks DB with multiple S/R levels of varying quality, runs `scan_ticker`, and verifies the full flow: quality-based selection, correct TradeSetup fields, database persistence

View File

@@ -0,0 +1 @@
{"specId": "9b39d94f-51e1-42d3-bacc-68eb3961f2b1", "workflowType": "requirements-first", "specType": "feature"}

View File

@@ -0,0 +1,351 @@
# Design Document: Score Transparency & Trade Overlay
## Overview
This feature extends the stock signal platform in two areas:
1. **Score Transparency** — The scoring API and UI are enhanced to expose the full breakdown of how each dimension score and the composite score are calculated. Each dimension returns its sub-scores, raw input values, weights, and formula descriptions. The frontend renders expandable panels showing this detail.
2. **Trade Setup Chart Overlay** — When a trade setup exists for a ticker (from the R:R scanner), the candlestick chart renders colored zones for entry, stop-loss, and take-profit levels. The ticker detail page fetches trade data and passes it to the chart.
Both features are additive — they extend existing API responses and UI components without breaking current behavior.
## Architecture
The changes follow the existing layered architecture:
```
┌─────────────────────────────────────────────────────┐
│ Frontend (React) │
│ ┌──────────────┐ ┌──────────────┐ ┌───────────┐ │
│ │ ScoreCard │ │ Dimension │ │Candlestick│ │
│ │ (composite │ │ Panel │ │Chart │ │
│ │ weights) │ │ (breakdowns) │ │(trade │ │
│ └──────┬───────┘ └──────┬───────┘ │ overlay) │ │
│ │ │ └─────┬─────┘ │
│ ┌──────┴─────────────────┴────────────────┴─────┐ │
│ │ useTickerDetail + useTrades │ │
│ └──────────────────┬────────────────────────────┘ │
└─────────────────────┼───────────────────────────────┘
│ HTTP
┌─────────────────────┼───────────────────────────────┐
│ Backend (FastAPI) │
│ ┌──────────────────┴────────────────────────────┐ │
│ │ scores router │ │
│ │ GET /api/v1/scores/{symbol} │ │
│ │ (extended response with breakdowns) │ │
│ └──────────────────┬────────────────────────────┘ │
│ ┌──────────────────┴────────────────────────────┐ │
│ │ scoring_service.py │ │
│ │ _compute_*_score → returns ScoreBreakdown │ │
│ │ get_score → assembles full breakdown response │ │
│ └───────────────────────────────────────────────┘ │
└─────────────────────────────────────────────────────┘
```
Key design decisions:
- **Backend-driven breakdowns**: Each `_compute_*_score` function is refactored to return a `ScoreBreakdown` dict alongside the numeric score, rather than computing breakdowns separately. This ensures the breakdown always matches the actual score.
- **Single API call**: The existing `GET /api/v1/scores/{symbol}` endpoint is extended (not a new endpoint) to include breakdowns in the response. This avoids extra round-trips.
- **Trade overlay via props**: The `CandlestickChart` component receives an optional `tradeSetup` prop. The chart draws overlay elements using the existing canvas rendering pipeline — no new library needed.
- **Trade data reuse**: The frontend reuses the existing `useTrades` hook and trades API. The `TickerDetailPage` filters for the current symbol client-side.
## Components and Interfaces
### Backend
#### Modified: `scoring_service.py` — Dimension compute functions
Each `_compute_*_score` function changes from returning `float | None` to returning a tuple `(float | None, ScoreBreakdown | None)` where `ScoreBreakdown` is a typed dict:
```python
class SubScoreDetail(TypedDict):
name: str
score: float
weight: float
raw_value: float | str | None
description: str
class ScoreBreakdown(TypedDict):
sub_scores: list[SubScoreDetail]
formula: str
unavailable: list[dict[str, str]] # [{"name": ..., "reason": ...}]
```
- `_compute_technical_score` → returns sub-scores for ADX (0.4), EMA (0.3), RSI (0.3) with raw indicator values
- `_compute_sentiment_score` → returns record count, decay rate, lookback window, weighted average formula
- `_compute_fundamental_score` → returns PE Ratio, Revenue Growth, Earnings Surprise sub-scores with raw values
- `_compute_momentum_score` → returns 5-day ROC (0.5), 20-day ROC (0.5) with raw percentages
- `_compute_sr_quality_score` → returns strong count (max 40), proximity (max 30), avg strength (max 30) with inputs
#### Modified: `scoring_service.py` — `get_score`
Assembles the full response including breakdowns per dimension and composite weight info (available vs missing dimensions, re-normalized weights).
#### Modified: `app/schemas/score.py`
New Pydantic models:
```python
class SubScoreResponse(BaseModel):
name: str
score: float
weight: float
raw_value: float | str | None = None
description: str = ""
class ScoreBreakdownResponse(BaseModel):
sub_scores: list[SubScoreResponse]
formula: str
unavailable: list[dict[str, str]] = []
class DimensionScoreResponse(BaseModel): # extended
dimension: str
score: float
is_stale: bool
computed_at: datetime | None = None
breakdown: ScoreBreakdownResponse | None = None # NEW
class CompositeBreakdownResponse(BaseModel):
weights: dict[str, float]
available_dimensions: list[str]
missing_dimensions: list[str]
renormalized_weights: dict[str, float]
formula: str
class ScoreResponse(BaseModel): # extended
# ... existing fields ...
composite_breakdown: CompositeBreakdownResponse | None = None # NEW
```
#### Modified: `app/routers/scores.py`
The `read_score` endpoint populates the new breakdown fields from the service response.
### Frontend
#### Modified: `frontend/src/lib/types.ts`
New TypeScript types:
```typescript
interface SubScore {
name: string;
score: number;
weight: number;
raw_value: number | string | null;
description: string;
}
interface ScoreBreakdown {
sub_scores: SubScore[];
formula: string;
unavailable: { name: string; reason: string }[];
}
interface CompositeBreakdown {
weights: Record<string, number>;
available_dimensions: string[];
missing_dimensions: string[];
renormalized_weights: Record<string, number>;
formula: string;
}
```
Extended existing types:
- `DimensionScoreDetail` gains `breakdown?: ScoreBreakdown`
- `ScoreResponse` gains `composite_breakdown?: CompositeBreakdown`
#### New: `frontend/src/components/ticker/DimensionBreakdownPanel.tsx`
An expandable panel component that renders inside the ScoreCard for each dimension. Shows:
- Chevron toggle for expand/collapse
- Sub-score rows: name, bar visualization, score value, weight badge, raw input value
- Formula description text
- Muted "unavailable" labels for missing sub-scores
#### Modified: `frontend/src/components/ui/ScoreCard.tsx`
- Each dimension row becomes clickable/expandable, rendering `DimensionBreakdownPanel` when expanded
- Composite score section shows dimension weights next to each bar
- Missing dimensions shown with muted styling and "redistributed" indicator
- Tooltip/inline text explaining weighted average with re-normalization
#### Modified: `frontend/src/components/charts/CandlestickChart.tsx`
New optional prop: `tradeSetup?: TradeSetup`
When provided, the chart draws:
- Entry price: dashed horizontal line (blue/white) spanning full width
- Stop-loss zone: red semi-transparent rectangle between entry and stop-loss
- Take-profit zone: green semi-transparent rectangle between entry and target
- Price labels on y-axis for entry, stop, target
- All three price levels included in y-axis range calculation
- Hover tooltip showing direction, entry, stop, target, R:R ratio
#### Modified: `frontend/src/pages/TickerDetailPage.tsx`
- Calls `useTrades()` to fetch all trade setups
- Filters for current symbol, picks latest by `detected_at`
- Passes `tradeSetup` prop to `CandlestickChart`
- Renders a trade setup summary card below the chart when a setup exists
- Handles trades API failure gracefully (chart renders without overlay, error logged)
#### Modified: `frontend/src/hooks/useTickerDetail.ts`
Adds trades query to the hook return value so the page has access to trade data.
## Data Models
### Backend Schema Changes
No new database tables. The breakdown data is computed on-the-fly from existing data and returned in the API response only.
### API Response Shape (extended `GET /api/v1/scores/{symbol}`)
```json
{
"status": "success",
"data": {
"symbol": "AAPL",
"composite_score": 72.5,
"composite_stale": false,
"weights": { "technical": 0.25, "sr_quality": 0.20, "sentiment": 0.15, "fundamental": 0.20, "momentum": 0.20 },
"composite_breakdown": {
"weights": { "technical": 0.25, "sr_quality": 0.20, "sentiment": 0.15, "fundamental": 0.20, "momentum": 0.20 },
"available_dimensions": ["technical", "sr_quality", "fundamental", "momentum"],
"missing_dimensions": ["sentiment"],
"renormalized_weights": { "technical": 0.294, "sr_quality": 0.235, "fundamental": 0.235, "momentum": 0.235 },
"formula": "Weighted average of available dimensions with re-normalized weights: sum(weight_i * score_i) / sum(weight_i)"
},
"dimensions": [
{
"dimension": "technical",
"score": 68.2,
"is_stale": false,
"computed_at": "2024-01-15T10:30:00Z",
"breakdown": {
"sub_scores": [
{ "name": "ADX", "score": 72.0, "weight": 0.4, "raw_value": 72.0, "description": "ADX value (0-100). Higher = stronger trend." },
{ "name": "EMA", "score": 65.0, "weight": 0.3, "raw_value": 1.5, "description": "Price 1.5% above EMA(20). Score: 50 + pct_diff * 10." },
{ "name": "RSI", "score": 62.0, "weight": 0.3, "raw_value": 62.0, "description": "RSI(14) value. Score equals RSI." }
],
"formula": "Weighted average: 0.4*ADX + 0.3*EMA + 0.3*RSI, re-normalized if any sub-score unavailable.",
"unavailable": []
}
}
],
"missing_dimensions": ["sentiment"],
"computed_at": "2024-01-15T10:30:00Z"
}
}
```
### Trade Setup Data (existing, no changes)
The `TradeSetup` type already exists in `frontend/src/lib/types.ts` with all needed fields: `symbol`, `direction`, `entry_price`, `stop_loss`, `target`, `rr_ratio`, `detected_at`.
## Correctness Properties
*A property is a characteristic or behavior that should hold true across all valid executions of a system — essentially, a formal statement about what the system should do. Properties serve as the bridge between human-readable specifications and machine-verifiable correctness guarantees.*
### Property 1: Dimension breakdown contains correct sub-scores
*For any* dimension type (technical, sentiment, fundamental, momentum, sr_quality) and any valid input data sufficient to compute that dimension, the returned `ScoreBreakdown` shall contain exactly the expected sub-score names with the correct weights for that dimension type, and each sub-score's `raw_value` shall be non-null.
Specifically:
- technical → ADX (0.4), EMA (0.3), RSI (0.3)
- sentiment → record_count, decay_rate, lookback_window as sub-score metadata
- fundamental → PE Ratio, Revenue Growth, Earnings Surprise (equal weight)
- momentum → 5-day ROC (0.5), 20-day ROC (0.5)
- sr_quality → Strong Count (max 40), Proximity (max 30), Avg Strength (max 30)
**Validates: Requirements 1.1, 1.2, 1.3, 1.4, 1.5, 1.6**
### Property 2: Composite re-normalization correctness
*For any* set of dimension scores where at least one dimension is available and zero or more are missing, the composite breakdown shall:
- List exactly the available dimensions in `available_dimensions`
- List exactly the missing dimensions in `missing_dimensions`
- Have `renormalized_weights` that sum to 1.0 (within floating-point tolerance)
- Have each renormalized weight equal to `original_weight / sum(available_original_weights)`
**Validates: Requirements 1.7, 3.2**
### Property 3: Dimension breakdown UI rendering completeness
*For any* `ScoreBreakdown` object with N sub-scores, the `DimensionBreakdownPanel` component shall render exactly N sub-score rows, each containing the sub-score name, numeric score value, weight, and raw input value.
**Validates: Requirements 2.1**
### Property 4: Composite weight display
*For any* score response with K dimensions (some available, some missing), the `ScoreCard` component shall render the weight value next to each dimension bar, and missing dimensions shall be rendered with a visually distinct (muted/dimmed) style.
**Validates: Requirements 3.1, 3.2**
### Property 5: Trade overlay y-axis range includes all trade levels
*For any* OHLCV dataset and any `TradeSetup` (with entry_price, stop_loss, target), the chart's computed y-axis range `[lo, hi]` shall satisfy: `lo <= min(entry_price, stop_loss, target)` and `hi >= max(entry_price, stop_loss, target)`.
**Validates: Requirements 4.4**
### Property 6: Trade setup selection picks latest matching symbol
*For any* non-empty list of `TradeSetup` objects and any symbol string, filtering for that symbol and selecting by latest `detected_at` shall return the setup with the maximum `detected_at` among all setups matching that symbol. If no setups match, the result shall be null/undefined.
**Validates: Requirements 5.1, 5.5**
## Error Handling
| Scenario | Behavior |
|---|---|
| Dimension computation fails (insufficient data) | Score returns `None`, breakdown includes unavailable sub-scores with reason strings (Req 1.8) |
| Individual sub-score fails (e.g., ADX needs 28 bars but only 20 available) | Sub-score omitted from breakdown, added to `unavailable` list with reason. Remaining sub-scores re-normalized (Req 1.8) |
| Trades API request fails on TickerDetailPage | Chart renders without trade overlay. Error logged to console. Page remains functional (Req 5.4) |
| No trade setup exists for current symbol | Chart renders normally without any overlay elements (Req 4.6) |
| Score breakdown data is null (stale or never computed) | DimensionPanel shows score without expandable breakdown section |
| Composite has zero available dimensions | `composite_score` is `null`, `composite_breakdown` shows all dimensions as missing |
## Testing Strategy
### Unit Tests
Unit tests cover specific examples and edge cases:
- **Backend**: Test each `_compute_*_score` function returns correct breakdown structure for known input data. Test edge cases: missing sub-scores, all sub-scores missing, single sub-score available.
- **Frontend components**: Test `DimensionBreakdownPanel` renders correctly for each dimension type with known breakdown data. Test expand/collapse behavior. Test unavailable sub-score rendering.
- **Trade overlay**: Test `CandlestickChart` draws overlay elements when `tradeSetup` prop is provided. Test no overlay when prop is absent. Test tooltip content on hover.
- **Trade setup selection**: Test filtering and latest-selection logic with specific examples including edge cases (no matches, single match, multiple matches with same timestamp).
- **Composite display**: Test `ScoreCard` renders weights, missing dimension indicators, and re-normalization explanation.
### Property-Based Tests
Property-based tests use `hypothesis` (Python backend) and `fast-check` (TypeScript frontend) with minimum 100 iterations per property.
Each property test references its design document property:
- **Property 1** — `Feature: score-transparency-trade-overlay, Property 1: Dimension breakdown contains correct sub-scores`
Generate random valid indicator data for each dimension type, compute the score, and verify the breakdown structure matches the expected sub-score names and weights.
- **Property 2** — `Feature: score-transparency-trade-overlay, Property 2: Composite re-normalization correctness`
Generate random subsets of 5 dimensions (1-5 available), assign random weights, compute re-normalized weights, and verify they sum to 1.0 and each equals `original / sum(available)`.
- **Property 3** — `Feature: score-transparency-trade-overlay, Property 3: Dimension breakdown UI rendering completeness`
Generate random `ScoreBreakdown` objects with 1-5 sub-scores, render `DimensionBreakdownPanel`, and verify the DOM contains exactly N sub-score rows with all required fields.
- **Property 4** — `Feature: score-transparency-trade-overlay, Property 4: Composite weight display`
Generate random score responses with random available/missing dimension combinations, render `ScoreCard`, and verify weight labels are present and missing dimensions are visually distinct.
- **Property 5** — `Feature: score-transparency-trade-overlay, Property 5: Trade overlay y-axis range includes all trade levels`
Generate random OHLCV data and random trade setups, compute the chart y-axis range, and verify all three trade levels fall within `[lo, hi]`.
- **Property 6** — `Feature: score-transparency-trade-overlay, Property 6: Trade setup selection picks latest matching symbol`
Generate random lists of trade setups with random symbols and timestamps, apply the selection logic, and verify the result is the latest setup for the target symbol.
### Test Configuration
- Python property tests: `hypothesis` library, `@settings(max_examples=100)`
- TypeScript property tests: `fast-check` library, `fc.assert(property, { numRuns: 100 })`
- Each property test tagged with a comment: `Feature: score-transparency-trade-overlay, Property N: <title>`
- Each correctness property implemented by a single property-based test

View File

@@ -0,0 +1,87 @@
# Requirements Document
## Introduction
This feature adds two capabilities to the stock trading signal platform:
1. **Score Transparency** — Each dimension score (sentiment, fundamental, momentum, technical, sr_quality) and the composite score currently appear as opaque numbers. This feature exposes the scoring formulas, sub-scores, weights, and input values so users can understand exactly how each score was calculated.
2. **Trade Setup Chart Overlay** — When a trade setup exists for a ticker (from the R:R scanner), the candlestick chart on the ticker detail page renders visual overlays showing the entry price, stop-loss zone, and take-profit zone as colored regions, similar to TradingView trade visualization.
## Glossary
- **Scoring_API**: The backend API endpoint (`GET /api/v1/scores/{symbol}`) that returns composite and dimension scores for a ticker
- **Score_Breakdown**: A structured object containing the sub-scores, input values, weights, and formula description for a single dimension score
- **Dimension_Panel**: A frontend UI component that displays a single dimension score along with its Score_Breakdown details
- **ScoreCard_Component**: The frontend component (`ScoreCard.tsx`) that displays the composite score ring and dimension bar list
- **CandlestickChart_Component**: The frontend canvas-based chart component (`CandlestickChart.tsx`) that renders OHLCV data with overlays
- **Trade_Overlay**: A set of visual elements drawn on the CandlestickChart_Component representing a trade setup's entry, stop-loss, and target levels
- **TradeSetup**: A data object with fields: symbol, direction, entry_price, stop_loss, target, rr_ratio, representing a detected trade opportunity
- **TickerDetail_Page**: The page (`TickerDetailPage.tsx`) that displays all data for a single ticker
## Requirements
### Requirement 1: Score Breakdown API Response
**User Story:** As a trader, I want the scores API to return detailed breakdowns for each dimension score, so that the frontend can display how scores were calculated.
#### Acceptance Criteria
1. WHEN a score request is made for a symbol, THE Scoring_API SHALL return a Score_Breakdown object for each dimension containing: sub-score names, sub-score values, input values used, weights applied, and the formula description
2. WHEN the technical dimension is computed, THE Scoring_API SHALL include sub-scores for ADX (weight 0.4), EMA (weight 0.3), and RSI (weight 0.3), along with the raw indicator values (adx_value, ema_value, latest_close, rsi_value)
3. WHEN the sentiment dimension is computed, THE Scoring_API SHALL include the number of sentiment records used, the decay rate, the lookback window, and the time-decay weighted average formula parameters
4. WHEN the fundamental dimension is computed, THE Scoring_API SHALL include sub-scores for PE Ratio, Revenue Growth, and Earnings Surprise, along with the raw metric values and the normalization formula for each
5. WHEN the momentum dimension is computed, THE Scoring_API SHALL include sub-scores for 5-day ROC (weight 0.5) and 20-day ROC (weight 0.5), along with the raw ROC percentage values
6. WHEN the sr_quality dimension is computed, THE Scoring_API SHALL include sub-scores for strong level count (max 40 pts), proximity (max 30 pts), and average strength (max 30 pts), along with the input values (strong_count, nearest_distance_pct, avg_strength)
7. WHEN the composite score is computed, THE Scoring_API SHALL include the per-dimension weights used and indicate which dimensions were available versus missing for the re-normalization calculation
8. IF a sub-score component has insufficient data, THEN THE Scoring_API SHALL omit that sub-score from the breakdown and include a reason string explaining the data gap
### Requirement 2: Score Transparency UI — Dimension Panels
**User Story:** As a trader, I want to see a detailed breakdown of each dimension score in the UI, so that I can understand what drives each score.
#### Acceptance Criteria
1. WHEN a dimension score is displayed in the ScoreCard_Component, THE Dimension_Panel SHALL show an expandable breakdown section listing each sub-score name, its value, its weight, and the raw input value
2. WHEN the user expands a dimension breakdown, THE Dimension_Panel SHALL display the formula description as human-readable text explaining how the sub-scores combine into the dimension score
3. WHEN the technical dimension breakdown is expanded, THE Dimension_Panel SHALL display ADX score and raw ADX value, EMA score and percentage difference from EMA, and RSI score and raw RSI value, each with their respective weights (40%, 30%, 30%)
4. WHEN the sentiment dimension breakdown is expanded, THE Dimension_Panel SHALL display the number of sentiment records, the decay rate, and the weighted average calculation summary
5. WHEN the fundamental dimension breakdown is expanded, THE Dimension_Panel SHALL display PE Ratio sub-score with raw PE value, Revenue Growth sub-score with raw growth percentage, and Earnings Surprise sub-score with raw surprise percentage
6. WHEN the momentum dimension breakdown is expanded, THE Dimension_Panel SHALL display 5-day ROC sub-score with raw ROC percentage and 20-day ROC sub-score with raw ROC percentage
7. WHEN the sr_quality dimension breakdown is expanded, THE Dimension_Panel SHALL display strong level count score, proximity score, and average strength score with their respective input values
8. IF a sub-score is unavailable due to insufficient data, THEN THE Dimension_Panel SHALL display a muted label indicating the sub-score is unavailable with the reason
### Requirement 3: Composite Score Transparency
**User Story:** As a trader, I want to see how the composite score is calculated from dimension scores, so that I can understand the overall signal strength.
#### Acceptance Criteria
1. WHEN the composite score is displayed, THE ScoreCard_Component SHALL show the weight assigned to each dimension next to its bar in the dimensions list
2. WHEN a dimension is missing from the composite calculation, THE ScoreCard_Component SHALL visually indicate the missing dimension and show that its weight was redistributed
3. WHEN the user views the composite score section, THE ScoreCard_Component SHALL display a tooltip or inline text explaining that the composite is a weighted average of available dimensions with re-normalized weights
### Requirement 4: Trade Setup Chart Overlay
**User Story:** As a trader, I want to see my trade setup (entry, stop-loss, target) overlaid on the candlestick chart, so that I can visually assess the trade relative to price action.
#### Acceptance Criteria
1. WHEN a TradeSetup exists for the current ticker, THE CandlestickChart_Component SHALL render an entry price line as a dashed horizontal line in blue or white color spanning the full chart width
2. WHEN a TradeSetup exists for the current ticker, THE CandlestickChart_Component SHALL render a stop-loss zone as a red semi-transparent shaded rectangle between the entry price and the stop-loss price level
3. WHEN a TradeSetup exists for the current ticker, THE CandlestickChart_Component SHALL render a take-profit zone as a green semi-transparent shaded rectangle between the entry price and the target price level
4. THE CandlestickChart_Component SHALL include the entry price, stop-loss price, and target price in the y-axis price range calculation so that all trade overlay levels are visible within the chart viewport
5. WHEN the user hovers over the trade overlay area, THE CandlestickChart_Component SHALL display a tooltip showing the trade direction, entry price, stop-loss price, target price, and R:R ratio
6. IF no TradeSetup exists for the current ticker, THEN THE CandlestickChart_Component SHALL render the chart without any trade overlay elements
### Requirement 5: Trade Setup Data Integration on Ticker Detail Page
**User Story:** As a trader, I want the ticker detail page to automatically fetch and display trade setups for the current ticker, so that I see the trade overlay without extra navigation.
#### Acceptance Criteria
1. WHEN the TickerDetail_Page loads for a symbol, THE TickerDetail_Page SHALL fetch trade setups from the trades API and filter for setups matching the current symbol
2. WHEN a matching TradeSetup is found, THE TickerDetail_Page SHALL pass the trade setup data to the CandlestickChart_Component as a prop
3. WHEN a matching TradeSetup is found, THE TickerDetail_Page SHALL display a trade setup summary card below the chart showing direction, entry price, stop-loss, target, and R:R ratio
4. IF the trades API request fails, THEN THE TickerDetail_Page SHALL render the chart without trade overlays and log the error without disrupting the page
5. IF multiple TradeSetups exist for the same symbol, THEN THE TickerDetail_Page SHALL use the most recently detected setup (latest detected_at)

View File

@@ -0,0 +1,142 @@
# Implementation Plan: Score Transparency & Trade Overlay
## Overview
Extend the scoring API and UI to expose full score breakdowns (sub-scores, weights, raw values, formulas) for each dimension and the composite score. Add trade setup chart overlays (entry, stop-loss, take-profit zones) to the candlestick chart on the ticker detail page. Backend changes are in Python/FastAPI, frontend in React/TypeScript.
## Tasks
- [x] 1. Add score breakdown schemas and refactor scoring service
- [x] 1.1 Add breakdown Pydantic models to `app/schemas/score.py`
- Add `SubScoreResponse`, `ScoreBreakdownResponse`, `CompositeBreakdownResponse` models
- Extend `DimensionScoreResponse` with optional `breakdown: ScoreBreakdownResponse` field
- Extend `ScoreResponse` with optional `composite_breakdown: CompositeBreakdownResponse` field
- _Requirements: 1.1, 1.7_
- [x] 1.2 Refactor `_compute_technical_score` in `app/services/scoring_service.py` to return breakdown
- Change return type to `tuple[float | None, ScoreBreakdown | None]`
- Return sub-scores for ADX (weight 0.4), EMA (weight 0.3), RSI (weight 0.3) with raw indicator values
- Include formula description string
- Add unavailable sub-scores with reason when data is insufficient
- _Requirements: 1.2, 1.8_
- [x] 1.3 Refactor `_compute_sentiment_score` to return breakdown
- Return record count, decay rate, lookback window, and weighted average formula parameters as sub-score metadata
- _Requirements: 1.3, 1.8_
- [x] 1.4 Refactor `_compute_fundamental_score` to return breakdown
- Return PE Ratio, Revenue Growth, Earnings Surprise sub-scores with raw metric values and normalization formula
- _Requirements: 1.4, 1.8_
- [x] 1.5 Refactor `_compute_momentum_score` to return breakdown
- Return 5-day ROC (weight 0.5) and 20-day ROC (weight 0.5) sub-scores with raw ROC percentages
- _Requirements: 1.5, 1.8_
- [x] 1.6 Refactor `_compute_sr_quality_score` to return breakdown
- Return strong count (max 40 pts), proximity (max 30 pts), avg strength (max 30 pts) sub-scores with input values
- _Requirements: 1.6, 1.8_
- [x] 1.7 Update `get_score` to assemble composite breakdown and pass dimension breakdowns through
- Build `CompositeBreakdownResponse` with original weights, available/missing dimensions, re-normalized weights, and formula
- Wire dimension breakdowns into the response dict
- _Requirements: 1.7, 3.2_
- [x] 1.8 Update `read_score` in `app/routers/scores.py` to populate breakdown fields from service response
- Map breakdown dicts from service into the new Pydantic response models
- _Requirements: 1.1_
- [ ]* 1.9 Write property test: Dimension breakdown contains correct sub-scores (Property 1)
- **Property 1: Dimension breakdown contains correct sub-scores**
- Use `hypothesis` to generate valid input data for each dimension type
- Verify returned breakdown has expected sub-score names, correct weights, and non-null raw values
- **Validates: Requirements 1.1, 1.2, 1.3, 1.4, 1.5, 1.6**
- [ ]* 1.10 Write property test: Composite re-normalization correctness (Property 2)
- **Property 2: Composite re-normalization correctness**
- Use `hypothesis` to generate random subsets of dimensions with random weights
- Verify re-normalized weights sum to 1.0 and each equals `original_weight / sum(available_weights)`
- **Validates: Requirements 1.7, 3.2**
- [x] 2. Checkpoint — Backend score breakdown
- Ensure all tests pass, ask the user if questions arise.
- [x] 3. Add frontend types and DimensionBreakdownPanel component
- [x] 3.1 Extend frontend types in `frontend/src/lib/types.ts`
- Add `SubScore`, `ScoreBreakdown`, `CompositeBreakdown` interfaces
- Extend `DimensionScoreDetail` with optional `breakdown` field
- Extend `ScoreResponse` with optional `composite_breakdown` field
- _Requirements: 1.1, 1.7_
- [x] 3.2 Create `frontend/src/components/ticker/DimensionBreakdownPanel.tsx`
- Expandable panel showing sub-score rows: name, score value, weight badge, raw input value
- Formula description text section
- Muted "unavailable" labels for missing sub-scores with reason
- _Requirements: 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8_
- [x] 3.3 Modify `frontend/src/components/ui/ScoreCard.tsx` for composite transparency
- Make each dimension row expandable, rendering `DimensionBreakdownPanel` when expanded
- Show dimension weight next to each bar
- Show missing dimensions with muted styling and "redistributed" indicator
- Add tooltip/inline text explaining weighted average with re-normalization
- _Requirements: 3.1, 3.2, 3.3_
- [ ]* 3.4 Write property test: Dimension breakdown UI rendering completeness (Property 3)
- **Property 3: Dimension breakdown UI rendering completeness**
- Use `fast-check` to generate random `ScoreBreakdown` objects with 1-5 sub-scores
- Render `DimensionBreakdownPanel` and verify DOM contains exactly N sub-score rows with all required fields
- **Validates: Requirements 2.1**
- [ ]* 3.5 Write property test: Composite weight display (Property 4)
- **Property 4: Composite weight display**
- Use `fast-check` to generate random score responses with random available/missing dimension combinations
- Render `ScoreCard` and verify weight labels present and missing dimensions visually distinct
- **Validates: Requirements 3.1, 3.2**
- [x] 4. Checkpoint — Score transparency UI
- Ensure all tests pass, ask the user if questions arise.
- [x] 5. Add trade setup chart overlay
- [x] 5.1 Modify `frontend/src/components/charts/CandlestickChart.tsx` to accept and render trade overlay
- Add optional `tradeSetup?: TradeSetup` prop
- Draw entry price as dashed horizontal line (blue/white) spanning full chart width
- Draw stop-loss zone as red semi-transparent rectangle between entry and stop-loss
- Draw take-profit zone as green semi-transparent rectangle between entry and target
- Include entry, stop-loss, target in y-axis price range calculation
- Add hover tooltip showing direction, entry, stop, target, R:R ratio
- Render no overlay when prop is absent
- _Requirements: 4.1, 4.2, 4.3, 4.4, 4.5, 4.6_
- [ ]* 5.2 Write property test: Trade overlay y-axis range includes all trade levels (Property 5)
- **Property 5: Trade overlay y-axis range includes all trade levels**
- Use `fast-check` to generate random OHLCV data and random trade setups
- Extract the y-axis range computation logic and verify all three trade levels fall within `[lo, hi]`
- **Validates: Requirements 4.4**
- [x] 6. Integrate trade setup data on ticker detail page
- [x] 6.1 Update `frontend/src/hooks/useTickerDetail.ts` to include trades data
- Add trades query to the hook return value
- _Requirements: 5.1_
- [x] 6.2 Modify `frontend/src/pages/TickerDetailPage.tsx` to wire trade overlay
- Fetch trade setups via `useTrades()`, filter for current symbol, pick latest by `detected_at`
- Pass `tradeSetup` prop to `CandlestickChart`
- Render trade setup summary card below chart (direction, entry, stop, target, R:R)
- Handle trades API failure gracefully — chart renders without overlay, error logged
- _Requirements: 5.1, 5.2, 5.3, 5.4, 5.5_
- [ ]* 6.3 Write property test: Trade setup selection picks latest matching symbol (Property 6)
- **Property 6: Trade setup selection picks latest matching symbol**
- Use `fast-check` to generate random lists of trade setups with random symbols and timestamps
- Verify selection logic returns the latest setup for the target symbol, or null if no match
- **Validates: Requirements 5.1, 5.5**
- [x] 7. Final checkpoint — Ensure all tests pass
- Ensure all tests pass, ask the user if questions arise.
## Notes
- Tasks marked with `*` are optional and can be skipped for faster MVP
- Each task references specific requirements for traceability
- Property tests use `hypothesis` (Python) and `fast-check` (TypeScript) with minimum 100 iterations
- No new database tables — breakdowns are computed on-the-fly from existing data
- Trade overlay uses the existing canvas rendering pipeline in `CandlestickChart.tsx`, no new library needed

View File

@@ -0,0 +1 @@
{"specId": "b047fbd7-17a8-437c-8c32-ebc8482b2aba", "workflowType": "design-first", "specType": "feature"}

View File

@@ -0,0 +1,391 @@
# Design Document: UX Improvements
## Overview
This feature addresses three UX pain points across the application: (1) unbalanced S/R zone selection that favors one side based on ticker trend, (2) a Trade Scanner page that lacks explanatory context and detailed R:R analysis, and (3) a Rankings page weights form that requires awkward decimal input. The changes span both backend zone-selection logic and frontend presentation components.
## Architecture
```mermaid
graph TD
subgraph Backend
A[sr_service.py<br/>cluster_sr_zones] -->|balanced selection| B[sr_levels router]
B -->|zones + filtered levels| C[API Response]
end
subgraph Frontend - Ticker Detail
C --> D[CandlestickChart]
C --> E[S/R Levels Table<br/>filtered to chart zones]
end
subgraph Frontend - Scanner
F[ScannerPage] --> G[Explainer Banner]
F --> H[TradeTable<br/>expanded columns]
end
subgraph Frontend - Rankings
I[RankingsPage] --> J[WeightsForm<br/>slider-based input]
end
```
## Components and Interfaces
### Component 1: Balanced S/R Zone Selection (Backend)
**Purpose**: Ensure `cluster_sr_zones()` returns a mix of both support and resistance zones instead of only the strongest zones regardless of type.
**Current behavior**: Zones are sorted by strength descending and the top N are returned. For a strongly bullish ticker, all top zones may be support; for bearish, all resistance.
**Proposed algorithm**:
```mermaid
sequenceDiagram
participant Caller
participant cluster_sr_zones
participant ZonePool
Caller->>cluster_sr_zones: levels, current_price, max_zones=6
cluster_sr_zones->>ZonePool: Cluster all levels into zones
cluster_sr_zones->>ZonePool: Split into support[] and resistance[]
cluster_sr_zones->>ZonePool: Sort each list by strength desc
cluster_sr_zones->>ZonePool: Interleave pick (round-robin by strength)
cluster_sr_zones-->>Caller: balanced zones (e.g. 3S + 3R)
```
**Selection rules**:
1. Cluster all levels into zones (existing merge logic unchanged).
2. Tag each zone as support or resistance (existing logic).
3. Split zones into two pools: `support_zones` and `resistance_zones`, each sorted by strength descending.
4. Interleave selection: alternate picking the strongest remaining zone from each pool until `max_zones` is reached.
5. If one pool is exhausted, fill remaining slots from the other pool.
6. Final result sorted by strength descending for consistent ordering.
This naturally produces balanced output (e.g., 3+3 for max_zones=6) while gracefully degrading when one side has fewer strong zones (e.g., 1R + 5S if only 1 resistance zone exists).
**Interface change to `SRLevelResponse`**:
Add a `visible_levels` field that contains only the individual S/R levels whose price falls within one of the returned zones. This allows the frontend table to show only what's on the chart.
```python
class SRLevelResponse(BaseModel):
symbol: str
levels: list[SRLevelResult] # all levels (unchanged, for backward compat)
zones: list[SRZoneResult] # balanced zones shown on chart
visible_levels: list[SRLevelResult] # levels within returned zones only
count: int
```
**Responsibilities**:
- Guarantee both support and resistance representation when both exist
- Compute `visible_levels` by filtering `levels` to those within zone boundaries
- Maintain backward compatibility (existing `levels` field unchanged)
---
### Component 2: S/R Levels Table Filtering (Frontend)
**Purpose**: The S/R levels table below the chart currently shows all detected levels. It should only show levels that correspond to zones visible on the chart.
**Current behavior**: `TickerDetailPage` renders `sortedLevels` from `srLevels.data.levels` — the full unfiltered list.
**Proposed change**: Use `srLevels.data.visible_levels` instead of `srLevels.data.levels` for the table. The chart continues to receive `zones` as before.
```mermaid
graph LR
A[API Response] -->|zones| B[CandlestickChart]
A -->|visible_levels| C[S/R Levels Table]
A -->|levels| D[Other consumers<br/>backward compat]
```
**Interface**:
```typescript
// Updated SRLevelResponse type
interface SRLevelResponse {
symbol: string;
levels: SRLevel[]; // all levels
zones: SRZone[]; // balanced zones on chart
visible_levels: SRLevel[]; // only levels within chart zones
count: number;
}
```
**Responsibilities**:
- Render only `visible_levels` in the table
- Keep table sorted by strength descending
- Show zone type color coding (green for support, red for resistance)
---
### Component 3: Trade Scanner Explainer & R:R Analysis (Frontend)
**Purpose**: Add contextual explanation to the Scanner page and surface detailed trade analysis data (entry, stop-loss, target, R:R, risk amount, reward amount) so users can evaluate setups.
**Best practices for R:R presentation** (informed by trading UX conventions):
- Show entry price, stop-loss, and take-profit (target) as the core trio
- Display R:R ratio prominently with color coding (green ≥ 3:1, amber ≥ 2:1, red < 2:1)
- Show absolute risk and reward amounts (dollar values) so traders can size positions
- Include percentage distance from entry to stop and entry to target
- Visual risk/reward bar showing proportional risk vs reward
**Explainer banner content**: A brief description of what the scanner does — it scans tracked tickers for asymmetric risk-reward trade setups using S/R levels as targets and ATR-based stops.
```mermaid
graph TD
subgraph ScannerPage
A[Explainer Banner] --> B[Filter Controls]
B --> C[TradeTable]
end
subgraph TradeTable Columns
D[Symbol]
E[Direction]
F[Entry Price]
G[Stop Loss]
H[Target / TP]
I[Risk $]
J[Reward $]
K[R:R Ratio]
L[% to Stop]
M[% to Target]
N[Score]
O[Detected]
end
```
**New computed fields** (frontend-only, derived from existing data):
```typescript
// Computed per trade row — no backend changes needed
interface TradeAnalysis {
risk_amount: number; // |entry_price - stop_loss|
reward_amount: number; // |target - entry_price|
stop_pct: number; // risk_amount / entry_price * 100
target_pct: number; // reward_amount / entry_price * 100
}
```
**Responsibilities**:
- Render explainer banner at top of ScannerPage
- Compute risk/reward amounts and percentages client-side
- Add new columns to TradeTable: Risk $, Reward $, % to Stop, % to Target
- Color-code R:R ratio values (green ≥ 3, amber ≥ 2, red < 2)
---
### Component 4: Rankings Weights Slider Form (Frontend)
**Purpose**: Replace the raw decimal number inputs in `WeightsForm` with range sliders using whole-number values (0-100) for better UX.
**Current behavior**: Each weight is a `<input type="number" step="any">` accepting arbitrary decimals. Users must type values like `0.25` which is error-prone.
**Proposed UX**:
```mermaid
graph LR
subgraph Current
A[Number Input<br/>step=any<br/>e.g. 0.25]
end
subgraph Proposed
B[Range Slider 0-100<br/>with numeric display]
C[Live value label]
B --> C
end
```
**Design**:
- Each weight gets a horizontal range slider (`<input type="range" min={0} max={100} step={1}>`)
- Current value displayed next to the slider as a whole number
- On submit, values are normalized to sum to 1.0 before sending to the API (divide each by total)
- Visual feedback: slider track colored proportionally
- Label shows the weight name (humanized) and current value
**Normalization logic**:
```
On submit:
total = sum of all slider values
if total > 0:
normalized[key] = slider_value[key] / total
else:
normalized[key] = 0
```
This means a user setting all sliders to 50 gets equal weights (each 1/N), and setting one to 100 and others to 0 gives that dimension full weight. The UX is intuitive — higher number = more importance.
**Interface**:
```typescript
interface WeightsFormProps {
weights: Record<string, number>; // API values (0-1 decimals)
}
// Internal state uses whole numbers 0-100
// Convert on mount: Math.round(apiWeight * 100)
// Convert on submit: sliderValue / sum(allSliderValues)
```
**Responsibilities**:
- Convert API decimal weights to 0-100 scale on mount
- Render slider per weight dimension with live value display
- Normalize back to decimal weights on submit
- Maintain existing mutation hook (`useUpdateWeights`)
---
## Data Models
### Updated SRLevelResponse (Backend)
```python
class SRLevelResponse(BaseModel):
symbol: str
levels: list[SRLevelResult] # all detected levels
zones: list[SRZoneResult] # balanced S/R zones for chart
visible_levels: list[SRLevelResult] # levels within chart zones
count: int # total level count
```
**Validation Rules**:
- `visible_levels` is a subset of `levels`
- Each entry in `visible_levels` has a price within the bounds of at least one zone
- `zones` contains at most `max_zones` entries
- When both support and resistance zones exist, `zones` contains at least one of each type
### TradeAnalysis (Frontend — computed, not persisted)
```typescript
interface TradeAnalysis {
risk_amount: number; // always positive
reward_amount: number; // always positive
stop_pct: number; // percentage, always positive
target_pct: number; // percentage, always positive
}
```
**Validation Rules**:
- All values are non-negative
- `risk_amount = Math.abs(entry_price - stop_loss)`
- `reward_amount = Math.abs(target - entry_price)`
---
## Error Handling
### Scenario 1: No zones of one type exist
**Condition**: All S/R levels cluster on one side of current price (e.g., price at all-time high — no resistance levels).
**Response**: Fill all `max_zones` slots from the available type. `visible_levels` reflects only that type.
**Recovery**: No special handling needed — the balanced algorithm gracefully fills from the available pool.
### Scenario 2: Zero S/R levels
**Condition**: No S/R levels detected for a ticker.
**Response**: Return empty `zones`, empty `visible_levels`, empty `levels`. Chart renders without overlays. Table section hidden.
**Recovery**: User can click "Fetch Data" to trigger recalculation.
### Scenario 3: Weight sliders all set to zero
**Condition**: User drags all weight sliders to 0.
**Response**: Disable the submit button and show a validation message ("At least one weight must be greater than zero").
**Recovery**: User adjusts at least one slider above 0.
---
## Testing Strategy
### Unit Testing Approach
- Test `cluster_sr_zones()` with balanced selection: verify mixed output when both types exist
- Test edge cases: all support, all resistance, single zone, empty input
- Test `visible_levels` filtering: levels within zone bounds included, others excluded
- Test weight normalization: verify sum-to-1 property, all-zero guard, single-weight case
### Property-Based Testing Approach
**Property Test Library**: Hypothesis (Python backend), fast-check (frontend)
- For any input to `cluster_sr_zones()` with both support and resistance levels present, the output contains at least one of each type (when max_zones ≥ 2)
- For any set of slider values where at least one > 0, normalized weights sum to 1.0 (within floating-point tolerance)
- `visible_levels` is always a subset of `levels`
### Integration Testing Approach
- E2E test: load ticker detail page, verify chart zones contain both types, verify table matches chart zones
- E2E test: load scanner page, verify explainer text visible, verify computed columns present
- E2E test: load rankings page, verify sliders render, adjust slider, submit, verify API call with normalized weights
---
## Correctness Properties
*A property is a characteristic or behavior that should hold true across all valid executions of a system — essentially, a formal statement about what the system should do. Properties serve as the bridge between human-readable specifications and machine-verifiable correctness guarantees.*
### Property 1: Balanced zone selection guarantees both types
*For any* set of S/R levels that produce at least one support zone and at least one resistance zone, and any `max_zones` ≥ 2, the output of `cluster_sr_zones()` shall contain at least one support zone and at least one resistance zone.
**Validates: Requirement 1.1**
### Property 2: Interleave selection correctness
*For any* set of S/R levels producing support zones S (sorted by strength desc) and resistance zones R (sorted by strength desc), the zones selected by `cluster_sr_zones()` shall match the result of round-robin picking from S and R alternately (strongest first from each pool) until `max_zones` is reached or both pools are exhausted.
**Validates: Requirements 1.2, 1.3**
### Property 3: Zone output is sorted by strength
*For any* input to `cluster_sr_zones()`, the returned zones list shall be sorted by strength in descending order.
**Validates: Requirement 1.4**
### Property 4: Visible levels are a subset within zone bounds
*For any* SRLevelResponse, every entry in `visible_levels` shall (a) also appear in `levels`, and (b) have a `price_level` that falls within the `[low, high]` range of at least one entry in `zones`.
**Validates: Requirements 2.1, 2.2**
### Property 5: Trade analysis computation correctness
*For any* trade setup with positive `entry_price`, `stop_loss`, and `target`, the computed Trade_Analysis values shall satisfy: `risk_amount == |entry_price - stop_loss|`, `reward_amount == |target - entry_price|`, `stop_pct == risk_amount / entry_price * 100`, and `target_pct == reward_amount / entry_price * 100`.
**Validates: Requirements 5.2, 5.3, 5.4, 5.5**
### Property 6: Weight conversion round-trip
*For any* decimal weight value `w` in [0, 1], converting to slider scale via `Math.round(w * 100)` and then normalizing back (dividing by the sum of all slider values) shall preserve the relative proportions of the original weights within floating-point tolerance.
**Validates: Requirement 6.3**
### Property 7: Normalized weights sum to one
*For any* set of slider values (integers 0-100) where at least one value is greater than zero, the normalized weights (each divided by the sum of all values) shall sum to 1.0 within floating-point tolerance (±1e-9).
**Validates: Requirement 7.1**
---
## Performance Considerations
- Balanced zone selection adds negligible overhead — it's a simple split + interleave over an already-small list (typically < 50 zones)
- `visible_levels` filtering is O(levels × zones), both small — no concern
- Frontend computed columns (risk/reward amounts) are derived inline per row — trivial cost
- Slider rendering uses native `<input type="range">` — no performance impact vs current number inputs
---
## Security Considerations
- No new API endpoints or authentication changes
- Weight normalization happens client-side before submission; backend should still validate that weights are non-negative and sum to ~1.0
- No user-generated content introduced (explainer text is static)
---
## Dependencies
- No new external libraries required
- Backend: existing FastAPI, Pydantic, SQLAlchemy stack
- Frontend: existing React, Recharts, TanStack Query stack
- Slider styling can use Tailwind CSS utilities (already in project) — no additional UI library needed

View File

@@ -0,0 +1,107 @@
# Requirements Document
## Introduction
This specification covers three UX improvements to the stock signal platform: (1) balanced support/resistance zone selection that ensures both zone types are represented on the chart, with a filtered levels table; (2) a Trade Scanner page enhanced with an explainer banner and detailed risk/reward analysis columns; and (3) a Rankings page weights form that replaces decimal number inputs with intuitive range sliders and automatic normalization.
## Glossary
- **SR_Service**: The backend service (`sr_service.py`) containing `cluster_sr_zones()` that clusters, scores, and selects S/R zones.
- **SR_API**: The FastAPI router endpoint (`/sr-levels/{symbol}`) that returns S/R levels and zones for a ticker.
- **SRLevelResponse**: The Pydantic response model returned by the SR_API containing levels, zones, and metadata.
- **Zone_Selector**: The interleave-based selection logic within `cluster_sr_zones()` that picks zones from support and resistance pools alternately.
- **Visible_Levels**: The subset of all detected S/R levels whose price falls within the bounds of at least one returned zone.
- **Ticker_Detail_Page**: The frontend page (`TickerDetailPage.tsx`) displaying chart, scores, sentiment, fundamentals, and S/R data for a single ticker.
- **SR_Levels_Table**: The HTML table on the Ticker_Detail_Page that lists individual S/R levels sorted by strength.
- **Scanner_Page**: The frontend page (`ScannerPage.tsx`) displaying trade setups with filtering and sorting.
- **Trade_Table**: The table component (`TradeTable.tsx`) rendering trade setup rows on the Scanner_Page.
- **Explainer_Banner**: A static informational banner at the top of the Scanner_Page describing what the scanner does.
- **Trade_Analysis**: A set of computed fields (risk amount, reward amount, stop percentage, target percentage) derived client-side from each trade setup row.
- **Rankings_Page**: The frontend page displaying ticker rankings with configurable scoring weights.
- **Weights_Form**: The form component (`WeightsForm.tsx`) on the Rankings_Page for adjusting scoring dimension weights.
- **Weight_Slider**: A range input (0-100) replacing the current decimal number input for each scoring weight dimension.
- **Normalization**: The process of dividing each slider value by the sum of all slider values to produce decimal weights that sum to 1.0.
## Requirements
### Requirement 1: Balanced Zone Selection
**User Story:** As a trader, I want the S/R zone selection to include both support and resistance zones when both exist, so that I get a balanced view of key price levels regardless of the ticker's trend direction.
#### Acceptance Criteria
1. WHEN both support and resistance zones exist and `max_zones` is 2 or greater, THE Zone_Selector SHALL return at least one support zone and at least one resistance zone.
2. THE Zone_Selector SHALL select zones by alternating picks from the support pool and the resistance pool, each sorted by strength descending, until `max_zones` is reached.
3. WHEN one pool is exhausted before `max_zones` is reached, THE Zone_Selector SHALL fill the remaining slots from the other pool in strength-descending order.
4. THE Zone_Selector SHALL sort the final selected zones by strength descending before returning them.
5. WHEN no S/R levels are provided, THE SR_Service SHALL return an empty zones list.
6. WHEN `max_zones` is zero or negative, THE SR_Service SHALL return an empty zones list.
### Requirement 2: Visible Levels Filtering
**User Story:** As a trader, I want the API to provide a filtered list of S/R levels that correspond to the zones shown on the chart, so that the levels table only shows relevant data.
#### Acceptance Criteria
1. THE SRLevelResponse SHALL include a `visible_levels` field containing only the S/R levels whose price falls within the bounds of at least one returned zone.
2. THE `visible_levels` field SHALL be a subset of the `levels` field in the same response.
3. THE SRLevelResponse SHALL continue to include the full `levels` field for backward compatibility.
4. WHEN the zones list is empty, THE SRLevelResponse SHALL return an empty `visible_levels` list.
### Requirement 3: S/R Levels Table Filtering
**User Story:** As a trader, I want the S/R levels table below the chart to show only levels that correspond to zones visible on the chart, so that the table and chart are consistent.
#### Acceptance Criteria
1. THE SR_Levels_Table SHALL render levels from the `visible_levels` field of the API response instead of the full `levels` field.
2. THE SR_Levels_Table SHALL sort displayed levels by strength descending.
3. THE SR_Levels_Table SHALL color-code each level row green for support and red for resistance.
4. WHEN `visible_levels` is empty, THE Ticker_Detail_Page SHALL hide the SR_Levels_Table section.
### Requirement 4: Trade Scanner Explainer Banner
**User Story:** As a user, I want to see a brief explanation of what the Trade Scanner does when I visit the page, so that I understand the purpose and methodology of the displayed trade setups.
#### Acceptance Criteria
1. THE Scanner_Page SHALL display an Explainer_Banner above the filter controls.
2. THE Explainer_Banner SHALL contain static text describing that the scanner identifies asymmetric risk-reward trade setups using S/R levels as targets and ATR-based stops.
3. THE Explainer_Banner SHALL be visible on initial page load without requiring user interaction.
### Requirement 5: Trade Scanner R:R Analysis Columns
**User Story:** As a trader, I want to see detailed risk/reward analysis data (risk amount, reward amount, percentage distances, and color-coded R:R ratio) for each trade setup, so that I can evaluate and compare setups at a glance.
#### Acceptance Criteria
1. THE Trade_Table SHALL display the following additional columns: Risk $ (absolute risk amount), Reward $ (absolute reward amount), % to Stop (percentage distance from entry to stop-loss), and % to Target (percentage distance from entry to target).
2. THE Trade_Analysis risk_amount SHALL be computed as the absolute difference between entry_price and stop_loss.
3. THE Trade_Analysis reward_amount SHALL be computed as the absolute difference between target and entry_price.
4. THE Trade_Analysis stop_pct SHALL be computed as risk_amount divided by entry_price multiplied by 100.
5. THE Trade_Analysis target_pct SHALL be computed as reward_amount divided by entry_price multiplied by 100.
6. WHEN the R:R ratio is 3.0 or greater, THE Trade_Table SHALL display the R:R value with green color coding.
7. WHEN the R:R ratio is 2.0 or greater but less than 3.0, THE Trade_Table SHALL display the R:R value with amber color coding.
8. WHEN the R:R ratio is less than 2.0, THE Trade_Table SHALL display the R:R value with red color coding.
### Requirement 6: Rankings Weight Slider Input
**User Story:** As a user, I want to adjust scoring weights using range sliders with whole-number values instead of typing decimal numbers, so that the input is intuitive and less error-prone.
#### Acceptance Criteria
1. THE Weights_Form SHALL render each weight dimension as a Weight_Slider with a range of 0 to 100 and a step of 1.
2. THE Weights_Form SHALL display the current whole-number value next to each Weight_Slider.
3. WHEN the Weights_Form receives API weight values (decimals between 0 and 1), THE Weights_Form SHALL convert each value to the 0-100 scale by multiplying by 100 and rounding to the nearest integer.
4. THE Weights_Form SHALL display a humanized label for each weight dimension by replacing underscores with spaces.
### Requirement 7: Weight Normalization on Submit
**User Story:** As a user, I want my slider values to be automatically normalized to valid decimal weights when I submit, so that I don't need to manually ensure they sum to 1.0.
#### Acceptance Criteria
1. WHEN the user submits the Weights_Form and at least one slider value is greater than zero, THE Weights_Form SHALL normalize each slider value by dividing it by the sum of all slider values.
2. WHEN all slider values are zero, THE Weights_Form SHALL disable the submit button.
3. WHEN all slider values are zero, THE Weights_Form SHALL display a validation message stating that at least one weight must be greater than zero.
4. THE Weights_Form SHALL send the normalized decimal weights to the API using the existing mutation hook.

View File

@@ -0,0 +1,124 @@
# Implementation Plan: UX Improvements
## Overview
Implement four UX improvements: balanced S/R zone selection in the backend, visible levels filtering (backend + frontend), Trade Scanner explainer banner and R:R analysis columns, and Rankings weight slider form. Backend changes use Python (FastAPI/Pydantic), frontend changes use TypeScript (React).
## Tasks
- [x] 1. Implement balanced S/R zone selection in `cluster_sr_zones()`
- [x] 1.1 Refactor `cluster_sr_zones()` to use interleave-based balanced selection
- In `app/services/sr_service.py`, after clustering and computing zones, split zones into `support_zones` and `resistance_zones` pools sorted by strength descending
- Implement round-robin interleave picking: alternate strongest from each pool until `max_zones` is reached or both pools exhausted
- If one pool is exhausted, fill remaining slots from the other pool
- Sort final selected zones by strength descending before returning
- _Requirements: 1.1, 1.2, 1.3, 1.4, 1.5, 1.6_
- [ ]* 1.2 Write property test: balanced zone selection guarantees both types
- **Property 1: Balanced zone selection guarantees both types**
- **Validates: Requirement 1.1**
- In `tests/unit/test_cluster_sr_zones.py`, use Hypothesis to generate sets of levels with at least one support and one resistance zone, verify output contains at least one of each type when `max_zones >= 2`
- [ ]* 1.3 Write property test: interleave selection correctness
- **Property 2: Interleave selection correctness**
- **Validates: Requirements 1.2, 1.3**
- Verify the selected zones match the expected round-robin interleave from support and resistance pools
- [ ]* 1.4 Write property test: zone output sorted by strength
- **Property 3: Zone output is sorted by strength**
- **Validates: Requirement 1.4**
- For any input, verify returned zones are sorted by strength descending
- [x] 1.5 Update existing unit tests for balanced selection behavior
- Update `tests/unit/test_cluster_sr_zones.py` to add tests for: mixed support/resistance input produces balanced output, all-support input fills from support only, all-resistance input fills from resistance only, single zone edge case
- _Requirements: 1.1, 1.2, 1.3, 1.5, 1.6_
- [x] 2. Implement `visible_levels` filtering in backend
- [x] 2.1 Add `visible_levels` field to `SRLevelResponse` schema
- In `app/schemas/sr_level.py`, add `visible_levels: list[SRLevelResult] = []` to `SRLevelResponse`
- _Requirements: 2.1, 2.3_
- [x] 2.2 Compute `visible_levels` in the SR levels router
- In `app/routers/sr_levels.py`, after computing zones, filter `level_results` to only those whose `price_level` falls within the `[low, high]` range of at least one zone
- Set the filtered list as `visible_levels` on the `SRLevelResponse`
- When zones list is empty, `visible_levels` should be empty
- _Requirements: 2.1, 2.2, 2.4_
- [ ]* 2.3 Write property test: visible levels subset within zone bounds
- **Property 4: Visible levels are a subset within zone bounds**
- **Validates: Requirements 2.1, 2.2**
- Verify every entry in `visible_levels` appears in `levels` and has a price within at least one zone's `[low, high]` range
- [x] 2.4 Update router unit tests for `visible_levels`
- In `tests/unit/test_sr_levels_router.py`, add tests verifying: `visible_levels` is present in response, `visible_levels` contains only levels within zone bounds, `visible_levels` is empty when zones are empty
- _Requirements: 2.1, 2.2, 2.4_
- [x] 3. Checkpoint - Ensure all backend tests pass
- Ensure all tests pass, ask the user if questions arise.
- [x] 4. Update frontend types and S/R levels table filtering
- [x] 4.1 Add `visible_levels` to frontend `SRLevelResponse` type
- In `frontend/src/lib/types.ts`, add `visible_levels: SRLevel[]` to the `SRLevelResponse` interface
- _Requirements: 2.1_
- [x] 4.2 Update `TickerDetailPage` to use `visible_levels` for the S/R table
- In `frontend/src/pages/TickerDetailPage.tsx`, change `sortedLevels` to derive from `srLevels.data.visible_levels` instead of `srLevels.data.levels`
- Keep sorting by strength descending
- Hide the S/R Levels Table section when `visible_levels` is empty
- Maintain existing color coding (green for support, red for resistance)
- _Requirements: 3.1, 3.2, 3.3, 3.4_
- [x] 5. Add Trade Scanner explainer banner and R:R analysis columns
- [x] 5.1 Add explainer banner to `ScannerPage`
- In `frontend/src/pages/ScannerPage.tsx`, add a static informational banner above the filter controls
- Banner text: describe that the scanner identifies asymmetric risk-reward trade setups using S/R levels as targets and ATR-based stops
- Banner should be visible on initial page load without user interaction
- _Requirements: 4.1, 4.2, 4.3_
- [x] 5.2 Add R:R analysis columns to `TradeTable`
- In `frontend/src/components/scanner/TradeTable.tsx`, add computed columns: Risk $ (`|entry_price - stop_loss|`), Reward $ (`|target - entry_price|`), % to Stop (`risk / entry * 100`), % to Target (`reward / entry * 100`)
- Color-code the existing R:R ratio column: green for ≥ 3.0, amber for ≥ 2.0, red for < 2.0
- Update the `SortColumn` type and `columns` array to include the new columns
- Update `sortTrades` in `ScannerPage.tsx` to handle sorting by new computed columns
- _Requirements: 5.1, 5.2, 5.3, 5.4, 5.5, 5.6, 5.7, 5.8_
- [ ]* 5.3 Write property test: trade analysis computation correctness
- **Property 5: Trade analysis computation correctness**
- **Validates: Requirements 5.2, 5.3, 5.4, 5.5**
- Using fast-check, for any trade with positive entry_price, stop_loss, and target, verify `risk_amount == |entry_price - stop_loss|`, `reward_amount == |target - entry_price|`, `stop_pct == risk_amount / entry_price * 100`, `target_pct == reward_amount / entry_price * 100`
- [x] 6. Convert Rankings weight inputs to sliders
- [x] 6.1 Replace number inputs with range sliders in `WeightsForm`
- In `frontend/src/components/rankings/WeightsForm.tsx`, replace `<input type="number">` with `<input type="range" min={0} max={100} step={1}>`
- On mount, convert API decimal weights to 0-100 scale: `Math.round(w * 100)`
- Display current whole-number value next to each slider
- Show humanized label (replace underscores with spaces)
- _Requirements: 6.1, 6.2, 6.3, 6.4_
- [x] 6.2 Implement weight normalization on submit
- On submit, normalize slider values: divide each by the sum of all values
- Disable submit button when all sliders are zero
- Show validation message "At least one weight must be greater than zero" when all are zero
- Send normalized decimal weights via existing `useUpdateWeights` mutation
- _Requirements: 7.1, 7.2, 7.3, 7.4_
- [ ]* 6.3 Write property test: weight conversion round-trip
- **Property 6: Weight conversion round-trip**
- **Validates: Requirement 6.3**
- Using fast-check, verify that converting decimal weights to slider scale and normalizing back preserves relative proportions within floating-point tolerance
- [ ]* 6.4 Write property test: normalized weights sum to one
- **Property 7: Normalized weights sum to one**
- **Validates: Requirement 7.1**
- Using fast-check, for any set of slider values (integers 0-100) where at least one > 0, verify normalized weights sum to 1.0 within ±1e-9
- [x] 7. Final checkpoint - Ensure all tests pass
- Ensure all tests pass, ask the user if questions arise.
## Notes
- Tasks marked with `*` are optional and can be skipped for faster MVP
- Backend uses Python (Hypothesis for property tests), frontend uses TypeScript/React (fast-check for property tests)
- Each task references specific requirements for traceability
- Checkpoints ensure incremental validation after backend and full implementation phases
- Property tests validate universal correctness properties from the design document

View File

@@ -0,0 +1,41 @@
"""add reasoning and citations_json to sentiment_scores, unavailable_fields_json to fundamental_data
Revision ID: 002
Revises: 001
Create Date: 2025-01-02 00:00:00.000000
"""
from typing import Sequence, Union
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision: str = "002"  # this migration's id
down_revision: Union[str, None] = "001"  # previous migration in the chain
branch_labels: Union[str, Sequence[str], None] = None  # no named branches
depends_on: Union[str, Sequence[str], None] = None  # no cross-branch dependency
def upgrade() -> None:
    """Add text columns for sentiment reasoning/citations and fundamentals
    unavailable-field tracking.

    Server defaults ("" / "[]" / "{}") keep the new NOT NULL columns valid
    for rows that already exist when the migration runs.
    """
    # (table, column) pairs added by this revision, applied in order.
    additions = [
        (
            "sentiment_scores",
            sa.Column("reasoning", sa.Text(), server_default="", nullable=False),
        ),
        (
            "sentiment_scores",
            sa.Column("citations_json", sa.Text(), server_default="[]", nullable=False),
        ),
        (
            "fundamental_data",
            sa.Column(
                "unavailable_fields_json", sa.Text(), server_default="{}", nullable=False
            ),
        ),
    ]
    for table_name, column in additions:
        op.add_column(table_name, column)
def downgrade() -> None:
op.drop_column("fundamental_data", "unavailable_fields_json")
op.drop_column("sentiment_scores", "citations_json")
op.drop_column("sentiment_scores", "reasoning")

View File

@@ -15,10 +15,14 @@ class Settings(BaseSettings):
alpaca_api_key: str = ""
alpaca_api_secret: str = ""
# Sentiment Provider — Gemini with Search Grounding
# Sentiment Provider — Gemini with Search Grounding (legacy)
gemini_api_key: str = ""
gemini_model: str = "gemini-2.0-flash"
# Sentiment Provider — OpenAI
openai_api_key: str = ""
openai_model: str = "gpt-4o-mini"
# Fundamentals Provider — Financial Modeling Prep
fmp_api_key: str = ""
@@ -30,7 +34,7 @@ class Settings(BaseSettings):
# Scoring Defaults
default_watchlist_auto_size: int = 10
default_rr_threshold: float = 3.0
default_rr_threshold: float = 1.5
# Database Pool
db_pool_size: int = 5

View File

@@ -1,5 +1,57 @@
"""FastAPI application entry point with lifespan management."""
# ---------------------------------------------------------------------------
# SSL + proxy injection — MUST happen before any HTTP client imports
# ---------------------------------------------------------------------------
import os as _os
import ssl as _ssl
from pathlib import Path as _Path
_COMBINED_CERT = _Path(__file__).resolve().parent.parent / "combined-ca-bundle.pem"
if _COMBINED_CERT.exists():
_cert_path = str(_COMBINED_CERT)
# Env vars for libraries that respect them (requests, urllib3)
_os.environ["SSL_CERT_FILE"] = _cert_path
_os.environ["REQUESTS_CA_BUNDLE"] = _cert_path
_os.environ["CURL_CA_BUNDLE"] = _cert_path
# Monkey-patch ssl.create_default_context so that ALL libraries
# (aiohttp, httpx, google-genai, alpaca-py, etc.) automatically
# use our combined CA bundle that includes the corporate root cert.
_original_create_default_context = _ssl.create_default_context
def _patched_create_default_context(
purpose=_ssl.Purpose.SERVER_AUTH, *, cafile=None, capath=None, cadata=None
):
ctx = _original_create_default_context(
purpose, cafile=cafile, capath=capath, cadata=cadata
)
# Always load our combined bundle on top of whatever was loaded
ctx.load_verify_locations(cafile=_cert_path)
return ctx
_ssl.create_default_context = _patched_create_default_context
# Also patch aiohttp's cached SSL context objects directly, since
# aiohttp creates them at import time and may have already cached
# a context without our corporate CA bundle.
try:
import aiohttp.connector as _aio_conn
if hasattr(_aio_conn, '_SSL_CONTEXT_VERIFIED') and _aio_conn._SSL_CONTEXT_VERIFIED is not None:
_aio_conn._SSL_CONTEXT_VERIFIED.load_verify_locations(cafile=_cert_path)
if hasattr(_aio_conn, '_SSL_CONTEXT_UNVERIFIED') and _aio_conn._SSL_CONTEXT_UNVERIFIED is not None:
_aio_conn._SSL_CONTEXT_UNVERIFIED.load_verify_locations(cafile=_cert_path)
except ImportError:
pass
# Corporate proxy — needed when Kiro spawns the process (no .zshrc sourced)
_PROXY = "http://aproxy.corproot.net:8080"
_NO_PROXY = "corproot.net,sharedtcs.net,127.0.0.1,localhost,bix.swisscom.com,swisscom.com"
_os.environ.setdefault("HTTP_PROXY", _PROXY)
_os.environ.setdefault("HTTPS_PROXY", _PROXY)
_os.environ.setdefault("NO_PROXY", _NO_PROXY)
import logging
import sys
from contextlib import asynccontextmanager

View File

@@ -1,6 +1,6 @@
from datetime import datetime
from sqlalchemy import DateTime, Float, ForeignKey
from sqlalchemy import DateTime, Float, ForeignKey, Text
from sqlalchemy.orm import Mapped, mapped_column, relationship
from app.database import Base
@@ -20,5 +20,8 @@ class FundamentalData(Base):
fetched_at: Mapped[datetime] = mapped_column(
DateTime(timezone=True), nullable=False
)
unavailable_fields_json: Mapped[str] = mapped_column(
Text, nullable=False, default="{}"
)
ticker = relationship("Ticker", back_populates="fundamental_data")

View File

@@ -1,6 +1,6 @@
from datetime import datetime
from sqlalchemy import DateTime, ForeignKey, Integer, String
from sqlalchemy import DateTime, ForeignKey, Integer, String, Text
from sqlalchemy.orm import Mapped, mapped_column, relationship
from app.database import Base
@@ -20,4 +20,7 @@ class SentimentScore(Base):
DateTime(timezone=True), nullable=False
)
reasoning: Mapped[str] = mapped_column(Text, nullable=False, default="")
citations_json: Mapped[str] = mapped_column(Text, nullable=False, default="[]")
ticker = relationship("Ticker", back_populates="sentiment_scores")

View File

@@ -1,9 +1,15 @@
"""Financial Modeling Prep (FMP) fundamentals provider using httpx."""
"""Financial Modeling Prep (FMP) fundamentals provider using httpx.
Uses the stable API endpoints (https://financialmodelingprep.com/stable/)
which replaced the legacy /api/v3/ endpoints deprecated in Aug 2025.
"""
from __future__ import annotations
import logging
import os
from datetime import datetime, timezone
from pathlib import Path
import httpx
@@ -12,7 +18,14 @@ from app.providers.protocol import FundamentalData
logger = logging.getLogger(__name__)
_FMP_BASE_URL = "https://financialmodelingprep.com/api/v3"
_FMP_STABLE_URL = "https://financialmodelingprep.com/stable"
# Resolve CA bundle for explicit httpx verify
_CA_BUNDLE = os.environ.get("SSL_CERT_FILE", "")
if not _CA_BUNDLE or not Path(_CA_BUNDLE).exists():
_CA_BUNDLE_PATH: str | bool = True # use system default
else:
_CA_BUNDLE_PATH = _CA_BUNDLE
class FMPFundamentalProvider:
@@ -23,17 +36,54 @@ class FMPFundamentalProvider:
raise ProviderError("FMP API key is required")
self._api_key = api_key
async def fetch_fundamentals(self, ticker: str) -> FundamentalData:
"""Fetch P/E, revenue growth, earnings surprise, and market cap."""
try:
async with httpx.AsyncClient(timeout=30.0) as client:
profile = await self._fetch_profile(client, ticker)
earnings = await self._fetch_earnings_surprise(client, ticker)
# Mapping from FMP endpoint name to the FundamentalData field it populates
_ENDPOINT_FIELD_MAP: dict[str, str] = {
"ratios-ttm": "pe_ratio",
"financial-growth": "revenue_growth",
"earnings": "earnings_surprise",
}
pe_ratio = self._safe_float(profile.get("pe"))
revenue_growth = self._safe_float(profile.get("revenueGrowth"))
market_cap = self._safe_float(profile.get("mktCap"))
earnings_surprise = self._safe_float(earnings)
async def fetch_fundamentals(self, ticker: str) -> FundamentalData:
"""Fetch P/E, revenue growth, earnings surprise, and market cap.
Fetches from multiple stable endpoints. If a supplementary endpoint
(ratios, growth, earnings) returns 402 (paid tier), we gracefully
degrade and return partial data rather than failing entirely, and
record the affected field in ``unavailable_fields``.
"""
try:
endpoints_402: set[str] = set()
async with httpx.AsyncClient(timeout=30.0, verify=_CA_BUNDLE_PATH) as client:
params = {"symbol": ticker, "apikey": self._api_key}
# Profile is the primary source — must succeed
profile = await self._fetch_json(client, "profile", params, ticker)
# Supplementary sources — degrade gracefully on 402
ratios, was_402 = await self._fetch_json_optional(client, "ratios-ttm", params, ticker)
if was_402:
endpoints_402.add("ratios-ttm")
growth, was_402 = await self._fetch_json_optional(client, "financial-growth", params, ticker)
if was_402:
endpoints_402.add("financial-growth")
earnings, was_402 = await self._fetch_json_optional(client, "earnings", params, ticker)
if was_402:
endpoints_402.add("earnings")
pe_ratio = self._safe_float(ratios.get("priceToEarningsRatioTTM"))
revenue_growth = self._safe_float(growth.get("revenueGrowth"))
market_cap = self._safe_float(profile.get("marketCap"))
earnings_surprise = self._compute_earnings_surprise(earnings)
# Build unavailable_fields from 402 endpoints
unavailable_fields: dict[str, str] = {
self._ENDPOINT_FIELD_MAP[ep]: "requires paid plan"
for ep in endpoints_402
if ep in self._ENDPOINT_FIELD_MAP
}
return FundamentalData(
ticker=ticker,
@@ -42,6 +92,7 @@ class FMPFundamentalProvider:
earnings_surprise=earnings_surprise,
market_cap=market_cap,
fetched_at=datetime.now(timezone.utc),
unavailable_fields=unavailable_fields,
)
except (ProviderError, RateLimitError):
@@ -50,27 +101,52 @@ class FMPFundamentalProvider:
logger.error("FMP provider error for %s: %s", ticker, exc)
raise ProviderError(f"FMP provider error for {ticker}: {exc}") from exc
async def _fetch_profile(self, client: httpx.AsyncClient, ticker: str) -> dict:
"""Fetch company profile (P/E, revenue growth, market cap)."""
url = f"{_FMP_BASE_URL}/profile/{ticker}"
resp = await client.get(url, params={"apikey": self._api_key})
self._check_response(resp, ticker, "profile")
async def _fetch_json(
self,
client: httpx.AsyncClient,
endpoint: str,
params: dict,
ticker: str,
) -> dict:
"""Fetch a stable endpoint and return the first item (or empty dict)."""
url = f"{_FMP_STABLE_URL}/{endpoint}"
resp = await client.get(url, params=params)
self._check_response(resp, ticker, endpoint)
data = resp.json()
if isinstance(data, list) and data:
return data[0]
if isinstance(data, list):
return data[0] if data else {}
return data if isinstance(data, dict) else {}
async def _fetch_earnings_surprise(
self, client: httpx.AsyncClient, ticker: str
) -> float | None:
"""Fetch the most recent earnings surprise percentage."""
url = f"{_FMP_BASE_URL}/earnings-surprises/{ticker}"
resp = await client.get(url, params={"apikey": self._api_key})
self._check_response(resp, ticker, "earnings-surprises")
async def _fetch_json_optional(
self,
client: httpx.AsyncClient,
endpoint: str,
params: dict,
ticker: str,
) -> tuple[dict, bool]:
"""Fetch a stable endpoint, returning ``({}, True)`` on 402 (paid tier).
Returns a tuple of (data_dict, was_402) so callers can track which
endpoints required a paid plan.
"""
url = f"{_FMP_STABLE_URL}/{endpoint}"
resp = await client.get(url, params=params)
if resp.status_code == 402:
logger.warning("FMP %s requires paid plan — skipping for %s", endpoint, ticker)
return {}, True
self._check_response(resp, ticker, endpoint)
data = resp.json()
if isinstance(data, list) and data:
return self._safe_float(data[0].get("actualEarningResult"))
return None
if isinstance(data, list):
return (data[0] if data else {}, False)
return (data if isinstance(data, dict) else {}, False)
def _compute_earnings_surprise(self, earnings_data: dict) -> float | None:
"""Compute earnings surprise % from the most recent actual vs estimated EPS."""
actual = self._safe_float(earnings_data.get("epsActual"))
estimated = self._safe_float(earnings_data.get("epsEstimated"))
if actual is None or estimated is None or estimated == 0:
return None
return ((actual - estimated) / abs(estimated)) * 100
def _check_response(
self, resp: httpx.Response, ticker: str, endpoint: str
@@ -78,6 +154,10 @@ class FMPFundamentalProvider:
"""Raise appropriate errors for non-200 responses."""
if resp.status_code == 429:
raise RateLimitError(f"FMP rate limit hit for {ticker} ({endpoint})")
if resp.status_code == 403:
raise ProviderError(
f"FMP {endpoint} access denied for {ticker}: HTTP 403 — check API key validity and plan tier"
)
if resp.status_code != 200:
raise ProviderError(
f"FMP {endpoint} error for {ticker}: HTTP {resp.status_code}"

View File

@@ -4,7 +4,10 @@ from __future__ import annotations
import json
import logging
import os
import ssl
from datetime import datetime, timezone
from pathlib import Path
from google import genai
from google.genai import types
@@ -14,6 +17,19 @@ from app.providers.protocol import SentimentData
logger = logging.getLogger(__name__)
# Ensure aiohttp's cached SSL context includes our corporate CA bundle.
# aiohttp creates _SSL_CONTEXT_VERIFIED at import time; we must patch it
# after import so that google-genai's aiohttp session trusts our proxy CA.
_CA_BUNDLE = os.environ.get("SSL_CERT_FILE", "")
if _CA_BUNDLE and Path(_CA_BUNDLE).exists():
try:
import aiohttp.connector as _aio_conn
if hasattr(_aio_conn, "_SSL_CONTEXT_VERIFIED") and _aio_conn._SSL_CONTEXT_VERIFIED is not None:
_aio_conn._SSL_CONTEXT_VERIFIED.load_verify_locations(cafile=_CA_BUNDLE)
logger.debug("Patched aiohttp _SSL_CONTEXT_VERIFIED with %s", _CA_BUNDLE)
except Exception:
logger.warning("Could not patch aiohttp SSL context", exc_info=True)
_SENTIMENT_PROMPT = """\
Analyze the current market sentiment for the stock ticker {ticker}.
Search the web for recent news articles, social media mentions, and analyst opinions.
@@ -84,7 +100,7 @@ class GeminiSentimentProvider:
raise
except Exception as exc:
msg = str(exc).lower()
if "rate" in msg or "quota" in msg or "429" in msg:
if "429" in msg or "resource exhausted" in msg or "quota" in msg or ("rate" in msg and "limit" in msg):
raise RateLimitError(f"Gemini rate limit hit for {ticker}") from exc
logger.error("Gemini provider error for %s: %s", ticker, exc)
raise ProviderError(f"Gemini provider error for {ticker}: {exc}") from exc

View File

@@ -0,0 +1,136 @@
"""OpenAI sentiment provider using the Responses API with web search."""
from __future__ import annotations
import json
import logging
import os
from datetime import datetime, timezone
from pathlib import Path
import httpx
from openai import AsyncOpenAI
from app.exceptions import ProviderError, RateLimitError
from app.providers.protocol import SentimentData
logger = logging.getLogger(__name__)
_CA_BUNDLE = os.environ.get("SSL_CERT_FILE", "")
_SENTIMENT_PROMPT = """\
Search the web for the LATEST news, analyst opinions, and market developments \
about the stock ticker {ticker} from the past 24-48 hours.
Based on your web search findings, analyze the CURRENT market sentiment.
Respond ONLY with a JSON object in this exact format (no markdown, no extra text):
{{"classification": "<bullish|bearish|neutral>", "confidence": <0-100>, "reasoning": "<brief explanation citing recent news>"}}
Rules:
- classification must be exactly one of: bullish, bearish, neutral
- confidence must be an integer from 0 to 100
- reasoning should cite specific recent news or events you found
"""
VALID_CLASSIFICATIONS = {"bullish", "bearish", "neutral"}
class OpenAISentimentProvider:
"""Fetches sentiment analysis from OpenAI Responses API with live web search."""
def __init__(self, api_key: str, model: str = "gpt-4o-mini") -> None:
if not api_key:
raise ProviderError("OpenAI API key is required")
http_kwargs: dict = {}
if _CA_BUNDLE and Path(_CA_BUNDLE).exists():
http_kwargs["verify"] = _CA_BUNDLE
http_client = httpx.AsyncClient(**http_kwargs)
self._client = AsyncOpenAI(api_key=api_key, http_client=http_client)
self._model = model
async def fetch_sentiment(self, ticker: str) -> SentimentData:
"""Use the Responses API with web_search_preview to get live sentiment."""
try:
response = await self._client.responses.create(
model=self._model,
tools=[{"type": "web_search_preview"}],
instructions="You are a financial sentiment analyst. Always respond with valid JSON only, no markdown fences.",
input=_SENTIMENT_PROMPT.format(ticker=ticker),
)
# Extract text from the ResponseOutputMessage in the output
raw_text = ""
for item in response.output:
if item.type == "message" and item.content:
for block in item.content:
if hasattr(block, "text") and block.text:
raw_text = block.text
break
if raw_text:
break
if not raw_text:
raise ProviderError(f"No text output from OpenAI for {ticker}")
raw_text = raw_text.strip()
logger.debug("OpenAI raw response for %s: %s", ticker, raw_text)
# Strip markdown fences if present
clean = raw_text
if clean.startswith("```"):
clean = clean.split("\n", 1)[1] if "\n" in clean else clean[3:]
if clean.endswith("```"):
clean = clean[:-3]
clean = clean.strip()
parsed = json.loads(clean)
classification = parsed.get("classification", "").lower()
if classification not in VALID_CLASSIFICATIONS:
raise ProviderError(
f"Invalid classification '{classification}' from OpenAI for {ticker}"
)
confidence = int(parsed.get("confidence", 50))
confidence = max(0, min(100, confidence))
reasoning = parsed.get("reasoning", "")
if reasoning:
logger.info("OpenAI sentiment for %s: %s (confidence=%d) — %s",
ticker, classification, confidence, reasoning)
# Extract url_citation annotations from response output
citations: list[dict[str, str]] = []
for item in response.output:
if item.type == "message" and item.content:
for block in item.content:
if hasattr(block, "annotations") and block.annotations:
for annotation in block.annotations:
if getattr(annotation, "type", None) == "url_citation":
citations.append({
"url": getattr(annotation, "url", ""),
"title": getattr(annotation, "title", ""),
})
return SentimentData(
ticker=ticker,
classification=classification,
confidence=confidence,
source="openai",
timestamp=datetime.now(timezone.utc),
reasoning=reasoning,
citations=citations,
)
except json.JSONDecodeError as exc:
logger.error("Failed to parse OpenAI JSON for %s: %s — raw: %s", ticker, exc, raw_text)
raise ProviderError(f"Invalid JSON from OpenAI for {ticker}") from exc
except ProviderError:
raise
except Exception as exc:
msg = str(exc).lower()
if "429" in msg or "rate" in msg or "quota" in msg:
raise RateLimitError(f"OpenAI rate limit hit for {ticker}") from exc
logger.error("OpenAI provider error for %s: %s", ticker, exc)
raise ProviderError(f"OpenAI provider error for {ticker}: {exc}") from exc

View File

@@ -7,7 +7,7 @@ transfer data between providers and the service layer.
from __future__ import annotations
from dataclasses import dataclass
from dataclasses import dataclass, field
from datetime import date, datetime
from typing import Protocol
@@ -39,6 +39,8 @@ class SentimentData:
confidence: int # 0-100
source: str
timestamp: datetime
reasoning: str = ""
citations: list[dict[str, str]] = field(default_factory=list) # [{"url": ..., "title": ...}]
@dataclass(frozen=True, slots=True)
@@ -51,6 +53,7 @@ class FundamentalData:
earnings_surprise: float | None
market_cap: float | None
fetched_at: datetime
unavailable_fields: dict[str, str] = field(default_factory=dict)
# ---------------------------------------------------------------------------

View File

@@ -1,5 +1,7 @@
"""Fundamentals router — fundamental data endpoints."""
import json
from fastapi import APIRouter, Depends
from sqlalchemy.ext.asyncio import AsyncSession
@@ -11,6 +13,17 @@ from app.services.fundamental_service import get_fundamental
router = APIRouter(tags=["fundamentals"])
def _parse_unavailable_fields(raw_json: str) -> dict[str, str]:
"""Deserialize unavailable_fields_json, defaulting to {} on invalid JSON."""
try:
parsed = json.loads(raw_json)
except (json.JSONDecodeError, TypeError):
return {}
if not isinstance(parsed, dict):
return {}
return {k: v for k, v in parsed.items() if isinstance(k, str) and isinstance(v, str)}
@router.get("/fundamentals/{symbol}", response_model=APIEnvelope)
async def read_fundamentals(
symbol: str,
@@ -30,6 +43,7 @@ async def read_fundamentals(
earnings_surprise=record.earnings_surprise,
market_cap=record.market_cap,
fetched_at=record.fetched_at,
unavailable_fields=_parse_unavailable_fields(record.unavailable_fields_json),
)
return APIEnvelope(status="success", data=data.model_dump())

View File

@@ -19,7 +19,7 @@ from app.exceptions import ProviderError
from app.models.user import User
from app.providers.alpaca import AlpacaOHLCVProvider
from app.providers.fmp import FMPFundamentalProvider
from app.providers.gemini_sentiment import GeminiSentimentProvider
from app.providers.openai_sentiment import OpenAISentimentProvider
from app.schemas.common import APIEnvelope
from app.services import fundamental_service, ingestion_service, sentiment_service
@@ -67,10 +67,10 @@ async def fetch_symbol(
sources["ohlcv"] = {"status": "error", "records": 0, "message": str(exc)}
# --- Sentiment ---
if settings.gemini_api_key:
if settings.openai_api_key:
try:
sent_provider = GeminiSentimentProvider(
settings.gemini_api_key, settings.gemini_model
sent_provider = OpenAISentimentProvider(
settings.openai_api_key, settings.openai_model
)
data = await sent_provider.fetch_sentiment(symbol_upper)
await sentiment_service.store_sentiment(
@@ -80,6 +80,8 @@ async def fetch_symbol(
confidence=data.confidence,
source=data.source,
timestamp=data.timestamp,
reasoning=data.reasoning,
citations=data.citations,
)
sources["sentiment"] = {
"status": "ok",
@@ -93,7 +95,7 @@ async def fetch_symbol(
else:
sources["sentiment"] = {
"status": "skipped",
"message": "Gemini API key not configured",
"message": "OpenAI API key not configured",
}
# --- Fundamentals ---
@@ -108,6 +110,7 @@ async def fetch_symbol(
revenue_growth=fdata.revenue_growth,
earnings_surprise=fdata.earnings_surprise,
market_cap=fdata.market_cap,
unavailable_fields=fdata.unavailable_fields,
)
sources["fundamentals"] = {"status": "ok", "message": None}
except Exception as exc:

View File

@@ -6,14 +6,41 @@ from sqlalchemy.ext.asyncio import AsyncSession
from app.dependencies import get_db, require_access
from app.schemas.common import APIEnvelope
from app.schemas.score import (
CompositeBreakdownResponse,
DimensionScoreResponse,
RankingEntry,
RankingResponse,
ScoreBreakdownResponse,
ScoreResponse,
SubScoreResponse,
WeightUpdateRequest,
)
from app.services.scoring_service import get_rankings, get_score, update_weights
def _map_breakdown(raw: dict | None) -> ScoreBreakdownResponse | None:
"""Convert a raw breakdown dict from the scoring service into a Pydantic model."""
if raw is None:
return None
return ScoreBreakdownResponse(
sub_scores=[SubScoreResponse(**s) for s in raw.get("sub_scores", [])],
formula=raw.get("formula", ""),
unavailable=raw.get("unavailable", []),
)
def _map_composite_breakdown(raw: dict | None) -> CompositeBreakdownResponse | None:
"""Convert a raw composite breakdown dict into a Pydantic model."""
if raw is None:
return None
return CompositeBreakdownResponse(
weights=raw["weights"],
available_dimensions=raw["available_dimensions"],
missing_dimensions=raw["missing_dimensions"],
renormalized_weights=raw["renormalized_weights"],
formula=raw["formula"],
)
router = APIRouter(tags=["scores"])
@@ -32,10 +59,20 @@ async def read_score(
composite_stale=result["composite_stale"],
weights=result["weights"],
dimensions=[
DimensionScoreResponse(**d) for d in result["dimensions"]
DimensionScoreResponse(
dimension=d["dimension"],
score=d["score"],
is_stale=d["is_stale"],
computed_at=d.get("computed_at"),
breakdown=_map_breakdown(d.get("breakdown")),
)
for d in result["dimensions"]
],
missing_dimensions=result["missing_dimensions"],
computed_at=result["computed_at"],
composite_breakdown=_map_composite_breakdown(
result.get("composite_breakdown")
),
)
return APIEnvelope(status="success", data=data.model_dump(mode="json"))

View File

@@ -1,11 +1,13 @@
"""Sentiment router — sentiment data endpoints."""
import json
from fastapi import APIRouter, Depends, Query
from sqlalchemy.ext.asyncio import AsyncSession
from app.dependencies import get_db, require_access
from app.schemas.common import APIEnvelope
from app.schemas.sentiment import SentimentResponse, SentimentScoreResult
from app.schemas.sentiment import CitationItem, SentimentResponse, SentimentScoreResult
from app.services.sentiment_service import (
compute_sentiment_dimension_score,
get_sentiment_scores,
@@ -14,6 +16,17 @@ from app.services.sentiment_service import (
router = APIRouter(tags=["sentiment"])
def _parse_citations(citations_json: str) -> list[CitationItem]:
"""Deserialize citations_json, defaulting to [] on invalid JSON."""
try:
raw = json.loads(citations_json)
except (json.JSONDecodeError, TypeError):
return []
if not isinstance(raw, list):
return []
return [CitationItem(**item) for item in raw if isinstance(item, dict)]
@router.get("/sentiment/{symbol}", response_model=APIEnvelope)
async def read_sentiment(
symbol: str,
@@ -36,6 +49,8 @@ async def read_sentiment(
confidence=s.confidence,
source=s.source,
timestamp=s.timestamp,
reasoning=s.reasoning,
citations=_parse_citations(s.citations_json),
)
for s in scores
],

View File

@@ -5,8 +5,9 @@ from sqlalchemy.ext.asyncio import AsyncSession
from app.dependencies import get_db, require_access
from app.schemas.common import APIEnvelope
from app.schemas.sr_level import SRLevelResponse, SRLevelResult
from app.services.sr_service import get_sr_levels
from app.schemas.sr_level import SRLevelResponse, SRLevelResult, SRZoneResult
from app.services.price_service import query_ohlcv
from app.services.sr_service import cluster_sr_zones, get_sr_levels
router = APIRouter(tags=["sr-levels"])
@@ -15,24 +16,55 @@ router = APIRouter(tags=["sr-levels"])
async def read_sr_levels(
symbol: str,
tolerance: float = Query(0.005, ge=0, le=0.1, description="Merge tolerance (default 0.5%)"),
max_zones: int = Query(6, ge=0, description="Max S/R zones to return (default 6)"),
_user=Depends(require_access),
db: AsyncSession = Depends(get_db),
) -> APIEnvelope:
"""Get support/resistance levels for a symbol, sorted by strength descending."""
levels = await get_sr_levels(db, symbol, tolerance)
level_results = [
SRLevelResult(
id=lvl.id,
price_level=lvl.price_level,
type=lvl.type,
strength=lvl.strength,
detection_method=lvl.detection_method,
created_at=lvl.created_at,
)
for lvl in levels
]
# Compute S/R zones from the fetched levels
zones: list[SRZoneResult] = []
if levels and max_zones > 0:
# Get current price from latest OHLCV close
ohlcv_records = await query_ohlcv(db, symbol)
if ohlcv_records:
current_price = ohlcv_records[-1].close
level_dicts = [
{"price_level": lvl.price_level, "strength": lvl.strength}
for lvl in levels
]
raw_zones = cluster_sr_zones(
level_dicts, current_price, tolerance=0.02, max_zones=max_zones
)
zones = [SRZoneResult(**z) for z in raw_zones]
# Filter levels to only those within at least one zone's [low, high] range
visible_levels: list[SRLevelResult] = []
if zones:
visible_levels = [
lvl
for lvl in level_results
if any(z.low <= lvl.price_level <= z.high for z in zones)
]
data = SRLevelResponse(
symbol=symbol.upper(),
levels=[
SRLevelResult(
id=lvl.id,
price_level=lvl.price_level,
type=lvl.type,
strength=lvl.strength,
detection_method=lvl.detection_method,
created_at=lvl.created_at,
)
for lvl in levels
],
levels=level_results,
zones=zones,
visible_levels=visible_levels,
count=len(levels),
)
return APIEnvelope(status="success", data=data.model_dump())

View File

@@ -27,7 +27,7 @@ from app.models.settings import SystemSetting
from app.models.ticker import Ticker
from app.providers.alpaca import AlpacaOHLCVProvider
from app.providers.fmp import FMPFundamentalProvider
from app.providers.gemini_sentiment import GeminiSentimentProvider
from app.providers.openai_sentiment import OpenAISentimentProvider
from app.services import fundamental_service, ingestion_service, sentiment_service
from app.services.rr_scanner_service import scan_all_tickers
@@ -174,7 +174,7 @@ async def collect_ohlcv() -> None:
async def collect_sentiment() -> None:
"""Fetch sentiment for all tracked tickers via Gemini.
"""Fetch sentiment for all tracked tickers via OpenAI.
Processes each ticker independently. On rate limit, records last
successful ticker for resume.
@@ -194,12 +194,12 @@ async def collect_sentiment() -> None:
symbols = _resume_tickers(symbols, job_name)
if not settings.gemini_api_key:
logger.warning(json.dumps({"event": "job_skipped", "job": job_name, "reason": "gemini key not configured"}))
if not settings.openai_api_key:
logger.warning(json.dumps({"event": "job_skipped", "job": job_name, "reason": "openai key not configured"}))
return
try:
provider = GeminiSentimentProvider(settings.gemini_api_key, settings.gemini_model)
provider = OpenAISentimentProvider(settings.openai_api_key, settings.openai_model)
except Exception as exc:
logger.error(json.dumps({"event": "job_error", "job": job_name, "error_type": type(exc).__name__, "message": str(exc)}))
return
@@ -217,6 +217,8 @@ async def collect_sentiment() -> None:
confidence=data.confidence,
source=data.source,
timestamp=data.timestamp,
reasoning=data.reasoning,
citations=data.citations,
)
_last_successful[job_name] = symbol
processed += 1
@@ -292,6 +294,7 @@ async def collect_fundamentals() -> None:
revenue_growth=data.revenue_growth,
earnings_surprise=data.earnings_surprise,
market_cap=data.market_cap,
unavailable_fields=data.unavailable_fields,
)
_last_successful[job_name] = symbol
processed += 1

View File

@@ -16,3 +16,4 @@ class FundamentalResponse(BaseModel):
earnings_surprise: float | None = None
market_cap: float | None = None
fetched_at: datetime | None = None
unavailable_fields: dict[str, str] = {}

View File

@@ -7,6 +7,34 @@ from datetime import datetime
from pydantic import BaseModel, Field
class SubScoreResponse(BaseModel):
"""A single sub-score within a dimension breakdown."""
name: str
score: float
weight: float
raw_value: float | str | None = None
description: str = ""
class ScoreBreakdownResponse(BaseModel):
"""Breakdown of a dimension score into sub-scores."""
sub_scores: list[SubScoreResponse]
formula: str
unavailable: list[dict[str, str]] = []
class CompositeBreakdownResponse(BaseModel):
"""Breakdown of the composite score showing dimension weights and re-normalization."""
weights: dict[str, float]
available_dimensions: list[str]
missing_dimensions: list[str]
renormalized_weights: dict[str, float]
formula: str
class DimensionScoreResponse(BaseModel):
"""A single dimension score."""
@@ -14,6 +42,7 @@ class DimensionScoreResponse(BaseModel):
score: float
is_stale: bool
computed_at: datetime | None = None
breakdown: ScoreBreakdownResponse | None = None
class ScoreResponse(BaseModel):
@@ -26,6 +55,7 @@ class ScoreResponse(BaseModel):
dimensions: list[DimensionScoreResponse] = []
missing_dimensions: list[str] = []
computed_at: datetime | None = None
composite_breakdown: CompositeBreakdownResponse | None = None
class WeightUpdateRequest(BaseModel):

View File

@@ -8,6 +8,13 @@ from typing import Literal
from pydantic import BaseModel, Field
class CitationItem(BaseModel):
"""A single citation from the sentiment analysis."""
url: str
title: str
class SentimentScoreResult(BaseModel):
"""A single sentiment score record."""
@@ -16,6 +23,8 @@ class SentimentScoreResult(BaseModel):
confidence: int = Field(ge=0, le=100)
source: str
timestamp: datetime
reasoning: str = ""
citations: list[CitationItem] = []
class SentimentResponse(BaseModel):

View File

@@ -19,9 +19,22 @@ class SRLevelResult(BaseModel):
created_at: datetime
class SRZoneResult(BaseModel):
"""A clustered S/R zone spanning a price range."""
low: float
high: float
midpoint: float
strength: int = Field(ge=0, le=100)
type: Literal["support", "resistance"]
level_count: int
class SRLevelResponse(BaseModel):
"""Envelope-ready S/R levels response."""
symbol: str
levels: list[SRLevelResult]
zones: list[SRZoneResult] = []
visible_levels: list[SRLevelResult] = []
count: int

View File

@@ -6,6 +6,7 @@ and marks the fundamental dimension score as stale on new data.
from __future__ import annotations
import json
import logging
from datetime import datetime, timezone
@@ -37,6 +38,7 @@ async def store_fundamental(
revenue_growth: float | None = None,
earnings_surprise: float | None = None,
market_cap: float | None = None,
unavailable_fields: dict[str, str] | None = None,
) -> FundamentalData:
"""Store or update fundamental data for a ticker.
@@ -52,6 +54,7 @@ async def store_fundamental(
existing = result.scalar_one_or_none()
now = datetime.now(timezone.utc)
unavailable_fields_json = json.dumps(unavailable_fields or {})
if existing is not None:
existing.pe_ratio = pe_ratio
@@ -59,6 +62,7 @@ async def store_fundamental(
existing.earnings_surprise = earnings_surprise
existing.market_cap = market_cap
existing.fetched_at = now
existing.unavailable_fields_json = unavailable_fields_json
record = existing
else:
record = FundamentalData(
@@ -68,6 +72,7 @@ async def store_fundamental(
earnings_surprise=earnings_surprise,
market_cap=market_cap,
fetched_at=now,
unavailable_fields_json=unavailable_fields_json,
)
db.add(record)

View File

@@ -34,10 +34,32 @@ async def _get_ticker(db: AsyncSession, symbol: str) -> Ticker:
return ticker
def _compute_quality_score(
rr: float,
strength: int,
distance: float,
entry_price: float,
*,
w_rr: float = 0.35,
w_strength: float = 0.35,
w_proximity: float = 0.30,
rr_cap: float = 10.0,
) -> float:
"""Compute a quality score for a candidate S/R level.
Combines normalized R:R ratio, level strength, and proximity to entry
into a single 01 score using configurable weights.
"""
norm_rr = min(rr / rr_cap, 1.0)
norm_strength = strength / 100.0
norm_proximity = 1.0 - min(distance / entry_price, 1.0)
return w_rr * norm_rr + w_strength * norm_strength + w_proximity * norm_proximity
async def scan_ticker(
db: AsyncSession,
symbol: str,
rr_threshold: float = 3.0,
rr_threshold: float = 1.5,
atr_multiplier: float = 1.5,
) -> list[TradeSetup]:
"""Scan a single ticker for trade setups meeting the R:R threshold.
@@ -120,41 +142,65 @@ async def scan_ticker(
setups: list[TradeSetup] = []
# Long setup: target = nearest SR above, stop = entry - ATR × multiplier
# Check all resistance levels above and pick the one with the best quality score
if levels_above:
target = levels_above[0].price_level
stop = entry_price - (atr_value * atr_multiplier)
reward = target - entry_price
risk = entry_price - stop
if risk > 0 and reward > 0:
rr = reward / risk
if rr >= rr_threshold:
if risk > 0:
best_quality = 0.0
best_candidate_rr = 0.0
best_candidate_target = 0.0
for lv in levels_above:
reward = lv.price_level - entry_price
if reward > 0:
rr = reward / risk
if rr >= rr_threshold:
distance = lv.price_level - entry_price
quality = _compute_quality_score(rr, lv.strength, distance, entry_price)
if quality > best_quality:
best_quality = quality
best_candidate_rr = rr
best_candidate_target = lv.price_level
if best_candidate_rr > 0:
setups.append(TradeSetup(
ticker_id=ticker.id,
direction="long",
entry_price=round(entry_price, 4),
stop_loss=round(stop, 4),
target=round(target, 4),
rr_ratio=round(rr, 4),
target=round(best_candidate_target, 4),
rr_ratio=round(best_candidate_rr, 4),
composite_score=round(composite_score, 4),
detected_at=now,
))
# Short setup: target = nearest SR below, stop = entry + ATR × multiplier
# Check all support levels below and pick the one with the best quality score
if levels_below:
target = levels_below[0].price_level
stop = entry_price + (atr_value * atr_multiplier)
reward = entry_price - target
risk = stop - entry_price
if risk > 0 and reward > 0:
rr = reward / risk
if rr >= rr_threshold:
if risk > 0:
best_quality = 0.0
best_candidate_rr = 0.0
best_candidate_target = 0.0
for lv in levels_below:
reward = entry_price - lv.price_level
if reward > 0:
rr = reward / risk
if rr >= rr_threshold:
distance = entry_price - lv.price_level
quality = _compute_quality_score(rr, lv.strength, distance, entry_price)
if quality > best_quality:
best_quality = quality
best_candidate_rr = rr
best_candidate_target = lv.price_level
if best_candidate_rr > 0:
setups.append(TradeSetup(
ticker_id=ticker.id,
direction="short",
entry_price=round(entry_price, 4),
stop_loss=round(stop, 4),
target=round(target, 4),
rr_ratio=round(rr, 4),
target=round(best_candidate_target, 4),
rr_ratio=round(best_candidate_rr, 4),
composite_score=round(composite_score, 4),
detected_at=now,
))
@@ -177,7 +223,7 @@ async def scan_ticker(
async def scan_all_tickers(
db: AsyncSession,
rr_threshold: float = 3.0,
rr_threshold: float = 1.5,
atr_multiplier: float = 1.5,
) -> list[TradeSetup]:
"""Scan all tracked tickers for trade setups.

View File

@@ -85,8 +85,14 @@ async def _save_weights(db: AsyncSession, weights: dict[str, float]) -> None:
# Dimension score computation
# ---------------------------------------------------------------------------
async def _compute_technical_score(db: AsyncSession, symbol: str) -> float | None:
"""Compute technical dimension score from ADX, EMA, RSI."""
async def _compute_technical_score(
db: AsyncSession, symbol: str
) -> tuple[float | None, dict | None]:
"""Compute technical dimension score from ADX, EMA, RSI.
Returns (score, breakdown) where breakdown follows the ScoreBreakdown
TypedDict shape: {sub_scores, formula, unavailable}.
"""
from app.services.indicator_service import (
compute_adx,
compute_ema,
@@ -97,147 +103,366 @@ async def _compute_technical_score(db: AsyncSession, symbol: str) -> float | Non
records = await query_ohlcv(db, symbol)
if not records:
return None
return None, None
_, highs, lows, closes, _ = _extract_ohlcv(records)
scores: list[tuple[float, float]] = [] # (weight, score)
sub_scores: list[dict] = []
unavailable: list[dict[str, str]] = []
# ADX (weight 0.4) — needs 28+ bars
try:
adx_result = compute_adx(highs, lows, closes)
scores.append((0.4, adx_result["score"]))
except Exception:
pass
sub_scores.append({
"name": "ADX",
"score": adx_result["score"],
"weight": 0.4,
"raw_value": adx_result["adx"],
"description": "ADX value (0-100). Higher = stronger trend.",
})
except Exception as exc:
unavailable.append({"name": "ADX", "reason": str(exc) or "Insufficient data for ADX"})
# EMA (weight 0.3) — needs period+1 bars
try:
ema_result = compute_ema(closes)
pct_diff = (
round(
(ema_result["latest_close"] - ema_result["ema"])
/ ema_result["ema"]
* 100.0,
4,
)
if ema_result["ema"] != 0
else 0.0
)
scores.append((0.3, ema_result["score"]))
except Exception:
pass
sub_scores.append({
"name": "EMA",
"score": ema_result["score"],
"weight": 0.3,
"raw_value": pct_diff,
"description": f"Price {pct_diff}% {'above' if pct_diff >= 0 else 'below'} EMA(20). Score: 50 + pct_diff * 10.",
})
except Exception as exc:
unavailable.append({"name": "EMA", "reason": str(exc) or "Insufficient data for EMA"})
# RSI (weight 0.3) — needs 15+ bars
try:
rsi_result = compute_rsi(closes)
scores.append((0.3, rsi_result["score"]))
except Exception:
pass
sub_scores.append({
"name": "RSI",
"score": rsi_result["score"],
"weight": 0.3,
"raw_value": rsi_result["rsi"],
"description": "RSI(14) value. Score equals RSI.",
})
except Exception as exc:
unavailable.append({"name": "RSI", "reason": str(exc) or "Insufficient data for RSI"})
if not scores:
return None
breakdown: dict = {
"sub_scores": [],
"formula": "Weighted average: 0.4*ADX + 0.3*EMA + 0.3*RSI, re-normalized if any sub-score unavailable.",
"unavailable": unavailable,
}
return None, breakdown
total_weight = sum(w for w, _ in scores)
if total_weight == 0:
return None
return None, None
weighted = sum(w * s for w, s in scores) / total_weight
return max(0.0, min(100.0, weighted))
final_score = max(0.0, min(100.0, weighted))
breakdown = {
"sub_scores": sub_scores,
"formula": "Weighted average: 0.4*ADX + 0.3*EMA + 0.3*RSI, re-normalized if any sub-score unavailable.",
"unavailable": unavailable,
}
return final_score, breakdown
async def _compute_sr_quality_score(db: AsyncSession, symbol: str) -> float | None:
async def _compute_sr_quality_score(
db: AsyncSession, symbol: str
) -> tuple[float | None, dict | None]:
"""Compute S/R quality dimension score.
Based on number of strong levels, proximity to current price, avg strength.
Returns (score, breakdown) where breakdown follows the ScoreBreakdown
TypedDict shape: {sub_scores, formula, unavailable}.
"""
from app.services.price_service import query_ohlcv
from app.services.sr_service import get_sr_levels
formula = "Sum of sub-scores: Strong Count (max 40) + Proximity (max 30) + Avg Strength (max 30), clamped to [0, 100]."
records = await query_ohlcv(db, symbol)
if not records:
return None
return None, None
current_price = float(records[-1].close)
if current_price <= 0:
return None
return None, None
try:
levels = await get_sr_levels(db, symbol)
except Exception:
return None
return None, None
if not levels:
return None
return None, None
sub_scores: list[dict] = []
# Factor 1: Number of strong levels (strength >= 50) — max 40 pts
strong_count = sum(1 for lv in levels if lv.strength >= 50)
count_score = min(40.0, strong_count * 10.0)
sub_scores.append({
"name": "Strong Count",
"score": count_score,
"weight": 40.0,
"raw_value": strong_count,
"description": f"{strong_count} strong level(s) (strength >= 50). Score: min(40, count * 10).",
})
# Factor 2: Proximity of nearest level to current price — max 30 pts
distances = [
abs(lv.price_level - current_price) / current_price for lv in levels
]
nearest_dist = min(distances) if distances else 1.0
nearest_dist_pct = round(nearest_dist * 100.0, 4)
# Closer = higher score. 0% distance = 30, 5%+ = 0
proximity_score = max(0.0, min(30.0, 30.0 * (1.0 - nearest_dist / 0.05)))
sub_scores.append({
"name": "Proximity",
"score": proximity_score,
"weight": 30.0,
"raw_value": nearest_dist_pct,
"description": f"Nearest S/R level is {nearest_dist_pct}% from price. Score: 30 * (1 - dist/5%), clamped to [0, 30].",
})
# Factor 3: Average strength — max 30 pts
avg_strength = sum(lv.strength for lv in levels) / len(levels)
strength_score = min(30.0, avg_strength * 0.3)
sub_scores.append({
"name": "Avg Strength",
"score": strength_score,
"weight": 30.0,
"raw_value": round(avg_strength, 4),
"description": f"Average level strength: {round(avg_strength, 2)}. Score: min(30, avg * 0.3).",
})
total = count_score + proximity_score + strength_score
return max(0.0, min(100.0, total))
final_score = max(0.0, min(100.0, total))
breakdown: dict = {
"sub_scores": sub_scores,
"formula": formula,
"unavailable": [],
}
return final_score, breakdown
async def _compute_sentiment_score(db: AsyncSession, symbol: str) -> float | None:
"""Compute sentiment dimension score via sentiment service."""
from app.services.sentiment_service import compute_sentiment_dimension_score
async def _compute_sentiment_score(
db: AsyncSession, symbol: str
) -> tuple[float | None, dict | None]:
"""Compute sentiment dimension score via sentiment service.
Returns (score, breakdown) where breakdown follows the ScoreBreakdown
TypedDict shape: {sub_scores, formula, unavailable}.
"""
from app.services.sentiment_service import (
compute_sentiment_dimension_score,
get_sentiment_scores,
)
lookback_hours: float = 24
decay_rate: float = 0.1
try:
return await compute_sentiment_dimension_score(db, symbol)
scores = await get_sentiment_scores(db, symbol, lookback_hours)
except Exception:
return None
return None, None
if not scores:
breakdown: dict = {
"sub_scores": [],
"formula": (
f"Time-decay weighted average over {lookback_hours}h window "
f"with decay_rate={decay_rate}: "
"sum(base_score * exp(-decay_rate * hours_since)) / sum(exp(-decay_rate * hours_since))"
),
"unavailable": [
{"name": "sentiment_records", "reason": "No sentiment records in lookback window"}
],
}
return None, breakdown
try:
score = await compute_sentiment_dimension_score(db, symbol, lookback_hours, decay_rate)
except Exception:
return None, None
sub_scores: list[dict] = [
{
"name": "record_count",
"score": score if score is not None else 0.0,
"weight": 1.0,
"raw_value": len(scores),
"description": f"Number of sentiment records used in the lookback window ({lookback_hours}h).",
},
{
"name": "decay_rate",
"score": score if score is not None else 0.0,
"weight": 1.0,
"raw_value": decay_rate,
"description": "Exponential decay rate applied to older records (higher = faster decay).",
},
{
"name": "lookback_window",
"score": score if score is not None else 0.0,
"weight": 1.0,
"raw_value": lookback_hours,
"description": f"Lookback window in hours for sentiment records ({lookback_hours}h).",
},
]
formula = (
f"Time-decay weighted average over {lookback_hours}h window "
f"with decay_rate={decay_rate}: "
"sum(base_score * exp(-decay_rate * hours_since)) / sum(exp(-decay_rate * hours_since))"
)
breakdown = {
"sub_scores": sub_scores,
"formula": formula,
"unavailable": [],
}
return score, breakdown
async def _compute_fundamental_score(db: AsyncSession, symbol: str) -> float | None:
async def _compute_fundamental_score(
db: AsyncSession, symbol: str
) -> tuple[float | None, dict | None]:
"""Compute fundamental dimension score.
Normalized composite of P/E (lower is better), revenue growth
(higher is better), earnings surprise (higher is better).
Returns (score, breakdown) where breakdown follows the ScoreBreakdown
TypedDict shape: {sub_scores, formula, unavailable}.
"""
from app.services.fundamental_service import get_fundamental
fund = await get_fundamental(db, symbol)
if fund is None:
return None
return None, None
weight = 1.0 / 3.0
scores: list[float] = []
sub_scores: list[dict] = []
unavailable: list[dict[str, str]] = []
formula = (
"Equal-weighted average of available sub-scores: "
"(PE_Ratio + Revenue_Growth + Earnings_Surprise) / count. "
"PE: 100 - (pe - 15) * (100/30), clamped [0,100]. "
"Revenue Growth: 50 + growth% * 2.5, clamped [0,100]. "
"Earnings Surprise: 50 + surprise% * 5.0, clamped [0,100]."
)
# P/E: lower is better. 0-15 = 100, 15-30 = 50-100, 30+ = 0-50
if fund.pe_ratio is not None and fund.pe_ratio > 0:
pe_score = max(0.0, min(100.0, 100.0 - (fund.pe_ratio - 15.0) * (100.0 / 30.0)))
scores.append(pe_score)
sub_scores.append({
"name": "PE Ratio",
"score": pe_score,
"weight": weight,
"raw_value": fund.pe_ratio,
"description": "PE ratio (lower is better). Score: 100 - (pe - 15) * (100/30), clamped [0,100].",
})
else:
unavailable.append({
"name": "PE Ratio",
"reason": "PE ratio not available or not positive",
})
# Revenue growth: higher is better. 0% = 50, 20%+ = 100, -20% = 0
if fund.revenue_growth is not None:
rg_score = max(0.0, min(100.0, 50.0 + fund.revenue_growth * 2.5))
scores.append(rg_score)
sub_scores.append({
"name": "Revenue Growth",
"score": rg_score,
"weight": weight,
"raw_value": fund.revenue_growth,
"description": "Revenue growth %. Score: 50 + growth% * 2.5, clamped [0,100].",
})
else:
unavailable.append({
"name": "Revenue Growth",
"reason": "Revenue growth data not available",
})
# Earnings surprise: higher is better. 0% = 50, 10%+ = 100, -10% = 0
if fund.earnings_surprise is not None:
es_score = max(0.0, min(100.0, 50.0 + fund.earnings_surprise * 5.0))
scores.append(es_score)
sub_scores.append({
"name": "Earnings Surprise",
"score": es_score,
"weight": weight,
"raw_value": fund.earnings_surprise,
"description": "Earnings surprise %. Score: 50 + surprise% * 5.0, clamped [0,100].",
})
else:
unavailable.append({
"name": "Earnings Surprise",
"reason": "Earnings surprise data not available",
})
breakdown: dict = {
"sub_scores": sub_scores,
"formula": formula,
"unavailable": unavailable,
}
if not scores:
return None
return None, breakdown
return sum(scores) / len(scores)
return sum(scores) / len(scores), breakdown
async def _compute_momentum_score(db: AsyncSession, symbol: str) -> float | None:
async def _compute_momentum_score(
db: AsyncSession, symbol: str
) -> tuple[float | None, dict | None]:
"""Compute momentum dimension score.
Rate of change of price over 5-day and 20-day lookback periods.
Returns (score, breakdown) where breakdown follows the ScoreBreakdown
TypedDict shape: {sub_scores, formula, unavailable}.
"""
from app.services.price_service import query_ohlcv
formula = "Weighted average: 0.5 * ROC_5 + 0.5 * ROC_20, re-normalized if any sub-score unavailable."
records = await query_ohlcv(db, symbol)
if not records or len(records) < 6:
return None
return None, None
closes = [float(r.close) for r in records]
latest = closes[-1]
scores: list[tuple[float, float]] = [] # (weight, score)
sub_scores: list[dict] = []
unavailable: list[dict[str, str]] = []
# 5-day ROC (weight 0.5)
if len(closes) >= 6 and closes[-6] > 0:
@@ -245,21 +470,52 @@ async def _compute_momentum_score(db: AsyncSession, symbol: str) -> float | None
# Map: -10% → 0, 0% → 50, +10% → 100
score_5 = max(0.0, min(100.0, 50.0 + roc_5 * 5.0))
scores.append((0.5, score_5))
sub_scores.append({
"name": "5-day ROC",
"score": score_5,
"weight": 0.5,
"raw_value": round(roc_5, 4),
"description": f"5-day rate of change: {round(roc_5, 2)}%. Score: 50 + ROC * 5, clamped to [0, 100].",
})
else:
unavailable.append({"name": "5-day ROC", "reason": "Need at least 6 closing prices"})
# 20-day ROC (weight 0.5)
if len(closes) >= 21 and closes[-21] > 0:
roc_20 = (latest - closes[-21]) / closes[-21] * 100.0
score_20 = max(0.0, min(100.0, 50.0 + roc_20 * 5.0))
scores.append((0.5, score_20))
sub_scores.append({
"name": "20-day ROC",
"score": score_20,
"weight": 0.5,
"raw_value": round(roc_20, 4),
"description": f"20-day rate of change: {round(roc_20, 2)}%. Score: 50 + ROC * 5, clamped to [0, 100].",
})
else:
unavailable.append({"name": "20-day ROC", "reason": "Need at least 21 closing prices"})
if not scores:
return None
breakdown: dict = {
"sub_scores": [],
"formula": formula,
"unavailable": unavailable,
}
return None, breakdown
total_weight = sum(w for w, _ in scores)
if total_weight == 0:
return None
return None, None
weighted = sum(w * s for w, s in scores) / total_weight
return max(0.0, min(100.0, weighted))
final_score = max(0.0, min(100.0, weighted))
breakdown = {
"sub_scores": sub_scores,
"formula": formula,
"unavailable": unavailable,
}
return final_score, breakdown
_DIMENSION_COMPUTERS = {
@@ -289,7 +545,13 @@ async def compute_dimension_score(
)
ticker = await _get_ticker(db, symbol)
score_val = await _DIMENSION_COMPUTERS[dimension](db, symbol)
raw_result = await _DIMENSION_COMPUTERS[dimension](db, symbol)
# Handle both tuple (score, breakdown) and plain float | None returns
if isinstance(raw_result, tuple):
score_val = raw_result[0]
else:
score_val = raw_result
now = datetime.now(timezone.utc)
@@ -406,13 +668,15 @@ async def compute_composite_score(
return composite, missing
async def get_score(
db: AsyncSession, symbol: str
) -> dict:
"""Get composite + all dimension scores for a ticker.
Recomputes stale dimensions on demand, then recomputes composite.
Returns a dict suitable for ScoreResponse.
Returns a dict suitable for ScoreResponse, including dimension breakdowns
and composite breakdown with re-normalization info.
"""
ticker = await _get_ticker(db, symbol)
weights = await _get_weights(db)
@@ -450,19 +714,64 @@ async def get_score(
)
comp = comp_result.scalar_one_or_none()
# Compute breakdowns for each dimension by calling the dimension computers
breakdowns: dict[str, dict | None] = {}
for dim in DIMENSIONS:
try:
raw_result = await _DIMENSION_COMPUTERS[dim](db, symbol)
if isinstance(raw_result, tuple) and len(raw_result) == 2:
breakdowns[dim] = raw_result[1]
else:
breakdowns[dim] = None
except Exception:
breakdowns[dim] = None
# Build dimension entries with breakdowns
dimensions = []
missing = []
available_dims: list[str] = []
for dim in DIMENSIONS:
found = next((ds for ds in dim_scores_list if ds.dimension == dim), None)
if found is not None:
if found is not None and not found.is_stale and found.score is not None:
dimensions.append({
"dimension": found.dimension,
"score": found.score,
"is_stale": found.is_stale,
"computed_at": found.computed_at,
"breakdown": breakdowns.get(dim),
})
w = weights.get(dim, 0.0)
if w > 0:
available_dims.append(dim)
else:
missing.append(dim)
# Still include stale dimensions in the list if they exist in DB
if found is not None:
dimensions.append({
"dimension": found.dimension,
"score": found.score,
"is_stale": found.is_stale,
"computed_at": found.computed_at,
"breakdown": breakdowns.get(dim),
})
# Build composite breakdown with re-normalization info
composite_breakdown = None
available_weight_sum = sum(weights.get(d, 0.0) for d in available_dims)
if available_weight_sum > 0:
renormalized_weights = {
d: weights.get(d, 0.0) / available_weight_sum for d in available_dims
}
else:
renormalized_weights = {}
composite_breakdown = {
"weights": weights,
"available_dimensions": available_dims,
"missing_dimensions": missing,
"renormalized_weights": renormalized_weights,
"formula": "Weighted average of available dimensions with re-normalized weights: sum(weight_i * score_i) / sum(weight_i)",
}
return {
"symbol": ticker.symbol,
@@ -472,9 +781,11 @@ async def get_score(
"dimensions": dimensions,
"missing_dimensions": missing,
"computed_at": comp.computed_at if comp else None,
"composite_breakdown": composite_breakdown,
}
async def get_rankings(db: AsyncSession) -> dict:
"""Get all tickers ranked by composite score descending.

View File

@@ -6,6 +6,7 @@ using a time-decay weighted average over a configurable lookback window.
from __future__ import annotations
import json
import math
from datetime import datetime, timedelta, timezone
@@ -34,6 +35,8 @@ async def store_sentiment(
confidence: int,
source: str,
timestamp: datetime | None = None,
reasoning: str = "",
citations: list[dict] | None = None,
) -> SentimentScore:
"""Store a new sentiment record for a ticker."""
ticker = await _get_ticker(db, symbol)
@@ -41,12 +44,17 @@ async def store_sentiment(
if timestamp is None:
timestamp = datetime.now(timezone.utc)
if citations is None:
citations = []
record = SentimentScore(
ticker_id=ticker.id,
classification=classification,
confidence=confidence,
source=source,
timestamp=timestamp,
reasoning=reasoning,
citations_json=json.dumps(citations),
)
db.add(record)
await db.commit()

View File

@@ -204,6 +204,121 @@ def detect_sr_levels(
return tagged
def cluster_sr_zones(
levels: list[dict],
current_price: float,
tolerance: float = 0.02,
max_zones: int | None = None,
) -> list[dict]:
"""Cluster nearby S/R levels into zones.
Returns list of zone dicts:
{
"low": float,
"high": float,
"midpoint": float,
"strength": int, # sum of constituent strengths, capped at 100
"type": "support" | "resistance",
"level_count": int,
}
"""
if not levels:
return []
if max_zones is not None and max_zones <= 0:
return []
# 1. Sort levels by price_level ascending
sorted_levels = sorted(levels, key=lambda x: x["price_level"])
# 2. Greedy merge into clusters
clusters: list[list[dict]] = []
current_cluster: list[dict] = [sorted_levels[0]]
for level in sorted_levels[1:]:
# Compute current cluster midpoint
prices = [l["price_level"] for l in current_cluster]
cluster_low = min(prices)
cluster_high = max(prices)
cluster_mid = (cluster_low + cluster_high) / 2.0
# Check if within tolerance of cluster midpoint
if cluster_mid != 0:
distance_pct = abs(level["price_level"] - cluster_mid) / cluster_mid
else:
distance_pct = abs(level["price_level"])
if distance_pct <= tolerance:
current_cluster.append(level)
else:
clusters.append(current_cluster)
current_cluster = [level]
clusters.append(current_cluster)
# 3. Compute zone for each cluster
zones: list[dict] = []
for cluster in clusters:
prices = [l["price_level"] for l in cluster]
low = min(prices)
high = max(prices)
midpoint = (low + high) / 2.0
strength = min(100, sum(l["strength"] for l in cluster))
level_count = len(cluster)
# 4. Tag zone type
zone_type = "support" if midpoint < current_price else "resistance"
zones.append({
"low": low,
"high": high,
"midpoint": midpoint,
"strength": strength,
"type": zone_type,
"level_count": level_count,
})
# 5. Split into support and resistance pools, each sorted by strength desc
support_zones = sorted(
[z for z in zones if z["type"] == "support"],
key=lambda z: z["strength"],
reverse=True,
)
resistance_zones = sorted(
[z for z in zones if z["type"] == "resistance"],
key=lambda z: z["strength"],
reverse=True,
)
# 6. Interleave pick: alternate strongest from each pool
selected: list[dict] = []
limit = max_zones if max_zones is not None else len(zones)
si, ri = 0, 0
pick_support = True # start with support pool
while len(selected) < limit and (si < len(support_zones) or ri < len(resistance_zones)):
if pick_support:
if si < len(support_zones):
selected.append(support_zones[si])
si += 1
elif ri < len(resistance_zones):
selected.append(resistance_zones[ri])
ri += 1
else:
if ri < len(resistance_zones):
selected.append(resistance_zones[ri])
ri += 1
elif si < len(support_zones):
selected.append(support_zones[si])
si += 1
pick_support = not pick_support
# 7. Sort final selection by strength descending
selected.sort(key=lambda z: z["strength"], reverse=True)
return selected
async def recalculate_sr_levels(
db: AsyncSession,

View File

@@ -21,7 +21,7 @@ export class ApiError extends Error {
*/
const apiClient = axios.create({
baseURL: '/api/v1/',
timeout: 30_000,
timeout: 120_000,
headers: { 'Content-Type': 'application/json' },
});

View File

@@ -13,6 +13,6 @@ export function getRankings() {
export function updateWeights(weights: Record<string, number>) {
return apiClient
.put<{ message: string }>('scores/weights', weights)
.put<{ message: string }>('scores/weights', { weights })
.then((r) => r.data);
}

View File

@@ -1,11 +1,13 @@
import { useMemo, useRef, useEffect, useCallback } from 'react';
import type { OHLCVBar, SRLevel } from '../../lib/types';
import { useMemo, useRef, useEffect, useCallback, useState } from 'react';
import type { OHLCVBar, SRLevel, SRZone, TradeSetup } from '../../lib/types';
import { formatPrice, formatDate } from '../../lib/format';
interface CandlestickChartProps {
data: OHLCVBar[];
srLevels?: SRLevel[];
maxSRLevels?: number;
zones?: SRZone[];
tradeSetup?: TradeSetup;
}
function filterTopSRLevels(levels: SRLevel[], max: number): SRLevel[] {
@@ -20,12 +22,29 @@ interface TooltipState {
bar: OHLCVBar | null;
}
export function CandlestickChart({ data, srLevels = [], maxSRLevels = 6 }: CandlestickChartProps) {
const MIN_VISIBLE_BARS = 10;
export function CandlestickChart({ data, srLevels = [], maxSRLevels = 6, zones = [], tradeSetup }: CandlestickChartProps) {
const canvasRef = useRef<HTMLCanvasElement>(null);
const overlayCanvasRef = useRef<HTMLCanvasElement>(null);
const containerRef = useRef<HTMLDivElement>(null);
const tooltipRef = useRef<HTMLDivElement>(null);
const tooltipState = useRef<TooltipState>({ visible: false, x: 0, y: 0, bar: null });
const crosshairRef = useRef<{ x: number; y: number } | null>(null);
const animFrame = useRef<number>(0);
const zoomFrame = useRef<number>(0);
const isPanningRef = useRef<boolean>(false);
const panStartXRef = useRef<number>(0);
const [visibleRange, setVisibleRange] = useState<{ start: number; end: number }>({
start: 0,
end: data.length,
});
// Reset visible range when data changes
useEffect(() => {
setVisibleRange({ start: 0, end: data.length });
}, [data]);
const topLevels = useMemo(() => filterTopSRLevels(srLevels, maxSRLevels), [srLevels, maxSRLevels]);
@@ -34,6 +53,12 @@ export function CandlestickChart({ data, srLevels = [], maxSRLevels = 6 }: Candl
const container = containerRef.current;
if (!canvas || !container || data.length === 0) return;
// Clamp visible range to valid bounds
const start = Math.max(0, visibleRange.start);
const end = Math.min(data.length, visibleRange.end);
const visibleData = data.slice(start, end);
if (visibleData.length === 0) return;
const dpr = window.devicePixelRatio || 1;
const rect = container.getBoundingClientRect();
const W = rect.width;
@@ -54,10 +79,12 @@ export function CandlestickChart({ data, srLevels = [], maxSRLevels = 6 }: Candl
const cw = W - ml - mr;
const ch = H - mt - mb;
// Price range
const allPrices = data.flatMap((b) => [b.high, b.low]);
// Price range from visible data
const allPrices = visibleData.flatMap((b) => [b.high, b.low]);
const srPrices = topLevels.map((l) => l.price_level);
const allVals = [...allPrices, ...srPrices];
const zonePrices = zones.flatMap((z) => [z.low, z.high]);
const tradePrices = tradeSetup ? [tradeSetup.entry_price, tradeSetup.stop_loss, tradeSetup.target] : [];
const allVals = [...allPrices, ...srPrices, ...zonePrices, ...tradePrices];
const minP = Math.min(...allVals);
const maxP = Math.max(...allVals);
const pad = (maxP - minP) * 0.06 || 1;
@@ -65,7 +92,7 @@ export function CandlestickChart({ data, srLevels = [], maxSRLevels = 6 }: Candl
const hi = maxP + pad;
const yScale = (v: number) => mt + ch - ((v - lo) / (hi - lo)) * ch;
const barW = cw / data.length;
const barW = cw / visibleData.length;
const candleW = Math.max(barW * 0.65, 1);
// Grid lines (horizontal)
@@ -87,43 +114,142 @@ export function CandlestickChart({ data, srLevels = [], maxSRLevels = 6 }: Candl
// X-axis labels
ctx.textAlign = 'center';
const labelInterval = Math.max(Math.floor(data.length / 8), 1);
for (let i = 0; i < data.length; i += labelInterval) {
const labelInterval = Math.max(Math.floor(visibleData.length / 8), 1);
for (let i = 0; i < visibleData.length; i += labelInterval) {
const x = ml + i * barW + barW / 2;
ctx.fillStyle = '#6b7280';
ctx.fillText(formatDate(data[i].date), x, H - 6);
ctx.fillText(formatDate(visibleData[i].date), x, H - 6);
}
// S/R levels
topLevels.forEach((level) => {
const y = yScale(level.price_level);
const isSupport = level.type === 'support';
const color = isSupport ? '#10b981' : '#ef4444';
// S/R levels (only when no zones are provided)
if (zones.length === 0) {
topLevels.forEach((level) => {
const y = yScale(level.price_level);
const isSupport = level.type === 'support';
const color = isSupport ? '#10b981' : '#ef4444';
ctx.strokeStyle = color;
ctx.lineWidth = 1.5;
ctx.globalAlpha = 0.55;
ctx.setLineDash([6, 3]);
ctx.strokeStyle = color;
ctx.lineWidth = 1.5;
ctx.globalAlpha = 0.55;
ctx.setLineDash([6, 3]);
ctx.beginPath();
ctx.moveTo(ml, y);
ctx.lineTo(ml + cw, y);
ctx.stroke();
ctx.setLineDash([]);
ctx.globalAlpha = 1;
// Label
ctx.fillStyle = color;
ctx.font = '10px Inter, system-ui, sans-serif';
ctx.textAlign = 'left';
ctx.fillText(
`${level.type[0].toUpperCase()} ${formatPrice(level.price_level)}`,
ml + cw + 4,
y + 3
);
});
}
// S/R Zone rectangles (drawn before candles so candles render on top)
zones.forEach((zone) => {
const isSupport = zone.type === 'support';
const fillColor = isSupport ? 'rgba(16, 185, 129, 0.15)' : 'rgba(239, 68, 68, 0.15)';
const borderColor = isSupport ? 'rgba(16, 185, 129, 0.35)' : 'rgba(239, 68, 68, 0.35)';
const labelColor = isSupport ? '#10b981' : '#ef4444';
const yTop = yScale(zone.high);
const yBottom = yScale(zone.low);
// Handle single-level zones (low == high) as thin 2px-height rectangles
const rectHeight = Math.max(yBottom - yTop, 2);
// Shaded rectangle spanning full chart width
ctx.fillStyle = fillColor;
ctx.fillRect(ml, yTop, cw, rectHeight);
// Border lines at top and bottom of zone
ctx.strokeStyle = borderColor;
ctx.lineWidth = 1;
ctx.beginPath();
ctx.moveTo(ml, y);
ctx.lineTo(ml + cw, y);
ctx.moveTo(ml, yTop);
ctx.lineTo(ml + cw, yTop);
ctx.stroke();
ctx.beginPath();
ctx.moveTo(ml, yTop + rectHeight);
ctx.lineTo(ml + cw, yTop + rectHeight);
ctx.stroke();
ctx.setLineDash([]);
ctx.globalAlpha = 1;
// Label
ctx.fillStyle = color;
// Label with midpoint price and strength score
const yMid = yTop + rectHeight / 2;
ctx.fillStyle = labelColor;
ctx.font = '10px Inter, system-ui, sans-serif';
ctx.textAlign = 'left';
ctx.fillText(
`${level.type[0].toUpperCase()} ${formatPrice(level.price_level)}`,
`${zone.type[0].toUpperCase()} ${formatPrice(zone.midpoint)} (${zone.strength})`,
ml + cw + 4,
y + 3
yMid + 3
);
});
// Trade setup overlay (drawn before candles so candles render on top)
if (tradeSetup) {
const entryY = yScale(tradeSetup.entry_price);
const stopY = yScale(tradeSetup.stop_loss);
const targetY = yScale(tradeSetup.target);
// Stop-loss zone: red semi-transparent rectangle between entry and stop-loss
const slTop = Math.min(entryY, stopY);
const slHeight = Math.max(Math.abs(stopY - entryY), 1);
ctx.fillStyle = 'rgba(239, 68, 68, 0.13)';
ctx.fillRect(ml, slTop, cw, slHeight);
// Stop-loss border
ctx.strokeStyle = 'rgba(239, 68, 68, 0.4)';
ctx.lineWidth = 1;
ctx.setLineDash([4, 3]);
ctx.beginPath();
ctx.moveTo(ml, stopY);
ctx.lineTo(ml + cw, stopY);
ctx.stroke();
ctx.setLineDash([]);
// Take-profit zone: green semi-transparent rectangle between entry and target
const tpTop = Math.min(entryY, targetY);
const tpHeight = Math.max(Math.abs(targetY - entryY), 1);
ctx.fillStyle = 'rgba(16, 185, 129, 0.13)';
ctx.fillRect(ml, tpTop, cw, tpHeight);
// Target border
ctx.strokeStyle = 'rgba(16, 185, 129, 0.4)';
ctx.lineWidth = 1;
ctx.setLineDash([4, 3]);
ctx.beginPath();
ctx.moveTo(ml, targetY);
ctx.lineTo(ml + cw, targetY);
ctx.stroke();
ctx.setLineDash([]);
// Entry price: dashed horizontal line (blue/white)
ctx.strokeStyle = 'rgba(96, 165, 250, 0.9)';
ctx.lineWidth = 1.5;
ctx.setLineDash([6, 4]);
ctx.beginPath();
ctx.moveTo(ml, entryY);
ctx.lineTo(ml + cw, entryY);
ctx.stroke();
ctx.setLineDash([]);
// Labels on right side
ctx.font = '10px Inter, system-ui, sans-serif';
ctx.textAlign = 'left';
ctx.fillStyle = 'rgba(96, 165, 250, 0.9)';
ctx.fillText(`Entry ${formatPrice(tradeSetup.entry_price)}`, ml + cw + 4, entryY + 3);
ctx.fillStyle = 'rgba(239, 68, 68, 0.8)';
ctx.fillText(`SL ${formatPrice(tradeSetup.stop_loss)}`, ml + cw + 4, stopY + 3);
ctx.fillStyle = 'rgba(16, 185, 129, 0.8)';
ctx.fillText(`TP ${formatPrice(tradeSetup.target)}`, ml + cw + 4, targetY + 3);
}
// Candles
data.forEach((bar, i) => {
visibleData.forEach((bar, i) => {
const x = ml + i * barW + barW / 2;
const bullish = bar.close >= bar.open;
const color = bullish ? '#10b981' : '#ef4444';
@@ -148,42 +274,307 @@ export function CandlestickChart({ data, srLevels = [], maxSRLevels = 6 }: Candl
ctx.fillRect(x - candleW / 2, bodyTop, candleW, bodyH);
});
// Store geometry for hit testing
(canvas as any).__chartMeta = { ml, mr, mt, mb, cw, ch, barW, lo, hi, yScale };
}, [data, topLevels]);
// Store geometry for hit testing (includes visibleRange offset)
(canvas as any).__chartMeta = { ml, mr, mt, mb, cw, ch, barW, lo, hi, yScale, visibleStart: start };
// Size the overlay canvas to match
const overlay = overlayCanvasRef.current;
if (overlay) {
overlay.width = W * dpr;
overlay.height = H * dpr;
overlay.style.width = `${W}px`;
overlay.style.height = `${H}px`;
}
}, [data, topLevels, visibleRange, zones, tradeSetup]);
const drawCrosshair = useCallback(() => {
const overlay = overlayCanvasRef.current;
const canvas = canvasRef.current;
if (!overlay || !canvas) return;
const dpr = window.devicePixelRatio || 1;
const ctx = overlay.getContext('2d');
if (!ctx) return;
ctx.setTransform(dpr, 0, 0, dpr, 0, 0);
ctx.clearRect(0, 0, overlay.width, overlay.height);
const pos = crosshairRef.current;
if (!pos) return;
const meta = (canvas as any).__chartMeta;
if (!meta) return;
const { ml, mt, mb, cw, ch, barW, lo, hi, visibleStart } = meta;
const H = overlay.height / dpr;
// Clamp crosshair to chart area
const cx = Math.max(ml, Math.min(ml + cw, pos.x));
const cy = Math.max(mt, Math.min(mt + ch, pos.y));
// Dashed crosshair lines
ctx.strokeStyle = 'rgba(255, 255, 255, 0.4)';
ctx.lineWidth = 0.75;
ctx.setLineDash([4, 3]);
// Vertical line
ctx.beginPath();
ctx.moveTo(cx, mt);
ctx.lineTo(cx, mt + ch);
ctx.stroke();
// Horizontal line
ctx.beginPath();
ctx.moveTo(ml, cy);
ctx.lineTo(ml + cw, cy);
ctx.stroke();
ctx.setLineDash([]);
// Price label on y-axis (right side)
const price = hi - ((cy - mt) / ch) * (hi - lo);
const priceText = formatPrice(price);
ctx.font = '11px Inter, system-ui, sans-serif';
const priceMetrics = ctx.measureText(priceText);
const labelPadX = 5;
const labelPadY = 3;
const labelW = priceMetrics.width + labelPadX * 2;
const labelH = 16 + labelPadY * 2;
const labelX = ml + cw + 2;
const labelY = cy - labelH / 2;
ctx.fillStyle = 'rgba(55, 65, 81, 0.9)';
ctx.beginPath();
ctx.roundRect(labelX, labelY, labelW, labelH, 3);
ctx.fill();
ctx.fillStyle = '#e5e7eb';
ctx.textAlign = 'left';
ctx.textBaseline = 'middle';
ctx.fillText(priceText, labelX + labelPadX, cy);
// Date label on x-axis (bottom)
const localIdx = Math.floor((cx - ml) / barW);
const absIdx = (visibleStart ?? 0) + localIdx;
if (absIdx >= 0 && absIdx < data.length) {
const dateText = formatDate(data[absIdx].date);
const dateMetrics = ctx.measureText(dateText);
const dateLabelW = dateMetrics.width + labelPadX * 2;
const dateLabelH = 16 + labelPadY * 2;
const dateLabelX = cx - dateLabelW / 2;
const dateLabelY = H - mb + 2;
ctx.fillStyle = 'rgba(55, 65, 81, 0.9)';
ctx.beginPath();
ctx.roundRect(dateLabelX, dateLabelY, dateLabelW, dateLabelH, 3);
ctx.fill();
ctx.fillStyle = '#e5e7eb';
ctx.textAlign = 'center';
ctx.textBaseline = 'middle';
ctx.fillText(dateText, cx, dateLabelY + dateLabelH / 2);
}
}, [data]);
useEffect(() => {
draw();
const onResize = () => {
cancelAnimationFrame(animFrame.current);
animFrame.current = requestAnimationFrame(draw);
animFrame.current = requestAnimationFrame(() => {
draw();
drawCrosshair();
});
};
window.addEventListener('resize', onResize);
return () => {
window.removeEventListener('resize', onResize);
cancelAnimationFrame(animFrame.current);
};
}, [draw]);
}, [draw, drawCrosshair]);
// Prevent default scroll on wheel events over the overlay canvas
useEffect(() => {
const overlay = overlayCanvasRef.current;
if (!overlay) return;
const preventScroll = (e: WheelEvent) => {
if (data.length >= MIN_VISIBLE_BARS) {
e.preventDefault();
}
};
overlay.addEventListener('wheel', preventScroll, { passive: false });
return () => overlay.removeEventListener('wheel', preventScroll);
}, [data.length]);
const handleWheel = useCallback(
(e: React.WheelEvent<HTMLCanvasElement>) => {
// Disable zoom if dataset has fewer than 10 bars
if (data.length < MIN_VISIBLE_BARS) return;
cancelAnimationFrame(zoomFrame.current);
zoomFrame.current = requestAnimationFrame(() => {
const canvas = canvasRef.current;
if (!canvas) return;
const meta = (canvas as any).__chartMeta;
if (!meta) return;
const rect = canvas.getBoundingClientRect();
const mx = e.clientX - rect.left;
// Determine cursor position as fraction within the visible chart area
const cursorFraction = Math.max(0, Math.min(1, (mx - meta.ml) / meta.cw));
setVisibleRange((prev) => {
const currentWidth = prev.end - prev.start;
const zoomFactor = 0.1;
let delta: number;
if (e.deltaY > 0) {
// Scroll down → zoom out (widen range)
delta = Math.max(1, Math.round(currentWidth * zoomFactor));
} else {
// Scroll up → zoom in (narrow range)
delta = -Math.max(1, Math.round(currentWidth * zoomFactor));
}
const newWidth = currentWidth + delta;
// Clamp to min 10 bars, max full dataset
const clampedWidth = Math.max(MIN_VISIBLE_BARS, Math.min(data.length, newWidth));
if (clampedWidth === currentWidth) return prev;
const widthChange = clampedWidth - currentWidth;
// Distribute the change around the cursor position
const leftChange = Math.round(widthChange * cursorFraction);
const rightChange = widthChange - leftChange;
let newStart = prev.start - leftChange;
let newEnd = prev.end + rightChange;
// Clamp to dataset bounds
if (newStart < 0) {
newEnd -= newStart;
newStart = 0;
}
if (newEnd > data.length) {
newStart -= newEnd - data.length;
newEnd = data.length;
}
newStart = Math.max(0, newStart);
newEnd = Math.min(data.length, newEnd);
return { start: newStart, end: newEnd };
});
});
},
[data.length]
);
const handleMouseDown = useCallback(
(e: React.MouseEvent<HTMLCanvasElement>) => {
// Pan only when zoomed in (visible range < full dataset)
const currentWidth = visibleRange.end - visibleRange.start;
if (currentWidth >= data.length) return;
isPanningRef.current = true;
panStartXRef.current = e.clientX;
},
[data.length, visibleRange]
);
const handleMouseMove = useCallback(
(e: React.MouseEvent<HTMLCanvasElement>) => {
const canvas = canvasRef.current;
if (!canvas) return;
const rect = canvas.getBoundingClientRect();
const mx = e.clientX - rect.left;
const my = e.clientY - rect.top;
// Handle panning
if (isPanningRef.current) {
const meta = (canvas as any).__chartMeta;
if (!meta) return;
const dx = e.clientX - panStartXRef.current;
// Convert pixel drag to bar count: negative dx = drag left = shift range right (see later bars)
const barShift = -Math.round(dx / meta.barW);
if (barShift !== 0) {
panStartXRef.current = e.clientX;
setVisibleRange((prev) => {
const width = prev.end - prev.start;
let newStart = prev.start + barShift;
let newEnd = prev.end + barShift;
// Clamp to dataset bounds
if (newStart < 0) {
newStart = 0;
newEnd = width;
}
if (newEnd > data.length) {
newEnd = data.length;
newStart = data.length - width;
}
if (newStart === prev.start && newEnd === prev.end) return prev;
return { start: newStart, end: newEnd };
});
}
// Clear crosshair while panning
crosshairRef.current = null;
drawCrosshair();
return;
}
// Update crosshair position and draw
crosshairRef.current = { x: mx, y: my };
drawCrosshair();
// Tooltip logic
const tip = tooltipRef.current;
if (!canvas || !tip || data.length === 0) return;
if (!tip || data.length === 0) return;
const meta = (canvas as any).__chartMeta;
if (!meta) return;
const rect = canvas.getBoundingClientRect();
const mx = e.clientX - rect.left;
const idx = Math.floor((mx - meta.ml) / meta.barW);
const localIdx = Math.floor((mx - meta.ml) / meta.barW);
if (idx >= 0 && idx < data.length) {
const bar = data[idx];
tooltipState.current = { visible: true, x: e.clientX - rect.left, y: e.clientY - rect.top, bar };
// Map local index to the visible data slice
const visibleStart = meta.visibleStart ?? 0;
const visibleEnd = Math.min(data.length, visibleStart + Math.round(meta.cw / meta.barW));
const absIdx = visibleStart + localIdx;
if (localIdx >= 0 && absIdx < visibleEnd && absIdx < data.length) {
const bar = data[absIdx];
tooltipState.current = { visible: true, x: mx, y: my, bar };
tip.style.display = 'block';
tip.style.left = `${Math.min(mx + 14, rect.width - 180)}px`;
tip.style.top = `${Math.max(e.clientY - rect.top - 80, 8)}px`;
tip.style.top = `${Math.max(my - 80, 8)}px`;
// Check if cursor is near trade overlay zone
let tradeTooltipHtml = '';
if (tradeSetup && meta.yScale) {
const entryY = meta.yScale(tradeSetup.entry_price);
const stopY = meta.yScale(tradeSetup.stop_loss);
const targetY = meta.yScale(tradeSetup.target);
const tradeTop = Math.min(entryY, stopY, targetY);
const tradeBottom = Math.max(entryY, stopY, targetY);
if (my >= tradeTop - 10 && my <= tradeBottom + 10) {
tradeTooltipHtml = `
<div class="border-t border-gray-600 mt-1.5 pt-1.5 text-gray-300 font-medium mb-1">Trade Setup</div>
<div class="grid grid-cols-2 gap-x-3 gap-y-0.5 text-gray-400">
<span>Direction</span><span class="text-right text-gray-200">${tradeSetup.direction}</span>
<span>Entry</span><span class="text-right text-blue-300">${formatPrice(tradeSetup.entry_price)}</span>
<span>Stop</span><span class="text-right text-red-400">${formatPrice(tradeSetup.stop_loss)}</span>
<span>Target</span><span class="text-right text-green-400">${formatPrice(tradeSetup.target)}</span>
<span>R:R</span><span class="text-right text-gray-200">${tradeSetup.rr_ratio.toFixed(2)}</span>
</div>`;
}
}
tip.innerHTML = `
<div class="text-gray-300 font-medium mb-1">${formatDate(bar.date)}</div>
<div class="grid grid-cols-2 gap-x-3 gap-y-0.5 text-gray-400">
@@ -192,18 +583,25 @@ export function CandlestickChart({ data, srLevels = [], maxSRLevels = 6 }: Candl
<span>Low</span><span class="text-right text-gray-200">${formatPrice(bar.low)}</span>
<span>Close</span><span class="text-right text-gray-200">${formatPrice(bar.close)}</span>
<span>Vol</span><span class="text-right text-gray-200">${bar.volume.toLocaleString()}</span>
</div>`;
</div>${tradeTooltipHtml}`;
} else {
tip.style.display = 'none';
}
},
[data]
[data, drawCrosshair, tradeSetup]
);
const handleMouseUp = useCallback(() => {
isPanningRef.current = false;
}, []);
const handleMouseLeave = useCallback(() => {
isPanningRef.current = false;
crosshairRef.current = null;
drawCrosshair();
const tip = tooltipRef.current;
if (tip) tip.style.display = 'none';
}, []);
}, [drawCrosshair]);
if (data.length === 0) {
return (
@@ -217,10 +615,18 @@ export function CandlestickChart({ data, srLevels = [], maxSRLevels = 6 }: Candl
<div ref={containerRef} className="relative w-full" style={{ height: 400 }}>
<canvas
ref={canvasRef}
className="w-full cursor-crosshair"
className="w-full"
style={{ height: 400 }}
/>
<canvas
ref={overlayCanvasRef}
className="absolute top-0 left-0 w-full cursor-crosshair"
style={{ height: 400 }}
onMouseDown={handleMouseDown}
onMouseMove={handleMouseMove}
onMouseUp={handleMouseUp}
onMouseLeave={handleMouseLeave}
onWheel={handleWheel}
/>
<div
ref={tooltipRef}

View File

@@ -1,3 +1,4 @@
import { useMemo } from 'react';
import { Link } from 'react-router-dom';
import type { RankingEntry } from '../../lib/types';
@@ -5,18 +6,31 @@ interface RankingsTableProps {
rankings: RankingEntry[];
}
function scoreColor(score: number): string {
function scoreColor(score: number | null | undefined): string {
  // Maps a 0-100 score to a Tailwind text color class.
  // The body already guarded undefined, so the signature is widened to
  // `number | null | undefined` to match (backward-compatible for callers).
  // Thresholds mirror the coloring used elsewhere: >70 good, 40-70 middling, <40 poor.
  if (score === null || score === undefined) return 'text-gray-600';
  if (score > 70) return 'text-emerald-400';
  if (score >= 40) return 'text-amber-400';
  return 'text-red-400';
}
function capitalize(s: string): string {
  // Uppercase only the first character; the rest of the string is untouched.
  if (s.length === 0) return s;
  return s[0].toUpperCase() + s.slice(1);
}
export function RankingsTable({ rankings }: RankingsTableProps) {
if (rankings.length === 0) {
return <p className="py-8 text-center text-sm text-gray-500">No rankings available.</p>;
}
const dimensionNames = rankings.length > 0 ? rankings[0].dimensions.map((d) => d.dimension) : [];
const dimensionNames = useMemo(() => {
const nameSet = new Set<string>();
for (const entry of rankings) {
for (const d of entry.dimensions) {
nameSet.add(d.dimension);
}
}
return Array.from(nameSet);
}, [rankings]);
return (
<div className="glass overflow-x-auto">
@@ -27,7 +41,7 @@ export function RankingsTable({ rankings }: RankingsTableProps) {
<th className="px-4 py-3">Symbol</th>
<th className="px-4 py-3">Composite</th>
{dimensionNames.map((dim) => (
<th key={dim} className="px-4 py-3">{dim}</th>
<th key={dim} className="px-4 py-3">{capitalize(dim)}</th>
))}
</tr>
</thead>
@@ -43,11 +57,15 @@ export function RankingsTable({ rankings }: RankingsTableProps) {
<td className={`px-4 py-3.5 font-semibold ${scoreColor(entry.composite_score)}`}>
{Math.round(entry.composite_score)}
</td>
{entry.dimensions.map((dim) => (
<td key={dim.dimension} className={`px-4 py-3.5 font-mono ${scoreColor(dim.score)}`}>
{Math.round(dim.score)}
</td>
))}
{dimensionNames.map((dim) => {
const found = entry.dimensions.find((d) => d.dimension === dim);
const score = found ? found.score : null;
return (
<td key={dim} className={`px-4 py-3.5 font-mono ${scoreColor(score)}`}>
{score !== null ? Math.round(score) : 'N/A'}
</td>
);
})}
</tr>
))}
</tbody>

View File

@@ -1,4 +1,4 @@
import { useState, type FormEvent } from 'react';
import { useState, useMemo, type FormEvent } from 'react';
import { useUpdateWeights } from '../../hooks/useScores';
interface WeightsFormProps {
@@ -6,17 +6,33 @@ interface WeightsFormProps {
}
export function WeightsForm({ weights }: WeightsFormProps) {
const [localWeights, setLocalWeights] = useState<Record<string, number>>(weights);
// Convert API decimal weights (0-1) to 0-100 integer scale on mount
const [sliderValues, setSliderValues] = useState<Record<string, number>>(() =>
Object.fromEntries(
Object.entries(weights).map(([key, w]) => [key, Math.round(w * 100)])
)
);
const updateWeights = useUpdateWeights();
const allZero = useMemo(
() => Object.values(sliderValues).every((v) => v === 0),
[sliderValues]
);
const handleChange = (key: string, value: string) => {
const num = parseFloat(value);
if (!isNaN(num)) setLocalWeights((prev) => ({ ...prev, [key]: num }));
const num = parseInt(value, 10);
if (!isNaN(num)) setSliderValues((prev) => ({ ...prev, [key]: num }));
};
const handleSubmit = (e: FormEvent) => {
e.preventDefault();
updateWeights.mutate(localWeights);
if (allZero) return;
const total = Object.values(sliderValues).reduce((sum, v) => sum + v, 0);
const normalized = Object.fromEntries(
Object.entries(sliderValues).map(([key, v]) => [key, v / total])
);
updateWeights.mutate(normalized);
};
return (
@@ -24,23 +40,35 @@ export function WeightsForm({ weights }: WeightsFormProps) {
<h3 className="mb-4 text-xs font-semibold uppercase tracking-widest text-gray-500">
Scoring Weights
</h3>
<div className="grid grid-cols-2 gap-3 sm:grid-cols-3 lg:grid-cols-4">
<div className="grid grid-cols-1 gap-3 sm:grid-cols-2 lg:grid-cols-3">
{Object.keys(weights).map((key) => (
<label key={key} className="flex flex-col gap-1.5">
<span className="text-xs text-gray-400 capitalize">{key.replace(/_/g, ' ')}</span>
<input
type="number"
step="any"
value={localWeights[key] ?? 0}
onChange={(e) => handleChange(key, e.target.value)}
className="input-glass px-2.5 py-1.5 text-sm"
/>
<div className="flex items-center gap-2">
<input
type="range"
min={0}
max={100}
step={1}
value={sliderValues[key] ?? 0}
onChange={(e) => handleChange(key, e.target.value)}
className="h-2 w-full cursor-pointer appearance-none rounded-lg bg-gray-700 accent-indigo-500"
/>
<span className="min-w-[2ch] text-right text-sm font-medium text-gray-300">
{sliderValues[key] ?? 0}
</span>
</div>
</label>
))}
</div>
{allZero && (
<p className="mt-3 text-xs text-red-400">
At least one weight must be greater than zero
</p>
)}
<button
type="submit"
disabled={updateWeights.isPending}
disabled={updateWeights.isPending || allZero}
className="mt-4 btn-gradient px-4 py-2 text-sm disabled:opacity-50"
>
<span>{updateWeights.isPending ? 'Updating…' : 'Update Weights'}</span>

View File

@@ -1,8 +1,8 @@
import { Link } from 'react-router-dom';
import type { TradeSetup } from '../../lib/types';
import { formatPrice, formatDateTime } from '../../lib/format';
import { formatPrice, formatPercent, formatDateTime } from '../../lib/format';
export type SortColumn = 'symbol' | 'direction' | 'entry_price' | 'stop_loss' | 'target' | 'rr_ratio' | 'composite_score' | 'detected_at';
export type SortColumn = 'symbol' | 'direction' | 'entry_price' | 'stop_loss' | 'target' | 'risk_amount' | 'reward_amount' | 'rr_ratio' | 'stop_pct' | 'target_pct' | 'composite_score' | 'detected_at';
export type SortDirection = 'asc' | 'desc';
interface TradeTableProps {
@@ -18,11 +18,36 @@ const columns: { key: SortColumn; label: string }[] = [
{ key: 'entry_price', label: 'Entry' },
{ key: 'stop_loss', label: 'Stop Loss' },
{ key: 'target', label: 'Target' },
{ key: 'risk_amount', label: 'Risk $' },
{ key: 'reward_amount', label: 'Reward $' },
{ key: 'rr_ratio', label: 'R:R' },
{ key: 'stop_pct', label: '% to Stop' },
{ key: 'target_pct', label: '% to Target' },
{ key: 'composite_score', label: 'Score' },
{ key: 'detected_at', label: 'Detected' },
];
/**
 * Derived risk/reward metrics for a trade setup, computed client-side
 * from the entry, stop-loss and target prices.
 */
export interface TradeAnalysis {
  risk_amount: number;   // absolute price distance from entry to stop-loss
  reward_amount: number; // absolute price distance from entry to target
  stop_pct: number;      // risk_amount as a percentage of the entry price
  target_pct: number;    // reward_amount as a percentage of the entry price
}

/**
 * Computes risk/reward metrics for a trade setup.
 *
 * Percentages are relative to the entry price. If the entry price is 0
 * (malformed data) the percentages are reported as 0 instead of
 * Infinity/NaN so the table still renders sensibly.
 */
export function computeTradeAnalysis(trade: TradeSetup): TradeAnalysis {
  const risk_amount = Math.abs(trade.entry_price - trade.stop_loss);
  const reward_amount = Math.abs(trade.target - trade.entry_price);
  // Guard against division by zero on a degenerate entry price.
  const entry = trade.entry_price;
  const stop_pct = entry !== 0 ? (risk_amount / entry) * 100 : 0;
  const target_pct = entry !== 0 ? (reward_amount / entry) * 100 : 0;
  return { risk_amount, reward_amount, stop_pct, target_pct };
}
function rrColorClass(rr: number): string {
  // Tiered color for a risk:reward ratio — >=3 strong (green),
  // >=2 acceptable (amber), anything lower weak (red).
  const tiers: Array<[number, string]> = [
    [3.0, 'text-green-400'],
    [2.0, 'text-amber-400'],
  ];
  for (const [threshold, cls] of tiers) {
    if (rr >= threshold) return cls;
  }
  return 'text-red-400';
}
function sortIndicator(column: SortColumn, active: SortColumn, dir: SortDirection) {
if (column !== active) return '';
return dir === 'asc' ? ' ▲' : ' ▼';
@@ -50,30 +75,37 @@ export function TradeTable({ trades, sortColumn, sortDirection, onSort }: TradeT
</tr>
</thead>
<tbody>
{trades.map((trade) => (
<tr key={trade.id} className="border-b border-white/[0.04] transition-all duration-200 hover:bg-white/[0.03]">
<td className="px-4 py-3.5">
<Link to={`/ticker/${trade.symbol}`} className="font-medium text-blue-400 hover:text-blue-300 transition-colors duration-150">
{trade.symbol}
</Link>
</td>
<td className="px-4 py-3.5">
<span className={trade.direction === 'long' ? 'font-medium text-emerald-400' : 'font-medium text-red-400'}>
{trade.direction}
</span>
</td>
<td className="px-4 py-3.5 font-mono text-gray-200">{formatPrice(trade.entry_price)}</td>
<td className="px-4 py-3.5 font-mono text-gray-200">{formatPrice(trade.stop_loss)}</td>
<td className="px-4 py-3.5 font-mono text-gray-200">{formatPrice(trade.target)}</td>
<td className="px-4 py-3.5 font-mono font-semibold text-gray-200">{trade.rr_ratio.toFixed(2)}</td>
<td className="px-4 py-3.5">
<span className={`font-semibold ${trade.composite_score > 70 ? 'text-emerald-400' : trade.composite_score >= 40 ? 'text-amber-400' : 'text-red-400'}`}>
{Math.round(trade.composite_score)}
</span>
</td>
<td className="px-4 py-3.5 text-gray-400">{formatDateTime(trade.detected_at)}</td>
</tr>
))}
{trades.map((trade) => {
const analysis = computeTradeAnalysis(trade);
return (
<tr key={trade.id} className="border-b border-white/[0.04] transition-all duration-200 hover:bg-white/[0.03]">
<td className="px-4 py-3.5">
<Link to={`/ticker/${trade.symbol}`} className="font-medium text-blue-400 hover:text-blue-300 transition-colors duration-150">
{trade.symbol}
</Link>
</td>
<td className="px-4 py-3.5">
<span className={trade.direction === 'long' ? 'font-medium text-emerald-400' : 'font-medium text-red-400'}>
{trade.direction}
</span>
</td>
<td className="px-4 py-3.5 font-mono text-gray-200">{formatPrice(trade.entry_price)}</td>
<td className="px-4 py-3.5 font-mono text-gray-200">{formatPrice(trade.stop_loss)}</td>
<td className="px-4 py-3.5 font-mono text-gray-200">{formatPrice(trade.target)}</td>
<td className="px-4 py-3.5 font-mono text-gray-200">{formatPrice(analysis.risk_amount)}</td>
<td className="px-4 py-3.5 font-mono text-gray-200">{formatPrice(analysis.reward_amount)}</td>
<td className={`px-4 py-3.5 font-mono font-semibold ${rrColorClass(trade.rr_ratio)}`}>{trade.rr_ratio.toFixed(2)}</td>
<td className="px-4 py-3.5 font-mono text-gray-200">{formatPercent(analysis.stop_pct)}</td>
<td className="px-4 py-3.5 font-mono text-gray-200">{formatPercent(analysis.target_pct)}</td>
<td className="px-4 py-3.5">
<span className={`font-semibold ${trade.composite_score > 70 ? 'text-emerald-400' : trade.composite_score >= 40 ? 'text-amber-400' : 'text-red-400'}`}>
{Math.round(trade.composite_score)}
</span>
</td>
<td className="px-4 py-3.5 text-gray-400">{formatDateTime(trade.detected_at)}</td>
</tr>
);
})}
</tbody>
</table>
</div>

View File

@@ -0,0 +1,66 @@
import type { ScoreBreakdown } from '../../lib/types';
// Props for DimensionBreakdownPanel.
interface DimensionBreakdownPanelProps {
  // Per-dimension score breakdown (sub-scores, formula, unavailable entries) to render.
  breakdown: ScoreBreakdown;
}
function formatWeight(weight: number): string {
  // Render a 0-1 fractional weight as a whole-number percentage, e.g. 0.25 -> "25%".
  const pct = Math.round(weight * 100);
  return pct + '%';
}
function formatRawValue(value: number | string | null): string {
  // Em-dash for missing values; strings pass through unchanged; numbers
  // show two decimals unless they are whole integers.
  if (value === null) return '—';
  if (typeof value === 'number') {
    return Number.isInteger(value) ? String(value) : value.toFixed(2);
  }
  return value;
}
export function DimensionBreakdownPanel({ breakdown }: DimensionBreakdownPanelProps) {
  // Renders up to three sections for one scoring dimension: the weighted
  // sub-scores, the human-readable formula, and any sub-scores that could
  // not be computed (with the reason they are missing).
  const hasSubScores = breakdown.sub_scores.length > 0;
  const hasUnavailable = breakdown.unavailable.length > 0;
  return (
    <div className="space-y-3">
      {/* Each sub-score row: name, score value, weight badge, raw input value */}
      {hasSubScores && (
        <div className="space-y-1.5">
          {breakdown.sub_scores.map((subScore) => (
            <div
              key={subScore.name}
              data-testid="sub-score-row"
              className="flex items-center justify-between gap-2 text-sm"
            >
              <span className="text-gray-400 min-w-0 truncate">{subScore.name}</span>
              <div className="flex items-center gap-2 shrink-0">
                <span className="text-gray-200 tabular-nums">{subScore.score.toFixed(1)}</span>
                <span className="rounded bg-white/10 px-1.5 py-0.5 text-[10px] font-medium text-gray-400">
                  {formatWeight(subScore.weight)}
                </span>
                <span className="text-gray-500 text-xs tabular-nums w-16 text-right">
                  {formatRawValue(subScore.raw_value)}
                </span>
              </div>
            </div>
          ))}
        </div>
      )}
      {/* Plain-text description of how the dimension score is combined */}
      {breakdown.formula && (
        <p className="text-xs text-gray-500 leading-relaxed">{breakdown.formula}</p>
      )}
      {/* Sub-scores that were skipped, with the reason each is unavailable */}
      {hasUnavailable && (
        <div className="space-y-1">
          {breakdown.unavailable.map((missing) => (
            <div
              key={missing.name}
              data-testid="unavailable-row"
              className="flex items-center justify-between text-sm"
            >
              <span className="text-gray-600">{missing.name}</span>
              <span className="text-gray-600 text-xs italic">{missing.reason}</span>
            </div>
          ))}
        </div>
      )}
    </div>
  );
}

View File

@@ -1,3 +1,4 @@
import { useState } from 'react';
import { formatPercent, formatLargeNumber } from '../../lib/format';
import type { FundamentalResponse } from '../../lib/types';
@@ -5,30 +6,106 @@ interface FundamentalsPanelProps {
data: FundamentalResponse;
}
const FIELD_LABELS: Record<string, string> = {
pe_ratio: 'P/E Ratio',
revenue_growth: 'Revenue Growth',
earnings_surprise: 'Earnings Surprise',
market_cap: 'Market Cap',
};
export function FundamentalsPanel({ data }: FundamentalsPanelProps) {
const [expanded, setExpanded] = useState<boolean>(false);
const items = [
{ label: 'P/E Ratio', value: data.pe_ratio !== null ? data.pe_ratio.toFixed(2) : '—' },
{ label: 'Revenue Growth', value: data.revenue_growth !== null ? formatPercent(data.revenue_growth) : '—' },
{ label: 'Earnings Surprise', value: data.earnings_surprise !== null ? formatPercent(data.earnings_surprise) : '—' },
{ label: 'Market Cap', value: data.market_cap !== null ? formatLargeNumber(data.market_cap) : '—' },
{ key: 'pe_ratio', label: 'P/E Ratio', value: data.pe_ratio, format: (v: number) => v.toFixed(2) },
{ key: 'revenue_growth', label: 'Revenue Growth', value: data.revenue_growth, format: formatPercent },
{ key: 'earnings_surprise', label: 'Earnings Surprise', value: data.earnings_surprise, format: formatPercent },
{ key: 'market_cap', label: 'Market Cap', value: data.market_cap, format: formatLargeNumber },
];
const unavailableEntries = Object.entries(data.unavailable_fields ?? {});
return (
<div className="glass p-5">
<h3 className="mb-3 text-xs font-medium uppercase tracking-widest text-gray-500">Fundamentals</h3>
<div className="space-y-2.5 text-sm">
{items.map((item) => (
<div key={item.label} className="flex justify-between">
<span className="text-gray-400">{item.label}</span>
<span className="text-gray-200">{item.value}</span>
</div>
))}
{data.fetched_at && (
<p className="mt-2 text-xs text-gray-500">
Updated {new Date(data.fetched_at).toLocaleDateString()}
</p>
)}
{items.map((item) => {
const reason = data.unavailable_fields?.[item.key];
let display: React.ReactNode;
let valueClass = 'text-gray-200';
if (item.value !== null) {
display = item.format(item.value);
} else if (reason) {
display = reason;
valueClass = 'text-amber-400';
} else {
display = '—';
}
return (
<div key={item.key} className="flex justify-between">
<span className="text-gray-400">{item.label}</span>
<span className={valueClass}>{display}</span>
</div>
);
})}
</div>
<button
type="button"
onClick={() => setExpanded((prev) => !prev)}
className="mt-3 flex w-full items-center justify-center gap-1 text-xs text-gray-500 hover:text-gray-300 transition-colors"
aria-expanded={expanded}
aria-label={expanded ? 'Collapse details' : 'Expand details'}
>
<svg
className={`h-4 w-4 transition-transform ${expanded ? 'rotate-180' : ''}`}
fill="none"
viewBox="0 0 24 24"
stroke="currentColor"
strokeWidth={2}
>
<path strokeLinecap="round" strokeLinejoin="round" d="M19 9l-7 7-7-7" />
</svg>
</button>
{expanded && (
<div className="mt-3 space-y-3 border-t border-white/10 pt-3">
<div className="space-y-1 text-sm">
<div className="flex justify-between">
<span className="text-gray-500">Data Source</span>
<span className="text-gray-300">FMP</span>
</div>
{data.fetched_at && (
<div className="flex justify-between">
<span className="text-gray-500">Fetched</span>
<span className="text-gray-300">{new Date(data.fetched_at).toLocaleString()}</span>
</div>
)}
</div>
{unavailableEntries.length > 0 && (
<div>
<span className="text-xs font-medium uppercase tracking-widest text-gray-500">Unavailable Fields</span>
<ul className="mt-1 space-y-1">
{unavailableEntries.map(([field, reason]) => (
<li key={field} className="flex justify-between text-sm">
<span className="text-gray-400">{FIELD_LABELS[field] ?? field}</span>
<span className="text-amber-400">{reason}</span>
</li>
))}
</ul>
</div>
)}
</div>
)}
{!expanded && data.fetched_at && (
<p className="mt-2 text-xs text-gray-500">
Updated {new Date(data.fetched_at).toLocaleDateString()}
</p>
)}
</div>
);
}

View File

@@ -1,3 +1,4 @@
import { useState } from 'react';
import { formatPercent } from '../../lib/format';
import type { SentimentResponse } from '../../lib/types';
@@ -12,32 +13,84 @@ const classificationColors: Record<string, string> = {
};
export function SentimentPanel({ data }: SentimentPanelProps) {
const [expanded, setExpanded] = useState<boolean>(false);
const latest = data.scores[0];
return (
<div className="glass p-5">
<h3 className="mb-3 text-xs font-medium uppercase tracking-widest text-gray-500">Sentiment</h3>
{latest ? (
<div className="space-y-2.5 text-sm">
<div className="flex justify-between">
<span className="text-gray-400">Classification</span>
<span className={classificationColors[latest.classification] ?? 'text-gray-300'}>
{latest.classification}
</span>
<>
<div className="space-y-2.5 text-sm">
<div className="flex justify-between">
<span className="text-gray-400">Classification</span>
<span className={classificationColors[latest.classification] ?? 'text-gray-300'}>
{latest.classification}
</span>
</div>
<div className="flex justify-between">
<span className="text-gray-400">Confidence</span>
<span className="text-gray-200">{formatPercent(latest.confidence)}</span>
</div>
<div className="flex justify-between">
<span className="text-gray-400">Dimension Score</span>
<span className="text-gray-200">{data.dimension_score !== null ? Math.round(data.dimension_score) : '—'}</span>
</div>
<div className="flex justify-between">
<span className="text-gray-400">Sources</span>
<span className="text-gray-200">{data.count}</span>
</div>
</div>
<div className="flex justify-between">
<span className="text-gray-400">Confidence</span>
<span className="text-gray-200">{formatPercent(latest.confidence)}</span>
</div>
<div className="flex justify-between">
<span className="text-gray-400">Dimension Score</span>
<span className="text-gray-200">{data.dimension_score !== null ? Math.round(data.dimension_score) : '—'}</span>
</div>
<div className="flex justify-between">
<span className="text-gray-400">Sources</span>
<span className="text-gray-200">{data.count}</span>
</div>
</div>
<button
type="button"
onClick={() => setExpanded((prev) => !prev)}
className="mt-3 flex w-full items-center justify-center gap-1 text-xs text-gray-500 hover:text-gray-300 transition-colors"
aria-expanded={expanded}
aria-label={expanded ? 'Collapse details' : 'Expand details'}
>
<svg
className={`h-4 w-4 transition-transform ${expanded ? 'rotate-180' : ''}`}
fill="none"
viewBox="0 0 24 24"
stroke="currentColor"
strokeWidth={2}
>
<path strokeLinecap="round" strokeLinejoin="round" d="M19 9l-7 7-7-7" />
</svg>
</button>
{expanded && (
<div className="mt-3 space-y-3 border-t border-white/10 pt-3">
<div>
<span className="text-xs font-medium uppercase tracking-widest text-gray-500">Reasoning</span>
<p className="mt-1 text-sm text-gray-300">
{latest.reasoning || 'No reasoning available'}
</p>
</div>
{latest.citations.length > 0 && (
<div>
<span className="text-xs font-medium uppercase tracking-widest text-gray-500">Citations</span>
<ul className="mt-1 space-y-1">
{latest.citations.map((citation, idx) => (
<li key={idx}>
<a
href={citation.url}
target="_blank"
rel="noopener noreferrer"
className="text-sm text-blue-400 hover:text-blue-300 underline break-all"
>
{citation.title || citation.url}
</a>
</li>
))}
</ul>
</div>
)}
</div>
)}
</>
) : (
<p className="text-sm text-gray-500">No sentiment data available</p>
)}

View File

@@ -1,6 +1,11 @@
import { useState } from 'react';
import { DimensionBreakdownPanel } from '../ticker/DimensionBreakdownPanel';
import type { DimensionScoreDetail, CompositeBreakdown } from '../../lib/types';
interface ScoreCardProps {
compositeScore: number | null;
dimensions: { dimension: string; score: number }[];
dimensions: DimensionScoreDetail[];
compositeBreakdown?: CompositeBreakdown;
}
function scoreColor(score: number): string {
@@ -46,7 +51,13 @@ function ScoreRing({ score }: { score: number }) {
);
}
export function ScoreCard({ compositeScore, dimensions }: ScoreCardProps) {
export function ScoreCard({ compositeScore, dimensions, compositeBreakdown }: ScoreCardProps) {
const [expanded, setExpanded] = useState<Record<string, boolean>>({});
const toggleExpand = (dimension: string) => {
setExpanded((prev) => ({ ...prev, [dimension]: !prev[dimension] }));
};
return (
<div className="glass p-5">
<div className="flex items-center gap-4">
@@ -60,28 +71,79 @@ export function ScoreCard({ compositeScore, dimensions }: ScoreCardProps) {
<p className={`text-2xl font-bold ${compositeScore !== null ? scoreColor(compositeScore) : 'text-gray-500'}`}>
{compositeScore !== null ? Math.round(compositeScore) : '—'}
</p>
{compositeBreakdown && (
<p className="mt-1 text-[10px] text-gray-500 leading-snug max-w-[200px]" data-testid="renorm-explanation">
Weighted average of available dimensions with re-normalized weights.
</p>
)}
</div>
</div>
{dimensions.length > 0 && (
<div className="mt-5 space-y-2.5">
<div className="mt-5 space-y-1">
<p className="text-[10px] font-medium uppercase tracking-widest text-gray-500">Dimensions</p>
{dimensions.map((d) => (
<div key={d.dimension} className="flex items-center justify-between text-sm">
<span className="text-gray-300 capitalize">{d.dimension}</span>
<div className="flex items-center gap-2">
<div className="h-1.5 w-20 rounded-full bg-white/[0.06] overflow-hidden">
<div
className={`h-1.5 rounded-full bg-gradient-to-r ${barGradient(d.score)} transition-all duration-500`}
style={{ width: `${Math.max(0, Math.min(100, d.score))}%` }}
/>
</div>
<span className={`w-8 text-right font-medium text-xs ${scoreColor(d.score)}`}>
{Math.round(d.score)}
</span>
{dimensions.map((d) => {
const isExpanded = expanded[d.dimension] ?? false;
const weight = compositeBreakdown?.renormalized_weights?.[d.dimension]
?? compositeBreakdown?.weights?.[d.dimension];
return (
<div key={d.dimension}>
<button
type="button"
className="flex w-full items-center justify-between text-sm py-1 hover:bg-white/[0.03] rounded transition-colors"
onClick={() => d.breakdown && toggleExpand(d.dimension)}
data-testid={`dimension-row-${d.dimension}`}
>
<span className="text-gray-300 capitalize flex items-center gap-1.5">
{d.breakdown && (
<span className="text-gray-500 text-[10px]">{isExpanded ? '▾' : '▸'}</span>
)}
{d.dimension}
</span>
<div className="flex items-center gap-2">
{weight != null && (
<span className="text-[10px] text-gray-500 tabular-nums" data-testid={`weight-${d.dimension}`}>
{Math.round(weight * 100)}%
</span>
)}
<div className="h-1.5 w-20 rounded-full bg-white/[0.06] overflow-hidden">
<div
className={`h-1.5 rounded-full bg-gradient-to-r ${barGradient(d.score)} transition-all duration-500`}
style={{ width: `${Math.max(0, Math.min(100, d.score))}%` }}
/>
</div>
<span className={`w-8 text-right font-medium text-xs ${scoreColor(d.score)}`}>
{Math.round(d.score)}
</span>
</div>
</button>
{isExpanded && d.breakdown && (
<div className="ml-4 mt-1 mb-2 pl-3 border-l border-white/[0.06]">
<DimensionBreakdownPanel breakdown={d.breakdown} />
</div>
)}
</div>
);
})}
{/* Missing dimensions */}
{compositeBreakdown && compositeBreakdown.missing_dimensions.length > 0 && (
<div className="mt-2 space-y-1">
{compositeBreakdown.missing_dimensions
.filter((dim) => !dimensions.some((d) => d.dimension === dim))
.map((dim) => (
<div
key={dim}
className="flex items-center justify-between text-sm py-1 opacity-40"
data-testid={`missing-dimension-${dim}`}
>
<span className="text-gray-500 capitalize">{dim}</span>
<span className="text-[10px] text-gray-600 italic">redistributed</span>
</div>
))}
</div>
))}
)}
</div>
)}
</div>

View File

@@ -4,6 +4,7 @@ import { getScores } from '../api/scores';
import { getLevels } from '../api/sr-levels';
import { getSentiment } from '../api/sentiment';
import { getFundamentals } from '../api/fundamentals';
import * as tradesApi from '../api/trades';
export function useTickerDetail(symbol: string) {
const ohlcv = useQuery({
@@ -36,5 +37,11 @@ export function useTickerDetail(symbol: string) {
enabled: !!symbol,
});
return { ohlcv, scores, srLevels, sentiment, fundamentals };
const trades = useQuery({
queryKey: ['trades'],
queryFn: () => tradesApi.list(),
enabled: !!symbol,
});
return { ohlcv, scores, srLevels, sentiment, fundamentals, trades };
}

View File

@@ -34,6 +34,16 @@ export interface SRLevelSummary {
strength: number;
}
// S/R Zone
export interface SRZone {
  /** Lower price bound of the clustered zone. */
  low: number;
  /** Upper price bound of the clustered zone. */
  high: number;
  /** Centre of the zone — presumably (low + high) / 2; confirm against backend clustering. */
  midpoint: number;
  /** Aggregate strength of the merged levels (0–100 per backend — TODO confirm cap). */
  strength: number;
  /** Zone side relative to the current price. */
  type: 'support' | 'resistance';
  /** Number of raw S/R levels merged into this zone. */
  level_count: number;
}
// OHLCV
export interface OHLCVBar {
id: number;
@@ -48,6 +58,28 @@ export interface OHLCVBar {
}
// Scores
export interface SubScore {
  /** Name of the sub-score component. */
  name: string;
  /** Component score value. */
  score: number;
  /** Weight of this component within the dimension score. */
  weight: number;
  /** Underlying raw value the score was derived from; null when unavailable. */
  raw_value: number | string | null;
  /** Human-readable explanation of the component. */
  description: string;
}
export interface ScoreBreakdown {
  /** Per-component contributions to a single dimension score. */
  sub_scores: SubScore[];
  /** Textual formula describing how the components are combined. */
  formula: string;
  /** Components that could not be computed, with the reason each is missing. */
  unavailable: { name: string; reason: string }[];
}
export interface CompositeBreakdown {
  /** Configured weight per dimension. */
  weights: Record<string, number>;
  /** Dimensions that contributed to the composite score. */
  available_dimensions: string[];
  /** Dimensions with no score; their weight is redistributed across the rest. */
  missing_dimensions: string[];
  /** Weights re-normalized over the available dimensions only. */
  renormalized_weights: Record<string, number>;
  /** Textual formula describing the composite computation. */
  formula: string;
}
export interface ScoreResponse {
symbol: string;
composite_score: number | null;
@@ -56,6 +88,7 @@ export interface ScoreResponse {
dimensions: DimensionScoreDetail[];
missing_dimensions: string[];
computed_at: string | null;
composite_breakdown?: CompositeBreakdown;
}
export interface DimensionScoreDetail {
@@ -63,6 +96,7 @@ export interface DimensionScoreDetail {
score: number;
is_stale: boolean;
computed_at: string | null;
breakdown?: ScoreBreakdown;
}
export interface RankingEntry {
@@ -102,16 +136,25 @@ export interface SRLevel {
export interface SRLevelResponse {
symbol: string;
levels: SRLevel[];
zones: SRZone[];
visible_levels: SRLevel[];
count: number;
}
// Sentiment
export interface CitationItem {
  /** Source URL of the citation. */
  url: string;
  /** Display title of the cited page. */
  title: string;
}
export interface SentimentScore {
  id: number;
  /** Overall sentiment label. */
  classification: 'bullish' | 'bearish' | 'neutral';
  /** Model confidence — presumably 0–1; confirm against the backend schema. */
  confidence: number;
  /** Provider/source of the sentiment reading. */
  source: string;
  /** Timestamp of the reading — assumed ISO 8601 string; verify serialization. */
  timestamp: string;
  /** Model-provided reasoning text. */
  reasoning: string;
  /** Web citations backing the reasoning. */
  citations: CitationItem[];
}
export interface SentimentResponse {
@@ -130,6 +173,7 @@ export interface FundamentalResponse {
earnings_surprise: number | null;
market_cap: number | null;
fetched_at: string | null;
unavailable_fields: Record<string, string>;
}
// Indicators

View File

@@ -1,7 +1,10 @@
import { useMemo, useState } from 'react';
import { useMutation, useQueryClient } from '@tanstack/react-query';
import { useTrades } from '../hooks/useTrades';
import { TradeTable, type SortColumn, type SortDirection } from '../components/scanner/TradeTable';
import { TradeTable, type SortColumn, type SortDirection, computeTradeAnalysis } from '../components/scanner/TradeTable';
import { SkeletonTable } from '../components/ui/Skeleton';
import { useToast } from '../components/ui/Toast';
import { triggerJob } from '../api/admin';
import type { TradeSetup } from '../lib/types';
type DirectionFilter = 'both' | 'long' | 'short';
@@ -18,6 +21,17 @@ function filterTrades(
});
}
function getComputedValue(trade: TradeSetup, column: SortColumn): number {
const analysis = computeTradeAnalysis(trade);
switch (column) {
case 'risk_amount': return analysis.risk_amount;
case 'reward_amount': return analysis.reward_amount;
case 'stop_pct': return analysis.stop_pct;
case 'target_pct': return analysis.target_pct;
default: return 0;
}
}
function sortTrades(
trades: TradeSetup[],
column: SortColumn,
@@ -35,8 +49,19 @@ function sortTrades(
case 'detected_at':
cmp = new Date(a.detected_at).getTime() - new Date(b.detected_at).getTime();
break;
default:
cmp = (a[column] as number) - (b[column] as number);
case 'risk_amount':
case 'reward_amount':
case 'stop_pct':
case 'target_pct':
cmp = getComputedValue(a, column) - getComputedValue(b, column);
break;
case 'entry_price':
case 'stop_loss':
case 'target':
case 'rr_ratio':
case 'composite_score':
cmp = a[column] - b[column];
break;
}
return direction === 'asc' ? cmp : -cmp;
});
@@ -45,12 +70,25 @@ function sortTrades(
export default function ScannerPage() {
const { data: trades, isLoading, isError, error } = useTrades();
const queryClient = useQueryClient();
const toast = useToast();
const [minRR, setMinRR] = useState(0);
const [directionFilter, setDirectionFilter] = useState<DirectionFilter>('both');
const [sortColumn, setSortColumn] = useState<SortColumn>('rr_ratio');
const [sortDirection, setSortDirection] = useState<SortDirection>('desc');
const scanMutation = useMutation({
mutationFn: () => triggerJob('rr_scanner'),
onSuccess: () => {
toast.addToast('success', 'Scanner triggered. Results will refresh shortly.');
setTimeout(() => queryClient.invalidateQueries({ queryKey: ['trades'] }), 3000);
},
onError: () => {
toast.addToast('error', 'Failed to trigger scanner');
},
});
const handleSort = (column: SortColumn) => {
if (column === sortColumn) {
setSortDirection((prev) => (prev === 'asc' ? 'desc' : 'asc'));
@@ -68,23 +106,44 @@ export default function ScannerPage() {
return (
<div className="space-y-6">
<h1 className="text-2xl font-bold text-gray-100">Trade Scanner</h1>
<div className="flex items-center justify-between">
<h1 className="text-2xl font-bold text-gray-100">Trade Scanner</h1>
<button
type="button"
onClick={() => scanMutation.mutate()}
disabled={scanMutation.isPending}
className="rounded-lg bg-blue-600 px-4 py-2 text-sm font-medium text-white hover:bg-blue-500 disabled:opacity-50 transition-colors duration-150"
>
{scanMutation.isPending ? 'Scanning...' : 'Run Scanner'}
</button>
</div>
{/* Explainer banner */}
<div className="rounded-lg border border-blue-500/20 bg-blue-500/10 px-4 py-3 text-sm text-blue-300">
The scanner identifies asymmetric risk-reward trade setups by analyzing S/R levels
as price targets and using ATR-based stops to define risk.
Click <span className="font-medium">Run Scanner</span> to scan all tickers now,
or wait for the scheduled run.
</div>
{/* Filter controls */}
<div className="flex flex-wrap items-end gap-4">
<div>
<label htmlFor="min-rr" className="mb-1 block text-xs text-gray-400">
Min R:R
Min Risk:Reward
</label>
<input
id="min-rr"
type="number"
min={0}
step={0.1}
value={minRR}
onChange={(e) => setMinRR(Number(e.target.value) || 0)}
className="w-24 rounded border border-gray-700 bg-gray-800 px-3 py-1.5 text-sm text-gray-200 focus:border-blue-500 focus:outline-none transition-colors duration-150"
/>
<div className="flex items-center gap-1">
<span className="text-sm text-gray-400">1 :</span>
<input
id="min-rr"
type="number"
min={0}
step={0.1}
value={minRR}
onChange={(e) => setMinRR(Number(e.target.value) || 0)}
className="w-20 rounded border border-gray-700 bg-gray-800 px-3 py-1.5 text-sm text-gray-200 focus:border-blue-500 focus:outline-none transition-colors duration-150"
/>
</div>
</div>
<div>
<label htmlFor="direction" className="mb-1 block text-xs text-gray-400">
@@ -112,7 +171,13 @@ export default function ScannerPage() {
</div>
)}
{trades && (
{trades && processed.length === 0 && !isLoading && (
<div className="rounded-lg border border-gray-700 bg-gray-800/50 px-4 py-8 text-center text-sm text-gray-400">
No trade setups match the current filters. Try lowering the Min R:R or click Run Scanner to refresh.
</div>
)}
{trades && processed.length > 0 && (
<TradeTable
trades={processed}
sortColumn={sortColumn}

View File

@@ -1,4 +1,4 @@
import { useMemo } from 'react';
import { useMemo, useEffect } from 'react';
import { useParams } from 'react-router-dom';
import { useMutation, useQueryClient } from '@tanstack/react-query';
import { useTickerDetail } from '../hooks/useTickerDetail';
@@ -11,6 +11,7 @@ import { IndicatorSelector } from '../components/ticker/IndicatorSelector';
import { useToast } from '../components/ui/Toast';
import { fetchData } from '../api/ingestion';
import { formatPrice } from '../lib/format';
import type { TradeSetup } from '../lib/types';
function SectionError({ message, onRetry }: { message: string; onRetry?: () => void }) {
return (
@@ -65,7 +66,7 @@ function DataFreshnessBar({ items }: { items: DataStatusItem[] }) {
export default function TickerDetailPage() {
const { symbol = '' } = useParams<{ symbol: string }>();
const { ohlcv, scores, srLevels, sentiment, fundamentals } = useTickerDetail(symbol);
const { ohlcv, scores, srLevels, sentiment, fundamentals, trades } = useTickerDetail(symbol);
const queryClient = useQueryClient();
const { addToast } = useToast();
@@ -132,10 +133,29 @@ export default function TickerDetailPage() {
},
], [ohlcv.data, sentiment.data, fundamentals.data, srLevels.data, scores.data]);
// Sort S/R levels by strength for the table
// Log trades API errors but don't disrupt the page
useEffect(() => {
if (trades.error) {
console.error('Failed to fetch trade setups:', trades.error);
}
}, [trades.error]);
// Pick the latest trade setup for the current symbol
const tradeSetup: TradeSetup | undefined = useMemo(() => {
if (trades.error || !trades.data) return undefined;
const matching = trades.data.filter(
(t) => t.symbol.toUpperCase() === symbol.toUpperCase(),
);
if (matching.length === 0) return undefined;
return matching.reduce((latest, t) =>
new Date(t.detected_at) > new Date(latest.detected_at) ? t : latest,
);
}, [trades.data, trades.error, symbol]);
// Sort visible S/R levels by strength for the table (only levels within chart zones)
const sortedLevels = useMemo(() => {
if (!srLevels.data?.levels) return [];
return [...srLevels.data.levels].sort((a, b) => b.strength - a.strength);
if (!srLevels.data?.visible_levels) return [];
return [...srLevels.data.visible_levels].sort((a, b) => b.strength - a.strength);
}, [srLevels.data]);
return (
@@ -176,7 +196,7 @@ export default function TickerDetailPage() {
)}
{ohlcv.data && (
<div className="glass p-5">
<CandlestickChart data={ohlcv.data} srLevels={srLevels.data?.levels} />
<CandlestickChart data={ohlcv.data} srLevels={srLevels.data?.levels} zones={srLevels.data?.zones} tradeSetup={tradeSetup} />
{srLevels.isError && (
<p className="mt-2 text-xs text-yellow-500/80">S/R levels unavailable chart shown without overlays</p>
)}
@@ -184,6 +204,39 @@ export default function TickerDetailPage() {
)}
</section>
{/* Trade Setup Summary Card */}
{tradeSetup && (
<section>
<h2 className="mb-3 text-xs font-medium uppercase tracking-widest text-gray-500">Trade Setup</h2>
<div className="glass p-5">
<div className="flex flex-wrap items-center gap-6">
<div className="flex items-center gap-2">
<span className="text-xs text-gray-500">Direction</span>
<span className={`text-sm font-semibold ${tradeSetup.direction === 'long' ? 'text-emerald-400' : 'text-red-400'}`}>
{tradeSetup.direction.toUpperCase()}
</span>
</div>
<div className="flex items-center gap-2">
<span className="text-xs text-gray-500">Entry</span>
<span className="text-sm font-mono text-blue-300">{formatPrice(tradeSetup.entry_price)}</span>
</div>
<div className="flex items-center gap-2">
<span className="text-xs text-gray-500">Stop</span>
<span className="text-sm font-mono text-red-400">{formatPrice(tradeSetup.stop_loss)}</span>
</div>
<div className="flex items-center gap-2">
<span className="text-xs text-gray-500">Target</span>
<span className="text-sm font-mono text-emerald-400">{formatPrice(tradeSetup.target)}</span>
</div>
<div className="flex items-center gap-2">
<span className="text-xs text-gray-500">R:R</span>
<span className="text-sm font-semibold text-gray-200">{tradeSetup.rr_ratio.toFixed(2)}</span>
</div>
</div>
</div>
</section>
)}
{/* Scores + Side Panels */}
<div className="grid gap-6 lg:grid-cols-3">
<section>
@@ -193,7 +246,7 @@ export default function TickerDetailPage() {
<SectionError message={scores.error instanceof Error ? scores.error.message : 'Failed to load scores'} onRetry={() => scores.refetch()} />
)}
{scores.data && (
<ScoreCard compositeScore={scores.data.composite_score} dimensions={scores.data.dimensions.map((d) => ({ dimension: d.dimension, score: d.score }))} />
<ScoreCard compositeScore={scores.data.composite_score} dimensions={scores.data.dimensions} compositeBreakdown={scores.data.composite_breakdown} />
)}
</section>

View File

@@ -1 +1 @@
{"root":["./src/app.tsx","./src/main.tsx","./src/vite-env.d.ts","./src/api/admin.ts","./src/api/auth.ts","./src/api/client.ts","./src/api/fundamentals.ts","./src/api/health.ts","./src/api/indicators.ts","./src/api/ingestion.ts","./src/api/ohlcv.ts","./src/api/scores.ts","./src/api/sentiment.ts","./src/api/sr-levels.ts","./src/api/tickers.ts","./src/api/trades.ts","./src/api/watchlist.ts","./src/components/admin/datacleanup.tsx","./src/components/admin/jobcontrols.tsx","./src/components/admin/settingsform.tsx","./src/components/admin/tickermanagement.tsx","./src/components/admin/usertable.tsx","./src/components/auth/protectedroute.tsx","./src/components/charts/candlestickchart.tsx","./src/components/layout/appshell.tsx","./src/components/layout/mobilenav.tsx","./src/components/layout/sidebar.tsx","./src/components/rankings/rankingstable.tsx","./src/components/rankings/weightsform.tsx","./src/components/scanner/tradetable.tsx","./src/components/ticker/fundamentalspanel.tsx","./src/components/ticker/indicatorselector.tsx","./src/components/ticker/sroverlay.tsx","./src/components/ticker/sentimentpanel.tsx","./src/components/ui/badge.tsx","./src/components/ui/confirmdialog.tsx","./src/components/ui/scorecard.tsx","./src/components/ui/skeleton.tsx","./src/components/ui/toast.tsx","./src/components/watchlist/addtickerform.tsx","./src/components/watchlist/watchlisttable.tsx","./src/hooks/useadmin.ts","./src/hooks/useauth.ts","./src/hooks/usescores.ts","./src/hooks/usetickerdetail.ts","./src/hooks/usetickers.ts","./src/hooks/usetrades.ts","./src/hooks/usewatchlist.ts","./src/lib/format.ts","./src/lib/types.ts","./src/pages/adminpage.tsx","./src/pages/loginpage.tsx","./src/pages/rankingspage.tsx","./src/pages/registerpage.tsx","./src/pages/scannerpage.tsx","./src/pages/tickerdetailpage.tsx","./src/pages/watchlistpage.tsx","./src/stores/authstore.ts"],"version":"5.6.3"}
{"root":["./src/app.tsx","./src/main.tsx","./src/vite-env.d.ts","./src/api/admin.ts","./src/api/auth.ts","./src/api/client.ts","./src/api/fundamentals.ts","./src/api/health.ts","./src/api/indicators.ts","./src/api/ingestion.ts","./src/api/ohlcv.ts","./src/api/scores.ts","./src/api/sentiment.ts","./src/api/sr-levels.ts","./src/api/tickers.ts","./src/api/trades.ts","./src/api/watchlist.ts","./src/components/admin/datacleanup.tsx","./src/components/admin/jobcontrols.tsx","./src/components/admin/settingsform.tsx","./src/components/admin/tickermanagement.tsx","./src/components/admin/usertable.tsx","./src/components/auth/protectedroute.tsx","./src/components/charts/candlestickchart.tsx","./src/components/layout/appshell.tsx","./src/components/layout/mobilenav.tsx","./src/components/layout/sidebar.tsx","./src/components/rankings/rankingstable.tsx","./src/components/rankings/weightsform.tsx","./src/components/scanner/tradetable.tsx","./src/components/ticker/dimensionbreakdownpanel.tsx","./src/components/ticker/fundamentalspanel.tsx","./src/components/ticker/indicatorselector.tsx","./src/components/ticker/sroverlay.tsx","./src/components/ticker/sentimentpanel.tsx","./src/components/ui/badge.tsx","./src/components/ui/confirmdialog.tsx","./src/components/ui/scorecard.tsx","./src/components/ui/skeleton.tsx","./src/components/ui/toast.tsx","./src/components/watchlist/addtickerform.tsx","./src/components/watchlist/watchlisttable.tsx","./src/hooks/useadmin.ts","./src/hooks/useauth.ts","./src/hooks/usescores.ts","./src/hooks/usetickerdetail.ts","./src/hooks/usetickers.ts","./src/hooks/usetrades.ts","./src/hooks/usewatchlist.ts","./src/lib/format.ts","./src/lib/types.ts","./src/pages/adminpage.tsx","./src/pages/loginpage.tsx","./src/pages/rankingspage.tsx","./src/pages/registerpage.tsx","./src/pages/scannerpage.tsx","./src/pages/tickerdetailpage.tsx","./src/pages/watchlistpage.tsx","./src/stores/authstore.ts"],"version":"5.6.3"}

View File

@@ -0,0 +1,211 @@
"""Unit tests for cluster_sr_zones() in app.services.sr_service."""
from app.services.sr_service import cluster_sr_zones
def _level(price: float, strength: int = 50) -> dict:
"""Helper to build a level dict."""
return {"price_level": price, "strength": strength}
class TestClusterSrZonesEmptyAndEdge:
    """Edge cases: empty input, max_zones boundaries."""
    def test_empty_levels_returns_empty(self):
        # No levels → no zones, regardless of current price.
        assert cluster_sr_zones([], current_price=100.0) == []
    def test_max_zones_zero_returns_empty(self):
        # A cap of zero yields nothing even when levels exist.
        levels = [_level(100.0)]
        assert cluster_sr_zones(levels, current_price=100.0, max_zones=0) == []
    def test_max_zones_negative_returns_empty(self):
        # A negative cap is treated the same as zero.
        levels = [_level(100.0)]
        assert cluster_sr_zones(levels, current_price=100.0, max_zones=-1) == []
    def test_single_level(self):
        # A lone level becomes a degenerate zone: low == high == midpoint.
        levels = [_level(95.0, 60)]
        zones = cluster_sr_zones(levels, current_price=100.0)
        assert len(zones) == 1
        z = zones[0]
        assert z["low"] == 95.0
        assert z["high"] == 95.0
        assert z["midpoint"] == 95.0
        assert z["strength"] == 60
        assert z["type"] == "support"
        assert z["level_count"] == 1
class TestClusterSrZonesMerging:
    """Greedy merge behaviour."""
    def test_two_levels_within_tolerance_merge(self):
        # 100 and 101 are 1% apart; tolerance=2% → should merge
        levels = [_level(100.0, 30), _level(101.0, 40)]
        zones = cluster_sr_zones(levels, current_price=200.0, tolerance=0.02)
        assert len(zones) == 1
        z = zones[0]
        assert z["low"] == 100.0
        assert z["high"] == 101.0
        assert z["midpoint"] == 100.5
        # Merged strength is the sum of the member strengths (30 + 40).
        assert z["strength"] == 70
        assert z["level_count"] == 2
    def test_two_levels_outside_tolerance_stay_separate(self):
        # 100 and 110 are 10% apart; tolerance=2% → separate
        levels = [_level(100.0, 30), _level(110.0, 40)]
        zones = cluster_sr_zones(levels, current_price=200.0, tolerance=0.02)
        assert len(zones) == 2
    def test_all_same_price_merge_into_one(self):
        # Identical prices always merge; strengths 20 + 30 + 10 sum to 60.
        levels = [_level(50.0, 20), _level(50.0, 30), _level(50.0, 10)]
        zones = cluster_sr_zones(levels, current_price=100.0)
        assert len(zones) == 1
        assert zones[0]["strength"] == 60
        assert zones[0]["level_count"] == 3
    def test_levels_at_tolerance_boundary(self):
        # midpoint of cluster starting at 100 is 100. 2% of 100 = 2.
        # A level at 102 is exactly at the boundary → should merge
        # (i.e. the tolerance comparison is inclusive).
        levels = [_level(100.0, 25), _level(102.0, 25)]
        zones = cluster_sr_zones(levels, current_price=200.0, tolerance=0.02)
        assert len(zones) == 1
class TestClusterSrZonesStrength:
    """Strength capping and computation."""
    def test_strength_capped_at_100(self):
        # 80 + 80 = 160 would exceed the cap → clamped to 100.
        levels = [_level(100.0, 80), _level(100.5, 80)]
        zones = cluster_sr_zones(levels, current_price=200.0, tolerance=0.02)
        assert len(zones) == 1
        assert zones[0]["strength"] == 100
    def test_strength_sum_when_under_cap(self):
        # 10 + 20 = 30 stays below the cap and is reported as-is.
        levels = [_level(100.0, 10), _level(100.5, 20)]
        zones = cluster_sr_zones(levels, current_price=200.0, tolerance=0.02)
        assert zones[0]["strength"] == 30
class TestClusterSrZonesTypeTagging:
    """Support vs resistance tagging."""
    def test_support_when_midpoint_below_current(self):
        # Zone midpoint strictly below current price → support.
        levels = [_level(90.0, 50)]
        zones = cluster_sr_zones(levels, current_price=100.0)
        assert zones[0]["type"] == "support"
    def test_resistance_when_midpoint_above_current(self):
        # Zone midpoint strictly above current price → resistance.
        levels = [_level(110.0, 50)]
        zones = cluster_sr_zones(levels, current_price=100.0)
        assert zones[0]["type"] == "resistance"
    def test_resistance_when_midpoint_equals_current(self):
        # "else resistance" per spec — exact tie breaks toward resistance.
        levels = [_level(100.0, 50)]
        zones = cluster_sr_zones(levels, current_price=100.0)
        assert zones[0]["type"] == "resistance"
class TestClusterSrZonesSorting:
    """Sorting by strength descending."""
    def test_sorted_by_strength_descending(self):
        # tolerance=0.001 keeps the three widely-spaced levels in separate zones.
        levels = [_level(50.0, 20), _level(150.0, 80), _level(250.0, 50)]
        zones = cluster_sr_zones(levels, current_price=100.0, tolerance=0.001)
        strengths = [z["strength"] for z in zones]
        assert strengths == sorted(strengths, reverse=True)
class TestClusterSrZonesMaxZones:
    """max_zones filtering."""
    def test_max_zones_limits_output(self):
        # Three separate zones exist (tight tolerance); cap keeps only two.
        levels = [_level(50.0, 20), _level(150.0, 80), _level(250.0, 50)]
        zones = cluster_sr_zones(
            levels, current_price=100.0, tolerance=0.001, max_zones=2
        )
        assert len(zones) == 2
        # Balanced selection: 1 support (strength 20) + 1 resistance (strength 80)
        types = {z["type"] for z in zones}
        assert "support" in types
        assert "resistance" in types
        # Output remains sorted by strength descending after selection.
        assert zones[0]["strength"] == 80
        assert zones[1]["strength"] == 20
    def test_max_zones_none_returns_all(self):
        # max_zones=None disables the cap entirely.
        levels = [_level(50.0, 20), _level(150.0, 80), _level(250.0, 50)]
        zones = cluster_sr_zones(
            levels, current_price=100.0, tolerance=0.001, max_zones=None
        )
        assert len(zones) == 3
    def test_max_zones_larger_than_count_returns_all(self):
        # A cap above the zone count is a no-op.
        levels = [_level(50.0, 20)]
        zones = cluster_sr_zones(
            levels, current_price=100.0, max_zones=10
        )
        assert len(zones) == 1
class TestClusterSrZonesBalancedSelection:
    """Balanced interleave selection behaviour (Requirements 1.1, 1.2, 1.3, 1.5, 1.6)."""
    def test_mixed_input_produces_balanced_output(self):
        """3 support + 3 resistance with max_zones=4 → 2 support + 2 resistance."""
        levels = [
            _level(80.0, 70),   # support
            _level(85.0, 50),   # support
            _level(90.0, 30),   # support
            _level(110.0, 60),  # resistance
            _level(115.0, 40),  # resistance
            _level(120.0, 20),  # resistance
        ]
        zones = cluster_sr_zones(levels, current_price=100.0, tolerance=0.001, max_zones=4)
        assert len(zones) == 4
        support_count = sum(1 for z in zones if z["type"] == "support")
        resistance_count = sum(1 for z in zones if z["type"] == "resistance")
        assert support_count == 2
        assert resistance_count == 2
    def test_all_support_fills_from_support_only(self):
        """When no resistance levels exist, all slots filled from support."""
        # current_price=200 places every level below price → all support.
        levels = [
            _level(80.0, 70),
            _level(85.0, 50),
            _level(90.0, 30),
        ]
        zones = cluster_sr_zones(levels, current_price=200.0, tolerance=0.001, max_zones=2)
        assert len(zones) == 2
        assert all(z["type"] == "support" for z in zones)
    def test_all_resistance_fills_from_resistance_only(self):
        """When no support levels exist, all slots filled from resistance."""
        # current_price=50 places every level above price → all resistance.
        levels = [
            _level(110.0, 60),
            _level(115.0, 40),
            _level(120.0, 20),
        ]
        zones = cluster_sr_zones(levels, current_price=50.0, tolerance=0.001, max_zones=2)
        assert len(zones) == 2
        assert all(z["type"] == "resistance" for z in zones)
    def test_single_zone_edge_case(self):
        """Only 1 level total → returns exactly 1 zone."""
        levels = [_level(95.0, 45)]
        zones = cluster_sr_zones(levels, current_price=100.0, max_zones=5)
        assert len(zones) == 1
        assert zones[0]["strength"] == 45
    def test_both_types_present_when_max_zones_gte_2(self):
        """When both types exist and max_zones >= 2, at least one of each is present."""
        # The weakest resistance must still win a slot over a stronger support.
        levels = [
            _level(70.0, 90),   # support (strongest overall)
            _level(75.0, 80),   # support
            _level(80.0, 70),   # support
            _level(130.0, 10),  # resistance (weakest overall)
        ]
        zones = cluster_sr_zones(levels, current_price=100.0, tolerance=0.001, max_zones=2)
        types = {z["type"] for z in zones}
        assert "support" in types
        assert "resistance" in types

View File

@@ -0,0 +1,156 @@
"""Unit tests for FMPFundamentalProvider 402 reason recording."""
from __future__ import annotations
from unittest.mock import AsyncMock, patch
import httpx
import pytest
from app.providers.fmp import FMPFundamentalProvider
def _mock_response(status_code: int, json_data: object = None) -> httpx.Response:
    """Construct a stub httpx.Response carrying the given status and JSON body."""
    payload = {} if json_data is None else json_data
    fake_request = httpx.Request("GET", "https://example.com")
    return httpx.Response(status_code=status_code, json=payload, request=fake_request)
@pytest.fixture
def provider() -> FMPFundamentalProvider:
    # Provider under test; dummy key — these tests never hit the network.
    return FMPFundamentalProvider(api_key="test-key")
class TestFetchJsonOptional402Tracking:
    """_fetch_json_optional returns (data, was_402) tuple."""
    @pytest.mark.asyncio
    async def test_returns_empty_dict_and_true_on_402(self, provider):
        # 402 Payment Required → empty payload plus a flag for the caller to record.
        mock_client = AsyncMock()
        mock_client.get.return_value = _mock_response(402)
        data, was_402 = await provider._fetch_json_optional(
            mock_client, "ratios-ttm", {}, "AAPL"
        )
        assert data == {}
        assert was_402 is True
    @pytest.mark.asyncio
    async def test_returns_data_and_false_on_200(self, provider):
        # 200 with a one-element list → the first element is unwrapped to a dict.
        mock_client = AsyncMock()
        mock_client.get.return_value = _mock_response(
            200, [{"priceToEarningsRatioTTM": 25.5}]
        )
        data, was_402 = await provider._fetch_json_optional(
            mock_client, "ratios-ttm", {}, "AAPL"
        )
        assert data == {"priceToEarningsRatioTTM": 25.5}
        assert was_402 is False
class TestFetchFundamentals402Recording:
    """fetch_fundamentals records 402 endpoints in unavailable_fields."""
    @pytest.mark.asyncio
    async def test_all_402_records_all_fields(self, provider):
        """When all supplementary endpoints return 402, all three fields are recorded."""
        profile_resp = _mock_response(200, [{"marketCap": 1_000_000}])
        ratios_resp = _mock_response(402)
        growth_resp = _mock_response(402)
        earnings_resp = _mock_response(402)
        # Route each FMP endpoint to its canned response by URL substring.
        async def mock_get(url, params=None):
            if "profile" in url:
                return profile_resp
            if "ratios-ttm" in url:
                return ratios_resp
            if "financial-growth" in url:
                return growth_resp
            if "earnings" in url:
                return earnings_resp
            return _mock_response(200, [{}])
        with patch("app.providers.fmp.httpx.AsyncClient") as MockClient:
            instance = AsyncMock()
            instance.get.side_effect = mock_get
            # Make the mock usable as an async context manager.
            instance.__aenter__ = AsyncMock(return_value=instance)
            instance.__aexit__ = AsyncMock(return_value=False)
            MockClient.return_value = instance
            result = await provider.fetch_fundamentals("AAPL")
        assert result.unavailable_fields == {
            "pe_ratio": "requires paid plan",
            "revenue_growth": "requires paid plan",
            "earnings_surprise": "requires paid plan",
        }
    @pytest.mark.asyncio
    async def test_mixed_200_402_records_only_402_fields(self, provider):
        """When only ratios-ttm returns 402, only pe_ratio is recorded."""
        profile_resp = _mock_response(200, [{"marketCap": 2_000_000}])
        ratios_resp = _mock_response(402)
        growth_resp = _mock_response(200, [{"revenueGrowth": 0.15}])
        earnings_resp = _mock_response(200, [{"epsActual": 3.0, "epsEstimated": 2.5}])
        async def mock_get(url, params=None):
            if "profile" in url:
                return profile_resp
            if "ratios-ttm" in url:
                return ratios_resp
            if "financial-growth" in url:
                return growth_resp
            if "earnings" in url:
                return earnings_resp
            return _mock_response(200, [{}])
        with patch("app.providers.fmp.httpx.AsyncClient") as MockClient:
            instance = AsyncMock()
            instance.get.side_effect = mock_get
            instance.__aenter__ = AsyncMock(return_value=instance)
            instance.__aexit__ = AsyncMock(return_value=False)
            MockClient.return_value = instance
            result = await provider.fetch_fundamentals("AAPL")
        # Only the 402'd field is recorded; the successful ones carry data through.
        assert result.unavailable_fields == {"pe_ratio": "requires paid plan"}
        assert result.revenue_growth == 0.15
        assert result.earnings_surprise is not None
    @pytest.mark.asyncio
    async def test_no_402_empty_unavailable_fields(self, provider):
        """When all endpoints succeed, unavailable_fields is empty."""
        profile_resp = _mock_response(200, [{"marketCap": 3_000_000}])
        ratios_resp = _mock_response(200, [{"priceToEarningsRatioTTM": 20.0}])
        growth_resp = _mock_response(200, [{"revenueGrowth": 0.10}])
        earnings_resp = _mock_response(200, [{"epsActual": 2.0, "epsEstimated": 1.8}])
        async def mock_get(url, params=None):
            if "profile" in url:
                return profile_resp
            if "ratios-ttm" in url:
                return ratios_resp
            if "financial-growth" in url:
                return growth_resp
            if "earnings" in url:
                return earnings_resp
            return _mock_response(200, [{}])
        with patch("app.providers.fmp.httpx.AsyncClient") as MockClient:
            instance = AsyncMock()
            instance.get.side_effect = mock_get
            instance.__aenter__ = AsyncMock(return_value=instance)
            instance.__aexit__ = AsyncMock(return_value=False)
            MockClient.return_value = instance
            result = await provider.fetch_fundamentals("AAPL")
        assert result.unavailable_fields == {}
        assert result.pe_ratio == 20.0

View File

@@ -0,0 +1,99 @@
"""Unit tests for fundamental_service — unavailable_fields persistence."""
from __future__ import annotations
import json
import pytest
from sqlalchemy.ext.asyncio import AsyncSession, async_sessionmaker, create_async_engine
from app.database import Base
from app.models.ticker import Ticker
from app.services import fundamental_service
# Use a dedicated engine so commit/refresh work without conflicting
# with the conftest transactional session.
_engine = create_async_engine("sqlite+aiosqlite://", echo=False)
_session_factory = async_sessionmaker(_engine, class_=AsyncSession, expire_on_commit=False)
@pytest.fixture(autouse=True)
async def _setup_tables():
    # Fresh schema per test: create all tables before the test, drop them after.
    async with _engine.begin() as conn:
        await conn.run_sync(Base.metadata.create_all)
    yield
    async with _engine.begin() as conn:
        await conn.run_sync(Base.metadata.drop_all)
@pytest.fixture
async def session() -> AsyncSession:
    # Session bound to the module-local engine, not the conftest transactional one.
    async with _session_factory() as s:
        yield s
@pytest.fixture
async def ticker(session: AsyncSession) -> Ticker:
    """Create a test ticker."""
    t = Ticker(symbol="AAPL")
    session.add(t)
    await session.commit()
    # Refresh so server-generated columns (e.g. the primary key) are populated.
    await session.refresh(t)
    return t
@pytest.mark.asyncio
async def test_store_fundamental_persists_unavailable_fields(
    session: AsyncSession, ticker: Ticker
):
    """unavailable_fields dict is serialized to JSON and stored."""
    fields = {"pe_ratio": "requires paid plan", "revenue_growth": "requires paid plan"}
    record = await fundamental_service.store_fundamental(
        session,
        symbol="AAPL",
        pe_ratio=None,
        revenue_growth=None,
        market_cap=1_000_000.0,
        unavailable_fields=fields,
    )
    # Round-trip: the stored JSON column decodes back to the exact input dict.
    assert json.loads(record.unavailable_fields_json) == fields
@pytest.mark.asyncio
async def test_store_fundamental_defaults_to_empty_dict(
    session: AsyncSession, ticker: Ticker
):
    """When unavailable_fields is not provided, column defaults to '{}'."""
    record = await fundamental_service.store_fundamental(
        session,
        symbol="AAPL",
        pe_ratio=25.0,
    )
    # Omitting the kwarg must still yield a decodable empty-dict JSON value.
    assert json.loads(record.unavailable_fields_json) == {}
@pytest.mark.asyncio
async def test_store_fundamental_updates_unavailable_fields(
    session: AsyncSession, ticker: Ticker
):
    """Updating an existing record also updates unavailable_fields_json."""
    # First store
    await fundamental_service.store_fundamental(
        session,
        symbol="AAPL",
        pe_ratio=None,
        unavailable_fields={"pe_ratio": "requires paid plan"},
    )
    # Second store — fields now available
    record = await fundamental_service.store_fundamental(
        session,
        symbol="AAPL",
        pe_ratio=25.0,
        unavailable_fields={},
    )
    # The second write must overwrite, not merge with, the first value.
    assert json.loads(record.unavailable_fields_json) == {}

View File

@@ -0,0 +1,162 @@
"""Unit tests for OpenAISentimentProvider reasoning and citations extraction."""
from __future__ import annotations
from datetime import datetime, timezone
from types import SimpleNamespace
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from app.providers.openai_sentiment import OpenAISentimentProvider
def _make_annotation(ann_type: str, url: str = "", title: str = "") -> SimpleNamespace:
    """Build a fake response annotation (e.g. type='url_citation')."""
    return SimpleNamespace(type=ann_type, url=url, title=title)
def _make_content_block(text: str, annotations: list | None = None) -> SimpleNamespace:
block = SimpleNamespace(text=text, annotations=annotations or [])
return block
def _make_message_item(content_blocks: list) -> SimpleNamespace:
    """Wrap content blocks in a fake Responses API 'message' output item."""
    return SimpleNamespace(type="message", content=content_blocks)
def _make_web_search_item() -> SimpleNamespace:
    """Build a fake 'web_search_call' output item (no payload needed)."""
    return SimpleNamespace(type="web_search_call")
def _build_response(json_text: str, annotations: list | None = None) -> SimpleNamespace:
    """Build a mock OpenAI Responses API response holding one message item."""
    message = _make_message_item([_make_content_block(json_text, annotations or [])])
    return SimpleNamespace(output=[message])
def _build_response_with_search(
    json_text: str, annotations: list | None = None
) -> SimpleNamespace:
    """Build a response with a web_search_call item followed by a message with annotations."""
    items = [
        _make_web_search_item(),
        _make_message_item([_make_content_block(json_text, annotations or [])]),
    ]
    return SimpleNamespace(output=items)
@pytest.fixture
def provider():
    """Create an OpenAISentimentProvider with a mocked client."""
    # Patch the SDK client class so no real OpenAI client is constructed.
    with patch("app.providers.openai_sentiment.AsyncOpenAI"):
        p = OpenAISentimentProvider(api_key="test-key")
        return p
class TestReasoningExtraction:
    """Tests for extracting reasoning from the parsed JSON response."""

    @pytest.mark.asyncio
    async def test_reasoning_extracted_from_json(self, provider):
        payload = '{"classification": "bullish", "confidence": 85, "reasoning": "Strong earnings report"}'
        provider._client.responses.create = AsyncMock(
            return_value=_build_response(payload)
        )
        result = await provider.fetch_sentiment("AAPL")
        assert result.reasoning == "Strong earnings report"

    @pytest.mark.asyncio
    async def test_empty_reasoning_when_field_missing(self, provider):
        # "reasoning" key absent entirely — result must carry "".
        payload = '{"classification": "neutral", "confidence": 50}'
        provider._client.responses.create = AsyncMock(
            return_value=_build_response(payload)
        )
        result = await provider.fetch_sentiment("MSFT")
        assert result.reasoning == ""

    @pytest.mark.asyncio
    async def test_empty_reasoning_when_field_is_empty_string(self, provider):
        payload = '{"classification": "bearish", "confidence": 70, "reasoning": ""}'
        provider._client.responses.create = AsyncMock(
            return_value=_build_response(payload)
        )
        result = await provider.fetch_sentiment("TSLA")
        assert result.reasoning == ""
class TestCitationsExtraction:
    """Tests for extracting url_citation annotations from the response."""

    @pytest.mark.asyncio
    async def test_citations_extracted_from_annotations(self, provider):
        payload = '{"classification": "bullish", "confidence": 90, "reasoning": "Good news"}'
        notes = [
            _make_annotation("url_citation", url="https://example.com/1", title="Article 1"),
            _make_annotation("url_citation", url="https://example.com/2", title="Article 2"),
        ]
        provider._client.responses.create = AsyncMock(
            return_value=_build_response(payload, notes)
        )
        result = await provider.fetch_sentiment("AAPL")
        # Both url_citation annotations surface as citation dicts, in order.
        assert len(result.citations) == 2
        assert result.citations[0] == {"url": "https://example.com/1", "title": "Article 1"}
        assert result.citations[1] == {"url": "https://example.com/2", "title": "Article 2"}

    @pytest.mark.asyncio
    async def test_empty_citations_when_no_annotations(self, provider):
        payload = '{"classification": "neutral", "confidence": 50, "reasoning": "No news"}'
        provider._client.responses.create = AsyncMock(
            return_value=_build_response(payload)
        )
        result = await provider.fetch_sentiment("GOOG")
        assert result.citations == []

    @pytest.mark.asyncio
    async def test_non_url_citation_annotations_ignored(self, provider):
        payload = '{"classification": "bearish", "confidence": 60, "reasoning": "Mixed signals"}'
        notes = [
            _make_annotation("file_citation", url="https://file.com", title="File"),
            _make_annotation("url_citation", url="https://real.com", title="Real"),
        ]
        provider._client.responses.create = AsyncMock(
            return_value=_build_response(payload, notes)
        )
        result = await provider.fetch_sentiment("META")
        # Only the url_citation annotation is kept; other types are dropped.
        assert len(result.citations) == 1
        assert result.citations[0] == {"url": "https://real.com", "title": "Real"}

    @pytest.mark.asyncio
    async def test_citations_from_response_with_web_search_call(self, provider):
        payload = '{"classification": "bullish", "confidence": 80, "reasoning": "Positive outlook"}'
        notes = [
            _make_annotation("url_citation", url="https://news.com/a", title="News A"),
        ]
        provider._client.responses.create = AsyncMock(
            return_value=_build_response_with_search(payload, notes)
        )
        result = await provider.fetch_sentiment("NVDA")
        assert len(result.citations) == 1
        assert result.citations[0] == {"url": "https://news.com/a", "title": "News A"}

    @pytest.mark.asyncio
    async def test_no_error_when_annotations_attr_missing(self, provider):
        """Content blocks without annotations attribute should not cause errors."""
        payload = '{"classification": "neutral", "confidence": 50, "reasoning": "Quiet day"}'
        # Build a content block that has no annotations attribute at all.
        bare_block = SimpleNamespace(text=payload)
        message = SimpleNamespace(type="message", content=[bare_block])
        provider._client.responses.create = AsyncMock(
            return_value=SimpleNamespace(output=[message])
        )
        result = await provider.fetch_sentiment("AMD")
        assert result.citations == []
        assert result.reasoning == "Quiet day"

View File

@@ -0,0 +1,273 @@
"""Bug-condition exploration tests for R:R scanner target quality.
These tests confirm the bug described in bugfix.md: the old code always selected
the most distant S/R level (highest raw R:R) regardless of strength or proximity.
The fix replaces max-R:R selection with quality-score selection.
Since the code is already fixed, these tests PASS on the current codebase.
On the unfixed code they would FAIL, confirming the bug.
**Validates: Requirements 1.1, 1.3, 1.4, 2.1, 2.3, 2.4**
"""
from __future__ import annotations
from datetime import date, timedelta
import pytest
from hypothesis import given, settings, HealthCheck, strategies as st
from sqlalchemy.ext.asyncio import AsyncSession
from app.models.ohlcv import OHLCVRecord
from app.models.sr_level import SRLevel
from app.models.ticker import Ticker
from app.services.rr_scanner_service import scan_ticker
# ---------------------------------------------------------------------------
# Session fixture that allows scan_ticker to commit
# ---------------------------------------------------------------------------
# The default db_session fixture wraps in session.begin() which conflicts
# with scan_ticker's internal commit(). We use a plain session instead.
@pytest.fixture
async def scan_session() -> AsyncSession:
    """Provide a DB session compatible with scan_ticker (which commits)."""
    # Import lazily so the conftest module is fully initialized first.
    from tests.conftest import _test_session_factory
    async with _test_session_factory() as session:
        yield session
# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------
def _make_ohlcv_bars(
    ticker_id: int,
    num_bars: int = 20,
    base_close: float = 100.0,
) -> list[OHLCVRecord]:
    """Generate realistic OHLCV bars with small daily variation.

    Produces bars where close ≈ base_close, with enough range for ATR
    computation (needs >= 15 bars). The ATR will be roughly 2.0.
    """
    first_day = date(2024, 1, 1)
    # Closes oscillate ±0.5 around base_close: -0.5, 0.0, +0.5, -0.5, ...
    closes = [base_close + (i % 3 - 1) * 0.5 for i in range(num_bars)]
    return [
        OHLCVRecord(
            ticker_id=ticker_id,
            date=first_day + timedelta(days=i),
            open=c - 0.3,
            high=c + 1.0,
            low=c - 1.0,
            close=c,
            volume=100_000,
        )
        for i, c in enumerate(closes)
    ]
# ---------------------------------------------------------------------------
# Deterministic test: strong-near vs weak-far (long setup)
# ---------------------------------------------------------------------------
@pytest.mark.asyncio
async def test_long_prefers_strong_near_over_weak_far(scan_session: AsyncSession):
    """With a strong nearby resistance and a weak distant resistance, the
    scanner should pick the strong nearby one — NOT the most distant.

    On unfixed code this would fail because max-R:R always picks the
    farthest level.
    """
    ticker = Ticker(symbol="EXPLR")
    scan_session.add(ticker)
    await scan_session.flush()
    # 20 bars closing around 100
    bars = _make_ohlcv_bars(ticker.id, num_bars=20, base_close=100.0)
    scan_session.add_all(bars)
    # With ATR=2.0 and multiplier=1.5, risk=3.0.
    # R:R threshold=1.5 → min reward=4.5 → min target=104.5
    # Strong nearby resistance: price=105, strength=90 (R:R≈1.67, quality≈0.66)
    near_level = SRLevel(
        ticker_id=ticker.id,
        price_level=105.0,
        type="resistance",
        strength=90,
        detection_method="volume_profile",
    )
    # Weak distant resistance: price=130, strength=5 (R:R=10, quality≈0.58)
    far_level = SRLevel(
        ticker_id=ticker.id,
        price_level=130.0,
        type="resistance",
        strength=5,
        detection_method="volume_profile",
    )
    scan_session.add_all([near_level, far_level])
    # flush, not commit — scan_ticker commits internally (see fixture docstring)
    await scan_session.flush()
    setups = await scan_ticker(scan_session, "EXPLR", rr_threshold=1.5)
    long_setups = [s for s in setups if s.direction == "long"]
    assert len(long_setups) == 1, "Expected exactly one long setup"
    selected_target = long_setups[0].target
    # The scanner must NOT pick the most distant level (130)
    assert selected_target != pytest.approx(130.0, abs=0.01), (
        "Bug: scanner picked the weak distant level (130) instead of the "
        "strong nearby level (105)"
    )
    # It should pick the strong nearby level
    assert selected_target == pytest.approx(105.0, abs=0.01)
# ---------------------------------------------------------------------------
# Deterministic test: strong-near vs weak-far (short setup)
# ---------------------------------------------------------------------------
@pytest.mark.asyncio
async def test_short_prefers_strong_near_over_weak_far(scan_session: AsyncSession):
    """Short-side mirror: strong nearby support should be preferred over
    weak distant support.
    """
    ticker = Ticker(symbol="EXPLS")
    scan_session.add(ticker)
    await scan_session.flush()
    bars = _make_ohlcv_bars(ticker.id, num_bars=20, base_close=100.0)
    scan_session.add_all(bars)
    # With ATR=2.0 and multiplier=1.5, risk=3.0.
    # R:R threshold=1.5 → min reward=4.5 → min target below 95.5
    # Strong nearby support: price=95, strength=85 (R:R≈1.67, quality≈0.64)
    near_level = SRLevel(
        ticker_id=ticker.id,
        price_level=95.0,
        type="support",
        strength=85,
        detection_method="pivot_point",
    )
    # Weak distant support: price=70, strength=5 (R:R=10, quality≈0.58)
    far_level = SRLevel(
        ticker_id=ticker.id,
        price_level=70.0,
        type="support",
        strength=5,
        detection_method="pivot_point",
    )
    scan_session.add_all([near_level, far_level])
    # flush, not commit — scan_ticker commits internally (see fixture docstring)
    await scan_session.flush()
    setups = await scan_ticker(scan_session, "EXPLS", rr_threshold=1.5)
    short_setups = [s for s in setups if s.direction == "short"]
    assert len(short_setups) == 1, "Expected exactly one short setup"
    selected_target = short_setups[0].target
    # Must not choose the distant weak support...
    assert selected_target != pytest.approx(70.0, abs=0.01), (
        "Bug: scanner picked the weak distant level (70) instead of the "
        "strong nearby level (95)"
    )
    # ...and must choose the strong nearby one.
    assert selected_target == pytest.approx(95.0, abs=0.01)
# ---------------------------------------------------------------------------
# Hypothesis property test: selection is NOT always the most distant level
# ---------------------------------------------------------------------------
@st.composite
def strong_near_weak_far_pair(draw: st.DrawFn) -> dict:
    """Generate a (strong-near, weak-far) resistance pair above entry=100.

    Guarantees:
    - near_price < far_price (both above entry)
    - near_strength >> far_strength
    - Both meet the R:R threshold of 1.5 given typical ATR ≈ 2 → risk ≈ 3
    """
    # Near level: 5-15 above entry (R:R ≈ 1.7-5.0 with risk≈3)
    near_dist = draw(st.floats(min_value=5.0, max_value=15.0))
    near_strength = draw(st.integers(min_value=70, max_value=100))
    # Far level: 25-60 above entry (R:R ≈ 8.3-20 with risk≈3)
    far_dist = draw(st.floats(min_value=25.0, max_value=60.0))
    far_strength = draw(st.integers(min_value=1, max_value=15))
    return {
        "near_price": 100.0 + near_dist,
        "near_strength": near_strength,
        "far_price": 100.0 + far_dist,
        "far_strength": far_strength,
    }
@pytest.mark.asyncio
@given(pair=strong_near_weak_far_pair())
@settings(
    max_examples=15,
    deadline=None,
    suppress_health_check=[HealthCheck.function_scoped_fixture],
)
async def test_property_scanner_does_not_always_pick_most_distant(
    pair: dict,
    scan_session: AsyncSession,
):
    """**Validates: Requirements 1.1, 1.3, 1.4, 2.1, 2.3, 2.4**

    Property: when a strong nearby resistance exists alongside a weak distant
    resistance, the scanner does NOT always select the most distant level.

    On unfixed code this would fail for every example because max-R:R always
    picks the farthest level.
    """
    # NOTE(review): scan_session is requested but never used in this body;
    # each hypothesis example builds its own session below — confirm whether
    # the fixture is still needed for side effects or can be dropped.
    from tests.conftest import _test_engine, _test_session_factory
    # Each hypothesis example needs a fresh DB state
    async with _test_engine.begin() as conn:
        from app.database import Base
        await conn.run_sync(Base.metadata.drop_all)
        await conn.run_sync(Base.metadata.create_all)
    async with _test_session_factory() as session:
        ticker = Ticker(symbol="PROP")
        session.add(ticker)
        await session.flush()
        bars = _make_ohlcv_bars(ticker.id, num_bars=20, base_close=100.0)
        session.add_all(bars)
        near_level = SRLevel(
            ticker_id=ticker.id,
            price_level=pair["near_price"],
            type="resistance",
            strength=pair["near_strength"],
            detection_method="volume_profile",
        )
        far_level = SRLevel(
            ticker_id=ticker.id,
            price_level=pair["far_price"],
            type="resistance",
            strength=pair["far_strength"],
            detection_method="volume_profile",
        )
        session.add_all([near_level, far_level])
        await session.commit()
        setups = await scan_ticker(session, "PROP", rr_threshold=1.5)
        long_setups = [s for s in setups if s.direction == "long"]
        assert len(long_setups) == 1, "Expected exactly one long setup"
        selected_target = long_setups[0].target
        most_distant = round(pair["far_price"], 4)
        # The fixed scanner should prefer the strong nearby level, not the
        # most distant weak one.
        assert selected_target != pytest.approx(most_distant, abs=0.01), (
            f"Bug: scanner picked the most distant level ({most_distant}) "
            f"with strength={pair['far_strength']} over the nearby level "
            f"({round(pair['near_price'], 4)}) with strength={pair['near_strength']}"
        )

View File

@@ -0,0 +1,383 @@
"""Fix-checking tests for R:R scanner quality-score selection.
Verify that the fixed scan_ticker selects the candidate with the highest
quality score among all candidates meeting the R:R threshold, for both
long and short setups.
**Validates: Requirements 2.1, 2.2, 2.3, 2.4**
"""
from __future__ import annotations
from datetime import date, timedelta
import pytest
from hypothesis import given, settings, HealthCheck, strategies as st
from sqlalchemy.ext.asyncio import AsyncSession
from app.models.ohlcv import OHLCVRecord
from app.models.sr_level import SRLevel
from app.models.ticker import Ticker
from app.services.rr_scanner_service import scan_ticker, _compute_quality_score
# ---------------------------------------------------------------------------
# Session fixture (plain session, not wrapped in begin())
# ---------------------------------------------------------------------------
@pytest.fixture
async def scan_session() -> AsyncSession:
    """Provide a DB session compatible with scan_ticker (which commits)."""
    # Import lazily so the conftest module is fully initialized first.
    from tests.conftest import _test_session_factory
    async with _test_session_factory() as session:
        yield session
# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------
def _make_ohlcv_bars(
    ticker_id: int,
    num_bars: int = 20,
    base_close: float = 100.0,
) -> list[OHLCVRecord]:
    """Generate OHLCV bars closing around base_close with ATR ≈ 2.0."""
    first_day = date(2024, 1, 1)
    # Closes oscillate ±0.5 around base_close: -0.5, 0.0, +0.5, -0.5, ...
    closes = [base_close + (i % 3 - 1) * 0.5 for i in range(num_bars)]
    return [
        OHLCVRecord(
            ticker_id=ticker_id,
            date=first_day + timedelta(days=i),
            open=c - 0.3,
            high=c + 1.0,
            low=c - 1.0,
            close=c,
            volume=100_000,
        )
        for i, c in enumerate(closes)
    ]
# ---------------------------------------------------------------------------
# Hypothesis strategy: multiple resistance levels above entry for longs
# ---------------------------------------------------------------------------
@st.composite
def long_candidate_levels(draw: st.DrawFn) -> list[dict]:
    """Generate 2-5 resistance levels above entry_price=100.

    All levels meet the R:R threshold of 1.5 given ATR≈2, risk≈3,
    so min reward=4.5, min target=104.5.
    """
    count = draw(st.integers(min_value=2, max_value=5))
    return [
        {
            # Distance from entry: 5 to 50 — always past the 4.5 minimum reward.
            "price": 100.0 + draw(st.floats(min_value=5.0, max_value=50.0)),
            "strength": draw(st.integers(min_value=0, max_value=100)),
        }
        for _ in range(count)
    ]
@st.composite
def short_candidate_levels(draw: st.DrawFn) -> list[dict]:
    """Generate 2-5 support levels below entry_price=100.

    All levels meet the R:R threshold of 1.5 given ATR≈2, risk≈3,
    so min reward=4.5, max target=95.5.
    """
    count = draw(st.integers(min_value=2, max_value=5))
    return [
        {
            # Distance below entry: 5 to 50 — always past the 4.5 minimum reward.
            "price": 100.0 - draw(st.floats(min_value=5.0, max_value=50.0)),
            "strength": draw(st.integers(min_value=0, max_value=100)),
        }
        for _ in range(count)
    ]
# ---------------------------------------------------------------------------
# Property test: long setup selects highest quality score candidate
# ---------------------------------------------------------------------------
@pytest.mark.asyncio
@given(levels=long_candidate_levels())
@settings(
    max_examples=20,
    deadline=None,
    suppress_health_check=[HealthCheck.function_scoped_fixture],
)
async def test_property_long_selects_highest_quality(
    levels: list[dict],
    scan_session: AsyncSession,
):
    """**Validates: Requirements 2.1, 2.3, 2.4**

    Property: when multiple resistance levels meet the R:R threshold,
    the fixed scan_ticker selects the one with the highest quality score.
    """
    # NOTE(review): scan_session is requested but unused below; each example
    # builds its own session — confirm the fixture is still required.
    from tests.conftest import _test_engine, _test_session_factory
    from app.database import Base
    # Fresh DB state per hypothesis example
    async with _test_engine.begin() as conn:
        await conn.run_sync(Base.metadata.drop_all)
        await conn.run_sync(Base.metadata.create_all)
    async with _test_session_factory() as session:
        ticker = Ticker(symbol="FIXL")
        session.add(ticker)
        await session.flush()
        bars = _make_ohlcv_bars(ticker.id, num_bars=20, base_close=100.0)
        session.add_all(bars)
        sr_levels = []
        for lv in levels:
            sr_levels.append(SRLevel(
                ticker_id=ticker.id,
                price_level=lv["price"],
                type="resistance",
                strength=lv["strength"],
                detection_method="volume_profile",
            ))
        session.add_all(sr_levels)
        await session.commit()
        setups = await scan_ticker(session, "FIXL", rr_threshold=1.5)
        long_setups = [s for s in setups if s.direction == "long"]
        assert len(long_setups) == 1, "Expected exactly one long setup"
        selected_target = long_setups[0].target
        # Compute entry_price and risk from the bars (same logic as scan_ticker)
        # entry_price = last close ≈ 100.0, ATR ≈ 2.0, risk = ATR * 1.5 = 3.0
        entry_price = bars[-1].close
        # Use approximate risk; the exact value comes from ATR computation
        # We reconstruct it from the setup's entry and stop
        risk = long_setups[0].entry_price - long_setups[0].stop_loss
        # Compute quality scores for all candidates that meet threshold
        best_quality = -1.0
        best_target = None
        for lv in levels:
            distance = lv["price"] - entry_price
            if distance > 0:
                rr = distance / risk
                if rr >= 1.5:
                    quality = _compute_quality_score(rr, lv["strength"], distance, entry_price)
                    if quality > best_quality:
                        best_quality = quality
                        best_target = round(lv["price"], 4)
        assert best_target is not None, "At least one candidate should meet threshold"
        # The scanner's pick must match our independently-computed best-quality level.
        assert selected_target == pytest.approx(best_target, abs=0.01), (
            f"Selected target {selected_target} != expected best-quality target "
            f"{best_target} (quality={best_quality:.4f})"
        )
# ---------------------------------------------------------------------------
# Property test: short setup selects highest quality score candidate
# ---------------------------------------------------------------------------
@pytest.mark.asyncio
@given(levels=short_candidate_levels())
@settings(
    max_examples=20,
    deadline=None,
    suppress_health_check=[HealthCheck.function_scoped_fixture],
)
async def test_property_short_selects_highest_quality(
    levels: list[dict],
    scan_session: AsyncSession,
):
    """**Validates: Requirements 2.2, 2.3, 2.4**

    Property: when multiple support levels meet the R:R threshold,
    the fixed scan_ticker selects the one with the highest quality score.
    """
    # NOTE(review): scan_session is requested but unused below; each example
    # builds its own session — confirm the fixture is still required.
    from tests.conftest import _test_engine, _test_session_factory
    from app.database import Base
    # Fresh DB state per hypothesis example
    async with _test_engine.begin() as conn:
        await conn.run_sync(Base.metadata.drop_all)
        await conn.run_sync(Base.metadata.create_all)
    async with _test_session_factory() as session:
        ticker = Ticker(symbol="FIXS")
        session.add(ticker)
        await session.flush()
        bars = _make_ohlcv_bars(ticker.id, num_bars=20, base_close=100.0)
        session.add_all(bars)
        sr_levels = []
        for lv in levels:
            sr_levels.append(SRLevel(
                ticker_id=ticker.id,
                price_level=lv["price"],
                type="support",
                strength=lv["strength"],
                detection_method="pivot_point",
            ))
        session.add_all(sr_levels)
        await session.commit()
        setups = await scan_ticker(session, "FIXS", rr_threshold=1.5)
        short_setups = [s for s in setups if s.direction == "short"]
        assert len(short_setups) == 1, "Expected exactly one short setup"
        selected_target = short_setups[0].target
        entry_price = bars[-1].close
        # For shorts, stop is above entry, so risk = stop - entry.
        risk = short_setups[0].stop_loss - short_setups[0].entry_price
        # Compute quality scores for all candidates that meet threshold
        best_quality = -1.0
        best_target = None
        for lv in levels:
            distance = entry_price - lv["price"]
            if distance > 0:
                rr = distance / risk
                if rr >= 1.5:
                    quality = _compute_quality_score(rr, lv["strength"], distance, entry_price)
                    if quality > best_quality:
                        best_quality = quality
                        best_target = round(lv["price"], 4)
        assert best_target is not None, "At least one candidate should meet threshold"
        # The scanner's pick must match our independently-computed best-quality level.
        assert selected_target == pytest.approx(best_target, abs=0.01), (
            f"Selected target {selected_target} != expected best-quality target "
            f"{best_target} (quality={best_quality:.4f})"
        )
# ---------------------------------------------------------------------------
# Deterministic test: 3 levels with known quality scores (long)
# ---------------------------------------------------------------------------
@pytest.mark.asyncio
async def test_deterministic_long_three_levels(scan_session: AsyncSession):
    """**Validates: Requirements 2.1, 2.3, 2.4**

    Concrete example with 3 resistance levels of known quality scores.
    Entry=100, ATR≈2, risk≈3.

    Level A: price=105, strength=90 → rr=5/3≈1.67, dist=5
        quality = 0.35*(1.67/10) + 0.35*(90/100) + 0.30*(1-5/100)
                = 0.35*0.167 + 0.35*0.9 + 0.30*0.95
                = 0.0585 + 0.315 + 0.285 = 0.6585
    Level B: price=112, strength=50 → rr=12/3=4.0, dist=12
        quality = 0.35*(4/10) + 0.35*(50/100) + 0.30*(1-12/100)
                = 0.35*0.4 + 0.35*0.5 + 0.30*0.88
                = 0.14 + 0.175 + 0.264 = 0.579
    Level C: price=130, strength=10 → rr=30/3=10.0, dist=30
        quality = 0.35*(10/10) + 0.35*(10/100) + 0.30*(1-30/100)
                = 0.35*1.0 + 0.35*0.1 + 0.30*0.7
                = 0.35 + 0.035 + 0.21 = 0.595
    Expected winner: Level A (quality=0.6585)
    """
    ticker = Ticker(symbol="DET3L")
    scan_session.add(ticker)
    await scan_session.flush()
    bars = _make_ohlcv_bars(ticker.id, num_bars=20, base_close=100.0)
    scan_session.add_all(bars)
    level_a = SRLevel(
        ticker_id=ticker.id, price_level=105.0, type="resistance",
        strength=90, detection_method="volume_profile",
    )
    level_b = SRLevel(
        ticker_id=ticker.id, price_level=112.0, type="resistance",
        strength=50, detection_method="volume_profile",
    )
    level_c = SRLevel(
        ticker_id=ticker.id, price_level=130.0, type="resistance",
        strength=10, detection_method="volume_profile",
    )
    scan_session.add_all([level_a, level_b, level_c])
    # flush, not commit — scan_ticker commits internally (see fixture docstring)
    await scan_session.flush()
    setups = await scan_ticker(scan_session, "DET3L", rr_threshold=1.5)
    long_setups = [s for s in setups if s.direction == "long"]
    assert len(long_setups) == 1, "Expected exactly one long setup"
    # Level A (105, strength=90) should win with highest quality
    assert long_setups[0].target == pytest.approx(105.0, abs=0.01), (
        f"Expected target=105.0 (highest quality), got {long_setups[0].target}"
    )
# ---------------------------------------------------------------------------
# Deterministic test: 3 levels with known quality scores (short)
# ---------------------------------------------------------------------------
@pytest.mark.asyncio
async def test_deterministic_short_three_levels(scan_session: AsyncSession):
    """**Validates: Requirements 2.2, 2.3, 2.4**

    Concrete example with 3 support levels of known quality scores.
    Entry=100, ATR≈2, risk≈3.

    Level A: price=95, strength=85 → rr=5/3≈1.67, dist=5
        quality = 0.35*(1.67/10) + 0.35*(85/100) + 0.30*(1-5/100)
                = 0.0585 + 0.2975 + 0.285 = 0.641
    Level B: price=88, strength=45 → rr=12/3=4.0, dist=12
        quality = 0.35*(4/10) + 0.35*(45/100) + 0.30*(1-12/100)
                = 0.14 + 0.1575 + 0.264 = 0.5615
    Level C: price=70, strength=8 → rr=30/3=10.0, dist=30
        quality = 0.35*(10/10) + 0.35*(8/100) + 0.30*(1-30/100)
                = 0.35 + 0.028 + 0.21 = 0.588
    Expected winner: Level A (quality=0.641)
    """
    ticker = Ticker(symbol="DET3S")
    scan_session.add(ticker)
    await scan_session.flush()
    bars = _make_ohlcv_bars(ticker.id, num_bars=20, base_close=100.0)
    scan_session.add_all(bars)
    level_a = SRLevel(
        ticker_id=ticker.id, price_level=95.0, type="support",
        strength=85, detection_method="pivot_point",
    )
    level_b = SRLevel(
        ticker_id=ticker.id, price_level=88.0, type="support",
        strength=45, detection_method="pivot_point",
    )
    level_c = SRLevel(
        ticker_id=ticker.id, price_level=70.0, type="support",
        strength=8, detection_method="pivot_point",
    )
    scan_session.add_all([level_a, level_b, level_c])
    # flush, not commit — scan_ticker commits internally (see fixture docstring)
    await scan_session.flush()
    setups = await scan_ticker(scan_session, "DET3S", rr_threshold=1.5)
    short_setups = [s for s in setups if s.direction == "short"]
    assert len(short_setups) == 1, "Expected exactly one short setup"
    # Level A (95, strength=85) should win with highest quality
    assert short_setups[0].target == pytest.approx(95.0, abs=0.01), (
        f"Expected target=95.0 (highest quality), got {short_setups[0].target}"
    )

View File

@@ -0,0 +1,259 @@
"""Integration tests for R:R scanner full flow with quality-based target selection.
Verifies the complete scan_ticker pipeline: quality-based S/R level selection,
correct TradeSetup field population, and database persistence.
**Validates: Requirements 2.1, 2.2, 2.3, 2.4, 3.4**
"""
from __future__ import annotations
from datetime import date, datetime, timedelta, timezone
import pytest
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession
from app.models.ohlcv import OHLCVRecord
from app.models.score import CompositeScore
from app.models.sr_level import SRLevel
from app.models.ticker import Ticker
from app.models.trade_setup import TradeSetup
from app.services.rr_scanner_service import scan_ticker, _compute_quality_score
# ---------------------------------------------------------------------------
# Fixtures
# ---------------------------------------------------------------------------
@pytest.fixture
async def scan_session() -> AsyncSession:
    """Provide a DB session compatible with scan_ticker (which commits)."""
    # Import lazily so the conftest module is fully initialized first.
    from tests.conftest import _test_session_factory
    async with _test_session_factory() as session:
        yield session
# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------
def _make_ohlcv_bars(
    ticker_id: int,
    num_bars: int = 20,
    base_close: float = 100.0,
) -> list[OHLCVRecord]:
    """Generate OHLCV bars closing around base_close with ATR ≈ 2.0."""
    first_day = date(2024, 1, 1)
    # Closes oscillate ±0.5 around base_close: -0.5, 0.0, +0.5, -0.5, ...
    closes = [base_close + (i % 3 - 1) * 0.5 for i in range(num_bars)]
    return [
        OHLCVRecord(
            ticker_id=ticker_id,
            date=first_day + timedelta(days=i),
            open=c - 0.3,
            high=c + 1.0,
            low=c - 1.0,
            close=c,
            volume=100_000,
        )
        for i, c in enumerate(closes)
    ]
# ===========================================================================
# 8.1 Integration test: full scan_ticker flow with quality-based selection,
# correct TradeSetup fields, and database persistence
# ===========================================================================
@pytest.mark.asyncio
async def test_scan_ticker_full_flow_quality_selection_and_persistence(
    scan_session: AsyncSession,
):
    """Integration test for the complete scan_ticker pipeline.
    Scenario:
    - Entry ≈ 100, ATR ≈ 2.0, risk ≈ 3.0 (atr_multiplier=1.5)
    - 3 resistance levels above (long candidates):
    A: price=105, strength=90 (strong, near) → highest quality
    B: price=115, strength=40 (medium, mid)
    C: price=135, strength=5 (weak, far)
    - 3 support levels below (short candidates):
    D: price=95, strength=85 (strong, near) → highest quality
    E: price=85, strength=35 (medium, mid)
    F: price=65, strength=8 (weak, far)
    - CompositeScore: 72.5
    Verifies:
    1. Both long and short setups are produced
    2. Long target = Level A (highest quality, not most distant)
    3. Short target = Level D (highest quality, not most distant)
    4. All TradeSetup fields are correct and rounded to 4 decimals
    5. rr_ratio is the actual R:R of the selected level
    6. Old setups are deleted, new ones persisted
    """
    # -- Setup: create ticker --
    ticker = Ticker(symbol="INTEG")
    scan_session.add(ticker)
    await scan_session.flush()  # flush assigns ticker.id without committing
    # -- Setup: OHLCV bars (20 bars, close ≈ 100, ATR ≈ 2.0) --
    bars = _make_ohlcv_bars(ticker.id, num_bars=20, base_close=100.0)
    scan_session.add_all(bars)
    # -- Setup: S/R levels --
    sr_levels = [
        # Long candidates (resistance above entry)
        SRLevel(ticker_id=ticker.id, price_level=105.0, type="resistance",
                strength=90, detection_method="volume_profile"),
        SRLevel(ticker_id=ticker.id, price_level=115.0, type="resistance",
                strength=40, detection_method="volume_profile"),
        SRLevel(ticker_id=ticker.id, price_level=135.0, type="resistance",
                strength=5, detection_method="pivot_point"),
        # Short candidates (support below entry)
        SRLevel(ticker_id=ticker.id, price_level=95.0, type="support",
                strength=85, detection_method="volume_profile"),
        SRLevel(ticker_id=ticker.id, price_level=85.0, type="support",
                strength=35, detection_method="pivot_point"),
        SRLevel(ticker_id=ticker.id, price_level=65.0, type="support",
                strength=8, detection_method="volume_profile"),
    ]
    scan_session.add_all(sr_levels)
    # -- Setup: CompositeScore --
    comp = CompositeScore(
        ticker_id=ticker.id,
        score=72.5,
        is_stale=False,
        weights_json="{}",
        computed_at=datetime.now(timezone.utc),
    )
    scan_session.add(comp)
    # -- Setup: dummy old setups that should be deleted --
    old_setup = TradeSetup(
        ticker_id=ticker.id,
        direction="long",
        entry_price=99.0,
        stop_loss=96.0,
        target=120.0,
        rr_ratio=7.0,
        composite_score=50.0,
        detected_at=datetime(2024, 1, 1, tzinfo=timezone.utc),
    )
    scan_session.add(old_setup)
    # Commit fixture data (scan_ticker commits too — see the scan_session fixture).
    await scan_session.commit()
    # Verify old setup exists before scan
    pre_result = await scan_session.execute(
        select(TradeSetup).where(TradeSetup.ticker_id == ticker.id)
    )
    pre_setups = list(pre_result.scalars().all())
    assert len(pre_setups) == 1, "Dummy old setup should exist before scan"
    # -- Act: run scan_ticker --
    setups = await scan_ticker(scan_session, "INTEG", rr_threshold=1.5, atr_multiplier=1.5)
    # -- Assert: both directions produced --
    assert len(setups) == 2, f"Expected 2 setups (long + short), got {len(setups)}"
    long_setups = [s for s in setups if s.direction == "long"]
    short_setups = [s for s in setups if s.direction == "short"]
    assert len(long_setups) == 1, f"Expected 1 long setup, got {len(long_setups)}"
    assert len(short_setups) == 1, f"Expected 1 short setup, got {len(short_setups)}"
    long_setup = long_setups[0]
    short_setup = short_setups[0]
    # -- Assert: long target is Level A (highest quality, not most distant) --
    # Level A: price=105 (strong, near) should beat Level C: price=135 (weak, far)
    assert long_setup.target == pytest.approx(105.0, abs=0.01), (
        f"Long target should be 105.0 (highest quality), got {long_setup.target}"
    )
    # -- Assert: short target is Level D (highest quality, not most distant) --
    # Level D: price=95 (strong, near) should beat Level F: price=65 (weak, far)
    assert short_setup.target == pytest.approx(95.0, abs=0.01), (
        f"Short target should be 95.0 (highest quality), got {short_setup.target}"
    )
    # -- Assert: entry_price is the last close (≈ 100) --
    # Last bar: index 19, close = 100 + (19 % 3 - 1) * 0.5 = 100 + 0*0.5 = 100.0
    expected_entry = 100.0
    assert long_setup.entry_price == pytest.approx(expected_entry, abs=0.5)
    assert short_setup.entry_price == pytest.approx(expected_entry, abs=0.5)
    entry = long_setup.entry_price  # actual entry for R:R calculations
    # -- Assert: stop_loss values --
    # ATR ≈ 2.0, risk = ATR × 1.5 = 3.0
    # Long stop = entry - risk, Short stop = entry + risk
    risk = long_setup.entry_price - long_setup.stop_loss
    assert risk > 0, "Long risk must be positive"
    assert short_setup.stop_loss > short_setup.entry_price, "Short stop must be above entry"
    # -- Assert: rr_ratio is the actual R:R of the selected level --
    # R:R is recomputed here from the setup's own fields rather than hard-coded,
    # so the check stays valid even if ATR differs slightly from 2.0.
    long_reward = long_setup.target - long_setup.entry_price
    long_expected_rr = round(long_reward / risk, 4)
    assert long_setup.rr_ratio == pytest.approx(long_expected_rr, abs=0.01), (
        f"Long rr_ratio should be actual R:R={long_expected_rr}, got {long_setup.rr_ratio}"
    )
    short_risk = short_setup.stop_loss - short_setup.entry_price
    short_reward = short_setup.entry_price - short_setup.target
    short_expected_rr = round(short_reward / short_risk, 4)
    assert short_setup.rr_ratio == pytest.approx(short_expected_rr, abs=0.01), (
        f"Short rr_ratio should be actual R:R={short_expected_rr}, got {short_setup.rr_ratio}"
    )
    # -- Assert: composite_score matches --
    assert long_setup.composite_score == pytest.approx(72.5, abs=0.01)
    assert short_setup.composite_score == pytest.approx(72.5, abs=0.01)
    # -- Assert: ticker_id is correct --
    assert long_setup.ticker_id == ticker.id
    assert short_setup.ticker_id == ticker.id
    # -- Assert: detected_at is set --
    assert long_setup.detected_at is not None
    assert short_setup.detected_at is not None
    # -- Assert: fields are rounded to 4 decimal places --
    for setup in [long_setup, short_setup]:
        for field_name in ("entry_price", "stop_loss", "target", "rr_ratio", "composite_score"):
            val = getattr(setup, field_name)
            rounded = round(val, 4)
            assert val == pytest.approx(rounded, abs=1e-6), (
                f"{setup.direction} {field_name}={val} not rounded to 4 decimals"
            )
    # -- Assert: database persistence --
    # Old dummy setup should be gone, only the 2 new setups should exist
    db_result = await scan_session.execute(
        select(TradeSetup).where(TradeSetup.ticker_id == ticker.id)
    )
    persisted = list(db_result.scalars().all())
    assert len(persisted) == 2, (
        f"Expected 2 persisted setups (old deleted), got {len(persisted)}"
    )
    persisted_directions = sorted(s.direction for s in persisted)
    assert persisted_directions == ["long", "short"], (
        f"Expected ['long', 'short'] persisted, got {persisted_directions}"
    )
    # Verify persisted records match returned setups
    persisted_long = [s for s in persisted if s.direction == "long"][0]
    persisted_short = [s for s in persisted if s.direction == "short"][0]
    assert persisted_long.target == long_setup.target
    assert persisted_long.rr_ratio == long_setup.rr_ratio
    assert persisted_long.entry_price == long_setup.entry_price
    assert persisted_long.stop_loss == long_setup.stop_loss
    assert persisted_long.composite_score == long_setup.composite_score
    assert persisted_short.target == short_setup.target
    assert persisted_short.rr_ratio == short_setup.rr_ratio
    assert persisted_short.entry_price == short_setup.entry_price
    assert persisted_short.stop_loss == short_setup.stop_loss
    assert persisted_short.composite_score == short_setup.composite_score

View File

@@ -0,0 +1,433 @@
"""Preservation tests for R:R scanner target quality bugfix.
Verify that the fix does NOT change behavior for zero-candidate and
single-candidate scenarios, and that get_trade_setups sorting is unchanged.
The fix only changes selection logic when MULTIPLE candidates exist.
Zero-candidate and single-candidate scenarios must produce identical results.
**Validates: Requirements 3.1, 3.2, 3.3, 3.5**
"""
from __future__ import annotations
from datetime import date, datetime, timedelta, timezone
import pytest
from hypothesis import given, settings, HealthCheck, strategies as st
from sqlalchemy.ext.asyncio import AsyncSession
from app.models.ohlcv import OHLCVRecord
from app.models.sr_level import SRLevel
from app.models.ticker import Ticker
from app.models.trade_setup import TradeSetup
from app.models.score import CompositeScore
from app.services.rr_scanner_service import scan_ticker, get_trade_setups
# ---------------------------------------------------------------------------
# Session fixtures
# ---------------------------------------------------------------------------
@pytest.fixture
async def scan_session() -> AsyncSession:
    """Open a session on the shared test factory; safe for code paths that commit."""
    from tests.conftest import _test_session_factory

    async with _test_session_factory() as active_session:
        yield active_session
@pytest.fixture
async def db_session() -> AsyncSession:
    """Provide a transactional DB session for get_trade_setups tests.

    Opens an explicit transaction, yields the session to the test, and rolls
    the transaction back afterwards so test writes never persist.

    Bug fixed: the previous version wrapped the ``yield`` in
    ``async with session.begin():`` — that context manager COMMITS on normal
    exit, so the trailing ``await session.rollback()`` ran after the commit
    and was a no-op, silently breaking the intended test isolation.
    """
    from tests.conftest import _test_session_factory
    async with _test_session_factory() as session:
        transaction = await session.begin()
        try:
            yield session
        finally:
            # Roll back whatever the test wrote (if the transaction is still open).
            if transaction.is_active:
                await transaction.rollback()
# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------
def _make_ohlcv_bars(
    ticker_id: int,
    num_bars: int = 20,
    base_close: float = 100.0,
) -> list[OHLCVRecord]:
    """Create num_bars daily bars whose closes oscillate ±0.5 around base_close.

    High/low sit at close±1.0, which yields an ATR of roughly 2.0.
    """
    start_day = date(2024, 1, 1)
    records: list[OHLCVRecord] = []
    for offset in range(num_bars):
        c = base_close + (offset % 3 - 1) * 0.5  # repeating -0.5 / 0.0 / +0.5 pattern
        records.append(
            OHLCVRecord(
                ticker_id=ticker_id,
                date=start_day + timedelta(days=offset),
                open=c - 0.3,
                high=c + 1.0,
                low=c - 1.0,
                close=c,
                volume=100_000,
            )
        )
    return records
# ===========================================================================
# 7.1 [PBT-preservation] Property test: zero-candidate and single-candidate
# scenarios produce the same output as the original code.
# ===========================================================================
@st.composite
def zero_candidate_scenario(draw: st.DrawFn) -> dict:
    """Generate a scenario where no S/R levels qualify as candidates.
    Variants:
    - No SR levels at all
    - All levels below entry (no long targets) and all above entry (no short targets)
    but all below the R:R threshold for their respective directions
    - Levels in the right direction but below R:R threshold
    Note: scan_ticker does NOT filter by SR level type — it only checks whether
    the price_level is above or below entry. So "wrong side" means all levels
    are clustered near entry and below threshold in both directions.

    Returns a dict: {"variant": str, "levels": [{"price", "type", "strength"}, ...]}.
    """
    variant = draw(st.sampled_from(["no_levels", "below_threshold"]))
    if variant == "no_levels":
        return {"variant": variant, "levels": []}
    else:  # below_threshold
        # All levels close to entry so R:R < 1.5 with risk ≈ 3
        # For longs: reward < 4.5 → price < 104.5
        # For shorts: reward < 4.5 → price > 95.5
        # Place all levels in the 96–104 band (below threshold both ways)
        num = draw(st.integers(min_value=1, max_value=3))
        levels = []
        for _ in range(num):
            price = draw(st.floats(min_value=100.5, max_value=103.5))
            levels.append({
                "price": price,
                "type": draw(st.sampled_from(["resistance", "support"])),
                "strength": draw(st.integers(min_value=10, max_value=100)),
            })
        # Also add some below entry but still below threshold
        for _ in range(draw(st.integers(min_value=0, max_value=2))):
            price = draw(st.floats(min_value=96.5, max_value=99.5))
            levels.append({
                "price": price,
                "type": draw(st.sampled_from(["resistance", "support"])),
                "strength": draw(st.integers(min_value=10, max_value=100)),
            })
        return {"variant": variant, "levels": levels}
@st.composite
def single_candidate_scenario(draw: st.DrawFn) -> dict:
    """Generate a scenario with exactly one S/R level that meets the R:R threshold.
    For longs: one resistance above entry with R:R >= 1.5 (price >= 104.5 with risk ≈ 3).
    For shorts: one support below entry with R:R >= 1.5 (price <= 95.5 with risk ≈ 3).

    Returns a dict: {"direction": "long"|"short", "level": {"price", "type", "strength"}}.
    """
    direction = draw(st.sampled_from(["long", "short"]))
    if direction == "long":
        # Single resistance above entry meeting threshold
        price = draw(st.floats(min_value=105.0, max_value=150.0))
        strength = draw(st.integers(min_value=1, max_value=100))
        return {
            "direction": direction,
            "level": {"price": price, "type": "resistance", "strength": strength},
        }
    else:
        # Single support below entry meeting threshold
        price = draw(st.floats(min_value=50.0, max_value=95.0))
        strength = draw(st.integers(min_value=1, max_value=100))
        return {
            "direction": direction,
            "level": {"price": price, "type": "support", "strength": strength},
        }
@pytest.mark.asyncio
@given(scenario=zero_candidate_scenario())
@settings(
    max_examples=15,
    deadline=None,
    suppress_health_check=[HealthCheck.function_scoped_fixture],
)
async def test_property_zero_candidates_produce_no_setup(
    scenario: dict,
    scan_session: AsyncSession,
):
    """**Validates: Requirements 3.1, 3.2**
    Property: when zero candidate S/R levels exist (no levels, wrong side,
    or below threshold), scan_ticker produces no setup — unchanged from
    original behavior.
    """
    # NOTE(review): the scan_session fixture argument is unused here; the test
    # builds its own engine/session so each Hypothesis example starts clean.
    from tests.conftest import _test_engine, _test_session_factory
    from app.database import Base
    # Reset the schema per example — @given replays this body many times
    # against the same function-scoped fixture state.
    async with _test_engine.begin() as conn:
        await conn.run_sync(Base.metadata.drop_all)
        await conn.run_sync(Base.metadata.create_all)
    async with _test_session_factory() as session:
        ticker = Ticker(symbol="PRSV0")
        session.add(ticker)
        await session.flush()  # assigns ticker.id without committing
        bars = _make_ohlcv_bars(ticker.id, num_bars=20, base_close=100.0)
        session.add_all(bars)
        for lv_data in scenario.get("levels", []):
            session.add(SRLevel(
                ticker_id=ticker.id,
                price_level=lv_data["price"],
                type=lv_data["type"],
                strength=lv_data["strength"],
                detection_method="volume_profile",
            ))
        await session.commit()
        setups = await scan_ticker(session, "PRSV0", rr_threshold=1.5)
        assert setups == [], (
            f"Expected no setups for zero-candidate scenario "
            f"(variant={scenario.get('variant', 'unknown')}), got {len(setups)}"
        )
@pytest.mark.asyncio
@given(scenario=single_candidate_scenario())
@settings(
    max_examples=15,
    deadline=None,
    suppress_health_check=[HealthCheck.function_scoped_fixture],
)
async def test_property_single_candidate_selected_unchanged(
    scenario: dict,
    scan_session: AsyncSession,
):
    """**Validates: Requirements 3.3**
    Property: when exactly one candidate S/R level meets the R:R threshold,
    scan_ticker selects it — same as the original code would.
    """
    # NOTE(review): scan_session is unused; the test manages its own session
    # so every Hypothesis example runs against a freshly-reset schema.
    from tests.conftest import _test_engine, _test_session_factory
    from app.database import Base
    async with _test_engine.begin() as conn:
        await conn.run_sync(Base.metadata.drop_all)
        await conn.run_sync(Base.metadata.create_all)
    async with _test_session_factory() as session:
        ticker = Ticker(symbol="PRSV1")
        session.add(ticker)
        await session.flush()  # assigns ticker.id without committing
        bars = _make_ohlcv_bars(ticker.id, num_bars=20, base_close=100.0)
        session.add_all(bars)
        lv = scenario["level"]
        session.add(SRLevel(
            ticker_id=ticker.id,
            price_level=lv["price"],
            type=lv["type"],
            strength=lv["strength"],
            detection_method="volume_profile",
        ))
        await session.commit()
        setups = await scan_ticker(session, "PRSV1", rr_threshold=1.5)
        direction = scenario["direction"]
        dir_setups = [s for s in setups if s.direction == direction]
        assert len(dir_setups) == 1, (
            f"Expected exactly one {direction} setup for single candidate, "
            f"got {len(dir_setups)}"
        )
        selected_target = dir_setups[0].target
        expected_target = round(lv["price"], 4)
        assert selected_target == pytest.approx(expected_target, abs=0.01), (
            f"Single candidate: expected target={expected_target}, "
            f"got {selected_target}"
        )
# ===========================================================================
# 7.2 Unit test: no S/R levels → no setup produced
# ===========================================================================
@pytest.mark.asyncio
async def test_no_sr_levels_produces_no_setup(scan_session: AsyncSession):
    """**Validates: Requirements 3.1**
    A ticker with OHLCV history but zero S/R levels must yield an empty result
    from scan_ticker.
    """
    # Seed only the ticker and its price bars — deliberately no SRLevel rows.
    ticker = Ticker(symbol="NOSRL")
    scan_session.add(ticker)
    await scan_session.flush()
    scan_session.add_all(_make_ohlcv_bars(ticker.id, num_bars=20, base_close=100.0))
    await scan_session.flush()
    setups = await scan_ticker(scan_session, "NOSRL", rr_threshold=1.5)
    assert setups == [], (
        f"Expected no setups when no SR levels exist, got {len(setups)}"
    )
# ===========================================================================
# 7.3 Unit test: single candidate meets threshold → selected
# ===========================================================================
@pytest.mark.asyncio
async def test_single_resistance_above_threshold_selected(scan_session: AsyncSession):
    """**Validates: Requirements 3.3**
    A lone resistance level above entry that clears the R:R threshold becomes
    the long setup's target.
    Entry ≈ 100, ATR ≈ 2, risk ≈ 3. Resistance at 110 → R:R ≈ 3.33 (>= 1.5).
    """
    ticker = Ticker(symbol="SINGL")
    scan_session.add(ticker)
    await scan_session.flush()
    scan_session.add_all(_make_ohlcv_bars(ticker.id, num_bars=20, base_close=100.0))
    # One qualifying resistance and nothing else.
    scan_session.add(SRLevel(
        ticker_id=ticker.id,
        price_level=110.0,
        type="resistance",
        strength=60,
        detection_method="volume_profile",
    ))
    await scan_session.flush()
    setups = await scan_ticker(scan_session, "SINGL", rr_threshold=1.5)
    long_setups = [s for s in setups if s.direction == "long"]
    assert len(long_setups) == 1, (
        f"Expected exactly one long setup, got {len(long_setups)}"
    )
    assert long_setups[0].target == pytest.approx(110.0, abs=0.01), (
        f"Expected target=110.0, got {long_setups[0].target}"
    )
@pytest.mark.asyncio
async def test_single_support_below_threshold_selected(scan_session: AsyncSession):
    """**Validates: Requirements 3.3**
    A lone support level below entry that clears the R:R threshold becomes
    the short setup's target.
    Entry ≈ 100, ATR ≈ 2, risk ≈ 3. Support at 90 → R:R ≈ 3.33 (>= 1.5).
    """
    ticker = Ticker(symbol="SINGS")
    scan_session.add(ticker)
    await scan_session.flush()
    scan_session.add_all(_make_ohlcv_bars(ticker.id, num_bars=20, base_close=100.0))
    # One qualifying support and nothing else.
    scan_session.add(SRLevel(
        ticker_id=ticker.id,
        price_level=90.0,
        type="support",
        strength=55,
        detection_method="pivot_point",
    ))
    await scan_session.flush()
    setups = await scan_ticker(scan_session, "SINGS", rr_threshold=1.5)
    short_setups = [s for s in setups if s.direction == "short"]
    assert len(short_setups) == 1, (
        f"Expected exactly one short setup, got {len(short_setups)}"
    )
    assert short_setups[0].target == pytest.approx(90.0, abs=0.01), (
        f"Expected target=90.0, got {short_setups[0].target}"
    )
# ===========================================================================
# 7.4 Unit test: get_trade_setups sorting is unchanged (R:R desc, composite desc)
# ===========================================================================
@pytest.mark.asyncio
async def test_get_trade_setups_sorting_rr_desc_composite_desc(db_session: AsyncSession):
    """**Validates: Requirements 3.5**
    get_trade_setups must return results sorted by rr_ratio descending,
    with composite_score descending as the secondary sort key.
    """
    now = datetime.now(timezone.utc)
    # Create tickers for each setup
    ticker_a = Ticker(symbol="SORTA")
    ticker_b = Ticker(symbol="SORTB")
    ticker_c = Ticker(symbol="SORTC")
    ticker_d = Ticker(symbol="SORTD")
    db_session.add_all([ticker_a, ticker_b, ticker_c, ticker_d])
    await db_session.flush()
    # Create setups with different rr_ratio and composite_score values
    # Expected order: D (rr=5.0), C (rr=3.0, comp=80), B (rr=3.0, comp=50), A (rr=1.5)
    # B and C share rr_ratio=3.0 on purpose, to exercise the secondary key.
    setup_a = TradeSetup(
        ticker_id=ticker_a.id, direction="long",
        entry_price=100.0, stop_loss=97.0, target=104.5,
        rr_ratio=1.5, composite_score=90.0, detected_at=now,
    )
    setup_b = TradeSetup(
        ticker_id=ticker_b.id, direction="long",
        entry_price=100.0, stop_loss=97.0, target=109.0,
        rr_ratio=3.0, composite_score=50.0, detected_at=now,
    )
    setup_c = TradeSetup(
        ticker_id=ticker_c.id, direction="short",
        entry_price=100.0, stop_loss=103.0, target=91.0,
        rr_ratio=3.0, composite_score=80.0, detected_at=now,
    )
    setup_d = TradeSetup(
        ticker_id=ticker_d.id, direction="long",
        entry_price=100.0, stop_loss=97.0, target=115.0,
        rr_ratio=5.0, composite_score=30.0, detected_at=now,
    )
    db_session.add_all([setup_a, setup_b, setup_c, setup_d])
    await db_session.flush()
    results = await get_trade_setups(db_session)
    assert len(results) == 4, f"Expected 4 setups, got {len(results)}"
    # Verify ordering: rr_ratio desc, then composite_score desc
    rr_values = [r["rr_ratio"] for r in results]
    assert rr_values == [5.0, 3.0, 3.0, 1.5], (
        f"Expected rr_ratio order [5.0, 3.0, 3.0, 1.5], got {rr_values}"
    )
    # For the two setups with rr_ratio=3.0, composite_score should be desc
    tied_composites = [r["composite_score"] for r in results if r["rr_ratio"] == 3.0]
    assert tied_composites == [80.0, 50.0], (
        f"Expected composite_score order [80.0, 50.0] for tied R:R, "
        f"got {tied_composites}"
    )
    # Verify symbols match expected order
    symbols = [r["symbol"] for r in results]
    assert symbols == ["SORTD", "SORTC", "SORTB", "SORTA"], (
        f"Expected symbol order ['SORTD', 'SORTC', 'SORTB', 'SORTA'], "
        f"got {symbols}"
    )

View File

@@ -0,0 +1,159 @@
"""Unit tests for _compute_quality_score in rr_scanner_service."""
import pytest
from app.services.rr_scanner_service import _compute_quality_score
# ---------------------------------------------------------------------------
# 4.1 — Known inputs with hand-computed expected outputs
# ---------------------------------------------------------------------------
class TestKnownInputs:
    """Hand-computed expected scores for _compute_quality_score.

    Each test's comment block derives the expected value from the documented
    formula: score = w_rr*min(rr/rr_cap, 1) + w_strength*(strength/100)
    + w_proximity*(1 - min(distance/entry_price, 1)), defaults
    w=(0.35, 0.35, 0.30), rr_cap=10.
    """
    def test_typical_candidate(self):
        # rr=5, strength=80, distance=3, entry_price=100
        # norm_rr = 5/10 = 0.5
        # norm_strength = 80/100 = 0.8
        # norm_proximity = 1 - 3/100 = 0.97
        # score = 0.35*0.5 + 0.35*0.8 + 0.30*0.97 = 0.175 + 0.28 + 0.291 = 0.746
        result = _compute_quality_score(rr=5.0, strength=80, distance=3.0, entry_price=100.0)
        assert result == pytest.approx(0.746, abs=1e-9)
    def test_weak_distant_candidate(self):
        # rr=8, strength=10, distance=50, entry_price=100
        # norm_rr = 8/10 = 0.8
        # norm_strength = 10/100 = 0.1
        # norm_proximity = 1 - 50/100 = 0.5
        # score = 0.35*0.8 + 0.35*0.1 + 0.30*0.5 = 0.28 + 0.035 + 0.15 = 0.465
        result = _compute_quality_score(rr=8.0, strength=10, distance=50.0, entry_price=100.0)
        assert result == pytest.approx(0.465, abs=1e-9)
    def test_strong_near_candidate(self):
        # rr=2, strength=95, distance=5, entry_price=200
        # norm_rr = 2/10 = 0.2
        # norm_strength = 95/100 = 0.95
        # norm_proximity = 1 - 5/200 = 0.975
        # score = 0.35*0.2 + 0.35*0.95 + 0.30*0.975 = 0.07 + 0.3325 + 0.2925 = 0.695
        result = _compute_quality_score(rr=2.0, strength=95, distance=5.0, entry_price=200.0)
        assert result == pytest.approx(0.695, abs=1e-9)
    def test_custom_weights(self):
        # rr=4, strength=50, distance=10, entry_price=100
        # norm_rr = 4/10 = 0.4, norm_strength = 0.5, norm_proximity = 1 - 10/100 = 0.9
        # With w_rr=0.5, w_strength=0.3, w_proximity=0.2:
        # score = 0.5*0.4 + 0.3*0.5 + 0.2*0.9 = 0.2 + 0.15 + 0.18 = 0.53
        result = _compute_quality_score(
            rr=4.0, strength=50, distance=10.0, entry_price=100.0,
            w_rr=0.5, w_strength=0.3, w_proximity=0.2,
        )
        assert result == pytest.approx(0.53, abs=1e-9)
    def test_custom_rr_cap(self):
        # rr=3, strength=60, distance=8, entry_price=100, rr_cap=5
        # norm_rr = 3/5 = 0.6
        # norm_strength = 60/100 = 0.6
        # norm_proximity = 1 - 8/100 = 0.92
        # score = 0.35*0.6 + 0.35*0.6 + 0.30*0.92 = 0.21 + 0.21 + 0.276 = 0.696
        result = _compute_quality_score(
            rr=3.0, strength=60, distance=8.0, entry_price=100.0, rr_cap=5.0,
        )
        assert result == pytest.approx(0.696, abs=1e-9)
# ---------------------------------------------------------------------------
# 4.2 — Edge cases
# ---------------------------------------------------------------------------
class TestEdgeCases:
    """Boundary inputs for _compute_quality_score: zero/max strength, zero
    distance, and R:R values at and above the normalization cap."""
    def test_strength_zero(self):
        # strength=0 → norm_strength=0
        # rr=5, distance=10, entry_price=100
        # norm_rr=0.5, norm_strength=0.0, norm_proximity=0.9
        # score = 0.35*0.5 + 0.35*0.0 + 0.30*0.9 = 0.175 + 0.0 + 0.27 = 0.445
        result = _compute_quality_score(rr=5.0, strength=0, distance=10.0, entry_price=100.0)
        assert result == pytest.approx(0.445, abs=1e-9)
    def test_strength_100(self):
        # strength=100 → norm_strength=1.0
        # rr=5, distance=10, entry_price=100
        # norm_rr=0.5, norm_strength=1.0, norm_proximity=0.9
        # score = 0.35*0.5 + 0.35*1.0 + 0.30*0.9 = 0.175 + 0.35 + 0.27 = 0.795
        result = _compute_quality_score(rr=5.0, strength=100, distance=10.0, entry_price=100.0)
        assert result == pytest.approx(0.795, abs=1e-9)
    def test_distance_zero(self):
        # distance=0 → norm_proximity = 1 - 0/100 = 1.0
        # rr=5, strength=50, entry_price=100
        # norm_rr=0.5, norm_strength=0.5, norm_proximity=1.0
        # score = 0.35*0.5 + 0.35*0.5 + 0.30*1.0 = 0.175 + 0.175 + 0.3 = 0.65
        result = _compute_quality_score(rr=5.0, strength=50, distance=0.0, entry_price=100.0)
        assert result == pytest.approx(0.65, abs=1e-9)
    def test_rr_at_cap(self):
        # rr=10 (== rr_cap) → norm_rr = min(10/10, 1.0) = 1.0
        # strength=50, distance=10, entry_price=100
        # norm_strength=0.5, norm_proximity=0.9
        # score = 0.35*1.0 + 0.35*0.5 + 0.30*0.9 = 0.35 + 0.175 + 0.27 = 0.795
        result = _compute_quality_score(rr=10.0, strength=50, distance=10.0, entry_price=100.0)
        assert result == pytest.approx(0.795, abs=1e-9)
    def test_rr_above_cap(self):
        # rr=15 (> rr_cap=10) → norm_rr = min(15/10, 1.0) = 1.0 (capped)
        # Same result as rr_at_cap since norm_rr is capped at 1.0
        result = _compute_quality_score(rr=15.0, strength=50, distance=10.0, entry_price=100.0)
        assert result == pytest.approx(0.795, abs=1e-9)
    def test_rr_above_cap_equals_rr_at_cap(self):
        # Explicitly verify capping: rr=15 and rr=10 produce the same score
        at_cap = _compute_quality_score(rr=10.0, strength=50, distance=10.0, entry_price=100.0)
        above_cap = _compute_quality_score(rr=15.0, strength=50, distance=10.0, entry_price=100.0)
        assert at_cap == pytest.approx(above_cap, abs=1e-9)
# ---------------------------------------------------------------------------
# 4.3 — Normalized components stay in 0–1 range
# ---------------------------------------------------------------------------
class TestNormalizedComponentsRange:
    """Verify that each normalized component stays within [0, 1].

    NOTE(review): the three `test_norm_*` tests recompute the normalization
    formula inline instead of calling the implementation — they document the
    intended math but cannot catch a divergence in _compute_quality_score
    itself; only test_total_score_in_zero_one exercises the real function.
    """
    @pytest.mark.parametrize("rr, rr_cap", [
        (0.0, 10.0),
        (5.0, 10.0),
        (10.0, 10.0),
        (15.0, 10.0),
        (100.0, 10.0),
        (3.0, 5.0),
        (7.0, 5.0),
    ])
    def test_norm_rr_in_range(self, rr, rr_cap):
        norm_rr = min(rr / rr_cap, 1.0)
        assert 0.0 <= norm_rr <= 1.0
    @pytest.mark.parametrize("strength", [0, 1, 50, 99, 100])
    def test_norm_strength_in_range(self, strength):
        norm_strength = strength / 100.0
        assert 0.0 <= norm_strength <= 1.0
    @pytest.mark.parametrize("distance, entry_price", [
        (0.0, 100.0),
        (1.0, 100.0),
        (50.0, 100.0),
        (100.0, 100.0),
        (200.0, 100.0),  # distance > entry_price → clamped
        (0.5, 50.0),
    ])
    def test_norm_proximity_in_range(self, distance, entry_price):
        norm_proximity = 1.0 - min(distance / entry_price, 1.0)
        assert 0.0 <= norm_proximity <= 1.0
    @pytest.mark.parametrize("rr, strength, distance, entry_price", [
        (0.0, 0, 0.0, 100.0),      # all minimums
        (10.0, 100, 0.0, 100.0),   # all maximums
        (15.0, 100, 200.0, 100.0), # rr above cap, distance > entry
        (5.0, 50, 50.0, 100.0),    # mid-range values
        (1.0, 1, 99.0, 100.0),     # near-minimum non-zero
    ])
    def test_total_score_in_zero_one(self, rr, strength, distance, entry_price):
        score = _compute_quality_score(rr=rr, strength=strength, distance=distance, entry_price=entry_price)
        assert 0.0 <= score <= 1.0

View File

@@ -0,0 +1,205 @@
"""Unit tests for get_score composite breakdown and dimension breakdown wiring."""
from __future__ import annotations
from datetime import date
from types import SimpleNamespace
from unittest.mock import AsyncMock, patch
import pytest
from sqlalchemy.ext.asyncio import AsyncSession, async_sessionmaker, create_async_engine
from app.database import Base
from app.models.ticker import Ticker
from app.services.scoring_service import get_score, _DIMENSION_COMPUTERS
TEST_DATABASE_URL = "sqlite+aiosqlite://"
@pytest.fixture
async def fresh_db():
    """Yield a commit-capable session on a throwaway in-memory SQLite database."""
    engine = create_async_engine(TEST_DATABASE_URL, echo=False)
    # Build the schema up front; tear it down after the test.
    async with engine.begin() as conn:
        await conn.run_sync(Base.metadata.create_all)
    make_session = async_sessionmaker(engine, class_=AsyncSession, expire_on_commit=False)
    async with make_session() as session:
        yield session
    async with engine.begin() as conn:
        await conn.run_sync(Base.metadata.drop_all)
    await engine.dispose()
def _make_ohlcv_records(n: int, base_close: float = 100.0) -> list:
"""Create n mock OHLCV records with realistic price data."""
records = []
for i in range(n):
price = base_close + (i * 0.5)
records.append(
SimpleNamespace(
date=date(2024, 1, 1),
open=price - 0.5,
high=price + 1.0,
low=price - 1.0,
close=price,
volume=1000000,
)
)
return records
def _mock_none_computer():
"""Return an AsyncMock that returns (None, None) — simulates missing dimension data."""
return AsyncMock(return_value=(None, None))
def _mock_score_computer(score: float, breakdown: dict | None = None):
"""Return an AsyncMock that returns a fixed (score, breakdown) tuple."""
bd = breakdown or {
"sub_scores": [{"name": "mock", "score": score, "weight": 1.0, "raw_value": score, "description": "mock"}],
"formula": "mock formula",
"unavailable": [],
}
return AsyncMock(return_value=(score, bd))
async def _seed_ticker(session: AsyncSession, symbol: str = "AAPL") -> Ticker:
    """Persist a Ticker row for `symbol` and return the committed instance."""
    row = Ticker(symbol=symbol)
    session.add(row)
    await session.commit()
    return row
@pytest.mark.asyncio
async def test_get_score_returns_composite_breakdown(fresh_db):
    """get_score should include a composite_breakdown dict with weights and re-normalization info."""
    await _seed_ticker(fresh_db, "AAPL")
    # Swap every dimension computer for a mock; restore in finally via update()
    # (safe because the mocks replace existing keys without adding new ones).
    original = dict(_DIMENSION_COMPUTERS)
    try:
        _DIMENSION_COMPUTERS["technical"] = _mock_score_computer(70.0)
        _DIMENSION_COMPUTERS["momentum"] = _mock_score_computer(60.0)
        _DIMENSION_COMPUTERS["sentiment"] = _mock_none_computer()
        _DIMENSION_COMPUTERS["fundamental"] = _mock_none_computer()
        _DIMENSION_COMPUTERS["sr_quality"] = _mock_none_computer()
        result = await get_score(fresh_db, "AAPL")
    finally:
        _DIMENSION_COMPUTERS.update(original)
    # Only the presence and shape of the breakdown are asserted here; the
    # contents are covered by the sibling tests below.
    assert "composite_breakdown" in result
    cb = result["composite_breakdown"]
    assert cb is not None
    assert "weights" in cb
    assert "available_dimensions" in cb
    assert "missing_dimensions" in cb
    assert "renormalized_weights" in cb
    assert "formula" in cb
@pytest.mark.asyncio
async def test_get_score_composite_breakdown_has_correct_available_missing(fresh_db):
    """Composite breakdown should correctly list available and missing dimensions."""
    await _seed_ticker(fresh_db, "AAPL")
    # Two dimensions return scores, three return (None, None); restore mocks in finally.
    original = dict(_DIMENSION_COMPUTERS)
    try:
        _DIMENSION_COMPUTERS["technical"] = _mock_score_computer(70.0)
        _DIMENSION_COMPUTERS["momentum"] = _mock_score_computer(60.0)
        _DIMENSION_COMPUTERS["sentiment"] = _mock_none_computer()
        _DIMENSION_COMPUTERS["fundamental"] = _mock_none_computer()
        _DIMENSION_COMPUTERS["sr_quality"] = _mock_none_computer()
        result = await get_score(fresh_db, "AAPL")
    finally:
        _DIMENSION_COMPUTERS.update(original)
    cb = result["composite_breakdown"]
    assert "technical" in cb["available_dimensions"]
    assert "momentum" in cb["available_dimensions"]
    assert "sentiment" in cb["missing_dimensions"]
    assert "fundamental" in cb["missing_dimensions"]
    assert "sr_quality" in cb["missing_dimensions"]
@pytest.mark.asyncio
async def test_get_score_renormalized_weights_sum_to_one(fresh_db):
    """Re-normalized weights should sum to 1.0 when at least one dimension is available."""
    await _seed_ticker(fresh_db, "AAPL")
    # Two available dimensions → their weights must be rescaled to sum to 1.
    original = dict(_DIMENSION_COMPUTERS)
    try:
        _DIMENSION_COMPUTERS["technical"] = _mock_score_computer(70.0)
        _DIMENSION_COMPUTERS["momentum"] = _mock_score_computer(60.0)
        _DIMENSION_COMPUTERS["sentiment"] = _mock_none_computer()
        _DIMENSION_COMPUTERS["fundamental"] = _mock_none_computer()
        _DIMENSION_COMPUTERS["sr_quality"] = _mock_none_computer()
        result = await get_score(fresh_db, "AAPL")
    finally:
        _DIMENSION_COMPUTERS.update(original)
    cb = result["composite_breakdown"]
    assert cb["renormalized_weights"]
    total = sum(cb["renormalized_weights"].values())
    assert abs(total - 1.0) < 1e-9
@pytest.mark.asyncio
async def test_get_score_dimensions_include_breakdowns(fresh_db):
    """Each available dimension entry should include a breakdown dict."""
    await _seed_ticker(fresh_db, "AAPL")
    # Breakdown payload the mocked technical computer returns verbatim.
    tech_breakdown = {
        "sub_scores": [
            {"name": "ADX", "score": 72.0, "weight": 0.4, "raw_value": 72.0, "description": "ADX value"},
            {"name": "EMA", "score": 65.0, "weight": 0.3, "raw_value": 1.5, "description": "EMA diff"},
            {"name": "RSI", "score": 62.0, "weight": 0.3, "raw_value": 62.0, "description": "RSI value"},
        ],
        "formula": "Weighted average: 0.4*ADX + 0.3*EMA + 0.3*RSI",
        "unavailable": [],
    }
    snapshot = dict(_DIMENSION_COMPUTERS)
    try:
        _DIMENSION_COMPUTERS["technical"] = _mock_score_computer(68.2, tech_breakdown)
        _DIMENSION_COMPUTERS["momentum"] = _mock_score_computer(55.0)
        for dim in ("sentiment", "fundamental", "sr_quality"):
            _DIMENSION_COMPUTERS[dim] = _mock_none_computer()
        result = await get_score(fresh_db, "AAPL")
    finally:
        # Restore the original dimension computers.
        _DIMENSION_COMPUTERS.update(snapshot)
    tech_dim = next((d for d in result["dimensions"] if d["dimension"] == "technical"), None)
    assert tech_dim is not None
    assert tech_dim.get("breakdown") is not None
    sub_scores = tech_dim["breakdown"]["sub_scores"]
    assert len(sub_scores) == 3
    assert {s["name"] for s in sub_scores} == {"ADX", "EMA", "RSI"}
@pytest.mark.asyncio
async def test_get_score_all_dimensions_missing(fresh_db):
    """When all dimensions return None, composite_breakdown should list all as missing."""
    await _seed_ticker(fresh_db, "AAPL")
    snapshot = dict(_DIMENSION_COMPUTERS)
    try:
        # Replace every registered computer with one that reports no data.
        for dim in list(_DIMENSION_COMPUTERS):
            _DIMENSION_COMPUTERS[dim] = _mock_none_computer()
        result = await get_score(fresh_db, "AAPL")
    finally:
        _DIMENSION_COMPUTERS.update(snapshot)
    breakdown = result["composite_breakdown"]
    assert breakdown["available_dimensions"] == []
    assert len(breakdown["missing_dimensions"]) == 5
    assert breakdown["renormalized_weights"] == {}
    assert result["composite_score"] is None

View File

@@ -0,0 +1,150 @@
"""Unit tests for _compute_momentum_score breakdown refactor."""
from __future__ import annotations
from datetime import date
from types import SimpleNamespace
from unittest.mock import AsyncMock, patch
import pytest
from app.services.scoring_service import _compute_momentum_score
def _make_ohlcv_records(n: int, base_close: float = 100.0) -> list:
"""Create n mock OHLCV records with incrementing close prices."""
records = []
for i in range(n):
price = base_close + (i * 0.5)
records.append(
SimpleNamespace(
date=date(2024, 1, 1),
open=price - 0.5,
high=price + 1.0,
low=price - 1.0,
close=price,
volume=1000000,
)
)
return records
@pytest.mark.asyncio
async def test_returns_none_none_when_no_records(db_session):
    """When no OHLCV data exists, returns (None, None)."""
    patcher = patch(
        "app.services.price_service.query_ohlcv",
        new_callable=AsyncMock,
        return_value=[],
    )
    with patcher:
        result = await _compute_momentum_score(db_session, "AAPL")
    assert result == (None, None)
@pytest.mark.asyncio
async def test_returns_none_none_when_fewer_than_6_records(db_session):
    """With fewer than 6 records, returns (None, None)."""
    bars = _make_ohlcv_records(5)
    patcher = patch(
        "app.services.price_service.query_ohlcv",
        new_callable=AsyncMock,
        return_value=bars,
    )
    with patcher:
        score, breakdown = await _compute_momentum_score(db_session, "AAPL")
    assert score is None and breakdown is None
@pytest.mark.asyncio
async def test_returns_only_5day_roc_when_fewer_than_21_records(db_session):
    """With 6-20 records, returns only 5-day ROC sub-score; 20-day ROC is unavailable."""
    bars = _make_ohlcv_records(10)
    with patch(
        "app.services.price_service.query_ohlcv",
        new_callable=AsyncMock,
        return_value=bars,
    ):
        score, breakdown = await _compute_momentum_score(db_session, "AAPL")
    assert score is not None
    assert 0.0 <= score <= 100.0
    assert breakdown is not None
    present = {s["name"] for s in breakdown["sub_scores"]}
    assert "5-day ROC" in present
    assert "20-day ROC" not in present
    # The 20-day ROC must be reported as unavailable instead.
    assert "20-day ROC" in {u["name"] for u in breakdown["unavailable"]}
@pytest.mark.asyncio
async def test_returns_both_sub_scores_with_enough_data(db_session):
    """With 21+ records, returns both 5-day and 20-day ROC sub-scores."""
    bars = _make_ohlcv_records(30)
    with patch(
        "app.services.price_service.query_ohlcv",
        new_callable=AsyncMock,
        return_value=bars,
    ):
        score, breakdown = await _compute_momentum_score(db_session, "AAPL")
    assert score is not None
    assert 0.0 <= score <= 100.0
    assert breakdown is not None
    by_name = {s["name"]: s for s in breakdown["sub_scores"]}
    assert "5-day ROC" in by_name
    assert "20-day ROC" in by_name
    # Both ROC horizons carry equal weight.
    assert by_name["5-day ROC"]["weight"] == 0.5
    assert by_name["20-day ROC"]["weight"] == 0.5
    # Raw values are present, numeric, and described.
    for sub in breakdown["sub_scores"]:
        assert sub["raw_value"] is not None
        assert isinstance(sub["raw_value"], (int, float))
        assert sub["description"]
    assert breakdown["unavailable"] == []
@pytest.mark.asyncio
async def test_formula_string_present(db_session):
    """Breakdown always includes the formula description."""
    bars = _make_ohlcv_records(30)
    with patch(
        "app.services.price_service.query_ohlcv",
        new_callable=AsyncMock,
        return_value=bars,
    ):
        _, breakdown = await _compute_momentum_score(db_session, "AAPL")
    assert "formula" in breakdown
    # Both ROC horizons must be named in the formula text.
    for token in ("ROC_5", "ROC_20"):
        assert token in breakdown["formula"]
@pytest.mark.asyncio
async def test_raw_values_are_roc_percentages(db_session):
    """Raw values should be ROC percentages matching the actual price change."""
    bars = _make_ohlcv_records(30, base_close=100.0)
    with patch(
        "app.services.price_service.query_ohlcv",
        new_callable=AsyncMock,
        return_value=bars,
    ):
        _, breakdown = await _compute_momentum_score(db_session, "AAPL")
    # Reconstruct the close series the helper generated: 100.0, 100.5, ...
    closes = [100.0 + i * 0.5 for i in range(30)]
    latest = closes[-1]
    expected = {
        "5-day ROC": (latest - closes[-6]) / closes[-6] * 100.0,
        "20-day ROC": (latest - closes[-21]) / closes[-21] * 100.0,
    }
    actual = {s["name"]: s["raw_value"] for s in breakdown["sub_scores"]}
    for name, roc in expected.items():
        assert abs(actual[name] - round(roc, 4)) < 1e-6

View File

@@ -0,0 +1,138 @@
"""Unit tests for _compute_sentiment_score breakdown refactor."""
from __future__ import annotations
from datetime import datetime, timezone
from types import SimpleNamespace
from unittest.mock import AsyncMock, patch
import pytest
from app.services.scoring_service import _compute_sentiment_score
def _make_sentiment_records(n: int, classification: str = "bullish", confidence: int = 80) -> list:
"""Create n mock sentiment records with recent timestamps."""
now = datetime.now(timezone.utc)
records = []
for i in range(n):
records.append(
SimpleNamespace(
classification=classification,
confidence=confidence,
source="test",
timestamp=now,
reasoning="",
citations_json="[]",
)
)
return records
@pytest.mark.asyncio
async def test_returns_none_with_breakdown_when_no_records(db_session):
    """When no sentiment records exist, returns (None, breakdown) with unavailable entry."""
    with patch(
        "app.services.sentiment_service.get_sentiment_scores",
        new_callable=AsyncMock,
        return_value=[],
    ):
        score, breakdown = await _compute_sentiment_score(db_session, "AAPL")
    assert score is None
    assert breakdown is not None
    assert breakdown["sub_scores"] == []
    # Exactly one unavailable entry, naming the missing input.
    (missing,) = breakdown["unavailable"]
    assert missing["name"] == "sentiment_records"
    assert "formula" in breakdown
@pytest.mark.asyncio
async def test_returns_none_none_when_get_scores_raises(db_session):
    """When get_sentiment_scores raises, returns (None, None)."""
    patcher = patch(
        "app.services.sentiment_service.get_sentiment_scores",
        new_callable=AsyncMock,
        side_effect=Exception("DB error"),
    )
    with patcher:
        result = await _compute_sentiment_score(db_session, "AAPL")
    assert result == (None, None)
@pytest.mark.asyncio
async def test_returns_breakdown_with_sub_scores(db_session):
    """With sentiment records, returns score and breakdown with expected sub-scores."""
    recs = _make_sentiment_records(3)
    scores_patch = patch(
        "app.services.sentiment_service.get_sentiment_scores",
        new_callable=AsyncMock,
        return_value=recs,
    )
    dim_patch = patch(
        "app.services.sentiment_service.compute_sentiment_dimension_score",
        new_callable=AsyncMock,
        return_value=75.0,
    )
    with scores_patch, dim_patch:
        score, breakdown = await _compute_sentiment_score(db_session, "AAPL")
    assert score == 75.0
    assert breakdown is not None
    for key in ("sub_scores", "formula", "unavailable"):
        assert key in breakdown
    # All three diagnostic sub-scores are present with their raw inputs.
    raw = {s["name"]: s["raw_value"] for s in breakdown["sub_scores"]}
    assert raw["record_count"] == 3
    assert raw["decay_rate"] == 0.1
    assert raw["lookback_window"] == 24
    assert breakdown["unavailable"] == []
@pytest.mark.asyncio
async def test_formula_contains_decay_info(db_session):
    """Breakdown formula describes the time-decay weighted average."""
    recs = _make_sentiment_records(2)
    scores_patch = patch(
        "app.services.sentiment_service.get_sentiment_scores",
        new_callable=AsyncMock,
        return_value=recs,
    )
    dim_patch = patch(
        "app.services.sentiment_service.compute_sentiment_dimension_score",
        new_callable=AsyncMock,
        return_value=60.0,
    )
    with scores_patch, dim_patch:
        _, breakdown = await _compute_sentiment_score(db_session, "AAPL")
    for token in ("Time-decay", "decay_rate", "24"):
        assert token in breakdown["formula"]
@pytest.mark.asyncio
async def test_sub_scores_have_descriptions(db_session):
    """Each sub-score has a non-empty description."""
    recs = _make_sentiment_records(1)
    scores_patch = patch(
        "app.services.sentiment_service.get_sentiment_scores",
        new_callable=AsyncMock,
        return_value=recs,
    )
    dim_patch = patch(
        "app.services.sentiment_service.compute_sentiment_dimension_score",
        new_callable=AsyncMock,
        return_value=50.0,
    )
    with scores_patch, dim_patch:
        _, breakdown = await _compute_sentiment_score(db_session, "AAPL")
    for sub in breakdown["sub_scores"]:
        assert sub["description"], f"Sub-score {sub['name']} missing description"

View File

@@ -0,0 +1,151 @@
"""Unit tests for _compute_technical_score breakdown refactor."""
from __future__ import annotations
from datetime import date
from types import SimpleNamespace
from unittest.mock import AsyncMock, patch
import pytest
from app.services.scoring_service import _compute_technical_score
def _make_ohlcv_records(n: int, base_close: float = 100.0) -> list:
"""Create n mock OHLCV records with realistic price data."""
records = []
for i in range(n):
price = base_close + (i * 0.5)
records.append(
SimpleNamespace(
date=date(2024, 1, 1),
open=price - 0.5,
high=price + 1.0,
low=price - 1.0,
close=price,
volume=1000000,
)
)
return records
@pytest.mark.asyncio
async def test_returns_none_tuple_when_no_records(db_session):
    """When no OHLCV data exists, returns (None, None)."""
    patcher = patch(
        "app.services.price_service.query_ohlcv",
        new_callable=AsyncMock,
        return_value=[],
    )
    with patcher:
        result = await _compute_technical_score(db_session, "AAPL")
    assert result == (None, None)
@pytest.mark.asyncio
async def test_returns_breakdown_with_all_sub_scores(db_session):
    """With enough data, returns score and breakdown with ADX, EMA, RSI sub-scores."""
    bars = _make_ohlcv_records(50)
    with patch(
        "app.services.price_service.query_ohlcv",
        new_callable=AsyncMock,
        return_value=bars,
    ):
        score, breakdown = await _compute_technical_score(db_session, "AAPL")
    assert score is not None
    assert 0.0 <= score <= 100.0
    assert breakdown is not None
    for key in ("sub_scores", "formula", "unavailable"):
        assert key in breakdown
    # All three indicators present with their documented weights.
    by_name = {s["name"]: s for s in breakdown["sub_scores"]}
    assert by_name["ADX"]["weight"] == 0.4
    assert by_name["EMA"]["weight"] == 0.3
    assert by_name["RSI"]["weight"] == 0.3
    # Raw values are present, numeric, and described.
    for sub in breakdown["sub_scores"]:
        assert sub["raw_value"] is not None
        assert isinstance(sub["raw_value"], (int, float))
        assert sub["description"]
    assert breakdown["unavailable"] == []
@pytest.mark.asyncio
async def test_partial_sub_scores_with_insufficient_data(db_session):
    """With limited data (enough for EMA/RSI but not ADX), returns partial breakdown."""
    # 22 bars: enough for EMA(20) and RSI(14) but not ADX (needs 28).
    bars = _make_ohlcv_records(22)
    with patch(
        "app.services.price_service.query_ohlcv",
        new_callable=AsyncMock,
        return_value=bars,
    ):
        score, breakdown = await _compute_technical_score(db_session, "AAPL")
    assert score is not None
    assert breakdown is not None
    present = {s["name"] for s in breakdown["sub_scores"]}
    assert "EMA" in present
    assert "RSI" in present
    assert "ADX" not in present
    # ADX must be reported as unavailable, with a reason attached.
    assert "ADX" in {u["name"] for u in breakdown["unavailable"]}
    assert any(u["reason"] for u in breakdown["unavailable"])
@pytest.mark.asyncio
async def test_all_sub_scores_unavailable(db_session):
    """With very few bars (not enough for any indicator), returns None score with breakdown."""
    # 5 bars: not enough for any indicator.
    bars = _make_ohlcv_records(5)
    with patch(
        "app.services.price_service.query_ohlcv",
        new_callable=AsyncMock,
        return_value=bars,
    ):
        score, breakdown = await _compute_technical_score(db_session, "AAPL")
    assert score is None
    assert breakdown is not None
    assert breakdown["sub_scores"] == []
    assert len(breakdown["unavailable"]) == 3
    assert {u["name"] for u in breakdown["unavailable"]} == {"ADX", "EMA", "RSI"}
@pytest.mark.asyncio
async def test_formula_string_present(db_session):
    """Breakdown always includes the formula description."""
    bars = _make_ohlcv_records(50)
    with patch(
        "app.services.price_service.query_ohlcv",
        new_callable=AsyncMock,
        return_value=bars,
    ):
        _, breakdown = await _compute_technical_score(db_session, "AAPL")
    assert "formula" in breakdown
    # Every indicator must be named in the formula text.
    for indicator in ("ADX", "EMA", "RSI"):
        assert indicator in breakdown["formula"]

View File

@@ -0,0 +1,243 @@
"""Unit tests for the S/R levels router — zone integration."""
from datetime import datetime
from unittest.mock import AsyncMock, patch
import pytest
from fastapi import FastAPI
from fastapi.testclient import TestClient
from app.middleware import register_exception_handlers
from app.routers.sr_levels import router
# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------
class _FakeLevel:
"""Mimics an SRLevel ORM model."""
def __init__(self, id, price_level, type, strength, detection_method):
self.id = id
self.price_level = price_level
self.type = type
self.strength = strength
self.detection_method = detection_method
self.created_at = datetime(2024, 1, 1)
class _FakeOHLCV:
"""Mimics an OHLCVRecord with a close attribute."""
def __init__(self, close: float):
self.close = close
def _make_app() -> FastAPI:
    """Build the test app: S/R router mounted, auth and DB dependencies stubbed."""
    test_app = FastAPI()
    register_exception_handlers(test_app)
    test_app.include_router(router, prefix="/api/v1")
    # Bypass authentication and hand the router a mock async DB session.
    from app.dependencies import require_access, get_db

    test_app.dependency_overrides[require_access] = lambda: None
    test_app.dependency_overrides[get_db] = lambda: AsyncMock()
    return test_app
# ---------------------------------------------------------------------------
# Tests
# ---------------------------------------------------------------------------
# Shared fixtures: two support levels and one resistance level, plus a single
# OHLCV bar with close=100.0.
SAMPLE_LEVELS = [
    _FakeLevel(1, 95.0, "support", 60, "volume_profile"),
    _FakeLevel(2, 96.0, "support", 40, "pivot_point"),
    _FakeLevel(3, 110.0, "resistance", 80, "merged"),
]
SAMPLE_OHLCV = [_FakeOHLCV(100.0)]
class TestSRLevelsRouterZones:
    """Tests for max_zones parameter and zone inclusion in response."""
    # NOTE: stacked @patch decorators apply bottom-up, so the innermost target
    # (get_sr_levels) binds to the first mock argument (mock_get_sr) and
    # query_ohlcv binds to mock_ohlcv.
    @patch("app.routers.sr_levels.query_ohlcv", new_callable=AsyncMock)
    @patch("app.routers.sr_levels.get_sr_levels", new_callable=AsyncMock)
    def test_default_max_zones_returns_zones(self, mock_get_sr, mock_ohlcv):
        """Default request includes a non-empty 'zones' list in the payload."""
        mock_get_sr.return_value = SAMPLE_LEVELS
        mock_ohlcv.return_value = SAMPLE_OHLCV
        app = _make_app()
        client = TestClient(app)
        resp = client.get("/api/v1/sr-levels/AAPL")
        assert resp.status_code == 200
        body = resp.json()
        assert body["status"] == "success"
        data = body["data"]
        assert "zones" in data
        assert isinstance(data["zones"], list)
        # With default max_zones=6, we should get zones
        assert len(data["zones"]) > 0
    @patch("app.routers.sr_levels.query_ohlcv", new_callable=AsyncMock)
    @patch("app.routers.sr_levels.get_sr_levels", new_callable=AsyncMock)
    def test_max_zones_zero_returns_empty_zones(self, mock_get_sr, mock_ohlcv):
        """max_zones=0 disables zone computation entirely."""
        mock_get_sr.return_value = SAMPLE_LEVELS
        mock_ohlcv.return_value = SAMPLE_OHLCV
        app = _make_app()
        client = TestClient(app)
        resp = client.get("/api/v1/sr-levels/AAPL?max_zones=0")
        assert resp.status_code == 200
        data = resp.json()["data"]
        assert data["zones"] == []
    @patch("app.routers.sr_levels.query_ohlcv", new_callable=AsyncMock)
    @patch("app.routers.sr_levels.get_sr_levels", new_callable=AsyncMock)
    def test_max_zones_limits_zone_count(self, mock_get_sr, mock_ohlcv):
        """max_zones caps how many zones are returned."""
        mock_get_sr.return_value = SAMPLE_LEVELS
        mock_ohlcv.return_value = SAMPLE_OHLCV
        app = _make_app()
        client = TestClient(app)
        resp = client.get("/api/v1/sr-levels/AAPL?max_zones=1")
        assert resp.status_code == 200
        data = resp.json()["data"]
        assert len(data["zones"]) <= 1
    @patch("app.routers.sr_levels.query_ohlcv", new_callable=AsyncMock)
    @patch("app.routers.sr_levels.get_sr_levels", new_callable=AsyncMock)
    def test_no_ohlcv_data_returns_empty_zones(self, mock_get_sr, mock_ohlcv):
        """Without OHLCV data, zones are empty but raw levels are still returned."""
        mock_get_sr.return_value = SAMPLE_LEVELS
        mock_ohlcv.return_value = []  # No OHLCV data
        app = _make_app()
        client = TestClient(app)
        resp = client.get("/api/v1/sr-levels/AAPL")
        assert resp.status_code == 200
        data = resp.json()["data"]
        assert data["zones"] == []
        # Levels should still be present
        assert len(data["levels"]) == 3
    @patch("app.routers.sr_levels.query_ohlcv", new_callable=AsyncMock)
    @patch("app.routers.sr_levels.get_sr_levels", new_callable=AsyncMock)
    def test_no_levels_returns_empty_zones(self, mock_get_sr, mock_ohlcv):
        """Without any S/R levels, zones and levels are both empty."""
        mock_get_sr.return_value = []
        mock_ohlcv.return_value = SAMPLE_OHLCV
        app = _make_app()
        client = TestClient(app)
        resp = client.get("/api/v1/sr-levels/AAPL")
        assert resp.status_code == 200
        data = resp.json()["data"]
        assert data["zones"] == []
        assert data["levels"] == []
        assert data["count"] == 0
    @patch("app.routers.sr_levels.query_ohlcv", new_callable=AsyncMock)
    @patch("app.routers.sr_levels.get_sr_levels", new_callable=AsyncMock)
    def test_zone_fields_present(self, mock_get_sr, mock_ohlcv):
        """Every zone object carries the full set of expected fields."""
        mock_get_sr.return_value = SAMPLE_LEVELS
        mock_ohlcv.return_value = SAMPLE_OHLCV
        app = _make_app()
        client = TestClient(app)
        resp = client.get("/api/v1/sr-levels/AAPL")
        data = resp.json()["data"]
        for zone in data["zones"]:
            assert "low" in zone
            assert "high" in zone
            assert "midpoint" in zone
            assert "strength" in zone
            assert "type" in zone
            assert "level_count" in zone
            assert zone["type"] in ("support", "resistance")
class TestSRLevelsRouterVisibleLevels:
    """Tests for visible_levels filtering in the SR levels response."""
    # NOTE: stacked @patch decorators apply bottom-up — mock_get_sr is the
    # get_sr_levels mock, mock_ohlcv is the query_ohlcv mock.
    @patch("app.routers.sr_levels.query_ohlcv", new_callable=AsyncMock)
    @patch("app.routers.sr_levels.get_sr_levels", new_callable=AsyncMock)
    def test_visible_levels_present_in_response(self, mock_get_sr, mock_ohlcv):
        """visible_levels field is always present in the API response."""
        mock_get_sr.return_value = SAMPLE_LEVELS
        mock_ohlcv.return_value = SAMPLE_OHLCV
        app = _make_app()
        client = TestClient(app)
        resp = client.get("/api/v1/sr-levels/AAPL")
        assert resp.status_code == 200
        data = resp.json()["data"]
        assert "visible_levels" in data
        assert isinstance(data["visible_levels"], list)
    @patch("app.routers.sr_levels.query_ohlcv", new_callable=AsyncMock)
    @patch("app.routers.sr_levels.get_sr_levels", new_callable=AsyncMock)
    def test_visible_levels_within_zone_bounds(self, mock_get_sr, mock_ohlcv):
        """Every visible level has a price within at least one zone's [low, high] range."""
        mock_get_sr.return_value = SAMPLE_LEVELS
        mock_ohlcv.return_value = SAMPLE_OHLCV
        app = _make_app()
        client = TestClient(app)
        resp = client.get("/api/v1/sr-levels/AAPL")
        data = resp.json()["data"]
        zones = data["zones"]
        visible = data["visible_levels"]
        # When zones exist, each visible level must fall within a zone
        for lvl in visible:
            price = lvl["price_level"]
            assert any(
                z["low"] <= price <= z["high"] for z in zones
            ), f"visible level price {price} not within any zone bounds"
        # visible_levels must be a subset of levels (by id)
        level_ids = {l["id"] for l in data["levels"]}
        for lvl in visible:
            assert lvl["id"] in level_ids
    @patch("app.routers.sr_levels.query_ohlcv", new_callable=AsyncMock)
    @patch("app.routers.sr_levels.get_sr_levels", new_callable=AsyncMock)
    def test_visible_levels_empty_when_no_ohlcv(self, mock_get_sr, mock_ohlcv):
        """visible_levels is empty when no OHLCV data exists (zones are empty)."""
        mock_get_sr.return_value = SAMPLE_LEVELS
        mock_ohlcv.return_value = []
        app = _make_app()
        client = TestClient(app)
        resp = client.get("/api/v1/sr-levels/AAPL")
        data = resp.json()["data"]
        assert data["zones"] == []
        assert data["visible_levels"] == []
    @patch("app.routers.sr_levels.query_ohlcv", new_callable=AsyncMock)
    @patch("app.routers.sr_levels.get_sr_levels", new_callable=AsyncMock)
    def test_visible_levels_empty_when_max_zones_zero(self, mock_get_sr, mock_ohlcv):
        """visible_levels is empty when max_zones=0 (zones are empty)."""
        mock_get_sr.return_value = SAMPLE_LEVELS
        mock_ohlcv.return_value = SAMPLE_OHLCV
        app = _make_app()
        client = TestClient(app)
        resp = client.get("/api/v1/sr-levels/AAPL?max_zones=0")
        data = resp.json()["data"]
        assert data["zones"] == []
        assert data["visible_levels"] == []