"""Scoring Engine service.

Computes dimension scores (technical, sr_quality, sentiment, fundamental,
momentum) each 0-100, composite score as weighted average of available
dimensions with re-normalized weights, staleness marking/recomputation
on demand, and weight update triggers full recomputation.
"""
|
|
|
|
from __future__ import annotations
|
|
|
|
import json
|
|
import logging
|
|
from datetime import datetime, timezone
|
|
|
|
from sqlalchemy import select
|
|
from sqlalchemy.ext.asyncio import AsyncSession
|
|
|
|
from app.exceptions import NotFoundError, ValidationError
|
|
from app.models.score import CompositeScore, DimensionScore
|
|
from app.models.settings import SystemSetting
|
|
from app.models.ticker import Ticker
|
|
|
|
logger = logging.getLogger(__name__)
|
|
|
|
# Dimension names in canonical order; also the iteration order everywhere below.
DIMENSIONS = ["technical", "sr_quality", "sentiment", "fundamental", "momentum"]

# Default composite weights (sum to 1.0). Overridable at runtime via the
# SystemSetting row keyed by SCORING_WEIGHTS_KEY.
DEFAULT_WEIGHTS: dict[str, float] = {
    "technical": 0.25,
    "sr_quality": 0.20,
    "sentiment": 0.15,
    "fundamental": 0.20,
    "momentum": 0.20,
}

# SystemSetting key under which the scoring weights are persisted as JSON.
SCORING_WEIGHTS_KEY = "scoring_weights"
|
|
|
|
|
|
# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------
|
|
|
|
async def _get_ticker(db: AsyncSession, symbol: str) -> Ticker:
    """Look up a ticker by symbol (normalized to upper case), raising if absent."""
    cleaned = symbol.strip().upper()
    row = await db.execute(select(Ticker).where(Ticker.symbol == cleaned))
    found = row.scalar_one_or_none()
    if found is not None:
        return found
    raise NotFoundError(f"Ticker not found: {cleaned}")
|
|
|
|
|
|
async def _get_weights(db: AsyncSession) -> dict[str, float]:
    """Load scoring weights from SystemSetting, falling back to defaults.

    The stored value must be a JSON object mapping dimension name (str) to a
    numeric weight; unparseable JSON or any other JSON shape (e.g. a list or
    bare number) is treated as invalid and a copy of DEFAULT_WEIGHTS is
    returned instead.
    """
    result = await db.execute(
        select(SystemSetting).where(SystemSetting.key == SCORING_WEIGHTS_KEY)
    )
    setting = result.scalar_one_or_none()
    if setting is not None:
        try:
            loaded = json.loads(setting.value)
        except (json.JSONDecodeError, TypeError):
            logger.warning("Invalid scoring weights in DB, using defaults")
        else:
            # Guard against well-formed JSON that is not a weight mapping —
            # previously such a value was returned as-is and propagated.
            if isinstance(loaded, dict) and all(
                isinstance(k, str) and isinstance(v, (int, float))
                for k, v in loaded.items()
            ):
                return loaded
            logger.warning("Invalid scoring weights in DB, using defaults")
    return dict(DEFAULT_WEIGHTS)
|
|
|
|
|
|
async def _save_weights(db: AsyncSession, weights: dict[str, float]) -> None:
    """Persist scoring weights to SystemSetting (insert or update in place)."""
    lookup = await db.execute(
        select(SystemSetting).where(SystemSetting.key == SCORING_WEIGHTS_KEY)
    )
    row = lookup.scalar_one_or_none()
    timestamp = datetime.now(timezone.utc)
    payload = json.dumps(weights)
    if row is None:
        db.add(
            SystemSetting(
                key=SCORING_WEIGHTS_KEY,
                value=payload,
                updated_at=timestamp,
            )
        )
    else:
        row.value = payload
        row.updated_at = timestamp
|
|
|
|
|
|
# ---------------------------------------------------------------------------
# Dimension score computation
# ---------------------------------------------------------------------------
|
|
|
|
async def _compute_technical_score(
    db: AsyncSession, symbol: str
) -> tuple[float | None, dict | None]:
    """Compute technical dimension score from ADX, EMA, RSI.

    Each indicator contributes a weighted sub-score (ADX 0.4, EMA 0.3,
    RSI 0.3). Indicators that cannot be computed (e.g. insufficient bars)
    are reported in ``unavailable`` and the remaining weights are
    re-normalized.

    Returns (score, breakdown) where breakdown follows the ScoreBreakdown
    TypedDict shape: {sub_scores, formula, unavailable}. Returns
    (None, None) when no OHLCV data exists; (None, breakdown) when data
    exists but no indicator could be computed.
    """
    from app.services.indicator_service import (
        compute_adx,
        compute_ema,
        compute_rsi,
        _extract_ohlcv,
    )
    from app.services.price_service import query_ohlcv

    # Single source of truth for the formula text — previously duplicated
    # verbatim in the no-data and normal return paths, risking drift.
    formula = "Weighted average: 0.4*ADX + 0.3*EMA + 0.3*RSI, re-normalized if any sub-score unavailable."

    records = await query_ohlcv(db, symbol)
    if not records:
        return None, None

    _, highs, lows, closes, _ = _extract_ohlcv(records)

    scores: list[tuple[float, float]] = []  # (weight, score)
    sub_scores: list[dict] = []
    unavailable: list[dict[str, str]] = []

    # ADX (weight 0.4) — needs 28+ bars
    try:
        adx_result = compute_adx(highs, lows, closes)
        scores.append((0.4, adx_result["score"]))
        sub_scores.append({
            "name": "ADX",
            "score": adx_result["score"],
            "weight": 0.4,
            "raw_value": adx_result["adx"],
            "description": "ADX value (0-100). Higher = stronger trend.",
        })
    except Exception as exc:
        unavailable.append({"name": "ADX", "reason": str(exc) or "Insufficient data for ADX"})

    # EMA (weight 0.3) — needs period+1 bars
    try:
        ema_result = compute_ema(closes)
        # Percent the latest close sits above/below the EMA; guard div-by-zero.
        pct_diff = (
            round(
                (ema_result["latest_close"] - ema_result["ema"])
                / ema_result["ema"]
                * 100.0,
                4,
            )
            if ema_result["ema"] != 0
            else 0.0
        )
        scores.append((0.3, ema_result["score"]))
        sub_scores.append({
            "name": "EMA",
            "score": ema_result["score"],
            "weight": 0.3,
            "raw_value": pct_diff,
            "description": f"Price {pct_diff}% {'above' if pct_diff >= 0 else 'below'} EMA(20). Score: 50 + pct_diff * 10.",
        })
    except Exception as exc:
        unavailable.append({"name": "EMA", "reason": str(exc) or "Insufficient data for EMA"})

    # RSI (weight 0.3) — needs 15+ bars
    try:
        rsi_result = compute_rsi(closes)
        scores.append((0.3, rsi_result["score"]))
        sub_scores.append({
            "name": "RSI",
            "score": rsi_result["score"],
            "weight": 0.3,
            "raw_value": rsi_result["rsi"],
            "description": "RSI(14) value. Score equals RSI.",
        })
    except Exception as exc:
        unavailable.append({"name": "RSI", "reason": str(exc) or "Insufficient data for RSI"})

    # Both return paths share one breakdown; sub_scores is empty exactly
    # when scores is empty (they are appended in lockstep above).
    breakdown: dict = {
        "sub_scores": sub_scores,
        "formula": formula,
        "unavailable": unavailable,
    }

    if not scores:
        return None, breakdown

    total_weight = sum(w for w, _ in scores)
    if total_weight == 0:
        return None, None
    weighted = sum(w * s for w, s in scores) / total_weight
    final_score = max(0.0, min(100.0, weighted))

    return final_score, breakdown
|
|
|
|
|
|
async def _compute_sr_quality_score(
    db: AsyncSession, symbol: str
) -> tuple[float | None, dict | None]:
    """Score support/resistance quality for *symbol*.

    Combines three additive factors: how many strong levels exist, how
    close the nearest level sits to the current price, and the average
    level strength.

    Returns (score, breakdown) where breakdown follows the ScoreBreakdown
    TypedDict shape: {sub_scores, formula, unavailable}.
    """
    from app.services.price_service import query_ohlcv
    from app.services.sr_service import get_sr_levels

    formula = "Sum of sub-scores: Strong Count (max 40) + Proximity (max 30) + Avg Strength (max 30), clamped to [0, 100]."

    bars = await query_ohlcv(db, symbol)
    if not bars:
        return None, None

    price_now = float(bars[-1].close)
    if price_now <= 0:
        return None, None

    try:
        levels = await get_sr_levels(db, symbol)
    except Exception:
        return None, None
    if not levels:
        return None, None

    parts: list[dict] = []

    # Factor 1: number of strong levels (strength >= 50); 10 pts each, capped at 40.
    n_strong = sum(1 for lvl in levels if lvl.strength >= 50)
    count_pts = min(40.0, n_strong * 10.0)
    parts.append({
        "name": "Strong Count",
        "score": count_pts,
        "weight": 40.0,
        "raw_value": n_strong,
        "description": f"{n_strong} strong level(s) (strength >= 50). Score: min(40, count * 10).",
    })

    # Factor 2: relative distance of the closest level from the current price.
    gaps = [abs(lvl.price_level - price_now) / price_now for lvl in levels]
    closest = min(gaps) if gaps else 1.0
    closest_pct = round(closest * 100.0, 4)
    # Linear falloff: a level at the price earns 30 pts, anything 5%+ away earns 0.
    prox_pts = max(0.0, min(30.0, 30.0 * (1.0 - closest / 0.05)))
    parts.append({
        "name": "Proximity",
        "score": prox_pts,
        "weight": 30.0,
        "raw_value": closest_pct,
        "description": f"Nearest S/R level is {closest_pct}% from price. Score: 30 * (1 - dist/5%), clamped to [0, 30].",
    })

    # Factor 3: mean level strength, scaled so 100 avg → the 30-pt cap.
    mean_strength = sum(lvl.strength for lvl in levels) / len(levels)
    strength_pts = min(30.0, mean_strength * 0.3)
    parts.append({
        "name": "Avg Strength",
        "score": strength_pts,
        "weight": 30.0,
        "raw_value": round(mean_strength, 4),
        "description": f"Average level strength: {round(mean_strength, 2)}. Score: min(30, avg * 0.3).",
    })

    final = max(0.0, min(100.0, count_pts + prox_pts + strength_pts))
    return final, {"sub_scores": parts, "formula": formula, "unavailable": []}
|
|
|
|
|
|
async def _compute_sentiment_score(
    db: AsyncSession, symbol: str
) -> tuple[float | None, dict | None]:
    """Compute sentiment dimension score via sentiment service.

    Uses a time-decay weighted average of sentiment records within the
    lookback window.

    Returns (score, breakdown) where breakdown follows the ScoreBreakdown
    TypedDict shape: {sub_scores, formula, unavailable}. Returns
    (None, breakdown) when no records exist in the window; (None, None)
    when the sentiment service itself fails.
    """
    from app.services.sentiment_service import (
        compute_sentiment_dimension_score,
        get_sentiment_scores,
    )

    lookback_hours: float = 24
    decay_rate: float = 0.1

    # Single source of truth for the formula text — previously duplicated
    # verbatim in the no-records and normal return paths, risking drift.
    formula = (
        f"Time-decay weighted average over {lookback_hours}h window "
        f"with decay_rate={decay_rate}: "
        "sum(base_score * exp(-decay_rate * hours_since)) / sum(exp(-decay_rate * hours_since))"
    )

    try:
        scores = await get_sentiment_scores(db, symbol, lookback_hours)
    except Exception:
        return None, None

    if not scores:
        return None, {
            "sub_scores": [],
            "formula": formula,
            "unavailable": [
                {"name": "sentiment_records", "reason": "No sentiment records in lookback window"}
            ],
        }

    try:
        score = await compute_sentiment_dimension_score(db, symbol, lookback_hours, decay_rate)
    except Exception:
        return None, None

    # Each sub-score entry mirrors the overall score; raw_value carries the
    # parameter/record-count detail the entry describes.
    displayed = score if score is not None else 0.0
    sub_scores: list[dict] = [
        {
            "name": "record_count",
            "score": displayed,
            "weight": 1.0,
            "raw_value": len(scores),
            "description": f"Number of sentiment records used in the lookback window ({lookback_hours}h).",
        },
        {
            "name": "decay_rate",
            "score": displayed,
            "weight": 1.0,
            "raw_value": decay_rate,
            "description": "Exponential decay rate applied to older records (higher = faster decay).",
        },
        {
            "name": "lookback_window",
            "score": displayed,
            "weight": 1.0,
            "raw_value": lookback_hours,
            "description": f"Lookback window in hours for sentiment records ({lookback_hours}h).",
        },
    ]

    breakdown = {
        "sub_scores": sub_scores,
        "formula": formula,
        "unavailable": [],
    }

    return score, breakdown
|
|
|
|
|
|
async def _compute_fundamental_score(
    db: AsyncSession, symbol: str
) -> tuple[float | None, dict | None]:
    """Compute fundamental dimension score.

    Normalized composite of P/E (lower is better), revenue growth
    (higher is better), earnings surprise (higher is better). Each
    available metric is mapped to [0, 100] and the final score is the
    equal-weighted average of the metrics that could be computed;
    missing metrics (or a non-positive P/E) are listed in
    ``unavailable`` instead.

    Returns (score, breakdown) where breakdown follows the ScoreBreakdown
    TypedDict shape: {sub_scores, formula, unavailable}. Returns
    (None, None) when no fundamental record exists at all;
    (None, breakdown) when a record exists but no metric is usable.
    """
    from app.services.fundamental_service import get_fundamental

    fund = await get_fundamental(db, symbol)
    if fund is None:
        return None, None

    # Nominal per-metric weight reported in the breakdown.
    # NOTE(review): the final average divides by the count of *available*
    # metrics, so the effective weight is 1/len(scores), not always 1/3 —
    # the displayed weight can overstate precision when metrics are missing.
    weight = 1.0 / 3.0
    scores: list[float] = []
    sub_scores: list[dict] = []
    unavailable: list[dict[str, str]] = []

    formula = (
        "Equal-weighted average of available sub-scores: "
        "(PE_Ratio + Revenue_Growth + Earnings_Surprise) / count. "
        "PE: 100 - (pe - 15) * (100/30), clamped [0,100]. "
        "Revenue Growth: 50 + growth% * 2.5, clamped [0,100]. "
        "Earnings Surprise: 50 + surprise% * 5.0, clamped [0,100]."
    )

    # P/E: lower is better. 0-15 = 100, 15-30 = 50-100, 30+ = 0-50
    if fund.pe_ratio is not None and fund.pe_ratio > 0:
        pe_score = max(0.0, min(100.0, 100.0 - (fund.pe_ratio - 15.0) * (100.0 / 30.0)))
        scores.append(pe_score)
        sub_scores.append({
            "name": "PE Ratio",
            "score": pe_score,
            "weight": weight,
            "raw_value": fund.pe_ratio,
            "description": "PE ratio (lower is better). Score: 100 - (pe - 15) * (100/30), clamped [0,100].",
        })
    else:
        # Negative/zero P/E (losses) is treated the same as missing data.
        unavailable.append({
            "name": "PE Ratio",
            "reason": "PE ratio not available or not positive",
        })

    # Revenue growth: higher is better. 0% = 50, 20%+ = 100, -20% = 0
    # (assumes revenue_growth is expressed in percent — TODO confirm upstream)
    if fund.revenue_growth is not None:
        rg_score = max(0.0, min(100.0, 50.0 + fund.revenue_growth * 2.5))
        scores.append(rg_score)
        sub_scores.append({
            "name": "Revenue Growth",
            "score": rg_score,
            "weight": weight,
            "raw_value": fund.revenue_growth,
            "description": "Revenue growth %. Score: 50 + growth% * 2.5, clamped [0,100].",
        })
    else:
        unavailable.append({
            "name": "Revenue Growth",
            "reason": "Revenue growth data not available",
        })

    # Earnings surprise: higher is better. 0% = 50, 10%+ = 100, -10% = 0
    if fund.earnings_surprise is not None:
        es_score = max(0.0, min(100.0, 50.0 + fund.earnings_surprise * 5.0))
        scores.append(es_score)
        sub_scores.append({
            "name": "Earnings Surprise",
            "score": es_score,
            "weight": weight,
            "raw_value": fund.earnings_surprise,
            "description": "Earnings surprise %. Score: 50 + surprise% * 5.0, clamped [0,100].",
        })
    else:
        unavailable.append({
            "name": "Earnings Surprise",
            "reason": "Earnings surprise data not available",
        })

    breakdown: dict = {
        "sub_scores": sub_scores,
        "formula": formula,
        "unavailable": unavailable,
    }

    # No metric usable — surface the reasons via breakdown, but no score.
    if not scores:
        return None, breakdown

    return sum(scores) / len(scores), breakdown
|
|
|
|
|
|
async def _compute_momentum_score(
    db: AsyncSession, symbol: str
) -> tuple[float | None, dict | None]:
    """Score price momentum for *symbol*.

    Blends 5-day and 20-day rate-of-change readings at equal weight; a
    window that lacks enough history is reported as unavailable and the
    remaining weight re-normalized.

    Returns (score, breakdown) where breakdown follows the ScoreBreakdown
    TypedDict shape: {sub_scores, formula, unavailable}.
    """
    from app.services.price_service import query_ohlcv

    formula = "Weighted average: 0.5 * ROC_5 + 0.5 * ROC_20, re-normalized if any sub-score unavailable."

    bars = await query_ohlcv(db, symbol)
    if not bars or len(bars) < 6:
        return None, None

    closes = [float(bar.close) for bar in bars]
    last_close = closes[-1]

    weighted_parts: list[tuple[float, float]] = []  # (weight, score)
    details: list[dict] = []
    unavailable: list[dict[str, str]] = []

    def add_roc(lookback: int) -> None:
        """Append the <lookback>-day ROC sub-score, or record why it is missing."""
        anchor = -(lookback + 1)
        if len(closes) >= lookback + 1 and closes[anchor] > 0:
            roc = (last_close - closes[anchor]) / closes[anchor] * 100.0
            # Map: -10% -> 0, 0% -> 50, +10% -> 100
            pts = max(0.0, min(100.0, 50.0 + roc * 5.0))
            weighted_parts.append((0.5, pts))
            details.append({
                "name": f"{lookback}-day ROC",
                "score": pts,
                "weight": 0.5,
                "raw_value": round(roc, 4),
                "description": f"{lookback}-day rate of change: {round(roc, 2)}%. Score: 50 + ROC * 5, clamped to [0, 100].",
            })
        else:
            unavailable.append({
                "name": f"{lookback}-day ROC",
                "reason": f"Need at least {lookback + 1} closing prices",
            })

    add_roc(5)
    add_roc(20)

    if not weighted_parts:
        return None, {
            "sub_scores": [],
            "formula": formula,
            "unavailable": unavailable,
        }

    denom = sum(w for w, _ in weighted_parts)
    if denom == 0:
        return None, None
    blended = sum(w * s for w, s in weighted_parts) / denom
    final = max(0.0, min(100.0, blended))

    return final, {
        "sub_scores": details,
        "formula": formula,
        "unavailable": unavailable,
    }
|
|
|
|
|
|
# Dispatch table: dimension name -> async computer returning (score, breakdown).
# Keys must stay in sync with DIMENSIONS.
_DIMENSION_COMPUTERS = {
    "technical": _compute_technical_score,
    "sr_quality": _compute_sr_quality_score,
    "sentiment": _compute_sentiment_score,
    "fundamental": _compute_fundamental_score,
    "momentum": _compute_momentum_score,
}
|
|
|
|
|
|
# ---------------------------------------------------------------------------
# Public API
# ---------------------------------------------------------------------------
|
|
|
|
async def compute_dimension_score(
    db: AsyncSession, symbol: str, dimension: str
) -> float | None:
    """Compute a single dimension score for a ticker.

    Returns the score (0-100, clamped) or None if insufficient data.
    Persists the result to the DimensionScore table: a fresh value
    upserts the row and clears its stale flag; an uncomputable value
    marks an existing row stale, and creates nothing if no row exists.

    Raises:
        ValidationError: if *dimension* is not one of DIMENSIONS.
        NotFoundError: if *symbol* does not exist (via _get_ticker).

    Note: does not commit — callers are responsible for the transaction.
    """
    if dimension not in _DIMENSION_COMPUTERS:
        raise ValidationError(
            f"Unknown dimension: {dimension}. Valid: {', '.join(DIMENSIONS)}"
        )

    ticker = await _get_ticker(db, symbol)
    raw_result = await _DIMENSION_COMPUTERS[dimension](db, symbol)

    # Handle both tuple (score, breakdown) and plain float | None returns
    if isinstance(raw_result, tuple):
        score_val = raw_result[0]
    else:
        score_val = raw_result

    now = datetime.now(timezone.utc)

    # Upsert dimension score
    result = await db.execute(
        select(DimensionScore).where(
            DimensionScore.ticker_id == ticker.id,
            DimensionScore.dimension == dimension,
        )
    )
    existing = result.scalar_one_or_none()

    # Defensive clamp to the documented 0-100 range before persisting.
    if score_val is not None:
        score_val = max(0.0, min(100.0, score_val))

    if existing is not None:
        if score_val is not None:
            existing.score = score_val
            existing.is_stale = False
            existing.computed_at = now
        else:
            # Can't compute — mark stale
            existing.is_stale = True
    elif score_val is not None:
        dim = DimensionScore(
            ticker_id=ticker.id,
            dimension=dimension,
            score=score_val,
            is_stale=False,
            computed_at=now,
        )
        db.add(dim)

    return score_val
|
|
|
|
|
|
async def compute_all_dimensions(
    db: AsyncSession, symbol: str
) -> dict[str, float | None]:
    """Compute and persist every dimension score for *symbol*.

    Returns a mapping of dimension name to its score, or None for any
    dimension that could not be computed.
    """
    return {
        dim: await compute_dimension_score(db, symbol, dim)
        for dim in DIMENSIONS
    }
|
|
|
|
|
|
async def compute_composite_score(
    db: AsyncSession,
    symbol: str,
    weights: dict[str, float] | None = None,
) -> tuple[float | None, list[str]]:
    """Compute composite score from available dimension scores.

    Returns (composite_score, missing_dimensions).
    Missing dimensions are excluded and weights re-normalized. A
    dimension with a zero or negative weight is skipped entirely —
    neither averaged nor reported missing. The result (plus the weight
    map used, as JSON) is persisted to CompositeScore.

    Raises:
        NotFoundError: if *symbol* does not exist (via _get_ticker).

    Note: does not commit — callers are responsible for the transaction.
    """
    ticker = await _get_ticker(db, symbol)

    if weights is None:
        weights = await _get_weights(db)

    # Get current dimension scores
    result = await db.execute(
        select(DimensionScore).where(DimensionScore.ticker_id == ticker.id)
    )
    dim_scores = {ds.dimension: ds for ds in result.scalars().all()}

    available: list[tuple[str, float, float]] = []  # (dim, weight, score)
    missing: list[str] = []

    for dim in DIMENSIONS:
        w = weights.get(dim, 0.0)
        if w <= 0:
            # Zero-weighted dimensions don't participate at all.
            continue
        ds = dim_scores.get(dim)
        if ds is not None and not ds.is_stale and ds.score is not None:
            available.append((dim, w, ds.score))
        else:
            missing.append(dim)

    if not available:
        return None, missing

    # Re-normalize weights
    total_weight = sum(w for _, w, _ in available)
    if total_weight == 0:
        return None, missing

    composite = sum(w * s for _, w, s in available) / total_weight
    composite = max(0.0, min(100.0, composite))

    # Persist composite score
    now = datetime.now(timezone.utc)
    comp_result = await db.execute(
        select(CompositeScore).where(CompositeScore.ticker_id == ticker.id)
    )
    existing = comp_result.scalar_one_or_none()

    if existing is not None:
        existing.score = composite
        existing.is_stale = False
        existing.weights_json = json.dumps(weights)
        existing.computed_at = now
    else:
        comp = CompositeScore(
            ticker_id=ticker.id,
            score=composite,
            is_stale=False,
            weights_json=json.dumps(weights),
            computed_at=now,
        )
        db.add(comp)

    return composite, missing
|
|
|
|
|
|
|
|
async def get_score(
    db: AsyncSession, symbol: str
) -> dict:
    """Get composite + all dimension scores for a ticker.

    Recomputes stale dimensions on demand, then recomputes composite,
    and commits. Returns a dict suitable for ScoreResponse, including
    dimension breakdowns and a composite breakdown with re-normalization
    info.

    Raises:
        NotFoundError: if *symbol* does not exist (via _get_ticker).
    """
    ticker = await _get_ticker(db, symbol)
    weights = await _get_weights(db)

    # Check for stale dimension scores and recompute them
    result = await db.execute(
        select(DimensionScore).where(DimensionScore.ticker_id == ticker.id)
    )
    dim_scores = {ds.dimension: ds for ds in result.scalars().all()}

    for dim in DIMENSIONS:
        ds = dim_scores.get(dim)
        if ds is None or ds.is_stale:
            await compute_dimension_score(db, symbol, dim)

    # Check composite staleness
    comp_result = await db.execute(
        select(CompositeScore).where(CompositeScore.ticker_id == ticker.id)
    )
    comp = comp_result.scalar_one_or_none()

    if comp is None or comp.is_stale:
        await compute_composite_score(db, symbol, weights)

    await db.commit()

    # Re-fetch everything fresh
    result = await db.execute(
        select(DimensionScore).where(DimensionScore.ticker_id == ticker.id)
    )
    dim_scores_list = list(result.scalars().all())

    comp_result = await db.execute(
        select(CompositeScore).where(CompositeScore.ticker_id == ticker.id)
    )
    comp = comp_result.scalar_one_or_none()

    # Compute breakdowns for each dimension by calling the dimension
    # computers again (only scores are persisted, breakdowns are not).
    breakdowns: dict[str, dict | None] = {}
    for dim in DIMENSIONS:
        try:
            raw_result = await _DIMENSION_COMPUTERS[dim](db, symbol)
            if isinstance(raw_result, tuple) and len(raw_result) == 2:
                breakdowns[dim] = raw_result[1]
            else:
                breakdowns[dim] = None
        except Exception:
            # Breakdowns are best-effort; the persisted score still stands.
            breakdowns[dim] = None

    # Build dimension entries with breakdowns
    dimensions = []
    missing = []
    available_dims: list[str] = []
    for dim in DIMENSIONS:
        found = next((ds for ds in dim_scores_list if ds.dimension == dim), None)
        if found is not None and not found.is_stale and found.score is not None:
            dimensions.append({
                "dimension": found.dimension,
                "score": found.score,
                "is_stale": found.is_stale,
                "computed_at": found.computed_at,
                "breakdown": breakdowns.get(dim),
            })
            w = weights.get(dim, 0.0)
            if w > 0:
                available_dims.append(dim)
        else:
            missing.append(dim)
            # Still include stale dimensions in the list if they exist in DB
            if found is not None:
                dimensions.append({
                    "dimension": found.dimension,
                    "score": found.score,
                    "is_stale": found.is_stale,
                    "computed_at": found.computed_at,
                    "breakdown": breakdowns.get(dim),
                })

    # Build composite breakdown with re-normalization info.
    # (Removed a dead `composite_breakdown = None` assignment that was
    # unconditionally overwritten below.)
    available_weight_sum = sum(weights.get(d, 0.0) for d in available_dims)
    if available_weight_sum > 0:
        renormalized_weights = {
            d: weights.get(d, 0.0) / available_weight_sum for d in available_dims
        }
    else:
        renormalized_weights = {}

    composite_breakdown = {
        "weights": weights,
        "available_dimensions": available_dims,
        "missing_dimensions": missing,
        "renormalized_weights": renormalized_weights,
        "formula": "Weighted average of available dimensions with re-normalized weights: sum(weight_i * score_i) / sum(weight_i)",
    }

    return {
        "symbol": ticker.symbol,
        "composite_score": comp.score if comp else None,
        "composite_stale": comp.is_stale if comp else False,
        "weights": weights,
        "dimensions": dimensions,
        "missing_dimensions": missing,
        "computed_at": comp.computed_at if comp else None,
        "composite_breakdown": composite_breakdown,
    }
|
|
|
|
|
|
|
|
async def get_rankings(db: AsyncSession) -> dict:
    """Get all tickers ranked by composite score descending.

    For each ticker, missing/stale dimension scores and the composite
    score are recomputed (and committed) before ranking. Tickers that
    still have no composite score afterwards are omitted.

    NOTE(review): this issues several queries per ticker (N+1 pattern);
    fine for small universes, worth batching if the ticker list grows.

    Returns dict suitable for RankingResponse.
    """
    weights = await _get_weights(db)

    # Get all tickers
    result = await db.execute(select(Ticker).order_by(Ticker.symbol))
    tickers = list(result.scalars().all())

    rankings: list[dict] = []
    for ticker in tickers:
        # Get composite score
        comp_result = await db.execute(
            select(CompositeScore).where(CompositeScore.ticker_id == ticker.id)
        )
        comp = comp_result.scalar_one_or_none()

        # If no composite or stale, recompute
        if comp is None or comp.is_stale:
            # Recompute stale dimensions first
            dim_result = await db.execute(
                select(DimensionScore).where(
                    DimensionScore.ticker_id == ticker.id
                )
            )
            dim_scores = {ds.dimension: ds for ds in dim_result.scalars().all()}
            for dim in DIMENSIONS:
                ds = dim_scores.get(dim)
                if ds is None or ds.is_stale:
                    await compute_dimension_score(db, ticker.symbol, dim)

            await compute_composite_score(db, ticker.symbol, weights)

            await db.commit()

            # Re-fetch
            comp_result = await db.execute(
                select(CompositeScore).where(CompositeScore.ticker_id == ticker.id)
            )
            comp = comp_result.scalar_one_or_none()
        if comp is None:
            # Still nothing to rank (e.g. no data at all for this ticker).
            continue

        dim_result = await db.execute(
            select(DimensionScore).where(
                DimensionScore.ticker_id == ticker.id
            )
        )
        dims = [
            {
                "dimension": ds.dimension,
                "score": ds.score,
                "is_stale": ds.is_stale,
                "computed_at": ds.computed_at,
            }
            for ds in dim_result.scalars().all()
        ]

        rankings.append({
            "symbol": ticker.symbol,
            "composite_score": comp.score,
            "dimensions": dims,
        })

    # Sort by composite score descending
    rankings.sort(key=lambda r: r["composite_score"], reverse=True)

    return {
        "rankings": rankings,
        "weights": weights,
    }
|
|
|
|
|
|
async def update_weights(
    db: AsyncSession, weights: dict[str, float]
) -> dict[str, float]:
    """Update scoring weights and recompute all composite scores.

    Validates that every supplied dimension is known and its weight is
    non-negative; dimensions not supplied default to 0.0. Persists the
    full weight map, recomputes every ticker's composite score with it,
    commits, and returns the new weights.

    Raises:
        ValidationError: on an unknown dimension or a negative weight.
    """
    # Reject unknown dimensions and negative weights up front.
    for name, value in weights.items():
        if name not in DIMENSIONS:
            raise ValidationError(
                f"Unknown dimension: {name}. Valid: {', '.join(DIMENSIONS)}"
            )
        if value < 0:
            raise ValidationError(f"Weight for {name} must be non-negative, got {value}")

    # Every dimension gets an explicit weight (0.0 when unspecified).
    full_weights = {name: weights.get(name, 0.0) for name in DIMENSIONS}

    await _save_weights(db, full_weights)

    # Weights changed, so every ticker's composite score must be rebuilt.
    all_tickers = await db.execute(select(Ticker))
    for ticker in all_tickers.scalars().all():
        await compute_composite_score(db, ticker.symbol, full_weights)

    await db.commit()
    return full_weights
|