first commit
This commit is contained in:
584
app/services/scoring_service.py
Normal file
584
app/services/scoring_service.py
Normal file
@@ -0,0 +1,584 @@
|
||||
"""Scoring Engine service.
|
||||
|
||||
Computes dimension scores (technical, sr_quality, sentiment, fundamental,
|
||||
momentum) each 0-100, composite score as weighted average of available
|
||||
dimensions with re-normalized weights, staleness marking/recomputation
|
||||
on demand, and weight update triggers full recomputation.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import logging
|
||||
from datetime import datetime, timezone
|
||||
|
||||
from sqlalchemy import select
|
||||
from sqlalchemy.ext.asyncio import AsyncSession
|
||||
|
||||
from app.exceptions import NotFoundError, ValidationError
|
||||
from app.models.score import CompositeScore, DimensionScore
|
||||
from app.models.settings import SystemSetting
|
||||
from app.models.ticker import Ticker
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
DIMENSIONS = ["technical", "sr_quality", "sentiment", "fundamental", "momentum"]
|
||||
|
||||
DEFAULT_WEIGHTS: dict[str, float] = {
|
||||
"technical": 0.25,
|
||||
"sr_quality": 0.20,
|
||||
"sentiment": 0.15,
|
||||
"fundamental": 0.20,
|
||||
"momentum": 0.20,
|
||||
}
|
||||
|
||||
SCORING_WEIGHTS_KEY = "scoring_weights"
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Helpers
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
async def _get_ticker(db: AsyncSession, symbol: str) -> Ticker:
    """Look up a Ticker by symbol (trimmed, upper-cased).

    Raises:
        NotFoundError: if no ticker matches the normalized symbol.
    """
    sym = symbol.strip().upper()
    row = await db.execute(select(Ticker).where(Ticker.symbol == sym))
    found = row.scalar_one_or_none()
    if found is None:
        raise NotFoundError(f"Ticker not found: {sym}")
    return found
|
||||
|
||||
|
||||
async def _get_weights(db: AsyncSession) -> dict[str, float]:
    """Load scoring weights from SystemSetting, falling back to defaults.

    The stored value must be a JSON object mapping dimension name to a
    numeric weight. Any other parseable shape (list, string, number)
    would break downstream ``weights.get(...)`` calls, so it is treated
    the same as malformed JSON: log a warning and return the defaults.
    """
    result = await db.execute(
        select(SystemSetting).where(SystemSetting.key == SCORING_WEIGHTS_KEY)
    )
    setting = result.scalar_one_or_none()
    if setting is not None:
        try:
            loaded = json.loads(setting.value)
        except (json.JSONDecodeError, TypeError):
            logger.warning("Invalid scoring weights in DB, using defaults")
        else:
            # Guard against a value that parses but is not an object of
            # numeric weights — e.g. a JSON list or bare string.
            if isinstance(loaded, dict) and all(
                isinstance(v, (int, float)) for v in loaded.values()
            ):
                return loaded
            logger.warning("Malformed scoring weights in DB, using defaults")
    # Copy so callers can't mutate the module-level defaults.
    return dict(DEFAULT_WEIGHTS)
|
||||
|
||||
|
||||
async def _save_weights(db: AsyncSession, weights: dict[str, float]) -> None:
    """Persist scoring weights to SystemSetting (insert-or-update).

    Does not commit; the caller owns the transaction.
    """
    row = await db.execute(
        select(SystemSetting).where(SystemSetting.key == SCORING_WEIGHTS_KEY)
    )
    existing = row.scalar_one_or_none()
    timestamp = datetime.now(timezone.utc)
    payload = json.dumps(weights)
    if existing is None:
        db.add(
            SystemSetting(
                key=SCORING_WEIGHTS_KEY,
                value=payload,
                updated_at=timestamp,
            )
        )
    else:
        existing.value = payload
        existing.updated_at = timestamp
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Dimension score computation
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
async def _compute_technical_score(db: AsyncSession, symbol: str) -> float | None:
    """Compute technical dimension score from ADX, EMA, RSI.

    Each indicator contributes a 0-100 sub-score with the weight shown
    below; an indicator that fails to compute (typically too few bars)
    is skipped and the remaining weights re-normalize. Returns None
    when there is no price data or no indicator could be computed.
    """
    from app.services.indicator_service import (
        compute_adx,
        compute_ema,
        compute_rsi,
        _extract_ohlcv,
    )
    from app.services.price_service import query_ohlcv

    records = await query_ohlcv(db, symbol)
    if not records:
        return None

    _, highs, lows, closes, _ = _extract_ohlcv(records)

    # (weight, name, compute) per indicator. ADX needs 28+ bars, EMA
    # needs period+1, RSI needs 15+ — the computers raise when data is
    # insufficient, and that indicator is simply skipped.
    indicator_specs = [
        (0.4, "adx", lambda: compute_adx(highs, lows, closes)),
        (0.3, "ema", lambda: compute_ema(closes)),
        (0.3, "rsi", lambda: compute_rsi(closes)),
    ]

    scores: list[tuple[float, float]] = []  # (weight, score)
    for weight, name, compute in indicator_specs:
        try:
            scores.append((weight, compute()["score"]))
        except Exception:
            # Best-effort by design (skip the indicator), but leave a
            # trace for debugging instead of a silent pass.
            logger.debug(
                "technical: %s indicator failed for %s", name, symbol, exc_info=True
            )

    if not scores:
        return None

    total_weight = sum(w for w, _ in scores)
    if total_weight == 0:
        return None
    weighted = sum(w * s for w, s in scores) / total_weight
    return max(0.0, min(100.0, weighted))
|
||||
|
||||
|
||||
async def _compute_sr_quality_score(db: AsyncSession, symbol: str) -> float | None:
    """Compute S/R quality dimension score.

    Combines three capped factors: count of strong levels
    (strength >= 50, up to 40 pts), proximity of the nearest level to
    the latest close (up to 30 pts), and mean level strength (up to
    30 pts). Returns None when prices or levels are unavailable.
    """
    from app.services.price_service import query_ohlcv
    from app.services.sr_service import get_sr_levels

    bars = await query_ohlcv(db, symbol)
    if not bars:
        return None

    last_close = float(bars[-1].close)
    if last_close <= 0:
        return None

    try:
        levels = await get_sr_levels(db, symbol)
    except Exception:
        # Level detection failure counts as "no data" for this dimension.
        return None
    if not levels:
        return None

    # Factor 1: strong levels (strength >= 50), 10 pts each, capped at 40
    strong = [lv for lv in levels if lv.strength >= 50]
    count_pts = min(40.0, 10.0 * len(strong))

    # Factor 2: nearest level's relative distance to the latest close.
    # 0% away → 30 pts, falling linearly to 0 pts at 5% or more.
    nearest = min(abs(lv.price_level - last_close) / last_close for lv in levels)
    proximity_pts = max(0.0, min(30.0, 30.0 * (1.0 - nearest / 0.05)))

    # Factor 3: average strength, scaled so strength 100 → the 30-pt cap
    mean_strength = sum(lv.strength for lv in levels) / len(levels)
    strength_pts = min(30.0, mean_strength * 0.3)

    return max(0.0, min(100.0, count_pts + proximity_pts + strength_pts))
|
||||
|
||||
|
||||
async def _compute_sentiment_score(db: AsyncSession, symbol: str) -> float | None:
    """Compute sentiment dimension score via the sentiment service.

    Delegates to compute_sentiment_dimension_score; any service failure
    is treated as "no data" and yields None.
    """
    from app.services.sentiment_service import compute_sentiment_dimension_score

    try:
        score = await compute_sentiment_dimension_score(db, symbol)
    except Exception:
        return None
    return score
|
||||
|
||||
|
||||
async def _compute_fundamental_score(db: AsyncSession, symbol: str) -> float | None:
    """Compute fundamental dimension score.

    Unweighted mean of whichever normalized metrics are present: P/E
    (lower is better), revenue growth (higher is better), earnings
    surprise (higher is better). Returns None when no metric is
    available.
    """
    from app.services.fundamental_service import get_fundamental

    fund = await get_fundamental(db, symbol)
    if fund is None:
        return None

    parts: list[float] = []

    # P/E: lower is better. <=15 → 100, 30 → 50, 45+ → 0 (clamped)
    pe = fund.pe_ratio
    if pe is not None and pe > 0:
        parts.append(max(0.0, min(100.0, 100.0 - (pe - 15.0) * (100.0 / 30.0))))

    # Revenue growth: 0% → 50, +20% → 100, -20% → 0 (clamped)
    growth = fund.revenue_growth
    if growth is not None:
        parts.append(max(0.0, min(100.0, 50.0 + growth * 2.5)))

    # Earnings surprise: 0% → 50, +10% → 100, -10% → 0 (clamped)
    surprise = fund.earnings_surprise
    if surprise is not None:
        parts.append(max(0.0, min(100.0, 50.0 + surprise * 5.0)))

    return sum(parts) / len(parts) if parts else None
|
||||
|
||||
|
||||
async def _compute_momentum_score(db: AsyncSession, symbol: str) -> float | None:
    """Compute momentum dimension score.

    Rate of change (ROC) of the close over 5-day and 20-day lookbacks,
    each mapped to 0-100 (−10% → 0, 0% → 50, +10% → 100, clamped) and
    equally weighted. A lookback without enough history or with a
    non-positive base price is skipped and weights re-normalize.
    Returns None when fewer than 6 closes are available.
    """
    from app.services.price_service import query_ohlcv

    records = await query_ohlcv(db, symbol)
    if not records or len(records) < 6:
        return None

    closes = [float(r.close) for r in records]
    latest = closes[-1]

    scores: list[tuple[float, float]] = []  # (weight, score)

    # Single loop replaces two duplicated branches; the guard above
    # guarantees the 5-day base exists, the 20-day one may not.
    for weight, lookback in ((0.5, 5), (0.5, 20)):
        if len(closes) > lookback and closes[-(lookback + 1)] > 0:
            base = closes[-(lookback + 1)]
            roc = (latest - base) / base * 100.0
            # Map: -10% → 0, 0% → 50, +10% → 100
            scores.append((weight, max(0.0, min(100.0, 50.0 + roc * 5.0))))

    if not scores:
        return None

    total_weight = sum(w for w, _ in scores)
    if total_weight == 0:
        return None
    weighted = sum(w * s for w, s in scores) / total_weight
    return max(0.0, min(100.0, weighted))
|
||||
|
||||
|
||||
# Dispatch table: dimension name → async computer(db, symbol) -> float | None.
# Keys must stay in sync with DIMENSIONS; compute_dimension_score validates
# against this mapping.
_DIMENSION_COMPUTERS = {
    "technical": _compute_technical_score,
    "sr_quality": _compute_sr_quality_score,
    "sentiment": _compute_sentiment_score,
    "fundamental": _compute_fundamental_score,
    "momentum": _compute_momentum_score,
}
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Public API
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
async def compute_dimension_score(
    db: AsyncSession, symbol: str, dimension: str
) -> float | None:
    """Compute a single dimension score for a ticker.

    Returns the score (0-100) or None if insufficient data.
    Persists the result to the DimensionScore table (upsert); the
    caller is responsible for committing the session.

    Raises:
        ValidationError: if ``dimension`` is not a known dimension.
        NotFoundError: if the ticker symbol does not exist.
    """
    if dimension not in _DIMENSION_COMPUTERS:
        raise ValidationError(
            f"Unknown dimension: {dimension}. Valid: {', '.join(DIMENSIONS)}"
        )

    ticker = await _get_ticker(db, symbol)
    score_val = await _DIMENSION_COMPUTERS[dimension](db, symbol)

    now = datetime.now(timezone.utc)

    # Upsert dimension score
    result = await db.execute(
        select(DimensionScore).where(
            DimensionScore.ticker_id == ticker.id,
            DimensionScore.dimension == dimension,
        )
    )
    existing = result.scalar_one_or_none()

    if score_val is not None:
        # Defensive clamp — computers already clamp, but keep the
        # persisted value in [0, 100] regardless.
        score_val = max(0.0, min(100.0, score_val))

    if existing is not None:
        if score_val is not None:
            existing.score = score_val
            existing.is_stale = False
            existing.computed_at = now
        else:
            # Can't compute — mark stale; the previous score and
            # computed_at are intentionally left in place.
            existing.is_stale = True
    elif score_val is not None:
        # No prior row and nothing computable → no row is created.
        dim = DimensionScore(
            ticker_id=ticker.id,
            dimension=dimension,
            score=score_val,
            is_stale=False,
            computed_at=now,
        )
        db.add(dim)

    return score_val
|
||||
|
||||
|
||||
async def compute_all_dimensions(
    db: AsyncSession, symbol: str
) -> dict[str, float | None]:
    """Compute every dimension score for a ticker.

    Returns a mapping of dimension name → score, with None where a
    dimension could not be computed.
    """
    return {
        dim: await compute_dimension_score(db, symbol, dim) for dim in DIMENSIONS
    }
|
||||
|
||||
|
||||
async def compute_composite_score(
    db: AsyncSession,
    symbol: str,
    weights: dict[str, float] | None = None,
) -> tuple[float | None, list[str]]:
    """Compute composite score from available dimension scores.

    Returns (composite_score, missing_dimensions).
    Missing dimensions are excluded and weights re-normalized over the
    available ones. Persists the result to CompositeScore (upsert);
    the caller is responsible for committing.

    Args:
        db: Active async session.
        symbol: Ticker symbol; raises NotFoundError if unknown.
        weights: Optional override; loaded from SystemSetting (or
            defaults) when None.
    """
    ticker = await _get_ticker(db, symbol)

    if weights is None:
        weights = await _get_weights(db)

    # Get current dimension scores
    result = await db.execute(
        select(DimensionScore).where(DimensionScore.ticker_id == ticker.id)
    )
    dim_scores = {ds.dimension: ds for ds in result.scalars().all()}

    available: list[tuple[str, float, float]] = []  # (dim, weight, score)
    missing: list[str] = []

    for dim in DIMENSIONS:
        w = weights.get(dim, 0.0)
        # Zero-weighted dimensions are ignored entirely: neither
        # averaged in nor reported as missing.
        if w <= 0:
            continue
        ds = dim_scores.get(dim)
        # A usable score must exist, be fresh, and be non-null.
        if ds is not None and not ds.is_stale and ds.score is not None:
            available.append((dim, w, ds.score))
        else:
            missing.append(dim)

    if not available:
        return None, missing

    # Re-normalize weights over the available dimensions only
    total_weight = sum(w for _, w, _ in available)
    if total_weight == 0:
        return None, missing

    composite = sum(w * s for _, w, s in available) / total_weight
    composite = max(0.0, min(100.0, composite))

    # Persist composite score; the weights actually used are stored as
    # JSON alongside for auditability.
    now = datetime.now(timezone.utc)
    comp_result = await db.execute(
        select(CompositeScore).where(CompositeScore.ticker_id == ticker.id)
    )
    existing = comp_result.scalar_one_or_none()

    if existing is not None:
        existing.score = composite
        existing.is_stale = False
        existing.weights_json = json.dumps(weights)
        existing.computed_at = now
    else:
        comp = CompositeScore(
            ticker_id=ticker.id,
            score=composite,
            is_stale=False,
            weights_json=json.dumps(weights),
            computed_at=now,
        )
        db.add(comp)

    return composite, missing
|
||||
|
||||
|
||||
async def get_score(
    db: AsyncSession, symbol: str
) -> dict:
    """Get composite + all dimension scores for a ticker.

    Recomputes stale dimensions on demand, then recomputes composite.
    Commits, then re-reads the persisted rows so the response reflects
    exactly what is stored.
    Returns a dict suitable for ScoreResponse.

    Raises:
        NotFoundError: if the ticker symbol does not exist.
    """
    ticker = await _get_ticker(db, symbol)
    weights = await _get_weights(db)

    # Check for stale dimension scores and recompute them
    result = await db.execute(
        select(DimensionScore).where(DimensionScore.ticker_id == ticker.id)
    )
    dim_scores = {ds.dimension: ds for ds in result.scalars().all()}

    for dim in DIMENSIONS:
        ds = dim_scores.get(dim)
        if ds is None or ds.is_stale:
            await compute_dimension_score(db, symbol, dim)

    # Check composite staleness
    comp_result = await db.execute(
        select(CompositeScore).where(CompositeScore.ticker_id == ticker.id)
    )
    comp = comp_result.scalar_one_or_none()

    if comp is None or comp.is_stale:
        await compute_composite_score(db, symbol, weights)

    # This endpoint owns the commit; the compute_* helpers only stage
    # changes in the session.
    await db.commit()

    # Re-fetch everything fresh
    result = await db.execute(
        select(DimensionScore).where(DimensionScore.ticker_id == ticker.id)
    )
    dim_scores_list = list(result.scalars().all())

    comp_result = await db.execute(
        select(CompositeScore).where(CompositeScore.ticker_id == ticker.id)
    )
    comp = comp_result.scalar_one_or_none()

    dimensions = []
    missing = []
    for dim in DIMENSIONS:
        # A dimension with no persisted row at all is reported as missing.
        found = next((ds for ds in dim_scores_list if ds.dimension == dim), None)
        if found is not None:
            dimensions.append({
                "dimension": found.dimension,
                "score": found.score,
                "is_stale": found.is_stale,
                "computed_at": found.computed_at,
            })
        else:
            missing.append(dim)

    return {
        "symbol": ticker.symbol,
        "composite_score": comp.score if comp else None,
        "composite_stale": comp.is_stale if comp else False,
        "weights": weights,
        "dimensions": dimensions,
        "missing_dimensions": missing,
        "computed_at": comp.computed_at if comp else None,
    }
|
||||
|
||||
|
||||
async def get_rankings(db: AsyncSession) -> dict:
    """Get all tickers ranked by composite score descending.

    Stale or missing composites are recomputed (dimensions first) and
    committed per ticker. Tickers with no computable composite are
    omitted from the rankings entirely.
    Returns dict suitable for RankingResponse.

    NOTE(review): this issues several queries per ticker (an N+1
    pattern); acceptable for small watchlists — revisit if the ticker
    count grows.
    """
    weights = await _get_weights(db)

    # Get all tickers
    result = await db.execute(select(Ticker).order_by(Ticker.symbol))
    tickers = list(result.scalars().all())

    rankings: list[dict] = []
    for ticker in tickers:
        # Get composite score
        comp_result = await db.execute(
            select(CompositeScore).where(CompositeScore.ticker_id == ticker.id)
        )
        comp = comp_result.scalar_one_or_none()

        # If no composite or stale, recompute
        if comp is None or comp.is_stale:
            # Recompute stale dimensions first
            dim_result = await db.execute(
                select(DimensionScore).where(
                    DimensionScore.ticker_id == ticker.id
                )
            )
            dim_scores = {ds.dimension: ds for ds in dim_result.scalars().all()}
            for dim in DIMENSIONS:
                ds = dim_scores.get(dim)
                if ds is None or ds.is_stale:
                    await compute_dimension_score(db, ticker.symbol, dim)

            await compute_composite_score(db, ticker.symbol, weights)

            # Commit per ticker so partial progress survives a later failure.
            await db.commit()

            # Re-fetch
            comp_result = await db.execute(
                select(CompositeScore).where(CompositeScore.ticker_id == ticker.id)
            )
            comp = comp_result.scalar_one_or_none()
            # Still no composite (no dimension data at all) → omit ticker.
            if comp is None:
                continue

        dim_result = await db.execute(
            select(DimensionScore).where(
                DimensionScore.ticker_id == ticker.id
            )
        )
        dims = [
            {
                "dimension": ds.dimension,
                "score": ds.score,
                "is_stale": ds.is_stale,
                "computed_at": ds.computed_at,
            }
            for ds in dim_result.scalars().all()
        ]

        rankings.append({
            "symbol": ticker.symbol,
            "composite_score": comp.score,
            "dimensions": dims,
        })

    # Sort by composite score descending
    rankings.sort(key=lambda r: r["composite_score"], reverse=True)

    return {
        "rankings": rankings,
        "weights": weights,
    }
|
||||
|
||||
|
||||
async def update_weights(
    db: AsyncSession, weights: dict[str, float]
) -> dict[str, float]:
    """Update scoring weights and recompute all composite scores.

    Validates that dimensions are known, weights are non-negative, and
    at least one weight is positive. Unspecified dimensions get weight
    0. Persists the weights, recomputes every ticker's composite
    score, commits, and returns the full weight map.

    Raises:
        ValidationError: on an unknown dimension, a negative weight,
            or an all-zero weight set.
    """
    # Validate keys and individual values
    for dim, w in weights.items():
        if dim not in DIMENSIONS:
            raise ValidationError(
                f"Unknown dimension: {dim}. Valid: {', '.join(DIMENSIONS)}"
            )
        if w < 0:
            raise ValidationError(f"Weight for {dim} must be non-negative, got {w}")

    # Ensure all dimensions have a weight (default 0 for unspecified)
    full_weights = {dim: weights.get(dim, 0.0) for dim in DIMENSIONS}

    # An all-zero weight set would exclude every dimension from the
    # composite, making every score permanently uncomputable — reject
    # it up front rather than persisting a broken configuration.
    if sum(full_weights.values()) <= 0:
        raise ValidationError("At least one dimension weight must be positive")

    # Persist
    await _save_weights(db, full_weights)

    # Recompute all composite scores with the new weights
    result = await db.execute(select(Ticker))
    tickers = list(result.scalars().all())

    for ticker in tickers:
        await compute_composite_score(db, ticker.symbol, full_weights)

    await db.commit()
    return full_weights
|
||||
Reference in New Issue
Block a user