Switch sentiment provider from Gemini to OpenAI; add citation and unavailable-field JSON parsing to the sentiment/fundamentals routers; add S/R zone clustering and visible-level filtering to the S/R levels endpoint
This commit is contained in:
@@ -1,5 +1,7 @@
|
||||
"""Fundamentals router — fundamental data endpoints."""
|
||||
|
||||
import json
|
||||
|
||||
from fastapi import APIRouter, Depends
|
||||
from sqlalchemy.ext.asyncio import AsyncSession
|
||||
|
||||
@@ -11,6 +13,17 @@ from app.services.fundamental_service import get_fundamental
|
||||
router = APIRouter(tags=["fundamentals"])
|
||||
|
||||
|
||||
def _parse_unavailable_fields(raw_json: str) -> dict[str, str]:
|
||||
"""Deserialize unavailable_fields_json, defaulting to {} on invalid JSON."""
|
||||
try:
|
||||
parsed = json.loads(raw_json)
|
||||
except (json.JSONDecodeError, TypeError):
|
||||
return {}
|
||||
if not isinstance(parsed, dict):
|
||||
return {}
|
||||
return {k: v for k, v in parsed.items() if isinstance(k, str) and isinstance(v, str)}
|
||||
|
||||
|
||||
@router.get("/fundamentals/{symbol}", response_model=APIEnvelope)
|
||||
async def read_fundamentals(
|
||||
symbol: str,
|
||||
@@ -30,6 +43,7 @@ async def read_fundamentals(
|
||||
earnings_surprise=record.earnings_surprise,
|
||||
market_cap=record.market_cap,
|
||||
fetched_at=record.fetched_at,
|
||||
unavailable_fields=_parse_unavailable_fields(record.unavailable_fields_json),
|
||||
)
|
||||
|
||||
return APIEnvelope(status="success", data=data.model_dump())
|
||||
|
||||
@@ -19,7 +19,7 @@ from app.exceptions import ProviderError
|
||||
from app.models.user import User
|
||||
from app.providers.alpaca import AlpacaOHLCVProvider
|
||||
from app.providers.fmp import FMPFundamentalProvider
|
||||
from app.providers.gemini_sentiment import GeminiSentimentProvider
|
||||
from app.providers.openai_sentiment import OpenAISentimentProvider
|
||||
from app.schemas.common import APIEnvelope
|
||||
from app.services import fundamental_service, ingestion_service, sentiment_service
|
||||
|
||||
@@ -67,10 +67,10 @@ async def fetch_symbol(
|
||||
sources["ohlcv"] = {"status": "error", "records": 0, "message": str(exc)}
|
||||
|
||||
# --- Sentiment ---
|
||||
if settings.gemini_api_key:
|
||||
if settings.openai_api_key:
|
||||
try:
|
||||
sent_provider = GeminiSentimentProvider(
|
||||
settings.gemini_api_key, settings.gemini_model
|
||||
sent_provider = OpenAISentimentProvider(
|
||||
settings.openai_api_key, settings.openai_model
|
||||
)
|
||||
data = await sent_provider.fetch_sentiment(symbol_upper)
|
||||
await sentiment_service.store_sentiment(
|
||||
@@ -80,6 +80,8 @@ async def fetch_symbol(
|
||||
confidence=data.confidence,
|
||||
source=data.source,
|
||||
timestamp=data.timestamp,
|
||||
reasoning=data.reasoning,
|
||||
citations=data.citations,
|
||||
)
|
||||
sources["sentiment"] = {
|
||||
"status": "ok",
|
||||
@@ -93,7 +95,7 @@ async def fetch_symbol(
|
||||
else:
|
||||
sources["sentiment"] = {
|
||||
"status": "skipped",
|
||||
"message": "Gemini API key not configured",
|
||||
"message": "OpenAI API key not configured",
|
||||
}
|
||||
|
||||
# --- Fundamentals ---
|
||||
@@ -108,6 +110,7 @@ async def fetch_symbol(
|
||||
revenue_growth=fdata.revenue_growth,
|
||||
earnings_surprise=fdata.earnings_surprise,
|
||||
market_cap=fdata.market_cap,
|
||||
unavailable_fields=fdata.unavailable_fields,
|
||||
)
|
||||
sources["fundamentals"] = {"status": "ok", "message": None}
|
||||
except Exception as exc:
|
||||
|
||||
@@ -6,14 +6,41 @@ from sqlalchemy.ext.asyncio import AsyncSession
|
||||
from app.dependencies import get_db, require_access
|
||||
from app.schemas.common import APIEnvelope
|
||||
from app.schemas.score import (
|
||||
CompositeBreakdownResponse,
|
||||
DimensionScoreResponse,
|
||||
RankingEntry,
|
||||
RankingResponse,
|
||||
ScoreBreakdownResponse,
|
||||
ScoreResponse,
|
||||
SubScoreResponse,
|
||||
WeightUpdateRequest,
|
||||
)
|
||||
from app.services.scoring_service import get_rankings, get_score, update_weights
|
||||
|
||||
|
||||
def _map_breakdown(raw: dict | None) -> ScoreBreakdownResponse | None:
    """Convert a raw breakdown dict from the scoring service into a Pydantic model."""
    if raw is None:
        return None
    # Missing keys fall back to sensible empties so a partial breakdown still maps.
    sub_scores = [SubScoreResponse(**entry) for entry in raw.get("sub_scores", [])]
    return ScoreBreakdownResponse(
        sub_scores=sub_scores,
        formula=raw.get("formula", ""),
        unavailable=raw.get("unavailable", []),
    )
|
||||
|
||||
|
||||
def _map_composite_breakdown(raw: dict | None) -> CompositeBreakdownResponse | None:
    """Convert a raw composite breakdown dict into a Pydantic model."""
    if raw is None:
        return None
    # All five keys are required; a missing key raises KeyError, matching the
    # original direct-indexing behavior (an upstream programming error).
    required = (
        "weights",
        "available_dimensions",
        "missing_dimensions",
        "renormalized_weights",
        "formula",
    )
    return CompositeBreakdownResponse(**{name: raw[name] for name in required})
|
||||
|
||||
router = APIRouter(tags=["scores"])
|
||||
|
||||
|
||||
@@ -32,10 +59,20 @@ async def read_score(
|
||||
composite_stale=result["composite_stale"],
|
||||
weights=result["weights"],
|
||||
dimensions=[
|
||||
DimensionScoreResponse(**d) for d in result["dimensions"]
|
||||
DimensionScoreResponse(
|
||||
dimension=d["dimension"],
|
||||
score=d["score"],
|
||||
is_stale=d["is_stale"],
|
||||
computed_at=d.get("computed_at"),
|
||||
breakdown=_map_breakdown(d.get("breakdown")),
|
||||
)
|
||||
for d in result["dimensions"]
|
||||
],
|
||||
missing_dimensions=result["missing_dimensions"],
|
||||
computed_at=result["computed_at"],
|
||||
composite_breakdown=_map_composite_breakdown(
|
||||
result.get("composite_breakdown")
|
||||
),
|
||||
)
|
||||
return APIEnvelope(status="success", data=data.model_dump(mode="json"))
|
||||
|
||||
|
||||
@@ -1,11 +1,13 @@
|
||||
"""Sentiment router — sentiment data endpoints."""
|
||||
|
||||
import json
|
||||
|
||||
from fastapi import APIRouter, Depends, Query
|
||||
from sqlalchemy.ext.asyncio import AsyncSession
|
||||
|
||||
from app.dependencies import get_db, require_access
|
||||
from app.schemas.common import APIEnvelope
|
||||
from app.schemas.sentiment import SentimentResponse, SentimentScoreResult
|
||||
from app.schemas.sentiment import CitationItem, SentimentResponse, SentimentScoreResult
|
||||
from app.services.sentiment_service import (
|
||||
compute_sentiment_dimension_score,
|
||||
get_sentiment_scores,
|
||||
@@ -14,6 +16,17 @@ from app.services.sentiment_service import (
|
||||
router = APIRouter(tags=["sentiment"])
|
||||
|
||||
|
||||
def _parse_citations(citations_json: str) -> list[CitationItem]:
    """Deserialize citations_json, defaulting to [] on invalid JSON.

    Non-dict list elements are skipped; each remaining dict is expanded into a
    CitationItem (which may still raise if its fields don't match the schema).
    """
    try:
        decoded = json.loads(citations_json)
    except (json.JSONDecodeError, TypeError):
        # Malformed JSON or a non-str/bytes input (e.g. None) — treat as empty.
        return []
    if not isinstance(decoded, list):
        return []
    return [CitationItem(**entry) for entry in decoded if isinstance(entry, dict)]
|
||||
|
||||
|
||||
@router.get("/sentiment/{symbol}", response_model=APIEnvelope)
|
||||
async def read_sentiment(
|
||||
symbol: str,
|
||||
@@ -36,6 +49,8 @@ async def read_sentiment(
|
||||
confidence=s.confidence,
|
||||
source=s.source,
|
||||
timestamp=s.timestamp,
|
||||
reasoning=s.reasoning,
|
||||
citations=_parse_citations(s.citations_json),
|
||||
)
|
||||
for s in scores
|
||||
],
|
||||
|
||||
@@ -5,8 +5,9 @@ from sqlalchemy.ext.asyncio import AsyncSession
|
||||
|
||||
from app.dependencies import get_db, require_access
|
||||
from app.schemas.common import APIEnvelope
|
||||
from app.schemas.sr_level import SRLevelResponse, SRLevelResult
|
||||
from app.services.sr_service import get_sr_levels
|
||||
from app.schemas.sr_level import SRLevelResponse, SRLevelResult, SRZoneResult
|
||||
from app.services.price_service import query_ohlcv
|
||||
from app.services.sr_service import cluster_sr_zones, get_sr_levels
|
||||
|
||||
router = APIRouter(tags=["sr-levels"])
|
||||
|
||||
@@ -15,24 +16,55 @@ router = APIRouter(tags=["sr-levels"])
|
||||
async def read_sr_levels(
    symbol: str,
    tolerance: float = Query(0.005, ge=0, le=0.1, description="Merge tolerance (default 0.5%)"),
    max_zones: int = Query(6, ge=0, description="Max S/R zones to return (default 6)"),
    _user=Depends(require_access),
    db: AsyncSession = Depends(get_db),
) -> APIEnvelope:
    """Get support/resistance levels for a symbol, sorted by strength descending.

    Also clusters the levels into at most ``max_zones`` S/R zones around the
    latest close price, and reports which levels fall inside any zone.

    Args:
        symbol: Ticker symbol; upper-cased in the response.
        tolerance: Merge tolerance passed to the level fetcher (0–0.1).
        max_zones: Maximum number of zones to compute; 0 disables zoning.

    Returns:
        APIEnvelope wrapping an SRLevelResponse payload.
    """
    levels = await get_sr_levels(db, symbol, tolerance)

    level_results = [
        SRLevelResult(
            id=lvl.id,
            price_level=lvl.price_level,
            type=lvl.type,
            strength=lvl.strength,
            detection_method=lvl.detection_method,
            created_at=lvl.created_at,
        )
        for lvl in levels
    ]

    # Compute S/R zones from the fetched levels.
    zones: list[SRZoneResult] = []
    if levels and max_zones > 0:
        # Current price comes from the latest OHLCV close; without price data
        # we cannot anchor zones, so zoning is skipped entirely.
        ohlcv_records = await query_ohlcv(db, symbol)
        if ohlcv_records:
            current_price = ohlcv_records[-1].close
            level_dicts = [
                {"price_level": lvl.price_level, "strength": lvl.strength}
                for lvl in levels
            ]
            # NOTE(review): clustering uses a fixed 0.02 tolerance, independent
            # of the request's `tolerance` query param — confirm this is intended.
            raw_zones = cluster_sr_zones(
                level_dicts, current_price, tolerance=0.02, max_zones=max_zones
            )
            zones = [SRZoneResult(**z) for z in raw_zones]

    # Filter levels to only those within at least one zone's [low, high] range.
    visible_levels: list[SRLevelResult] = []
    if zones:
        visible_levels = [
            lvl
            for lvl in level_results
            if any(z.low <= lvl.price_level <= z.high for z in zones)
        ]

    data = SRLevelResponse(
        symbol=symbol.upper(),
        levels=level_results,
        zones=zones,
        visible_levels=visible_levels,
        count=len(levels),
    )
    return APIEnvelope(status="success", data=data.model_dump())
|
||||
|
||||
Reference in New Issue
Block a user