mcp_server_fastmcp.py 31 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871
  1. from __future__ import annotations
  2. import asyncio
  3. import logging
  4. from collections import Counter
  5. from datetime import datetime, timezone
  6. from email.utils import parsedate_to_datetime
  7. from fastapi import FastAPI
  8. from mcp.server.fastmcp import FastMCP
  9. from mcp.server.transport_security import TransportSecuritySettings
  10. from news_mcp.config import DEFAULT_LOOKBACK_HOURS, DEFAULT_TOPICS, DB_PATH
  11. from news_mcp.config import (
  12. NEWS_PRUNE_INTERVAL_HOURS,
  13. NEWS_PRUNING_ENABLED,
  14. NEWS_REFRESH_INTERVAL_SECONDS,
  15. NEWS_BACKGROUND_REFRESH_ENABLED,
  16. NEWS_BACKGROUND_REFRESH_ON_START,
  17. NEWS_RETENTION_DAYS,
  18. )
  19. from news_mcp.jobs.poller import refresh_clusters
  20. from news_mcp.storage.sqlite_store import SQLiteClusterStore
  21. from news_mcp.dashboard.dashboard_store import DashboardStore
  22. from news_mcp.enrichment.llm_enrich import summarize_cluster_llm
  23. from news_mcp.trends_resolution import resolve_entity_via_trends
  24. from news_mcp.llm import active_llm_config
  25. from news_mcp.entity_normalize import normalize_query
  26. from news_mcp.related_entities import related_recent_entities
# Root logger setup: INFO level, timestamped and module-tagged lines.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s %(levelname)s %(name)s: %(message)s",
)
# FastMCP server instance for the tool surface below.
# NOTE(review): DNS-rebinding protection is explicitly disabled here —
# confirm this server is only reachable from trusted hosts/networks.
mcp = FastMCP(
    "news-mcp",
    transport_security=TransportSecuritySettings(enable_dns_rebinding_protection=False),
)
  35. def _cluster_entity_haystack(cluster: dict) -> list[str]:
  36. """Collect the normalized entity clues attached to a cluster."""
  37. values: list[str] = []
  38. for ent in cluster.get("entities", []) or []:
  39. values.append(str(ent).strip().lower())
  40. for res in cluster.get("entityResolutions", []) or []:
  41. if not isinstance(res, dict):
  42. continue
  43. for key in ("normalized", "canonical_label", "mid"):
  44. val = res.get(key)
  45. if val:
  46. values.append(str(val).strip().lower())
  47. return [v for v in values if v]
  48. def _parse_cluster_timestamp(value) -> datetime:
  49. if not value:
  50. return datetime.min.replace(tzinfo=timezone.utc)
  51. text = str(value).strip()
  52. if not text:
  53. return datetime.min.replace(tzinfo=timezone.utc)
  54. try:
  55. dt = datetime.fromisoformat(text.replace("Z", "+00:00"))
  56. if dt.tzinfo is None:
  57. dt = dt.replace(tzinfo=timezone.utc)
  58. return dt.astimezone(timezone.utc)
  59. except Exception:
  60. pass
  61. try:
  62. dt = parsedate_to_datetime(text)
  63. if dt.tzinfo is None:
  64. dt = dt.replace(tzinfo=timezone.utc)
  65. return dt.astimezone(timezone.utc)
  66. except Exception:
  67. return datetime.min.replace(tzinfo=timezone.utc)
  68. def _sort_clusters_by_recency(clusters: list[dict]) -> list[dict]:
  69. return sorted(
  70. clusters,
  71. key=lambda c: (
  72. _parse_cluster_timestamp(c.get("timestamp")),
  73. float(c.get("importance", 0.0) or 0.0),
  74. ),
  75. reverse=True,
  76. )
  77. def _tool_card(name: str, description: str, inputs: list[dict], outputs: list[str], notes: list[str] | None = None) -> dict:
  78. return {
  79. "name": name,
  80. "description": description,
  81. "inputs": inputs,
  82. "outputs": outputs,
  83. "notes": notes or [],
  84. }
# Static tool cards surfaced via get_capabilities so downstream agents can
# discover inputs, outputs, and usage notes without trial-and-error calls.
NEWS_TOOL_CARDS = [
    _tool_card(
        "get_latest_events",
        "Get the newest deduplicated clusters for a topic or resolved entity-like query.",
        [
            {"name": "topic", "type": "string", "default": "crypto", "meaning": "coarse category or entity-like topic"},
            {"name": "limit", "type": "integer", "default": 5, "range": "1-20"},
            {"name": "include_articles", "type": "boolean", "default": False},
        ],
        ["headline", "summary", "entities", "sentiment", "importance", "sources", "timestamp", "articles?"],
        ["Use when you want the freshest clusters and are willing to let the server decide topic vs entity mode."],
    ),
    _tool_card(
        "get_events_for_entity",
        "Search recent clusters for a person, place, company, or theme by entity matching.",
        [
            {"name": "entity", "type": "string", "meaning": "entity label or phrase"},
            {"name": "timeframe", "type": "string", "default": "24h", "examples": ["24h", "72h", "3d"]},
            {"name": "limit", "type": "integer", "default": 10, "range": "1-30"},
            {"name": "include_articles", "type": "boolean", "default": False},
        ],
        ["headline", "summary", "entities", "sentiment", "importance", "sources", "timestamp", "articles?"],
        ["Normalization is automatic; use this for an entity-centered deep dive."],
    ),
    _tool_card(
        "get_event_summary",
        "Produce a concise LLM-written explanation for one cluster and key facts.",
        [
            {"name": "event_id", "type": "string", "meaning": "cluster_id; do not surface in user-facing prose"},
            {"name": "include_articles", "type": "boolean", "default": False},
        ],
        ["headline", "mergedSummary", "keyFacts", "sources", "articles?"],
        ["Prefer this after you have already chosen a specific cluster to explain."],
    ),
    _tool_card(
        "detect_emerging_topics",
        "Surface entities and phrases starting to matter in the recent window.",
        [{"name": "limit", "type": "integer", "default": 10, "range": "1-20"}],
        ["topic", "trend_score", "related_entities", "signal_type", "count", "avg_importance"],
        ["Good for 'what is heating up?' style questions."],
    ),
    _tool_card(
        "get_news_sentiment",
        "Estimate sentiment around an entity over a lookback window.",
        [
            {"name": "entity", "type": "string"},
            {"name": "timeframe", "type": "string", "default": "24h"},
        ],
        ["entity", "sentiment", "score", "cluster_count"],
        ["Use after locating a cluster set or entity neighborhood."],
    ),
    _tool_card(
        "get_related_recent_entities",
        "Blend local co-occurrence with Google Trends related topics, while preserving mids where available.",
        [
            {"name": "subject", "type": "string", "meaning": "canonical entity or subject phrase"},
            {"name": "timeframe", "type": "string", "default": "72h"},
            {"name": "limit", "type": "integer", "default": 10, "range": "1-25"},
            {"name": "include_trends", "type": "boolean", "default": True},
        ],
        ["subject", "related[].normalized", "related[].canonical_label", "related[].mid", "related[].sources", "related[].scores"],
        ["Use this to drill from a subject into related entities, then feed those into get_events_for_entity."],
    ),
]
# Multi-step tool recipes advertised to agents via get_capabilities.
# Each entry names an intent, the tool-call sequence, and usage notes.
NEWS_COMPOSITION_RECIPES = [
    {
        "name": "fresh-news-tail",
        "steps": [
            "get_latest_events(topic=...)",
            "optionally get_event_summary(event_id=...) for the strongest cluster",
        ],
        "notes": ["Best for a quick tail of what is happening now."]
    },
    {
        "name": "entity-deep-dive",
        "steps": [
            "get_events_for_entity(entity=...)",
            "get_event_summary(event_id=...)",
            "get_news_sentiment(entity=..., timeframe=...)",
        ],
        "notes": ["Prefer canonical entity labels when you have them; the server normalizes for you."],
    },
    {
        "name": "subject-neighborhood",
        "steps": [
            "get_related_recent_entities(subject=...)",
            "for each strong related entity, call get_events_for_entity(entity=...)",
        ],
        "notes": ["Use this when you want a graph-like expansion around a subject."]
    },
    {
        "name": "emerging-signal",
        "steps": [
            "detect_emerging_topics(limit=...)",
            "choose a topic/entity",
            "get_events_for_entity(entity=...)",
            "get_news_sentiment(entity=...)",
        ],
        "notes": ["Good for trend scouting and risk mapping."],
    },
]
# Free-form prompting tips returned verbatim to client agents.
NEWS_AGENT_TIPS = [
    "If you need a fast answer, start with get_latest_events, then summarize the strongest cluster with get_event_summary.",
    "If a user asks about a person/place/company/theme, use get_events_for_entity before broadening to get_related_recent_entities.",
    "Treat cluster_id as an internal cursor, not user-facing output; use it only for follow-up tool calls.",
    "When describing clusters, keep sources and timestamps visible so the user can assess recency and provenance.",
    "Prefer a short chain of tools over many parallel calls unless you are building a neighborhood map or comparison table.",
    "For tricky names, rely on the server’s resolver instead of inventing alias rules in the client.",
]
# Worked examples mapping a user task to a concrete tool-call chain;
# exposed via get_capabilities for few-shot guidance.
NEWS_EXAMPLE_CHAINS = [
    {
        "task": "What is happening now?",
        "chain": [
            "get_latest_events(topic=...)",
            "get_event_summary(event_id=...) if one cluster looks important",
        ],
    },
    {
        "task": "Deep dive on an entity",
        "chain": [
            "get_events_for_entity(entity=..., timeframe=...)",
            "get_news_sentiment(entity=..., timeframe=...)",
            "get_event_summary(event_id=...) for the strongest cluster",
        ],
    },
    {
        "task": "Broaden from a subject",
        "chain": [
            "get_related_recent_entities(subject=..., include_trends=true)",
            "get_events_for_entity(entity=...) for the strongest related entities",
        ],
    },
    {
        "task": "Find what is emerging",
        "chain": [
            "detect_emerging_topics(limit=...)",
            "get_events_for_entity(entity=...) on one or two emerging terms",
        ],
    },
]
  225. @mcp.tool(description="Investigate a topic and return the newest deduplicated news clusters, sorted by recency.")
  226. async def get_latest_events(topic: str = "crypto", limit: int = 5, include_articles: bool = False):
  227. limit = max(1, min(int(limit), 20))
  228. # If the caller passes an entity-like value, resolve it and use the canonical
  229. # entity as the query lens. Otherwise keep the original topic path.
  230. topic_norm = normalize_query(topic).lower()
  231. resolved = resolve_entity_via_trends(topic_norm)
  232. allowed = {t.lower() for t in DEFAULT_TOPICS}
  233. is_topic = topic_norm in allowed
  234. query_terms = {
  235. topic_norm,
  236. str(resolved.get("normalized") or "").strip().lower(),
  237. str(resolved.get("canonical_label") or "").strip().lower(),
  238. str(resolved.get("mid") or "").strip().lower(),
  239. }
  240. query_terms = {q for q in query_terms if q}
  241. store = SQLiteClusterStore(DB_PATH)
  242. if is_topic:
  243. # Cache-first: only refresh if we currently have no fresh clusters for this topic.
  244. clusters = store.get_latest_clusters(topic=topic_norm, ttl_hours=DEFAULT_LOOKBACK_HOURS, limit=limit)
  245. if not clusters:
  246. await refresh_clusters(topic=topic_norm, limit=200)
  247. clusters = store.get_latest_clusters(topic=topic_norm, ttl_hours=DEFAULT_LOOKBACK_HOURS, limit=limit)
  248. else:
  249. # Entity-aware mode: search recent clusters across all topics and match by
  250. # raw entity, canonical label, or MID.
  251. clusters = store.get_latest_clusters_all_topics(ttl_hours=DEFAULT_LOOKBACK_HOURS, limit=limit * 8)
  252. filtered = []
  253. for c in clusters:
  254. haystack = _cluster_entity_haystack(c)
  255. if any(any(term in item for item in haystack) for term in query_terms):
  256. filtered.append(c)
  257. if len(filtered) >= limit:
  258. break
  259. clusters = filtered
  260. out = []
  261. for c in _sort_clusters_by_recency(clusters):
  262. item = {
  263. "cluster_id": c.get("cluster_id"),
  264. "headline": c.get("headline"),
  265. "summary": c.get("summary"),
  266. "entities": c.get("entities", []),
  267. "sentiment": c.get("sentiment", "neutral"),
  268. "importance": c.get("importance", 0.0),
  269. "sources": c.get("sources", []),
  270. "timestamp": c.get("timestamp"),
  271. }
  272. if include_articles:
  273. # Return minimal article fields to keep responses compact.
  274. arts = c.get("articles", []) or []
  275. item["articles"] = [
  276. {
  277. "title": a.get("title"),
  278. "url": a.get("url"),
  279. "source": a.get("source"),
  280. "timestamp": a.get("timestamp"),
  281. }
  282. for a in arts
  283. if isinstance(a, dict)
  284. ]
  285. out.append(item)
  286. return out
  287. @mcp.tool(description="Investigate a person, company, place, or theme by matching extracted entities within a time window.")
  288. async def get_events_for_entity(entity: str, limit: int = 10, timeframe: str = "24h", include_articles: bool = False):
  289. limit = max(1, min(int(limit), 30))
  290. query = normalize_query(entity).strip().lower()
  291. if not query:
  292. return []
  293. resolved = resolve_entity_via_trends(query)
  294. query_terms = {
  295. query,
  296. str(resolved.get("normalized") or "").strip().lower(),
  297. str(resolved.get("canonical_label") or "").strip().lower(),
  298. str(resolved.get("mid") or "").strip().lower(),
  299. }
  300. query_terms = {q for q in query_terms if q}
  301. store = SQLiteClusterStore(DB_PATH)
  302. def _match_clusters(clusters: list[dict]) -> list[dict]:
  303. hits: list[dict] = []
  304. for c in _sort_clusters_by_recency(clusters):
  305. haystack = _cluster_entity_haystack(c)
  306. if any(any(term in item for item in haystack) for term in query_terms):
  307. hits.append(c)
  308. if len(hits) >= limit:
  309. break
  310. return hits
  311. hours = _parse_timeframe_to_hours(timeframe)
  312. clusters = store.get_latest_clusters_all_topics(ttl_hours=hours, limit=max(200, limit * 10))
  313. hits = _match_clusters(clusters)
  314. out = []
  315. for c in hits:
  316. item = {
  317. "cluster_id": c.get("cluster_id"),
  318. "headline": c.get("headline"),
  319. "summary": c.get("summary"),
  320. "entities": c.get("entities", []),
  321. "sentiment": c.get("sentiment", "neutral"),
  322. "importance": c.get("importance", 0.0),
  323. "sources": c.get("sources", []),
  324. "timestamp": c.get("timestamp"),
  325. }
  326. if include_articles:
  327. arts = c.get("articles", []) or []
  328. item["articles"] = [
  329. {
  330. "title": a.get("title"),
  331. "url": a.get("url"),
  332. "source": a.get("source"),
  333. "timestamp": a.get("timestamp"),
  334. }
  335. for a in arts
  336. if isinstance(a, dict)
  337. ]
  338. out.append(item)
  339. return out
  340. @mcp.tool(description="Return entities most commonly associated with the subject in recent clusters, optionally blended with Google Trends suggestions.")
  341. async def get_related_recent_entities(subject: str, timeframe: str = "72h", limit: int = 10, include_trends: bool = True):
  342. limit = max(1, min(int(limit), 25))
  343. hours = _parse_timeframe_to_hours(timeframe)
  344. include_trends_bool = str(include_trends).strip().lower() not in {"false", "0", "no"}
  345. store = SQLiteClusterStore(DB_PATH)
  346. result = related_recent_entities(
  347. store=store,
  348. subject=subject,
  349. timeframe_hours=hours,
  350. limit=limit,
  351. include_trends=include_trends_bool,
  352. )
  353. return result
  354. @mcp.tool(description="Investigate one cluster in depth and return a concise LLM-written explanation plus key facts.")
  355. async def get_event_summary(event_id: str, include_articles: bool = False):
  356. store = SQLiteClusterStore(DB_PATH)
  357. # Summary cache: reuse if present within TTL.
  358. cached_summary = store.get_cluster_summary(
  359. cluster_id=event_id,
  360. ttl_hours=DEFAULT_LOOKBACK_HOURS,
  361. )
  362. if cached_summary:
  363. out = {
  364. "event_id": event_id,
  365. "headline": cached_summary.get("headline"),
  366. "mergedSummary": cached_summary.get("mergedSummary"),
  367. "keyFacts": cached_summary.get("keyFacts", []),
  368. "sources": cached_summary.get("sources", []),
  369. }
  370. if include_articles:
  371. cluster = store.get_cluster_by_id(event_id)
  372. arts = (cluster or {}).get("articles", []) or []
  373. out["articles"] = [
  374. {
  375. "title": a.get("title"),
  376. "url": a.get("url"),
  377. "source": a.get("source"),
  378. "timestamp": a.get("timestamp"),
  379. }
  380. for a in arts
  381. if isinstance(a, dict)
  382. ]
  383. return out
  384. cluster = store.get_cluster_by_id(event_id)
  385. if not cluster:
  386. return {
  387. "event_id": event_id,
  388. "error": "NOT_FOUND",
  389. }
  390. articles_out = None
  391. if include_articles:
  392. arts = cluster.get("articles", []) or []
  393. articles_out = [
  394. {
  395. "title": a.get("title"),
  396. "url": a.get("url"),
  397. "source": a.get("source"),
  398. "timestamp": a.get("timestamp"),
  399. }
  400. for a in arts
  401. if isinstance(a, dict)
  402. ]
  403. summary = await summarize_cluster_llm(cluster)
  404. store.upsert_cluster_summary(event_id, summary)
  405. out = {
  406. "event_id": event_id,
  407. "headline": summary.get("headline"),
  408. "mergedSummary": summary.get("mergedSummary"),
  409. "keyFacts": summary.get("keyFacts", []),
  410. "sources": summary.get("sources", []),
  411. }
  412. if include_articles:
  413. out["articles"] = articles_out or []
  414. return out
  415. @mcp.tool(description="Explore what is starting to matter: surface emerging entities and phrases from recent clusters.")
  416. async def detect_emerging_topics(limit: int = 10):
  417. limit = max(1, min(int(limit), 20))
  418. store = SQLiteClusterStore(DB_PATH)
  419. clusters = store.get_latest_clusters_all_topics(ttl_hours=DEFAULT_LOOKBACK_HOURS, limit=200)
  420. import re
  421. entity_counts = Counter()
  422. entity_importance_sum = Counter()
  423. # co-occurrence: ent -> other_ent -> count
  424. entity_cooccur = {}
  425. phrase_counts = Counter()
  426. topic_counts = Counter()
  427. # Very light heuristics to reduce “meta entities” dominating emerging topics.
  428. # Keep it conservative: only skip obvious boilerplate.
  429. def _is_generic_entity(ent: str) -> bool:
  430. e = str(ent).strip().lower()
  431. if not e:
  432. return True
  433. if len(e) < 4:
  434. return True
  435. # common outlet-ish / meta-ish tokens
  436. if e in {"news", "latest", "breaking"}:
  437. return True
  438. return False
  439. for c in clusters:
  440. topic_counts[c.get("topic", "other")] += 1
  441. ents_in_cluster = [e for e in (c.get("entities", []) or []) if not _is_generic_entity(e)]
  442. ents_in_cluster_norm = [str(e).strip().lower() for e in ents_in_cluster if str(e).strip()]
  443. for ent in ents_in_cluster_norm:
  444. if _is_generic_entity(ent):
  445. continue
  446. entity_counts[ent] += 1
  447. try:
  448. entity_importance_sum[ent] += float(c.get("importance", 0.0) or 0.0)
  449. except Exception:
  450. pass
  451. # update co-occurrence counts
  452. for i in range(len(ents_in_cluster_norm)):
  453. a = ents_in_cluster_norm[i]
  454. if not a:
  455. continue
  456. entity_cooccur.setdefault(a, Counter())
  457. for j in range(len(ents_in_cluster_norm)):
  458. if i == j:
  459. continue
  460. b = ents_in_cluster_norm[j]
  461. if not b:
  462. continue
  463. entity_cooccur[a][b] += 1
  464. text = f"{c.get('headline','')} {c.get('summary','')}"
  465. words = [w for w in re.findall(r"[A-Za-z][A-Za-z0-9\-]{2,}", text.lower())]
  466. for i in range(len(words) - 1):
  467. phrase = f"{words[i]} {words[i+1]}"
  468. if len(phrase) > 6:
  469. phrase_counts[phrase] += 1
  470. emerging = []
  471. # Combine frequency with average importance so “big signal” rises over pure repetition.
  472. for ent, count in entity_counts.most_common(limit):
  473. avg_imp = entity_importance_sum[ent] / max(1, count)
  474. # avg_imp is typically 0..~1; keep score bounded.
  475. trend_score = 0.25 + 0.40 * min(1.0, avg_imp) + 0.08 * min(6.0, float(count))
  476. related = []
  477. for other, _cnt in (entity_cooccur.get(ent) or Counter()).most_common(3):
  478. # avoid returning the entity itself (shouldn't happen, but be safe)
  479. if other != ent:
  480. related.append(other)
  481. emerging.append({
  482. "topic": ent,
  483. "trend_score": min(0.99, round(trend_score, 2)),
  484. "related_entities": related if related else [ent],
  485. "signal_type": "entity",
  486. "count": count,
  487. "avg_importance": round(avg_imp, 3),
  488. })
  489. for phrase, count in phrase_counts.most_common(limit * 2):
  490. if any(item["topic"] == phrase for item in emerging):
  491. continue
  492. emerging.append({
  493. "topic": phrase.title(),
  494. "trend_score": min(0.99, round(0.20 + 0.10 * count, 2)),
  495. "related_entities": [],
  496. "signal_type": "phrase",
  497. "count": count,
  498. })
  499. if len(emerging) >= limit:
  500. break
  501. return emerging[:limit]
  502. @mcp.tool(description="Investigate whether sentiment around an entity is positive, negative, or neutral over a chosen lookback window.")
  503. async def get_news_sentiment(entity: str, timeframe: str = "24h"):
  504. store = SQLiteClusterStore(DB_PATH)
  505. ent = normalize_query(entity).strip().lower()
  506. resolved = resolve_entity_via_trends(ent)
  507. query_terms = {
  508. ent,
  509. str(resolved.get("normalized") or "").strip().lower(),
  510. str(resolved.get("canonical_label") or "").strip().lower(),
  511. str(resolved.get("mid") or "").strip().lower(),
  512. }
  513. query_terms = {q for q in query_terms if q}
  514. if not ent:
  515. return {
  516. "entity": entity,
  517. "sentiment": "neutral",
  518. "score": 0.0,
  519. "cluster_count": 0,
  520. }
  521. # timeframe: accept '24h' or '24'
  522. tf = str(timeframe).strip().lower()
  523. try:
  524. hours = int(tf[:-1]) if tf.endswith("h") else int(tf)
  525. except Exception:
  526. hours = 24
  527. hours = max(1, min(int(hours), 168))
  528. clusters = store.get_latest_clusters_all_topics(ttl_hours=hours, limit=500)
  529. matched = []
  530. for c in clusters:
  531. haystack = _cluster_entity_haystack(c)
  532. if any(any(term in item for item in haystack) for term in query_terms):
  533. matched.append(c)
  534. if not matched:
  535. return {
  536. "entity": entity,
  537. "sentiment": "neutral",
  538. "score": 0.0,
  539. "cluster_count": 0,
  540. }
  541. scores = []
  542. for c in matched:
  543. s = c.get("sentimentScore")
  544. if s is not None:
  545. try:
  546. scores.append(float(s))
  547. except Exception:
  548. pass
  549. avg_score = sum(scores) / len(scores) if scores else 0.0
  550. # Keep the label aligned with the numeric score.
  551. # Small magnitudes are treated as neutral to avoid noisy label flips.
  552. if avg_score >= 0.15:
  553. sentiment = "positive"
  554. elif avg_score <= -0.15:
  555. sentiment = "negative"
  556. else:
  557. sentiment = "neutral"
  558. return {
  559. "entity": entity,
  560. "sentiment": sentiment,
  561. "score": round(avg_score, 3),
  562. "cluster_count": len(matched),
  563. }
  564. @mcp.tool(description="Describe the server tool surface, how tools fit together, and output conventions for downstream agents.")
  565. async def get_capabilities():
  566. return {
  567. "server": {
  568. "name": "news-mcp",
  569. "purpose": "Recent news clusters, entity drill-down, sentiment, emerging topics, and related-entity expansion.",
  570. "output_conventions": {
  571. "cluster_ids": "Do not surface cluster_id in user-facing prose unless explicitly requested; treat it as internal navigation metadata.",
  572. "sources": "Always preserve and display sources when summarizing a cluster or entity result.",
  573. "timestamps": "Mention timestamps consistently when comparing multiple clusters or when recency matters.",
  574. },
  575. },
  576. "tools": NEWS_TOOL_CARDS,
  577. "recipes": NEWS_COMPOSITION_RECIPES,
  578. "example_chains": NEWS_EXAMPLE_CHAINS,
  579. "agent_tips": NEWS_AGENT_TIPS,
  580. "guidance": [
  581. "Use get_latest_events for a tail, get_events_for_entity for entity deep dives, and get_related_recent_entities for neighborhood expansion.",
  582. "Prefer normalized/canonical entities when possible, but the server will resolve common aliases and MIDs for you.",
  583. "When presenting results to users, summarize the cluster; avoid exposing internal IDs unless they are needed for follow-up tool calls.",
  584. ],
  585. }
  586. def _parse_timeframe_to_hours(timeframe: str) -> int:
  587. tf = str(timeframe).strip().lower()
  588. try:
  589. if tf.endswith("d"):
  590. days = int(tf[:-1])
  591. return max(1, days * 24)
  592. if tf.endswith("h"):
  593. return max(1, int(tf[:-1]))
  594. return max(1, int(tf))
  595. except Exception:
  596. return 24
from contextlib import asynccontextmanager
@asynccontextmanager
async def _lifespan(app: FastAPI):
    # Kick off the background refresher as a fire-and-forget task.
    # NOTE(review): the task is never cancelled after `yield`, so it is not
    # torn down on app shutdown — confirm this is intentional.
    asyncio.ensure_future(_background_refresh_loop())
    yield
# FastAPI app hosting both the MCP SSE transport and the dashboard REST API.
app = FastAPI(title="News MCP Server", lifespan=_lifespan)
logger = logging.getLogger("news_mcp.startup")
# MCP tools are served over SSE under /mcp.
app.mount("/mcp", mcp.sse_app())
# Shared store — single connection pool
_shared_store = SQLiteClusterStore(DB_PATH)
# Lock + flag guard so the refresh loop is only started once per process.
_refresh_lock = asyncio.Lock()
_refresh_started = False
async def _background_refresh_loop():
    """Non-blocking background refresher: prune then poll.

    Protected by an async lock so a second event-loop wake-up cannot
    start a parallel ingestion cycle.
    """
    global _refresh_started
    async with _refresh_lock:
        # Idempotent start guard: only the first caller proceeds.
        if _refresh_started:
            return
        _refresh_started = True
        logger.info("news-mcp llm config: %s", active_llm_config())
        # Prune off-thread so we do not block the event loop
        prune_result = await asyncio.to_thread(
            _shared_store.prune_if_due,
            NEWS_PRUNING_ENABLED,
            NEWS_RETENTION_DAYS,
            NEWS_PRUNE_INTERVAL_HOURS,
        )
        logger.info("startup prune_result=%s", prune_result)
        if not NEWS_BACKGROUND_REFRESH_ENABLED:
            return
        async def _loop():
            # Optional delayed start: wait one full interval before the first tick.
            if not NEWS_BACKGROUND_REFRESH_ON_START:
                logger.info("background refresh delayed start interval_seconds=%s", NEWS_REFRESH_INTERVAL_SECONDS)
                await asyncio.sleep(float(NEWS_REFRESH_INTERVAL_SECONDS))
            while True:
                try:
                    logger.info("background refresh tick start")
                    await refresh_clusters(topic=None, limit=200)
                    logger.info("background refresh tick complete")
                except Exception:
                    # A failed tick is logged, not fatal: the loop keeps running.
                    logger.exception("background refresh tick failed")
                await asyncio.sleep(float(NEWS_REFRESH_INTERVAL_SECONDS))
        # Fire-and-forget periodic poller; runs for the life of the event loop.
        asyncio.create_task(_loop())
  643. @app.get("/")
  644. def root():
  645. return {
  646. "status": "ok",
  647. "transport": "fastmcp+sse",
  648. "mount": "/mcp",
  649. "tools": [
  650. "get_latest_events",
  651. "get_events_for_entity",
  652. "get_event_summary",
  653. "detect_emerging_topics",
  654. "get_news_sentiment",
  655. "get_related_recent_entities",
  656. "get_capabilities",
  657. ],
  658. "refresh": {
  659. "enabled": NEWS_BACKGROUND_REFRESH_ENABLED,
  660. "interval_seconds": NEWS_REFRESH_INTERVAL_SECONDS,
  661. },
  662. "retention": {
  663. "lookback_hours": DEFAULT_LOOKBACK_HOURS,
  664. "retention_days": NEWS_RETENTION_DAYS,
  665. },
  666. "pruning": {
  667. "enabled": NEWS_PRUNING_ENABLED,
  668. "interval_hours": NEWS_PRUNE_INTERVAL_HOURS,
  669. },
  670. }
# ------------------------------------------------------------------
# Dashboard REST API endpoints
# ------------------------------------------------------------------
from fastapi.staticfiles import StaticFiles
from fastapi.responses import JSONResponse
# Static dashboard assets served from the local ./dashboard directory.
app.mount("/dashboard", StaticFiles(directory="dashboard", html=True), name="dashboard")
import logging as _log
# Dedicated logger for the REST endpoints below.
API_LOG = _log.getLogger("news_mcp.api")
  679. def _api_ok(data: dict) -> dict:
  680. return data
  681. def _api_err(exc: Exception, ctx: str) -> JSONResponse:
  682. API_LOG.exception(f"API error in {ctx}")
  683. return JSONResponse(status_code=500, content={"error": str(exc), "ctx": ctx})
  684. @app.get("/api/v1/health")
  685. def api_health():
  686. """Extended health + dashboard stats."""
  687. try:
  688. store = DashboardStore(_shared_store)
  689. return store.get_dashboard_stats()
  690. except Exception as e:
  691. return _api_err(e, "health")
@app.get("/api/v1/clusters")
def api_clusters(
    topic: str | None = None,
    hours: int = 24,
    limit: int = 50,
    offset: int = 0,
):
    """Paginated cluster listing.

    Returns one page of clusters plus the total row count for the same
    filter so the dashboard can render pagination controls.
    """
    try:
        store = DashboardStore(_shared_store)
        clusters = store.get_clusters_page(topic=topic, hours=hours, limit=limit, offset=offset)
        # NOTE(review): reaches into DashboardStore's private `_store._conn`
        # for the COUNT query — consider exposing a public count method.
        with store._store._conn() as conn:
            if topic and topic != "all":
                # `? || ' hours'` concatenates the bound -hours into a SQLite
                # datetime modifier, e.g. '-24 hours'.
                count_row = conn.execute(
                    "SELECT COUNT(*) FROM clusters WHERE updated_at >= datetime('now', ? || ' hours') AND topic = ?",
                    (-hours, topic),
                ).fetchone()
            else:
                count_row = conn.execute(
                    "SELECT COUNT(*) FROM clusters WHERE updated_at >= datetime('now', ? || ' hours')",
                    (-hours,),
                ).fetchone()
        total = count_row[0] if count_row else 0
        return {"clusters": clusters, "total": total, "topic": topic or "all", "hours": hours}
    except Exception as e:
        return _api_err(e, f"clusters(topic={topic},hours={hours})")
  718. @app.get("/api/v1/sentiment-series")
  719. def api_sentiment_series(
  720. topic: str | None = None,
  721. hours: int = 24,
  722. bucket_hours: float = 1.0,
  723. ):
  724. """Sentiment time-series for Chart.js."""
  725. try:
  726. store = DashboardStore(_shared_store)
  727. series = store.get_sentiment_series(topic=topic, hours=hours, bucket_hours=bucket_hours)
  728. return {"series": series, "topic": topic or "all"}
  729. except Exception as e:
  730. return _api_err(e, f"sentiment(topic={topic})")
  731. @app.get("/api/v1/entities")
  732. def api_entities(
  733. hours: int = 24,
  734. limit: int = 30,
  735. ):
  736. """Top entity frequencies."""
  737. try:
  738. store = DashboardStore(_shared_store)
  739. entities = store.get_entity_frequencies(hours=hours, limit=limit)
  740. return {"entities": entities, "hours": hours}
  741. except Exception as e:
  742. return _api_err(e, f"entities(hours={hours})")
  743. @app.get("/api/v1/cluster/{cluster_id}")
  744. def api_cluster_detail(cluster_id: str):
  745. """Full cluster detail for drill-down."""
  746. try:
  747. store = DashboardStore(_shared_store)
  748. detail = store.get_cluster_detail(cluster_id)
  749. if not detail:
  750. return JSONResponse(status_code=404, content={"error": "Cluster not found", "id": cluster_id})
  751. return detail
  752. except Exception as e:
  753. return _api_err(e, f"detail({cluster_id})")
  754. @app.get("/health")
  755. def health():
  756. return {
  757. "status": "ok",
  758. "lookback_hours": DEFAULT_LOOKBACK_HOURS,
  759. "db": str(DB_PATH),
  760. "last_refresh_at": _shared_store.get_meta("last_refresh_at"),
  761. "feeds": _shared_store.get_all_feed_states(),
  762. "pruning": _shared_store.get_prune_state(
  763. pruning_enabled=NEWS_PRUNING_ENABLED,
  764. retention_days=NEWS_RETENTION_DAYS,
  765. interval_hours=NEWS_PRUNE_INTERVAL_HOURS,
  766. ),
  767. }