test_news_mcp.py

from __future__ import annotations

import asyncio
import tempfile
from pathlib import Path

from news_mcp.dedup.cluster import dedup_and_cluster_articles
from news_mcp.storage.sqlite_store import SQLiteClusterStore
from news_mcp.enrichment.importance import compute_importance
from news_mcp.enrichment.llm_enrich import _filter_entities, _matches_blacklist
from news_mcp.entity_normalize import normalize_query, normalize_entities
from news_mcp.llm import build_extraction_prompt, call_llm, load_prompt
from news_mcp.trends_resolution import resolve_entity_via_trends
from news_mcp.mcp_server_fastmcp import _sort_clusters_by_recency


def _article(title: str, url: str = "https://example.com/x", source: str = "Src", ts: str = "Mon, 30 Mar 2026 12:00:00 GMT"):
    return {
        "title": title,
        "url": url,
        "source": source,
        "timestamp": ts,
        "summary": "summary text",
    }
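

# `_article` is the shared factory for feed items: it produces the minimal
# dict shape the clustering pipeline consumes, and the tests below override
# only the fields they actually assert on.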


def test_dedup_merges_similar_titles():
    articles = [
        _article("Trump warns Iran war could spread"),
        _article("Trump warns Iran conflict could spread"),
        _article("Unrelated sports result"),
    ]
    clustered = dedup_and_cluster_articles(articles, similarity_threshold=0.75)
    # We expect the two Trump/Iran items to be merged into one cluster in the
    # same topic bucket, leaving two clusters in total.
    total_clusters = sum(len(v) for v in clustered.values())
    assert total_clusters == 2
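

# A companion sketch, not a pinned behavior: clearly dissimilar titles should
# stay apart at the same threshold. (Assumption: whatever similarity measure
# dedup_and_cluster_articles uses, "Bitcoin rallies" and "Oil prices fall"
# score well below 0.75.)
def test_dedup_keeps_dissimilar_titles_apart():
    articles = [_article("Bitcoin rallies"), _article("Oil prices fall")]
    clustered = dedup_and_cluster_articles(articles, similarity_threshold=0.75)
    assert sum(len(v) for v in clustered.values()) == 2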


def test_sqlite_feed_hash_roundtrip():
    with tempfile.TemporaryDirectory() as td:
        db = Path(td) / "news.sqlite"
        store = SQLiteClusterStore(db)
        assert store.get_feed_hash("breakingthenews") is None
        store.set_feed_hash("breakingthenews", "abc123")
        assert store.get_feed_hash("breakingthenews") == "abc123"
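

# Overwrite sketch (assumption: set_feed_hash upserts per feed key, so the
# most recent hash always wins; the roundtrip above only proves the first
# write sticks):
def test_sqlite_feed_hash_overwrite():
    with tempfile.TemporaryDirectory() as td:
        store = SQLiteClusterStore(Path(td) / "news.sqlite")
        store.set_feed_hash("breakingthenews", "aaa111")
        store.set_feed_hash("breakingthenews", "bbb222")
        assert store.get_feed_hash("breakingthenews") == "bbb222"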


def test_sqlite_summary_cache_roundtrip():
    with tempfile.TemporaryDirectory() as td:
        db = Path(td) / "news.sqlite"
        store = SQLiteClusterStore(db)
        # Upsert a base cluster first.
        store.upsert_clusters([
            {
                "cluster_id": "cid1",
                "headline": "Headline",
                "summary": "Summary",
                "entities": ["Iran"],
                "sentiment": "negative",
                "importance": 0.5,
                "sources": ["BreakingTheNews"],
                "timestamp": "Mon, 30 Mar 2026 12:00:00 GMT",
                "articles": [],
                "first_seen": "Mon, 30 Mar 2026 12:00:00 GMT",
                "last_updated": "Mon, 30 Mar 2026 12:00:00 GMT",
            }
        ], topic="other")
        store.upsert_cluster_summary(
            "cid1",
            {
                "headline": "Headline",
                "mergedSummary": "Merged summary",
                "keyFacts": ["Fact 1"],
                "sources": ["BreakingTheNews"],
            },
        )
        cached = store.get_cluster_summary("cid1", ttl_hours=24)
        assert cached is not None
        assert cached["mergedSummary"] == "Merged summary"
        assert cached["keyFacts"] == ["Fact 1"]
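

# The cache read above is TTL-gated; a sketch of the expiry side (assumption:
# ttl_hours=0 makes a just-written row count as stale, so the lookup misses):
#
#     assert store.get_cluster_summary("cid1", ttl_hours=0) is None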


def test_sqlite_summary_cache_does_not_create_placeholder_row():
    with tempfile.TemporaryDirectory() as td:
        db = Path(td) / "news.sqlite"
        store = SQLiteClusterStore(db)
        store.upsert_cluster_summary(
            "missing",
            {
                "headline": "Missing",
                "mergedSummary": "Summary",
                "keyFacts": [],
                "sources": [],
            },
        )
        assert store.get_cluster_by_id("missing") is None
        assert store.get_cluster_summary("missing", ttl_hours=24) is None
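

# The point of the test above: summaries attach to existing clusters only.
# Writing a summary for an unknown cluster_id must not conjure a phantom
# cluster row, and the orphaned summary must stay invisible to readers.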


def test_prune_clusters_deletes_rows_older_than_retention():
    with tempfile.TemporaryDirectory() as td:
        db = Path(td) / "news.sqlite"
        store = SQLiteClusterStore(db)
        store.upsert_clusters([
            {
                "cluster_id": "fresh",
                "headline": "Fresh",
                "summary": "Fresh summary",
                "entities": ["Bitcoin"],
                "timestamp": "Wed, 01 Apr 2026 12:00:00 GMT",
                "articles": [],
            },
            {
                "cluster_id": "stale",
                "headline": "Stale",
                "summary": "Stale summary",
                "entities": ["Iran"],
                "timestamp": "Wed, 01 Apr 2026 11:00:00 GMT",
                "articles": [],
            },
        ], topic="other")
        # Backdate the stale row well past the retention window.
        with store._conn() as conn:
            conn.execute(
                "UPDATE clusters SET updated_at=? WHERE cluster_id=?",
                ("2025-01-01T00:00:00+00:00", "stale"),
            )
        deleted = store.prune_clusters(retention_days=30)
        assert deleted == 1
        assert store.get_cluster_by_id("stale") is None
        assert store.get_cluster_by_id("fresh") is not None
        state = store.get_prune_state(pruning_enabled=True, retention_days=30, interval_hours=24)
        assert state["last_prune_at"] is not None
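

# Boundary sketch (assumption: upsert_clusters stamps updated_at with the
# current time, so a generous retention window prunes nothing):
def test_prune_clusters_keeps_rows_within_retention():
    with tempfile.TemporaryDirectory() as td:
        store = SQLiteClusterStore(Path(td) / "news.sqlite")
        store.upsert_clusters([
            {
                "cluster_id": "fresh",
                "headline": "Fresh",
                "summary": "Fresh summary",
                "entities": [],
                "timestamp": "Wed, 01 Apr 2026 12:00:00 GMT",
                "articles": [],
            }
        ], topic="other")
        assert store.prune_clusters(retention_days=3650) == 0
        assert store.get_cluster_by_id("fresh") is not None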


def test_prune_if_due_skips_deletes_when_pruning_disabled():
    with tempfile.TemporaryDirectory() as td:
        db = Path(td) / "news.sqlite"
        store = SQLiteClusterStore(db)
        store.upsert_clusters([
            {
                "cluster_id": "stale",
                "headline": "Stale",
                "summary": "Stale summary",
                "entities": ["Iran"],
                "timestamp": "Wed, 01 Apr 2026 11:00:00 GMT",
                "articles": [],
            }
        ], topic="other")
        with store._conn() as conn:
            conn.execute(
                "UPDATE clusters SET updated_at=? WHERE cluster_id=?",
                ("2025-01-01T00:00:00+00:00", "stale"),
            )
        result = store.prune_if_due(pruning_enabled=False, retention_days=30, interval_hours=24)
        assert result["enabled"] is False
        assert result["deleted"] == 0
        assert store.get_cluster_by_id("stale") is not None


def test_get_latest_clusters_orders_by_updated_at_before_limit():
    with tempfile.TemporaryDirectory() as td:
        db = Path(td) / "news.sqlite"
        store = SQLiteClusterStore(db)
        store.upsert_clusters(
            [
                {
                    "cluster_id": "old",
                    "headline": "Old",
                    "summary": "Old summary",
                    "entities": ["Iran"],
                    "timestamp": "Wed, 01 Apr 2026 09:00:00 GMT",
                    "articles": [],
                },
                {
                    "cluster_id": "new",
                    "headline": "New",
                    "summary": "New summary",
                    "entities": ["Bitcoin"],
                    "timestamp": "Wed, 01 Apr 2026 11:00:00 GMT",
                    "articles": [],
                },
            ],
            topic="crypto",
        )
        # Invert the ordering signal: "old" (by feed timestamp) gets the newer
        # updated_at, so it must win only if ordering happens before the limit.
        with store._conn() as conn:
            conn.execute("UPDATE clusters SET updated_at=? WHERE cluster_id=?", ("2025-01-01T00:00:00+00:00", "new"))
            conn.execute("UPDATE clusters SET updated_at=? WHERE cluster_id=?", ("2026-01-01T00:00:00+00:00", "old"))
        latest = store.get_latest_clusters(topic="crypto", ttl_hours=24 * 365, limit=1)
        assert len(latest) == 1
        assert latest[0]["cluster_id"] == "old"


def test_get_entity_metadata_prefers_mid_scoped_row():
    with tempfile.TemporaryDirectory() as td:
        db = Path(td) / "news.sqlite"
        store = SQLiteClusterStore(db)
        store.upsert_entity_metadata("Bitcoin", canonical_label="Bitcoin", mid=None, sources=["local"])
        store.upsert_entity_metadata("Bitcoin", canonical_label="Bitcoin", mid="/m/Bitcoin", sources=["trends"])
        store.record_entity_request("Bitcoin", mid="/m/Bitcoin")
        meta = store.get_entity_metadata("Bitcoin")
        assert meta is not None
        assert meta["mid"] == "/m/Bitcoin"


def test_blacklist_filters_entities_case_insensitively():
    entities = ["Bloomberg", "Reuters", "bloomberg", "CoinDesk"]
    filtered = _filter_entities(entities, blacklist=["bloomberg"])
    assert filtered == ["Reuters", "CoinDesk"]


def test_blacklist_supports_wildcards():
    assert _matches_blacklist("Bloomberg Economics", blacklist=["bloomberg*"])
    assert _matches_blacklist("bloomberg", blacklist=["*berg"])
    assert not _matches_blacklist("Reuters", blacklist=["bloomberg*"])
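

# Combination sketch (assumption: _filter_entities matches patterns with the
# same wildcard rules as _matches_blacklist, rather than exact strings only):
def test_blacklist_wildcards_apply_during_filtering():
    filtered = _filter_entities(["Bloomberg Economics", "Reuters"], blacklist=["bloomberg*"])
    assert filtered == ["Reuters"]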


def test_query_normalization_keeps_common_shorthand_working():
    assert normalize_query("btc") == "Bitcoin"
    assert normalize_query("Trump") == "Donald Trump"
    # Names without an alias entry pass through unchanged.
    assert normalize_query("nvidia") == "nvidia"


def test_entity_normalization_deduplicates_aliases():
    assert normalize_entities(["btc", "Bitcoin", "BTC", "Ethereum"]) == ["Bitcoin", "Ethereum"]
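

# Order sketch (assumption: normalize_entities keeps first-seen order after
# collapsing aliases, which the list comparison above already implies):
def test_entity_normalization_preserves_first_seen_order():
    assert normalize_entities(["Ethereum", "btc", "BTC"]) == ["Ethereum", "Bitcoin"]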


def test_load_prompt_reads_prompt_files():
    text = load_prompt("extract_entities.prompt")
    assert "Return STRICT JSON" in text


def test_resolve_entity_falls_back_cleanly_when_provider_unavailable(monkeypatch):
    import news_mcp.trends_resolution as trends_resolution

    trends_resolution.resolve_entity_via_trends.cache_clear()
    trends_resolution._provider.cache_clear()
    monkeypatch.setattr(trends_resolution, "_provider", lambda: None)
    resolved = resolve_entity_via_trends("btc")
    assert resolved["normalized"] == "Bitcoin"
    assert resolved["canonical_label"] == "Bitcoin"
    assert resolved["mid"] is None
    assert resolved["candidates"] == []
    assert resolved["source"] == "fallback"
    trends_resolution.resolve_entity_via_trends.cache_clear()
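

# The cache_clear calls bracketing the test keep the memoized resolver from
# serving results computed with the monkeypatched provider to later tests
# (and from serving earlier cached results to this one).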


def test_sort_clusters_by_recency_prefers_newer_timestamp_over_importance():
    clusters = [
        {"headline": "older", "timestamp": "Wed, 01 Apr 2026 10:00:00 GMT", "importance": 0.9},
        {"headline": "newer", "timestamp": "Wed, 01 Apr 2026 11:00:00 GMT", "importance": 0.1},
    ]
    sorted_clusters = _sort_clusters_by_recency(clusters)
    assert [c["headline"] for c in sorted_clusters] == ["newer", "older"]
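

# Recency wins outright here even against a 9x importance gap; how equal
# timestamps are tie-broken is left to _sort_clusters_by_recency and is
# deliberately not pinned by this test.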


def test_build_extraction_prompt_is_stable_without_blacklist():
    cluster = {
        "headline": "Bloomberg reports Bitcoin rallies after US rate comments",
        "summary": "A report from Bloomberg says Bitcoin moved higher after comments from the Fed.",
        "articles": [],
    }
    prompt = build_extraction_prompt(cluster)
    assert "Bloomberg reports Bitcoin rallies" in prompt
    assert "Do NOT return empty entities" in prompt
    assert "Bloomberg" in prompt  # present in the input, not filtered here


def test_call_llm_dispatches_to_selected_provider(monkeypatch):
    async def fake_groq(model, messages, response_json=True):
        return '{"ok": true, "provider": "groq"}'

    async def fake_openai(model, messages, response_json=True):
        return '{"ok": true, "provider": "openai"}'

    monkeypatch.setattr("news_mcp.llm._call_groq", fake_groq)
    monkeypatch.setattr("news_mcp.llm._call_openai", fake_openai)
    groq = asyncio.run(call_llm("groq", "x", "sys", "user"))
    openai = asyncio.run(call_llm("openai", "x", "sys", "user"))
    assert '"provider": "groq"' in groq
    assert '"provider": "openai"' in openai
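

# Both backend entry points are patched out, so only the provider routing
# inside call_llm is exercised here; no network client is ever constructed.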


def test_refresh_skips_reprocessing_when_feed_hash_is_unchanged(monkeypatch):
    import hashlib

    import news_mcp.jobs.poller as poller
    from news_mcp.config import NEWS_FEED_URL, NEWS_FEED_URLS

    calls = {"fetch": 0, "cluster": 0, "enrich": 0, "classify": 0}
    rss_urls = [u.strip() for u in NEWS_FEED_URLS.split(",") if u.strip()] or [NEWS_FEED_URL]
    material = "\n".join(
        [
            "Bitcoin rallies|https://example.com/a|Wed, 01 Apr 2026 12:00:00 GMT",
        ]
    )
    expected_hash = hashlib.sha1(material.encode("utf-8")).hexdigest()

    async def fake_to_thread(fn, limit):
        calls["fetch"] += 1
        return [
            {
                "title": "Bitcoin rallies",
                "url": "https://example.com/a",
                "source": "Src",
                "timestamp": "Wed, 01 Apr 2026 12:00:00 GMT",
                "summary": "summary",
            }
        ]

    def fake_cluster(articles):
        calls["cluster"] += 1
        return {
            "crypto": [
                {
                    "cluster_id": "cid",
                    "headline": "Bitcoin rallies",
                    "summary": "summary",
                    "entities": [],
                    "sentiment": "neutral",
                    "importance": 0.0,
                    "sources": ["Src"],
                    "timestamp": "Wed, 01 Apr 2026 12:00:00 GMT",
                    "articles": [],
                }
            ]
        }

    def fake_enrich(cluster):
        calls["enrich"] += 1
        return cluster

    async def fake_classify(cluster):
        calls["classify"] += 1
        return cluster

    class DummyStore:
        def __init__(self, *args, **kwargs):
            self.meta = {}
            self.feed_hash = expected_hash

        def get_feed_hash(self, feed_key):
            return self.feed_hash

        def set_feed_hash(self, feed_key, last_hash):
            self.feed_hash = last_hash

        def get_cluster_by_id(self, cluster_id):
            return None

        def upsert_clusters(self, clusters, topic):
            self.meta["upserted"] = (len(clusters), topic)

        def prune_if_due(self, **kwargs):
            self.meta["prune"] = kwargs
            return {"deleted": 0}

        def set_meta(self, key, value):
            self.meta[key] = value

    monkeypatch.setattr(poller, "SQLiteClusterStore", DummyStore)
    monkeypatch.setattr(poller, "fetch_news_articles", lambda limit: [{"title": "Bitcoin rallies", "url": "https://example.com/a", "timestamp": "Wed, 01 Apr 2026 12:00:00 GMT"}])
    monkeypatch.setattr(poller.asyncio, "to_thread", fake_to_thread)
    monkeypatch.setattr(poller, "dedup_and_cluster_articles", fake_cluster)
    monkeypatch.setattr(poller, "enrich_cluster", fake_enrich)
    monkeypatch.setattr(poller, "classify_cluster_llm", fake_classify)
    poller.store = None

    async def run_once():
        await poller.refresh_clusters(topic=None, limit=80)

    asyncio.run(run_once())
    assert calls["fetch"] == 1
    assert calls["cluster"] == 0
    assert calls["enrich"] == 0
    assert calls["classify"] == 0
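

# What the assertions above pin down: DummyStore is pre-seeded with the very
# hash the fake feed produces, so refresh_clusters fetches once, sees an
# unchanged feed hash, and skips clustering, enrichment, and classification
# entirely.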


def test_importance_prefers_llm_signal():
    # Three variants with identical coverage but different sentiment
    # magnitudes: a stronger signal in either direction should never score
    # below the neutral baseline.
    base = {
        "sources": ["A", "B"],
        "articles": [{}, {}],
        "sentiment": "neutral",
        "sentimentScore": 0.0,
    }
    pos = dict(base, sentimentScore=0.9)
    neg = dict(base, sentimentScore=-0.8)
    imp_base = compute_importance(base)
    imp_pos = compute_importance(pos)
    imp_neg = compute_importance(neg)
    assert imp_pos >= imp_base
    assert imp_neg >= imp_base
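

# Coverage sketch (assumption: compute_importance is monotone in the number
# of sources and articles, all else equal):
def test_importance_increases_with_coverage():
    small = {"sources": ["A"], "articles": [{}], "sentiment": "neutral", "sentimentScore": 0.0}
    large = {"sources": ["A", "B", "C"], "articles": [{}, {}, {}], "sentiment": "neutral", "sentimentScore": 0.0}
    assert compute_importance(large) >= compute_importance(small)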