-
Notifications
You must be signed in to change notification settings - Fork 3
Expand file tree
/
Copy pathmcp_server.py
More file actions
1011 lines (822 loc) · 32 KB
/
mcp_server.py
File metadata and controls
1011 lines (822 loc) · 32 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
"""Tessera MCP server — exposes document search tools to Claude Desktop."""
from __future__ import annotations
import logging
import logging.handlers
import sys
from collections.abc import AsyncIterator
from contextlib import asynccontextmanager
from pathlib import Path
# Ensure project root is on sys.path so the `src` package resolves
# regardless of the caller's working directory (the server is typically
# launched by an MCP client, not from the project directory).
_project_root = str(Path(__file__).parent)
if _project_root not in sys.path:
    sys.path.insert(0, _project_root)
from mcp.server.fastmcp import FastMCP
from src.config import workspace
from src.search import invalidate_search_cache
# Configure logging: file + stderr.
# The file handler keeps full DEBUG detail with bounded disk usage (rotation);
# stderr only gets WARNING+ so the server stays quiet on the console/transport
# side in normal operation.
_log_dir = Path(_project_root) / "data" / "logs"
_log_dir.mkdir(parents=True, exist_ok=True)
_file_handler = logging.handlers.RotatingFileHandler(
    _log_dir / "tessera.log",
    maxBytes=5 * 1024 * 1024,  # 5MB per file before rollover
    backupCount=3,  # keep at most 3 rotated files alongside the live log
    encoding="utf-8",
)
_file_handler.setFormatter(
    logging.Formatter("%(asctime)s %(levelname)s [%(name)s] %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
)
_file_handler.setLevel(logging.DEBUG)
_stderr_handler = logging.StreamHandler(sys.stderr)
_stderr_handler.setLevel(logging.WARNING)
_stderr_handler.setFormatter(logging.Formatter("%(levelname)s: %(message)s"))
# Root logger at DEBUG; per-handler levels do the actual filtering.
logging.basicConfig(level=logging.DEBUG, handlers=[_file_handler, _stderr_handler])
logger = logging.getLogger(__name__)
from src.search import search # noqa: E402 — kept so tests can patch mcp_server.search
from src import core # noqa: E402
@asynccontextmanager
async def lifespan(server: FastMCP) -> AsyncIterator[dict]:
    """Run auto-sync on server startup, then watch for file changes.

    On startup (when ``workspace.sync_auto`` is set): schedules an incremental
    sync and a quiet-curation pass on executor threads so the server begins
    serving immediately, then starts a polling file watcher that re-syncs on
    change. On shutdown: stops the watcher and best-effort saves a session
    summary. All startup failures are logged as non-fatal — the server still
    comes up without sync/watching.

    Yields:
        A context dict which may contain ``meta_db`` and ``watcher``.
    """
    ctx: dict = {}
    watcher = None
    if workspace.sync_auto:
        try:
            from src.graph.vector_store import OntologyVectorStore
            from src.ingestion.pipeline import IngestionPipeline
            from src.sync import FileMetaDB, run_incremental_sync

            meta_db = FileMetaDB(workspace.meta_db_path)
            vector_store = OntologyVectorStore()
            pipeline = IngestionPipeline(vector_store=vector_store)

            def _ingest(paths: list[Path]) -> tuple[int, dict[str, int]]:
                return pipeline.run(source_paths=paths)

            def _do_background_sync() -> None:
                """Run sync in background so server starts immediately."""
                try:
                    result = run_incremental_sync(
                        ws=workspace,
                        meta_db=meta_db,
                        vector_store_delete_fn=vector_store.delete_by_source,
                        ingest_fn=_ingest,
                    )
                    if result.has_changes:
                        invalidate_search_cache()
                    logger.info("Background auto-sync complete: %s", result.summary())
                except Exception as exc:
                    logger.warning("Background auto-sync failed: %s", exc)

            # Run sync in a background thread — server starts immediately.
            # Fix: use get_running_loop() inside a coroutine; get_event_loop()
            # here is deprecated since Python 3.10 and slated for removal.
            import asyncio

            loop = asyncio.get_running_loop()
            loop.run_in_executor(None, _do_background_sync)
            logger.info("Auto-sync started in background")

            # Run quiet curation in the background as well.
            # NOTE(review): this runs CONCURRENTLY with the sync above (separate
            # executor task), not strictly after it — confirm that is intended.
            def _do_quiet_curation() -> None:
                try:
                    from src.quiet_curation import run_quiet_curation

                    run_quiet_curation()
                except Exception as exc:
                    logger.debug("Quiet curation skipped: %s", exc)

            loop.run_in_executor(None, _do_quiet_curation)
            ctx["meta_db"] = meta_db

            # Start file watcher for continuous auto-sync.
            from src.file_watcher import FileWatcher

            def _on_file_change() -> None:
                """Callback: re-run incremental sync when files change."""
                try:
                    sync_result = run_incremental_sync(
                        ws=workspace,
                        meta_db=meta_db,
                        vector_store_delete_fn=vector_store.delete_by_source,
                        ingest_fn=_ingest,
                    )
                    if sync_result.has_changes:
                        invalidate_search_cache()
                    logger.info("File watcher sync: %s", sync_result.summary())
                except Exception as exc:
                    logger.warning("File watcher sync failed: %s", exc)

            watch_dirs = workspace.all_source_paths()
            watcher = FileWatcher(
                watch_dirs=watch_dirs,
                extensions=workspace.extensions,
                on_change=_on_file_change,
                poll_interval=workspace.watcher.poll_interval,
                debounce=workspace.watcher.debounce,
            )
            watcher.start()
            ctx["watcher"] = watcher
            logger.info("File watcher started for %d directories", len(watch_dirs))
        except Exception as exc:
            logger.warning("Auto-sync failed (non-fatal): %s", exc)
    try:
        yield ctx
    finally:
        if watcher:
            watcher.stop()
            logger.info("File watcher stopped")
        # Save session summary on shutdown (best-effort).
        try:
            from src.interaction_log import SESSION_ID
            from src.session_summary import save_session_summary

            il = core._get_interaction_log()
            interactions = il.get_session_interactions(SESSION_ID, limit=200)
            if interactions:
                result = save_session_summary(SESSION_ID, interactions)
                if result:
                    logger.info("Session summary saved: %s", result["file_path"])
        except Exception as exc:
            logger.debug("Session summary failed (non-fatal): %s", exc)
# Server instance — every @mcp.tool / @mcp.resource registration below attaches
# to this object. The `instructions` text is handed to the MCP client; it reads
# as guidance steering the calling model toward automatic tool use.
mcp = FastMCP(
    name="tessera",
    lifespan=lifespan,
    instructions=(
        "Tessera provides semantic search across the user's local workspace documents "
        "and cross-session memory.\n\n"
        "## Auto-use rules\n"
        "When the user asks about topics that may be in their workspace, "
        "**call unified_search first** (searches documents AND memories together):\n"
        "- Project-related content (PRDs, specs, requirements)\n"
        "- Past decisions, meeting notes, session logs\n"
        "- Previously remembered facts or preferences\n\n"
        "## Memory\n"
        "- 'Remember this' → call remember\n"
        "- 'What did I say about...' → call recall\n"
        "- 'What have I saved?' → call list_memories\n"
        "- 'Forget that memory' → call forget_memory\n\n"
        "## Workspace management\n"
        "- Cleanup requests: call suggest_cleanup first, then organize_files after confirmation\n"
        "- Project status: call project_status automatically\n"
        "- Decision questions: call extract_decisions automatically\n"
        "- Server health: call tessera_status\n\n"
        "## Workflow\n"
        "1. Call unified_search with keywords from the user's question\n"
        "2. If results are insufficient, retry with different keywords or use search_documents\n"
        "3. Use read_file for full document contents when needed\n"
        "4. Answer based on search results, citing source document names\n"
    ),
)
# --- Tools (thin wrappers delegating to core) ---
# Each wrapper exists only to expose a `core` function through MCP; the
# `description` strings are read by the client model to decide when to call.
@mcp.tool(
    description=(
        "Hybrid (semantic + keyword) search across indexed workspace documents "
        "(PRDs, decision logs, session logs, etc.). "
        "Call this tool first when the user asks about project-related content.\n\n"
        "Filter by project ID or doc_type (prd, session_log, decision_log, document)."
    )
)
def search_documents(
    query: str,
    top_k: int = 5,
    project: str | None = None,
    doc_type: str | None = None,
) -> str:
    """Search indexed documents with hybrid vector+keyword search.

    Args:
        query: Free-text search query.
        top_k: Maximum number of results.
        project: Optional project ID filter.
        doc_type: Optional doc type filter (prd/session_log/decision_log/document).
    """
    # Sync the module-level `search` reference so tests patching
    # ``mcp_server.search`` propagate into core.
    core.search = search
    return core.search_documents(query, top_k=top_k, project=project, doc_type=doc_type)


@mcp.tool(
    description=(
        "Return full contents of a file as a structured view. "
        "CSV → markdown table, XLSX → tables per sheet, MD → raw text, DOCX → paragraphs. "
        "Use when the user wants to see the complete file, not just search results."
    )
)
def view_file_full(file_path: str) -> str:
    """Return full contents of any supported file as structured text."""
    return core.view_file_full(file_path)


@mcp.tool(description="List all indexed source files.")
def list_sources() -> str:
    """List all indexed source files."""
    return core.list_sources()


@mcp.tool(description="Read file contents by absolute path.")
def read_file(file_path: str) -> str:
    """Read file contents by path."""
    return core.read_file(file_path)
@mcp.tool(
    description=(
        "Organize files in the workspace. "
        "action: 'move', 'archive', 'rename', 'list'. "
        "Always call suggest_cleanup first and get user confirmation before organizing."
    )
)
def organize_files(
    action: str,
    path: str,
    destination: str | None = None,
    new_name: str | None = None,
    recursive: bool = False,
) -> str:
    """Organize files in the workspace.

    Args:
        action: One of 'move', 'archive', 'rename', 'list'.
        path: File or directory the action applies to.
        destination: Target directory for 'move'/'archive'.
        new_name: New filename for 'rename'.
        recursive: Whether to apply the action recursively.
    """
    return core.organize_files(action, path, destination=destination, new_name=new_name, recursive=recursive)


@mcp.tool(
    description=(
        "Generate cleanup suggestions for the workspace. "
        "Detects root-level files, backup files, large files, empty directories. "
        "Call this tool first when cleanup is requested."
    )
)
def suggest_cleanup(path: str | None = None) -> str:
    """Suggest cleanup actions for the workspace (whole workspace if path is None)."""
    return core.suggest_cleanup(path)


@mcp.tool(
    description=(
        "Get project status including HANDOFF.md, recent changes, and file statistics. "
        "Call automatically when asked about project status."
    )
)
def project_status(project_id: str | None = None) -> str:
    """Get project status. If no project_id, returns all projects summary."""
    return core.project_status(project_id)


@mcp.tool(
    description=(
        "Extract decisions from session logs and decision logs. "
        "Call automatically when asked about past decisions."
    )
)
def extract_decisions(project_id: str | None = None, since: str | None = None) -> str:
    """Extract decisions from session/decision logs, optionally since an ISO date."""
    return core.extract_decisions(project_id=project_id, since=since)


@mcp.tool(
    description=(
        "Audit a PRD file for quality and completeness against a 13-section structure. "
        "Checks section coverage, Mermaid syntax, wireframes, versioning, and changelog.\n\n"
        "check_sprawl=True: Detect multiple versions of the same PRD (suggest archiving old ones)\n"
        "check_consistency=True: Check cross-PRD consistency for period selectors and tiers"
    )
)
def audit_prd(
    file_path: str,
    check_sprawl: bool = False,
    check_consistency: bool = False,
) -> str:
    """Audit a PRD file for quality and completeness."""
    return core.audit_prd(file_path, check_sprawl=check_sprawl, check_consistency=check_consistency)
# --- Memory Tools ---
@mcp.tool(
    description=(
        "Save a piece of knowledge for cross-session persistence. "
        "Use this when the user says 'remember this' or when important decisions, "
        "preferences, or facts should be preserved across conversations."
    )
)
def remember(content: str, tags: list[str] | None = None, project: str | None = None) -> str:
    """Save a memory for cross-session persistence."""
    return core.remember(content, tags=tags, project=project)


@mcp.tool(
    description=(
        "Search past memories from previous sessions. "
        "Call this when the user asks 'what did I say about...', "
        "'do you remember...', or references past conversations.\n\n"
        "Supports time filters (since/until as ISO date, e.g. '2026-03-01') "
        "and category filter (decision/preference/fact)."
    )
)
def recall(
    query: str,
    top_k: int = 5,
    since: str | None = None,
    until: str | None = None,
    category: str | None = None,
    project: str | None = None,
) -> str:
    """Search past memories with optional time, category, and project filters.

    `since`/`until` are ISO dates; `category` is decision/preference/fact.
    """
    return core.recall(query, top_k=top_k, since=since, until=until, category=category, project=project)


@mcp.tool(
    description=(
        "Auto-learn: save new knowledge and immediately index it for search. "
        "Use this to capture insights, patterns, or facts discovered during conversation."
    )
)
def learn(content: str, tags: list[str] | None = None, source: str = "auto-learn") -> str:
    """Save and immediately index new knowledge."""
    return core.learn(content, tags=tags, source=source)
# --- Knowledge Graph Tools ---
@mcp.tool(
    description=(
        "Build a knowledge graph from indexed documents showing relationships "
        "between concepts, decisions, and entities. "
        "Returns a Mermaid diagram of the document relationships.\n\n"
        "scope: 'project' (single project) or 'all' (entire workspace)\n"
        "max_nodes: limit the number of nodes in the graph (default 30)"
    )
)
def knowledge_graph(
    query: str | None = None,
    project: str | None = None,
    scope: str = "all",
    max_nodes: int = 30,
) -> str:
    """Build and return a knowledge graph as Mermaid diagram."""
    return core.knowledge_graph(query=query, project=project, scope=scope, max_nodes=max_nodes)


@mcp.tool(
    description=(
        "Show connections for a specific document or concept in the knowledge graph. "
        "Returns related documents, shared topics, and a focused Mermaid subgraph."
    )
)
def explore_connections(query: str, top_k: int = 10) -> str:
    """Explore connections around a specific topic or document."""
    return core.explore_connections(query, top_k=top_k)


# --- Unified Search ---
@mcp.tool(
    description=(
        "Search across BOTH indexed documents AND past memories in one call. "
        "Returns combined results ranked by similarity. "
        "Use this instead of calling search_documents + recall separately."
    )
)
def unified_search(
    query: str,
    top_k: int = 5,
    project: str | None = None,
    doc_type: str | None = None,
) -> str:
    """Search documents and memories together, ranked by similarity."""
    return core.unified_search(query, top_k=top_k, project=project, doc_type=doc_type)
# --- Indexing Tools ---
@mcp.tool(
    description=(
        "Index (or re-index) all workspace documents into the vector store. "
        "Run this when setting up Tessera for the first time, or when you want to "
        "rebuild the entire index from scratch. "
        "Optionally pass specific directory paths to index only those."
    )
)
def ingest_documents(paths: list[str] | None = None) -> str:
    """Full ingestion of workspace documents (all sources if paths is None)."""
    return core.ingest_documents(paths)


@mcp.tool(
    description=(
        "Incrementally sync the index with your workspace. "
        "Only processes new, changed, or deleted files since the last sync. "
        "Much faster than full ingestion. Run this when you've updated some documents."
    )
)
def sync_documents() -> str:
    """Incremental sync — only new/changed/deleted files."""
    return core.sync_documents()


# --- Operations Tools ---
@mcp.tool(
    description=(
        "Show Tessera server health: tracked files, sync history, "
        "index size, cache stats, and watcher status. "
        "Call this when asked about server status or troubleshooting."
    )
)
def tessera_status() -> str:
    """Return server health and operational status."""
    return core.tessera_status()


@mcp.tool(
    description=(
        "List saved memories with optional filtering. "
        "Use to browse what Tessera has remembered across sessions."
    )
)
def list_memories(limit: int = 20) -> str:
    """List saved memory files (up to `limit`)."""
    return core.list_memories(limit)


@mcp.tool(
    description=(
        "Delete a specific memory by filename (without .md extension). "
        "Use list_memories first to find the memory to delete."
    )
)
def forget_memory(memory_name: str) -> str:
    """Delete a saved memory file."""
    return core.forget_memory(memory_name)
# --- Freshness Tools ---
@mcp.tool(
    description=(
        "Check for stale/outdated documents that haven't been modified recently. "
        "Returns a list grouped by project showing file names and days since last update. "
        "Use this proactively to suggest document reviews."
    )
)
def check_document_freshness(days_threshold: int = 90) -> str:
    """Check for stale documents exceeding the age threshold (in days)."""
    return core.check_document_freshness(days_threshold)


@mcp.tool(
    description=(
        "Run a comprehensive health check on the Tessera workspace. "
        "Checks: config validity, dependencies, index status, stale documents, "
        "zero-result query patterns. Returns actionable recommendations."
    )
)
def health_check() -> str:
    """Run workspace health diagnostics."""
    return core.health_check()


# --- Analytics Tools ---
@mcp.tool(
    description=(
        "Show search usage analytics: total queries, top queries, zero-result queries, "
        "response times, and daily trends. Use for understanding search patterns."
    )
)
def search_analytics(days: int = 30) -> str:
    """Return search analytics summary for the last `days` days."""
    return core.search_analytics(days)
# --- Batch Memory Tools ---
@mcp.tool(
    description=(
        "Export all saved memories as JSON for backup or transfer. "
        "Returns a JSON string with all memories and their metadata."
    )
)
def export_memories() -> str:
    """Export all memories as JSON."""
    return core.export_memories()


@mcp.tool(
    description=(
        "Import memories from a JSON string (batch import). "
        "Format: [{\"content\": \"...\", \"tags\": [\"...\"], \"source\": \"...\"}]. "
        "Use export_memories to get the expected format."
    )
)
def import_memories(data: str) -> str:
    """Import memories from a JSON string (see export_memories for the format)."""
    return core.import_memories(data)


# --- Similarity Tools ---
@mcp.tool(
    description=(
        "Find documents similar to a given document. "
        "Returns related documents ranked by similarity. "
        "Use this when users ask 'what else is related to this document'."
    )
)
def find_similar(source_path: str, top_k: int = 5) -> str:
    """Find documents similar to the given source file."""
    return core.find_similar(source_path, top_k=top_k)
# --- Tag Tools ---
@mcp.tool(
    description=(
        "List all unique tags across saved memories with their counts. "
        "Useful for browsing memory categories."
    )
)
def memory_tags() -> str:
    """List all memory tags with counts."""
    return core.memory_tags()


@mcp.tool(
    description=(
        "Search memories by a specific tag. "
        "Use memory_tags first to see available tags."
    )
)
def search_by_tag(tag: str) -> str:
    """Find all memories with a specific tag."""
    return core.search_by_tag(tag)


@mcp.tool(
    description=(
        "List all memory categories with counts. "
        "Categories are auto-detected: decision, preference, fact, reference, context."
    )
)
def memory_categories() -> str:
    """List all memory categories with counts."""
    return core.memory_categories()


@mcp.tool(
    description=(
        "Search memories by category (e.g. 'decision', 'preference', 'fact'). "
        "Use memory_categories first to see available categories."
    )
)
def search_by_category(category: str) -> str:
    """Find all memories with a specific category."""
    return core.search_by_category(category)


@mcp.tool(
    description=(
        "Show decision timeline — how decisions evolved over time, grouped by topic. "
        "Detects when you changed your mind about something."
    )
)
def decision_timeline() -> str:
    """Show decision evolution timeline grouped by topic."""
    return core.decision_timeline()
@mcp.tool(
    description=(
        "Build an optimal context window for a query within a token budget. "
        "Retrieves relevant memories and documents, assembles them in priority order, "
        "and truncates to fit. Use this to prepare context for another AI tool."
    )
)
def context_window(
    query: str,
    token_budget: int = 4000,
    include_documents: bool = True,
) -> str:
    """Build a context window for a query, truncated to `token_budget` tokens."""
    return core.context_window(query, token_budget, include_documents)


@mcp.tool(
    description=(
        "Get personalized query suggestions based on your past searches and memories. "
        "Analyzes patterns to recommend what you might want to explore next."
    )
)
def smart_suggest(max_suggestions: int = 5) -> str:
    """Get personalized query suggestions (up to `max_suggestions`)."""
    return core.smart_suggest(max_suggestions)
@mcp.tool(
    description=(
        "Generate a topic map showing how your knowledge is organized. "
        "Clusters memories by shared keywords and shows topic distribution. "
        # Fix: description previously said format='mermaid', but the parameter
        # is named output_format — the client model would pass a bad argument.
        "Use output_format='mermaid' for a visual Mermaid mindmap diagram."
    )
)
def topic_map(output_format: str = "text") -> str:
    """Generate a topic map of all memories.

    Args:
        output_format: "text" (default) or "mermaid" for a mindmap diagram.
    """
    return core.topic_map(output_format)
@mcp.tool(
    description=(
        "Get knowledge statistics — total memories, category breakdown, "
        "tag distribution, growth by month, and date range. "
        "A dashboard overview of everything Tessera knows."
    )
)
def knowledge_stats() -> str:
    """Get aggregate knowledge statistics."""
    return core.knowledge_stats()


@mcp.tool(
    description=(
        "View your user profile — preferences, decisions, top topics, "
        "language preference, tool usage patterns, and knowledge areas."
    )
)
def user_profile() -> str:
    """Get user profile summary."""
    return core.user_profile()


@mcp.tool(
    description=(
        "Get a session-start context briefing with recent decisions, preferences, "
        "active topics, and last session summary. Call this at the beginning of a "
        "conversation to prime your context with the user's recent knowledge."
    )
)
def session_prime(days: int = 7) -> str:
    """Prime the session with context from the last `days` days."""
    return core.session_prime(days=days)
@mcp.tool(
    description=(
        "Trace the provenance lineage of a memory — where it came from, "
        "which session created it, and what parent memories it was derived from."
    )
)
def memory_lineage(memory_id: str) -> str:
    """Trace a memory's origin chain."""
    return core.memory_lineage(memory_id)


@mcp.tool(
    description=(
        "Get aggregate provenance statistics — how many memories have provenance, "
        "breakdown by source type and session."
    )
)
def provenance_stats() -> str:
    """Get provenance statistics."""
    return core.provenance_stats()


@mcp.tool(
    description=(
        "List all project spaces with memory counts, latest activity, "
        "and top tags. Shows how knowledge is distributed across projects."
    )
)
def list_projects() -> str:
    """List all project spaces."""
    return core.list_projects()


@mcp.tool(
    description=(
        "Assign a memory to a project space. Use this to organize memories "
        "by project context (e.g., 'tessera', 'frontend-app', 'api-design')."
    )
)
def assign_memory_project(memory_id: str, project: str) -> str:
    """Assign a memory to a project."""
    return core.assign_memory_project(memory_id, project)
@mcp.tool(
    description=(
        "Export all your knowledge in various formats. "
        "Supported formats: 'markdown' (default), 'obsidian' (with wikilinks and frontmatter), "
        "'csv' (spreadsheet-compatible), 'json' (machine-readable). "
        "Use 'obsidian' to import into Obsidian vaults."
    )
)
def export_knowledge(format: str = "markdown") -> str:
    """Export knowledge in the specified format.

    Note: the parameter shadows the `format` builtin, but renaming it would
    change the tool's public argument name for MCP clients, so it stays.
    """
    return core.export_knowledge(format)


@mcp.tool(
    description=(
        "Export memories for use in another AI tool. "
        "Supported targets: 'chatgpt' (ChatGPT memory JSON), 'gemini' (Gemini context format), "
        "'standard' (Tessera interchange format). "
        "Use this when migrating knowledge to another AI platform."
    )
)
def export_for_ai(target: str = "chatgpt") -> str:
    """Export memories in another AI tool's format."""
    return core.export_for_ai(target)


@mcp.tool(
    description=(
        "Import memories from another AI tool. "
        "Paste the exported JSON data and specify the source: 'chatgpt', 'gemini', or 'standard'. "
        "Memories will be automatically categorized and stored in Tessera."
    )
)
def import_from_ai(data: str, source: str = "chatgpt") -> str:
    """Import memories from another AI tool's export."""
    return core.import_from_ai(data, source)


@mcp.tool(
    description=(
        "Import past conversations from ChatGPT, Claude, or Gemini exports. "
        "Paste the exported JSON data and specify the source: 'chatgpt', 'claude', 'gemini', or 'text'. "
        "Tessera extracts decisions, preferences, and facts from the conversations. "
        "Use this to recover knowledge from past AI interactions."
    )
)
def import_conversations(data: str, source: str = "chatgpt") -> str:
    """Import conversations and extract knowledge."""
    return core.import_conversations(data, source)


@mcp.tool(
    description=(
        "Migrate Tessera data to the latest schema version. "
        "Creates a backup before migration. Use dry_run=True to preview changes. "
        "Handles v0.6.x through v1.0.0 data format upgrades."
    )
)
def migrate_data(dry_run: bool = False) -> str:
    """Run data migration (preview only when dry_run=True)."""
    return core.migrate_data(dry_run)
@mcp.tool(
    description=(
        "Check vault encryption status. "
        "When TESSERA_VAULT_KEY is set, memories are encrypted at rest using AES-256-CBC. "
        "All encryption is local — no cloud, no external services."
    )
)
def vault_status() -> str:
    """Get vault encryption status."""
    # Tool name and core function name intentionally differ (vault_status_info).
    return core.vault_status_info()


@mcp.tool(
    description=(
        "Toggle or check auto-learning status. "
        "When enabled, Tessera automatically extracts decisions, preferences, "
        "and facts from conversations. Call without arguments to check status."
    )
)
def toggle_auto_learn(enabled: bool | None = None) -> str:
    """Toggle auto-learning on/off, or report status when `enabled` is None."""
    return core.toggle_auto_learn(enabled)


@mcp.tool(
    description=(
        "Review recently auto-learned memories. "
        "Shows memories created by auto-extract, digest, or session summary."
    )
)
def review_learned(limit: int = 20) -> str:
    """Review up to `limit` auto-learned memories."""
    return core.review_learned(limit)
# --- MCP Resources ---
# Resources are read-only endpoints addressed by URI, unlike tools.
@mcp.resource("docs://index")
def document_index() -> str:
    """Provide a browsable index of all indexed documents."""
    return core.document_index()


@mcp.resource("workspace://status")
def workspace_status() -> str:
    """Provide current workspace status across all projects."""
    return core.workspace_status()
# --- Auto-Learn Tools ---
@mcp.tool(
    description=(
        "Digest the current conversation: extract decisions, preferences, and facts "
        "from this session's interactions and save them as memories automatically. "
        "Call this at the end of a conversation to preserve important knowledge. "
        "You can also pass a summary text to extract facts from."
    )
)
def digest_conversation(summary: str = "") -> str:
    """Extract and save knowledge from the current session."""
    return core.digest_conversation(summary)


# --- Interaction Log Tools ---
@mcp.tool(
    description=(
        "View what happened in the current or past sessions. "
        "Shows tool calls, queries, and results — useful for reviewing "
        "what was discussed and decided."
    )
)
def session_interactions(session_id: str | None = None, limit: int = 20) -> str:
    """List tool interactions from a session (current session if id is None)."""
    return core.session_interactions(session_id, limit)


@mcp.tool(
    description=(
        "View summary of recent sessions — when they started, ended, "
        "and how many tool calls were made. Useful for understanding "
        "usage patterns over time."
    )
)
def recent_sessions(limit: int = 10) -> str:
    """List up to `limit` recent session summaries."""
    return core.recent_sessions(limit)
# --- Insight Phase (v1.1.0) ---
@mcp.tool(
    description=(
        "Detect contradictions among your stored memories. "
        "Finds decisions, preferences, or facts that conflict with each other. "
        "Shows severity (HIGH/MEDIUM) and which memory is newer. "
        "Use periodically to keep your knowledge base consistent."
    )
)
def detect_contradictions() -> str:
    """Find conflicting memories."""
    return core.detect_contradictions()


@mcp.tool(
    description=(
        "Search documents using multiple query angles for better recall. "
        "Decomposes your query into 2-4 perspectives (core keywords, "
        "individual terms, reversed emphasis) and merges the best results. "
        "Each result includes a verdict: confident match, possible match, "
        "or low relevance. Use when standard search misses relevant results."
    )
)
def deep_search(
    query: str,
    top_k: int = 5,
    project: str | None = None,
    doc_type: str | None = None,
) -> str:
    """Multi-angle document search with verdict scoring."""
    # Same patch-propagation trick as search_documents: keep core's `search`
    # reference in sync with the module-level one for test patching.
    core.search = search
    return core.multi_angle_search_documents(query, top_k=top_k, project=project, doc_type=doc_type)
@mcp.tool(
    description=(
        "Search memories using multiple query angles for better recall. "
        "Like deep_search but for memories. Decomposes the query into "
        "multiple perspectives and merges results. Each result includes "
        "a confidence verdict. Use when recall misses relevant memories."
    )
)
def deep_recall(
    query: str,
    top_k: int = 5,
    since: str | None = None,
    until: str | None = None,
    category: str | None = None,
) -> str:
    """Multi-angle memory search with verdict scoring."""
    return core.multi_angle_recall(query, top_k=top_k, since=since, until=until, category=category)


@mcp.tool(
    description=(
        "Analyze confidence scores for all memories. "
        "Rates each memory based on repetition (confirmed by other memories), "
        "source diversity, recency, and category stability. "
        "Returns high-confidence memories you can trust and low-confidence "
        "ones that may need review."
    )
)
def memory_confidence() -> str:
    """Score memory reliability."""
    return core.memory_confidence()


@mcp.tool(
    description=(
        "Analyze memory health: classify all memories as healthy, stale (90+ days old), "
        "or orphaned (minimal metadata). Returns a health score, breakdown, "
        "recommendations for cleanup, and growth statistics over time."
    )
)
def memory_health() -> str:
    """Memory health analytics."""
    return core.memory_health()


@mcp.tool(
    description=(
        "List all registered plugin hooks. "
        "Shows which events have hooks attached and what scripts/functions "
        "will be called. Configure hooks in workspace.yaml under 'hooks:' section."
    )
)
def list_plugin_hooks() -> str:
    """Show registered plugin hooks."""
    return core.list_plugin_hooks()
def main() -> None:
"""Entry point for the MCP server.
Supports two transports:
- stdio (default): for Claude Desktop local integration
- sse: for remote/network access (e.g., browser extensions, remote clients)
Usage:
tessera-mcp # stdio (default)
tessera-mcp --sse # SSE on port 8395
tessera-mcp --sse 9000 # SSE on custom port
"""
import argparse
parser = argparse.ArgumentParser(description="Tessera MCP Server")
parser.add_argument(
"--sse",
nargs="?",
const=8395,
type=int,
metavar="PORT",
help="Run in SSE mode (default port: 8395)",
)
args = parser.parse_args()