Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
26 commits
Select commit Hold shift + click to select a range
9f09d34
feat: add structure reorganizer and conflict resolver
CaralHsi Jul 6, 2025
edb2619
env: add schedule package in poetry
J1awei-Yang Jul 6, 2025
922ccc5
style: reformat
CaralHsi Jul 6, 2025
1b8a9b0
Merge branch 'feat/struct-conflict-organize' of github.com:CaralHsi/M…
CaralHsi Jul 6, 2025
3478f2c
test: update tree manager test script
CaralHsi Jul 6, 2025
cfb234e
feat(merge): add handle_merge func in reorganizer & change queue in r…
J1awei-Yang Jul 6, 2025
6af5a04
test: update tree manager test script
CaralHsi Jul 6, 2025
721c362
style: reformat
J1awei-Yang Jul 6, 2025
ff17ebb
Merge branch 'feat/struct-conflict-organize' of github.com:CaralHsi/M…
CaralHsi Jul 6, 2025
c1da966
env: add schedule package in poetry
J1awei-Yang Jul 6, 2025
290b376
test: update tree manager test script
CaralHsi Jul 6, 2025
8413579
test: update tree manager test script
CaralHsi Jul 6, 2025
dbda929
feat(merge): add handle_merge func in reorganizer & change queue in r…
J1awei-Yang Jul 6, 2025
ace3555
style: reformat
J1awei-Yang Jul 6, 2025
69f4a42
merge: origin
CaralHsi Jul 7, 2025
345b973
feat: add is_organize to tree config with DEFAULT FALSE
CaralHsi Jul 7, 2025
b825cfb
chore(template): move conflict detection and resolution prompts to te…
J1awei-Yang Jul 8, 2025
2021231
refactor(reorganizer): rename `run` method to `_run_message_consumer_…
J1awei-Yang Jul 9, 2025
5206212
feat(redundancy): add redundancy framework
J1awei-Yang Jul 9, 2025
3c2c632
refactor(conflict&redundancy): rename detector and resolver classes t…
J1awei-Yang Jul 9, 2025
6d16d29
feat: add 'reason' node in graph; reason node is a node which is infe…
CaralHsi Jul 10, 2025
6ea94e0
chore: regenerate poetry.lock to include schedule
CaralHsi Jul 10, 2025
858b774
feat: add struct reorganize prompt
CaralHsi Jul 10, 2025
0d21844
feat: add struct reorganize prompt
CaralHsi Jul 10, 2025
63b08b1
feat: add recall via rag in neo4j
CaralHsi Jul 10, 2025
9ddfea3
feat: update gitignore
CaralHsi Jul 10, 2025
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@ evaluation/*tmp/
evaluation/results
evaluation/.env
evaluation/configs/*
**tree_textual_memory_locomo**
.env

# Byte-compiled / optimized / DLL files
Expand Down Expand Up @@ -165,6 +166,7 @@ venv.bak/
*.xlsx
*.json
*.pkl
*.html

# but do not ignore docs/openapi.json
!docs/openapi.json
Expand Down
213 changes: 213 additions & 0 deletions examples/basic_modules/tree_textual_memory_relation_reason_detector.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,213 @@
import uuid

from memos import log
from memos.configs.embedder import EmbedderConfigFactory
from memos.configs.graph_db import GraphDBConfigFactory
from memos.configs.llm import LLMConfigFactory
from memos.embedders.factory import EmbedderFactory
from memos.graph_dbs.factory import GraphStoreFactory
from memos.graph_dbs.item import GraphDBNode
from memos.llms.factory import LLMFactory
from memos.memories.textual.item import TreeNodeTextualMemoryMetadata
from memos.memories.textual.tree_text_memory.organize.relation_reason_detector import (
RelationAndReasoningDetector,
)


logger = log.get_logger(__name__)

# === Step 1: Initialize embedder ===
# Ollama-served nomic-embed-text produces the vectors used for node embeddings.
embedder_config = EmbedderConfigFactory.model_validate(
    {
        "backend": "ollama",
        "config": {"model_name_or_path": "nomic-embed-text:latest"},
    }
)
embedder = EmbedderFactory.from_config(embedder_config)

# === Step 2: Initialize Neo4j GraphStore ===
# Local bolt connection; `auto_create` lets the store create the database if absent.
neo4j_settings = {
    "uri": "bolt://localhost:7687",
    "user": "neo4j",
    "password": "12345678",
    "db_name": "lucy4",
    "auto_create": True,
}
graph_config = GraphDBConfigFactory(backend="neo4j", config=neo4j_settings)
graph_store = GraphStoreFactory.from_config(graph_config)

# === Step 3: Initialize LLM for pairwise relation detection ===
llm_config = LLMConfigFactory.model_validate(
    {
        "backend": "ollama",
        "config": {
            "model_name_or_path": "qwen3:0.6b",
            "temperature": 0.7,
            "max_tokens": 1024,
        },
    }
)
llm = LLMFactory.from_config(llm_config)

# === Step 4: Create mock GraphDBNodes to test relation detection ===


def _make_fact_node(
    memory: str,
    *,
    key: str,
    tags: list[str],
    background: str,
    confidence: float,
    updated_at: str,
    embedding: list[float] | None = None,
    node_id: str | None = None,
) -> GraphDBNode:
    """Build a LongTermMemory 'fact' node for this demo.

    Every mock node below shared the same boilerplate; this helper keeps one
    copy of it. A placeholder embedding ([0.1] * 10) is used unless a real
    one is supplied, matching the original hand-written nodes.
    """
    return GraphDBNode(
        id=node_id or str(uuid.uuid4()),
        memory=memory,
        metadata=TreeNodeTextualMemoryMetadata(
            memory_type="LongTermMemory",
            embedding=embedding if embedding is not None else [0.1] * 10,
            key=key,
            tags=tags,
            type="fact",
            background=background,
            confidence=confidence,
            updated_at=updated_at,
        ),
    )


node_a = _make_fact_node(
    "Caroline faced increased workload stress during the project deadline.",
    key="Workload stress",
    tags=["stress", "workload"],
    background="Project",
    confidence=0.95,
    updated_at="2024-06-28T09:00:00Z",
)

node_b = _make_fact_node(
    "After joining the support group, Caroline reported improved mental health.",
    key="Improved mental health",
    tags=["mental health", "support group"],
    background="Personal follow-up",
    confidence=0.95,
    updated_at="2024-07-10T12:00:00Z",
)

node_c = _make_fact_node(
    "Peer support groups are effective in reducing stress for LGBTQ individuals.",
    key="Support group benefits",
    tags=["LGBTQ", "support group", "stress"],
    background="General research",
    confidence=0.95,
    updated_at="2024-06-29T14:00:00Z",
)

# === D: Work pressure ➜ stress ===
node_d = _make_fact_node(
    "Excessive work pressure increases stress levels among employees.",
    key="Work pressure impact",
    tags=["stress", "work pressure"],
    background="Workplace study",
    confidence=0.9,
    updated_at="2024-06-15T08:00:00Z",
)

# === E: Stress ➜ poor sleep ===
node_e = _make_fact_node(
    "High stress levels often result in poor sleep quality.",
    key="Stress and sleep",
    tags=["stress", "sleep"],
    background="Health study",
    confidence=0.9,
    updated_at="2024-06-18T10:00:00Z",
)

# === F: Poor sleep ➜ low performance ===
node_f = _make_fact_node(
    "Employees with poor sleep show reduced work performance.",
    key="Sleep and performance",
    tags=["sleep", "performance"],
    background="HR report",
    confidence=0.9,
    updated_at="2024-06-20T12:00:00Z",
)

# The focal node gets a real embedding (and a fixed id) so the detector can
# actually retrieve semantic neighbors for it.
_FOCAL_MEMORY = "Caroline joined an LGBTQ support group to cope with work-related stress."
node = _make_fact_node(
    _FOCAL_MEMORY,
    key="Caroline LGBTQ stress",
    tags=["LGBTQ", "support group", "stress"],
    background="Personal",
    confidence=0.95,
    updated_at="2024-07-01T10:00:00Z",
    embedding=embedder.embed([_FOCAL_MEMORY])[0],
    node_id="a88db9ce-3c77-4e83-8d61-aa9ef95c957e",
)


# Persist every mock node so the detector can find them as graph neighbors.
all_nodes = (node, node_a, node_b, node_c, node_d, node_e, node_f)
for item in all_nodes:
    graph_store.add_node(item.id, item.memory, item.metadata.dict())


# === Step 5: Initialize the relation detector and run detection ===
relation_detector = RelationAndReasoningDetector(
    graph_store=graph_store,
    llm=llm,
    embedder=embedder,
)

results = relation_detector.process_node(
    node=node,
    exclude_ids=[node.id],  # exclude the focal node itself from neighbor search
    top_k=5,
)

# === Step 6: Pretty-print detected results ===
# (The original printed a second, empty "Detected Global Relations" banner
# here — a duplicate Step 6 leftover with nothing beneath it; removed.)
print("\n=== Detected Pairwise Relations ===")
for rel in results["relations"]:
    print(f" Source ID: {rel['source_id']}")
    print(f" Target ID: {rel['target_id']}")
    print(f" Relation Type: {rel['relation_type']}")
    print("------")

print("\n=== Inferred Nodes ===")
# Distinct loop name: the original `for node in ...` clobbered the focal
# `node` variable defined in Steps 4-5.
for inferred in results["inferred_nodes"]:
    print(f" New Fact: {inferred.memory}")
    print(f" Sources: {inferred.metadata.sources}")
    print("------")

print("\n=== Sequence Links (FOLLOWS) ===")
for link in results["sequence_links"]:
    print(f" From: {link['from_id']} -> To: {link['to_id']}")
    print("------")

print("\n=== Aggregate Concepts ===")
for agg in results["aggregate_nodes"]:
    print(f" Concept Key: {agg.metadata.key}")
    print(f" Concept Memory: {agg.memory}")
    print(f" Sources: {agg.metadata.sources}")
    print("------")
5 changes: 5 additions & 0 deletions examples/core_memories/tree_textual_memory.py
Original file line number Diff line number Diff line change
Expand Up @@ -183,6 +183,7 @@ def embed_memory_item(memory: str) -> list[float]:

for m_list in memory:
my_tree_textual_memory.add(m_list)
my_tree_textual_memory.memory_manager.wait_reorganizer()

results = my_tree_textual_memory.search(
"Talk about the user's childhood story?",
Expand Down Expand Up @@ -211,6 +212,7 @@ def embed_memory_item(memory: str) -> list[float]:

for m_list in doc_memory:
my_tree_textual_memory.add(m_list)
my_tree_textual_memory.memory_manager.wait_reorganizer()

results = my_tree_textual_memory.search(
"Tell me about what memos consist of?",
Expand All @@ -222,6 +224,9 @@ def embed_memory_item(memory: str) -> list[float]:
print(f"{i}'th similar result is: " + str(r["memory"]))
print(f"Successfully search {len(results)} memories")

# close the synchronous thread in memory manager
my_tree_textual_memory.memory_manager.close()


# my_tree_textual_memory.dump
my_tree_textual_memory.dump("tmp/my_tree_textual_memory")
Expand Down
3 changes: 2 additions & 1 deletion examples/data/config/tree_config.json
Original file line number Diff line number Diff line change
Expand Up @@ -33,5 +33,6 @@
"auto_create": true,
"embedding_dimension": 768
}
}
},
"reorganize": false
}
17 changes: 16 additions & 1 deletion poetry.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

1 change: 1 addition & 0 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,7 @@ fastapi = {extras = ["all"], version = "^0.115.12"}
sentence-transformers = "^4.1.0"
sqlalchemy = "^2.0.41"
redis = "^6.2.0"
schedule = "^1.2.2"

[tool.poetry.group.dev]
optional = false
Expand Down
5 changes: 5 additions & 0 deletions src/memos/configs/memory.py
Original file line number Diff line number Diff line change
Expand Up @@ -161,6 +161,11 @@ class TreeTextMemoryConfig(BaseTextMemoryConfig):
description="Internet retriever configuration (optional)",
)

reorganize: bool | None = Field(
    False,
    # Original description was copy-pasted boilerplate ("Optional description
    # for this memory configuration.") and did not describe the field.
    description="Whether to enable background memory reorganization (default: False).",
)


# ─── 3. Global Memory Config Factory ──────────────────────────────────────────

Expand Down
46 changes: 46 additions & 0 deletions src/memos/graph_dbs/item.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,46 @@
import uuid

from typing import Any, Literal

from pydantic import BaseModel, ConfigDict, Field, field_validator

from memos.memories.textual.item import TextualMemoryItem


class GraphDBNode(TextualMemoryItem):
    """A node in the graph database; payload-identical alias of TextualMemoryItem."""


class GraphDBEdge(BaseModel):
    """Represents an edge in a graph database (corresponds to a Neo4j relationship)."""

    id: str = Field(
        default_factory=lambda: str(uuid.uuid4()), description="Unique identifier for the edge"
    )
    source: str = Field(..., description="Source node ID")
    target: str = Field(..., description="Target node ID")
    type: Literal["RELATED", "PARENT"] = Field(
        ..., description="Relationship type (must be one of 'RELATED', 'PARENT')"
    )
    properties: dict[str, Any] | None = Field(
        default=None, description="Additional properties for the edge"
    )

    # Reject unknown fields instead of silently ignoring them.
    model_config = ConfigDict(extra="forbid")

    @field_validator("id")
    @classmethod
    def validate_id(cls, v):
        """Validate that ID is a valid UUID string.

        Bug fixed: the original `uuid.UUID(v, version=4)` raised its own
        ValueError for malformed input (so the custom message was unreachable),
        and passing `version=4` overwrites the version bits, so the version was
        never actually checked. We now validate via try/except and accept any
        well-formed UUID, matching what the original effectively allowed.
        """
        if not isinstance(v, str):
            raise ValueError("ID must be a valid UUID string")
        try:
            uuid.UUID(v)
        except ValueError as err:
            raise ValueError("ID must be a valid UUID string") from err
        return v

    @classmethod
    def from_dict(cls, data: dict[str, Any]) -> "GraphDBEdge":
        """Create a GraphDBEdge from a dictionary of field values."""
        return cls(**data)

    def to_dict(self) -> dict[str, Any]:
        """Convert to dictionary format, omitting None-valued fields."""
        return self.model_dump(exclude_none=True)
Loading