diff --git a/README.md b/README.md
index e2dcd0c..570a18f 100644
--- a/README.md
+++ b/README.md
@@ -95,6 +95,33 @@ memory.add("User likes coffee", user_id="user123")
 results = memory.search("user preferences", user_id="user123")
 for result in results.get('results', []):
     print(f"- {result.get('memory')}")
+
+# Advanced filters
+results = memory.search(
+    "work updates",
+    filters={
+        "created_after": "2024-01-01",
+        "min_importance": 0.7,
+        "tags": ["work", "urgent"],
+        "tag_logic": "AND",
+        "memory_types": ["long_term"],
+        "metadata_contains": {"category": "shopping"},
+    },
+    user_id="user123",
+)
+
+# Fluent filter builder
+from powermem import FilterBuilder
+
+filters = (
+    FilterBuilder()
+    .after("2024-01-01")
+    .importance(minimum=0.7)
+    .tags(["work"], logic="AND")
+    .metadata_contains({"category": "shopping"})
+    .build()
+)
+results = memory.search("work updates", filters=filters, user_id="user123")
 ```
 
 For more detailed examples and usage patterns, see the [Getting Started Guide](docs/guides/0001-getting_started.md).
@@ -223,4 +250,4 @@ The MCP server provides tools for memory management including adding, searching,
 
 ## 📄 License
 
-This project is licensed under the Apache License 2.0 - see the [LICENSE](LICENSE) file for details.
\ No newline at end of file
+This project is licensed under the Apache License 2.0 - see the [LICENSE](LICENSE) file for details.
diff --git a/src/powermem/__init__.py b/src/powermem/__init__.py
index fdd8e73..8804abd 100644
--- a/src/powermem/__init__.py
+++ b/src/powermem/__init__.py
@@ -15,6 +15,7 @@
 from .core.async_memory import AsyncMemory
 from .core.base import MemoryBase
 from .user_memory import UserMemory
+from .search import FilterBuilder
 
 # Import configuration loader
 from .config_loader import load_config_from_env, create_config, validate_config, auto_config
diff --git a/src/powermem/config_loader.py b/src/powermem/config_loader.py
index 2796140..099813f 100644
--- a/src/powermem/config_loader.py
+++ b/src/powermem/config_loader.py
@@ -396,6 +396,10 @@ class EmbeddingSettings(_BasePowermemSettings):
         default=None,
         validation_alias=AliasChoices("EMBEDDING_DIMS", "DIMS"),
     )
+    ollama_base_url: Optional[str] = Field(
+        default=None,
+        validation_alias=AliasChoices("OLLAMA_EMBEDDING_BASE_URL", "OLLAMA_BASE_URL"),
+    )
 
     def to_config(self) -> Dict[str, Any]:
         embedding_provider = self.provider.lower()
@@ -405,7 +409,7 @@ def to_config(self) -> Dict[str, Any]:
         )
         provider_settings = config_cls()
         overrides = {}
-        for field in ("api_key", "model", "embedding_dims"):
+        for field in ("api_key", "model", "embedding_dims", "ollama_base_url"):
             if field in self.model_fields_set:
                 value = getattr(self, field)
                 if value is not None:
diff --git a/src/powermem/core/async_memory.py b/src/powermem/core/async_memory.py
index 503a04d..9487f82 100644
--- a/src/powermem/core/async_memory.py
+++ b/src/powermem/core/async_memory.py
@@ -553,6 +553,8 @@ async def _simple_add_async(
             "category": category,
             "metadata": enhanced_metadata or {},
             "filters": filters or {},
+            "scope": scope,
+            "memory_type": memory_type,
             "created_at": get_current_datetime(),
             "updated_at": get_current_datetime(),
         }
@@ -734,7 +736,9 @@ async def _intelligent_add_async(
                     run_id=run_id,
                     metadata=metadata,
                     filters=filters,
-                    existing_embeddings=fact_embeddings
+                    existing_embeddings=fact_embeddings,
+                    scope=scope,
+                    memory_type=memory_type,
                 )
                 results.append({
                     "id": memory_id,
@@ -870,6 +874,8 @@ async def _create_memory_async(
         metadata: Optional[Dict[str, Any]] = None,
         filters: Optional[Dict[str, Any]] = None,
         existing_embeddings: Optional[Dict[str, Any]] = None,
+        scope: Optional[str] = None,
+        memory_type: Optional[str] = None,
     ) -> int:
         """Create a memory asynchronously with optional embeddings."""
         # Validate content is not empty
@@ -913,6 +919,8 @@ async def _create_memory_async(
             "category": category,
             "metadata": enhanced_metadata or {},
             "filters": filters or {},
+            "scope": scope,
+            "memory_type": memory_type,
             "created_at": get_current_datetime(),
             "updated_at": get_current_datetime(),
         }
diff --git a/src/powermem/core/memory.py b/src/powermem/core/memory.py
index 2a43f7f..67918ec 100644
--- a/src/powermem/core/memory.py
+++ b/src/powermem/core/memory.py
@@ -682,6 +682,8 @@ def _simple_add(
             "category": category,
             "metadata": enhanced_metadata or {},
             "filters": filters or {},
+            "scope": scope,
+            "memory_type": memory_type,
             "created_at": get_current_datetime(),
             "updated_at": get_current_datetime(),
         }
@@ -860,7 +862,9 @@ def _intelligent_add(
                     run_id=run_id,
                     metadata=metadata,
                     filters=filters,
-                    existing_embeddings=fact_embeddings
+                    existing_embeddings=fact_embeddings,
+                    scope=scope,
+                    memory_type=memory_type,
                 )
                 results.append({
                     "id": memory_id,
@@ -994,6 +998,8 @@ def _create_memory(
         metadata: Optional[Dict[str, Any]] = None,
         filters: Optional[Dict[str, Any]] = None,
         existing_embeddings: Optional[Dict[str, Any]] = None,
+        scope: Optional[str] = None,
+        memory_type: Optional[str] = None,
     ) -> int:
         """Create a memory with optional embeddings."""
         # Validate content is not empty
@@ -1037,6 +1043,8 @@ def _create_memory(
             "category": category,
             "metadata": enhanced_metadata or {},
             "filters": filters or {},
+            "scope": scope,
+            "memory_type": memory_type,
             "created_at": get_current_datetime(),
             "updated_at": get_current_datetime(),
         }
diff --git a/src/powermem/integrations/embeddings/config/providers.py b/src/powermem/integrations/embeddings/config/providers.py
index d5e1dc5..7122608 100644
--- a/src/powermem/integrations/embeddings/config/providers.py
+++ b/src/powermem/integrations/embeddings/config/providers.py
@@ -87,7 +87,10 @@ class OllamaEmbeddingConfig(BaseEmbedderConfig):
     model: Optional[str] = Field(default=None)
     ollama_base_url: Optional[str] = Field(
         default=None,
-        validation_alias=AliasChoices("OLLAMA_EMBEDDING_BASE_URL"),
+        validation_alias=AliasChoices(
+            "OLLAMA_EMBEDDING_BASE_URL",
+            "OLLAMA_BASE_URL",
+        ),
     )
 
 
diff --git a/src/powermem/search/__init__.py b/src/powermem/search/__init__.py
new file mode 100644
index 0000000..6bb2b84
--- /dev/null
+++ b/src/powermem/search/__init__.py
@@ -0,0 +1,5 @@
+"""Search utilities for PowerMem."""
+
+from .filters import FilterBuilder
+
+__all__ = ["FilterBuilder"]
diff --git a/src/powermem/search/filters.py b/src/powermem/search/filters.py
new file mode 100644
index 0000000..2c187c2
--- /dev/null
+++ b/src/powermem/search/filters.py
@@ -0,0 +1,85 @@
+"""Filter builder utilities for memory search."""
+
+from __future__ import annotations
+
+from datetime import datetime
+from typing import Any, Dict, List, Optional
+
+
+class FilterBuilder:
+    """Fluent builder for advanced memory search filters."""
+
+    def __init__(self) -> None:
+        self._filters: Dict[str, Any] = {}
+
+    def created_after(self, value: datetime | str) -> "FilterBuilder":
+        self._filters["created_after"] = value
+        return self
+
+    def created_before(self, value: datetime | str) -> "FilterBuilder":
+        self._filters["created_before"] = value
+        return self
+
+    def updated_after(self, value: datetime | str) -> "FilterBuilder":
+        self._filters["updated_after"] = value
+        return self
+
+    def updated_before(self, value: datetime | str) -> "FilterBuilder":
+        self._filters["updated_before"] = value
+        return self
+
+    def after(self, value: datetime | str) -> "FilterBuilder":
+        return self.created_after(value)
+
+    def before(self, value: datetime | str) -> "FilterBuilder":
+        return self.created_before(value)
+
+    def importance(self, minimum: Optional[float] = None, maximum: Optional[float] = None) -> "FilterBuilder":
+        if minimum is not None:
+            self._filters["min_importance"] = minimum
+        if maximum is not None:
+            self._filters["max_importance"] = maximum
+        return self
+
+    def retention(self, minimum: Optional[float] = None, maximum: Optional[float] = None) -> "FilterBuilder":
+        if minimum is not None:
+            self._filters["min_retention"] = minimum
+        if maximum is not None:
+            self._filters["max_retention"] = maximum
+        return self
+
+    def memory_types(self, memory_types: List[str]) -> "FilterBuilder":
+        self._filters["memory_types"] = memory_types
+        return self
+
+    def tags(self, tags: List[str], logic: str = "OR") -> "FilterBuilder":
+        self._filters["tags"] = tags
+        self._filters["tag_logic"] = logic
+        return self
+
+    def scopes(self, scopes: List[str]) -> "FilterBuilder":
+        self._filters["scopes"] = scopes
+        return self
+
+    def user_ids(self, user_ids: List[str]) -> "FilterBuilder":
+        self._filters["user_ids"] = user_ids
+        return self
+
+    def agent_ids(self, agent_ids: List[str]) -> "FilterBuilder":
+        self._filters["agent_ids"] = agent_ids
+        return self
+
+    def metadata_contains(self, mapping: Dict[str, Any]) -> "FilterBuilder":
+        existing = self._filters.get("metadata_contains", {})
+        existing.update(mapping)
+        self._filters["metadata_contains"] = existing
+        return self
+
+    def metadata_equals(self, mapping: Dict[str, Any]) -> "FilterBuilder":
+        existing = self._filters.get("metadata_equals", {})
+        existing.update(mapping)
+        self._filters["metadata_equals"] = existing
+        return self
+
+    def build(self) -> Dict[str, Any]:
+        return dict(self._filters)
diff --git a/src/powermem/settings.py b/src/powermem/settings.py
index d023597..95aca34 100644
--- a/src/powermem/settings.py
+++ b/src/powermem/settings.py
@@ -32,6 +32,7 @@ def settings_config(
     env_prefix: str = "",
     extra: str = "ignore",
     arbitrary_types_allowed: bool = True,
+    populate_by_name: bool = True,
     env_file: Optional[str] = _DEFAULT_ENV_FILE,
 ) -> SettingsConfigDict:
     return SettingsConfigDict(
@@ -41,4 +42,5 @@ def settings_config(
         env_file=env_file,
         env_file_encoding="utf-8",
         arbitrary_types_allowed=arbitrary_types_allowed,
+        populate_by_name=populate_by_name,
     )
diff --git a/src/powermem/storage/adapter.py b/src/powermem/storage/adapter.py
index 581cb22..48401ea 100644
--- a/src/powermem/storage/adapter.py
+++ b/src/powermem/storage/adapter.py
@@ -61,6 +61,31 @@ def add_memory(self, memory_data: Dict[str, Any]) -> int:
         # Create vector from content using embedding service
         content = memory_data.get("content", "")
         metadata = memory_data.get("metadata", {})
+        intelligence = metadata.get("intelligence", {}) if isinstance(metadata, dict) else {}
+
+        importance_score = memory_data.get("importance_score")
+        if importance_score is None and isinstance(metadata, dict):
+            importance_score = metadata.get("importance_score")
+        if importance_score is None and isinstance(intelligence, dict):
+            importance_score = intelligence.get("importance_score")
+
+        retention_score = memory_data.get("retention_score")
+        if retention_score is None and isinstance(metadata, dict):
+            retention_score = metadata.get("retention_score")
+        if retention_score is None and isinstance(intelligence, dict):
+            retention_score = intelligence.get("current_retention", intelligence.get("retention_score"))
+
+        memory_type = memory_data.get("memory_type")
+        if memory_type is None and isinstance(metadata, dict):
+            memory_type = metadata.get("memory_type")
+
+        scope = memory_data.get("scope")
+        if scope is None and isinstance(metadata, dict):
+            scope = metadata.get("scope")
+
+        tags = memory_data.get("tags")
+        if tags is None and isinstance(metadata, dict):
+            tags = metadata.get("tags")
 
         # Route to target store (main or sub store)
         target_store = self._route_to_store(metadata)
@@ -103,6 +128,11 @@ def add_memory(self, memory_data: Dict[str, Any]) -> int:
             "updated_at": serialize_datetime(memory_data.get("updated_at", "")),
             "category": memory_data.get("category", ""),
             "fulltext_content": content,  # For full-text search
+            "importance_score": importance_score,
+            "retention_score": retention_score,
+            "memory_type": memory_type,
+            "scope": scope,
+            "tags": tags,
         }
 
         # Add sparse embedding to payload if available
@@ -126,6 +156,172 @@ def add_memory(self, memory_data: Dict[str, Any]) -> int:
             raise ValueError("Failed to insert memory: no ID returned from vector store")
         memory_id = generated_ids[0]  # Get the first (and only) generated Snowflake ID
         return memory_id
+
+    def _normalize_search_filters(
+        self,
+        filters: Optional[Dict[str, Any]],
+        target_store: VectorStoreBase,
+    ) -> Optional[Dict[str, Any]]:
+        if not filters:
+            return None
+
+        if hasattr(filters, "model_dump"):
+            filters = filters.model_dump(exclude_none=True)
+
+        if not isinstance(filters, dict):
+            raise ValueError("Search filters must be a dictionary of filter options.")
+
+        filters = serialize_datetime(filters)
+        filters = filters.copy()
+
+        from powermem.storage.sqlite.sqlite_vector_store import SQLiteVectorStore
+
+        is_sqlite = isinstance(target_store, SQLiteVectorStore)
+        advanced_keys = {
+            "created_after",
+            "created_before",
+            "updated_after",
+            "updated_before",
+            "min_importance",
+            "max_importance",
+            "min_retention",
+            "max_retention",
+            "memory_types",
+            "tags",
+            "tag_logic",
+            "user_ids",
+            "agent_ids",
+            "scopes",
+            "metadata_contains",
+            "metadata_equals",
+        }
+
+        base_logic = {}
+        if "AND" in filters or "OR" in filters:
+            if "AND" in filters:
+                base_logic["AND"] = filters.pop("AND")
+            if "OR" in filters:
+                base_logic["OR"] = filters.pop("OR")
+
+        conditions: List[Dict[str, Any]] = []
+
+        def add_condition(condition: Dict[str, Any]) -> None:
+            if condition:
+                conditions.append(condition)
+
+        def require_list(name: str, value: Any) -> List[Any]:
+            if not isinstance(value, list) or not value:
+                raise ValueError(f"Filter '{name}' must be a non-empty list.")
+            return value
+
+        def require_number(name: str, value: Any) -> float:
+            if not isinstance(value, (int, float)):
+                raise ValueError(f"Filter '{name}' must be a number.")
+            return float(value)
+
+        created_after = filters.pop("created_after", None)
+        if created_after is not None:
+            add_condition({"created_at": {"gte": created_after}})
+
+        created_before = filters.pop("created_before", None)
+        if created_before is not None:
+            add_condition({"created_at": {"lte": created_before}})
+
+        updated_after = filters.pop("updated_after", None)
+        if updated_after is not None:
+            add_condition({"updated_at": {"gte": updated_after}})
+
+        updated_before = filters.pop("updated_before", None)
+        if updated_before is not None:
+            add_condition({"updated_at": {"lte": updated_before}})
+
+        min_importance = filters.pop("min_importance", None)
+        if min_importance is not None:
+            add_condition({"importance_score": {"gte": require_number("min_importance", min_importance)}})
+
+        max_importance = filters.pop("max_importance", None)
+        if max_importance is not None:
+            add_condition({"importance_score": {"lte": require_number("max_importance", max_importance)}})
+
+        min_retention = filters.pop("min_retention", None)
+        if min_retention is not None:
+            add_condition({"retention_score": {"gte": require_number("min_retention", min_retention)}})
+
+        max_retention = filters.pop("max_retention", None)
+        if max_retention is not None:
+            add_condition({"retention_score": {"lte": require_number("max_retention", max_retention)}})
+
+        memory_types = filters.pop("memory_types", None)
+        if memory_types is not None:
+            add_condition({"memory_type": {"in": require_list("memory_types", memory_types)}})
+
+        user_ids = filters.pop("user_ids", None)
+        if user_ids is not None:
+            add_condition({"user_id": {"in": require_list("user_ids", user_ids)}})
+
+        agent_ids = filters.pop("agent_ids", None)
+        if agent_ids is not None:
+            add_condition({"agent_id": {"in": require_list("agent_ids", agent_ids)}})
+
+        scopes = filters.pop("scopes", None)
+        if scopes is not None:
+            add_condition({"scope": {"in": require_list("scopes", scopes)}})
+
+        tags = filters.pop("tags", None)
+        tag_logic = filters.pop("tag_logic", "OR")
+        if tags is not None:
+            tags_list = require_list("tags", tags)
+            if tag_logic not in {"AND", "OR"}:
+                raise ValueError("Filter 'tag_logic' must be either 'AND' or 'OR'.")
+            if is_sqlite:
+                op = "contains_all" if tag_logic == "AND" else "contains_any"
+                add_condition({"tags": {op: tags_list}})
+            else:
+                tag_conditions = []
+                for tag in tags_list:
+                    tag_conditions.append({"tags": {"like": f"%\"{tag}\"%"}})
+                add_condition({tag_logic: tag_conditions})
+
+        metadata_contains = filters.pop("metadata_contains", None)
+        if metadata_contains is not None:
+            if not isinstance(metadata_contains, dict):
+                raise ValueError("Filter 'metadata_contains' must be an object of key-value pairs.")
+            for key, value in metadata_contains.items():
+                if key == "category":
+                    if isinstance(value, list):
+                        add_condition({"category": {"in": value}})
+                    else:
+                        add_condition({"category": value})
+                    continue
+                if isinstance(value, list):
+                    add_condition({f"metadata.{key}": {"contains_any": value}})
+                else:
+                    add_condition({f"metadata.{key}": {"contains": value}})
+
+        metadata_equals = filters.pop("metadata_equals", None)
+        if metadata_equals is not None:
+            if not isinstance(metadata_equals, dict):
+                raise ValueError("Filter 'metadata_equals' must be an object of key-value pairs.")
+            for key, value in metadata_equals.items():
+                add_condition({f"metadata.{key}": value})
+
+        for key, value in filters.items():
+            if key in advanced_keys:
+                continue
+            add_condition({key: value})
+
+        if base_logic:
+            if conditions:
+                return {"AND": [base_logic, *conditions]}
+            return base_logic
+
+        if not conditions:
+            return None
+
+        if len(conditions) == 1:
+            return conditions[0]
+
+        return {"AND": conditions}
 
     def search_memories(
         self,
@@ -162,6 +358,9 @@
         # Route to target store (main or sub store)
         target_store = self._route_to_store(effective_filters)
 
+        # Normalize advanced filters for the target store
+        normalized_filters = self._normalize_search_filters(effective_filters, target_store)
+
        # Unified search method - try OceanBase format first, fallback to SQLite
        # Pass query text to enable hybrid search (vector + full-text search)
        try:
@@ -171,13 +370,13 @@
             import inspect
             search_sig = inspect.signature(target_store.search)
             if 'sparse_embedding' in search_sig.parameters:
-                results = target_store.search(search_query, vectors=query_vector, limit=limit, filters=effective_filters, sparse_embedding=sparse_embedding)
+                results = target_store.search(search_query, vectors=query_vector, limit=limit, filters=normalized_filters, sparse_embedding=sparse_embedding)
             else:
-                results = target_store.search(search_query, vectors=query_vector, limit=limit, filters=effective_filters)
+                results = target_store.search(search_query, vectors=query_vector, limit=limit, filters=normalized_filters)
         except TypeError:
             # Fallback to SQLite format (doesn't support query text parameter)
             # Pass filters to ensure filtering works correctly
-            results = target_store.search(search_query if query else "", vectors=[query_vector], limit=limit, filters=effective_filters)
+            results = target_store.search(search_query if query else "", vectors=[query_vector], limit=limit, filters=normalized_filters)
 
         # Convert results to unified format
         memories = []
diff --git a/src/powermem/storage/sqlite/sqlite_vector_store.py b/src/powermem/storage/sqlite/sqlite_vector_store.py
index 9e7b27c..c39def3 100644
--- a/src/powermem/storage/sqlite/sqlite_vector_store.py
+++ b/src/powermem/storage/sqlite/sqlite_vector_store.py
@@ -120,14 +120,10 @@ def search(self, query: str, vectors: List[List[float]] = None, limit: int = 5,
 
         # Apply filters if provided
         if filters:
-            conditions = []
-            for key, value in filters.items():
-                # Filter by JSON field in payload
-                conditions.append(f"json_extract(payload, '$.{key}') = ?")
-                query_params.append(value)
-
-            if conditions:
-                query_sql += " WHERE " + " AND ".join(conditions)
+            where_clause, params = self._build_where_clause(filters)
+            if where_clause:
+                query_sql += f" WHERE {where_clause}"
+                query_params.extend(params)
             logger.info(f"SQLite search with filters: {query_sql}, params: {query_params}")
         else:
             logger.debug("SQLite search: filters provided but empty after processing")
@@ -159,6 +155,141 @@ def search(self, query: str, vectors: List[List[float]] = None, limit: int = 5,
         # Sort by similarity (descending) and return top results
         results.sort(key=lambda x: x.score, reverse=True)
         return results[:limit]
+
+    def _build_where_clause(self, filters: Dict[str, Any]) -> tuple[Optional[str], List[Any]]:
+        def get_json_extract(field: str) -> str:
+            if field.startswith("metadata."):
+                path = f"$.metadata.{field.split('.', 1)[1]}"
+            else:
+                path = f"$.{field}"
+            return f"json_extract(payload, '{path}')"
+
+        def build_condition(key: str, value: Any) -> tuple[Optional[str], List[Any]]:
+            column_expr = get_json_extract(key)
+
+            if isinstance(value, dict):
+                parts: List[str] = []
+                params: List[Any] = []
+                for op, op_value in value.items():
+                    if op in {"eq"}:
+                        parts.append(f"{column_expr} = ?")
+                        params.append(op_value)
+                    elif op in {"ne"}:
+                        parts.append(f"{column_expr} != ?")
+                        params.append(op_value)
+                    elif op in {"gt", "gte", "lt", "lte"}:
+                        comparator = {
+                            "gt": ">",
+                            "gte": ">=",
+                            "lt": "<",
+                            "lte": "<=",
+                        }[op]
+                        expr = (
+                            f"CAST({column_expr} AS REAL)"
+                            if isinstance(op_value, (int, float))
+                            else column_expr
+                        )
+                        parts.append(f"{expr} {comparator} ?")
+                        params.append(op_value)
+                    elif op == "in":
+                        if not isinstance(op_value, list) or not op_value:
+                            raise ValueError("Filter operator 'in' requires a non-empty list.")
+                        placeholders = ", ".join(["?"] * len(op_value))
+                        parts.append(f"{column_expr} IN ({placeholders})")
+                        params.extend(op_value)
+                    elif op == "nin":
+                        if not isinstance(op_value, list) or not op_value:
+                            raise ValueError("Filter operator 'nin' requires a non-empty list.")
+                        placeholders = ", ".join(["?"] * len(op_value))
+                        parts.append(f"{column_expr} NOT IN ({placeholders})")
+                        params.extend(op_value)
+                    elif op == "like":
+                        parts.append(f"{column_expr} LIKE ?")
+                        params.append(op_value)
+                    elif op == "ilike":
+                        parts.append(f"LOWER({column_expr}) LIKE LOWER(?)")
+                        params.append(op_value)
+                    elif op in {"contains", "contains_any", "contains_all"}:
+                        values = op_value if isinstance(op_value, list) else [op_value]
+                        if not values:
+                            raise ValueError("Filter operator 'contains' requires a value.")
+                        if op == "contains_all":
+                            sub_parts = []
+                            for single_value in values:
+                                sub_parts.append(
+                                    f"EXISTS (SELECT 1 FROM json_each({column_expr}) WHERE value = ?)"
+                                )
+                                params.append(single_value)
+                            parts.append("(" + " AND ".join(sub_parts) + ")")
+                        else:
+                            placeholders = ", ".join(["?"] * len(values))
+                            parts.append(
+                                f"EXISTS (SELECT 1 FROM json_each({column_expr}) WHERE value IN ({placeholders}))"
+                            )
+                            params.extend(values)
+                    else:
+                        raise ValueError(f"Unsupported filter operator: {op}")
+
+                if not parts:
+                    return None, []
+                return "(" + " AND ".join(parts) + ")", params
+
+            if value is None:
+                return f"{column_expr} IS NULL", []
+
+            return f"{column_expr} = ?", [value]
+
+        def process_condition(cond: Any) -> tuple[Optional[str], List[Any]]:
+            if isinstance(cond, dict):
+                if "AND" in cond:
+                    clauses = []
+                    params: List[Any] = []
+                    for item in cond["AND"]:
+                        clause, clause_params = process_condition(item)
+                        if clause:
+                            clauses.append(clause)
+                            params.extend(clause_params)
+                    if not clauses:
+                        return None, []
+                    return "(" + " AND ".join(clauses) + ")", params
+
+                if "OR" in cond:
+                    clauses = []
+                    params = []
+                    for item in cond["OR"]:
+                        clause, clause_params = process_condition(item)
+                        if clause:
+                            clauses.append(clause)
+                            params.extend(clause_params)
+                    if not clauses:
+                        return None, []
+                    return "(" + " OR ".join(clauses) + ")", params
+
+                clauses = []
+                params: List[Any] = []
+                for key, value in cond.items():
+                    clause, clause_params = build_condition(key, value)
+                    if clause:
+                        clauses.append(clause)
+                        params.extend(clause_params)
+                if not clauses:
+                    return None, []
+                return "(" + " AND ".join(clauses) + ")", params
+
+            if isinstance(cond, list):
+                clauses = []
+                params: List[Any] = []
+                for item in cond:
+                    clause, clause_params = process_condition(item)
+                    if clause:
+                        clauses.append(clause)
+                        params.extend(clause_params)
+                if not clauses:
+                    return None, []
+                return "(" + " AND ".join(clauses) + ")", params
+
+            return None, []
+
+        return process_condition(filters)
 
     def delete(self, vector_id: int) -> None:
         """Delete a vector by ID."""
diff --git a/src/server/models/request.py b/src/server/models/request.py
index f163405..6252160 100644
--- a/src/server/models/request.py
+++ b/src/server/models/request.py
@@ -2,7 +2,8 @@
 Request models for PowerMem API
 """
 
-from typing import Any, Dict, List, Optional
+from datetime import datetime
+from typing import Any, Dict, List, Optional, Literal
 
 from pydantic import BaseModel, Field
 
@@ -63,6 +64,27 @@ class MemoryBatchUpdateRequest(BaseModel):
     agent_id: Optional[str] = Field(None, description="Agent ID for access control")
 
 
+class SearchFilters(BaseModel):
+    """Advanced filter options for memory search"""
+
+    created_after: Optional[datetime] = Field(None, description="Only include memories created after this datetime")
+    created_before: Optional[datetime] = Field(None, description="Only include memories created before this datetime")
+    updated_after: Optional[datetime] = Field(None, description="Only include memories updated after this datetime")
+    updated_before: Optional[datetime] = Field(None, description="Only include memories updated before this datetime")
+    min_importance: Optional[float] = Field(None, description="Minimum importance score (0-1)")
+    max_importance: Optional[float] = Field(None, description="Maximum importance score (0-1)")
+    min_retention: Optional[float] = Field(None, description="Minimum retention score (0-1)")
+    max_retention: Optional[float] = Field(None, description="Maximum retention score (0-1)")
+    memory_types: Optional[List[str]] = Field(None, description="Filter by memory types")
+    tags: Optional[List[str]] = Field(None, description="Filter by tags")
+    tag_logic: Optional[Literal["AND", "OR"]] = Field("OR", description="Tag matching logic")
+    user_ids: Optional[List[str]] = Field(None, description="Filter by user IDs")
+    agent_ids: Optional[List[str]] = Field(None, description="Filter by agent IDs")
+    scopes: Optional[List[str]] = Field(None, description="Filter by memory scopes")
+    metadata_contains: Optional[Dict[str, Any]] = Field(None, description="Metadata contains key-value pairs")
+    metadata_equals: Optional[Dict[str, Any]] = Field(None, description="Metadata equals key-value pairs")
+
+
 class SearchRequest(BaseModel):
     """Request model for searching memories"""
 
@@ -70,7 +92,7 @@
     user_id: Optional[str] = Field(None, description="Filter by user ID")
     agent_id: Optional[str] = Field(None, description="Filter by agent ID")
     run_id: Optional[str] = Field(None, description="Filter by run ID")
-    filters: Optional[Dict[str, Any]] = Field(None, description="Additional filters")
+    filters: Optional[SearchFilters] = Field(None, description="Additional filters")
     limit: int = Field(default=30, ge=1, le=100, description="Maximum number of results")
 
 
diff --git a/src/server/services/search_service.py b/src/server/services/search_service.py
index ad6a589..ce9f2c3 100644
--- a/src/server/services/search_service.py
+++ b/src/server/services/search_service.py
@@ -61,12 +61,18 @@ def search_memories(
                     status_code=400,
                 )
 
+            filters_payload = (
+                filters.model_dump(exclude_none=True)
+                if hasattr(filters, "model_dump")
+                else filters
+            )
+
             results = self.memory.search(
                 query=query,
                 user_id=user_id,
                 agent_id=agent_id,
                 run_id=run_id,
-                filters=filters,
+                filters=filters_payload,
                 limit=limit,
             )
@@ -78,6 +84,14 @@ def search_memories(
 
             return results
 
+        except ValueError as e:
+            metrics_collector = get_metrics_collector()
+            metrics_collector.record_memory_operation("search", "failed")
+            raise APIError(
+                code=ErrorCode.INVALID_SEARCH_PARAMS,
+                message=str(e),
+                status_code=400,
+            )
        except APIError:
             # Record failed memory operation for API errors
             metrics_collector = get_metrics_collector()
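
Taken together, the patch threads a single filter grammar through three layers: `FilterBuilder` (or a hand-written dict) emits flat advanced-filter keys, `StorageAdapter._normalize_search_filters` rewrites them into an `AND`/`OR` condition tree, and each vector store compiles that tree into its own query syntax. A minimal sketch of the client-facing layer, derived directly from `src/powermem/search/filters.py` above; the assertion spells out exactly what the builder accumulates:

```python
from powermem import FilterBuilder

# Fluent construction; .after() is an alias for .created_after().
filters = (
    FilterBuilder()
    .after("2024-01-01")
    .importance(minimum=0.7)
    .tags(["work", "urgent"], logic="AND")
    .memory_types(["long_term"])
    .build()
)

# build() returns a plain dict copy of the accumulated keys:
assert filters == {
    "created_after": "2024-01-01",
    "min_importance": 0.7,
    "tags": ["work", "urgent"],
    "tag_logic": "AND",
    "memory_types": ["long_term"],
}
```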
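Passing that dict to `Memory.search` eventually reaches `StorageAdapter._normalize_search_filters`. Hand-tracing the method above for this input against a SQLite-backed store (so `is_sqlite` is true and `tag_logic="AND"` selects the `contains_all` operator) gives the following condition tree; the shape is inferred from the code, not captured from a run:

```python
# Expected result of adapter._normalize_search_filters(filters, sqlite_store):
normalized = {
    "AND": [
        {"created_at": {"gte": "2024-01-01"}},
        {"importance_score": {"gte": 0.7}},
        {"memory_type": {"in": ["long_term"]}},
        {"tags": {"contains_all": ["work", "urgent"]}},
    ]
}
```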
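`SQLiteVectorStore._build_where_clause` then compiles the tree into a parameterized predicate over `json_extract` calls on the payload column: numeric bounds get a `CAST ... AS REAL`, list membership becomes `IN`, and `contains_all` expands to one `EXISTS` subquery per tag. A hand-traced sketch of the output for the tree above, with the builder's redundant nested parentheses trimmed for readability:

```python
# Roughly the clause the builder produces (parentheses simplified):
where_clause = """
json_extract(payload, '$.created_at') >= ?
AND CAST(json_extract(payload, '$.importance_score') AS REAL) >= ?
AND json_extract(payload, '$.memory_type') IN (?)
AND (EXISTS (SELECT 1 FROM json_each(json_extract(payload, '$.tags')) WHERE value = ?)
     AND EXISTS (SELECT 1 FROM json_each(json_extract(payload, '$.tags')) WHERE value = ?))
"""
params = ["2024-01-01", 0.7, "long_term", "work", "urgent"]
# search() then appends it as: query_sql += f" WHERE {where_clause}"
```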
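On the HTTP side, `SearchRequest.filters` is now the typed `SearchFilters` model instead of a free-form dict, and the service serializes it with `model_dump(exclude_none=True)` before calling `Memory.search`; normalization errors surface as `ValueError` and are mapped to a 400 `INVALID_SEARCH_PARAMS` response. One consequence of the field defaults worth noting: `tag_logic` defaults to `"OR"` rather than `None`, so it survives `exclude_none` even when `tags` is omitted (the normalizer simply pops and discards it in that case). A small sketch; the import path is hypothetical and assumes `src/` is on `sys.path` as the diff's layout suggests:

```python
from server.models.request import SearchFilters  # hypothetical import path

f = SearchFilters(tags=["work"], min_importance=0.7)
payload = f.model_dump(exclude_none=True)

# Unset Optional fields are dropped; the defaulted tag_logic remains:
assert payload == {"min_importance": 0.7, "tags": ["work"], "tag_logic": "OR"}
```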