From e2b25b5d24a947c5c75924c8b40cd5b493cbb383 Mon Sep 17 00:00:00 2001
From: Dev-Khant
Date: Tue, 18 Mar 2025 13:49:18 +0530
Subject: [PATCH 1/3] Fix azure ai vector store

---
 mem0/vector_stores/azure_ai_search.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/mem0/vector_stores/azure_ai_search.py b/mem0/vector_stores/azure_ai_search.py
index 03a476338a..3601698077 100644
--- a/mem0/vector_stores/azure_ai_search.py
+++ b/mem0/vector_stores/azure_ai_search.py
@@ -169,7 +169,7 @@ def insert(self, vectors, payloads=None, ids=None):
         ]
         response = self.search_client.upload_documents(documents)
         for doc in response:
-            if not doc.get("status", False):
+            if not hasattr(doc, "status_code") and doc.get("status_code") != 201:
                 raise Exception(f"Insert failed for document {doc.get('id')}: {doc}")
         return response
 

From c8c6f41ac4ad139e706f78cbab9724097d862286 Mon Sep 17 00:00:00 2001
From: Dev-Khant
Date: Tue, 18 Mar 2025 13:50:54 +0530
Subject: [PATCH 2/3] version bump -> 0.1.70

---
 poetry.lock    | 86 +++++++++++++++++++++++++++++++++++++++++++-------
 pyproject.toml |  2 +-
 2 files changed, 75 insertions(+), 13 deletions(-)

diff --git a/poetry.lock b/poetry.lock
index e810398ac6..2e0a10543a 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -7,7 +7,7 @@ description = "Happy Eyeballs for asyncio"
 optional = false
 python-versions = ">=3.9"
 groups = ["main"]
-markers = "extra == \"graph\""
+markers = "(python_version < \"3.10\" or python_version >= \"3.12\") and extra == \"graph\""
 files = [
     {file = "aiohappyeyeballs-2.4.6-py3-none-any.whl", hash = "sha256:147ec992cf873d74f5062644332c539fcd42956dc69453fe5204195e560517e1"},
     {file = "aiohappyeyeballs-2.4.6.tar.gz", hash = "sha256:9b05052f9042985d32ecbe4b59a77ae19c006a78f1344d7fdad69d28ded3d0b0"},
@@ -20,7 +20,7 @@ description = "Async http client/server framework (asyncio)"
 optional = false
 python-versions = ">=3.9"
 groups = ["main"]
-markers = "extra == \"graph\""
+markers = "(python_version < \"3.10\" or python_version >= \"3.12\") and extra == \"graph\""
 files = [
     {file = "aiohttp-3.11.13-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a4fe27dbbeec445e6e1291e61d61eb212ee9fed6e47998b27de71d70d3e8777d"},
     {file = "aiohttp-3.11.13-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:9e64ca2dbea28807f8484c13f684a2f761e69ba2640ec49dacd342763cc265ef"},
@@ -125,7 +125,7 @@ description = "aiosignal: a list of registered asynchronous callbacks"
 optional = false
 python-versions = ">=3.9"
 groups = ["main"]
-markers = "extra == \"graph\""
+markers = "(python_version < \"3.10\" or python_version >= \"3.12\") and extra == \"graph\""
 files = [
     {file = "aiosignal-1.3.2-py2.py3-none-any.whl", hash = "sha256:45cde58e409a301715980c2b01d0c28bdde3770d8290b5eb2173759d9acb31a5"},
     {file = "aiosignal-1.3.2.tar.gz", hash = "sha256:a8c255c66fafb1e499c9351d0bf32ff2d8a0321595ebac3b93713656d2436f54"},
@@ -189,7 +189,7 @@ description = "Classes Without Boilerplate"
 optional = false
 python-versions = ">=3.8"
 groups = ["main"]
-markers = "extra == \"graph\""
+markers = "(python_version < \"3.10\" or python_version >= \"3.12\") and extra == \"graph\""
 files = [
     {file = "attrs-25.1.0-py3-none-any.whl", hash = "sha256:c75a69e28a550a7e93789579c22aa26b0f5b83b75dc4e08fe092980051e1090a"},
     {file = "attrs-25.1.0.tar.gz", hash = "sha256:1c97078a80c814273a76b2a298a932eb681c87415c11dee0a6921de7f1b02c3e"},
@@ -508,7 +508,7 @@ description = "A list-like structure which implements collections.abc.MutableSeq
 optional = false
 python-versions = ">=3.8"
 groups = ["main"]
-markers = "extra == \"graph\""
\"graph\"" +markers = "(python_version < \"3.10\" or python_version >= \"3.12\") and extra == \"graph\"" files = [ {file = "frozenlist-1.5.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:5b6a66c18b5b9dd261ca98dffcb826a525334b2f29e7caa54e182255c5f6a65a"}, {file = "frozenlist-1.5.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d1b3eb7b05ea246510b43a7e53ed1653e55c2121019a97e60cad7efb881a97bb"}, @@ -1160,7 +1160,7 @@ description = "Building applications with LLMs through composability" optional = false python-versions = "<4.0,>=3.9" groups = ["main"] -markers = "extra == \"graph\"" +markers = "(python_version < \"3.10\" or python_version >= \"3.12\") and extra == \"graph\"" files = [ {file = "langchain-0.3.19-py3-none-any.whl", hash = "sha256:1e16d97db9106640b7de4c69f8f5ed22eeda56b45b9241279e83f111640eff16"}, {file = "langchain-0.3.19.tar.gz", hash = "sha256:b96f8a445f01d15d522129ffe77cc89c8468dbd65830d153a676de8f6b899e7b"}, @@ -1199,6 +1199,46 @@ openai = ["langchain-openai"] together = ["langchain-together"] xai = ["langchain-xai"] +[[package]] +name = "langchain" +version = "0.3.20" +description = "Building applications with LLMs through composability" +optional = false +python-versions = "<4.0,>=3.9" +groups = ["main"] +markers = "python_version >= \"3.10\" and python_version < \"3.12\" and extra == \"graph\"" +files = [ + {file = "langchain-0.3.20-py3-none-any.whl", hash = "sha256:273287f8e61ffdf7e811cf8799e6a71e9381325b8625fd6618900faba79cfdd0"}, + {file = "langchain-0.3.20.tar.gz", hash = "sha256:edcc3241703e1f6557ef5a5c35cd56f9ccc25ff12e38b4829c66d94971737a93"}, +] + +[package.dependencies] +async-timeout = {version = ">=4.0.0,<5.0.0", markers = "python_version < \"3.11\""} +langchain-core = ">=0.3.41,<1.0.0" +langchain-text-splitters = ">=0.3.6,<1.0.0" +langsmith = ">=0.1.17,<0.4" +pydantic = ">=2.7.4,<3.0.0" +PyYAML = ">=5.3" +requests = ">=2,<3" +SQLAlchemy = ">=1.4,<3" + +[package.extras] +anthropic = ["langchain-anthropic"] +aws = ["langchain-aws"] +cohere = ["langchain-cohere"] +community = ["langchain-community"] +deepseek = ["langchain-deepseek"] +fireworks = ["langchain-fireworks"] +google-genai = ["langchain-google-genai"] +google-vertexai = ["langchain-google-vertexai"] +groq = ["langchain-groq"] +huggingface = ["langchain-huggingface"] +mistralai = ["langchain-mistralai"] +ollama = ["langchain-ollama"] +openai = ["langchain-openai"] +together = ["langchain-together"] +xai = ["langchain-xai"] + [[package]] name = "langchain-core" version = "0.3.40" @@ -1206,7 +1246,7 @@ description = "Building applications with LLMs through composability" optional = false python-versions = "<4.0,>=3.9" groups = ["main"] -markers = "extra == \"graph\"" +markers = "(python_version < \"3.10\" or python_version >= \"3.12\") and extra == \"graph\"" files = [ {file = "langchain_core-0.3.40-py3-none-any.whl", hash = "sha256:9f31358741f10a13db8531e8288b8a5ae91904018c5c2e6f739d6645a98fca03"}, {file = "langchain_core-0.3.40.tar.gz", hash = "sha256:893a238b38491967c804662c1ec7c3e6ebaf223d1125331249c3cf3862ff2746"}, @@ -1224,6 +1264,28 @@ PyYAML = ">=5.3" tenacity = ">=8.1.0,<8.4.0 || >8.4.0,<10.0.0" typing-extensions = ">=4.7" +[[package]] +name = "langchain-core" +version = "0.3.45" +description = "Building applications with LLMs through composability" +optional = false +python-versions = "<4.0,>=3.9" +groups = ["main"] +markers = "python_version >= \"3.10\" and python_version < \"3.12\" and extra == \"graph\"" +files = [ + {file = "langchain_core-0.3.45-py3-none-any.whl", hash = 
"sha256:fe560d644c102c3f5dcfb44eb5295e26d22deab259fdd084f6b1b55a0350b77c"}, + {file = "langchain_core-0.3.45.tar.gz", hash = "sha256:a39b8446495d1ea97311aa726478c0a13ef1d77cb7644350bad6d9d3c0141a0c"}, +] + +[package.dependencies] +jsonpatch = ">=1.33,<2.0" +langsmith = ">=0.1.125,<0.4" +packaging = ">=23.2,<25" +pydantic = {version = ">=2.5.2,<3.0.0", markers = "python_full_version < \"3.12.4\""} +PyYAML = ">=5.3" +tenacity = ">=8.1.0,<8.4.0 || >8.4.0,<10.0.0" +typing-extensions = ">=4.7" + [[package]] name = "langchain-neo4j" version = "0.4.0" @@ -1307,7 +1369,7 @@ description = "multidict implementation" optional = false python-versions = ">=3.8" groups = ["main"] -markers = "extra == \"graph\"" +markers = "(python_version < \"3.10\" or python_version >= \"3.12\") and extra == \"graph\"" files = [ {file = "multidict-6.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3380252550e372e8511d49481bd836264c009adb826b23fefcc5dd3c69692f60"}, {file = "multidict-6.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:99f826cbf970077383d7de805c0681799491cb939c25450b9b5b3ced03ca99f1"}, @@ -1470,7 +1532,7 @@ description = "Fundamental package for array computing in Python" optional = false python-versions = ">=3.9" groups = ["main"] -markers = "python_version <= \"3.12\"" +markers = "python_version < \"3.10\"" files = [ {file = "numpy-1.26.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:9ff0f4f29c51e2803569d7a51c2304de5554655a60c5d776e35b4a41413830d0"}, {file = "numpy-1.26.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2e4ee3380d6de9c9ec04745830fd9e2eccb3e6cf790d39d7b98ffd19b0dd754a"}, @@ -1517,7 +1579,7 @@ description = "Fundamental package for array computing in Python" optional = false python-versions = ">=3.10" groups = ["main"] -markers = "python_version >= \"3.13\"" +markers = "python_version >= \"3.10\"" files = [ {file = "numpy-2.2.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:8146f3550d627252269ac42ae660281d673eb6f8b32f113538e0cc2a9aed42b9"}, {file = "numpy-2.2.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e642d86b8f956098b564a45e6f6ce68a22c2c97a04f5acd3f221f57b8cb850ae"}, @@ -1774,7 +1836,7 @@ description = "Accelerated property cache" optional = false python-versions = ">=3.9" groups = ["main"] -markers = "extra == \"graph\"" +markers = "(python_version < \"3.10\" or python_version >= \"3.12\") and extra == \"graph\"" files = [ {file = "propcache-0.3.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:efa44f64c37cc30c9f05932c740a8b40ce359f51882c70883cc95feac842da4d"}, {file = "propcache-0.3.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:2383a17385d9800b6eb5855c2f05ee550f803878f344f58b6e194de08b96352c"}, @@ -2676,7 +2738,7 @@ description = "Yet another URL library" optional = false python-versions = ">=3.9" groups = ["main"] -markers = "extra == \"graph\"" +markers = "(python_version < \"3.10\" or python_version >= \"3.12\") and extra == \"graph\"" files = [ {file = "yarl-1.18.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7df647e8edd71f000a5208fe6ff8c382a1de8edfbccdbbfe649d263de07d8c34"}, {file = "yarl-1.18.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c69697d3adff5aa4f874b19c0e4ed65180ceed6318ec856ebc423aa5850d84f7"}, diff --git a/pyproject.toml b/pyproject.toml index a06a2c6975..7fae3f3248 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "mem0ai" -version = "0.1.69" +version = "0.1.70" description = "Long-term memory for AI Agents" authors = ["Mem0 "] exclude = [ From 
From 6c79f91d338bfc5f50d7cf04783631b60eed68b1 Mon Sep 17 00:00:00 2001
From: Dev-Khant
Date: Tue, 18 Mar 2025 14:12:19 +0530
Subject: [PATCH 3/3] more fixes

---
 docs/components/vectordbs/dbs/azure_ai_search.mdx | 15 ---------------
 mem0/vector_stores/azure_ai_search.py             | 13 +++++--------
 2 files changed, 5 insertions(+), 23 deletions(-)

diff --git a/docs/components/vectordbs/dbs/azure_ai_search.mdx b/docs/components/vectordbs/dbs/azure_ai_search.mdx
index 9a02e8ccf3..7a6805498d 100644
--- a/docs/components/vectordbs/dbs/azure_ai_search.mdx
+++ b/docs/components/vectordbs/dbs/azure_ai_search.mdx
@@ -33,17 +33,6 @@ messages = [
 m.add(messages, user_id="alice", metadata={"category": "movies"})
 ```
 
-## Advanced Usage
-
-```python
-# Search with specific filter mode
-result = m.search(
-    "sci-fi movies",
-    filters={"user_id": "alice"},
-    limit=5,
-    vector_filter_mode="preFilter"  # Apply filters before vector search
-)
-
 # Using binary compression for large vector collections
 config = {
     "vector_store": {
@@ -78,10 +67,6 @@ config = {
   - `scalar`: Scalar quantization with reasonable balance of speed and accuracy
   - `binary`: Binary quantization for maximum compression with some accuracy trade-off
 
-- **vector_filter_mode**:
-  - `preFilter`: Applies filters before vector search (faster)
-  - `postFilter`: Applies filters after vector search (may provide better relevance)
-
 - **use_float16**: Using half precision (float16) reduces storage requirements but may slightly impact accuracy. Useful for very large vector collections.
 
 - **Filterable Fields**: The implementation automatically extracts `user_id`, `run_id`, and `agent_id` fields from payloads for filtering.
\ No newline at end of file
diff --git a/mem0/vector_stores/azure_ai_search.py b/mem0/vector_stores/azure_ai_search.py
index 3601698077..ad3728c9ab 100644
--- a/mem0/vector_stores/azure_ai_search.py
+++ b/mem0/vector_stores/azure_ai_search.py
@@ -189,7 +189,7 @@ def _build_filter_expression(self, filters):
         filter_expression = " and ".join(filter_conditions)
         return filter_expression
 
-    def search(self, query, limit=5, filters=None, vector_filter_mode="preFilter"):
+    def search(self, query, limit=5, filters=None):
         """
         Search for similar vectors.
 
         Args:
             query (List[float]): Query vector.
             limit (int, optional): Number of results to return. Defaults to 5.
             filters (Dict, optional): Filters to apply to the search. Defaults to None.
-            vector_filter_mode (str): Determines whether filters are applied before or after the vector search.
-                Known values: "preFilter" (default) and "postFilter".
 
         Returns:
             List[OutputData]: Search results.
@@ -213,8 +211,7 @@ def search(self, query, limit=5, filters=None, vector_filter_mode="preFilter"):
         search_results = self.search_client.search(
             vector_queries=[vector_query],
             filter=filter_expression,
-            top=limit,
-            vector_filter_mode=vector_filter_mode,
+            top=limit
         )
 
         results = []
@@ -236,7 +233,7 @@ def delete(self, vector_id):
         """
         response = self.search_client.delete_documents(documents=[{"id": vector_id}])
         for doc in response:
-            if not doc.get("status", False):
+            if not hasattr(doc, "status_code") and doc.get("status_code") != 200:
                 raise Exception(f"Delete failed for document {vector_id}: {doc}")
         logger.info(f"Deleted document with ID '{vector_id}' from index '{self.index_name}'.")
         return response
@@ -260,7 +257,7 @@ def update(self, vector_id, vector=None, payload=None):
             document[field] = payload.get(field)
         response = self.search_client.merge_or_upload_documents(documents=[document])
         for doc in response:
-            if not doc.get("status", False):
+            if not hasattr(doc, "status_code") and doc.get("status_code") != 200:
                 raise Exception(f"Update failed for document {vector_id}: {doc}")
         return response
 
@@ -335,7 +332,7 @@ def list(self, filters=None, limit=100):
                     id=result["id"], score=result["@search.score"], payload=payload
                 )
             )
-        return results
+        return [results]
 
     def __del__(self):
         """Close the search client when the object is deleted."""