Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
34 changes: 20 additions & 14 deletions meilisearch_python_sdk/_client.py
Original file line number Diff line number Diff line change
Expand Up @@ -623,15 +623,18 @@ async def multi_search(
>>> search_results = await client.multi_search(queries)
"""
url = "multi-search"
if federation:
processed_queries = []
for query in queries:
q = query.model_dump(by_alias=True)
processed_queries = []
for query in queries:
q = query.model_dump(by_alias=True)

if query.retrieve_vectors is None:
del q["retrieveVectors"]

if federation:
del q["limit"]
del q["offset"]
processed_queries.append(q)
else:
processed_queries = [x.model_dump(by_alias=True) for x in queries]

processed_queries.append(q)

if federation:
federation_payload = federation.model_dump(by_alias=True)
Expand Down Expand Up @@ -1479,15 +1482,18 @@ def multi_search(
>>> search_results = client.multi_search(queries)
"""
url = "multi-search"
if federation:
processed_queries = []
for query in queries:
q = query.model_dump(by_alias=True)
processed_queries = []
for query in queries:
q = query.model_dump(by_alias=True)

if query.retrieve_vectors is None:
del q["retrieveVectors"]

if federation:
del q["limit"]
del q["offset"]
processed_queries.append(q)
else:
processed_queries = [x.model_dump(by_alias=True) for x in queries]

processed_queries.append(q)

if federation:
federation_payload = federation.model_dump(by_alias=True)
Expand Down
16 changes: 16 additions & 0 deletions meilisearch_python_sdk/index.py
Original file line number Diff line number Diff line change
Expand Up @@ -721,6 +721,7 @@ async def search(
vector: list[float] | None = None,
hybrid: Hybrid | None = None,
locales: list[str] | None = None,
retrieve_vectors: bool | None = None,
) -> SearchResults:
"""Search the index.

Expand Down Expand Up @@ -781,6 +782,7 @@ async def search(
with caution.
locales: Specifies the languages for the search. This parameter can only be used with
Meilisearch >= v1.10.0. Defaults to None, letting Meilisearch pick.
retrieve_vectors: Return document vector data with search result.

Returns:
Results of the search
Expand Down Expand Up @@ -824,6 +826,7 @@ async def search(
hybrid=hybrid,
ranking_score_threshold=ranking_score_threshold,
locales=locales,
retrieve_vectors=retrieve_vectors,
)
search_url = f"{self._base_url_with_uid}/search"

Expand Down Expand Up @@ -986,6 +989,7 @@ async def facet_search(
ranking_score_threshold: float | None = None,
vector: list[float] | None = None,
locales: list[str] | None = None,
retrieve_vectors: bool | None = None,
) -> FacetSearchResults:
"""Search the index.

Expand Down Expand Up @@ -1038,6 +1042,7 @@ async def facet_search(
with caution.
locales: Specifies the languages for the search. This parameter can only be used with
Meilisearch >= v1.10.0. Defaults to None, letting Meilisearch pick.
retrieve_vectors: Return document vector data with search result.

Returns:
Results of the search
Expand Down Expand Up @@ -1085,6 +1090,7 @@ async def facet_search(
ranking_score_threshold=ranking_score_threshold,
vector=vector,
locales=locales,
retrieve_vectors=retrieve_vectors,
)
search_url = f"{self._base_url_with_uid}/facet-search"

Expand Down Expand Up @@ -5085,6 +5091,7 @@ def search(
vector: list[float] | None = None,
hybrid: Hybrid | None = None,
locales: list[str] | None = None,
retrieve_vectors: bool | None = None,
) -> SearchResults:
"""Search the index.

Expand Down Expand Up @@ -5145,6 +5152,7 @@ def search(
with caution.
locales: Specifies the languages for the search. This parameter can only be used with
Meilisearch >= v1.10.0. Defaults to None, letting Meilisearch pick.
retrieve_vectors: Return document vector data with search result.

Returns:
Results of the search
Expand Down Expand Up @@ -5188,6 +5196,7 @@ def search(
hybrid=hybrid,
ranking_score_threshold=ranking_score_threshold,
locales=locales,
retrieve_vectors=retrieve_vectors,
)

if self._pre_search_plugins:
Expand Down Expand Up @@ -5256,6 +5265,7 @@ def facet_search(
ranking_score_threshold: float | None = None,
vector: list[float] | None = None,
locales: list[str] | None = None,
retrieve_vectors: bool | None = None,
) -> FacetSearchResults:
"""Search the index.

Expand Down Expand Up @@ -5308,6 +5318,7 @@ def facet_search(
with caution.
locales: Specifies the languages for the search. This parameter can only be used with
Meilisearch >= v1.10.0. Defaults to None, letting Meilisearch pick.
retrieve_vectors: Return document vector data with search result.

Returns:
Results of the search
Expand Down Expand Up @@ -5355,6 +5366,7 @@ def facet_search(
ranking_score_threshold=ranking_score_threshold,
vector=vector,
locales=locales,
retrieve_vectors=retrieve_vectors,
)

if self._pre_facet_search_plugins:
Expand Down Expand Up @@ -8262,6 +8274,7 @@ def _process_search_parameters(
vector: list[float] | None = None,
hybrid: Hybrid | None = None,
locales: list[str] | None = None,
retrieve_vectors: bool | None = None,
) -> JsonDict:
if attributes_to_retrieve is None:
attributes_to_retrieve = ["*"]
Expand Down Expand Up @@ -8310,6 +8323,9 @@ def _process_search_parameters(
if locales:
body["locales"] = locales

if retrieve_vectors is not None:
body["retrieveVectors"] = retrieve_vectors

return body


Expand Down
1 change: 1 addition & 0 deletions meilisearch_python_sdk/models/search.py
Original file line number Diff line number Diff line change
Expand Up @@ -70,6 +70,7 @@ class SearchParams(CamelBase):
vector: list[float] | None = None
hybrid: Hybrid | None = None
locales: list[str] | None = None
retrieve_vectors: bool | None = None

@field_validator("ranking_score_threshold", mode="before") # type: ignore[attr-defined]
@classmethod
Expand Down
8 changes: 8 additions & 0 deletions meilisearch_python_sdk/models/settings.py
Original file line number Diff line number Diff line change
Expand Up @@ -56,6 +56,7 @@ class OpenAiEmbedder(CamelBase):
document_template: str | None = None
document_template_max_bytes: int | None = None
distribution: Distribution | None = None
binary_quantized: bool | None = None


class HuggingFaceEmbedder(CamelBase):
Expand All @@ -65,6 +66,8 @@ class HuggingFaceEmbedder(CamelBase):
document_template: str | None = None
document_template_max_bytes: int | None = None
distribution: Distribution | None = None
dimensions: int | None = None
binary_quantized: bool | None = None


class OllamaEmbedder(CamelBase):
Expand All @@ -76,6 +79,7 @@ class OllamaEmbedder(CamelBase):
document_template: str | None = None
document_template_max_bytes: int | None = None
distribution: Distribution | None = None
binary_quantized: bool | None = None


class RestEmbedder(CamelBase):
Expand All @@ -89,12 +93,16 @@ class RestEmbedder(CamelBase):
headers: JsonDict | None = None
request: JsonDict
response: JsonDict
binary_quantized: bool | None = None


class UserProvidedEmbedder(CamelBase):
source: str = "userProvided"
dimensions: int
distribution: Distribution | None = None
document_template: str | None = None
document_template_max_bytes: int | None = None
binary_quantized: bool | None = None


class Embedders(CamelBase):
Expand Down
9 changes: 0 additions & 9 deletions tests/conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -295,15 +295,6 @@ async def default_search_key(async_client):
return key


@pytest.fixture(scope="session", autouse=True)
async def enable_vector_search(base_url, ssl_verify):
async with HttpxAsyncClient(
base_url=base_url, headers={"Authorization": f"Bearer {MASTER_KEY}"}, verify=ssl_verify
) as client:
await client.patch("/experimental-features", json={"vectorStore": True})
yield


@pytest.fixture(scope="session", autouse=True)
async def enable_edit_by_function(base_url, ssl_verify):
async with HttpxAsyncClient(
Expand Down
4 changes: 2 additions & 2 deletions tests/test_async_index.py
Original file line number Diff line number Diff line change
Expand Up @@ -139,7 +139,7 @@ async def test_get_settings_default(
assert response.non_separator_tokens == []
assert response.search_cutoff_ms is None
assert response.dictionary == []
assert response.embedders is None
assert response.embedders == {}
assert response.facet_search is True
assert response.prefix_search == "indexingTime"

Expand Down Expand Up @@ -233,7 +233,7 @@ async def test_reset_settings(async_empty_index, new_settings, default_ranking_r
assert response.faceting.max_values_per_facet == 100
assert response.pagination.max_total_hits == 1000
assert response.proximity_precision is ProximityPrecision.BY_WORD
assert response.embedders is None
assert response.embedders == {}


async def test_get_ranking_rules_default(async_empty_index, default_ranking_rules):
Expand Down
24 changes: 24 additions & 0 deletions tests/test_async_search.py
Original file line number Diff line number Diff line change
Expand Up @@ -506,6 +506,30 @@ async def test_vector_search(async_index_with_documents_and_vectors):
assert len(response.hits) >= 1


async def test_vector_search_retrieve_vectors(async_index_with_documents_and_vectors):
index = await async_index_with_documents_and_vectors()
response = await index.search(
"",
vector=[0.1, 0.2],
hybrid=Hybrid(semantic_ratio=1.0, embedder="default"),
retrieve_vectors=True,
)
assert len(response.hits) >= 1
assert response.hits[0].get("_vectors") is not None


async def test_vector_search_retrieve_vectors_false(async_index_with_documents_and_vectors):
index = await async_index_with_documents_and_vectors()
response = await index.search(
"",
vector=[0.1, 0.2],
hybrid=Hybrid(semantic_ratio=1.0, embedder="default"),
retrieve_vectors=False,
)
assert len(response.hits) >= 1
assert response.hits[0].get("_vectors") is None


async def test_basic_facet_search(async_index_with_documents):
index = await async_index_with_documents()
update = await index.update_filterable_attributes(["genre"])
Expand Down
4 changes: 2 additions & 2 deletions tests/test_index.py
Original file line number Diff line number Diff line change
Expand Up @@ -131,7 +131,7 @@ def test_get_settings_default(
assert response.non_separator_tokens == []
assert response.search_cutoff_ms is None
assert response.dictionary == []
assert response.embedders is None
assert response.embedders == {}
assert response.facet_search is True
assert response.prefix_search == "indexingTime"

Expand Down Expand Up @@ -225,7 +225,7 @@ def test_reset_settings(empty_index, new_settings, default_ranking_rules):
assert response.faceting.max_values_per_facet == 100
assert response.pagination.max_total_hits == 1000
assert response.proximity_precision is ProximityPrecision.BY_WORD
assert response.embedders is None
assert response.embedders == {}


def test_get_ranking_rules_default(empty_index, default_ranking_rules):
Expand Down
24 changes: 24 additions & 0 deletions tests/test_search.py
Original file line number Diff line number Diff line change
Expand Up @@ -483,6 +483,30 @@ def test_vector_search(index_with_documents_and_vectors):
assert len(response.hits) >= 1


def test_vector_search_retrieve_vectors(index_with_documents_and_vectors):
index = index_with_documents_and_vectors()
response = index.search(
"",
vector=[0.1, 0.2],
hybrid=Hybrid(semantic_ratio=1.0, embedder="default"),
retrieve_vectors=True,
)
assert len(response.hits) >= 1
assert response.hits[0].get("_vectors") is not None


def test_vector_search_retrieve_vectors_false(index_with_documents_and_vectors):
index = index_with_documents_and_vectors()
response = index.search(
"",
vector=[0.1, 0.2],
hybrid=Hybrid(semantic_ratio=1.0, embedder="default"),
retrieve_vectors=False,
)
assert len(response.hits) >= 1
assert response.hits[0].get("_vectors") is None


def test_basic_facet_search(index_with_documents):
index = index_with_documents()
update = index.update_filterable_attributes(["genre"])
Expand Down