From a6e98b14ded15e4bc6f6f0c7b6f21b55fdf8407b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20P=C4=99czek?= Date: Wed, 28 Aug 2024 21:45:06 +0200 Subject: [PATCH 1/4] Rename kinds --- inference/core/__init__.py | 1 - inference/core/workflows/core_steps/loader.py | 30 -- .../execution_engine/entities/types.py | 272 +++++------------- .../models_predictions_tests/conftest.py | 9 +- .../models_predictions_tests/test_sam2.py | 31 +- .../test_workflow_with_custom_python_block.py | 14 +- .../execution/test_workflow_with_sam2.py | 3 +- .../introspection/test_blocks_loader.py | 4 +- 8 files changed, 99 insertions(+), 265 deletions(-) diff --git a/inference/core/__init__.py b/inference/core/__init__.py index 2d3be786e..22cae88f5 100644 --- a/inference/core/__init__.py +++ b/inference/core/__init__.py @@ -2,7 +2,6 @@ import time import requests - from packaging import version as packaging_version from inference.core.env import DISABLE_VERSION_CHECK, VERSION_CHECK_MODE diff --git a/inference/core/workflows/core_steps/loader.py b/inference/core/workflows/core_steps/loader.py index ad9d3c7bb..c1db0d6df 100644 --- a/inference/core/workflows/core_steps/loader.py +++ b/inference/core/workflows/core_steps/loader.py @@ -174,21 +174,6 @@ TriangleVisualizationBlockV1, ) from inference.core.workflows.execution_engine.entities.types import ( - BATCH_OF_BAR_CODE_DETECTION_KIND, - BATCH_OF_BOOLEAN_KIND, - BATCH_OF_CLASSIFICATION_PREDICTION_KIND, - BATCH_OF_DICTIONARY_KIND, - BATCH_OF_IMAGE_METADATA_KIND, - BATCH_OF_IMAGES_KIND, - BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, - BATCH_OF_KEYPOINT_DETECTION_PREDICTION_KIND, - BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, - BATCH_OF_PARENT_ID_KIND, - BATCH_OF_PREDICTION_TYPE_KIND, - BATCH_OF_QR_CODE_DETECTION_KIND, - BATCH_OF_SERIALISED_PAYLOADS_KIND, - BATCH_OF_STRING_KIND, - BATCH_OF_TOP_CLASS_KIND, BOOLEAN_KIND, CONTOURS_KIND, DETECTION_KIND, @@ -290,37 +275,22 @@ def load_kinds() -> List[Kind]: return [ WILDCARD_KIND, IMAGE_KIND, - BATCH_OF_IMAGES_KIND, ROBOFLOW_MODEL_ID_KIND, ROBOFLOW_PROJECT_KIND, ROBOFLOW_API_KEY_KIND, FLOAT_ZERO_TO_ONE_KIND, LIST_OF_VALUES_KIND, - BATCH_OF_SERIALISED_PAYLOADS_KIND, BOOLEAN_KIND, - BATCH_OF_BOOLEAN_KIND, INTEGER_KIND, STRING_KIND, - BATCH_OF_STRING_KIND, - BATCH_OF_TOP_CLASS_KIND, FLOAT_KIND, DICTIONARY_KIND, - BATCH_OF_DICTIONARY_KIND, - BATCH_OF_CLASSIFICATION_PREDICTION_KIND, DETECTION_KIND, POINT_KIND, ZONE_KIND, OBJECT_DETECTION_PREDICTION_KIND, - BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, - BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, KEYPOINT_DETECTION_PREDICTION_KIND, - BATCH_OF_KEYPOINT_DETECTION_PREDICTION_KIND, - BATCH_OF_QR_CODE_DETECTION_KIND, - BATCH_OF_BAR_CODE_DETECTION_KIND, - BATCH_OF_PREDICTION_TYPE_KIND, - BATCH_OF_PARENT_ID_KIND, - BATCH_OF_IMAGE_METADATA_KIND, RGB_COLOR_KIND, IMAGE_KEYPOINTS_KIND, CONTOURS_KIND, diff --git a/inference/core/workflows/execution_engine/entities/types.py b/inference/core/workflows/execution_engine/entities/types.py index a576c4ac5..c4fb696a9 100644 --- a/inference/core/workflows/execution_engine/entities/types.py +++ b/inference/core/workflows/execution_engine/entities/types.py @@ -18,13 +18,6 @@ def __hash__(self) -> int: KIND_KEY = "kind" DIMENSIONALITY_OFFSET_KEY = "dimensionality_offset" DIMENSIONALITY_REFERENCE_PROPERTY_KEY = "dimensionality_reference_property" -DOCS_NOTE_ABOUT_BATCH = """ -**Important note**: - -When you see `Batch[]` in a name, it means that each group of data, called a batch, will contain elements -of 
type ``. This also implies that if there are multiple inputs or outputs for a batch-wise operation, -they will maintain the same order of elements within each batch. -""" WILDCARD_KIND_DOCS = """ This is a special kind that represents Any value - which is to be used by default if @@ -38,9 +31,8 @@ def __hash__(self) -> int: WILDCARD_KIND = Kind( name="*", description="Equivalent of any element", docs=WILDCARD_KIND_DOCS ) -IMAGE_KIND = Kind(name="image", description="Image in workflows", docs="TODO") IMAGE_KIND_DOCS = f""" -This is the representation of image batch in `workflows`. The value behind this kind +This is the representation of image in `workflows`. The value behind this kind is Python list of dictionaries. Each of this dictionary is native `inference` image with the following keys defined: ```python @@ -55,12 +47,9 @@ def __hash__(self) -> int: Some blocks that output images may add additional fields - like "parent_id", which should not be modified but may be used is specific contexts - for instance when one needs to tag predictions with identifier of parent image. - -{DOCS_NOTE_ABOUT_BATCH} """ -BATCH_OF_IMAGES_KIND = Kind( - name="Batch[image]", description="Image in workflows", docs=IMAGE_KIND_DOCS -) +IMAGE_KIND = Kind(name="image", description="Image in workflows", docs=IMAGE_KIND_DOCS) + VIDEO_METADATA_KIND_DOCS = """ This is representation of metadata that describe images that come from videos. @@ -190,7 +179,7 @@ def __hash__(self) -> int: docs=IMAGE_KEYPOINTS_KIND_DOCS, ) -BATCH_OF_SERIALISED_PAYLOADS_KIND_DOCS = f""" +SERIALISED_PAYLOADS_KIND_DOCS = f""" This value represents list of serialised values. Each serialised value is either string or bytes - if something else is provided - it will be attempted to be serialised to JSON. @@ -202,35 +191,18 @@ def __hash__(self) -> int: This kind is to be used in combination with sinks blocks and serializers blocks. Serializer should output value of this kind which shall then be accepted by sink. - -{DOCS_NOTE_ABOUT_BATCH} """ -BATCH_OF_SERIALISED_PAYLOADS_KIND = Kind( - name="Batch[serialised_payloads]", - description="List of serialised elements that can be registered in the sink", - docs=BATCH_OF_SERIALISED_PAYLOADS_KIND_DOCS, +SERIALISED_PAYLOADS_KIND = Kind( + name="serialised_payloads", + description="Serialised element that is usually accepted by sink", + docs=SERIALISED_PAYLOADS_KIND_DOCS, ) + BOOLEAN_KIND_DOCS = """ -This kind represents single boolean value - `True` or `False` +This kind represents boolean value - `True` or `False` """ BOOLEAN_KIND = Kind(name="boolean", description="Boolean flag", docs=BOOLEAN_KIND_DOCS) -BATCH_OF_BOOLEAN_KIND_DOCS = f""" -This kind represents batch of boolean values. - -Examples: -``` -[True, False, False, True] -[True, True] -``` - -{DOCS_NOTE_ABOUT_BATCH} -""" -BATCH_OF_BOOLEAN_KIND = Kind( - name="Batch[boolean]", - description="Boolean values batch", - docs=BATCH_OF_BOOLEAN_KIND_DOCS, -) INTEGER_KIND_DOCS = """ Examples: @@ -247,53 +219,21 @@ def __hash__(self) -> int: ``` """ STRING_KIND = Kind(name="string", description="String value", docs=STRING_KIND_DOCS) -BATCH_OF_STRING_KIND_DOCS = f""" -This kind represents batch of string values. - -Examples: -``` -["a", "b", "c"] -["d", "e"] -``` - -{DOCS_NOTE_ABOUT_BATCH} -""" -BATCH_OF_STRING_KIND = Kind( - name="Batch[string]", - description="Batch of string values", - docs=BATCH_OF_STRING_KIND_DOCS, -) -BATCH_OF_INTEGER_KIND_DOCS = f""" -This kind represents batch of integer values. 
- -Examples: -``` -[1, 2, 6] -[9, 4] -``` -{DOCS_NOTE_ABOUT_BATCH} -""" -BATCH_OF_INTEGER_KIND = Kind( - name="Batch[integer]", - description="Batch of integer values", - docs=BATCH_OF_INTEGER_KIND_DOCS, -) -BATCH_OF_TOP_CLASS_KIND_DOCS = f""" +TOP_CLASS_KIND_DOCS = f""" The kind represent top classes predicted by classification model - representing its predictions on batch of images. Example: ``` ["car", "dog", "car", "cat"] ``` - -{DOCS_NOTE_ABOUT_BATCH} """ -BATCH_OF_TOP_CLASS_KIND = Kind( - name="Batch[top_class]", - description="Batch of string values representing top class predicted by classification model", - docs=BATCH_OF_TOP_CLASS_KIND_DOCS, +TOP_CLASS_KIND = Kind( + name="top_class", + description="String value representing top class predicted by classification model", + docs=TOP_CLASS_KIND_DOCS, ) + FLOAT_KIND_DOCS = """ Example: ``` @@ -311,21 +251,8 @@ def __hash__(self) -> int: ``` """ DICTIONARY_KIND = Kind(name="dictionary", description="Dictionary") -BATCH_OF_DICTIONARY_KIND_DOCS = f""" -This kind represent a batch of any Python dicts. -Examples: -``` -[{{"my_key", "my_value_1"}}, {{"my_key", "my_value_2"}}] -``` -{DOCS_NOTE_ABOUT_BATCH} -""" -BATCH_OF_DICTIONARY_KIND = Kind( - name="Batch[dictionary]", - description="Batch of dictionaries", - docs=BATCH_OF_DICTIONARY_KIND_DOCS, -) -BATCH_OF_CLASSIFICATION_PREDICTION_KIND_DOCS = f""" +CLASSIFICATION_PREDICTION_KIND_DOCS = f""" This kind represent predictions from Classification Models. Examples: @@ -362,15 +289,14 @@ def __hash__(self) -> int: }} ] ``` - -{DOCS_NOTE_ABOUT_BATCH} """ -BATCH_OF_CLASSIFICATION_PREDICTION_KIND = Kind( - name="Batch[classification_prediction]", - description="`'predictions'` key from Classification Model outputs", - docs=BATCH_OF_CLASSIFICATION_PREDICTION_KIND_DOCS, +CLASSIFICATION_PREDICTION_KIND = Kind( + name="classification_prediction", + description="`'predictions'` key from Classification Model output", + docs=CLASSIFICATION_PREDICTION_KIND_DOCS, ) + DETECTION_KIND_DOCS = """ This kind represents single detection in prediction from a model that detects multiple elements (like object detection or instance segmentation model). It is represented as a tuple @@ -458,31 +384,6 @@ def __hash__(self) -> int: docs=OBJECT_DETECTION_PREDICTION_KIND_DOCS, ) -BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND_DOCS = f""" -This kind represents batch of predictions from an Object Detection Model. 
- -Example: -``` -# Each prediction in batch is list of dictionaries that contains detected objects (detections) -[ - [ - {{"x": 300, "y": 400, "width": 100, "height" 50, "confidence": 0.3, "class": "car", "class_id": 0.1, "detection_id": "random-uuid"}}, - {{"x": 600, "y": 900, "width": 100, "height" 50, "confidence": 0.3, "class": "car", "class_id": 0.1, "detection_id": "random-uuid"}} - ], - [ - {{"x": 300, "y": 400, "width": 100, "height" 50, "confidence": 0.3, "class": "car", "class_id": 0.1, "detection_id": "random-uuid"}}, - {{"x": 600, "y": 900, "width": 100, "height" 50, "confidence": 0.3, "class": "car", "class_id": 0.1, "detection_id": "random-uuid"}} - ] -] -``` - -{DOCS_NOTE_ABOUT_BATCH} -""" -BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND = Kind( - name="Batch[object_detection_prediction]", - description="`'predictions'` key from Object Detection Model output", - docs=BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND_DOCS, -) INSTANCE_SEGMENTATION_PREDICTION_KIND_DOCS = """ This kind represents single instance segmentation prediction in form of @@ -514,32 +415,6 @@ def __hash__(self) -> int: docs=INSTANCE_SEGMENTATION_PREDICTION_KIND_DOCS, ) -BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND_DOCS = f""" -This kind represents batch of predictions from Instance Segmentation Models. - -Example: -``` -# Each prediction in batch is list of dictionaries that contains detected objects (detections) and list of points -providing object contour, -[ - [ - {{"x": 300, "y": 400, "width": 100, "height" 50, "confidence": 0.3, "class": "car", "class_id": 0.1, "detection_id": "random-uuid", "points": [{{"x": 300, "y": 200}}]}}, - {{"x": 600, "y": 900, "width": 100, "height" 50, "confidence": 0.3, "class": "car", "class_id": 0.1, "detection_id": "random-uuid", "points": [{{"x": 300, "y": 200}}}} - ], - [ - {{"x": 300, "y": 400, "width": 100, "height" 50, "confidence": 0.3, "class": "car", "class_id": 0.1, "detection_id": "random-uuid", "points": [{{"x": 300, "y": 200}}}}, - {{"x": 600, "y": 900, "width": 100, "height" 50, "confidence": 0.3, "class": "car", "class_id": 0.1, "detection_id": "random-uuid", "points": [{{"x": 300, "y": 200}}}} - ] -] -``` - -{DOCS_NOTE_ABOUT_BATCH} -""" -BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND = Kind( - name="Batch[instance_segmentation_prediction]", - description="`'predictions'` key from Instance Segmentation Model outputs", - docs=BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND_DOCS, -) KEYPOINT_DETECTION_PREDICTION_KIND_DOCS = """ This kind represents single keypoints prediction in form of @@ -566,33 +441,7 @@ def __hash__(self) -> int: docs=KEYPOINT_DETECTION_PREDICTION_KIND_DOCS, ) -BATCH_OF_KEYPOINT_DETECTION_PREDICTION_KIND_DOCS = f""" -This kind represents batch of predictions from Keypoint Detection Models. - -Example: -``` -# Each prediction in batch is list of dictionaries that contains detected objects (detections) and list of points of -object skeleton. 
-[ - [ - {{"x": 300, "y": 400, "width": 100, "height" 50, "confidence": 0.3, "class": "car", "class_id": 0.1, "detection_id": "random-uuid", "keypoints": [{{"x": 300, "y": 200, "confidence": 0.3, "class_id": 0, "class_name": "tire_center"}}]}}, - {{"x": 600, "y": 900, "width": 100, "height" 50, "confidence": 0.3, "class": "car", "class_id": 0.1, "detection_id": "random-uuid", "keypoints": [{{"x": 300, "y": 200, "confidence": 0.3, "class_id": 0, "class_name": "tire_center"}}}} - ], - [ - {{"x": 300, "y": 400, "width": 100, "height" 50, "confidence": 0.3, "class": "car", "class_id": 0.1, "detection_id": "random-uuid", "keypoints": [{{"x": 300, "y": 200, "confidence": 0.3, "class_id": 0, "class_name": "tire_center"}}}}, - {{"x": 600, "y": 900, "width": 100, "height" 50, "confidence": 0.3, "class": "car", "class_id": 0.1, "detection_id": "random-uuid", "keypoints": [{{"x": 300, "y": 200, "confidence": 0.3, "class_id": 0, "class_name": "tire_center"}}}} - ] -] -``` - -{DOCS_NOTE_ABOUT_BATCH} -""" -BATCH_OF_KEYPOINT_DETECTION_PREDICTION_KIND = Kind( - name="Batch[keypoint_detection_prediction]", - description="`'predictions'` key from Keypoint Detection Model output", - docs=BATCH_OF_KEYPOINT_DETECTION_PREDICTION_KIND_DOCS, -) -BATCH_OF_QR_CODE_DETECTION_KIND_DOCS = f""" +QR_CODE_DETECTION_KIND_DOCS = f""" This kind represents batch of predictions regarding QR codes location and data their provide. Example: @@ -608,15 +457,14 @@ def __hash__(self) -> int: ] ] ``` - -{DOCS_NOTE_ABOUT_BATCH} """ -BATCH_OF_QR_CODE_DETECTION_KIND = Kind( - name="Batch[qr_code_detection]", +QR_CODE_DETECTION_KIND = Kind( + name="qr_code_detection", description="Prediction with QR code detection", - docs=BATCH_OF_QR_CODE_DETECTION_KIND_DOCS, + docs=QR_CODE_DETECTION_KIND_DOCS, ) -BATCH_OF_BAR_CODE_DETECTION_KIND_DOCS = f""" + +BAR_CODE_DETECTION_KIND_DOCS = f""" This kind represents batch of predictions regarding barcodes location and data their provide. Example: @@ -632,15 +480,13 @@ def __hash__(self) -> int: ] ] ``` - -{DOCS_NOTE_ABOUT_BATCH} """ -BATCH_OF_BAR_CODE_DETECTION_KIND = Kind( - name="Batch[bar_code_detection]", +BAR_CODE_DETECTION_KIND = Kind( + name="bar_code_detection", description="Prediction with barcode detection", - docs=BATCH_OF_BAR_CODE_DETECTION_KIND_DOCS, + docs=BAR_CODE_DETECTION_KIND_DOCS, ) -BATCH_OF_PREDICTION_TYPE_KIND_DOCS = f""" +PREDICTION_TYPE_KIND_DOCS = f""" This kind represent batch of prediction metadata providing information about the type of prediction. Examples: @@ -648,15 +494,14 @@ def __hash__(self) -> int: ["object-detection", "object-detection"] ["instance-segmentation", "instance-segmentation"] ``` - -{DOCS_NOTE_ABOUT_BATCH} """ -BATCH_OF_PREDICTION_TYPE_KIND = Kind( - name="Batch[prediction_type]", +PREDICTION_TYPE_KIND = Kind( + name="prediction_type", description="String value with type of prediction", - docs=BATCH_OF_PREDICTION_TYPE_KIND_DOCS, + docs=PREDICTION_TYPE_KIND_DOCS, ) -BATCH_OF_PARENT_ID_KIND_DOCS = f""" + +PARENT_ID_KIND_DOCS = f""" This kind represent batch of prediction metadata providing information about the context of prediction. 
For example - whenever there is a workflow with multiple models - such that first model detect objects and then other models make their predictions based on crops from first model detections - `parent_id` @@ -667,15 +512,14 @@ def __hash__(self) -> int: ["uuid-1", "uuid-1", "uuid-2", "uuid-2"] ["uuid-1", "uuid-1", "uuid-1", "uuid-1"] ``` - -{DOCS_NOTE_ABOUT_BATCH} """ -BATCH_OF_PARENT_ID_KIND = Kind( - name="Batch[parent_id]", +PARENT_ID_KIND = Kind( + name="parent_id", description="Identifier of parent for step output", - docs=BATCH_OF_PARENT_ID_KIND_DOCS, + docs=PARENT_ID_KIND_DOCS, ) -BATCH_OF_IMAGE_METADATA_KIND_DOCS = f""" + +IMAGE_METADATA_KIND_DOCS = f""" This kind represent batch of prediction metadata providing information about the image that prediction was made against. Examples: @@ -683,15 +527,14 @@ def __hash__(self) -> int: [{{"width": 1280, "height": 720}}, {{"width": 1920, "height": 1080}}] [{{"width": 1280, "height": 720}}] ``` - -{DOCS_NOTE_ABOUT_BATCH} """ -BATCH_OF_IMAGE_METADATA_KIND = Kind( - name="Batch[image_metadata]", +IMAGE_METADATA_KIND = Kind( + name="image_metadata", description="Dictionary with image metadata required by supervision", - docs=BATCH_OF_IMAGE_METADATA_KIND_DOCS, + docs=IMAGE_METADATA_KIND_DOCS, ) + STEP_AS_SELECTED_ELEMENT = "step" STEP_OUTPUT_AS_SELECTED_ELEMENT = "step_output" @@ -757,7 +600,7 @@ def WorkflowParameterSelector(kind: Optional[List[Kind]] = None): json_schema_extra={ REFERENCE_KEY: True, SELECTED_ELEMENT_KEY: "workflow_image", - KIND_KEY: [BATCH_OF_IMAGES_KIND.dict()], + KIND_KEY: [IMAGE_KIND.dict()], } ), ] @@ -769,7 +612,7 @@ def WorkflowParameterSelector(kind: Optional[List[Kind]] = None): json_schema_extra={ REFERENCE_KEY: True, SELECTED_ELEMENT_KEY: STEP_OUTPUT_AS_SELECTED_ELEMENT, - KIND_KEY: [BATCH_OF_IMAGES_KIND.dict()], + KIND_KEY: [IMAGE_KIND.dict()], } ), ] @@ -788,3 +631,22 @@ def WorkflowParameterSelector(kind: Optional[List[Kind]] = None): } ), ] + + +# DEPRECATED KINDS - do not use to create new blocks! 
+BATCH_OF_IMAGES_KIND = IMAGE_KIND +BATCH_OF_SERIALISED_PAYLOADS_KIND = SERIALISED_PAYLOADS_KIND +BATCH_OF_BOOLEAN_KIND = BOOLEAN_KIND +BATCH_OF_STRING_KIND = STRING_KIND +BATCH_OF_INTEGER_KIND = INTEGER_KIND +BATCH_OF_TOP_CLASS_KIND = TOP_CLASS_KIND +BATCH_OF_DICTIONARY_KIND = DICTIONARY_KIND +BATCH_OF_CLASSIFICATION_PREDICTION_KIND = CLASSIFICATION_PREDICTION_KIND +BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND = OBJECT_DETECTION_PREDICTION_KIND +BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND = INSTANCE_SEGMENTATION_PREDICTION_KIND +BATCH_OF_KEYPOINT_DETECTION_PREDICTION_KIND = KEYPOINT_DETECTION_PREDICTION_KIND +BATCH_OF_QR_CODE_DETECTION_KIND = QR_CODE_DETECTION_KIND +BATCH_OF_BAR_CODE_DETECTION_KIND = BAR_CODE_DETECTION_KIND +BATCH_OF_PREDICTION_TYPE_KIND = PREDICTION_TYPE_KIND +BATCH_OF_PARENT_ID_KIND = PARENT_ID_KIND +BATCH_OF_IMAGE_METADATA_KIND = IMAGE_METADATA_KIND diff --git a/tests/inference/models_predictions_tests/conftest.py b/tests/inference/models_predictions_tests/conftest.py index b15466df0..30878871b 100644 --- a/tests/inference/models_predictions_tests/conftest.py +++ b/tests/inference/models_predictions_tests/conftest.py @@ -1,14 +1,13 @@ +import json import os.path import shutil import zipfile -from typing import Generator +from typing import Dict, Generator import cv2 import numpy as np import pytest import requests -import json -from typing import Dict from inference.core.env import MODEL_CACHE_DIR @@ -29,12 +28,12 @@ ) - @pytest.fixture(scope="function") def sam2_multipolygon_response() -> Dict: with open(SAM2_MULTI_POLY_RESPONSE_PATH) as f: return json.load(f) + @pytest.fixture(scope="function") def example_image() -> np.ndarray: return cv2.imread(EXAMPLE_IMAGE_PATH) @@ -197,6 +196,7 @@ def sam2_small_model() -> Generator[str, None, None]: yield model_id shutil.rmtree(model_cache_dir) + @pytest.fixture(scope="function") def sam2_tiny_model() -> Generator[str, None, None]: model_id = "sam2/hiera_tiny" @@ -217,6 +217,7 @@ def sam2_small_truck_logits() -> Generator[np.ndarray, None, None]: def sam2_small_truck_mask_from_cached_logits() -> Generator[np.ndarray, None, None]: yield np.load(SAM2_TRUCK_MASK_FROM_CACHE) + def fetch_and_place_model_in_cache( model_id: str, model_package_url: str, diff --git a/tests/inference/models_predictions_tests/test_sam2.py b/tests/inference/models_predictions_tests/test_sam2.py index d1c4299bc..4fbfa9bf0 100644 --- a/tests/inference/models_predictions_tests/test_sam2.py +++ b/tests/inference/models_predictions_tests/test_sam2.py @@ -1,27 +1,27 @@ -import numpy as np -import pytest -import torch import json -import requests from copy import deepcopy +from io import BytesIO +from typing import Dict +import numpy as np +import pytest +import requests +import torch from PIL import Image -from io import BytesIO -from inference.core.entities.requests.sam2 import Sam2PromptSet -from inference.models.sam2 import SegmentAnything2 -from inference.models.sam2.segment_anything2 import ( - hash_prompt_set, - maybe_load_low_res_logits_from_cache, + +from inference.core.entities.requests.sam2 import Sam2PromptSet, Sam2SegmentationRequest +from inference.core.entities.responses.sam2 import Sam2SegmentationPrediction +from inference.core.workflows.core_steps.common.utils import ( + convert_inference_detections_batch_to_sv_detections, ) from inference.core.workflows.core_steps.models.foundation.segment_anything2.v1 import ( convert_sam2_segmentation_response_to_inference_instances_seg_response, ) -from inference.core.workflows.core_steps.common.utils 
import ( - convert_inference_detections_batch_to_sv_detections, +from inference.models.sam2 import SegmentAnything2 +from inference.models.sam2.segment_anything2 import ( + hash_prompt_set, + maybe_load_low_res_logits_from_cache, ) -from inference.core.entities.responses.sam2 import Sam2SegmentationPrediction -from inference.core.entities.requests.sam2 import Sam2SegmentationRequest -from typing import Dict @pytest.mark.slow @@ -236,6 +236,7 @@ def test_sam2_multi_poly(sam2_tiny_model: str, sam2_multipolygon_response: Dict) except Exception as e: raise e + def test_model_clears_cache_properly(sam2_small_model, truck_image): cache_size = 2 model = SegmentAnything2( diff --git a/tests/workflows/integration_tests/execution/test_workflow_with_custom_python_block.py b/tests/workflows/integration_tests/execution/test_workflow_with_custom_python_block.py index 9b9fc6c3d..74a807db2 100644 --- a/tests/workflows/integration_tests/execution/test_workflow_with_custom_python_block.py +++ b/tests/workflows/integration_tests/execution/test_workflow_with_custom_python_block.py @@ -345,9 +345,9 @@ def my_function(self, prediction: sv.Detections, crops: Batch[WorkflowImageData] "selector_types": ["step_output"], "selector_data_kind": { "step_output": [ - "Batch[object_detection_prediction]", - "Batch[instance_segmentation_prediction]", - "Batch[keypoint_detection_prediction]", + "object_detection_prediction", + "instance_segmentation_prediction", + "keypoint_detection_prediction", ] }, }, @@ -362,9 +362,9 @@ def my_function(self, prediction: sv.Detections, crops: Batch[WorkflowImageData] "associated_detections": { "type": "DynamicOutputDefinition", "kind": [ - "Batch[object_detection_prediction]", - "Batch[instance_segmentation_prediction]", - "Batch[keypoint_detection_prediction]", + "object_detection_prediction", + "instance_segmentation_prediction", + "keypoint_detection_prediction", ], } }, @@ -615,7 +615,7 @@ def infer(self, image: WorkflowImageData) -> BlockResult: "predictions": { "type": "DynamicOutputDefinition", "kind": [ - "Batch[object_detection_prediction]", + "object_detection_prediction", ], } }, diff --git a/tests/workflows/integration_tests/execution/test_workflow_with_sam2.py b/tests/workflows/integration_tests/execution/test_workflow_with_sam2.py index 1bc929de7..1da849227 100644 --- a/tests/workflows/integration_tests/execution/test_workflow_with_sam2.py +++ b/tests/workflows/integration_tests/execution/test_workflow_with_sam2.py @@ -229,5 +229,6 @@ def test_grounded_sam2_workflow( "dog", ], "Expected class names to be correct" assert result[0]["sam_predictions"].data["parent_id"].tolist() == [ - 'image.[0]', 'image.[0]' + "image.[0]", + "image.[0]", ], "Expected parent_ids to be correct" diff --git a/tests/workflows/unit_tests/execution_engine/introspection/test_blocks_loader.py b/tests/workflows/unit_tests/execution_engine/introspection/test_blocks_loader.py index 9fad217b9..c84860317 100644 --- a/tests/workflows/unit_tests/execution_engine/introspection/test_blocks_loader.py +++ b/tests/workflows/unit_tests/execution_engine/introspection/test_blocks_loader.py @@ -218,7 +218,7 @@ def test_describe_available_blocks_when_valid_plugins_are_loaded( assert result.blocks[0].manifest_class == plugin_with_valid_blocks.Block1Manifest assert result.blocks[1].block_class == plugin_with_valid_blocks.Block2 assert result.blocks[1].manifest_class == plugin_with_valid_blocks.Block2Manifest - assert len(result.declared_kinds) == 36 + assert len(result.declared_kinds) == 29 
@mock.patch.object(blocks_loader, "load_workflow_blocks") @@ -259,7 +259,7 @@ def test_describe_available_blocks_when_valid_plugins_are_loaded_and_multiple_ve result.blocks[2].manifest_class == plugin_with_multiple_versions_of_blocks.Block2Manifest ) - assert len(result.declared_kinds) == 36 + assert len(result.declared_kinds) == 29 @mock.patch.object(blocks_loader, "load_workflow_blocks") From 9a8575107a92056029b2ee88035c50acbca21677 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20P=C4=99czek?= Date: Wed, 28 Aug 2024 22:02:32 +0200 Subject: [PATCH 2/4] Replace kinds used by blocks --- .../core_steps/classical_cv/contours/v1.py | 4 +- .../classical_cv/convert_grayscale/v1.py | 4 +- .../core_steps/classical_cv/image_blur/v1.py | 4 +- .../classical_cv/pixel_color_count/v1.py | 3 +- .../core_steps/classical_cv/sift/v1.py | 4 +- .../classical_cv/template_matching/v1.py | 8 +-- .../core_steps/classical_cv/threshold/v1.py | 4 +- .../query_language/entities/operations.py | 4 +- .../formatters/property_definition/v1.py | 16 ++--- .../detections_classes_replacement/v1.py | 22 +++--- .../fusion/detections_consensus/v1.py | 14 ++-- .../core_steps/fusion/detections_stitch/v1.py | 12 ++-- inference/core/workflows/core_steps/loader.py | 20 ++++++ .../models/foundation/clip_comparison/v1.py | 12 ++-- .../models/foundation/cog_vlm/v1.py | 22 +++--- .../core_steps/models/foundation/lmm/v1.py | 22 +++--- .../models/foundation/lmm_classifier/v1.py | 23 +++--- .../core_steps/models/foundation/ocr/v1.py | 16 ++--- .../core_steps/models/foundation/openai/v1.py | 22 +++--- .../models/foundation/segment_anything2/v1.py | 14 ++-- .../models/foundation/yolo_world/v1.py | 4 +- .../roboflow/instance_segmentation/v1.py | 4 +- .../models/roboflow/keypoint_detection/v1.py | 4 +- .../roboflow/multi_class_classification/v1.py | 6 +- .../roboflow/multi_label_classification/v1.py | 6 +- .../models/roboflow/object_detection/v1.py | 4 +- .../third_party/barcode_detection/v1.py | 8 +-- .../third_party/qr_code_detection/v1.py | 6 +- .../sinks/roboflow/custom_metadata/v1.py | 19 +++-- .../sinks/roboflow/dataset_upload/v1.py | 22 +++--- .../absolute_static_crop/v1.py | 4 +- .../transformations/detection_offset/v1.py | 18 ++--- .../transformations/detections_filter/v1.py | 18 ++--- .../detections_transformation/v1.py | 18 ++--- .../transformations/dynamic_crop/v1.py | 16 ++--- .../transformations/dynamic_zones/v1.py | 4 +- .../transformations/image_slicer/v1.py | 4 +- .../perspective_correction/v1.py | 16 ++--- .../relative_static_crop/v1.py | 4 +- .../core_steps/visualizations/common/base.py | 16 ++--- .../core_steps/visualizations/halo/v1.py | 4 +- .../core_steps/visualizations/mask/v1.py | 4 +- .../core_steps/visualizations/polygon/v1.py | 4 +- .../execution_engine/entities/base.py | 4 +- .../detections_to_parent_coordinates_batch.py | 18 ++--- ...ections_to_parent_coordinates_non_batch.py | 18 ++--- .../stitch_detections_batch.py | 18 ++--- .../stitch_detections_non_batch.py | 18 ++--- .../tile_detections_batch.py | 16 ++--- .../tile_detections_non_batch.py | 16 ++--- .../__init__.py | 12 ++-- .../plugin_with_test_blocks/blocks.py | 70 +++++++++---------- .../introspection/test_blocks_loader.py | 4 +- .../introspection/test_schema_parser.py | 27 ++++--- .../introspection/test_selectors_parser.py | 4 +- 55 files changed, 337 insertions(+), 351 deletions(-) diff --git a/inference/core/workflows/core_steps/classical_cv/contours/v1.py b/inference/core/workflows/core_steps/classical_cv/contours/v1.py index 
7d79f568f..5a636f1a2 100644 --- a/inference/core/workflows/core_steps/classical_cv/contours/v1.py +++ b/inference/core/workflows/core_steps/classical_cv/contours/v1.py @@ -12,8 +12,8 @@ WorkflowImageData, ) from inference.core.workflows.execution_engine.entities.types import ( - BATCH_OF_IMAGES_KIND, CONTOURS_KIND, + IMAGE_KIND, INTEGER_KIND, NUMPY_ARRAY_KIND, StepOutputImageSelector, @@ -64,7 +64,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: OutputDefinition( name=OUTPUT_IMAGE_KEY, kind=[ - BATCH_OF_IMAGES_KIND, + IMAGE_KIND, ], ), OutputDefinition( diff --git a/inference/core/workflows/core_steps/classical_cv/convert_grayscale/v1.py b/inference/core/workflows/core_steps/classical_cv/convert_grayscale/v1.py index 5ffd184e6..3b800130d 100644 --- a/inference/core/workflows/core_steps/classical_cv/convert_grayscale/v1.py +++ b/inference/core/workflows/core_steps/classical_cv/convert_grayscale/v1.py @@ -11,7 +11,7 @@ WorkflowImageData, ) from inference.core.workflows.execution_engine.entities.types import ( - BATCH_OF_IMAGES_KIND, + IMAGE_KIND, StepOutputImageSelector, WorkflowImageSelector, ) @@ -53,7 +53,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: OutputDefinition( name=OUTPUT_IMAGE_KEY, kind=[ - BATCH_OF_IMAGES_KIND, + IMAGE_KIND, ], ), ] diff --git a/inference/core/workflows/core_steps/classical_cv/image_blur/v1.py b/inference/core/workflows/core_steps/classical_cv/image_blur/v1.py index c38455bf2..e198c8f58 100644 --- a/inference/core/workflows/core_steps/classical_cv/image_blur/v1.py +++ b/inference/core/workflows/core_steps/classical_cv/image_blur/v1.py @@ -12,7 +12,7 @@ WorkflowImageData, ) from inference.core.workflows.execution_engine.entities.types import ( - BATCH_OF_IMAGES_KIND, + IMAGE_KIND, INTEGER_KIND, STRING_KIND, StepOutputImageSelector, @@ -73,7 +73,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: OutputDefinition( name=OUTPUT_IMAGE_KEY, kind=[ - BATCH_OF_IMAGES_KIND, + IMAGE_KIND, ], ), ] diff --git a/inference/core/workflows/core_steps/classical_cv/pixel_color_count/v1.py b/inference/core/workflows/core_steps/classical_cv/pixel_color_count/v1.py index fb84b938d..a6b803608 100644 --- a/inference/core/workflows/core_steps/classical_cv/pixel_color_count/v1.py +++ b/inference/core/workflows/core_steps/classical_cv/pixel_color_count/v1.py @@ -9,7 +9,6 @@ WorkflowImageData, ) from inference.core.workflows.execution_engine.entities.types import ( - BATCH_OF_INTEGER_KIND, INTEGER_KIND, RGB_COLOR_KIND, STRING_KIND, @@ -73,7 +72,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: return [ OutputDefinition( name="matching_pixels_count", - kind=[BATCH_OF_INTEGER_KIND], + kind=[INTEGER_KIND], ), ] diff --git a/inference/core/workflows/core_steps/classical_cv/sift/v1.py b/inference/core/workflows/core_steps/classical_cv/sift/v1.py index fdfda1fef..01c61e1be 100644 --- a/inference/core/workflows/core_steps/classical_cv/sift/v1.py +++ b/inference/core/workflows/core_steps/classical_cv/sift/v1.py @@ -12,8 +12,8 @@ WorkflowImageData, ) from inference.core.workflows.execution_engine.entities.types import ( - BATCH_OF_IMAGES_KIND, IMAGE_KEYPOINTS_KIND, + IMAGE_KIND, NUMPY_ARRAY_KIND, StepOutputImageSelector, WorkflowImageSelector, @@ -66,7 +66,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: return [ OutputDefinition( name=OUTPUT_IMAGE_KEY, - kind=[BATCH_OF_IMAGES_KIND], + kind=[IMAGE_KIND], ), OutputDefinition( name="keypoints", diff --git a/inference/core/workflows/core_steps/classical_cv/template_matching/v1.py 
b/inference/core/workflows/core_steps/classical_cv/template_matching/v1.py index 6b420f399..e05ab4639 100644 --- a/inference/core/workflows/core_steps/classical_cv/template_matching/v1.py +++ b/inference/core/workflows/core_steps/classical_cv/template_matching/v1.py @@ -21,11 +21,11 @@ WorkflowImageData, ) from inference.core.workflows.execution_engine.entities.types import ( - BATCH_OF_INTEGER_KIND, - BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, BOOLEAN_KIND, FLOAT_KIND, FLOAT_ZERO_TO_ONE_KIND, + INTEGER_KIND, + OBJECT_DETECTION_PREDICTION_KIND, FloatZeroToOne, StepOutputImageSelector, WorkflowImageSelector, @@ -107,11 +107,11 @@ def describe_outputs(cls) -> List[OutputDefinition]: return [ OutputDefinition( name="predictions", - kind=[BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND], + kind=[OBJECT_DETECTION_PREDICTION_KIND], ), OutputDefinition( name="number_of_matches", - kind=[BATCH_OF_INTEGER_KIND], + kind=[INTEGER_KIND], ), ] diff --git a/inference/core/workflows/core_steps/classical_cv/threshold/v1.py b/inference/core/workflows/core_steps/classical_cv/threshold/v1.py index ad2f496cf..15be7f953 100644 --- a/inference/core/workflows/core_steps/classical_cv/threshold/v1.py +++ b/inference/core/workflows/core_steps/classical_cv/threshold/v1.py @@ -12,7 +12,7 @@ WorkflowImageData, ) from inference.core.workflows.execution_engine.entities.types import ( - BATCH_OF_IMAGES_KIND, + IMAGE_KIND, INTEGER_KIND, STRING_KIND, StepOutputImageSelector, @@ -86,7 +86,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: OutputDefinition( name=OUTPUT_IMAGE_KEY, kind=[ - BATCH_OF_IMAGES_KIND, + IMAGE_KIND, ], ), ] diff --git a/inference/core/workflows/core_steps/common/query_language/entities/operations.py b/inference/core/workflows/core_steps/common/query_language/entities/operations.py index 855644c64..1f040d6a7 100644 --- a/inference/core/workflows/core_steps/common/query_language/entities/operations.py +++ b/inference/core/workflows/core_steps/common/query_language/entities/operations.py @@ -15,8 +15,8 @@ StatementsGroupsOperator, ) from inference.core.workflows.execution_engine.entities.types import ( - BATCH_OF_CLASSIFICATION_PREDICTION_KIND, BOOLEAN_KIND, + CLASSIFICATION_PREDICTION_KIND, DETECTION_KIND, DICTIONARY_KIND, FLOAT_KIND, @@ -214,7 +214,7 @@ class ClassificationPropertyExtract(OperationDefinition): "(as a list of elements - one element represents single detection)", "compound": False, "input_kind": [ - BATCH_OF_CLASSIFICATION_PREDICTION_KIND, + CLASSIFICATION_PREDICTION_KIND, ], "output_kind": [STRING_KIND, LIST_OF_VALUES_KIND, FLOAT_ZERO_TO_ONE_KIND], }, diff --git a/inference/core/workflows/core_steps/formatters/property_definition/v1.py b/inference/core/workflows/core_steps/formatters/property_definition/v1.py index 8f8e38bc3..b075b7788 100644 --- a/inference/core/workflows/core_steps/formatters/property_definition/v1.py +++ b/inference/core/workflows/core_steps/formatters/property_definition/v1.py @@ -10,10 +10,10 @@ ) from inference.core.workflows.execution_engine.entities.base import OutputDefinition from inference.core.workflows.execution_engine.entities.types import ( - BATCH_OF_CLASSIFICATION_PREDICTION_KIND, - BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, - BATCH_OF_KEYPOINT_DETECTION_PREDICTION_KIND, - BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, + CLASSIFICATION_PREDICTION_KIND, + INSTANCE_SEGMENTATION_PREDICTION_KIND, + KEYPOINT_DETECTION_PREDICTION_KIND, + OBJECT_DETECTION_PREDICTION_KIND, StepOutputSelector, ) from inference.core.workflows.prototypes.block import ( @@ 
-62,10 +62,10 @@ class BlockManifest(WorkflowBlockManifest): ] data: StepOutputSelector( kind=[ - BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, - BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, - BATCH_OF_KEYPOINT_DETECTION_PREDICTION_KIND, - BATCH_OF_CLASSIFICATION_PREDICTION_KIND, + OBJECT_DETECTION_PREDICTION_KIND, + INSTANCE_SEGMENTATION_PREDICTION_KIND, + KEYPOINT_DETECTION_PREDICTION_KIND, + CLASSIFICATION_PREDICTION_KIND, ] ) = Field( description="Reference data to extract property from", diff --git a/inference/core/workflows/core_steps/fusion/detections_classes_replacement/v1.py b/inference/core/workflows/core_steps/fusion/detections_classes_replacement/v1.py index 6533d744f..26bfb287e 100644 --- a/inference/core/workflows/core_steps/fusion/detections_classes_replacement/v1.py +++ b/inference/core/workflows/core_steps/fusion/detections_classes_replacement/v1.py @@ -15,10 +15,10 @@ OutputDefinition, ) from inference.core.workflows.execution_engine.entities.types import ( - BATCH_OF_CLASSIFICATION_PREDICTION_KIND, - BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, - BATCH_OF_KEYPOINT_DETECTION_PREDICTION_KIND, - BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, + CLASSIFICATION_PREDICTION_KIND, + INSTANCE_SEGMENTATION_PREDICTION_KIND, + KEYPOINT_DETECTION_PREDICTION_KIND, + OBJECT_DETECTION_PREDICTION_KIND, StepOutputSelector, ) from inference.core.workflows.prototypes.block import ( @@ -56,9 +56,9 @@ class BlockManifest(WorkflowBlockManifest): ] object_detection_predictions: StepOutputSelector( kind=[ - BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, - BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, - BATCH_OF_KEYPOINT_DETECTION_PREDICTION_KIND, + OBJECT_DETECTION_PREDICTION_KIND, + INSTANCE_SEGMENTATION_PREDICTION_KIND, + KEYPOINT_DETECTION_PREDICTION_KIND, ] ) = Field( title="Regions of Interest", @@ -66,7 +66,7 @@ class BlockManifest(WorkflowBlockManifest): examples=["$steps.my_object_detection_model.predictions"], ) classification_predictions: StepOutputSelector( - kind=[BATCH_OF_CLASSIFICATION_PREDICTION_KIND] + kind=[CLASSIFICATION_PREDICTION_KIND] ) = Field( title="Classification results for crops", description="The output of classification model for crops taken based on RoIs pointed as the other parameter", @@ -94,9 +94,9 @@ def describe_outputs(cls) -> List[OutputDefinition]: OutputDefinition( name="predictions", kind=[ - BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, - BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, - BATCH_OF_KEYPOINT_DETECTION_PREDICTION_KIND, + OBJECT_DETECTION_PREDICTION_KIND, + INSTANCE_SEGMENTATION_PREDICTION_KIND, + KEYPOINT_DETECTION_PREDICTION_KIND, ], ) ] diff --git a/inference/core/workflows/core_steps/fusion/detections_consensus/v1.py b/inference/core/workflows/core_steps/fusion/detections_consensus/v1.py index 898466eaa..c3ef35aff 100644 --- a/inference/core/workflows/core_steps/fusion/detections_consensus/v1.py +++ b/inference/core/workflows/core_steps/fusion/detections_consensus/v1.py @@ -27,14 +27,14 @@ OutputDefinition, ) from inference.core.workflows.execution_engine.entities.types import ( - BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, - BATCH_OF_KEYPOINT_DETECTION_PREDICTION_KIND, - BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, BOOLEAN_KIND, DICTIONARY_KIND, FLOAT_ZERO_TO_ONE_KIND, + INSTANCE_SEGMENTATION_PREDICTION_KIND, INTEGER_KIND, + KEYPOINT_DETECTION_PREDICTION_KIND, LIST_OF_VALUES_KIND, + OBJECT_DETECTION_PREDICTION_KIND, FloatZeroToOne, StepOutputSelector, WorkflowParameterSelector, @@ -83,9 +83,9 @@ class BlockManifest(WorkflowBlockManifest): 
predictions_batches: List[ StepOutputSelector( kind=[ - BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, - BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, - BATCH_OF_KEYPOINT_DETECTION_PREDICTION_KIND, + OBJECT_DETECTION_PREDICTION_KIND, + INSTANCE_SEGMENTATION_PREDICTION_KIND, + KEYPOINT_DETECTION_PREDICTION_KIND, ] ), ] = Field( @@ -162,7 +162,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: return [ OutputDefinition( name="predictions", - kind=[BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND], + kind=[OBJECT_DETECTION_PREDICTION_KIND], ), OutputDefinition( name="object_present", kind=[BOOLEAN_KIND, DICTIONARY_KIND] diff --git a/inference/core/workflows/core_steps/fusion/detections_stitch/v1.py b/inference/core/workflows/core_steps/fusion/detections_stitch/v1.py index a2a346603..1221e7f65 100644 --- a/inference/core/workflows/core_steps/fusion/detections_stitch/v1.py +++ b/inference/core/workflows/core_steps/fusion/detections_stitch/v1.py @@ -19,9 +19,9 @@ WorkflowImageData, ) from inference.core.workflows.execution_engine.entities.types import ( - BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, - BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, FLOAT_ZERO_TO_ONE_KIND, + INSTANCE_SEGMENTATION_PREDICTION_KIND, + OBJECT_DETECTION_PREDICTION_KIND, STRING_KIND, FloatZeroToOne, StepOutputImageSelector, @@ -65,8 +65,8 @@ class BlockManifest(WorkflowBlockManifest): ) predictions: StepOutputSelector( kind=[ - BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, - BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, + OBJECT_DETECTION_PREDICTION_KIND, + INSTANCE_SEGMENTATION_PREDICTION_KIND, ] ) = Field( description="The output of a detection model describing the bounding boxes to be merged.", @@ -105,8 +105,8 @@ def describe_outputs(cls) -> List[OutputDefinition]: OutputDefinition( name="predictions", kind=[ - BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, - BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, + OBJECT_DETECTION_PREDICTION_KIND, + INSTANCE_SEGMENTATION_PREDICTION_KIND, ], ), ] diff --git a/inference/core/workflows/core_steps/loader.py b/inference/core/workflows/core_steps/loader.py index c1db0d6df..a2a90ae63 100644 --- a/inference/core/workflows/core_steps/loader.py +++ b/inference/core/workflows/core_steps/loader.py @@ -174,7 +174,9 @@ TriangleVisualizationBlockV1, ) from inference.core.workflows.execution_engine.entities.types import ( + BAR_CODE_DETECTION_KIND, BOOLEAN_KIND, + CLASSIFICATION_PREDICTION_KIND, CONTOURS_KIND, DETECTION_KIND, DICTIONARY_KIND, @@ -182,17 +184,25 @@ FLOAT_ZERO_TO_ONE_KIND, IMAGE_KEYPOINTS_KIND, IMAGE_KIND, + IMAGE_METADATA_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, INTEGER_KIND, KEYPOINT_DETECTION_PREDICTION_KIND, LIST_OF_VALUES_KIND, + NUMPY_ARRAY_KIND, OBJECT_DETECTION_PREDICTION_KIND, + PARENT_ID_KIND, POINT_KIND, + PREDICTION_TYPE_KIND, + QR_CODE_DETECTION_KIND, RGB_COLOR_KIND, ROBOFLOW_API_KEY_KIND, ROBOFLOW_MODEL_ID_KIND, ROBOFLOW_PROJECT_KIND, + SERIALISED_PAYLOADS_KIND, STRING_KIND, + TOP_CLASS_KIND, + VIDEO_METADATA_KIND, WILDCARD_KIND, ZONE_KIND, Kind, @@ -275,17 +285,21 @@ def load_kinds() -> List[Kind]: return [ WILDCARD_KIND, IMAGE_KIND, + VIDEO_METADATA_KIND, ROBOFLOW_MODEL_ID_KIND, ROBOFLOW_PROJECT_KIND, ROBOFLOW_API_KEY_KIND, FLOAT_ZERO_TO_ONE_KIND, LIST_OF_VALUES_KIND, + SERIALISED_PAYLOADS_KIND, BOOLEAN_KIND, INTEGER_KIND, STRING_KIND, + TOP_CLASS_KIND, FLOAT_KIND, DICTIONARY_KIND, DETECTION_KIND, + CLASSIFICATION_PREDICTION_KIND, POINT_KIND, ZONE_KIND, OBJECT_DETECTION_PREDICTION_KIND, @@ -294,4 +308,10 @@ def load_kinds() -> List[Kind]: RGB_COLOR_KIND, 
IMAGE_KEYPOINTS_KIND, CONTOURS_KIND, + NUMPY_ARRAY_KIND, + QR_CODE_DETECTION_KIND, + BAR_CODE_DETECTION_KIND, + PREDICTION_TYPE_KIND, + PARENT_ID_KIND, + IMAGE_METADATA_KIND, ] diff --git a/inference/core/workflows/core_steps/models/foundation/clip_comparison/v1.py b/inference/core/workflows/core_steps/models/foundation/clip_comparison/v1.py index 195a4f8bf..f8cb0a355 100644 --- a/inference/core/workflows/core_steps/models/foundation/clip_comparison/v1.py +++ b/inference/core/workflows/core_steps/models/foundation/clip_comparison/v1.py @@ -28,9 +28,9 @@ WorkflowImageData, ) from inference.core.workflows.execution_engine.entities.types import ( - BATCH_OF_PARENT_ID_KIND, - BATCH_OF_PREDICTION_TYPE_KIND, LIST_OF_VALUES_KIND, + PARENT_ID_KIND, + PREDICTION_TYPE_KIND, ImageInputField, StepOutputImageSelector, WorkflowImageSelector, @@ -87,11 +87,9 @@ def accepts_batch_input(cls) -> bool: def describe_outputs(cls) -> List[OutputDefinition]: return [ OutputDefinition(name="similarity", kind=[LIST_OF_VALUES_KIND]), - OutputDefinition(name="parent_id", kind=[BATCH_OF_PARENT_ID_KIND]), - OutputDefinition(name="root_parent_id", kind=[BATCH_OF_PARENT_ID_KIND]), - OutputDefinition( - name="prediction_type", kind=[BATCH_OF_PREDICTION_TYPE_KIND] - ), + OutputDefinition(name="parent_id", kind=[PARENT_ID_KIND]), + OutputDefinition(name="root_parent_id", kind=[PARENT_ID_KIND]), + OutputDefinition(name="prediction_type", kind=[PREDICTION_TYPE_KIND]), ] @classmethod diff --git a/inference/core/workflows/core_steps/models/foundation/cog_vlm/v1.py b/inference/core/workflows/core_steps/models/foundation/cog_vlm/v1.py index 24a142bf7..72e9a59d6 100644 --- a/inference/core/workflows/core_steps/models/foundation/cog_vlm/v1.py +++ b/inference/core/workflows/core_steps/models/foundation/cog_vlm/v1.py @@ -24,11 +24,9 @@ WorkflowImageData, ) from inference.core.workflows.execution_engine.entities.types import ( - BATCH_OF_DICTIONARY_KIND, - BATCH_OF_IMAGE_METADATA_KIND, - BATCH_OF_PARENT_ID_KIND, - BATCH_OF_STRING_KIND, DICTIONARY_KIND, + IMAGE_METADATA_KIND, + PARENT_ID_KIND, STRING_KIND, WILDCARD_KIND, ImageInputField, @@ -91,19 +89,19 @@ def accepts_batch_input(cls) -> bool: @classmethod def describe_outputs(cls) -> List[OutputDefinition]: return [ - OutputDefinition(name="parent_id", kind=[BATCH_OF_PARENT_ID_KIND]), - OutputDefinition(name="root_parent_id", kind=[BATCH_OF_PARENT_ID_KIND]), - OutputDefinition(name="image", kind=[BATCH_OF_IMAGE_METADATA_KIND]), - OutputDefinition(name="structured_output", kind=[BATCH_OF_DICTIONARY_KIND]), - OutputDefinition(name="raw_output", kind=[BATCH_OF_STRING_KIND]), + OutputDefinition(name="parent_id", kind=[PARENT_ID_KIND]), + OutputDefinition(name="root_parent_id", kind=[PARENT_ID_KIND]), + OutputDefinition(name="image", kind=[IMAGE_METADATA_KIND]), + OutputDefinition(name="structured_output", kind=[DICTIONARY_KIND]), + OutputDefinition(name="raw_output", kind=[STRING_KIND]), OutputDefinition(name="*", kind=[WILDCARD_KIND]), ] def get_actual_outputs(self) -> List[OutputDefinition]: result = [ - OutputDefinition(name="parent_id", kind=[BATCH_OF_PARENT_ID_KIND]), - OutputDefinition(name="root_parent_id", kind=[BATCH_OF_PARENT_ID_KIND]), - OutputDefinition(name="image", kind=[BATCH_OF_IMAGE_METADATA_KIND]), + OutputDefinition(name="parent_id", kind=[PARENT_ID_KIND]), + OutputDefinition(name="root_parent_id", kind=[PARENT_ID_KIND]), + OutputDefinition(name="image", kind=[IMAGE_METADATA_KIND]), OutputDefinition(name="structured_output", kind=[DICTIONARY_KIND]), 
OutputDefinition(name="raw_output", kind=[STRING_KIND]), ] diff --git a/inference/core/workflows/core_steps/models/foundation/lmm/v1.py b/inference/core/workflows/core_steps/models/foundation/lmm/v1.py index 22a2abd68..0cc5ec092 100644 --- a/inference/core/workflows/core_steps/models/foundation/lmm/v1.py +++ b/inference/core/workflows/core_steps/models/foundation/lmm/v1.py @@ -30,11 +30,9 @@ WorkflowImageData, ) from inference.core.workflows.execution_engine.entities.types import ( - BATCH_OF_DICTIONARY_KIND, - BATCH_OF_IMAGE_METADATA_KIND, - BATCH_OF_PARENT_ID_KIND, - BATCH_OF_STRING_KIND, DICTIONARY_KIND, + IMAGE_METADATA_KIND, + PARENT_ID_KIND, STRING_KIND, WILDCARD_KIND, ImageInputField, @@ -130,19 +128,19 @@ def accepts_batch_input(cls) -> bool: @classmethod def describe_outputs(cls) -> List[OutputDefinition]: return [ - OutputDefinition(name="parent_id", kind=[BATCH_OF_PARENT_ID_KIND]), - OutputDefinition(name="root_parent_id", kind=[BATCH_OF_PARENT_ID_KIND]), - OutputDefinition(name="image", kind=[BATCH_OF_IMAGE_METADATA_KIND]), - OutputDefinition(name="structured_output", kind=[BATCH_OF_DICTIONARY_KIND]), - OutputDefinition(name="raw_output", kind=[BATCH_OF_STRING_KIND]), + OutputDefinition(name="parent_id", kind=[PARENT_ID_KIND]), + OutputDefinition(name="root_parent_id", kind=[PARENT_ID_KIND]), + OutputDefinition(name="image", kind=[IMAGE_METADATA_KIND]), + OutputDefinition(name="structured_output", kind=[DICTIONARY_KIND]), + OutputDefinition(name="raw_output", kind=[STRING_KIND]), OutputDefinition(name="*", kind=[WILDCARD_KIND]), ] def get_actual_outputs(self) -> List[OutputDefinition]: result = [ - OutputDefinition(name="parent_id", kind=[BATCH_OF_PARENT_ID_KIND]), - OutputDefinition(name="root_parent_id", kind=[BATCH_OF_PARENT_ID_KIND]), - OutputDefinition(name="image", kind=[BATCH_OF_IMAGE_METADATA_KIND]), + OutputDefinition(name="parent_id", kind=[PARENT_ID_KIND]), + OutputDefinition(name="root_parent_id", kind=[PARENT_ID_KIND]), + OutputDefinition(name="image", kind=[IMAGE_METADATA_KIND]), OutputDefinition(name="structured_output", kind=[DICTIONARY_KIND]), OutputDefinition(name="raw_output", kind=[STRING_KIND]), ] diff --git a/inference/core/workflows/core_steps/models/foundation/lmm_classifier/v1.py b/inference/core/workflows/core_steps/models/foundation/lmm_classifier/v1.py index daf1606ca..f6b127c15 100644 --- a/inference/core/workflows/core_steps/models/foundation/lmm_classifier/v1.py +++ b/inference/core/workflows/core_steps/models/foundation/lmm_classifier/v1.py @@ -23,13 +23,12 @@ WorkflowImageData, ) from inference.core.workflows.execution_engine.entities.types import ( - BATCH_OF_IMAGE_METADATA_KIND, - BATCH_OF_PARENT_ID_KIND, - BATCH_OF_PREDICTION_TYPE_KIND, - BATCH_OF_STRING_KIND, - BATCH_OF_TOP_CLASS_KIND, + IMAGE_METADATA_KIND, LIST_OF_VALUES_KIND, + PARENT_ID_KIND, + PREDICTION_TYPE_KIND, STRING_KIND, + TOP_CLASS_KIND, ImageInputField, StepOutputImageSelector, WorkflowImageSelector, @@ -99,14 +98,12 @@ def accepts_batch_input(cls) -> bool: @classmethod def describe_outputs(cls) -> List[OutputDefinition]: return [ - OutputDefinition(name="raw_output", kind=[BATCH_OF_STRING_KIND]), - OutputDefinition(name="top", kind=[BATCH_OF_TOP_CLASS_KIND]), - OutputDefinition(name="parent_id", kind=[BATCH_OF_PARENT_ID_KIND]), - OutputDefinition(name="root_parent_id", kind=[BATCH_OF_PARENT_ID_KIND]), - OutputDefinition(name="image", kind=[BATCH_OF_IMAGE_METADATA_KIND]), - OutputDefinition( - name="prediction_type", kind=[BATCH_OF_PREDICTION_TYPE_KIND] - ), + 
OutputDefinition(name="raw_output", kind=[STRING_KIND]), + OutputDefinition(name="top", kind=[TOP_CLASS_KIND]), + OutputDefinition(name="parent_id", kind=[PARENT_ID_KIND]), + OutputDefinition(name="root_parent_id", kind=[PARENT_ID_KIND]), + OutputDefinition(name="image", kind=[IMAGE_METADATA_KIND]), + OutputDefinition(name="prediction_type", kind=[PREDICTION_TYPE_KIND]), ] @classmethod diff --git a/inference/core/workflows/core_steps/models/foundation/ocr/v1.py b/inference/core/workflows/core_steps/models/foundation/ocr/v1.py index 16432668e..0b98c263d 100644 --- a/inference/core/workflows/core_steps/models/foundation/ocr/v1.py +++ b/inference/core/workflows/core_steps/models/foundation/ocr/v1.py @@ -27,9 +27,9 @@ WorkflowImageData, ) from inference.core.workflows.execution_engine.entities.types import ( - BATCH_OF_PARENT_ID_KIND, - BATCH_OF_PREDICTION_TYPE_KIND, - BATCH_OF_STRING_KIND, + PARENT_ID_KIND, + PREDICTION_TYPE_KIND, + STRING_KIND, ImageInputField, StepOutputImageSelector, WorkflowImageSelector, @@ -80,12 +80,10 @@ def accepts_batch_input(cls) -> bool: @classmethod def describe_outputs(cls) -> List[OutputDefinition]: return [ - OutputDefinition(name="result", kind=[BATCH_OF_STRING_KIND]), - OutputDefinition(name="parent_id", kind=[BATCH_OF_PARENT_ID_KIND]), - OutputDefinition(name="root_parent_id", kind=[BATCH_OF_PARENT_ID_KIND]), - OutputDefinition( - name="prediction_type", kind=[BATCH_OF_PREDICTION_TYPE_KIND] - ), + OutputDefinition(name="result", kind=[STRING_KIND]), + OutputDefinition(name="parent_id", kind=[PARENT_ID_KIND]), + OutputDefinition(name="root_parent_id", kind=[PARENT_ID_KIND]), + OutputDefinition(name="prediction_type", kind=[PREDICTION_TYPE_KIND]), ] @classmethod diff --git a/inference/core/workflows/core_steps/models/foundation/openai/v1.py b/inference/core/workflows/core_steps/models/foundation/openai/v1.py index 8364e1e2d..23152dba3 100644 --- a/inference/core/workflows/core_steps/models/foundation/openai/v1.py +++ b/inference/core/workflows/core_steps/models/foundation/openai/v1.py @@ -21,11 +21,9 @@ WorkflowImageData, ) from inference.core.workflows.execution_engine.entities.types import ( - BATCH_OF_DICTIONARY_KIND, - BATCH_OF_IMAGE_METADATA_KIND, - BATCH_OF_PARENT_ID_KIND, - BATCH_OF_STRING_KIND, DICTIONARY_KIND, + IMAGE_METADATA_KIND, + PARENT_ID_KIND, STRING_KIND, WILDCARD_KIND, ImageInputField, @@ -120,19 +118,19 @@ def accepts_batch_input(cls) -> bool: @classmethod def describe_outputs(cls) -> List[OutputDefinition]: return [ - OutputDefinition(name="parent_id", kind=[BATCH_OF_PARENT_ID_KIND]), - OutputDefinition(name="root_parent_id", kind=[BATCH_OF_PARENT_ID_KIND]), - OutputDefinition(name="image", kind=[BATCH_OF_IMAGE_METADATA_KIND]), - OutputDefinition(name="structured_output", kind=[BATCH_OF_DICTIONARY_KIND]), - OutputDefinition(name="raw_output", kind=[BATCH_OF_STRING_KIND]), + OutputDefinition(name="parent_id", kind=[PARENT_ID_KIND]), + OutputDefinition(name="root_parent_id", kind=[PARENT_ID_KIND]), + OutputDefinition(name="image", kind=[IMAGE_METADATA_KIND]), + OutputDefinition(name="structured_output", kind=[DICTIONARY_KIND]), + OutputDefinition(name="raw_output", kind=[STRING_KIND]), OutputDefinition(name="*", kind=[WILDCARD_KIND]), ] def get_actual_outputs(self) -> List[OutputDefinition]: result = [ - OutputDefinition(name="parent_id", kind=[BATCH_OF_PARENT_ID_KIND]), - OutputDefinition(name="root_parent_id", kind=[BATCH_OF_PARENT_ID_KIND]), - OutputDefinition(name="image", kind=[BATCH_OF_IMAGE_METADATA_KIND]), + 
OutputDefinition(name="parent_id", kind=[PARENT_ID_KIND]), + OutputDefinition(name="root_parent_id", kind=[PARENT_ID_KIND]), + OutputDefinition(name="image", kind=[IMAGE_METADATA_KIND]), OutputDefinition(name="structured_output", kind=[DICTIONARY_KIND]), OutputDefinition(name="raw_output", kind=[STRING_KIND]), ] diff --git a/inference/core/workflows/core_steps/models/foundation/segment_anything2/v1.py b/inference/core/workflows/core_steps/models/foundation/segment_anything2/v1.py index 06ed96f7f..299c97060 100644 --- a/inference/core/workflows/core_steps/models/foundation/segment_anything2/v1.py +++ b/inference/core/workflows/core_steps/models/foundation/segment_anything2/v1.py @@ -31,11 +31,11 @@ WorkflowImageData, ) from inference.core.workflows.execution_engine.entities.types import ( - BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, - BATCH_OF_KEYPOINT_DETECTION_PREDICTION_KIND, - BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, BOOLEAN_KIND, FLOAT_KIND, + INSTANCE_SEGMENTATION_PREDICTION_KIND, + KEYPOINT_DETECTION_PREDICTION_KIND, + OBJECT_DETECTION_PREDICTION_KIND, STRING_KIND, ImageInputField, StepOutputImageSelector, @@ -84,9 +84,9 @@ class BlockManifest(WorkflowBlockManifest): boxes: Optional[ StepOutputSelector( kind=[ - BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, - BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, - BATCH_OF_KEYPOINT_DETECTION_PREDICTION_KIND, + OBJECT_DETECTION_PREDICTION_KIND, + INSTANCE_SEGMENTATION_PREDICTION_KIND, + KEYPOINT_DETECTION_PREDICTION_KIND, ] ) ] = Field( # type: ignore @@ -126,7 +126,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: return [ OutputDefinition( name="predictions", - kind=[BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND], + kind=[INSTANCE_SEGMENTATION_PREDICTION_KIND], ), ] diff --git a/inference/core/workflows/core_steps/models/foundation/yolo_world/v1.py b/inference/core/workflows/core_steps/models/foundation/yolo_world/v1.py index 21da651f1..ce9be725c 100644 --- a/inference/core/workflows/core_steps/models/foundation/yolo_world/v1.py +++ b/inference/core/workflows/core_steps/models/foundation/yolo_world/v1.py @@ -23,9 +23,9 @@ WorkflowImageData, ) from inference.core.workflows.execution_engine.entities.types import ( - BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, FLOAT_ZERO_TO_ONE_KIND, LIST_OF_VALUES_KIND, + OBJECT_DETECTION_PREDICTION_KIND, STRING_KIND, FloatZeroToOne, ImageInputField, @@ -108,7 +108,7 @@ def accepts_batch_input(cls) -> bool: def describe_outputs(cls) -> List[OutputDefinition]: return [ OutputDefinition( - name="predictions", kind=[BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND] + name="predictions", kind=[OBJECT_DETECTION_PREDICTION_KIND] ), ] diff --git a/inference/core/workflows/core_steps/models/roboflow/instance_segmentation/v1.py b/inference/core/workflows/core_steps/models/roboflow/instance_segmentation/v1.py index 2fada617f..06b6d9eba 100644 --- a/inference/core/workflows/core_steps/models/roboflow/instance_segmentation/v1.py +++ b/inference/core/workflows/core_steps/models/roboflow/instance_segmentation/v1.py @@ -26,9 +26,9 @@ WorkflowImageData, ) from inference.core.workflows.execution_engine.entities.types import ( - BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, BOOLEAN_KIND, FLOAT_ZERO_TO_ONE_KIND, + INSTANCE_SEGMENTATION_PREDICTION_KIND, INTEGER_KIND, LIST_OF_VALUES_KIND, ROBOFLOW_MODEL_ID_KIND, @@ -166,7 +166,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: return [ OutputDefinition( name="predictions", - kind=[BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND], + 
kind=[INSTANCE_SEGMENTATION_PREDICTION_KIND], ), ] diff --git a/inference/core/workflows/core_steps/models/roboflow/keypoint_detection/v1.py b/inference/core/workflows/core_steps/models/roboflow/keypoint_detection/v1.py index e40a0b618..7d4eccc95 100644 --- a/inference/core/workflows/core_steps/models/roboflow/keypoint_detection/v1.py +++ b/inference/core/workflows/core_steps/models/roboflow/keypoint_detection/v1.py @@ -27,10 +27,10 @@ WorkflowImageData, ) from inference.core.workflows.execution_engine.entities.types import ( - BATCH_OF_KEYPOINT_DETECTION_PREDICTION_KIND, BOOLEAN_KIND, FLOAT_ZERO_TO_ONE_KIND, INTEGER_KIND, + KEYPOINT_DETECTION_PREDICTION_KIND, LIST_OF_VALUES_KIND, ROBOFLOW_MODEL_ID_KIND, ROBOFLOW_PROJECT_KIND, @@ -157,7 +157,7 @@ def accepts_batch_input(cls) -> bool: def describe_outputs(cls) -> List[OutputDefinition]: return [ OutputDefinition( - name="predictions", kind=[BATCH_OF_KEYPOINT_DETECTION_PREDICTION_KIND] + name="predictions", kind=[KEYPOINT_DETECTION_PREDICTION_KIND] ), ] diff --git a/inference/core/workflows/core_steps/models/roboflow/multi_class_classification/v1.py b/inference/core/workflows/core_steps/models/roboflow/multi_class_classification/v1.py index 1e516a96f..49242e087 100644 --- a/inference/core/workflows/core_steps/models/roboflow/multi_class_classification/v1.py +++ b/inference/core/workflows/core_steps/models/roboflow/multi_class_classification/v1.py @@ -23,8 +23,8 @@ WorkflowImageData, ) from inference.core.workflows.execution_engine.entities.types import ( - BATCH_OF_CLASSIFICATION_PREDICTION_KIND, BOOLEAN_KIND, + CLASSIFICATION_PREDICTION_KIND, FLOAT_ZERO_TO_ONE_KIND, ROBOFLOW_MODEL_ID_KIND, ROBOFLOW_PROJECT_KIND, @@ -106,9 +106,7 @@ def accepts_batch_input(cls) -> bool: @classmethod def describe_outputs(cls) -> List[OutputDefinition]: return [ - OutputDefinition( - name="predictions", kind=[BATCH_OF_CLASSIFICATION_PREDICTION_KIND] - ), + OutputDefinition(name="predictions", kind=[CLASSIFICATION_PREDICTION_KIND]), ] @classmethod diff --git a/inference/core/workflows/core_steps/models/roboflow/multi_label_classification/v1.py b/inference/core/workflows/core_steps/models/roboflow/multi_label_classification/v1.py index cf4c1021f..893fabb6a 100644 --- a/inference/core/workflows/core_steps/models/roboflow/multi_label_classification/v1.py +++ b/inference/core/workflows/core_steps/models/roboflow/multi_label_classification/v1.py @@ -23,8 +23,8 @@ WorkflowImageData, ) from inference.core.workflows.execution_engine.entities.types import ( - BATCH_OF_CLASSIFICATION_PREDICTION_KIND, BOOLEAN_KIND, + CLASSIFICATION_PREDICTION_KIND, FLOAT_ZERO_TO_ONE_KIND, ROBOFLOW_MODEL_ID_KIND, ROBOFLOW_PROJECT_KIND, @@ -106,9 +106,7 @@ def accepts_batch_input(cls) -> bool: @classmethod def describe_outputs(cls) -> List[OutputDefinition]: return [ - OutputDefinition( - name="predictions", kind=[BATCH_OF_CLASSIFICATION_PREDICTION_KIND] - ) + OutputDefinition(name="predictions", kind=[CLASSIFICATION_PREDICTION_KIND]) ] @classmethod diff --git a/inference/core/workflows/core_steps/models/roboflow/object_detection/v1.py b/inference/core/workflows/core_steps/models/roboflow/object_detection/v1.py index e2b274412..aca84b4dc 100644 --- a/inference/core/workflows/core_steps/models/roboflow/object_detection/v1.py +++ b/inference/core/workflows/core_steps/models/roboflow/object_detection/v1.py @@ -24,11 +24,11 @@ WorkflowImageData, ) from inference.core.workflows.execution_engine.entities.types import ( - BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, BOOLEAN_KIND, 
FLOAT_ZERO_TO_ONE_KIND, INTEGER_KIND, LIST_OF_VALUES_KIND, + OBJECT_DETECTION_PREDICTION_KIND, ROBOFLOW_MODEL_ID_KIND, ROBOFLOW_PROJECT_KIND, FloatZeroToOne, @@ -146,7 +146,7 @@ def accepts_batch_input(cls) -> bool: def describe_outputs(cls) -> List[OutputDefinition]: return [ OutputDefinition( - name="predictions", kind=[BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND] + name="predictions", kind=[OBJECT_DETECTION_PREDICTION_KIND] ), ] diff --git a/inference/core/workflows/core_steps/models/third_party/barcode_detection/v1.py b/inference/core/workflows/core_steps/models/third_party/barcode_detection/v1.py index c2c9d20e6..052406c6b 100644 --- a/inference/core/workflows/core_steps/models/third_party/barcode_detection/v1.py +++ b/inference/core/workflows/core_steps/models/third_party/barcode_detection/v1.py @@ -22,7 +22,7 @@ WorkflowImageData, ) from inference.core.workflows.execution_engine.entities.types import ( - BATCH_OF_BAR_CODE_DETECTION_KIND, + BAR_CODE_DETECTION_KIND, ImageInputField, StepOutputImageSelector, WorkflowImageSelector, @@ -64,11 +64,7 @@ def accepts_batch_input(cls) -> bool: @classmethod def describe_outputs(cls) -> List[OutputDefinition]: - return [ - OutputDefinition( - name="predictions", kind=[BATCH_OF_BAR_CODE_DETECTION_KIND] - ) - ] + return [OutputDefinition(name="predictions", kind=[BAR_CODE_DETECTION_KIND])] @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: diff --git a/inference/core/workflows/core_steps/models/third_party/qr_code_detection/v1.py b/inference/core/workflows/core_steps/models/third_party/qr_code_detection/v1.py index 71fa13d66..ab4b3dfd5 100644 --- a/inference/core/workflows/core_steps/models/third_party/qr_code_detection/v1.py +++ b/inference/core/workflows/core_steps/models/third_party/qr_code_detection/v1.py @@ -22,7 +22,7 @@ WorkflowImageData, ) from inference.core.workflows.execution_engine.entities.types import ( - BATCH_OF_BAR_CODE_DETECTION_KIND, + QR_CODE_DETECTION_KIND, ImageInputField, StepOutputImageSelector, WorkflowImageSelector, @@ -65,9 +65,7 @@ def accepts_batch_input(cls) -> bool: @classmethod def describe_outputs(cls) -> List[OutputDefinition]: return [ - OutputDefinition( - name="predictions", kind=[BATCH_OF_BAR_CODE_DETECTION_KIND] - ), + OutputDefinition(name="predictions", kind=[QR_CODE_DETECTION_KIND]), ] @classmethod diff --git a/inference/core/workflows/core_steps/sinks/roboflow/custom_metadata/v1.py b/inference/core/workflows/core_steps/sinks/roboflow/custom_metadata/v1.py index e5b1bdd81..e1c0eb5fe 100644 --- a/inference/core/workflows/core_steps/sinks/roboflow/custom_metadata/v1.py +++ b/inference/core/workflows/core_steps/sinks/roboflow/custom_metadata/v1.py @@ -14,12 +14,11 @@ from inference.core.workflows.execution_engine.constants import INFERENCE_ID_KEY from inference.core.workflows.execution_engine.entities.base import OutputDefinition from inference.core.workflows.execution_engine.entities.types import ( - BATCH_OF_CLASSIFICATION_PREDICTION_KIND, - BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, - BATCH_OF_KEYPOINT_DETECTION_PREDICTION_KIND, - BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, - BATCH_OF_STRING_KIND, BOOLEAN_KIND, + CLASSIFICATION_PREDICTION_KIND, + INSTANCE_SEGMENTATION_PREDICTION_KIND, + KEYPOINT_DETECTION_PREDICTION_KIND, + OBJECT_DETECTION_PREDICTION_KIND, STRING_KIND, StepOutputSelector, WorkflowParameterSelector, @@ -58,10 +57,10 @@ class BlockManifest(WorkflowBlockManifest): type: Literal["roboflow_core/roboflow_custom_metadata@v1", "RoboflowCustomMetadata"] predictions: 
StepOutputSelector( kind=[ - BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, - BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, - BATCH_OF_KEYPOINT_DETECTION_PREDICTION_KIND, - BATCH_OF_CLASSIFICATION_PREDICTION_KIND, + OBJECT_DETECTION_PREDICTION_KIND, + INSTANCE_SEGMENTATION_PREDICTION_KIND, + KEYPOINT_DETECTION_PREDICTION_KIND, + CLASSIFICATION_PREDICTION_KIND, ] ) = Field( description="Reference data to extract property from", @@ -70,7 +69,7 @@ class BlockManifest(WorkflowBlockManifest): field_value: Union[ str, WorkflowParameterSelector(kind=[STRING_KIND]), - StepOutputSelector(kind=[BATCH_OF_STRING_KIND]), + StepOutputSelector(kind=[STRING_KIND]), ] = Field( description="This is the name of the metadata field you are creating", examples=["toronto", "pass", "fail"], diff --git a/inference/core/workflows/core_steps/sinks/roboflow/dataset_upload/v1.py b/inference/core/workflows/core_steps/sinks/roboflow/dataset_upload/v1.py index 14df54def..9c988a0b3 100644 --- a/inference/core/workflows/core_steps/sinks/roboflow/dataset_upload/v1.py +++ b/inference/core/workflows/core_steps/sinks/roboflow/dataset_upload/v1.py @@ -39,13 +39,11 @@ WorkflowImageData, ) from inference.core.workflows.execution_engine.entities.types import ( - BATCH_OF_BOOLEAN_KIND, - BATCH_OF_CLASSIFICATION_PREDICTION_KIND, - BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, - BATCH_OF_KEYPOINT_DETECTION_PREDICTION_KIND, - BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, - BATCH_OF_STRING_KIND, BOOLEAN_KIND, + CLASSIFICATION_PREDICTION_KIND, + INSTANCE_SEGMENTATION_PREDICTION_KIND, + KEYPOINT_DETECTION_PREDICTION_KIND, + OBJECT_DETECTION_PREDICTION_KIND, ROBOFLOW_PROJECT_KIND, STRING_KIND, ImageInputField, @@ -93,10 +91,10 @@ class BlockManifest(WorkflowBlockManifest): predictions: Optional[ StepOutputSelector( kind=[ - BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, - BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, - BATCH_OF_KEYPOINT_DETECTION_PREDICTION_KIND, - BATCH_OF_CLASSIFICATION_PREDICTION_KIND, + OBJECT_DETECTION_PREDICTION_KIND, + INSTANCE_SEGMENTATION_PREDICTION_KIND, + KEYPOINT_DETECTION_PREDICTION_KIND, + CLASSIFICATION_PREDICTION_KIND, ] ) ] = Field( @@ -194,8 +192,8 @@ def accepts_batch_input(cls) -> bool: @classmethod def describe_outputs(cls) -> List[OutputDefinition]: return [ - OutputDefinition(name="error_status", kind=[BATCH_OF_BOOLEAN_KIND]), - OutputDefinition(name="message", kind=[BATCH_OF_STRING_KIND]), + OutputDefinition(name="error_status", kind=[BOOLEAN_KIND]), + OutputDefinition(name="message", kind=[STRING_KIND]), ] @classmethod diff --git a/inference/core/workflows/core_steps/transformations/absolute_static_crop/v1.py b/inference/core/workflows/core_steps/transformations/absolute_static_crop/v1.py index 1216a0a2e..5fd22b692 100644 --- a/inference/core/workflows/core_steps/transformations/absolute_static_crop/v1.py +++ b/inference/core/workflows/core_steps/transformations/absolute_static_crop/v1.py @@ -12,7 +12,7 @@ WorkflowImageData, ) from inference.core.workflows.execution_engine.entities.types import ( - BATCH_OF_IMAGES_KIND, + IMAGE_KIND, INTEGER_KIND, ImageInputField, StepOutputImageSelector, @@ -76,7 +76,7 @@ def accepts_batch_input(cls) -> bool: @classmethod def describe_outputs(cls) -> List[OutputDefinition]: return [ - OutputDefinition(name="crops", kind=[BATCH_OF_IMAGES_KIND]), + OutputDefinition(name="crops", kind=[IMAGE_KIND]), ] @classmethod diff --git a/inference/core/workflows/core_steps/transformations/detection_offset/v1.py 
b/inference/core/workflows/core_steps/transformations/detection_offset/v1.py index aa3d0174f..6ec9408d5 100644 --- a/inference/core/workflows/core_steps/transformations/detection_offset/v1.py +++ b/inference/core/workflows/core_steps/transformations/detection_offset/v1.py @@ -15,10 +15,10 @@ OutputDefinition, ) from inference.core.workflows.execution_engine.entities.types import ( - BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, - BATCH_OF_KEYPOINT_DETECTION_PREDICTION_KIND, - BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, + INSTANCE_SEGMENTATION_PREDICTION_KIND, INTEGER_KIND, + KEYPOINT_DETECTION_PREDICTION_KIND, + OBJECT_DETECTION_PREDICTION_KIND, StepOutputSelector, WorkflowParameterSelector, ) @@ -53,9 +53,9 @@ class BlockManifest(WorkflowBlockManifest): type: Literal["roboflow_core/detection_offset@v1", "DetectionOffset"] predictions: StepOutputSelector( kind=[ - BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, - BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, - BATCH_OF_KEYPOINT_DETECTION_PREDICTION_KIND, + OBJECT_DETECTION_PREDICTION_KIND, + INSTANCE_SEGMENTATION_PREDICTION_KIND, + KEYPOINT_DETECTION_PREDICTION_KIND, ] ) = Field( description="Reference to detection-like predictions", @@ -86,9 +86,9 @@ def describe_outputs(cls) -> List[OutputDefinition]: OutputDefinition( name="predictions", kind=[ - BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, - BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, - BATCH_OF_KEYPOINT_DETECTION_PREDICTION_KIND, + OBJECT_DETECTION_PREDICTION_KIND, + INSTANCE_SEGMENTATION_PREDICTION_KIND, + KEYPOINT_DETECTION_PREDICTION_KIND, ], ), ] diff --git a/inference/core/workflows/core_steps/transformations/detections_filter/v1.py b/inference/core/workflows/core_steps/transformations/detections_filter/v1.py index 3459123ab..2f4f79b0a 100644 --- a/inference/core/workflows/core_steps/transformations/detections_filter/v1.py +++ b/inference/core/workflows/core_steps/transformations/detections_filter/v1.py @@ -15,9 +15,9 @@ OutputDefinition, ) from inference.core.workflows.execution_engine.entities.types import ( - BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, - BATCH_OF_KEYPOINT_DETECTION_PREDICTION_KIND, - BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, + INSTANCE_SEGMENTATION_PREDICTION_KIND, + KEYPOINT_DETECTION_PREDICTION_KIND, + OBJECT_DETECTION_PREDICTION_KIND, StepOutputSelector, WorkflowImageSelector, WorkflowParameterSelector, @@ -45,9 +45,9 @@ class BlockManifest(WorkflowBlockManifest): type: Literal["roboflow_core/detections_filter@v1", "DetectionsFilter"] predictions: StepOutputSelector( kind=[ - BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, - BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, - BATCH_OF_KEYPOINT_DETECTION_PREDICTION_KIND, + OBJECT_DETECTION_PREDICTION_KIND, + INSTANCE_SEGMENTATION_PREDICTION_KIND, + KEYPOINT_DETECTION_PREDICTION_KIND, ] ) = Field( description="Reference to detection-like predictions", @@ -73,9 +73,9 @@ def describe_outputs(cls) -> List[OutputDefinition]: OutputDefinition( name="predictions", kind=[ - BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, - BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, - BATCH_OF_KEYPOINT_DETECTION_PREDICTION_KIND, + OBJECT_DETECTION_PREDICTION_KIND, + INSTANCE_SEGMENTATION_PREDICTION_KIND, + KEYPOINT_DETECTION_PREDICTION_KIND, ], ) ] diff --git a/inference/core/workflows/core_steps/transformations/detections_transformation/v1.py b/inference/core/workflows/core_steps/transformations/detections_transformation/v1.py index 63e0a5284..46034bb73 100644 --- 
a/inference/core/workflows/core_steps/transformations/detections_transformation/v1.py +++ b/inference/core/workflows/core_steps/transformations/detections_transformation/v1.py @@ -21,9 +21,9 @@ OutputDefinition, ) from inference.core.workflows.execution_engine.entities.types import ( - BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, - BATCH_OF_KEYPOINT_DETECTION_PREDICTION_KIND, - BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, + INSTANCE_SEGMENTATION_PREDICTION_KIND, + KEYPOINT_DETECTION_PREDICTION_KIND, + OBJECT_DETECTION_PREDICTION_KIND, StepOutputSelector, WorkflowImageSelector, WorkflowParameterSelector, @@ -59,9 +59,9 @@ class BlockManifest(WorkflowBlockManifest): ] predictions: StepOutputSelector( kind=[ - BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, - BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, - BATCH_OF_KEYPOINT_DETECTION_PREDICTION_KIND, + OBJECT_DETECTION_PREDICTION_KIND, + INSTANCE_SEGMENTATION_PREDICTION_KIND, + KEYPOINT_DETECTION_PREDICTION_KIND, ] ) = Field( description="Reference to detection-like predictions", @@ -87,9 +87,9 @@ def describe_outputs(cls) -> List[OutputDefinition]: OutputDefinition( name="predictions", kind=[ - BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, - BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, - BATCH_OF_KEYPOINT_DETECTION_PREDICTION_KIND, + OBJECT_DETECTION_PREDICTION_KIND, + INSTANCE_SEGMENTATION_PREDICTION_KIND, + KEYPOINT_DETECTION_PREDICTION_KIND, ], ) ] diff --git a/inference/core/workflows/core_steps/transformations/dynamic_crop/v1.py b/inference/core/workflows/core_steps/transformations/dynamic_crop/v1.py index ab15a6521..bba42ec00 100644 --- a/inference/core/workflows/core_steps/transformations/dynamic_crop/v1.py +++ b/inference/core/workflows/core_steps/transformations/dynamic_crop/v1.py @@ -13,10 +13,10 @@ WorkflowImageData, ) from inference.core.workflows.execution_engine.entities.types import ( - BATCH_OF_IMAGES_KIND, - BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, - BATCH_OF_KEYPOINT_DETECTION_PREDICTION_KIND, - BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, + IMAGE_KIND, + INSTANCE_SEGMENTATION_PREDICTION_KIND, + KEYPOINT_DETECTION_PREDICTION_KIND, + OBJECT_DETECTION_PREDICTION_KIND, StepOutputImageSelector, StepOutputSelector, WorkflowImageSelector, @@ -57,9 +57,9 @@ class BlockManifest(WorkflowBlockManifest): ) predictions: StepOutputSelector( kind=[ - BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, - BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, - BATCH_OF_KEYPOINT_DETECTION_PREDICTION_KIND, + OBJECT_DETECTION_PREDICTION_KIND, + INSTANCE_SEGMENTATION_PREDICTION_KIND, + KEYPOINT_DETECTION_PREDICTION_KIND, ] ) = Field( title="Regions of Interest", @@ -79,7 +79,7 @@ def get_output_dimensionality_offset(cls) -> int: @classmethod def describe_outputs(cls) -> List[OutputDefinition]: return [ - OutputDefinition(name="crops", kind=[BATCH_OF_IMAGES_KIND]), + OutputDefinition(name="crops", kind=[IMAGE_KIND]), ] @classmethod diff --git a/inference/core/workflows/core_steps/transformations/dynamic_zones/v1.py b/inference/core/workflows/core_steps/transformations/dynamic_zones/v1.py index 9f33a5267..6082ad54f 100644 --- a/inference/core/workflows/core_steps/transformations/dynamic_zones/v1.py +++ b/inference/core/workflows/core_steps/transformations/dynamic_zones/v1.py @@ -10,7 +10,7 @@ OutputDefinition, ) from inference.core.workflows.execution_engine.entities.types import ( - BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, + INSTANCE_SEGMENTATION_PREDICTION_KIND, INTEGER_KIND, LIST_OF_VALUES_KIND, StepOutputSelector, @@ -51,7 +51,7 @@ class 
DynamicZonesManifest(WorkflowBlockManifest): type: Literal[f"{TYPE}", "DynamicZone"] predictions: StepOutputSelector( kind=[ - BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, + INSTANCE_SEGMENTATION_PREDICTION_KIND, ] ) = Field( # type: ignore description="", diff --git a/inference/core/workflows/core_steps/transformations/image_slicer/v1.py b/inference/core/workflows/core_steps/transformations/image_slicer/v1.py index 9d4f5ad60..254aa35a2 100644 --- a/inference/core/workflows/core_steps/transformations/image_slicer/v1.py +++ b/inference/core/workflows/core_steps/transformations/image_slicer/v1.py @@ -14,8 +14,8 @@ WorkflowImageData, ) from inference.core.workflows.execution_engine.entities.types import ( - BATCH_OF_IMAGES_KIND, FLOAT_ZERO_TO_ONE_KIND, + IMAGE_KIND, INTEGER_KIND, StepOutputImageSelector, WorkflowImageSelector, @@ -104,7 +104,7 @@ def get_output_dimensionality_offset(cls) -> int: @classmethod def describe_outputs(cls) -> List[OutputDefinition]: return [ - OutputDefinition(name="slices", kind=[BATCH_OF_IMAGES_KIND]), + OutputDefinition(name="slices", kind=[IMAGE_KIND]), ] @classmethod diff --git a/inference/core/workflows/core_steps/transformations/perspective_correction/v1.py b/inference/core/workflows/core_steps/transformations/perspective_correction/v1.py index 7d0df03e5..b06f7b8e6 100644 --- a/inference/core/workflows/core_steps/transformations/perspective_correction/v1.py +++ b/inference/core/workflows/core_steps/transformations/perspective_correction/v1.py @@ -16,12 +16,12 @@ WorkflowImageData, ) from inference.core.workflows.execution_engine.entities.types import ( - BATCH_OF_IMAGES_KIND, - BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, - BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, BOOLEAN_KIND, + IMAGE_KIND, + INSTANCE_SEGMENTATION_PREDICTION_KIND, INTEGER_KIND, LIST_OF_VALUES_KIND, + OBJECT_DETECTION_PREDICTION_KIND, STRING_KIND, StepOutputImageSelector, StepOutputSelector, @@ -64,8 +64,8 @@ class PerspectiveCorrectionManifest(WorkflowBlockManifest): predictions: Optional[ StepOutputSelector( kind=[ - BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, - BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, + OBJECT_DETECTION_PREDICTION_KIND, + INSTANCE_SEGMENTATION_PREDICTION_KIND, ] ) ] = Field( # type: ignore @@ -109,14 +109,14 @@ def describe_outputs(cls) -> List[OutputDefinition]: OutputDefinition( name=OUTPUT_DETECTIONS_KEY, kind=[ - BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, - BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, + OBJECT_DETECTION_PREDICTION_KIND, + INSTANCE_SEGMENTATION_PREDICTION_KIND, ], ), OutputDefinition( name=OUTPUT_IMAGE_KEY, kind=[ - BATCH_OF_IMAGES_KIND, + IMAGE_KIND, ], ), ] diff --git a/inference/core/workflows/core_steps/transformations/relative_static_crop/v1.py b/inference/core/workflows/core_steps/transformations/relative_static_crop/v1.py index 0a4f9a206..3e052e051 100644 --- a/inference/core/workflows/core_steps/transformations/relative_static_crop/v1.py +++ b/inference/core/workflows/core_steps/transformations/relative_static_crop/v1.py @@ -12,8 +12,8 @@ WorkflowImageData, ) from inference.core.workflows.execution_engine.entities.types import ( - BATCH_OF_IMAGES_KIND, FLOAT_ZERO_TO_ONE_KIND, + IMAGE_KIND, FloatZeroToOne, ImageInputField, StepOutputImageSelector, @@ -81,7 +81,7 @@ def accepts_batch_input(cls) -> bool: @classmethod def describe_outputs(cls) -> List[OutputDefinition]: return [ - OutputDefinition(name="crops", kind=[BATCH_OF_IMAGES_KIND]), + OutputDefinition(name="crops", kind=[IMAGE_KIND]), ] @classmethod diff --git 
a/inference/core/workflows/core_steps/visualizations/common/base.py b/inference/core/workflows/core_steps/visualizations/common/base.py index e3a6b1a5a..1f443b722 100644 --- a/inference/core/workflows/core_steps/visualizations/common/base.py +++ b/inference/core/workflows/core_steps/visualizations/common/base.py @@ -9,11 +9,11 @@ WorkflowImageData, ) from inference.core.workflows.execution_engine.entities.types import ( - BATCH_OF_IMAGES_KIND, - BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, - BATCH_OF_KEYPOINT_DETECTION_PREDICTION_KIND, - BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, BOOLEAN_KIND, + IMAGE_KIND, + INSTANCE_SEGMENTATION_PREDICTION_KIND, + KEYPOINT_DETECTION_PREDICTION_KIND, + OBJECT_DETECTION_PREDICTION_KIND, StepOutputImageSelector, StepOutputSelector, WorkflowImageSelector, @@ -37,9 +37,9 @@ class VisualizationManifest(WorkflowBlockManifest, ABC): ) predictions: StepOutputSelector( kind=[ - BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, - BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, - BATCH_OF_KEYPOINT_DETECTION_PREDICTION_KIND, + OBJECT_DETECTION_PREDICTION_KIND, + INSTANCE_SEGMENTATION_PREDICTION_KIND, + KEYPOINT_DETECTION_PREDICTION_KIND, ] ) = Field( # type: ignore description="Predictions", @@ -63,7 +63,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: OutputDefinition( name=OUTPUT_IMAGE_KEY, kind=[ - BATCH_OF_IMAGES_KIND, + IMAGE_KIND, ], ), ] diff --git a/inference/core/workflows/core_steps/visualizations/halo/v1.py b/inference/core/workflows/core_steps/visualizations/halo/v1.py index 0dab79743..497153abb 100644 --- a/inference/core/workflows/core_steps/visualizations/halo/v1.py +++ b/inference/core/workflows/core_steps/visualizations/halo/v1.py @@ -12,8 +12,8 @@ ) from inference.core.workflows.execution_engine.entities.base import WorkflowImageData from inference.core.workflows.execution_engine.entities.types import ( - BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, FLOAT_ZERO_TO_ONE_KIND, + INSTANCE_SEGMENTATION_PREDICTION_KIND, INTEGER_KIND, FloatZeroToOne, StepOutputSelector, @@ -45,7 +45,7 @@ class HaloManifest(ColorableVisualizationManifest): predictions: StepOutputSelector( kind=[ - BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, + INSTANCE_SEGMENTATION_PREDICTION_KIND, ] ) = Field( # type: ignore description="Predictions", diff --git a/inference/core/workflows/core_steps/visualizations/mask/v1.py b/inference/core/workflows/core_steps/visualizations/mask/v1.py index caeb03bfd..5be3cee9d 100644 --- a/inference/core/workflows/core_steps/visualizations/mask/v1.py +++ b/inference/core/workflows/core_steps/visualizations/mask/v1.py @@ -12,8 +12,8 @@ ) from inference.core.workflows.execution_engine.entities.base import WorkflowImageData from inference.core.workflows.execution_engine.entities.types import ( - BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, FLOAT_ZERO_TO_ONE_KIND, + INSTANCE_SEGMENTATION_PREDICTION_KIND, FloatZeroToOne, StepOutputSelector, WorkflowParameterSelector, @@ -44,7 +44,7 @@ class MaskManifest(ColorableVisualizationManifest): predictions: StepOutputSelector( kind=[ - BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, + INSTANCE_SEGMENTATION_PREDICTION_KIND, ] ) = Field( # type: ignore description="Predictions", diff --git a/inference/core/workflows/core_steps/visualizations/polygon/v1.py b/inference/core/workflows/core_steps/visualizations/polygon/v1.py index ff04cf28a..28a9caca6 100644 --- a/inference/core/workflows/core_steps/visualizations/polygon/v1.py +++ b/inference/core/workflows/core_steps/visualizations/polygon/v1.py @@ -12,7 
+12,7 @@ ) from inference.core.workflows.execution_engine.entities.base import WorkflowImageData from inference.core.workflows.execution_engine.entities.types import ( - BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, + INSTANCE_SEGMENTATION_PREDICTION_KIND, INTEGER_KIND, StepOutputSelector, WorkflowParameterSelector, @@ -43,7 +43,7 @@ class PolygonManifest(ColorableVisualizationManifest): predictions: StepOutputSelector( kind=[ - BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, + INSTANCE_SEGMENTATION_PREDICTION_KIND, ] ) = Field( # type: ignore description="Predictions", diff --git a/inference/core/workflows/execution_engine/entities/base.py b/inference/core/workflows/execution_engine/entities/base.py index c506ccab7..8ef89f448 100644 --- a/inference/core/workflows/execution_engine/entities/base.py +++ b/inference/core/workflows/execution_engine/entities/base.py @@ -27,7 +27,7 @@ load_image_from_url, ) from inference.core.workflows.execution_engine.entities.types import ( - BATCH_OF_IMAGES_KIND, + IMAGE_KIND, VIDEO_METADATA_KIND, WILDCARD_KIND, Kind, @@ -64,7 +64,7 @@ def is_batch_oriented(cls) -> bool: class WorkflowImage(WorkflowInput): type: Literal["WorkflowImage", "InferenceImage"] name: str - kind: List[Kind] = Field(default=[BATCH_OF_IMAGES_KIND]) + kind: List[Kind] = Field(default=[IMAGE_KIND]) @classmethod def is_batch_oriented(cls) -> bool: diff --git a/tests/workflows/integration_tests/execution/stub_plugins/dimensionality_manipulation_plugin/detections_to_parent_coordinates_batch.py b/tests/workflows/integration_tests/execution/stub_plugins/dimensionality_manipulation_plugin/detections_to_parent_coordinates_batch.py index bcde16cf8..974e11d7a 100644 --- a/tests/workflows/integration_tests/execution/stub_plugins/dimensionality_manipulation_plugin/detections_to_parent_coordinates_batch.py +++ b/tests/workflows/integration_tests/execution/stub_plugins/dimensionality_manipulation_plugin/detections_to_parent_coordinates_batch.py @@ -15,9 +15,9 @@ WorkflowImageData, ) from inference.core.workflows.execution_engine.entities.types import ( - BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, - BATCH_OF_KEYPOINT_DETECTION_PREDICTION_KIND, - BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, + INSTANCE_SEGMENTATION_PREDICTION_KIND, + KEYPOINT_DETECTION_PREDICTION_KIND, + OBJECT_DETECTION_PREDICTION_KIND, StepOutputImageSelector, StepOutputSelector, WorkflowImageSelector, @@ -45,9 +45,9 @@ class BlockManifest(WorkflowBlockManifest): ) images_predictions: StepOutputSelector( kind=[ - BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, - BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, - BATCH_OF_KEYPOINT_DETECTION_PREDICTION_KIND, + OBJECT_DETECTION_PREDICTION_KIND, + INSTANCE_SEGMENTATION_PREDICTION_KIND, + KEYPOINT_DETECTION_PREDICTION_KIND, ] ) = Field( description="Reference to predictions of detection-like model, that can be based of cropping " @@ -78,9 +78,9 @@ def describe_outputs(cls) -> List[OutputDefinition]: OutputDefinition( name="predictions", kind=[ - BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, - BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, - BATCH_OF_KEYPOINT_DETECTION_PREDICTION_KIND, + OBJECT_DETECTION_PREDICTION_KIND, + INSTANCE_SEGMENTATION_PREDICTION_KIND, + KEYPOINT_DETECTION_PREDICTION_KIND, ], ), ] diff --git a/tests/workflows/integration_tests/execution/stub_plugins/dimensionality_manipulation_plugin/detections_to_parent_coordinates_non_batch.py b/tests/workflows/integration_tests/execution/stub_plugins/dimensionality_manipulation_plugin/detections_to_parent_coordinates_non_batch.py 
index 6a76c73d8..980ae2faa 100644 --- a/tests/workflows/integration_tests/execution/stub_plugins/dimensionality_manipulation_plugin/detections_to_parent_coordinates_non_batch.py +++ b/tests/workflows/integration_tests/execution/stub_plugins/dimensionality_manipulation_plugin/detections_to_parent_coordinates_non_batch.py @@ -15,9 +15,9 @@ WorkflowImageData, ) from inference.core.workflows.execution_engine.entities.types import ( - BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, - BATCH_OF_KEYPOINT_DETECTION_PREDICTION_KIND, - BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, + INSTANCE_SEGMENTATION_PREDICTION_KIND, + KEYPOINT_DETECTION_PREDICTION_KIND, + OBJECT_DETECTION_PREDICTION_KIND, StepOutputImageSelector, StepOutputSelector, WorkflowImageSelector, @@ -45,9 +45,9 @@ class BlockManifest(WorkflowBlockManifest): ) image_predictions: StepOutputSelector( kind=[ - BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, - BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, - BATCH_OF_KEYPOINT_DETECTION_PREDICTION_KIND, + OBJECT_DETECTION_PREDICTION_KIND, + INSTANCE_SEGMENTATION_PREDICTION_KIND, + KEYPOINT_DETECTION_PREDICTION_KIND, ] ) = Field( description="Reference to predictions of detection-like model, that can be based of cropping " @@ -74,9 +74,9 @@ def describe_outputs(cls) -> List[OutputDefinition]: OutputDefinition( name="predictions", kind=[ - BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, - BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, - BATCH_OF_KEYPOINT_DETECTION_PREDICTION_KIND, + OBJECT_DETECTION_PREDICTION_KIND, + INSTANCE_SEGMENTATION_PREDICTION_KIND, + KEYPOINT_DETECTION_PREDICTION_KIND, ], ), ] diff --git a/tests/workflows/integration_tests/execution/stub_plugins/dimensionality_manipulation_plugin/stitch_detections_batch.py b/tests/workflows/integration_tests/execution/stub_plugins/dimensionality_manipulation_plugin/stitch_detections_batch.py index 9146d4b40..27222bbfd 100644 --- a/tests/workflows/integration_tests/execution/stub_plugins/dimensionality_manipulation_plugin/stitch_detections_batch.py +++ b/tests/workflows/integration_tests/execution/stub_plugins/dimensionality_manipulation_plugin/stitch_detections_batch.py @@ -15,9 +15,9 @@ WorkflowImageData, ) from inference.core.workflows.execution_engine.entities.types import ( - BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, - BATCH_OF_KEYPOINT_DETECTION_PREDICTION_KIND, - BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, + INSTANCE_SEGMENTATION_PREDICTION_KIND, + KEYPOINT_DETECTION_PREDICTION_KIND, + OBJECT_DETECTION_PREDICTION_KIND, StepOutputImageSelector, StepOutputSelector, WorkflowImageSelector, @@ -45,9 +45,9 @@ class BlockManifest(WorkflowBlockManifest): ) images_predictions: StepOutputSelector( kind=[ - BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, - BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, - BATCH_OF_KEYPOINT_DETECTION_PREDICTION_KIND, + OBJECT_DETECTION_PREDICTION_KIND, + INSTANCE_SEGMENTATION_PREDICTION_KIND, + KEYPOINT_DETECTION_PREDICTION_KIND, ] ) = Field( description="Reference to predictions of detection-like model, that can be based of cropping " @@ -78,9 +78,9 @@ def describe_outputs(cls) -> List[OutputDefinition]: OutputDefinition( name="predictions", kind=[ - BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, - BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, - BATCH_OF_KEYPOINT_DETECTION_PREDICTION_KIND, + OBJECT_DETECTION_PREDICTION_KIND, + INSTANCE_SEGMENTATION_PREDICTION_KIND, + KEYPOINT_DETECTION_PREDICTION_KIND, ], ), ] diff --git 
a/tests/workflows/integration_tests/execution/stub_plugins/dimensionality_manipulation_plugin/stitch_detections_non_batch.py b/tests/workflows/integration_tests/execution/stub_plugins/dimensionality_manipulation_plugin/stitch_detections_non_batch.py index ae1a75f59..ea32a62a1 100644 --- a/tests/workflows/integration_tests/execution/stub_plugins/dimensionality_manipulation_plugin/stitch_detections_non_batch.py +++ b/tests/workflows/integration_tests/execution/stub_plugins/dimensionality_manipulation_plugin/stitch_detections_non_batch.py @@ -15,9 +15,9 @@ WorkflowImageData, ) from inference.core.workflows.execution_engine.entities.types import ( - BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, - BATCH_OF_KEYPOINT_DETECTION_PREDICTION_KIND, - BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, + INSTANCE_SEGMENTATION_PREDICTION_KIND, + KEYPOINT_DETECTION_PREDICTION_KIND, + OBJECT_DETECTION_PREDICTION_KIND, StepOutputImageSelector, StepOutputSelector, WorkflowImageSelector, @@ -45,9 +45,9 @@ class BlockManifest(WorkflowBlockManifest): ) image_predictions: StepOutputSelector( kind=[ - BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, - BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, - BATCH_OF_KEYPOINT_DETECTION_PREDICTION_KIND, + OBJECT_DETECTION_PREDICTION_KIND, + INSTANCE_SEGMENTATION_PREDICTION_KIND, + KEYPOINT_DETECTION_PREDICTION_KIND, ] ) = Field( description="Reference to predictions of detection-like model, that can be based of cropping " @@ -74,9 +74,9 @@ def describe_outputs(cls) -> List[OutputDefinition]: OutputDefinition( name="predictions", kind=[ - BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, - BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, - BATCH_OF_KEYPOINT_DETECTION_PREDICTION_KIND, + OBJECT_DETECTION_PREDICTION_KIND, + INSTANCE_SEGMENTATION_PREDICTION_KIND, + KEYPOINT_DETECTION_PREDICTION_KIND, ], ), ] diff --git a/tests/workflows/integration_tests/execution/stub_plugins/dimensionality_manipulation_plugin/tile_detections_batch.py b/tests/workflows/integration_tests/execution/stub_plugins/dimensionality_manipulation_plugin/tile_detections_batch.py index d7b492532..eeb4d5da6 100644 --- a/tests/workflows/integration_tests/execution/stub_plugins/dimensionality_manipulation_plugin/tile_detections_batch.py +++ b/tests/workflows/integration_tests/execution/stub_plugins/dimensionality_manipulation_plugin/tile_detections_batch.py @@ -13,10 +13,10 @@ WorkflowImageData, ) from inference.core.workflows.execution_engine.entities.types import ( - BATCH_OF_IMAGES_KIND, - BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, - BATCH_OF_KEYPOINT_DETECTION_PREDICTION_KIND, - BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, + IMAGE_KIND, + INSTANCE_SEGMENTATION_PREDICTION_KIND, + KEYPOINT_DETECTION_PREDICTION_KIND, + OBJECT_DETECTION_PREDICTION_KIND, StepOutputImageSelector, StepOutputSelector, WorkflowImageSelector, @@ -44,9 +44,9 @@ class BlockManifest(WorkflowBlockManifest): ) crops_predictions: StepOutputSelector( kind=[ - BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, - BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, - BATCH_OF_KEYPOINT_DETECTION_PREDICTION_KIND, + OBJECT_DETECTION_PREDICTION_KIND, + INSTANCE_SEGMENTATION_PREDICTION_KIND, + KEYPOINT_DETECTION_PREDICTION_KIND, ] ) = Field( description="Reference to predictions of detection-like model, that can be based of cropping " @@ -67,7 +67,7 @@ def get_output_dimensionality_offset( @classmethod def describe_outputs(cls) -> List[OutputDefinition]: return [ - OutputDefinition(name="visualisations", kind=[BATCH_OF_IMAGES_KIND]), + 
OutputDefinition(name="visualisations", kind=[IMAGE_KIND]), ] diff --git a/tests/workflows/integration_tests/execution/stub_plugins/dimensionality_manipulation_plugin/tile_detections_non_batch.py b/tests/workflows/integration_tests/execution/stub_plugins/dimensionality_manipulation_plugin/tile_detections_non_batch.py index 1e275d306..cca572d1e 100644 --- a/tests/workflows/integration_tests/execution/stub_plugins/dimensionality_manipulation_plugin/tile_detections_non_batch.py +++ b/tests/workflows/integration_tests/execution/stub_plugins/dimensionality_manipulation_plugin/tile_detections_non_batch.py @@ -13,10 +13,10 @@ WorkflowImageData, ) from inference.core.workflows.execution_engine.entities.types import ( - BATCH_OF_IMAGES_KIND, - BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, - BATCH_OF_KEYPOINT_DETECTION_PREDICTION_KIND, - BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, + IMAGE_KIND, + INSTANCE_SEGMENTATION_PREDICTION_KIND, + KEYPOINT_DETECTION_PREDICTION_KIND, + OBJECT_DETECTION_PREDICTION_KIND, StepOutputImageSelector, StepOutputSelector, WorkflowImageSelector, @@ -44,9 +44,9 @@ class BlockManifest(WorkflowBlockManifest): ) crops_predictions: StepOutputSelector( kind=[ - BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, - BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, - BATCH_OF_KEYPOINT_DETECTION_PREDICTION_KIND, + OBJECT_DETECTION_PREDICTION_KIND, + INSTANCE_SEGMENTATION_PREDICTION_KIND, + KEYPOINT_DETECTION_PREDICTION_KIND, ] ) = Field( description="Reference to predictions of detection-like model, that can be based of cropping " @@ -63,7 +63,7 @@ def get_output_dimensionality_offset( @classmethod def describe_outputs(cls) -> List[OutputDefinition]: return [ - OutputDefinition(name="visualisations", kind=[BATCH_OF_IMAGES_KIND]), + OutputDefinition(name="visualisations", kind=[IMAGE_KIND]), ] diff --git a/tests/workflows/integration_tests/execution/stub_plugins/plugin_handling_video_metadata/__init__.py b/tests/workflows/integration_tests/execution/stub_plugins/plugin_handling_video_metadata/__init__.py index 8ab534823..60e6b4d04 100644 --- a/tests/workflows/integration_tests/execution/stub_plugins/plugin_handling_video_metadata/__init__.py +++ b/tests/workflows/integration_tests/execution/stub_plugins/plugin_handling_video_metadata/__init__.py @@ -9,11 +9,11 @@ VideoMetadata, ) from inference.core.workflows.execution_engine.entities.types import ( - BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, - BATCH_OF_KEYPOINT_DETECTION_PREDICTION_KIND, - BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, + INSTANCE_SEGMENTATION_PREDICTION_KIND, INTEGER_KIND, + KEYPOINT_DETECTION_PREDICTION_KIND, LIST_OF_VALUES_KIND, + OBJECT_DETECTION_PREDICTION_KIND, StepOutputSelector, WorkflowVideoMetadataSelector, ) @@ -57,9 +57,9 @@ class TrackerManifest(WorkflowBlockManifest): metadata: WorkflowVideoMetadataSelector predictions: StepOutputSelector( kind=[ - BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, - BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, - BATCH_OF_KEYPOINT_DETECTION_PREDICTION_KIND, + OBJECT_DETECTION_PREDICTION_KIND, + INSTANCE_SEGMENTATION_PREDICTION_KIND, + KEYPOINT_DETECTION_PREDICTION_KIND, ] ) diff --git a/tests/workflows/unit_tests/execution_engine/compiler/plugin_with_test_blocks/blocks.py b/tests/workflows/unit_tests/execution_engine/compiler/plugin_with_test_blocks/blocks.py index 405f80881..85b8b9a13 100644 --- a/tests/workflows/unit_tests/execution_engine/compiler/plugin_with_test_blocks/blocks.py +++ b/tests/workflows/unit_tests/execution_engine/compiler/plugin_with_test_blocks/blocks.py @@ 
-4,10 +4,10 @@ from inference.core.workflows.execution_engine.entities.base import OutputDefinition from inference.core.workflows.execution_engine.entities.types import ( - BATCH_OF_BOOLEAN_KIND, - BATCH_OF_IMAGES_KIND, - BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, - BATCH_OF_PREDICTION_TYPE_KIND, + BOOLEAN_KIND, + IMAGE_KIND, + OBJECT_DETECTION_PREDICTION_KIND, + PREDICTION_TYPE_KIND, ROBOFLOW_MODEL_ID_KIND, ImageInputField, RoboflowModelField, @@ -38,11 +38,9 @@ class ExampleModelBlockManifest(WorkflowBlockManifest): @classmethod def describe_outputs(cls) -> List[OutputDefinition]: return [ + OutputDefinition(name="prediction_type", kind=[PREDICTION_TYPE_KIND]), OutputDefinition( - name="prediction_type", kind=[BATCH_OF_PREDICTION_TYPE_KIND] - ), - OutputDefinition( - name="predictions", kind=[BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND] + name="predictions", kind=[OBJECT_DETECTION_PREDICTION_KIND] ), ] @@ -95,9 +93,7 @@ def run( class ExampleTransformationBlockManifest(WorkflowBlockManifest): type: Literal["ExampleTransformation"] images: Union[WorkflowImageSelector, StepOutputImageSelector] = ImageInputField - predictions: StepOutputSelector( - kind=[BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND] - ) = Field( + predictions: StepOutputSelector(kind=[OBJECT_DETECTION_PREDICTION_KIND]) = Field( description="Reference to predictions of detection-like model, that can be based of cropping " "(detection must define RoI - eg: bounding box)", examples=["$steps.my_object_detection_model.predictions"], @@ -106,9 +102,9 @@ class ExampleTransformationBlockManifest(WorkflowBlockManifest): @classmethod def describe_outputs(cls) -> List[OutputDefinition]: return [ - OutputDefinition(name="image", kind=[BATCH_OF_IMAGES_KIND]), + OutputDefinition(name="image", kind=[IMAGE_KIND]), OutputDefinition( - name="predictions", kind=[BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND] + name="predictions", kind=[OBJECT_DETECTION_PREDICTION_KIND] ), ] @@ -129,9 +125,7 @@ def run( class ExampleSinkBlockManifest(WorkflowBlockManifest): type: Literal["ExampleSink"] image: Union[WorkflowImageSelector, StepOutputImageSelector] = ImageInputField - predictions: StepOutputSelector( - kind=[BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND] - ) = Field( + predictions: StepOutputSelector(kind=[OBJECT_DETECTION_PREDICTION_KIND]) = Field( description="Reference to predictions of detection-like model, that can be based of cropping " "(detection must define RoI - eg: bounding box)", examples=["$steps.my_object_detection_model.predictions"], @@ -140,7 +134,7 @@ class ExampleSinkBlockManifest(WorkflowBlockManifest): @classmethod def describe_outputs(cls) -> List[OutputDefinition]: return [ - OutputDefinition(name="status", kind=[BATCH_OF_BOOLEAN_KIND]), + OutputDefinition(name="status", kind=[BOOLEAN_KIND]), ] @@ -159,19 +153,19 @@ def run( class ExampleFusionBlockManifest(WorkflowBlockManifest): type: Literal["ExampleFusion"] - predictions: List[ - StepOutputSelector(kind=[BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND]) - ] = Field( - description="Reference to predictions of detection-like model, that can be based of cropping " - "(detection must define RoI - eg: bounding box)", - examples=[["$steps.my_object_detection_model.predictions"]], + predictions: List[StepOutputSelector(kind=[OBJECT_DETECTION_PREDICTION_KIND])] = ( + Field( + description="Reference to predictions of detection-like model, that can be based of cropping " + "(detection must define RoI - eg: bounding box)", + examples=[["$steps.my_object_detection_model.predictions"]], + ) ) 
@classmethod def describe_outputs(cls) -> List[OutputDefinition]: return [ OutputDefinition( - name="predictions", kind=[BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND] + name="predictions", kind=[OBJECT_DETECTION_PREDICTION_KIND] ), ] @@ -191,19 +185,19 @@ def run( class ExampleBlockWithInitManifest(WorkflowBlockManifest): type: Literal["ExampleBlockWithInit"] - predictions: List[ - StepOutputSelector(kind=[BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND]) - ] = Field( - description="Reference to predictions of detection-like model, that can be based of cropping " - "(detection must define RoI - eg: bounding box)", - examples=[["$steps.my_object_detection_model.predictions"]], + predictions: List[StepOutputSelector(kind=[OBJECT_DETECTION_PREDICTION_KIND])] = ( + Field( + description="Reference to predictions of detection-like model, that can be based of cropping " + "(detection must define RoI - eg: bounding box)", + examples=[["$steps.my_object_detection_model.predictions"]], + ) ) @classmethod def describe_outputs(cls) -> List[OutputDefinition]: return [ OutputDefinition( - name="predictions", kind=[BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND] + name="predictions", kind=[OBJECT_DETECTION_PREDICTION_KIND] ), ] @@ -232,19 +226,19 @@ def run( class ExampleBlockWithFaultyInitManifest(WorkflowBlockManifest): type: Literal["ExampleBlockWithFaultyInit"] - predictions: List[ - StepOutputSelector(kind=[BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND]) - ] = Field( - description="Reference to predictions of detection-like model, that can be based of cropping " - "(detection must define RoI - eg: bounding box)", - examples=[["$steps.my_object_detection_model.predictions"]], + predictions: List[StepOutputSelector(kind=[OBJECT_DETECTION_PREDICTION_KIND])] = ( + Field( + description="Reference to predictions of detection-like model, that can be based of cropping " + "(detection must define RoI - eg: bounding box)", + examples=[["$steps.my_object_detection_model.predictions"]], + ) ) @classmethod def describe_outputs(cls) -> List[OutputDefinition]: return [ OutputDefinition( - name="predictions", kind=[BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND] + name="predictions", kind=[OBJECT_DETECTION_PREDICTION_KIND] ), ] diff --git a/tests/workflows/unit_tests/execution_engine/introspection/test_blocks_loader.py b/tests/workflows/unit_tests/execution_engine/introspection/test_blocks_loader.py index c84860317..399860ccd 100644 --- a/tests/workflows/unit_tests/execution_engine/introspection/test_blocks_loader.py +++ b/tests/workflows/unit_tests/execution_engine/introspection/test_blocks_loader.py @@ -218,7 +218,7 @@ def test_describe_available_blocks_when_valid_plugins_are_loaded( assert result.blocks[0].manifest_class == plugin_with_valid_blocks.Block1Manifest assert result.blocks[1].block_class == plugin_with_valid_blocks.Block2 assert result.blocks[1].manifest_class == plugin_with_valid_blocks.Block2Manifest - assert len(result.declared_kinds) == 29 + assert len(result.declared_kinds) == 31 @mock.patch.object(blocks_loader, "load_workflow_blocks") @@ -259,7 +259,7 @@ def test_describe_available_blocks_when_valid_plugins_are_loaded_and_multiple_ve result.blocks[2].manifest_class == plugin_with_multiple_versions_of_blocks.Block2Manifest ) - assert len(result.declared_kinds) == 29 + assert len(result.declared_kinds) == 31 @mock.patch.object(blocks_loader, "load_workflow_blocks") diff --git a/tests/workflows/unit_tests/execution_engine/introspection/test_schema_parser.py 
b/tests/workflows/unit_tests/execution_engine/introspection/test_schema_parser.py index 00b2ed9cf..72cf75818 100644 --- a/tests/workflows/unit_tests/execution_engine/introspection/test_schema_parser.py +++ b/tests/workflows/unit_tests/execution_engine/introspection/test_schema_parser.py @@ -4,10 +4,9 @@ from inference.core.workflows.execution_engine.entities.base import OutputDefinition from inference.core.workflows.execution_engine.entities.types import ( - BATCH_OF_BOOLEAN_KIND, - BATCH_OF_IMAGES_KIND, - BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, BOOLEAN_KIND, + IMAGE_KIND, + OBJECT_DETECTION_PREDICTION_KIND, STRING_KIND, StepOutputImageSelector, StepOutputSelector, @@ -257,7 +256,7 @@ class Manifest(WorkflowBlockManifest): ) step_output_image: StepOutputImageSelector step_output_property: StepOutputSelector( - kind=[BATCH_OF_BOOLEAN_KIND, BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND] + kind=[BOOLEAN_KIND, OBJECT_DETECTION_PREDICTION_KIND] ) step: StepSelector @@ -283,7 +282,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: property_description="not available", allowed_references=[ ReferenceDefinition( - selected_element="workflow_image", kind=[BATCH_OF_IMAGES_KIND] + selected_element="workflow_image", kind=[IMAGE_KIND] ) ], is_list_element=False, @@ -310,7 +309,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: property_description="not available", allowed_references=[ ReferenceDefinition( - selected_element="step_output", kind=[BATCH_OF_IMAGES_KIND] + selected_element="step_output", kind=[IMAGE_KIND] ) ], is_list_element=False, @@ -325,8 +324,8 @@ def describe_outputs(cls) -> List[OutputDefinition]: ReferenceDefinition( selected_element="step_output", kind=[ - BATCH_OF_BOOLEAN_KIND, - BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, + BOOLEAN_KIND, + OBJECT_DETECTION_PREDICTION_KIND, ], ) ], @@ -386,10 +385,10 @@ def describe_outputs(cls) -> List[OutputDefinition]: property_description="not available", allowed_references=[ ReferenceDefinition( - selected_element="workflow_image", kind=[BATCH_OF_IMAGES_KIND] + selected_element="workflow_image", kind=[IMAGE_KIND] ), ReferenceDefinition( - selected_element="step_output", kind=[BATCH_OF_IMAGES_KIND] + selected_element="step_output", kind=[IMAGE_KIND] ), # nested list is ignored ], @@ -441,10 +440,10 @@ def describe_outputs(cls) -> List[OutputDefinition]: property_description="not available", allowed_references=[ ReferenceDefinition( - selected_element="workflow_image", kind=[BATCH_OF_IMAGES_KIND] + selected_element="workflow_image", kind=[IMAGE_KIND] ), ReferenceDefinition( - selected_element="step_output", kind=[BATCH_OF_IMAGES_KIND] + selected_element="step_output", kind=[IMAGE_KIND] ), # nested list is ignored ], @@ -496,10 +495,10 @@ def describe_outputs(cls) -> List[OutputDefinition]: property_description="not available", allowed_references=[ ReferenceDefinition( - selected_element="workflow_image", kind=[BATCH_OF_IMAGES_KIND] + selected_element="workflow_image", kind=[IMAGE_KIND] ), ReferenceDefinition( - selected_element="step_output", kind=[BATCH_OF_IMAGES_KIND] + selected_element="step_output", kind=[IMAGE_KIND] ), # nested list is ignored ], diff --git a/tests/workflows/unit_tests/execution_engine/introspection/test_selectors_parser.py b/tests/workflows/unit_tests/execution_engine/introspection/test_selectors_parser.py index 9d0ed1e87..595cf8d96 100644 --- a/tests/workflows/unit_tests/execution_engine/introspection/test_selectors_parser.py +++ 
b/tests/workflows/unit_tests/execution_engine/introspection/test_selectors_parser.py @@ -4,8 +4,8 @@ from inference.core.workflows.execution_engine.entities.base import OutputDefinition from inference.core.workflows.execution_engine.entities.types import ( - BATCH_OF_IMAGES_KIND, BOOLEAN_KIND, + IMAGE_KIND, STRING_KIND, StepOutputSelector, WorkflowImageSelector, @@ -79,7 +79,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: property_description="not available", allowed_references=[ ReferenceDefinition( - selected_element="workflow_image", kind=[BATCH_OF_IMAGES_KIND] + selected_element="workflow_image", kind=[IMAGE_KIND] ) ], is_list_element=False, From 4d42949b84565ead4646f5680da66576fd25a1dc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20P=C4=99czek?= Date: Thu, 5 Sep 2024 14:44:48 +0200 Subject: [PATCH 3/4] Get rid of all old kinds --- .../classical_cv/camera_focus/v1.py | 6 ++-- .../models/foundation/clip_comparison/v2.py | 11 ++++---- .../sinks/roboflow/dataset_upload/v2.py | 21 ++++++-------- .../execution_engine/entities/types.py | 18 ------------ .../test_workflow_endpoints.py | 28 +++++++++---------- 5 files changed, 29 insertions(+), 55 deletions(-) diff --git a/inference/core/workflows/core_steps/classical_cv/camera_focus/v1.py b/inference/core/workflows/core_steps/classical_cv/camera_focus/v1.py index 7c31bc30a..6a8697a24 100644 --- a/inference/core/workflows/core_steps/classical_cv/camera_focus/v1.py +++ b/inference/core/workflows/core_steps/classical_cv/camera_focus/v1.py @@ -12,10 +12,10 @@ WorkflowImageData, ) from inference.core.workflows.execution_engine.entities.types import ( - BATCH_OF_IMAGES_KIND, FLOAT_KIND, StepOutputImageSelector, WorkflowImageSelector, + IMAGE_KIND, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -57,9 +57,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: return [ OutputDefinition( name=OUTPUT_IMAGE_KEY, - kind=[ - BATCH_OF_IMAGES_KIND, - ], + kind=[IMAGE_KIND], ), OutputDefinition( name="focus_measure", diff --git a/inference/core/workflows/core_steps/models/foundation/clip_comparison/v2.py b/inference/core/workflows/core_steps/models/foundation/clip_comparison/v2.py index 0ba9772d3..a3e2b90e5 100644 --- a/inference/core/workflows/core_steps/models/foundation/clip_comparison/v2.py +++ b/inference/core/workflows/core_steps/models/foundation/clip_comparison/v2.py @@ -27,15 +27,14 @@ WorkflowImageData, ) from inference.core.workflows.execution_engine.entities.types import ( - BATCH_OF_CLASSIFICATION_PREDICTION_KIND, - BATCH_OF_PARENT_ID_KIND, + PARENT_ID_KIND, FLOAT_ZERO_TO_ONE_KIND, LIST_OF_VALUES_KIND, STRING_KIND, ImageInputField, StepOutputImageSelector, WorkflowImageSelector, - WorkflowParameterSelector, + WorkflowParameterSelector, CLASSIFICATION_PREDICTION_KIND, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -110,10 +109,10 @@ def describe_outputs(cls) -> List[OutputDefinition]: OutputDefinition(name="least_similar_class", kind=[STRING_KIND]), OutputDefinition( name="classification_predictions", - kind=[BATCH_OF_CLASSIFICATION_PREDICTION_KIND], + kind=[CLASSIFICATION_PREDICTION_KIND], ), - OutputDefinition(name="parent_id", kind=[BATCH_OF_PARENT_ID_KIND]), - OutputDefinition(name="root_parent_id", kind=[BATCH_OF_PARENT_ID_KIND]), + OutputDefinition(name="parent_id", kind=[PARENT_ID_KIND]), + OutputDefinition(name="root_parent_id", kind=[PARENT_ID_KIND]), ] @classmethod diff --git a/inference/core/workflows/core_steps/sinks/roboflow/dataset_upload/v2.py 
b/inference/core/workflows/core_steps/sinks/roboflow/dataset_upload/v2.py index 0d03667ab..1051c837b 100644 --- a/inference/core/workflows/core_steps/sinks/roboflow/dataset_upload/v2.py +++ b/inference/core/workflows/core_steps/sinks/roboflow/dataset_upload/v2.py @@ -17,12 +17,6 @@ WorkflowImageData, ) from inference.core.workflows.execution_engine.entities.types import ( - BATCH_OF_BOOLEAN_KIND, - BATCH_OF_CLASSIFICATION_PREDICTION_KIND, - BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, - BATCH_OF_KEYPOINT_DETECTION_PREDICTION_KIND, - BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, - BATCH_OF_STRING_KIND, BOOLEAN_KIND, FLOAT_KIND, ROBOFLOW_PROJECT_KIND, @@ -31,7 +25,8 @@ StepOutputImageSelector, StepOutputSelector, WorkflowImageSelector, - WorkflowParameterSelector, + WorkflowParameterSelector, OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, + KEYPOINT_DETECTION_PREDICTION_KIND, CLASSIFICATION_PREDICTION_KIND, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -74,10 +69,10 @@ class BlockManifest(WorkflowBlockManifest): predictions: Optional[ StepOutputSelector( kind=[ - BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND, - BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND, - BATCH_OF_KEYPOINT_DETECTION_PREDICTION_KIND, - BATCH_OF_CLASSIFICATION_PREDICTION_KIND, + OBJECT_DETECTION_PREDICTION_KIND, + INSTANCE_SEGMENTATION_PREDICTION_KIND, + KEYPOINT_DETECTION_PREDICTION_KIND, + CLASSIFICATION_PREDICTION_KIND, ] ) ] = Field( @@ -183,8 +178,8 @@ def accepts_batch_input(cls) -> bool: @classmethod def describe_outputs(cls) -> List[OutputDefinition]: return [ - OutputDefinition(name="error_status", kind=[BATCH_OF_BOOLEAN_KIND]), - OutputDefinition(name="message", kind=[BATCH_OF_STRING_KIND]), + OutputDefinition(name="error_status", kind=[BOOLEAN_KIND]), + OutputDefinition(name="message", kind=[STRING_KIND]), ] @classmethod diff --git a/inference/core/workflows/execution_engine/entities/types.py b/inference/core/workflows/execution_engine/entities/types.py index c4fb696a9..84e58e074 100644 --- a/inference/core/workflows/execution_engine/entities/types.py +++ b/inference/core/workflows/execution_engine/entities/types.py @@ -632,21 +632,3 @@ def WorkflowParameterSelector(kind: Optional[List[Kind]] = None): ), ] - -# DEPRECATED KINDS - do not use to create new blocks! 
-BATCH_OF_IMAGES_KIND = IMAGE_KIND -BATCH_OF_SERIALISED_PAYLOADS_KIND = SERIALISED_PAYLOADS_KIND -BATCH_OF_BOOLEAN_KIND = BOOLEAN_KIND -BATCH_OF_STRING_KIND = STRING_KIND -BATCH_OF_INTEGER_KIND = INTEGER_KIND -BATCH_OF_TOP_CLASS_KIND = TOP_CLASS_KIND -BATCH_OF_DICTIONARY_KIND = DICTIONARY_KIND -BATCH_OF_CLASSIFICATION_PREDICTION_KIND = CLASSIFICATION_PREDICTION_KIND -BATCH_OF_OBJECT_DETECTION_PREDICTION_KIND = OBJECT_DETECTION_PREDICTION_KIND -BATCH_OF_INSTANCE_SEGMENTATION_PREDICTION_KIND = INSTANCE_SEGMENTATION_PREDICTION_KIND -BATCH_OF_KEYPOINT_DETECTION_PREDICTION_KIND = KEYPOINT_DETECTION_PREDICTION_KIND -BATCH_OF_QR_CODE_DETECTION_KIND = QR_CODE_DETECTION_KIND -BATCH_OF_BAR_CODE_DETECTION_KIND = BAR_CODE_DETECTION_KIND -BATCH_OF_PREDICTION_TYPE_KIND = PREDICTION_TYPE_KIND -BATCH_OF_PARENT_ID_KIND = PARENT_ID_KIND -BATCH_OF_IMAGE_METADATA_KIND = IMAGE_METADATA_KIND diff --git a/tests/inference/integration_tests/test_workflow_endpoints.py b/tests/inference/integration_tests/test_workflow_endpoints.py index fa95c1924..83f415a98 100644 --- a/tests/inference/integration_tests/test_workflow_endpoints.py +++ b/tests/inference/integration_tests/test_workflow_endpoints.py @@ -150,9 +150,9 @@ def my_function(self, prediction: sv.Detections, crops: Batch[WorkflowImageData] "selector_types": ["step_output"], "selector_data_kind": { "step_output": [ - "Batch[object_detection_prediction]", - "Batch[instance_segmentation_prediction]", - "Batch[keypoint_detection_prediction]", + "object_detection_prediction", + "instance_segmentation_prediction", + "keypoint_detection_prediction", ] }, }, @@ -167,9 +167,9 @@ def my_function(self, prediction: sv.Detections, crops: Batch[WorkflowImageData] "associated_detections": { "type": "DynamicOutputDefinition", "kind": [ - "Batch[object_detection_prediction]", - "Batch[instance_segmentation_prediction]", - "Batch[keypoint_detection_prediction]", + "object_detection_prediction", + "instance_segmentation_prediction", + "keypoint_detection_prediction", ], } }, @@ -218,7 +218,7 @@ def my_function(self, prediction: sv.Detections, crops: Batch[WorkflowImageData] types_compatible_with_object_detection_predictions = { e["manifest_type_identifier"] for e in response_data["kinds_connections"][ - "Batch[object_detection_prediction]" + "object_detection_prediction" ] } assert ( @@ -248,9 +248,9 @@ def my_function(self, prediction: sv.Detections, crops: Batch[WorkflowImageData] "is_dimensionality_reference": True, "selector_data_kind": { "step_output": [ - "Batch[object_detection_prediction]", - "Batch[instance_segmentation_prediction]", - "Batch[keypoint_detection_prediction]", + "object_detection_prediction", + "instance_segmentation_prediction", + "keypoint_detection_prediction", ] }, }, @@ -265,9 +265,9 @@ def my_function(self, prediction: sv.Detections, crops: Batch[WorkflowImageData] "associated_detections": { "type": "DynamicOutputDefinition", "kind": [ - "Batch[object_detection_prediction]", - "Batch[instance_segmentation_prediction]", - "Batch[keypoint_detection_prediction]", + "object_detection_prediction", + "instance_segmentation_prediction", + "keypoint_detection_prediction", ], } }, @@ -432,7 +432,7 @@ def infer(self, image: WorkflowImageData) -> BlockResult: "predictions": { "type": "DynamicOutputDefinition", "kind": [ - "Batch[object_detection_prediction]", + "object_detection_prediction", ], } }, From 1e40d34a25e9d83a2cb9a5cc5b865515f5c26cfe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20P=C4=99czek?= Date: Thu, 5 Sep 2024 14:45:05 +0200 Subject: 
[PATCH 4/4] Make linters happy --- .../classical_cv/camera_focus/v1.py | 2 +- .../models/foundation/clip_comparison/v2.py | 5 ++- .../sinks/roboflow/dataset_upload/v2.py | 7 +++- .../execution_engine/entities/types.py | 1 - .../core/utils/test_sqlite_wrapper.py | 38 +++++++++++-------- 5 files changed, 32 insertions(+), 21 deletions(-) diff --git a/inference/core/workflows/core_steps/classical_cv/camera_focus/v1.py b/inference/core/workflows/core_steps/classical_cv/camera_focus/v1.py index 6a8697a24..21bcc885f 100644 --- a/inference/core/workflows/core_steps/classical_cv/camera_focus/v1.py +++ b/inference/core/workflows/core_steps/classical_cv/camera_focus/v1.py @@ -13,9 +13,9 @@ ) from inference.core.workflows.execution_engine.entities.types import ( FLOAT_KIND, + IMAGE_KIND, StepOutputImageSelector, WorkflowImageSelector, - IMAGE_KIND, ) from inference.core.workflows.prototypes.block import ( BlockResult, diff --git a/inference/core/workflows/core_steps/models/foundation/clip_comparison/v2.py b/inference/core/workflows/core_steps/models/foundation/clip_comparison/v2.py index a3e2b90e5..165b020cc 100644 --- a/inference/core/workflows/core_steps/models/foundation/clip_comparison/v2.py +++ b/inference/core/workflows/core_steps/models/foundation/clip_comparison/v2.py @@ -27,14 +27,15 @@ WorkflowImageData, ) from inference.core.workflows.execution_engine.entities.types import ( - PARENT_ID_KIND, + CLASSIFICATION_PREDICTION_KIND, FLOAT_ZERO_TO_ONE_KIND, LIST_OF_VALUES_KIND, + PARENT_ID_KIND, STRING_KIND, ImageInputField, StepOutputImageSelector, WorkflowImageSelector, - WorkflowParameterSelector, CLASSIFICATION_PREDICTION_KIND, + WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, diff --git a/inference/core/workflows/core_steps/sinks/roboflow/dataset_upload/v2.py b/inference/core/workflows/core_steps/sinks/roboflow/dataset_upload/v2.py index 1051c837b..ee59d2a7f 100644 --- a/inference/core/workflows/core_steps/sinks/roboflow/dataset_upload/v2.py +++ b/inference/core/workflows/core_steps/sinks/roboflow/dataset_upload/v2.py @@ -18,15 +18,18 @@ ) from inference.core.workflows.execution_engine.entities.types import ( BOOLEAN_KIND, + CLASSIFICATION_PREDICTION_KIND, FLOAT_KIND, + INSTANCE_SEGMENTATION_PREDICTION_KIND, + KEYPOINT_DETECTION_PREDICTION_KIND, + OBJECT_DETECTION_PREDICTION_KIND, ROBOFLOW_PROJECT_KIND, STRING_KIND, ImageInputField, StepOutputImageSelector, StepOutputSelector, WorkflowImageSelector, - WorkflowParameterSelector, OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, - KEYPOINT_DETECTION_PREDICTION_KIND, CLASSIFICATION_PREDICTION_KIND, + WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, diff --git a/inference/core/workflows/execution_engine/entities/types.py b/inference/core/workflows/execution_engine/entities/types.py index 84e58e074..bbca55105 100644 --- a/inference/core/workflows/execution_engine/entities/types.py +++ b/inference/core/workflows/execution_engine/entities/types.py @@ -631,4 +631,3 @@ def WorkflowParameterSelector(kind: Optional[List[Kind]] = None): } ), ] - diff --git a/tests/inference/unit_tests/core/utils/test_sqlite_wrapper.py b/tests/inference/unit_tests/core/utils/test_sqlite_wrapper.py index 8babad383..f380a5476 100644 --- a/tests/inference/unit_tests/core/utils/test_sqlite_wrapper.py +++ b/tests/inference/unit_tests/core/utils/test_sqlite_wrapper.py @@ -65,7 +65,9 @@ def test_select_no_limit(): def test_select_with_exclusive(): # given 
conn = sqlite3.connect(":memory:") - q = SQLiteWrapper(db_file_path="", table_name="test", columns={"col1": "TEXT"}, connection=conn) + q = SQLiteWrapper( + db_file_path="", table_name="test", columns={"col1": "TEXT"}, connection=conn + ) # when q.insert(values={"col1": "lorem"}, connection=conn) @@ -73,10 +75,7 @@ def test_select_with_exclusive(): rows = q.select(connection=conn, with_exclusive=True) # then - assert rows == [ - {"id": 1, "col1": "lorem"}, - {"id": 2, "col1": "ipsum"} - ] + assert rows == [{"id": 1, "col1": "lorem"}, {"id": 2, "col1": "ipsum"}] conn.close() @@ -84,7 +83,9 @@ def test_select_from_cursor(): # given conn = sqlite3.connect(":memory:") curr = conn.cursor() - q = SQLiteWrapper(db_file_path="", table_name="test", columns={"col1": "TEXT"}, connection=conn) + q = SQLiteWrapper( + db_file_path="", table_name="test", columns={"col1": "TEXT"}, connection=conn + ) # when q.insert(values={"col1": "lorem"}, connection=conn) @@ -92,10 +93,7 @@ def test_select_from_cursor(): rows = q.select(cursor=curr) # then - assert rows == [ - {"id": 1, "col1": "lorem"}, - {"id": 2, "col1": "ipsum"} - ] + assert rows == [{"id": 1, "col1": "lorem"}, {"id": 2, "col1": "ipsum"}] conn.close() @@ -159,7 +157,9 @@ def test_flush_limit(): def test_delete(): # given conn = sqlite3.connect(":memory:") - q = SQLiteWrapper(db_file_path="", table_name="test", columns={"col1": "TEXT"}, connection=conn) + q = SQLiteWrapper( + db_file_path="", table_name="test", columns={"col1": "TEXT"}, connection=conn + ) # when q.insert(values={"col1": "lorem"}, connection=conn) @@ -179,7 +179,9 @@ def test_delete(): def test_delete_non_existent(): # given conn = sqlite3.connect(":memory:") - q = SQLiteWrapper(db_file_path="", table_name="test", columns={"col1": "TEXT"}, connection=conn) + q = SQLiteWrapper( + db_file_path="", table_name="test", columns={"col1": "TEXT"}, connection=conn + ) # when q.insert(values={"col1": "lorem"}, connection=conn) @@ -198,7 +200,9 @@ def test_delete_non_existent(): def test_delete_with_exclusive(): # given conn = sqlite3.connect(":memory:") - q = SQLiteWrapper(db_file_path="", table_name="test", columns={"col1": "TEXT"}, connection=conn) + q = SQLiteWrapper( + db_file_path="", table_name="test", columns={"col1": "TEXT"}, connection=conn + ) # when q.insert(values={"col1": "lorem"}, connection=conn) @@ -207,7 +211,9 @@ def test_delete_with_exclusive(): rows = q.select(connection=conn) rows_to_be_deleted = rows[:-1] rows_to_be_kept = rows[-1:] - deleted_rows = q.delete(connection=conn, rows=rows_to_be_deleted, with_exclusive=True) + deleted_rows = q.delete( + connection=conn, rows=rows_to_be_deleted, with_exclusive=True + ) # then assert deleted_rows == rows_to_be_deleted @@ -219,7 +225,9 @@ def test_delete_from_cursor(): # given conn = sqlite3.connect(":memory:") curr = conn.cursor() - q = SQLiteWrapper(db_file_path="", table_name="test", columns={"col1": "TEXT"}, connection=conn) + q = SQLiteWrapper( + db_file_path="", table_name="test", columns={"col1": "TEXT"}, connection=conn + ) # when q.insert(values={"col1": "lorem"}, connection=conn)