From 25f16ef757e7874b925b426ecf5d1130206d3c2c Mon Sep 17 00:00:00 2001 From: Roja Reddy Sareddy Date: Tue, 28 Jan 2025 23:57:10 -0800 Subject: [PATCH 01/27] change: Allow telemetry only in supported regions --- src/sagemaker/telemetry/constants.py | 35 ++++++++++++++++++ src/sagemaker/telemetry/telemetry_logging.py | 16 +++++++-- .../telemetry/test_telemetry_logging.py | 36 +++++++++++++++++++ 3 files changed, 85 insertions(+), 2 deletions(-) diff --git a/src/sagemaker/telemetry/constants.py b/src/sagemaker/telemetry/constants.py index 2108ff9fd6..28bb758b05 100644 --- a/src/sagemaker/telemetry/constants.py +++ b/src/sagemaker/telemetry/constants.py @@ -42,3 +42,38 @@ class Status(Enum): def __str__(self): # pylint: disable=E0307 """Return the status name.""" return self.name + + +class Region(str, Enum): + # Classic + US_EAST_1 = "us-east-1" # IAD + US_EAST_2 = "us-east-2" # CMH + US_WEST_1 = "us-west-1" # SFO + US_WEST_2 = "us-west-2" # PDX + AP_NORTHEAST_1 = "ap-northeast-1" # NRT + AP_NORTHEAST_2 = "ap-northeast-2" # ICN + AP_NORTHEAST_3 = "ap-northeast-3" # KIX + AP_SOUTH_1 = "ap-south-1" # BOM + AP_SOUTHEAST_1 = "ap-southeast-1" # SIN + AP_SOUTHEAST_2 = "ap-southeast-2" # SYD + CA_CENTRAL_1 = "ca-central-1" # YUL + EU_CENTRAL_1 = "eu-central-1" # FRA + EU_NORTH_1 = "eu-north-1" # ARN + EU_WEST_1 = "eu-west-1" # DUB + EU_WEST_2 = "eu-west-2" # LHR + EU_WEST_3 = "eu-west-3" # CDG + SA_EAST_1 = "sa-east-1" # GRU + # Opt-in + AP_EAST_1 = "ap-east-1" # HKG + AP_SOUTHEAST_3 = "ap-southeast-3" # CGK + AF_SOUTH_1 = "af-south-1" # CPT + EU_SOUTH_1 = "eu-south-1" # MXP + ME_SOUTH_1 = "me-south-1" # BAH + MX_CENTRAL_1 = "mx-central-1" # QRO + AP_SOUTHEAST_7 = "ap-southeast-7" # BKK + AP_SOUTH_2 = "ap-south-2" # HYD + AP_SOUTHEAST_4 = "ap-southeast-4" # MEL + EU_CENTRAL_2 = "eu-central-2" # ZRH + EU_SOUTH_2 = "eu-south-2" # ZAZ + IL_CENTRAL_1 = "il-central-1" # TLV + ME_CENTRAL_1 = "me-central-1" # DXB diff --git a/src/sagemaker/telemetry/telemetry_logging.py b/src/sagemaker/telemetry/telemetry_logging.py index b45550b2c2..55c0e205d9 100644 --- a/src/sagemaker/telemetry/telemetry_logging.py +++ b/src/sagemaker/telemetry/telemetry_logging.py @@ -27,6 +27,7 @@ from sagemaker.telemetry.constants import ( Feature, Status, + Region, DEFAULT_AWS_REGION, ) from sagemaker.user_agent import SDK_VERSION, process_studio_metadata_file @@ -189,8 +190,19 @@ def _send_telemetry_request( """Make GET request to an empty object in S3 bucket""" try: accountId = _get_accountId(session) if session else "NotAvailable" - # telemetry will be sent to us-west-2 if no session availale - region = _get_region_or_default(session) if session else DEFAULT_AWS_REGION + + # Validate region if session exists + if session: + region = _get_region_or_default(session) + try: + Region(region) + except ValueError: + logger.debug( + "Region not found in supported regions. Telemetry request will not be emitted." + ) + return + else: # telemetry will be sent to us-west-2 if no session available + region = DEFAULT_AWS_REGION url = _construct_url( accountId, region, diff --git a/tests/unit/sagemaker/telemetry/test_telemetry_logging.py b/tests/unit/sagemaker/telemetry/test_telemetry_logging.py index 9107256b5b..bd8db82a16 100644 --- a/tests/unit/sagemaker/telemetry/test_telemetry_logging.py +++ b/tests/unit/sagemaker/telemetry/test_telemetry_logging.py @@ -300,3 +300,39 @@ def test_get_default_sagemaker_session_with_no_region(self): assert "Must setup local AWS configuration with a region supported by SageMaker." 
in str( context.exception ) + + @patch("sagemaker.telemetry.telemetry_logging._get_accountId") + @patch("sagemaker.telemetry.telemetry_logging._get_region_or_default") + def test_send_telemetry_request_valid_region(self, mock_get_region, mock_get_accountId): + """Test to verify telemetry request is sent when region is valid""" + mock_get_accountId.return_value = "testAccountId" + mock_session = MagicMock() + + # Test with valid region + mock_get_region.return_value = "us-east-1" + with patch( + "sagemaker.telemetry.telemetry_logging._requests_helper" + ) as mock_requests_helper: + _send_telemetry_request(1, [1, 2], mock_session) + # Assert telemetry request was sent + mock_requests_helper.assert_called_once_with( + "https://sm-pysdk-t-us-east-1.s3.us-east-1.amazonaws.com/telemetry?" + "x-accountId=testAccountId&x-status=1&x-feature=1,2", + 2, + ) + + @patch("sagemaker.telemetry.telemetry_logging._get_accountId") + @patch("sagemaker.telemetry.telemetry_logging._get_region_or_default") + def test_send_telemetry_request_invalid_region(self, mock_get_region, mock_get_accountId): + """Test to verify telemetry request is not sent when region is invalid""" + mock_get_accountId.return_value = "testAccountId" + mock_session = MagicMock() + + # Test with invalid region + mock_get_region.return_value = "invalid-region" + with patch( + "sagemaker.telemetry.telemetry_logging._requests_helper" + ) as mock_requests_helper: + _send_telemetry_request(1, [1, 2], mock_session) + # Assert telemetry request was not sent + mock_requests_helper.assert_not_called() From 0ed85d67e6cfd8bfe39529a12aaa5dfe43785835 Mon Sep 17 00:00:00 2001 From: Roja Reddy Sareddy Date: Tue, 28 Jan 2025 23:57:10 -0800 Subject: [PATCH 02/27] change: Allow telemetry only in supported regions --- src/sagemaker/telemetry/constants.py | 37 ++++++++++++++++++++ src/sagemaker/telemetry/telemetry_logging.py | 2 +- 2 files changed, 38 insertions(+), 1 deletion(-) diff --git a/src/sagemaker/telemetry/constants.py b/src/sagemaker/telemetry/constants.py index 28bb758b05..a18a4a4a0f 100644 --- a/src/sagemaker/telemetry/constants.py +++ b/src/sagemaker/telemetry/constants.py @@ -44,6 +44,43 @@ def __str__(self): # pylint: disable=E0307 return self.name +class Region(str, Enum): + """Telemetry: List of all supported AWS regions.""" + + # Classic + US_EAST_1 = "us-east-1" # IAD + US_EAST_2 = "us-east-2" # CMH + US_WEST_1 = "us-west-1" # SFO + US_WEST_2 = "us-west-2" # PDX + AP_NORTHEAST_1 = "ap-northeast-1" # NRT + AP_NORTHEAST_2 = "ap-northeast-2" # ICN + AP_NORTHEAST_3 = "ap-northeast-3" # KIX + AP_SOUTH_1 = "ap-south-1" # BOM + AP_SOUTHEAST_1 = "ap-southeast-1" # SIN + AP_SOUTHEAST_2 = "ap-southeast-2" # SYD + CA_CENTRAL_1 = "ca-central-1" # YUL + EU_CENTRAL_1 = "eu-central-1" # FRA + EU_NORTH_1 = "eu-north-1" # ARN + EU_WEST_1 = "eu-west-1" # DUB + EU_WEST_2 = "eu-west-2" # LHR + EU_WEST_3 = "eu-west-3" # CDG + SA_EAST_1 = "sa-east-1" # GRU + # Opt-in + AP_EAST_1 = "ap-east-1" # HKG + AP_SOUTHEAST_3 = "ap-southeast-3" # CGK + AF_SOUTH_1 = "af-south-1" # CPT + EU_SOUTH_1 = "eu-south-1" # MXP + ME_SOUTH_1 = "me-south-1" # BAH + MX_CENTRAL_1 = "mx-central-1" # QRO + AP_SOUTHEAST_7 = "ap-southeast-7" # BKK + AP_SOUTH_2 = "ap-south-2" # HYD + AP_SOUTHEAST_4 = "ap-southeast-4" # MEL + EU_CENTRAL_2 = "eu-central-2" # ZRH + EU_SOUTH_2 = "eu-south-2" # ZAZ + IL_CENTRAL_1 = "il-central-1" # TLV + ME_CENTRAL_1 = "me-central-1" # DXB + + class Region(str, Enum): # Classic US_EAST_1 = "us-east-1" # IAD diff --git a/src/sagemaker/telemetry/telemetry_logging.py 
b/src/sagemaker/telemetry/telemetry_logging.py index 55c0e205d9..887a574ca1 100644 --- a/src/sagemaker/telemetry/telemetry_logging.py +++ b/src/sagemaker/telemetry/telemetry_logging.py @@ -197,7 +197,7 @@ def _send_telemetry_request( try: Region(region) except ValueError: - logger.debug( + logger.warning( "Region not found in supported regions. Telemetry request will not be emitted." ) return From b69ffcb952948626166c35ec4264d6fff8a2ce17 Mon Sep 17 00:00:00 2001 From: Roja Reddy Sareddy Date: Tue, 28 Jan 2025 23:57:10 -0800 Subject: [PATCH 03/27] change: Allow telemetry only in supported regions --- src/sagemaker/telemetry/constants.py | 34 ---------------------------- 1 file changed, 34 deletions(-) diff --git a/src/sagemaker/telemetry/constants.py b/src/sagemaker/telemetry/constants.py index a18a4a4a0f..d6f19dc3d2 100644 --- a/src/sagemaker/telemetry/constants.py +++ b/src/sagemaker/telemetry/constants.py @@ -80,37 +80,3 @@ class Region(str, Enum): IL_CENTRAL_1 = "il-central-1" # TLV ME_CENTRAL_1 = "me-central-1" # DXB - -class Region(str, Enum): - # Classic - US_EAST_1 = "us-east-1" # IAD - US_EAST_2 = "us-east-2" # CMH - US_WEST_1 = "us-west-1" # SFO - US_WEST_2 = "us-west-2" # PDX - AP_NORTHEAST_1 = "ap-northeast-1" # NRT - AP_NORTHEAST_2 = "ap-northeast-2" # ICN - AP_NORTHEAST_3 = "ap-northeast-3" # KIX - AP_SOUTH_1 = "ap-south-1" # BOM - AP_SOUTHEAST_1 = "ap-southeast-1" # SIN - AP_SOUTHEAST_2 = "ap-southeast-2" # SYD - CA_CENTRAL_1 = "ca-central-1" # YUL - EU_CENTRAL_1 = "eu-central-1" # FRA - EU_NORTH_1 = "eu-north-1" # ARN - EU_WEST_1 = "eu-west-1" # DUB - EU_WEST_2 = "eu-west-2" # LHR - EU_WEST_3 = "eu-west-3" # CDG - SA_EAST_1 = "sa-east-1" # GRU - # Opt-in - AP_EAST_1 = "ap-east-1" # HKG - AP_SOUTHEAST_3 = "ap-southeast-3" # CGK - AF_SOUTH_1 = "af-south-1" # CPT - EU_SOUTH_1 = "eu-south-1" # MXP - ME_SOUTH_1 = "me-south-1" # BAH - MX_CENTRAL_1 = "mx-central-1" # QRO - AP_SOUTHEAST_7 = "ap-southeast-7" # BKK - AP_SOUTH_2 = "ap-south-2" # HYD - AP_SOUTHEAST_4 = "ap-southeast-4" # MEL - EU_CENTRAL_2 = "eu-central-2" # ZRH - EU_SOUTH_2 = "eu-south-2" # ZAZ - IL_CENTRAL_1 = "il-central-1" # TLV - ME_CENTRAL_1 = "me-central-1" # DXB From 8d7f4a8e3c1645b4d892479cbcbd723951e77081 Mon Sep 17 00:00:00 2001 From: Roja Reddy Sareddy Date: Wed, 29 Jan 2025 13:03:28 -0800 Subject: [PATCH 04/27] change: Allow telemetry only in supported regions --- src/sagemaker/telemetry/constants.py | 1 - 1 file changed, 1 deletion(-) diff --git a/src/sagemaker/telemetry/constants.py b/src/sagemaker/telemetry/constants.py index d6f19dc3d2..cb83a78279 100644 --- a/src/sagemaker/telemetry/constants.py +++ b/src/sagemaker/telemetry/constants.py @@ -79,4 +79,3 @@ class Region(str, Enum): EU_SOUTH_2 = "eu-south-2" # ZAZ IL_CENTRAL_1 = "il-central-1" # TLV ME_CENTRAL_1 = "me-central-1" # DXB - From dadbb220d42205b4658dde8e09861a4b72389507 Mon Sep 17 00:00:00 2001 From: Roja Reddy Sareddy Date: Thu, 30 Jan 2025 12:18:59 -0800 Subject: [PATCH 05/27] change: Allow telemetry only in supported regions --- src/sagemaker/telemetry/telemetry_logging.py | 22 +++++++++----------- 1 file changed, 10 insertions(+), 12 deletions(-) diff --git a/src/sagemaker/telemetry/telemetry_logging.py b/src/sagemaker/telemetry/telemetry_logging.py index 887a574ca1..b0ecedee4c 100644 --- a/src/sagemaker/telemetry/telemetry_logging.py +++ b/src/sagemaker/telemetry/telemetry_logging.py @@ -190,19 +190,16 @@ def _send_telemetry_request( """Make GET request to an empty object in S3 bucket""" try: accountId = _get_accountId(session) if session 
else "NotAvailable" + region = _get_region_or_default(session) + + try: + Region(region) # Validate the region + except ValueError: + logger.warning( + "Region not found in supported regions. Telemetry request will not be emitted." + ) + return - # Validate region if session exists - if session: - region = _get_region_or_default(session) - try: - Region(region) - except ValueError: - logger.warning( - "Region not found in supported regions. Telemetry request will not be emitted." - ) - return - else: # telemetry will be sent to us-west-2 if no session available - region = DEFAULT_AWS_REGION url = _construct_url( accountId, region, @@ -280,6 +277,7 @@ def _get_region_or_default(session): def _get_default_sagemaker_session(): """Return the default sagemaker session""" + boto_session = boto3.Session(region_name=DEFAULT_AWS_REGION) sagemaker_session = Session(boto_session=boto_session) From 7775c635e26ba11045fe1dccb020b44c86c6cf83 Mon Sep 17 00:00:00 2001 From: Roja Reddy Sareddy Date: Mon, 24 Feb 2025 09:34:28 -0800 Subject: [PATCH 06/27] documentation: Removed a line about python version requirements of training script which can misguide users.Training script can be of latest version based on the support provided by framework_version of the container --- doc/frameworks/pytorch/using_pytorch.rst | 2 -- 1 file changed, 2 deletions(-) diff --git a/doc/frameworks/pytorch/using_pytorch.rst b/doc/frameworks/pytorch/using_pytorch.rst index c50376920e..4141dd84db 100644 --- a/doc/frameworks/pytorch/using_pytorch.rst +++ b/doc/frameworks/pytorch/using_pytorch.rst @@ -28,8 +28,6 @@ To train a PyTorch model by using the SageMaker Python SDK: Prepare a PyTorch Training Script ================================= -Your PyTorch training script must be a Python 3.6 compatible source file. - Prepare your script in a separate source file than the notebook, terminal session, or source file you're using to submit the script to SageMaker via a ``PyTorch`` Estimator. This will be discussed in further detail below. From 58f8746ea537cf5918ead6515944657b4ce22356 Mon Sep 17 00:00:00 2001 From: Roja Reddy Sareddy Date: Wed, 12 Mar 2025 08:49:04 -0700 Subject: [PATCH 07/27] feature: Enabled update_endpoint through model_builder --- src/sagemaker/huggingface/model.py | 5 + src/sagemaker/model.py | 53 +++++-- src/sagemaker/serve/builder/model_builder.py | 15 +- src/sagemaker/session.py | 36 +++++ src/sagemaker/tensorflow/model.py | 2 + tests/unit/sagemaker/model/test_deploy.py | 140 ++++++++++++++++++ .../serve/builder/test_model_builder.py | 87 ++++++++++- 7 files changed, 320 insertions(+), 18 deletions(-) diff --git a/src/sagemaker/huggingface/model.py b/src/sagemaker/huggingface/model.py index 05b981d21b..f4b44fc057 100644 --- a/src/sagemaker/huggingface/model.py +++ b/src/sagemaker/huggingface/model.py @@ -218,6 +218,7 @@ def deploy( container_startup_health_check_timeout=None, inference_recommendation_id=None, explainer_config=None, + update_endpoint: Optional[bool] = False, **kwargs, ): """Deploy this ``Model`` to an ``Endpoint`` and optionally return a ``Predictor``. @@ -296,6 +297,9 @@ def deploy( would like to deploy the model and endpoint with recommended parameters. explainer_config (sagemaker.explainer.ExplainerConfig): Specifies online explainability configuration for use with Amazon SageMaker Clarify. (default: None) + update_endpoint (Optional[bool]): Flag to update the model in an existing Amazon SageMaker endpoint. 
+ If True, this will deploy a new EndpointConfig to an already existing endpoint and delete resources + corresponding to the previous EndpointConfig. Default: False Raises: ValueError: If arguments combination check failed in these circumstances: - If no role is specified or @@ -335,6 +339,7 @@ def deploy( container_startup_health_check_timeout=container_startup_health_check_timeout, inference_recommendation_id=inference_recommendation_id, explainer_config=explainer_config, + update_endpoint=update_endpoint, **kwargs, ) diff --git a/src/sagemaker/model.py b/src/sagemaker/model.py index e5ea1ea314..24be862ecc 100644 --- a/src/sagemaker/model.py +++ b/src/sagemaker/model.py @@ -53,7 +53,6 @@ from sagemaker.model_card.schema_constraints import ModelApprovalStatusEnum from sagemaker.session import Session from sagemaker.model_metrics import ModelMetrics -from sagemaker.deprecations import removed_kwargs from sagemaker.drift_check_baselines import DriftCheckBaselines from sagemaker.explainer import ExplainerConfig from sagemaker.metadata_properties import MetadataProperties @@ -1386,6 +1385,7 @@ def deploy( routing_config: Optional[Dict[str, Any]] = None, model_reference_arn: Optional[str] = None, inference_ami_version: Optional[str] = None, + update_endpoint: Optional[bool] = False, **kwargs, ): """Deploy this ``Model`` to an ``Endpoint`` and optionally return a ``Predictor``. @@ -1497,6 +1497,10 @@ def deploy( inference_ami_version (Optional [str]): Specifies an option from a collection of preconfigured Amazon Machine Image (AMI) images. For a full list of options, see: https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_ProductionVariant.html + update_endpoint (Optional[bool]): Flag to update the model in an existing Amazon SageMaker endpoint. + If True, this will deploy a new EndpointConfig to an already existing endpoint and delete resources + corresponding to the previous EndpointConfig. Default: False + Note: Currently this is supported for single model endpoints Raises: ValueError: If arguments combination check failed in these circumstances: - If no role is specified or @@ -1512,8 +1516,6 @@ def deploy( """ self.accept_eula = accept_eula - removed_kwargs("update_endpoint", kwargs) - self._init_sagemaker_session_if_does_not_exist(instance_type) # Depending on the instance type, a local session (or) a session is initialized. 
self.role = resolve_value_from_config( @@ -1628,6 +1630,8 @@ def deploy( # Support multiple models on same endpoint if endpoint_type == EndpointType.INFERENCE_COMPONENT_BASED: + if update_endpoint: + raise ValueError("Currently update_endpoint is supported for single model endpoints") if endpoint_name: self.endpoint_name = endpoint_name else: @@ -1783,17 +1787,38 @@ def deploy( if is_explainer_enabled: explainer_config_dict = explainer_config._to_request_dict() - self.sagemaker_session.endpoint_from_production_variants( - name=self.endpoint_name, - production_variants=[production_variant], - tags=tags, - kms_key=kms_key, - wait=wait, - data_capture_config_dict=data_capture_config_dict, - explainer_config_dict=explainer_config_dict, - async_inference_config_dict=async_inference_config_dict, - live_logging=endpoint_logging, - ) + if update_endpoint: + endpoint_config_name = self.sagemaker_session.create_endpoint_config( + name=self.name, + model_name=self.name, + initial_instance_count=initial_instance_count, + instance_type=instance_type, + accelerator_type=accelerator_type, + tags=tags, + kms_key=kms_key, + data_capture_config_dict=data_capture_config_dict, + volume_size=volume_size, + model_data_download_timeout=model_data_download_timeout, + container_startup_health_check_timeout=container_startup_health_check_timeout, + explainer_config_dict=explainer_config_dict, + async_inference_config_dict=async_inference_config_dict, + serverless_inference_config=serverless_inference_config_dict, + routing_config=routing_config, + inference_ami_version=inference_ami_version, + ) + self.sagemaker_session.update_endpoint(self.endpoint_name, endpoint_config_name) + else: + self.sagemaker_session.endpoint_from_production_variants( + name=self.endpoint_name, + production_variants=[production_variant], + tags=tags, + kms_key=kms_key, + wait=wait, + data_capture_config_dict=data_capture_config_dict, + explainer_config_dict=explainer_config_dict, + async_inference_config_dict=async_inference_config_dict, + live_logging=endpoint_logging, + ) if self.predictor_cls: predictor = self.predictor_cls(self.endpoint_name, self.sagemaker_session) diff --git a/src/sagemaker/serve/builder/model_builder.py b/src/sagemaker/serve/builder/model_builder.py index a7a518105c..1a4746a6db 100644 --- a/src/sagemaker/serve/builder/model_builder.py +++ b/src/sagemaker/serve/builder/model_builder.py @@ -1602,6 +1602,7 @@ def deploy( ResourceRequirements, ] ] = None, + update_endpoint: Optional[bool] = False, ) -> Union[Predictor, Transformer]: """Deploys the built Model. @@ -1615,24 +1616,32 @@ def deploy( AsyncInferenceConfig, BatchTransformInferenceConfig, ResourceRequirements]]) : Additional Config for different deployment types such as serverless, async, batch and multi-model/container + update_endpoint (Optional[bool]): Flag to update the model in an existing Amazon SageMaker endpoint. + If True, this will deploy a new EndpointConfig to an already existing endpoint and delete resources + corresponding to the previous EndpointConfig. 
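
For orientation, the update path that Model.deploy takes above boils down to two SageMaker API calls: register a fresh EndpointConfig, then repoint the live endpoint at it (the service blue/green swaps the fleet and retires the previous config's resources). A rough boto3 equivalent; the endpoint, model, and config names below are placeholders:

    import boto3

    sm = boto3.client("sagemaker")

    # 1. Register a new config describing the desired fleet.
    sm.create_endpoint_config(
        EndpointConfigName="my-endpoint-config-v2",
        ProductionVariants=[
            {
                "VariantName": "AllTraffic",
                "ModelName": "my-model",  # assumed to be already registered
                "InstanceType": "ml.m5.xlarge",
                "InitialInstanceCount": 1,
            }
        ],
    )

    # 2. Point the existing endpoint at the new config; SageMaker provisions
    #    the new fleet, shifts traffic, then tears the old fleet down.
    sm.update_endpoint(
        EndpointName="my-endpoint",
        EndpointConfigName="my-endpoint-config-v2",
    )
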
Default: False + Note: Currently this is supported for single model endpoints Returns: Transformer for Batch Deployments Predictors for all others """ if not hasattr(self, "built_model"): raise ValueError("Model Needs to be built before deploying") - endpoint_name = unique_name_from_base(endpoint_name) + if not update_endpoint: + endpoint_name = unique_name_from_base(endpoint_name) + if not inference_config: # Real-time Deployment return self.built_model.deploy( instance_type=self.instance_type, initial_instance_count=initial_instance_count, endpoint_name=endpoint_name, + update_endpoint=update_endpoint, ) if isinstance(inference_config, ServerlessInferenceConfig): return self.built_model.deploy( serverless_inference_config=inference_config, endpoint_name=endpoint_name, + update_endpoint=update_endpoint, ) if isinstance(inference_config, AsyncInferenceConfig): @@ -1641,6 +1650,7 @@ def deploy( initial_instance_count=initial_instance_count, async_inference_config=inference_config, endpoint_name=endpoint_name, + update_endpoint=update_endpoint, ) if isinstance(inference_config, BatchTransformInferenceConfig): @@ -1652,6 +1662,8 @@ def deploy( return transformer if isinstance(inference_config, ResourceRequirements): + if update_endpoint: + raise ValueError("Currently update_endpoint is supported for single model endpoints") # Multi Model and MultiContainer endpoints with Inference Component return self.built_model.deploy( instance_type=self.instance_type, @@ -1660,6 +1672,7 @@ def deploy( resources=inference_config, initial_instance_count=initial_instance_count, role=self.role_arn, + update_endpoint=update_endpoint, ) raise ValueError("Deployment Options not supported") diff --git a/src/sagemaker/session.py b/src/sagemaker/session.py index b2398e03d1..4a65b3ccb5 100644 --- a/src/sagemaker/session.py +++ b/src/sagemaker/session.py @@ -4488,6 +4488,10 @@ def create_endpoint_config( model_data_download_timeout=None, container_startup_health_check_timeout=None, explainer_config_dict=None, + async_inference_config_dict=None, + serverless_inference_config_dict=None, + routing_config: Optional[Dict[str, Any]] = None, + inference_ami_version: Optional[str] = None, ): """Create an Amazon SageMaker endpoint configuration. @@ -4525,6 +4529,27 @@ def create_endpoint_config( -inference-algo-ping-requests explainer_config_dict (dict): Specifies configuration to enable explainers. Default: None. + async_inference_config_dict (dict): Specifies + configuration related to async endpoint. Use this configuration when trying + to create async endpoint and make async inference. If empty config object + passed through, will use default config to deploy async endpoint. Deploy a + real-time endpoint if it's None. (default: None). + serverless_inference_config_dict (dict): + Specifies configuration related to serverless endpoint. Use this configuration + when trying to create serverless endpoint and make serverless inference. If + empty object passed through, will use pre-defined values in + ``ServerlessInferenceConfig`` class to deploy serverless endpoint. Deploy an + instance based endpoint if it's None. (default: None). + routing_config (Optional[Dict[str, Any]): Settings the control how the endpoint routes incoming + traffic to the instances that the endpoint hosts. + Currently, support dictionary key ``RoutingStrategy``. + + .. 
code:: python + + { + "RoutingStrategy": sagemaker.enums.RoutingStrategy.RANDOM + } + inference_ami_version (Optional [str]): Specifies an option from a collection of preconfigured Example: >>> tags = [{'Key': 'tagname', 'Value': 'tagvalue'}] @@ -4544,9 +4569,12 @@ def create_endpoint_config( instance_type, initial_instance_count, accelerator_type=accelerator_type, + serverless_inference_config=serverless_inference_config_dict, volume_size=volume_size, model_data_download_timeout=model_data_download_timeout, container_startup_health_check_timeout=container_startup_health_check_timeout, + routing_config=routing_config, + inference_ami_version=inference_ami_version, ) production_variants = [provided_production_variant] # Currently we just inject CoreDumpConfig.KmsKeyId from the config for production variant. @@ -4586,6 +4614,14 @@ def create_endpoint_config( ) request["DataCaptureConfig"] = inferred_data_capture_config_dict + if async_inference_config_dict is not None: + inferred_async_inference_config_dict = update_nested_dictionary_with_values_from_config( + async_inference_config_dict, + ENDPOINT_CONFIG_ASYNC_INFERENCE_PATH, + sagemaker_session=self, + ) + request["AsyncInferenceConfig"] = inferred_async_inference_config_dict + if explainer_config_dict is not None: request["ExplainerConfig"] = explainer_config_dict diff --git a/src/sagemaker/tensorflow/model.py b/src/sagemaker/tensorflow/model.py index c7f624114f..b384cbbbb5 100644 --- a/src/sagemaker/tensorflow/model.py +++ b/src/sagemaker/tensorflow/model.py @@ -358,6 +358,7 @@ def deploy( container_startup_health_check_timeout=None, inference_recommendation_id=None, explainer_config=None, + update_endpoint: Optional[bool] = False, **kwargs, ): """Deploy a Tensorflow ``Model`` to a SageMaker ``Endpoint``.""" @@ -383,6 +384,7 @@ def deploy( container_startup_health_check_timeout=container_startup_health_check_timeout, inference_recommendation_id=inference_recommendation_id, explainer_config=explainer_config, + update_endpoint=update_endpoint, **kwargs, ) diff --git a/tests/unit/sagemaker/model/test_deploy.py b/tests/unit/sagemaker/model/test_deploy.py index 7b99281b96..49a67871e7 100644 --- a/tests/unit/sagemaker/model/test_deploy.py +++ b/tests/unit/sagemaker/model/test_deploy.py @@ -23,6 +23,7 @@ from sagemaker.serverless import ServerlessInferenceConfig from sagemaker.explainer import ExplainerConfig from sagemaker.compute_resource_requirements.resource_requirements import ResourceRequirements +from sagemaker.enums import EndpointType from tests.unit.sagemaker.inference_recommender.constants import ( DESCRIBE_COMPILATION_JOB_RESPONSE, DESCRIBE_MODEL_PACKAGE_RESPONSE, @@ -1051,3 +1052,142 @@ def test_deploy_with_name_and_resources(sagemaker_session): async_inference_config_dict=None, live_logging=False, ) + + +@patch("sagemaker.model.Model._create_sagemaker_model", Mock()) +@patch("sagemaker.utils.name_from_base", return_value=ENDPOINT_NAME) +@patch("sagemaker.production_variant", return_value=BASE_PRODUCTION_VARIANT) +def test_deploy_with_update_endpoint(production_variant, name_from_base, sagemaker_session): + model = Model( + MODEL_IMAGE, MODEL_DATA, role=ROLE, name=MODEL_NAME, sagemaker_session=sagemaker_session + ) + + # Mock the create_endpoint_config to return a specific config name + endpoint_config_name = "test-config-name" + sagemaker_session.create_endpoint_config.return_value = endpoint_config_name + + # Test update_endpoint=True scenario + endpoint_name = "existing-endpoint" + model.deploy( + instance_type=INSTANCE_TYPE, + 
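        # update_endpoint=True should route through create_endpoint_config +
        # update_endpoint rather than endpoint_from_production_variants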
initial_instance_count=INSTANCE_COUNT, + endpoint_name=endpoint_name, + update_endpoint=True, + ) + + # Verify create_endpoint_config is called with correct parameters + sagemaker_session.create_endpoint_config.assert_called_with( + name=MODEL_NAME, + model_name=MODEL_NAME, + initial_instance_count=INSTANCE_COUNT, + instance_type=INSTANCE_TYPE, + accelerator_type=None, + tags=None, + kms_key=None, + data_capture_config_dict=None, + volume_size=None, + model_data_download_timeout=None, + container_startup_health_check_timeout=None, + explainer_config_dict=None, + async_inference_config_dict=None, + serverless_inference_config=None, + routing_config=None, + inference_ami_version=None, + ) + + # Verify update_endpoint is called with correct parameters + sagemaker_session.update_endpoint.assert_called_with(endpoint_name, endpoint_config_name) + + # Test update_endpoint with serverless config + serverless_inference_config = ServerlessInferenceConfig() + serverless_inference_config_dict = { + "MemorySizeInMB": 2048, + "MaxConcurrency": 5, + } + model.deploy( + endpoint_name=endpoint_name, + update_endpoint=True, + serverless_inference_config=serverless_inference_config, + ) + + sagemaker_session.create_endpoint_config.assert_called_with( + name=MODEL_NAME, + model_name=MODEL_NAME, + initial_instance_count=None, + instance_type=None, + accelerator_type=None, + tags=None, + kms_key=None, + data_capture_config_dict=None, + volume_size=None, + model_data_download_timeout=None, + container_startup_health_check_timeout=None, + explainer_config_dict=None, + async_inference_config_dict=None, + serverless_inference_config=serverless_inference_config_dict, + routing_config=None, + inference_ami_version=None, + ) + + # Verify update_endpoint is called with the new config + sagemaker_session.update_endpoint.assert_called_with(endpoint_name, endpoint_config_name) + + # Test update_endpoint with async inference config + async_inference_config = AsyncInferenceConfig( + output_path="s3://bucket/output", + failure_path="s3://bucket/failure" + ) + async_inference_config_dict = { + "OutputConfig": { + "S3OutputPath": "s3://bucket/output", + "S3FailurePath": "s3://bucket/failure" + }, + } + model.deploy( + endpoint_name=endpoint_name, + instance_type=INSTANCE_TYPE, + initial_instance_count=INSTANCE_COUNT, + update_endpoint=True, + async_inference_config=async_inference_config, + ) + + sagemaker_session.create_endpoint_config.assert_called_with( + name=MODEL_NAME, + model_name=MODEL_NAME, + initial_instance_count=INSTANCE_COUNT, + instance_type=INSTANCE_TYPE, + accelerator_type=None, + tags=None, + kms_key=None, + data_capture_config_dict=None, + volume_size=None, + model_data_download_timeout=None, + container_startup_health_check_timeout=None, + explainer_config_dict=None, + async_inference_config_dict=async_inference_config_dict, + serverless_inference_config=None, + routing_config=None, + inference_ami_version=None, + ) + + # Verify update_endpoint is called with the new config + sagemaker_session.update_endpoint.assert_called_with(endpoint_name, endpoint_config_name) + + +@patch("sagemaker.model.Model._create_sagemaker_model", Mock()) +@patch("sagemaker.production_variant", return_value=BASE_PRODUCTION_VARIANT) +def test_deploy_with_update_endpoint_inference_component(production_variant, sagemaker_session): + model = Model( + MODEL_IMAGE, MODEL_DATA, role=ROLE, name=MODEL_NAME, sagemaker_session=sagemaker_session + ) + + # Test that updating endpoint with inference component raises error + with 
pytest.raises(ValueError, match="Currently update_endpoint is supported for single model endpoints"): + model.deploy( + endpoint_name="test-endpoint", + instance_type=INSTANCE_TYPE, + initial_instance_count=INSTANCE_COUNT, + update_endpoint=True, + resources=RESOURCES, + endpoint_type=EndpointType.INFERENCE_COMPONENT_BASED, + ) diff --git a/tests/unit/sagemaker/serve/builder/test_model_builder.py b/tests/unit/sagemaker/serve/builder/test_model_builder.py index 107d65c301..ee37bf7b43 100644 --- a/tests/unit/sagemaker/serve/builder/test_model_builder.py +++ b/tests/unit/sagemaker/serve/builder/test_model_builder.py @@ -4041,14 +4041,30 @@ def test_neuron_configurations_rule_set(self): @pytest.mark.parametrize( "test_case", [ + # Real-time deployment without update { "input_args": {"endpoint_name": "test"}, "call_params": { "instance_type": "ml.g5.2xlarge", "initial_instance_count": 1, "endpoint_name": "test", + "update_endpoint": False, }, }, + # Real-time deployment with update + { + "input_args": { + "endpoint_name": "existing-endpoint", + "update_endpoint": True, + }, + "call_params": { + "instance_type": "ml.g5.2xlarge", + "initial_instance_count": 1, + "endpoint_name": "existing-endpoint", + "update_endpoint": True, + }, + }, + # Serverless deployment without update { "input_args": { "endpoint_name": "test", @@ -4057,8 +4073,23 @@ def test_neuron_configurations_rule_set(self): "call_params": { "serverless_inference_config": ServerlessInferenceConfig(), "endpoint_name": "test", + "update_endpoint": False, + }, + }, + # Serverless deployment with update + { + "input_args": { + "endpoint_name": "existing-endpoint", + "inference_config": ServerlessInferenceConfig(), + "update_endpoint": True, + }, + "call_params": { + "serverless_inference_config": ServerlessInferenceConfig(), + "endpoint_name": "existing-endpoint", + "update_endpoint": True, }, }, + # Async deployment without update { "input_args": { "endpoint_name": "test", @@ -4069,10 +4100,30 @@ def test_neuron_configurations_rule_set(self): "instance_type": "ml.g5.2xlarge", "initial_instance_count": 1, "endpoint_name": "test", + "update_endpoint": False, + }, + }, + # Async deployment with update + { + "input_args": { + "endpoint_name": "existing-endpoint", + "inference_config": AsyncInferenceConfig(output_path="op-path"), + "update_endpoint": True, + }, + "call_params": { + "async_inference_config": AsyncInferenceConfig(output_path="op-path"), + "instance_type": "ml.g5.2xlarge", + "initial_instance_count": 1, + "endpoint_name": "existing-endpoint", + "update_endpoint": True, }, }, + # Multi-Model deployment (update_endpoint not supported) { - "input_args": {"endpoint_name": "test", "inference_config": RESOURCE_REQUIREMENTS}, + "input_args": { + "endpoint_name": "test", + "inference_config": RESOURCE_REQUIREMENTS, + }, "call_params": { "resources": RESOURCE_REQUIREMENTS, "role": "role-arn", @@ -4080,12 +4131,16 @@ def test_neuron_configurations_rule_set(self): "instance_type": "ml.g5.2xlarge", "mode": Mode.SAGEMAKER_ENDPOINT, "endpoint_type": EndpointType.INFERENCE_COMPONENT_BASED, + "update_endpoint": False, }, }, + # Batch transform { "input_args": { "inference_config": BatchTransformInferenceConfig( - instance_count=1, instance_type="ml.m5.large", output_path="op-path" + instance_count=1, + instance_type="ml.m5.large", + output_path="op-path" ) }, "call_params": { @@ -4096,7 +4151,16 @@ def test_neuron_configurations_rule_set(self): "id": "Batch", }, ], - ids=["Real Time", "Serverless", "Async", "Multi-Model", "Batch"], + ids=[ + 
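        # labels map 1:1, in order, onto the parametrized cases above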
"Real Time", + "Real Time Update", + "Serverless", + "Serverless Update", + "Async", + "Async Update", + "Multi-Model", + "Batch", + ], ) @patch("sagemaker.serve.builder.model_builder.unique_name_from_base") def test_deploy(mock_unique_name_from_base, test_case): @@ -4119,3 +4183,20 @@ def test_deploy(mock_unique_name_from_base, test_case): diff = deepdiff.DeepDiff(kwargs, test_case["call_params"]) assert diff == {} + + +def test_deploy_multi_model_update_error(): + model_builder = ModelBuilder( + model="meta-llama/Meta-Llama-3-8B-Instruct", + env_vars={"HUGGING_FACE_HUB_TOKEN": "token"}, + role_arn="role-arn", + instance_type="ml.g5.2xlarge", + ) + setattr(model_builder, "built_model", MagicMock()) + + with pytest.raises(ValueError, match="Currently update_endpoint is supported for single model endpoints"): + model_builder.deploy( + endpoint_name="test", + inference_config=RESOURCE_REQUIREMENTS, + update_endpoint=True + ) From 0bf6404dc6678a622f5c04b9babc51b2dd41a3c1 Mon Sep 17 00:00:00 2001 From: Roja Reddy Sareddy Date: Wed, 12 Mar 2025 10:13:04 -0700 Subject: [PATCH 08/27] fix: fix unit test, black-check, pylint errors --- src/sagemaker/huggingface/model.py | 4 ++-- src/sagemaker/model.py | 4 +++- src/sagemaker/serve/builder/model_builder.py | 4 +++- src/sagemaker/session.py | 4 ++-- tests/unit/sagemaker/jumpstart/model/test_model.py | 2 +- tests/unit/sagemaker/model/test_deploy.py | 9 +++++---- .../sagemaker/serve/builder/test_model_builder.py | 12 +++++------- 7 files changed, 21 insertions(+), 18 deletions(-) diff --git a/src/sagemaker/huggingface/model.py b/src/sagemaker/huggingface/model.py index f4b44fc057..6ef28f99e5 100644 --- a/src/sagemaker/huggingface/model.py +++ b/src/sagemaker/huggingface/model.py @@ -298,8 +298,8 @@ def deploy( explainer_config (sagemaker.explainer.ExplainerConfig): Specifies online explainability configuration for use with Amazon SageMaker Clarify. (default: None) update_endpoint (Optional[bool]): Flag to update the model in an existing Amazon SageMaker endpoint. - If True, this will deploy a new EndpointConfig to an already existing endpoint and delete resources - corresponding to the previous EndpointConfig. Default: False + If True, this will deploy a new EndpointConfig to an already existing endpoint and + delete resources corresponding to the previous EndpointConfig. 
Default: False Raises: ValueError: If arguments combination check failed in these circumstances: - If no role is specified or diff --git a/src/sagemaker/model.py b/src/sagemaker/model.py index 24be862ecc..74177795a7 100644 --- a/src/sagemaker/model.py +++ b/src/sagemaker/model.py @@ -1631,7 +1631,9 @@ def deploy( # Support multiple models on same endpoint if endpoint_type == EndpointType.INFERENCE_COMPONENT_BASED: if update_endpoint: - raise ValueError("Currently update_endpoint is supported for single model endpoints") + raise ValueError( + "Currently update_endpoint is supported for single model endpoints" + ) if endpoint_name: self.endpoint_name = endpoint_name else: diff --git a/src/sagemaker/serve/builder/model_builder.py b/src/sagemaker/serve/builder/model_builder.py index 1a4746a6db..4a49c0538e 100644 --- a/src/sagemaker/serve/builder/model_builder.py +++ b/src/sagemaker/serve/builder/model_builder.py @@ -1663,7 +1663,9 @@ def deploy( if isinstance(inference_config, ResourceRequirements): if update_endpoint: - raise ValueError("Currently update_endpoint is supported for single model endpoints") + raise ValueError( + "Currently update_endpoint is supported for single model endpoints" + ) # Multi Model and MultiContainer endpoints with Inference Component return self.built_model.deploy( instance_type=self.instance_type, diff --git a/src/sagemaker/session.py b/src/sagemaker/session.py index 4a65b3ccb5..be0bb9b688 100644 --- a/src/sagemaker/session.py +++ b/src/sagemaker/session.py @@ -4540,8 +4540,8 @@ def create_endpoint_config( empty object passed through, will use pre-defined values in ``ServerlessInferenceConfig`` class to deploy serverless endpoint. Deploy an instance based endpoint if it's None. (default: None). - routing_config (Optional[Dict[str, Any]): Settings the control how the endpoint routes incoming - traffic to the instances that the endpoint hosts. + routing_config (Optional[Dict[str, Any]): Settings the control how the endpoint routes + incoming traffic to the instances that the endpoint hosts. Currently, support dictionary key ``RoutingStrategy``. .. 
code:: python diff --git a/tests/unit/sagemaker/jumpstart/model/test_model.py b/tests/unit/sagemaker/jumpstart/model/test_model.py index be961828f4..d9b126f651 100644 --- a/tests/unit/sagemaker/jumpstart/model/test_model.py +++ b/tests/unit/sagemaker/jumpstart/model/test_model.py @@ -794,7 +794,7 @@ def test_jumpstart_model_kwargs_match_parent_class(self): and reach out to JumpStart team.""" init_args_to_skip: Set[str] = set(["model_reference_arn"]) - deploy_args_to_skip: Set[str] = set(["kwargs", "model_reference_arn"]) + deploy_args_to_skip: Set[str] = set(["kwargs", "model_reference_arn", "update_endpoint"]) deploy_args_removed_at_deploy_time: Set[str] = set(["model_access_configs"]) parent_class_init = Model.__init__ diff --git a/tests/unit/sagemaker/model/test_deploy.py b/tests/unit/sagemaker/model/test_deploy.py index 49a67871e7..4167ca62c3 100644 --- a/tests/unit/sagemaker/model/test_deploy.py +++ b/tests/unit/sagemaker/model/test_deploy.py @@ -1134,13 +1134,12 @@ def test_deploy_with_update_endpoint(production_variant, name_from_base, sagemak # Test update_endpoint with async inference config async_inference_config = AsyncInferenceConfig( - output_path="s3://bucket/output", - failure_path="s3://bucket/failure" + output_path="s3://bucket/output", failure_path="s3://bucket/failure" ) async_inference_config_dict = { "OutputConfig": { "S3OutputPath": "s3://bucket/output", - "S3FailurePath": "s3://bucket/failure" + "S3FailurePath": "s3://bucket/failure", }, } model.deploy( @@ -1182,7 +1181,9 @@ def test_deploy_with_update_endpoint_inference_component(production_variant, sag ) # Test that updating endpoint with inference component raises error - with pytest.raises(ValueError, match="Currently update_endpoint is supported for single model endpoints"): + with pytest.raises( + ValueError, match="Currently update_endpoint is supported for single model endpoints" + ): model.deploy( endpoint_name="test-endpoint", instance_type=INSTANCE_TYPE, diff --git a/tests/unit/sagemaker/serve/builder/test_model_builder.py b/tests/unit/sagemaker/serve/builder/test_model_builder.py index ee37bf7b43..6661c6e2bf 100644 --- a/tests/unit/sagemaker/serve/builder/test_model_builder.py +++ b/tests/unit/sagemaker/serve/builder/test_model_builder.py @@ -4138,9 +4138,7 @@ def test_neuron_configurations_rule_set(self): { "input_args": { "inference_config": BatchTransformInferenceConfig( - instance_count=1, - instance_type="ml.m5.large", - output_path="op-path" + instance_count=1, instance_type="ml.m5.large", output_path="op-path" ) }, "call_params": { @@ -4194,9 +4192,9 @@ def test_deploy_multi_model_update_error(): ) setattr(model_builder, "built_model", MagicMock()) - with pytest.raises(ValueError, match="Currently update_endpoint is supported for single model endpoints"): + with pytest.raises( + ValueError, match="Currently update_endpoint is supported for single model endpoints" + ): model_builder.deploy( - endpoint_name="test", - inference_config=RESOURCE_REQUIREMENTS, - update_endpoint=True + endpoint_name="test", inference_config=RESOURCE_REQUIREMENTS, update_endpoint=True ) From c67d7df4f9a1cc15aff8a2cf9dad277f05355dc3 Mon Sep 17 00:00:00 2001 From: Roja Reddy Sareddy Date: Wed, 12 Mar 2025 10:51:19 -0700 Subject: [PATCH 09/27] fix: fix black-check, pylint errors --- src/sagemaker/huggingface/model.py | 8 +++++--- src/sagemaker/model.py | 7 ++++--- src/sagemaker/serve/builder/model_builder.py | 7 ++++--- src/sagemaker/session.py | 5 ++++- 4 files changed, 17 insertions(+), 10 deletions(-) diff --git 
a/src/sagemaker/huggingface/model.py b/src/sagemaker/huggingface/model.py index 6ef28f99e5..3ca25fb3ce 100644 --- a/src/sagemaker/huggingface/model.py +++ b/src/sagemaker/huggingface/model.py @@ -297,9 +297,11 @@ def deploy( would like to deploy the model and endpoint with recommended parameters. explainer_config (sagemaker.explainer.ExplainerConfig): Specifies online explainability configuration for use with Amazon SageMaker Clarify. (default: None) - update_endpoint (Optional[bool]): Flag to update the model in an existing Amazon SageMaker endpoint. - If True, this will deploy a new EndpointConfig to an already existing endpoint and - delete resources corresponding to the previous EndpointConfig. Default: False + update_endpoint (Optional[bool]): + Flag to update the model in an existing Amazon SageMaker endpoint. + If True, this will deploy a new EndpointConfig to an already existing endpoint + and delete resources corresponding to the previous EndpointConfig. Default: False + Note: Currently this is supported for single model endpoints Raises: ValueError: If arguments combination check failed in these circumstances: - If no role is specified or diff --git a/src/sagemaker/model.py b/src/sagemaker/model.py index 74177795a7..b281d9f489 100644 --- a/src/sagemaker/model.py +++ b/src/sagemaker/model.py @@ -1497,9 +1497,10 @@ def deploy( inference_ami_version (Optional [str]): Specifies an option from a collection of preconfigured Amazon Machine Image (AMI) images. For a full list of options, see: https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_ProductionVariant.html - update_endpoint (Optional[bool]): Flag to update the model in an existing Amazon SageMaker endpoint. - If True, this will deploy a new EndpointConfig to an already existing endpoint and delete resources - corresponding to the previous EndpointConfig. Default: False + update_endpoint (Optional[bool]): + Flag to update the model in an existing Amazon SageMaker endpoint. + If True, this will deploy a new EndpointConfig to an already existing endpoint + and delete resources corresponding to the previous EndpointConfig. Default: False Note: Currently this is supported for single model endpoints Raises: ValueError: If arguments combination check failed in these circumstances: diff --git a/src/sagemaker/serve/builder/model_builder.py b/src/sagemaker/serve/builder/model_builder.py index 4a49c0538e..9122f22e44 100644 --- a/src/sagemaker/serve/builder/model_builder.py +++ b/src/sagemaker/serve/builder/model_builder.py @@ -1616,9 +1616,10 @@ def deploy( AsyncInferenceConfig, BatchTransformInferenceConfig, ResourceRequirements]]) : Additional Config for different deployment types such as serverless, async, batch and multi-model/container - update_endpoint (Optional[bool]): Flag to update the model in an existing Amazon SageMaker endpoint. - If True, this will deploy a new EndpointConfig to an already existing endpoint and delete resources - corresponding to the previous EndpointConfig. Default: False + update_endpoint (Optional[bool]): + Flag to update the model in an existing Amazon SageMaker endpoint. + If True, this will deploy a new EndpointConfig to an already existing endpoint + and delete resources corresponding to the previous EndpointConfig. 
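
One behavioral detail from the ModelBuilder hunk above bears repeating: with update_endpoint=True, deploy() skips the unique_name_from_base() suffixing, so endpoint_name must match the live endpoint verbatim. A hedged sketch; the model id, role, and endpoint name are placeholders:

    from sagemaker.serve.builder.model_builder import ModelBuilder

    builder = ModelBuilder(
        model="meta-llama/Meta-Llama-3-8B-Instruct",  # placeholder model id
        role_arn="arn:aws:iam::111122223333:role/SageMakerRole",  # placeholder
        instance_type="ml.g5.2xlarge",
    )
    builder.build()
    # Name is used verbatim (no unique suffix) because update_endpoint=True.
    predictor = builder.deploy(
        endpoint_name="my-existing-endpoint",
        update_endpoint=True,
    )
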
Default: False Note: Currently this is supported for single model endpoints Returns: Transformer for Batch Deployments diff --git a/src/sagemaker/session.py b/src/sagemaker/session.py index be0bb9b688..38fa7f8c26 100644 --- a/src/sagemaker/session.py +++ b/src/sagemaker/session.py @@ -4549,7 +4549,10 @@ def create_endpoint_config( { "RoutingStrategy": sagemaker.enums.RoutingStrategy.RANDOM } - inference_ami_version (Optional [str]): Specifies an option from a collection of preconfigured + inference_ami_version (Optional [str]): + Specifies an option from a collection of preconfigured + Amazon Machine Image (AMI) images. For a full list of options, see: + https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_ProductionVariant.html Example: >>> tags = [{'Key': 'tagname', 'Value': 'tagvalue'}] From 89e18a9f3e75843e3074bf2f07221f9ec67ef5ea Mon Sep 17 00:00:00 2001 From: Roja Reddy Sareddy Date: Mon, 7 Apr 2025 23:06:31 -0700 Subject: [PATCH 10/27] fix:Added handler for pipeline variable while creating process job --- src/sagemaker/processing.py | 43 +++++- tests/unit/test_processing.py | 255 +++++++++++++++++++++++++++++++++- 2 files changed, 296 insertions(+), 2 deletions(-) diff --git a/src/sagemaker/processing.py b/src/sagemaker/processing.py index d8674f269d..1fdfaae3c2 100644 --- a/src/sagemaker/processing.py +++ b/src/sagemaker/processing.py @@ -60,9 +60,10 @@ ) from sagemaker.workflow import is_pipeline_variable from sagemaker.workflow.entities import PipelineVariable -from sagemaker.workflow.execution_variables import ExecutionVariables +from sagemaker.workflow.execution_variables import ExecutionVariable, ExecutionVariables from sagemaker.workflow.functions import Join from sagemaker.workflow.pipeline_context import runnable_by_pipeline +from sagemaker.workflow.parameters import Parameter logger = logging.getLogger(__name__) @@ -314,6 +315,15 @@ def _normalize_args( "code argument has to be a valid S3 URI or local file path " + "rather than a pipeline variable" ) + if arguments is not None: + normalized_arguments = [] + for arg in arguments: + if isinstance(arg, PipelineVariable): + normalized_value = self._normalize_pipeline_variable(arg) + normalized_arguments.append(normalized_value) + else: + normalized_arguments.append(str(arg)) + arguments = normalized_arguments self._current_job_name = self._generate_current_job_name(job_name=job_name) @@ -499,6 +509,37 @@ def _normalize_outputs(self, outputs=None): normalized_outputs.append(output) return normalized_outputs + def _normalize_pipeline_variable(self, value): + """Helper function to normalize PipelineVariable objects""" + try: + if isinstance(value, Parameter): + return str(value.default_value) if value.default_value is not None else None + + elif isinstance(value, ExecutionVariable): + return f"{value.name}" + + elif isinstance(value, Join): + normalized_values = [ + normalize_pipeline_variable(v) if isinstance(v, PipelineVariable) else str(v) + for v in value.values + ] + return value.on.join(normalized_values) + + elif isinstance(value, PipelineVariable): + if hasattr(value, 'default_value'): + return str(value.default_value) + elif hasattr(value, 'expr'): + return str(value.expr) + + return str(value) + + except AttributeError as e: + raise ValueError(f"Missing required attribute while normalizing {type(value).__name__}: {e}") + except TypeError as e: + raise ValueError(f"Type error while normalizing {type(value).__name__}: {e}") + except Exception as e: + raise ValueError(f"Error normalizing {type(value).__name__}: 
{e}") + class ScriptProcessor(Processor): """Handles Amazon SageMaker processing tasks for jobs using a machine learning framework.""" diff --git a/tests/unit/test_processing.py b/tests/unit/test_processing.py index 06d2cde02e..0088e10640 100644 --- a/tests/unit/test_processing.py +++ b/tests/unit/test_processing.py @@ -46,8 +46,9 @@ from sagemaker.fw_utils import UploadedCode from sagemaker.workflow.pipeline_context import PipelineSession, _PipelineConfig from sagemaker.workflow.functions import Join -from sagemaker.workflow.execution_variables import ExecutionVariables +from sagemaker.workflow.execution_variables import ExecutionVariable, ExecutionVariables from tests.unit import SAGEMAKER_CONFIG_PROCESSING_JOB +from sagemaker.workflow.parameters import ParameterString, Parameter BUCKET_NAME = "mybucket" REGION = "us-west-2" @@ -1717,3 +1718,255 @@ def _get_describe_response_inputs_and_ouputs(): "ProcessingInputs": _get_expected_args_all_parameters(None)["inputs"], "ProcessingOutputConfig": _get_expected_args_all_parameters(None)["output_config"], } + +# Parameters +def _get_data_inputs_with_parameters(): + return [ + ProcessingInput( + source=ParameterString( + name="input_data", + default_value="s3://dummy-bucket/input" + ), + destination="/opt/ml/processing/input", + input_name="input-1" + ) + ] + + +def _get_data_outputs_with_parameters(): + return [ + ProcessingOutput( + source="/opt/ml/processing/output", + destination=ParameterString( + name="output_data", + default_value="s3://dummy-bucket/output" + ), + output_name="output-1" + ) + ] + + +def _get_expected_args_with_parameters(job_name): + return { + "inputs": [{ + "InputName": "input-1", + "S3Input": { + "S3Uri": "s3://dummy-bucket/input", + "LocalPath": "/opt/ml/processing/input", + "S3DataType": "S3Prefix", + "S3InputMode": "File", + "S3DataDistributionType": "FullyReplicated", + "S3CompressionType": "None" + } + }], + "output_config": { + "Outputs": [{ + "OutputName": "output-1", + "S3Output": { + "S3Uri": "s3://dummy-bucket/output", + "LocalPath": "/opt/ml/processing/output", + "S3UploadMode": "EndOfJob" + } + }] + }, + "job_name": job_name, + "resources": { + "ClusterConfig": { + "InstanceType": "ml.m4.xlarge", + "InstanceCount": 1, + "VolumeSizeInGB": 100, + "VolumeKmsKeyId": "arn:aws:kms:us-west-2:012345678901:key/volume-kms-key" + } + }, + "stopping_condition": {"MaxRuntimeInSeconds": 3600}, + "app_specification": { + "ImageUri": "custom-image-uri", + "ContainerArguments": [ + "--input-data", + "s3://dummy-bucket/input-param", + "--output-path", + "s3://dummy-bucket/output-param" + ], + "ContainerEntrypoint": ["python3"] + }, + "environment": {"my_env_variable": "my_env_variable_value"}, + "network_config": { + "EnableNetworkIsolation": True, + "EnableInterContainerTrafficEncryption": True, + "VpcConfig": { + "Subnets": ["my_subnet_id"], + "SecurityGroupIds": ["my_security_group_id"] + } + }, + "role_arn": "dummy/role", + "tags": [{"Key": "my-tag", "Value": "my-tag-value"}], + "experiment_config": {"ExperimentName": "AnExperiment"} + } + + +@patch("os.path.exists", return_value=True) +@patch("os.path.isfile", return_value=True) +@patch("sagemaker.utils.repack_model") +@patch("sagemaker.utils.create_tar_file") +@patch("sagemaker.session.Session.upload_data") +def test_script_processor_with_parameter_string( + upload_data_mock, + create_tar_file_mock, + repack_model_mock, + exists_mock, + isfile_mock, + sagemaker_session, +): + """Test ScriptProcessor with ParameterString arguments""" + upload_data_mock.return_value = 
"s3://mocked_s3_uri_from_upload_data" + + # Setup processor + processor = ScriptProcessor( + role="arn:aws:iam::012345678901:role/SageMakerRole", # Updated role ARN + image_uri="custom-image-uri", + command=["python3"], + instance_type="ml.m4.xlarge", + instance_count=1, + volume_size_in_gb=100, + volume_kms_key="arn:aws:kms:us-west-2:012345678901:key/volume-kms-key", + output_kms_key="arn:aws:kms:us-west-2:012345678901:key/output-kms-key", + max_runtime_in_seconds=3600, + base_job_name="test_processor", + env={"my_env_variable": "my_env_variable_value"}, + tags=[{"Key": "my-tag", "Value": "my-tag-value"}], + network_config=NetworkConfig( + subnets=["my_subnet_id"], + security_group_ids=["my_security_group_id"], + enable_network_isolation=True, + encrypt_inter_container_traffic=True, + ), + sagemaker_session=sagemaker_session, + ) + + input_param = ParameterString( + name="input_param", + default_value="s3://dummy-bucket/input-param" + ) + output_param = ParameterString( + name="output_param", + default_value="s3://dummy-bucket/output-param" + ) + exec_var = ExecutionVariable( + name="ExecutionTest" + ) + join_var = Join( + on="/", + values=["s3://bucket", "prefix", "file.txt"] + ) + dummy_str_var = "test-variable" + + # Define expected arguments + expected_args = { + "inputs": [ + { + "InputName": "input-1", + "AppManaged": False, + "S3Input": { + "S3Uri": ParameterString( + name="input_data", + default_value="s3://dummy-bucket/input" + ), + "LocalPath": "/opt/ml/processing/input", + "S3DataType": "S3Prefix", + "S3InputMode": "File", + "S3DataDistributionType": "FullyReplicated", + "S3CompressionType": "None" + } + }, + { + "InputName": "code", + "AppManaged": False, + "S3Input": { + "S3Uri": "s3://mocked_s3_uri_from_upload_data", + "LocalPath": "/opt/ml/processing/input/code", + "S3DataType": "S3Prefix", + "S3InputMode": "File", + "S3DataDistributionType": "FullyReplicated", + "S3CompressionType": "None" + } + } + ], + "output_config": { + "Outputs": [ + { + "OutputName": "output-1", + "AppManaged": False, + "S3Output": { + "S3Uri": ParameterString( + name="output_data", + default_value="s3://dummy-bucket/output" + ), + "LocalPath": "/opt/ml/processing/output", + "S3UploadMode": "EndOfJob" + } + } + ], + "KmsKeyId": "arn:aws:kms:us-west-2:012345678901:key/output-kms-key" + }, + "job_name": "test_job", + "resources": { + "ClusterConfig": { + "InstanceType": "ml.m4.xlarge", + "InstanceCount": 1, + "VolumeSizeInGB": 100, + "VolumeKmsKeyId": "arn:aws:kms:us-west-2:012345678901:key/volume-kms-key" + } + }, + "stopping_condition": {"MaxRuntimeInSeconds": 3600}, + "app_specification": { + "ImageUri": "custom-image-uri", + "ContainerArguments": [ + "--input-data", + "s3://dummy-bucket/input-param", + "--output-path", + "s3://dummy-bucket/output-param", + "--exec-arg", "ExecutionTest", + "--join-arg", "s3://bucket/prefix/file.txt", + "--string-param", "test-variable" + ], + "ContainerEntrypoint": ["python3", "/opt/ml/processing/input/code/processing_code.py"] + }, + "environment": {"my_env_variable": "my_env_variable_value"}, + "network_config": { + "EnableNetworkIsolation": True, + "EnableInterContainerTrafficEncryption": True, + "VpcConfig": { + "SecurityGroupIds": ["my_security_group_id"], + "Subnets": ["my_subnet_id"] + } + }, + "role_arn": "arn:aws:iam::012345678901:role/SageMakerRole", + "tags": [{"Key": "my-tag", "Value": "my-tag-value"}], + "experiment_config": {"ExperimentName": "AnExperiment"} + } + + # Run processor + processor.run( + code="/local/path/to/processing_code.py", + 
inputs=_get_data_inputs_with_parameters(), + outputs=_get_data_outputs_with_parameters(), + arguments=[ + "--input-data", + input_param, + "--output-path", + output_param, + "--exec-arg", exec_var, + "--join-arg", join_var, + "--string-param", dummy_str_var + ], + wait=True, + logs=False, + job_name="test_job", + experiment_config={"ExperimentName": "AnExperiment"}, + ) + + # Assert + sagemaker_session.process.assert_called_with(**expected_args) + assert "test_job" in processor._current_job_name + + From 7f15e1928037bf34b60a8697ac5f1450c73abae1 Mon Sep 17 00:00:00 2001 From: Roja Reddy Sareddy Date: Wed, 9 Apr 2025 15:18:05 -0700 Subject: [PATCH 11/27] fix: Added handler for pipeline variable while creating process job --- src/sagemaker/processing.py | 46 +----- .../workflow/test_processing_step.py | 17 +- tests/unit/test_processing.py | 150 +++++++++--------- 3 files changed, 93 insertions(+), 120 deletions(-) diff --git a/src/sagemaker/processing.py b/src/sagemaker/processing.py index 1fdfaae3c2..7beef2e5bd 100644 --- a/src/sagemaker/processing.py +++ b/src/sagemaker/processing.py @@ -17,7 +17,7 @@ and interpretation on Amazon SageMaker. """ from __future__ import absolute_import - +import json import logging import os import pathlib @@ -60,10 +60,9 @@ ) from sagemaker.workflow import is_pipeline_variable from sagemaker.workflow.entities import PipelineVariable -from sagemaker.workflow.execution_variables import ExecutionVariable, ExecutionVariables +from sagemaker.workflow.execution_variables import ExecutionVariables from sagemaker.workflow.functions import Join from sagemaker.workflow.pipeline_context import runnable_by_pipeline -from sagemaker.workflow.parameters import Parameter logger = logging.getLogger(__name__) @@ -316,14 +315,14 @@ def _normalize_args( + "rather than a pipeline variable" ) if arguments is not None: - normalized_arguments = [] + processed_arguments = [] for arg in arguments: if isinstance(arg, PipelineVariable): - normalized_value = self._normalize_pipeline_variable(arg) - normalized_arguments.append(normalized_value) + processed_value = json.dumps(arg.expr) + processed_arguments.append(processed_value) else: - normalized_arguments.append(str(arg)) - arguments = normalized_arguments + processed_arguments.append(str(arg)) + arguments = processed_arguments self._current_job_name = self._generate_current_job_name(job_name=job_name) @@ -509,37 +508,6 @@ def _normalize_outputs(self, outputs=None): normalized_outputs.append(output) return normalized_outputs - def _normalize_pipeline_variable(self, value): - """Helper function to normalize PipelineVariable objects""" - try: - if isinstance(value, Parameter): - return str(value.default_value) if value.default_value is not None else None - - elif isinstance(value, ExecutionVariable): - return f"{value.name}" - - elif isinstance(value, Join): - normalized_values = [ - normalize_pipeline_variable(v) if isinstance(v, PipelineVariable) else str(v) - for v in value.values - ] - return value.on.join(normalized_values) - - elif isinstance(value, PipelineVariable): - if hasattr(value, 'default_value'): - return str(value.default_value) - elif hasattr(value, 'expr'): - return str(value.expr) - - return str(value) - - except AttributeError as e: - raise ValueError(f"Missing required attribute while normalizing {type(value).__name__}: {e}") - except TypeError as e: - raise ValueError(f"Type error while normalizing {type(value).__name__}: {e}") - except Exception as e: - raise ValueError(f"Error normalizing {type(value).__name__}: 
{e}") - class ScriptProcessor(Processor): """Handles Amazon SageMaker processing tasks for jobs using a machine learning framework.""" diff --git a/tests/unit/sagemaker/workflow/test_processing_step.py b/tests/unit/sagemaker/workflow/test_processing_step.py index 0dcd7c2495..f94e0791cb 100644 --- a/tests/unit/sagemaker/workflow/test_processing_step.py +++ b/tests/unit/sagemaker/workflow/test_processing_step.py @@ -824,7 +824,12 @@ def test_spark_processor(spark_processor, processing_input, pipeline_session): processor, run_inputs = spark_processor processor.sagemaker_session = pipeline_session processor.role = ROLE - + arguments_output = [ + "--input", + "input-data-uri", + "--output", + '{"Get": "Parameters.MyArgOutput"}', + ] run_inputs["inputs"] = processing_input step_args = processor.run(**run_inputs) @@ -835,7 +840,7 @@ def test_spark_processor(spark_processor, processing_input, pipeline_session): step_args = get_step_args_helper(step_args, "Processing") - assert step_args["AppSpecification"]["ContainerArguments"] == run_inputs["arguments"] + assert step_args["AppSpecification"]["ContainerArguments"] == arguments_output entry_points = step_args["AppSpecification"]["ContainerEntrypoint"] entry_points_expr = [] @@ -1019,6 +1024,12 @@ def test_spark_processor_local_code(spark_processor, processing_input, pipeline_ processor, run_inputs = spark_processor processor.sagemaker_session = pipeline_session processor.role = ROLE + arguments_output = [ + "--input", + "input-data-uri", + "--output", + '{"Get": "Parameters.MyArgOutput"}', + ] run_inputs["inputs"] = processing_input @@ -1030,7 +1041,7 @@ def test_spark_processor_local_code(spark_processor, processing_input, pipeline_ step_args = get_step_args_helper(step_args, "Processing") - assert step_args["AppSpecification"]["ContainerArguments"] == run_inputs["arguments"] + assert step_args["AppSpecification"]["ContainerArguments"] == arguments_output entry_points = step_args["AppSpecification"]["ContainerEntrypoint"] entry_points_expr = [] diff --git a/tests/unit/test_processing.py b/tests/unit/test_processing.py index 0088e10640..7b020c61bf 100644 --- a/tests/unit/test_processing.py +++ b/tests/unit/test_processing.py @@ -48,7 +48,7 @@ from sagemaker.workflow.functions import Join from sagemaker.workflow.execution_variables import ExecutionVariable, ExecutionVariables from tests.unit import SAGEMAKER_CONFIG_PROCESSING_JOB -from sagemaker.workflow.parameters import ParameterString, Parameter +from sagemaker.workflow.parameters import ParameterString BUCKET_NAME = "mybucket" REGION = "us-west-2" @@ -1719,16 +1719,14 @@ def _get_describe_response_inputs_and_ouputs(): "ProcessingOutputConfig": _get_expected_args_all_parameters(None)["output_config"], } + # Parameters def _get_data_inputs_with_parameters(): return [ ProcessingInput( - source=ParameterString( - name="input_data", - default_value="s3://dummy-bucket/input" - ), + source=ParameterString(name="input_data", default_value="s3://dummy-bucket/input"), destination="/opt/ml/processing/input", - input_name="input-1" + input_name="input-1", ) ] @@ -1738,36 +1736,39 @@ def _get_data_outputs_with_parameters(): ProcessingOutput( source="/opt/ml/processing/output", destination=ParameterString( - name="output_data", - default_value="s3://dummy-bucket/output" + name="output_data", default_value="s3://dummy-bucket/output" ), - output_name="output-1" + output_name="output-1", ) ] def _get_expected_args_with_parameters(job_name): return { - "inputs": [{ - "InputName": "input-1", - "S3Input": { - 
"S3Uri": "s3://dummy-bucket/input", - "LocalPath": "/opt/ml/processing/input", - "S3DataType": "S3Prefix", - "S3InputMode": "File", - "S3DataDistributionType": "FullyReplicated", - "S3CompressionType": "None" + "inputs": [ + { + "InputName": "input-1", + "S3Input": { + "S3Uri": "s3://dummy-bucket/input", + "LocalPath": "/opt/ml/processing/input", + "S3DataType": "S3Prefix", + "S3InputMode": "File", + "S3DataDistributionType": "FullyReplicated", + "S3CompressionType": "None", + }, } - }], + ], "output_config": { - "Outputs": [{ - "OutputName": "output-1", - "S3Output": { - "S3Uri": "s3://dummy-bucket/output", - "LocalPath": "/opt/ml/processing/output", - "S3UploadMode": "EndOfJob" + "Outputs": [ + { + "OutputName": "output-1", + "S3Output": { + "S3Uri": "s3://dummy-bucket/output", + "LocalPath": "/opt/ml/processing/output", + "S3UploadMode": "EndOfJob", + }, } - }] + ] }, "job_name": job_name, "resources": { @@ -1775,7 +1776,7 @@ def _get_expected_args_with_parameters(job_name): "InstanceType": "ml.m4.xlarge", "InstanceCount": 1, "VolumeSizeInGB": 100, - "VolumeKmsKeyId": "arn:aws:kms:us-west-2:012345678901:key/volume-kms-key" + "VolumeKmsKeyId": "arn:aws:kms:us-west-2:012345678901:key/volume-kms-key", } }, "stopping_condition": {"MaxRuntimeInSeconds": 3600}, @@ -1785,9 +1786,9 @@ def _get_expected_args_with_parameters(job_name): "--input-data", "s3://dummy-bucket/input-param", "--output-path", - "s3://dummy-bucket/output-param" + "s3://dummy-bucket/output-param", ], - "ContainerEntrypoint": ["python3"] + "ContainerEntrypoint": ["python3"], }, "environment": {"my_env_variable": "my_env_variable_value"}, "network_config": { @@ -1795,12 +1796,12 @@ def _get_expected_args_with_parameters(job_name): "EnableInterContainerTrafficEncryption": True, "VpcConfig": { "Subnets": ["my_subnet_id"], - "SecurityGroupIds": ["my_security_group_id"] - } + "SecurityGroupIds": ["my_security_group_id"], + }, }, "role_arn": "dummy/role", "tags": [{"Key": "my-tag", "Value": "my-tag-value"}], - "experiment_config": {"ExperimentName": "AnExperiment"} + "experiment_config": {"ExperimentName": "AnExperiment"}, } @@ -1810,12 +1811,12 @@ def _get_expected_args_with_parameters(job_name): @patch("sagemaker.utils.create_tar_file") @patch("sagemaker.session.Session.upload_data") def test_script_processor_with_parameter_string( - upload_data_mock, - create_tar_file_mock, - repack_model_mock, - exists_mock, - isfile_mock, - sagemaker_session, + upload_data_mock, + create_tar_file_mock, + repack_model_mock, + exists_mock, + isfile_mock, + sagemaker_session, ): """Test ScriptProcessor with ParameterString arguments""" upload_data_mock.return_value = "s3://mocked_s3_uri_from_upload_data" @@ -1843,21 +1844,12 @@ def test_script_processor_with_parameter_string( sagemaker_session=sagemaker_session, ) - input_param = ParameterString( - name="input_param", - default_value="s3://dummy-bucket/input-param" - ) + input_param = ParameterString(name="input_param", default_value="s3://dummy-bucket/input-param") output_param = ParameterString( - name="output_param", - default_value="s3://dummy-bucket/output-param" - ) - exec_var = ExecutionVariable( - name="ExecutionTest" - ) - join_var = Join( - on="/", - values=["s3://bucket", "prefix", "file.txt"] + name="output_param", default_value="s3://dummy-bucket/output-param" ) + exec_var = ExecutionVariable(name="ExecutionTest") + join_var = Join(on="/", values=["s3://bucket", "prefix", "file.txt"]) dummy_str_var = "test-variable" # Define expected arguments @@ -1868,15 +1860,14 @@ def 
test_script_processor_with_parameter_string( "AppManaged": False, "S3Input": { "S3Uri": ParameterString( - name="input_data", - default_value="s3://dummy-bucket/input" + name="input_data", default_value="s3://dummy-bucket/input" ), "LocalPath": "/opt/ml/processing/input", "S3DataType": "S3Prefix", "S3InputMode": "File", "S3DataDistributionType": "FullyReplicated", - "S3CompressionType": "None" - } + "S3CompressionType": "None", + }, }, { "InputName": "code", @@ -1887,9 +1878,9 @@ def test_script_processor_with_parameter_string( "S3DataType": "S3Prefix", "S3InputMode": "File", "S3DataDistributionType": "FullyReplicated", - "S3CompressionType": "None" - } - } + "S3CompressionType": "None", + }, + }, ], "output_config": { "Outputs": [ @@ -1898,15 +1889,14 @@ def test_script_processor_with_parameter_string( "AppManaged": False, "S3Output": { "S3Uri": ParameterString( - name="output_data", - default_value="s3://dummy-bucket/output" + name="output_data", default_value="s3://dummy-bucket/output" ), "LocalPath": "/opt/ml/processing/output", - "S3UploadMode": "EndOfJob" - } + "S3UploadMode": "EndOfJob", + }, } ], - "KmsKeyId": "arn:aws:kms:us-west-2:012345678901:key/output-kms-key" + "KmsKeyId": "arn:aws:kms:us-west-2:012345678901:key/output-kms-key", }, "job_name": "test_job", "resources": { @@ -1914,7 +1904,7 @@ def test_script_processor_with_parameter_string( "InstanceType": "ml.m4.xlarge", "InstanceCount": 1, "VolumeSizeInGB": 100, - "VolumeKmsKeyId": "arn:aws:kms:us-west-2:012345678901:key/volume-kms-key" + "VolumeKmsKeyId": "arn:aws:kms:us-west-2:012345678901:key/volume-kms-key", } }, "stopping_condition": {"MaxRuntimeInSeconds": 3600}, @@ -1922,14 +1912,17 @@ def test_script_processor_with_parameter_string( "ImageUri": "custom-image-uri", "ContainerArguments": [ "--input-data", - "s3://dummy-bucket/input-param", + '{"Get": "Parameters.input_param"}', "--output-path", - "s3://dummy-bucket/output-param", - "--exec-arg", "ExecutionTest", - "--join-arg", "s3://bucket/prefix/file.txt", - "--string-param", "test-variable" + '{"Get": "Parameters.output_param"}', + "--exec-arg", + '{"Get": "Execution.ExecutionTest"}', + "--join-arg", + '{"Std:Join": {"On": "/", "Values": ["s3://bucket", "prefix", "file.txt"]}}', + "--string-param", + "test-variable", ], - "ContainerEntrypoint": ["python3", "/opt/ml/processing/input/code/processing_code.py"] + "ContainerEntrypoint": ["python3", "/opt/ml/processing/input/code/processing_code.py"], }, "environment": {"my_env_variable": "my_env_variable_value"}, "network_config": { @@ -1937,12 +1930,12 @@ def test_script_processor_with_parameter_string( "EnableInterContainerTrafficEncryption": True, "VpcConfig": { "SecurityGroupIds": ["my_security_group_id"], - "Subnets": ["my_subnet_id"] - } + "Subnets": ["my_subnet_id"], + }, }, "role_arn": "arn:aws:iam::012345678901:role/SageMakerRole", "tags": [{"Key": "my-tag", "Value": "my-tag-value"}], - "experiment_config": {"ExperimentName": "AnExperiment"} + "experiment_config": {"ExperimentName": "AnExperiment"}, } # Run processor @@ -1955,9 +1948,12 @@ def test_script_processor_with_parameter_string( input_param, "--output-path", output_param, - "--exec-arg", exec_var, - "--join-arg", join_var, - "--string-param", dummy_str_var + "--exec-arg", + exec_var, + "--join-arg", + join_var, + "--string-param", + dummy_str_var, ], wait=True, logs=False, @@ -1968,5 +1964,3 @@ def test_script_processor_with_parameter_string( # Assert sagemaker_session.process.assert_called_with(**expected_args) assert "test_job" in 
processor._current_job_name - - From de11c9139abfd9d8dd750cac4e880185caf65329 Mon Sep 17 00:00:00 2001 From: Roja Reddy Sareddy Date: Sun, 13 Apr 2025 20:59:19 -0700 Subject: [PATCH 12/27] Revert the PR changes: #5122, due to issue https://t.corp.amazon.com/P223568185/overview --- src/sagemaker/processing.py | 10 - .../workflow/test_processing_step.py | 17 +- tests/unit/test_processing.py | 249 +----------------- 3 files changed, 4 insertions(+), 272 deletions(-) diff --git a/src/sagemaker/processing.py b/src/sagemaker/processing.py index 7beef2e5bd..103be47caf 100644 --- a/src/sagemaker/processing.py +++ b/src/sagemaker/processing.py @@ -17,7 +17,6 @@ and interpretation on Amazon SageMaker. """ from __future__ import absolute_import -import json import logging import os import pathlib @@ -314,15 +313,6 @@ def _normalize_args( "code argument has to be a valid S3 URI or local file path " + "rather than a pipeline variable" ) - if arguments is not None: - processed_arguments = [] - for arg in arguments: - if isinstance(arg, PipelineVariable): - processed_value = json.dumps(arg.expr) - processed_arguments.append(processed_value) - else: - processed_arguments.append(str(arg)) - arguments = processed_arguments self._current_job_name = self._generate_current_job_name(job_name=job_name) diff --git a/tests/unit/sagemaker/workflow/test_processing_step.py b/tests/unit/sagemaker/workflow/test_processing_step.py index f94e0791cb..0dcd7c2495 100644 --- a/tests/unit/sagemaker/workflow/test_processing_step.py +++ b/tests/unit/sagemaker/workflow/test_processing_step.py @@ -824,12 +824,7 @@ def test_spark_processor(spark_processor, processing_input, pipeline_session): processor, run_inputs = spark_processor processor.sagemaker_session = pipeline_session processor.role = ROLE - arguments_output = [ - "--input", - "input-data-uri", - "--output", - '{"Get": "Parameters.MyArgOutput"}', - ] + run_inputs["inputs"] = processing_input step_args = processor.run(**run_inputs) @@ -840,7 +835,7 @@ def test_spark_processor(spark_processor, processing_input, pipeline_session): step_args = get_step_args_helper(step_args, "Processing") - assert step_args["AppSpecification"]["ContainerArguments"] == arguments_output + assert step_args["AppSpecification"]["ContainerArguments"] == run_inputs["arguments"] entry_points = step_args["AppSpecification"]["ContainerEntrypoint"] entry_points_expr = [] @@ -1024,12 +1019,6 @@ def test_spark_processor_local_code(spark_processor, processing_input, pipeline_ processor, run_inputs = spark_processor processor.sagemaker_session = pipeline_session processor.role = ROLE - arguments_output = [ - "--input", - "input-data-uri", - "--output", - '{"Get": "Parameters.MyArgOutput"}', - ] run_inputs["inputs"] = processing_input @@ -1041,7 +1030,7 @@ def test_spark_processor_local_code(spark_processor, processing_input, pipeline_ step_args = get_step_args_helper(step_args, "Processing") - assert step_args["AppSpecification"]["ContainerArguments"] == arguments_output + assert step_args["AppSpecification"]["ContainerArguments"] == run_inputs["arguments"] entry_points = step_args["AppSpecification"]["ContainerEntrypoint"] entry_points_expr = [] diff --git a/tests/unit/test_processing.py b/tests/unit/test_processing.py index 7b020c61bf..06d2cde02e 100644 --- a/tests/unit/test_processing.py +++ b/tests/unit/test_processing.py @@ -46,9 +46,8 @@ from sagemaker.fw_utils import UploadedCode from sagemaker.workflow.pipeline_context import PipelineSession, _PipelineConfig from sagemaker.workflow.functions import 
Join -from sagemaker.workflow.execution_variables import ExecutionVariable, ExecutionVariables +from sagemaker.workflow.execution_variables import ExecutionVariables from tests.unit import SAGEMAKER_CONFIG_PROCESSING_JOB -from sagemaker.workflow.parameters import ParameterString BUCKET_NAME = "mybucket" REGION = "us-west-2" @@ -1718,249 +1717,3 @@ def _get_describe_response_inputs_and_ouputs(): "ProcessingInputs": _get_expected_args_all_parameters(None)["inputs"], "ProcessingOutputConfig": _get_expected_args_all_parameters(None)["output_config"], } - - -# Parameters -def _get_data_inputs_with_parameters(): - return [ - ProcessingInput( - source=ParameterString(name="input_data", default_value="s3://dummy-bucket/input"), - destination="/opt/ml/processing/input", - input_name="input-1", - ) - ] - - -def _get_data_outputs_with_parameters(): - return [ - ProcessingOutput( - source="/opt/ml/processing/output", - destination=ParameterString( - name="output_data", default_value="s3://dummy-bucket/output" - ), - output_name="output-1", - ) - ] - - -def _get_expected_args_with_parameters(job_name): - return { - "inputs": [ - { - "InputName": "input-1", - "S3Input": { - "S3Uri": "s3://dummy-bucket/input", - "LocalPath": "/opt/ml/processing/input", - "S3DataType": "S3Prefix", - "S3InputMode": "File", - "S3DataDistributionType": "FullyReplicated", - "S3CompressionType": "None", - }, - } - ], - "output_config": { - "Outputs": [ - { - "OutputName": "output-1", - "S3Output": { - "S3Uri": "s3://dummy-bucket/output", - "LocalPath": "/opt/ml/processing/output", - "S3UploadMode": "EndOfJob", - }, - } - ] - }, - "job_name": job_name, - "resources": { - "ClusterConfig": { - "InstanceType": "ml.m4.xlarge", - "InstanceCount": 1, - "VolumeSizeInGB": 100, - "VolumeKmsKeyId": "arn:aws:kms:us-west-2:012345678901:key/volume-kms-key", - } - }, - "stopping_condition": {"MaxRuntimeInSeconds": 3600}, - "app_specification": { - "ImageUri": "custom-image-uri", - "ContainerArguments": [ - "--input-data", - "s3://dummy-bucket/input-param", - "--output-path", - "s3://dummy-bucket/output-param", - ], - "ContainerEntrypoint": ["python3"], - }, - "environment": {"my_env_variable": "my_env_variable_value"}, - "network_config": { - "EnableNetworkIsolation": True, - "EnableInterContainerTrafficEncryption": True, - "VpcConfig": { - "Subnets": ["my_subnet_id"], - "SecurityGroupIds": ["my_security_group_id"], - }, - }, - "role_arn": "dummy/role", - "tags": [{"Key": "my-tag", "Value": "my-tag-value"}], - "experiment_config": {"ExperimentName": "AnExperiment"}, - } - - -@patch("os.path.exists", return_value=True) -@patch("os.path.isfile", return_value=True) -@patch("sagemaker.utils.repack_model") -@patch("sagemaker.utils.create_tar_file") -@patch("sagemaker.session.Session.upload_data") -def test_script_processor_with_parameter_string( - upload_data_mock, - create_tar_file_mock, - repack_model_mock, - exists_mock, - isfile_mock, - sagemaker_session, -): - """Test ScriptProcessor with ParameterString arguments""" - upload_data_mock.return_value = "s3://mocked_s3_uri_from_upload_data" - - # Setup processor - processor = ScriptProcessor( - role="arn:aws:iam::012345678901:role/SageMakerRole", # Updated role ARN - image_uri="custom-image-uri", - command=["python3"], - instance_type="ml.m4.xlarge", - instance_count=1, - volume_size_in_gb=100, - volume_kms_key="arn:aws:kms:us-west-2:012345678901:key/volume-kms-key", - output_kms_key="arn:aws:kms:us-west-2:012345678901:key/output-kms-key", - max_runtime_in_seconds=3600, - 
base_job_name="test_processor", - env={"my_env_variable": "my_env_variable_value"}, - tags=[{"Key": "my-tag", "Value": "my-tag-value"}], - network_config=NetworkConfig( - subnets=["my_subnet_id"], - security_group_ids=["my_security_group_id"], - enable_network_isolation=True, - encrypt_inter_container_traffic=True, - ), - sagemaker_session=sagemaker_session, - ) - - input_param = ParameterString(name="input_param", default_value="s3://dummy-bucket/input-param") - output_param = ParameterString( - name="output_param", default_value="s3://dummy-bucket/output-param" - ) - exec_var = ExecutionVariable(name="ExecutionTest") - join_var = Join(on="/", values=["s3://bucket", "prefix", "file.txt"]) - dummy_str_var = "test-variable" - - # Define expected arguments - expected_args = { - "inputs": [ - { - "InputName": "input-1", - "AppManaged": False, - "S3Input": { - "S3Uri": ParameterString( - name="input_data", default_value="s3://dummy-bucket/input" - ), - "LocalPath": "/opt/ml/processing/input", - "S3DataType": "S3Prefix", - "S3InputMode": "File", - "S3DataDistributionType": "FullyReplicated", - "S3CompressionType": "None", - }, - }, - { - "InputName": "code", - "AppManaged": False, - "S3Input": { - "S3Uri": "s3://mocked_s3_uri_from_upload_data", - "LocalPath": "/opt/ml/processing/input/code", - "S3DataType": "S3Prefix", - "S3InputMode": "File", - "S3DataDistributionType": "FullyReplicated", - "S3CompressionType": "None", - }, - }, - ], - "output_config": { - "Outputs": [ - { - "OutputName": "output-1", - "AppManaged": False, - "S3Output": { - "S3Uri": ParameterString( - name="output_data", default_value="s3://dummy-bucket/output" - ), - "LocalPath": "/opt/ml/processing/output", - "S3UploadMode": "EndOfJob", - }, - } - ], - "KmsKeyId": "arn:aws:kms:us-west-2:012345678901:key/output-kms-key", - }, - "job_name": "test_job", - "resources": { - "ClusterConfig": { - "InstanceType": "ml.m4.xlarge", - "InstanceCount": 1, - "VolumeSizeInGB": 100, - "VolumeKmsKeyId": "arn:aws:kms:us-west-2:012345678901:key/volume-kms-key", - } - }, - "stopping_condition": {"MaxRuntimeInSeconds": 3600}, - "app_specification": { - "ImageUri": "custom-image-uri", - "ContainerArguments": [ - "--input-data", - '{"Get": "Parameters.input_param"}', - "--output-path", - '{"Get": "Parameters.output_param"}', - "--exec-arg", - '{"Get": "Execution.ExecutionTest"}', - "--join-arg", - '{"Std:Join": {"On": "/", "Values": ["s3://bucket", "prefix", "file.txt"]}}', - "--string-param", - "test-variable", - ], - "ContainerEntrypoint": ["python3", "/opt/ml/processing/input/code/processing_code.py"], - }, - "environment": {"my_env_variable": "my_env_variable_value"}, - "network_config": { - "EnableNetworkIsolation": True, - "EnableInterContainerTrafficEncryption": True, - "VpcConfig": { - "SecurityGroupIds": ["my_security_group_id"], - "Subnets": ["my_subnet_id"], - }, - }, - "role_arn": "arn:aws:iam::012345678901:role/SageMakerRole", - "tags": [{"Key": "my-tag", "Value": "my-tag-value"}], - "experiment_config": {"ExperimentName": "AnExperiment"}, - } - - # Run processor - processor.run( - code="/local/path/to/processing_code.py", - inputs=_get_data_inputs_with_parameters(), - outputs=_get_data_outputs_with_parameters(), - arguments=[ - "--input-data", - input_param, - "--output-path", - output_param, - "--exec-arg", - exec_var, - "--join-arg", - join_var, - "--string-param", - dummy_str_var, - ], - wait=True, - logs=False, - job_name="test_job", - experiment_config={"ExperimentName": "AnExperiment"}, - ) - - # Assert - 
sagemaker_session.process.assert_called_with(**expected_args) - assert "test_job" in processor._current_job_name From 7acccdb8578b608623a5840c6b233f0f2dcc074e Mon Sep 17 00:00:00 2001 From: Roja Reddy Sareddy Date: Mon, 14 Apr 2025 12:05:11 -0700 Subject: [PATCH 13/27] Fix: fix the issue, https://t.corp.amazon.com/P223568185/communication --- src/sagemaker/processing.py | 11 + .../workflow/test_processing_step.py | 18 +- tests/unit/test_processing.py | 249 +++++++++++++++++- 3 files changed, 275 insertions(+), 3 deletions(-) diff --git a/src/sagemaker/processing.py b/src/sagemaker/processing.py index 103be47caf..eda4ffc01e 100644 --- a/src/sagemaker/processing.py +++ b/src/sagemaker/processing.py @@ -17,6 +17,7 @@ and interpretation on Amazon SageMaker. """ from __future__ import absolute_import +import json import logging import os import pathlib @@ -314,6 +315,16 @@ def _normalize_args( + "rather than a pipeline variable" ) + if arguments is not None: + processed_arguments = [] + for arg in arguments: + if isinstance(arg, PipelineVariable): + processed_value = json.dumps(arg.expr) + processed_arguments.append(processed_value) + else: + processed_arguments.append(arg) + arguments = processed_arguments + self._current_job_name = self._generate_current_job_name(job_name=job_name) inputs_with_code = self._include_code_in_inputs(inputs, code, kms_key) diff --git a/tests/unit/sagemaker/workflow/test_processing_step.py b/tests/unit/sagemaker/workflow/test_processing_step.py index 0dcd7c2495..9ee8242a45 100644 --- a/tests/unit/sagemaker/workflow/test_processing_step.py +++ b/tests/unit/sagemaker/workflow/test_processing_step.py @@ -825,6 +825,13 @@ def test_spark_processor(spark_processor, processing_input, pipeline_session): processor.sagemaker_session = pipeline_session processor.role = ROLE + arguments_output = [ + "--input", + "input-data-uri", + "--output", + '{"Get": "Parameters.MyArgOutput"}', + ] + run_inputs["inputs"] = processing_input step_args = processor.run(**run_inputs) @@ -835,7 +842,7 @@ def test_spark_processor(spark_processor, processing_input, pipeline_session): step_args = get_step_args_helper(step_args, "Processing") - assert step_args["AppSpecification"]["ContainerArguments"] == run_inputs["arguments"] + assert step_args["AppSpecification"]["ContainerArguments"] == arguments_output entry_points = step_args["AppSpecification"]["ContainerEntrypoint"] entry_points_expr = [] @@ -1020,6 +1027,13 @@ def test_spark_processor_local_code(spark_processor, processing_input, pipeline_ processor.sagemaker_session = pipeline_session processor.role = ROLE + arguments_output = [ + "--input", + "input-data-uri", + "--output", + '{"Get": "Parameters.MyArgOutput"}', + ] + run_inputs["inputs"] = processing_input step_args = processor.run(**run_inputs) @@ -1030,7 +1044,7 @@ def test_spark_processor_local_code(spark_processor, processing_input, pipeline_ step_args = get_step_args_helper(step_args, "Processing") - assert step_args["AppSpecification"]["ContainerArguments"] == run_inputs["arguments"] + assert step_args["AppSpecification"]["ContainerArguments"] == arguments_output entry_points = step_args["AppSpecification"]["ContainerEntrypoint"] entry_points_expr = [] diff --git a/tests/unit/test_processing.py b/tests/unit/test_processing.py index 06d2cde02e..7b020c61bf 100644 --- a/tests/unit/test_processing.py +++ b/tests/unit/test_processing.py @@ -46,8 +46,9 @@ from sagemaker.fw_utils import UploadedCode from sagemaker.workflow.pipeline_context import PipelineSession, _PipelineConfig from 
sagemaker.workflow.functions import Join -from sagemaker.workflow.execution_variables import ExecutionVariables +from sagemaker.workflow.execution_variables import ExecutionVariable, ExecutionVariables from tests.unit import SAGEMAKER_CONFIG_PROCESSING_JOB +from sagemaker.workflow.parameters import ParameterString BUCKET_NAME = "mybucket" REGION = "us-west-2" @@ -1717,3 +1718,249 @@ def _get_describe_response_inputs_and_ouputs(): "ProcessingInputs": _get_expected_args_all_parameters(None)["inputs"], "ProcessingOutputConfig": _get_expected_args_all_parameters(None)["output_config"], } + + +# Parameters +def _get_data_inputs_with_parameters(): + return [ + ProcessingInput( + source=ParameterString(name="input_data", default_value="s3://dummy-bucket/input"), + destination="/opt/ml/processing/input", + input_name="input-1", + ) + ] + + +def _get_data_outputs_with_parameters(): + return [ + ProcessingOutput( + source="/opt/ml/processing/output", + destination=ParameterString( + name="output_data", default_value="s3://dummy-bucket/output" + ), + output_name="output-1", + ) + ] + + +def _get_expected_args_with_parameters(job_name): + return { + "inputs": [ + { + "InputName": "input-1", + "S3Input": { + "S3Uri": "s3://dummy-bucket/input", + "LocalPath": "/opt/ml/processing/input", + "S3DataType": "S3Prefix", + "S3InputMode": "File", + "S3DataDistributionType": "FullyReplicated", + "S3CompressionType": "None", + }, + } + ], + "output_config": { + "Outputs": [ + { + "OutputName": "output-1", + "S3Output": { + "S3Uri": "s3://dummy-bucket/output", + "LocalPath": "/opt/ml/processing/output", + "S3UploadMode": "EndOfJob", + }, + } + ] + }, + "job_name": job_name, + "resources": { + "ClusterConfig": { + "InstanceType": "ml.m4.xlarge", + "InstanceCount": 1, + "VolumeSizeInGB": 100, + "VolumeKmsKeyId": "arn:aws:kms:us-west-2:012345678901:key/volume-kms-key", + } + }, + "stopping_condition": {"MaxRuntimeInSeconds": 3600}, + "app_specification": { + "ImageUri": "custom-image-uri", + "ContainerArguments": [ + "--input-data", + "s3://dummy-bucket/input-param", + "--output-path", + "s3://dummy-bucket/output-param", + ], + "ContainerEntrypoint": ["python3"], + }, + "environment": {"my_env_variable": "my_env_variable_value"}, + "network_config": { + "EnableNetworkIsolation": True, + "EnableInterContainerTrafficEncryption": True, + "VpcConfig": { + "Subnets": ["my_subnet_id"], + "SecurityGroupIds": ["my_security_group_id"], + }, + }, + "role_arn": "dummy/role", + "tags": [{"Key": "my-tag", "Value": "my-tag-value"}], + "experiment_config": {"ExperimentName": "AnExperiment"}, + } + + +@patch("os.path.exists", return_value=True) +@patch("os.path.isfile", return_value=True) +@patch("sagemaker.utils.repack_model") +@patch("sagemaker.utils.create_tar_file") +@patch("sagemaker.session.Session.upload_data") +def test_script_processor_with_parameter_string( + upload_data_mock, + create_tar_file_mock, + repack_model_mock, + exists_mock, + isfile_mock, + sagemaker_session, +): + """Test ScriptProcessor with ParameterString arguments""" + upload_data_mock.return_value = "s3://mocked_s3_uri_from_upload_data" + + # Setup processor + processor = ScriptProcessor( + role="arn:aws:iam::012345678901:role/SageMakerRole", # Updated role ARN + image_uri="custom-image-uri", + command=["python3"], + instance_type="ml.m4.xlarge", + instance_count=1, + volume_size_in_gb=100, + volume_kms_key="arn:aws:kms:us-west-2:012345678901:key/volume-kms-key", + output_kms_key="arn:aws:kms:us-west-2:012345678901:key/output-kms-key", + 
max_runtime_in_seconds=3600, + base_job_name="test_processor", + env={"my_env_variable": "my_env_variable_value"}, + tags=[{"Key": "my-tag", "Value": "my-tag-value"}], + network_config=NetworkConfig( + subnets=["my_subnet_id"], + security_group_ids=["my_security_group_id"], + enable_network_isolation=True, + encrypt_inter_container_traffic=True, + ), + sagemaker_session=sagemaker_session, + ) + + input_param = ParameterString(name="input_param", default_value="s3://dummy-bucket/input-param") + output_param = ParameterString( + name="output_param", default_value="s3://dummy-bucket/output-param" + ) + exec_var = ExecutionVariable(name="ExecutionTest") + join_var = Join(on="/", values=["s3://bucket", "prefix", "file.txt"]) + dummy_str_var = "test-variable" + + # Define expected arguments + expected_args = { + "inputs": [ + { + "InputName": "input-1", + "AppManaged": False, + "S3Input": { + "S3Uri": ParameterString( + name="input_data", default_value="s3://dummy-bucket/input" + ), + "LocalPath": "/opt/ml/processing/input", + "S3DataType": "S3Prefix", + "S3InputMode": "File", + "S3DataDistributionType": "FullyReplicated", + "S3CompressionType": "None", + }, + }, + { + "InputName": "code", + "AppManaged": False, + "S3Input": { + "S3Uri": "s3://mocked_s3_uri_from_upload_data", + "LocalPath": "/opt/ml/processing/input/code", + "S3DataType": "S3Prefix", + "S3InputMode": "File", + "S3DataDistributionType": "FullyReplicated", + "S3CompressionType": "None", + }, + }, + ], + "output_config": { + "Outputs": [ + { + "OutputName": "output-1", + "AppManaged": False, + "S3Output": { + "S3Uri": ParameterString( + name="output_data", default_value="s3://dummy-bucket/output" + ), + "LocalPath": "/opt/ml/processing/output", + "S3UploadMode": "EndOfJob", + }, + } + ], + "KmsKeyId": "arn:aws:kms:us-west-2:012345678901:key/output-kms-key", + }, + "job_name": "test_job", + "resources": { + "ClusterConfig": { + "InstanceType": "ml.m4.xlarge", + "InstanceCount": 1, + "VolumeSizeInGB": 100, + "VolumeKmsKeyId": "arn:aws:kms:us-west-2:012345678901:key/volume-kms-key", + } + }, + "stopping_condition": {"MaxRuntimeInSeconds": 3600}, + "app_specification": { + "ImageUri": "custom-image-uri", + "ContainerArguments": [ + "--input-data", + '{"Get": "Parameters.input_param"}', + "--output-path", + '{"Get": "Parameters.output_param"}', + "--exec-arg", + '{"Get": "Execution.ExecutionTest"}', + "--join-arg", + '{"Std:Join": {"On": "/", "Values": ["s3://bucket", "prefix", "file.txt"]}}', + "--string-param", + "test-variable", + ], + "ContainerEntrypoint": ["python3", "/opt/ml/processing/input/code/processing_code.py"], + }, + "environment": {"my_env_variable": "my_env_variable_value"}, + "network_config": { + "EnableNetworkIsolation": True, + "EnableInterContainerTrafficEncryption": True, + "VpcConfig": { + "SecurityGroupIds": ["my_security_group_id"], + "Subnets": ["my_subnet_id"], + }, + }, + "role_arn": "arn:aws:iam::012345678901:role/SageMakerRole", + "tags": [{"Key": "my-tag", "Value": "my-tag-value"}], + "experiment_config": {"ExperimentName": "AnExperiment"}, + } + + # Run processor + processor.run( + code="/local/path/to/processing_code.py", + inputs=_get_data_inputs_with_parameters(), + outputs=_get_data_outputs_with_parameters(), + arguments=[ + "--input-data", + input_param, + "--output-path", + output_param, + "--exec-arg", + exec_var, + "--join-arg", + join_var, + "--string-param", + dummy_str_var, + ], + wait=True, + logs=False, + job_name="test_job", + experiment_config={"ExperimentName": "AnExperiment"}, + ) + + # 
Assert + sagemaker_session.process.assert_called_with(**expected_args) + assert "test_job" in processor._current_job_name From 797066f6347bf90cdcdbf35a34a477d8a25d46c3 Mon Sep 17 00:00:00 2001 From: Roja Reddy Sareddy Date: Wed, 16 Apr 2025 11:49:34 -0700 Subject: [PATCH 14/27] Added numpy 2.0 support --- pyproject.toml | 4 ++-- requirements/extras/test_requirements.txt | 4 ++-- src/sagemaker/serve/utils/conda_in_process.yml | 4 ++-- src/sagemaker/serve/utils/in_process_requirements.txt | 2 +- tests/data/serve_resources/mlflow/pytorch/conda.yaml | 4 ++-- .../serve_resources/mlflow/pytorch/requirements.txt | 2 +- .../data/serve_resources/mlflow/tensorflow/conda.yaml | 2 +- .../serve_resources/mlflow/tensorflow/requirements.txt | 2 +- tests/data/serve_resources/mlflow/xgboost/conda.yaml | 2 +- .../serve_resources/mlflow/xgboost/requirements.txt | 4 ++-- tests/unit/sagemaker/jumpstart/constants.py | 10 +++++----- .../serve/detector/test_dependency_manager.py | 8 ++++---- 12 files changed, 24 insertions(+), 24 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 0122a6bf3c..148ff565a2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -39,10 +39,10 @@ dependencies = [ "google-pasta", "importlib-metadata>=1.4.0,<7.0", "jsonschema", - "numpy>=1.9.0,<2.0", + "numpy==2.0", "omegaconf>=2.2,<=2.3", "packaging>=20.0", - "pandas", + "pandas==2.2.3", "pathos", "platformdirs", "protobuf>=3.12,<6.0", diff --git a/requirements/extras/test_requirements.txt b/requirements/extras/test_requirements.txt index de960e4619..2f4ac77d16 100644 --- a/requirements/extras/test_requirements.txt +++ b/requirements/extras/test_requirements.txt @@ -1,5 +1,5 @@ tox==3.24.5 -numpy>=1.24.0 +numpy==2.0 build[virtualenv]==1.2.1 flake8==4.0.1 pytest==6.2.5 @@ -22,7 +22,7 @@ requests==2.32.2 sagemaker-experiments==0.1.35 Jinja2==3.1.6 pyvis==0.2.1 -pandas==1.4.4 +pandas==2.2.3 scikit-learn==1.3.0 cloudpickle==2.2.1 jsonpickle<4.0.0 diff --git a/src/sagemaker/serve/utils/conda_in_process.yml b/src/sagemaker/serve/utils/conda_in_process.yml index 61badaa52f..d5e6dae195 100644 --- a/src/sagemaker/serve/utils/conda_in_process.yml +++ b/src/sagemaker/serve/utils/conda_in_process.yml @@ -12,12 +12,12 @@ dependencies: - boto3>=1.34.142,<2.0 - cloudpickle==2.2.1 - google-pasta - - numpy>=1.9.0,<2.0 + - numpy==2.0 - protobuf>=3.12,<5.0 - smdebug_rulesconfig==1.0.1 - importlib-metadata>=1.4.0,<7.0 - packaging>=20.0 - - pandas + - pandas==2.2.3 - pathos - schema - PyYAML~=6.0 diff --git a/src/sagemaker/serve/utils/in_process_requirements.txt b/src/sagemaker/serve/utils/in_process_requirements.txt index e356e1720d..a40738202e 100644 --- a/src/sagemaker/serve/utils/in_process_requirements.txt +++ b/src/sagemaker/serve/utils/in_process_requirements.txt @@ -32,7 +32,7 @@ mpmath>=1.3.0 multiprocess>=0.70.14 networkx>=3.1 packaging>=23.1 -pandas>=1.5.3 +pandas==2.2.3 pathos>=0.3.0 pillow>=9.5.0 platformdirs>=3.2.0 diff --git a/tests/data/serve_resources/mlflow/pytorch/conda.yaml b/tests/data/serve_resources/mlflow/pytorch/conda.yaml index beecdbab08..05ca04f1d4 100644 --- a/tests/data/serve_resources/mlflow/pytorch/conda.yaml +++ b/tests/data/serve_resources/mlflow/pytorch/conda.yaml @@ -11,10 +11,10 @@ dependencies: - defusedxml==0.7.1 - dill==0.3.8 - gmpy2==2.1.2 - - numpy==1.26.4 + - numpy==2.0 - opt-einsum==3.3.0 - packaging==24.0 - - pandas==2.2.1 + - pandas==2.2.3 - pyyaml==6.0.1 - requests==2.31.0 - torch>=2.6.0 diff --git a/tests/data/serve_resources/mlflow/pytorch/requirements.txt 
b/tests/data/serve_resources/mlflow/pytorch/requirements.txt index 450bcbfada..4493234a2a 100644 --- a/tests/data/serve_resources/mlflow/pytorch/requirements.txt +++ b/tests/data/serve_resources/mlflow/pytorch/requirements.txt @@ -5,7 +5,7 @@ cloudpickle==2.2.1 defusedxml==0.7.1 dill==0.3.8 gmpy2==2.1.2 -numpy==1.24.4 +numpy==2.0 opt-einsum==3.3.0 packaging==21.3 pandas==2.2.1 diff --git a/tests/data/serve_resources/mlflow/tensorflow/conda.yaml b/tests/data/serve_resources/mlflow/tensorflow/conda.yaml index 90d8c300a0..4e8e060321 100644 --- a/tests/data/serve_resources/mlflow/tensorflow/conda.yaml +++ b/tests/data/serve_resources/mlflow/tensorflow/conda.yaml @@ -6,6 +6,6 @@ dependencies: - pip: - mlflow==2.11.1 - cloudpickle==2.2.1 - - numpy==1.26.4 + - numpy==2.0 - tensorflow==2.16.1 name: mlflow-env diff --git a/tests/data/serve_resources/mlflow/tensorflow/requirements.txt b/tests/data/serve_resources/mlflow/tensorflow/requirements.txt index ff99d3b92e..d633a8e813 100644 --- a/tests/data/serve_resources/mlflow/tensorflow/requirements.txt +++ b/tests/data/serve_resources/mlflow/tensorflow/requirements.txt @@ -1,4 +1,4 @@ mlflow==2.13.2 cloudpickle==2.2.1 -numpy==1.26.4 +numpy==2.0 tensorflow==2.16.1 diff --git a/tests/data/serve_resources/mlflow/xgboost/conda.yaml b/tests/data/serve_resources/mlflow/xgboost/conda.yaml index 44ca3c4c2e..92ed4106f5 100644 --- a/tests/data/serve_resources/mlflow/xgboost/conda.yaml +++ b/tests/data/serve_resources/mlflow/xgboost/conda.yaml @@ -6,7 +6,7 @@ dependencies: - pip: - mlflow==2.11.1 - lz4==4.3.2 - - numpy==1.26.4 + - numpy==2.0 - pandas==2.2.1 - psutil==5.9.8 - scikit-learn==1.3.2 diff --git a/tests/data/serve_resources/mlflow/xgboost/requirements.txt b/tests/data/serve_resources/mlflow/xgboost/requirements.txt index 1130dcaec5..e70ddeca62 100644 --- a/tests/data/serve_resources/mlflow/xgboost/requirements.txt +++ b/tests/data/serve_resources/mlflow/xgboost/requirements.txt @@ -1,7 +1,7 @@ mlflow==2.13.2 lz4==4.3.2 -numpy==1.24.4 -pandas==2.0.3 +numpy==2.0 +pandas==2.2.3 psutil==5.9.8 scikit-learn==1.3.2 scipy==1.10.1 diff --git a/tests/unit/sagemaker/jumpstart/constants.py b/tests/unit/sagemaker/jumpstart/constants.py index 2eb7469e21..85a0922a40 100644 --- a/tests/unit/sagemaker/jumpstart/constants.py +++ b/tests/unit/sagemaker/jumpstart/constants.py @@ -12095,7 +12095,7 @@ "inference_vulnerabilities": [], "training_vulnerable": False, "training_dependencies": [ - "numpy==1.23.1", + "numpy==2.0", "opencv_python==4.7.0.68", "sagemaker_jumpstart_prepack_script_utilities==1.0.0", ], @@ -14360,10 +14360,10 @@ "jmespath==1.0.1", "jsonschema==4.17.3", "multiprocess==0.70.14", - "numpy==1.24.3", + "numpy==2.0", "oscrypto==1.3.0", "packaging==23.1", - "pandas==2.0.2", + "pandas==2.2.3", "pathos==0.3.0", "pkgutil-resolve-name==1.3.10", "platformdirs==3.8.0", @@ -14884,10 +14884,10 @@ "jmespath==1.0.1", "jsonschema==4.17.3", "multiprocess==0.70.14", - "numpy==1.24.3", + "numpy==2.0", "oscrypto==1.3.0", "packaging==23.1", - "pandas==2.0.2", + "pandas==2.2.3", "pathos==0.3.0", "pkgutil-resolve-name==1.3.10", "platformdirs==3.8.0", diff --git a/tests/unit/sagemaker/serve/detector/test_dependency_manager.py b/tests/unit/sagemaker/serve/detector/test_dependency_manager.py index 491968dd25..442613ebb3 100644 --- a/tests/unit/sagemaker/serve/detector/test_dependency_manager.py +++ b/tests/unit/sagemaker/serve/detector/test_dependency_manager.py @@ -21,8 +21,8 @@ DEPENDENCY_LIST = [ "requests==2.26.0", - "numpy>=1.20.0", - "pandas<=1.3.3", + "numpy==2.0", + 
"pandas==2.2.3", "matplotlib<3.5.0", "scikit-learn>0.24.1", "Django!=4.0.0", @@ -34,8 +34,8 @@ EXPECTED_DEPENDENCY_MAP = { "requests": "==2.26.0", - "numpy": ">=1.20.0", - "pandas": "<=1.3.3", + "numpy": "==2.0", + "pandas": "==2.2.3", "matplotlib": "<3.5.0", "scikit-learn": ">0.24.1", "Django": "!=4.0.0", From ca2191990347693a82e30aa58d97ae650dcaff6e Mon Sep 17 00:00:00 2001 From: Roja Reddy Sareddy Date: Wed, 16 Apr 2025 12:29:14 -0700 Subject: [PATCH 15/27] Added numpy 2.0 support --- pyproject.toml | 2 +- requirements/extras/test_requirements.txt | 2 +- src/sagemaker/serve/utils/conda_in_process.yml | 4 ++-- tests/data/serve_resources/mlflow/pytorch/requirements.txt | 2 +- tests/data/serve_resources/mlflow/tensorflow/conda.yaml | 2 +- tests/data/serve_resources/mlflow/tensorflow/requirements.txt | 2 +- tests/data/serve_resources/mlflow/xgboost/conda.yaml | 4 ++-- tests/data/serve_resources/mlflow/xgboost/requirements.txt | 2 +- tests/unit/sagemaker/jumpstart/constants.py | 2 +- .../unit/sagemaker/serve/detector/test_dependency_manager.py | 4 ++-- 10 files changed, 13 insertions(+), 13 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 148ff565a2..2a22dd326e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -39,7 +39,7 @@ dependencies = [ "google-pasta", "importlib-metadata>=1.4.0,<7.0", "jsonschema", - "numpy==2.0", + "numpy==2.0.0", "omegaconf>=2.2,<=2.3", "packaging>=20.0", "pandas==2.2.3", diff --git a/requirements/extras/test_requirements.txt b/requirements/extras/test_requirements.txt index a4b8fd00d1..253c76a259 100644 --- a/requirements/extras/test_requirements.txt +++ b/requirements/extras/test_requirements.txt @@ -1,5 +1,5 @@ tox==3.24.5 -numpy==2.0 +numpy==2.0.0 build[virtualenv]==1.2.1 flake8==4.0.1 pytest==6.2.5 diff --git a/src/sagemaker/serve/utils/conda_in_process.yml b/src/sagemaker/serve/utils/conda_in_process.yml index d5e6dae195..3e7bcbeda5 100644 --- a/src/sagemaker/serve/utils/conda_in_process.yml +++ b/src/sagemaker/serve/utils/conda_in_process.yml @@ -12,7 +12,7 @@ dependencies: - boto3>=1.34.142,<2.0 - cloudpickle==2.2.1 - google-pasta - - numpy==2.0 + - numpy==2.0.0 - protobuf>=3.12,<5.0 - smdebug_rulesconfig==1.0.1 - importlib-metadata>=1.4.0,<7.0 @@ -64,7 +64,7 @@ dependencies: - multiprocess>=0.70.14 - networkx>=3.1 - packaging>=23.1 - - pandas>=1.5.3 + - pandas==2.2.3 - pathos>=0.3.0 - pillow>=9.5.0 - platformdirs>=3.2.0 diff --git a/tests/data/serve_resources/mlflow/pytorch/requirements.txt b/tests/data/serve_resources/mlflow/pytorch/requirements.txt index 4493234a2a..f2f8061b54 100644 --- a/tests/data/serve_resources/mlflow/pytorch/requirements.txt +++ b/tests/data/serve_resources/mlflow/pytorch/requirements.txt @@ -5,7 +5,7 @@ cloudpickle==2.2.1 defusedxml==0.7.1 dill==0.3.8 gmpy2==2.1.2 -numpy==2.0 +numpy==2.0.0 opt-einsum==3.3.0 packaging==21.3 pandas==2.2.1 diff --git a/tests/data/serve_resources/mlflow/tensorflow/conda.yaml b/tests/data/serve_resources/mlflow/tensorflow/conda.yaml index 4e8e060321..95c9f977fa 100644 --- a/tests/data/serve_resources/mlflow/tensorflow/conda.yaml +++ b/tests/data/serve_resources/mlflow/tensorflow/conda.yaml @@ -6,6 +6,6 @@ dependencies: - pip: - mlflow==2.11.1 - cloudpickle==2.2.1 - - numpy==2.0 + - numpy==2.0.0 - tensorflow==2.16.1 name: mlflow-env diff --git a/tests/data/serve_resources/mlflow/tensorflow/requirements.txt b/tests/data/serve_resources/mlflow/tensorflow/requirements.txt index d633a8e813..f20ba060e5 100644 --- a/tests/data/serve_resources/mlflow/tensorflow/requirements.txt +++ 
b/tests/data/serve_resources/mlflow/tensorflow/requirements.txt @@ -1,4 +1,4 @@ mlflow==2.13.2 cloudpickle==2.2.1 -numpy==2.0 +numpy==2.0.0 tensorflow==2.16.1 diff --git a/tests/data/serve_resources/mlflow/xgboost/conda.yaml b/tests/data/serve_resources/mlflow/xgboost/conda.yaml index 92ed4106f5..51c3791209 100644 --- a/tests/data/serve_resources/mlflow/xgboost/conda.yaml +++ b/tests/data/serve_resources/mlflow/xgboost/conda.yaml @@ -6,8 +6,8 @@ dependencies: - pip: - mlflow==2.11.1 - lz4==4.3.2 - - numpy==2.0 - - pandas==2.2.1 + - numpy==2.0.0 + - pandas==2.2.3 - psutil==5.9.8 - scikit-learn==1.3.2 - scipy==1.11.3 diff --git a/tests/data/serve_resources/mlflow/xgboost/requirements.txt b/tests/data/serve_resources/mlflow/xgboost/requirements.txt index e70ddeca62..5addf1c8f9 100644 --- a/tests/data/serve_resources/mlflow/xgboost/requirements.txt +++ b/tests/data/serve_resources/mlflow/xgboost/requirements.txt @@ -1,6 +1,6 @@ mlflow==2.13.2 lz4==4.3.2 -numpy==2.0 +numpy==2.0.0 pandas==2.2.3 psutil==5.9.8 scikit-learn==1.3.2 diff --git a/tests/unit/sagemaker/jumpstart/constants.py b/tests/unit/sagemaker/jumpstart/constants.py index 85a0922a40..a0c9421172 100644 --- a/tests/unit/sagemaker/jumpstart/constants.py +++ b/tests/unit/sagemaker/jumpstart/constants.py @@ -14884,7 +14884,7 @@ "jmespath==1.0.1", "jsonschema==4.17.3", "multiprocess==0.70.14", - "numpy==2.0", + "numpy==2.0.0", "oscrypto==1.3.0", "packaging==23.1", "pandas==2.2.3", diff --git a/tests/unit/sagemaker/serve/detector/test_dependency_manager.py b/tests/unit/sagemaker/serve/detector/test_dependency_manager.py index 442613ebb3..06ab587d93 100644 --- a/tests/unit/sagemaker/serve/detector/test_dependency_manager.py +++ b/tests/unit/sagemaker/serve/detector/test_dependency_manager.py @@ -21,7 +21,7 @@ DEPENDENCY_LIST = [ "requests==2.26.0", - "numpy==2.0", + "numpy==2.0.0", "pandas==2.2.3", "matplotlib<3.5.0", "scikit-learn>0.24.1", @@ -34,7 +34,7 @@ EXPECTED_DEPENDENCY_MAP = { "requests": "==2.26.0", - "numpy": "==2.0", + "numpy": "==2.0.0", "pandas": "==2.2.3", "matplotlib": "<3.5.0", "scikit-learn": ">0.24.1", From a7d9c3e44c876cecd3b6d548efd4e76d40876c0a Mon Sep 17 00:00:00 2001 From: Roja Reddy Sareddy Date: Wed, 16 Apr 2025 12:45:08 -0700 Subject: [PATCH 16/27] Added numpy 2.0 support --- pyproject.toml | 2 +- requirements/extras/test_requirements.txt | 2 +- src/sagemaker/serve/utils/conda_in_process.yml | 2 +- tests/data/serve_resources/mlflow/pytorch/requirements.txt | 2 +- tests/data/serve_resources/mlflow/tensorflow/conda.yaml | 2 +- tests/data/serve_resources/mlflow/tensorflow/requirements.txt | 2 +- tests/data/serve_resources/mlflow/xgboost/conda.yaml | 2 +- tests/data/serve_resources/mlflow/xgboost/requirements.txt | 2 +- tests/unit/sagemaker/jumpstart/constants.py | 2 +- tests/unit/sagemaker/serve/detector/test_dependency_manager.py | 2 +- 10 files changed, 10 insertions(+), 10 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 2a22dd326e..b0cf468059 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -39,7 +39,7 @@ dependencies = [ "google-pasta", "importlib-metadata>=1.4.0,<7.0", "jsonschema", - "numpy==2.0.0", + "numpy>=2.0.0", "omegaconf>=2.2,<=2.3", "packaging>=20.0", "pandas==2.2.3", diff --git a/requirements/extras/test_requirements.txt b/requirements/extras/test_requirements.txt index 253c76a259..1051382663 100644 --- a/requirements/extras/test_requirements.txt +++ b/requirements/extras/test_requirements.txt @@ -1,5 +1,5 @@ tox==3.24.5 -numpy==2.0.0 +numpy>=2.0.0 build[virtualenv]==1.2.1 
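
Patches 14 through 16 walk the numpy requirement from the long-standing "numpy>=1.9.0,<2.0" bound to an exact "==2.0" pin, then to "==2.0.0", and finally to the floor ">=2.0.0". Under PEP 440 the two exact pins match the same release, because release segments are zero-padded before comparison, so only the switch to ">=" changes what pip may resolve. A minimal sketch of that behavior with the packaging library (already a declared dependency in pyproject.toml); the version literals below are illustrative:

    from packaging.specifiers import SpecifierSet
    from packaging.version import Version

    # "2.0" and "2.0.0" normalize to the same release, so the
    # ==2.0 -> ==2.0.0 change in PATCH 15 is purely cosmetic.
    assert Version("2.0") == Version("2.0.0")
    assert SpecifierSet("==2.0").contains("2.0.0")

    # The move to a floor in PATCH 16 is what actually admits newer releases.
    assert not SpecifierSet("==2.0.0").contains("2.1.0")
    assert SpecifierSet(">=2.0.0").contains("2.1.0")
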
flake8==4.0.1 pytest==6.2.5 diff --git a/src/sagemaker/serve/utils/conda_in_process.yml b/src/sagemaker/serve/utils/conda_in_process.yml index 3e7bcbeda5..59d51f68a0 100644 --- a/src/sagemaker/serve/utils/conda_in_process.yml +++ b/src/sagemaker/serve/utils/conda_in_process.yml @@ -12,7 +12,7 @@ dependencies: - boto3>=1.34.142,<2.0 - cloudpickle==2.2.1 - google-pasta - - numpy==2.0.0 + - numpy>=2.0.0 - protobuf>=3.12,<5.0 - smdebug_rulesconfig==1.0.1 - importlib-metadata>=1.4.0,<7.0 diff --git a/tests/data/serve_resources/mlflow/pytorch/requirements.txt b/tests/data/serve_resources/mlflow/pytorch/requirements.txt index f2f8061b54..31d37cc517 100644 --- a/tests/data/serve_resources/mlflow/pytorch/requirements.txt +++ b/tests/data/serve_resources/mlflow/pytorch/requirements.txt @@ -5,7 +5,7 @@ cloudpickle==2.2.1 defusedxml==0.7.1 dill==0.3.8 gmpy2==2.1.2 -numpy==2.0.0 +numpy>=2.0.0 opt-einsum==3.3.0 packaging==21.3 pandas==2.2.1 diff --git a/tests/data/serve_resources/mlflow/tensorflow/conda.yaml b/tests/data/serve_resources/mlflow/tensorflow/conda.yaml index 95c9f977fa..49c8965ac4 100644 --- a/tests/data/serve_resources/mlflow/tensorflow/conda.yaml +++ b/tests/data/serve_resources/mlflow/tensorflow/conda.yaml @@ -6,6 +6,6 @@ dependencies: - pip: - mlflow==2.11.1 - cloudpickle==2.2.1 - - numpy==2.0.0 + - numpy>=2.0.0 - tensorflow==2.16.1 name: mlflow-env diff --git a/tests/data/serve_resources/mlflow/tensorflow/requirements.txt b/tests/data/serve_resources/mlflow/tensorflow/requirements.txt index f20ba060e5..c8ff680c08 100644 --- a/tests/data/serve_resources/mlflow/tensorflow/requirements.txt +++ b/tests/data/serve_resources/mlflow/tensorflow/requirements.txt @@ -1,4 +1,4 @@ mlflow==2.13.2 cloudpickle==2.2.1 -numpy==2.0.0 +numpy>=2.0.0 tensorflow==2.16.1 diff --git a/tests/data/serve_resources/mlflow/xgboost/conda.yaml b/tests/data/serve_resources/mlflow/xgboost/conda.yaml index 51c3791209..3d2448a31a 100644 --- a/tests/data/serve_resources/mlflow/xgboost/conda.yaml +++ b/tests/data/serve_resources/mlflow/xgboost/conda.yaml @@ -6,7 +6,7 @@ dependencies: - pip: - mlflow==2.11.1 - lz4==4.3.2 - - numpy==2.0.0 + - numpy>=2.0.0 - pandas==2.2.3 - psutil==5.9.8 - scikit-learn==1.3.2 diff --git a/tests/data/serve_resources/mlflow/xgboost/requirements.txt b/tests/data/serve_resources/mlflow/xgboost/requirements.txt index 5addf1c8f9..fcaa3cf9a1 100644 --- a/tests/data/serve_resources/mlflow/xgboost/requirements.txt +++ b/tests/data/serve_resources/mlflow/xgboost/requirements.txt @@ -1,6 +1,6 @@ mlflow==2.13.2 lz4==4.3.2 -numpy==2.0.0 +numpy>=2.0.0 pandas==2.2.3 psutil==5.9.8 scikit-learn==1.3.2 diff --git a/tests/unit/sagemaker/jumpstart/constants.py b/tests/unit/sagemaker/jumpstart/constants.py index a0c9421172..5204d2985c 100644 --- a/tests/unit/sagemaker/jumpstart/constants.py +++ b/tests/unit/sagemaker/jumpstart/constants.py @@ -14884,7 +14884,7 @@ "jmespath==1.0.1", "jsonschema==4.17.3", "multiprocess==0.70.14", - "numpy==2.0.0", + "numpy>=2.0.0", "oscrypto==1.3.0", "packaging==23.1", "pandas==2.2.3", diff --git a/tests/unit/sagemaker/serve/detector/test_dependency_manager.py b/tests/unit/sagemaker/serve/detector/test_dependency_manager.py index 06ab587d93..b73cfcc489 100644 --- a/tests/unit/sagemaker/serve/detector/test_dependency_manager.py +++ b/tests/unit/sagemaker/serve/detector/test_dependency_manager.py @@ -21,7 +21,7 @@ DEPENDENCY_LIST = [ "requests==2.26.0", - "numpy==2.0.0", + "numpy>=2.0.0", "pandas==2.2.3", "matplotlib<3.5.0", "scikit-learn>0.24.1", From 
87d190d2883a83ead8f01a7db1828ef973a7a56f Mon Sep 17 00:00:00 2001 From: Roja Reddy Sareddy Date: Wed, 16 Apr 2025 14:57:29 -0700 Subject: [PATCH 17/27] resolve conflict dependency with numpy 2.0 --- requirements/extras/scipy_requirements.txt | 2 +- requirements/extras/test_requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements/extras/scipy_requirements.txt b/requirements/extras/scipy_requirements.txt index 0e99587e6e..4e8e3a6e65 100644 --- a/requirements/extras/scipy_requirements.txt +++ b/requirements/extras/scipy_requirements.txt @@ -1 +1 @@ -scipy==1.10.1 +scipy==1.15.2 diff --git a/requirements/extras/test_requirements.txt b/requirements/extras/test_requirements.txt index 1051382663..b9893365f1 100644 --- a/requirements/extras/test_requirements.txt +++ b/requirements/extras/test_requirements.txt @@ -23,7 +23,7 @@ sagemaker-experiments==0.1.35 Jinja2==3.1.6 pyvis==0.2.1 pandas==2.2.3 -scikit-learn==1.3.0 +scikit-learn==1.6.1 cloudpickle==2.2.1 jsonpickle<4.0.0 PyYAML==6.0 From 96a34bd13a3108cdc72836fb402a68562d2f32ec Mon Sep 17 00:00:00 2001 From: Roja Reddy Sareddy Date: Thu, 17 Apr 2025 10:39:06 -0700 Subject: [PATCH 18/27] resolve conflict dependency with numpy 2.0 --- requirements/extras/test_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/extras/test_requirements.txt b/requirements/extras/test_requirements.txt index b9893365f1..2a8d5890e9 100644 --- a/requirements/extras/test_requirements.txt +++ b/requirements/extras/test_requirements.txt @@ -43,7 +43,7 @@ nbformat>=5.9,<6 accelerate>=0.24.1,<=0.27.0 schema==0.7.5 tensorflow>=2.9.0,<=2.15.1 -mlflow>=2.12.2,<2.13 +mlflow>=2.16.1 huggingface_hub==0.26.2 uvicorn>=0.30.1 fastapi==0.115.4 From 2eb87bc0fcc85dc15352a64a0d8df347fe69f5d6 Mon Sep 17 00:00:00 2001 From: Roja Reddy Sareddy Date: Thu, 17 Apr 2025 12:01:26 -0700 Subject: [PATCH 19/27] resolve conflict dependency with numpy 2.0 --- pyproject.toml | 2 +- requirements/extras/scipy_requirements.txt | 2 +- requirements/extras/test_requirements.txt | 4 ++-- src/sagemaker/serve/utils/conda_in_process.yml | 2 +- tests/data/serve_resources/mlflow/pytorch/conda.yaml | 2 +- tests/data/serve_resources/mlflow/pytorch/requirements.txt | 4 ++-- tests/data/serve_resources/mlflow/tensorflow/conda.yaml | 4 ++-- tests/data/serve_resources/mlflow/tensorflow/requirements.txt | 4 ++-- tests/data/serve_resources/mlflow/xgboost/conda.yaml | 4 ++-- tests/data/serve_resources/mlflow/xgboost/requirements.txt | 4 ++-- tests/unit/sagemaker/jumpstart/constants.py | 2 +- .../unit/sagemaker/serve/detector/test_dependency_manager.py | 2 +- 12 files changed, 18 insertions(+), 18 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index b0cf468059..d5c6229f37 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -39,7 +39,7 @@ dependencies = [ "google-pasta", "importlib-metadata>=1.4.0,<7.0", "jsonschema", - "numpy>=2.0.0", + "numpy>=2.0.0,<2.3.0", "omegaconf>=2.2,<=2.3", "packaging>=20.0", "pandas==2.2.3", diff --git a/requirements/extras/scipy_requirements.txt b/requirements/extras/scipy_requirements.txt index 4e8e3a6e65..f89caf8c2b 100644 --- a/requirements/extras/scipy_requirements.txt +++ b/requirements/extras/scipy_requirements.txt @@ -1 +1 @@ -scipy==1.15.2 +scipy==1.13.0 diff --git a/requirements/extras/test_requirements.txt b/requirements/extras/test_requirements.txt index 2a8d5890e9..3c0ab9e6b2 100644 --- a/requirements/extras/test_requirements.txt +++ b/requirements/extras/test_requirements.txt @@ -1,5 +1,5 
@@ tox==3.24.5 -numpy>=2.0.0 +numpy>=2.0.0, <2.3.0 build[virtualenv]==1.2.1 flake8==4.0.1 pytest==6.2.5 @@ -23,7 +23,7 @@ sagemaker-experiments==0.1.35 Jinja2==3.1.6 pyvis==0.2.1 pandas==2.2.3 -scikit-learn==1.6.1 +scikit-learn==1.4.0 cloudpickle==2.2.1 jsonpickle<4.0.0 PyYAML==6.0 diff --git a/src/sagemaker/serve/utils/conda_in_process.yml b/src/sagemaker/serve/utils/conda_in_process.yml index 59d51f68a0..9a5bad65c1 100644 --- a/src/sagemaker/serve/utils/conda_in_process.yml +++ b/src/sagemaker/serve/utils/conda_in_process.yml @@ -12,7 +12,7 @@ dependencies: - boto3>=1.34.142,<2.0 - cloudpickle==2.2.1 - google-pasta - - numpy>=2.0.0 + - numpy>=2.0.0,<2.3.0 - protobuf>=3.12,<5.0 - smdebug_rulesconfig==1.0.1 - importlib-metadata>=1.4.0,<7.0 diff --git a/tests/data/serve_resources/mlflow/pytorch/conda.yaml b/tests/data/serve_resources/mlflow/pytorch/conda.yaml index 05ca04f1d4..137e3f694b 100644 --- a/tests/data/serve_resources/mlflow/pytorch/conda.yaml +++ b/tests/data/serve_resources/mlflow/pytorch/conda.yaml @@ -4,7 +4,7 @@ dependencies: - python=3.10.13 - pip<=23.3.1 - pip: - - mlflow==2.10.2 + - mlflow==2.16.1 - astunparse==1.6.3 - cffi==1.16.0 - cloudpickle==2.2.1 diff --git a/tests/data/serve_resources/mlflow/pytorch/requirements.txt b/tests/data/serve_resources/mlflow/pytorch/requirements.txt index 31d37cc517..969cef50f3 100644 --- a/tests/data/serve_resources/mlflow/pytorch/requirements.txt +++ b/tests/data/serve_resources/mlflow/pytorch/requirements.txt @@ -1,11 +1,11 @@ -mlflow==2.13.2 +mlflow==2.16.1 astunparse==1.6.3 cffi==1.16.0 cloudpickle==2.2.1 defusedxml==0.7.1 dill==0.3.8 gmpy2==2.1.2 -numpy>=2.0.0 +numpy>=2.0.0,<2.3.0 opt-einsum==3.3.0 packaging==21.3 pandas==2.2.1 diff --git a/tests/data/serve_resources/mlflow/tensorflow/conda.yaml b/tests/data/serve_resources/mlflow/tensorflow/conda.yaml index 49c8965ac4..4d2a50079c 100644 --- a/tests/data/serve_resources/mlflow/tensorflow/conda.yaml +++ b/tests/data/serve_resources/mlflow/tensorflow/conda.yaml @@ -4,8 +4,8 @@ dependencies: - python=3.10.13 - pip<=23.3.1 - pip: - - mlflow==2.11.1 + - mlflow==2.16.1 - cloudpickle==2.2.1 - - numpy>=2.0.0 + - numpy>=2.0.0,<2.3.0 - tensorflow==2.16.1 name: mlflow-env diff --git a/tests/data/serve_resources/mlflow/tensorflow/requirements.txt b/tests/data/serve_resources/mlflow/tensorflow/requirements.txt index c8ff680c08..12573c8994 100644 --- a/tests/data/serve_resources/mlflow/tensorflow/requirements.txt +++ b/tests/data/serve_resources/mlflow/tensorflow/requirements.txt @@ -1,4 +1,4 @@ -mlflow==2.13.2 +mlflow==2.16.1 cloudpickle==2.2.1 -numpy>=2.0.0 +numpy>=2.0.0,<2.3.0 tensorflow==2.16.1 diff --git a/tests/data/serve_resources/mlflow/xgboost/conda.yaml b/tests/data/serve_resources/mlflow/xgboost/conda.yaml index 3d2448a31a..7220d5cc39 100644 --- a/tests/data/serve_resources/mlflow/xgboost/conda.yaml +++ b/tests/data/serve_resources/mlflow/xgboost/conda.yaml @@ -4,9 +4,9 @@ dependencies: - python=3.10.13 - pip<=23.3.1 - pip: - - mlflow==2.11.1 + - mlflow==2.16.1 - lz4==4.3.2 - - numpy>=2.0.0 + - numpy>=2.0.0,<2.3.0 - pandas==2.2.3 - psutil==5.9.8 - scikit-learn==1.3.2 diff --git a/tests/data/serve_resources/mlflow/xgboost/requirements.txt b/tests/data/serve_resources/mlflow/xgboost/requirements.txt index fcaa3cf9a1..f2ce3e3350 100644 --- a/tests/data/serve_resources/mlflow/xgboost/requirements.txt +++ b/tests/data/serve_resources/mlflow/xgboost/requirements.txt @@ -1,6 +1,6 @@ -mlflow==2.13.2 +mlflow==2.16.1 lz4==4.3.2 -numpy>=2.0.0 +numpy>=2.0.0,<2.3.0 pandas==2.2.3 psutil==5.9.8 
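
PATCH 19 closes the open-ended floor from PATCH 16 by capping every occurrence at "numpy>=2.0.0,<2.3.0" and moving mlflow up to 2.16.1. A sketch of how an environment could be validated against that range before use, assuming Python 3.8+ for importlib.metadata; the helper name check_numpy_range is illustrative and not part of the SDK:

    from importlib.metadata import version

    from packaging.specifiers import SpecifierSet

    def check_numpy_range(spec=">=2.0.0,<2.3.0"):
        """Raise if the installed numpy falls outside the pinned range."""
        # Reads installed package metadata without importing numpy itself.
        installed = version("numpy")
        if not SpecifierSet(spec).contains(installed):
            raise RuntimeError(f"numpy {installed} is outside the supported range {spec}")

    check_numpy_range()
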
scikit-learn==1.3.2 diff --git a/tests/unit/sagemaker/jumpstart/constants.py b/tests/unit/sagemaker/jumpstart/constants.py index 5204d2985c..25cee7288e 100644 --- a/tests/unit/sagemaker/jumpstart/constants.py +++ b/tests/unit/sagemaker/jumpstart/constants.py @@ -14884,7 +14884,7 @@ "jmespath==1.0.1", "jsonschema==4.17.3", "multiprocess==0.70.14", - "numpy>=2.0.0", + "numpy>=2.0.0,<2.3.0", "oscrypto==1.3.0", "packaging==23.1", "pandas==2.2.3", diff --git a/tests/unit/sagemaker/serve/detector/test_dependency_manager.py b/tests/unit/sagemaker/serve/detector/test_dependency_manager.py index b73cfcc489..986cfbfe93 100644 --- a/tests/unit/sagemaker/serve/detector/test_dependency_manager.py +++ b/tests/unit/sagemaker/serve/detector/test_dependency_manager.py @@ -21,7 +21,7 @@ DEPENDENCY_LIST = [ "requests==2.26.0", - "numpy>=2.0.0", + "numpy>=2.0.0,<2.3.0", "pandas==2.2.3", "matplotlib<3.5.0", "scikit-learn>0.24.1", From 054891ed3f0c8268e22b5109eb8f9e3151aad349 Mon Sep 17 00:00:00 2001 From: Roja Reddy Sareddy Date: Thu, 17 Apr 2025 12:15:19 -0700 Subject: [PATCH 20/27] resolve conflict dependency with numpy 2.0 --- tox.ini | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tox.ini b/tox.ini index b16c0d2f0b..21cea9ac50 100644 --- a/tox.ini +++ b/tox.ini @@ -67,7 +67,7 @@ markers = [testenv] setenv = PYTHONHASHSEED=42 -pip_version = pip==21.3 +pip_version = pip==24.3 passenv = AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY @@ -132,7 +132,7 @@ commands = twine check dist/*.tar.gz [testenv:sphinx] -pip_version = pip==21.3 +pip_version = pip==24.3 changedir = doc # pip install requirements.txt is separate as RTD does it in separate steps # having the requirements.txt installed in deps above results in Double Requirement exception From 843c3fd065e1bfc746fd6547a5db4c3b4faae0bb Mon Sep 17 00:00:00 2001 From: Roja Reddy Sareddy Date: Thu, 17 Apr 2025 12:45:12 -0700 Subject: [PATCH 21/27] resolve conflict dependency with numpy 2.0 --- pyproject.toml | 2 +- tox.ini | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index d5c6229f37..dd803ba5b5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -47,7 +47,7 @@ dependencies = [ "platformdirs", "protobuf>=3.12,<6.0", "psutil", - "PyYAML~=6.0", + "PyYAML>=6.0.1", "requests", "sagemaker-core>=1.0.17,<2.0.0", "schema", diff --git a/tox.ini b/tox.ini index 21cea9ac50..1664da221d 100644 --- a/tox.ini +++ b/tox.ini @@ -82,10 +82,10 @@ passenv = # Can be used to specify which tests to run, e.g.: tox -- -s commands = python -c "import os; os.system('install-custom-pkgs --install-boto-wheels')" - pip install 'apache-airflow==2.9.3' --constraint "https://raw.githubusercontent.com/apache/airflow/constraints-2.9.3/constraints-3.8.txt" - pip install 'torch==2.0.1+cpu' -f 'https://download.pytorch.org/whl/torch_stable.html' - pip install 'torchvision==0.15.2+cpu' -f 'https://download.pytorch.org/whl/torch_stable.html' - pip install 'dill>=0.3.8' + pip install 'apache-airflow==2.10.4' --constraint "https://raw.githubusercontent.com/apache/airflow/constraints-2.10.4/constraints-3.9.txt" + pip install 'torch==2.3.1+cpu' -f 'https://download.pytorch.org/whl/torch_stable.html' + pip install 'torchvision==0.18.1+cpu' -f 'https://download.pytorch.org/whl/torch_stable.html' + pip install 'dill>=0.3.9' pytest {posargs} deps = .[test] From 6598598a0779b2ddbfc313246d38ad685eb15cee Mon Sep 17 00:00:00 2001 From: Roja Reddy Sareddy Date: Thu, 17 Apr 2025 15:11:24 -0700 Subject: [PATCH 22/27] resolve conflict 
From 6598598a0779b2ddbfc313246d38ad685eb15cee Mon Sep 17 00:00:00 2001 From: Roja Reddy Sareddy Date: Thu, 17 Apr 2025 15:11:24 -0700 Subject: [PATCH 22/27] resolve dependency conflict with numpy 2.0 --- .pylintrc | 21 +- CHANGELOG.md | 7 - VERSION | 2 +- pyproject.toml | 6 +- requirements/extras/local_requirements.txt | 2 +- requirements/extras/test_requirements.txt | 13 +- requirements/tox/doc8_requirements.txt | 4 +- requirements/tox/flake8_requirements.txt | 4 +- requirements/tox/pylint_requirements.txt | 4 +- requirements/tox/spelling_requirements.txt | 2 +- src/sagemaker/config/config_schema.py | 24 +- .../feature_store/dataset_builder.py | 2 +- .../huggingface-llm-neuronx.json | 11 - .../image_uri_config/huggingface-llm.json | 181 +- src/sagemaker/image_uri_config/pytorch.json | 55 - .../image_uri_config/tensorflow.json | 84 - src/sagemaker/jumpstart/factory/model.py | 2 +- src/sagemaker/local/entities.py | 6 +- .../model_monitor/clarify_model_monitoring.py | 6 +- .../multi_model_server/prepare.py | 3 +- .../serve/utils/conda_in_process.yml | 8 +- .../serve/utils/in_process_requirements.txt | 4 +- .../model_step/pytorch_mnist/requirements.txt | 2 +- tests/data/remote_function/requirements.txt | 2 +- .../serve_resources/mlflow/pytorch/conda.yaml | 6 +- .../mlflow/pytorch/requirements.txt | 4 +- .../serve_resources/mlflow/xgboost/conda.yaml | 2 +- .../mlflow/xgboost/requirements.txt | 4 +- tests/data/workflow/requirements.txt | 2 +- tests/integ/sagemaker/experiments/test_run.py | 4 +- .../jumpstart/private_hub/test_hub_content.py | 2 +- .../serve/test_serve_js_deep_unit_tests.py | 54 +- tests/integ/sagemaker/workflow/helpers.py | 4 +- .../integ/sagemaker/workflow/test_workflow.py | 8 +- tests/integ/test_feature_store.py | 16 +- .../lineage/test_feature_processor_lineage.py | 2116 +++++++++-------- .../sagemaker/huggingface/test_llm_utils.py | 4 +- tests/unit/sagemaker/jumpstart/constants.py | 10 +- .../estimator/test_sagemaker_config.py | 100 +- .../jumpstart/model/test_sagemaker_config.py | 44 +- tests/unit/sagemaker/jumpstart/test_utils.py | 24 +- .../sagemaker/local/test_local_entities.py | 7 +- .../modules/train/test_model_trainer.py | 5 +- .../serve/detector/test_dependency_manager.py | 8 +- .../detector/test_pickle_dependencies.py | 45 +- .../djl_serving/test_djl_prepare.py | 6 +- .../test_multi_model_server_prepare.py | 6 +- .../model_server/tgi/test_tgi_prepare.py | 6 +- .../unit/sagemaker/workflow/test_pipeline.py | 61 +- tests/unit/test_exception_on_bad_status.py | 8 +- tests/unit/test_hyperparameter.py | 2 +- tests/unit/test_predictor_async.py | 4 +- tests/unit/test_tuner.py | 49 +- tox.ini | 18 +- 54 files changed, 1536 insertions(+), 1548 deletions(-) diff --git a/.pylintrc b/.pylintrc index 5428b86be0..223580f4d3 100644 --- a/.pylintrc +++ b/.pylintrc @@ -94,7 +94,24 @@ disable= useless-object-inheritance, # TODO: Enable this check and fix code once Python 2 is no longer supported.
super-with-arguments, raise-missing-from, -    E1136, +    C0116,  # Missing function or method docstring +    C0209,  # Use f-string instead of format +    E0015,  # Unrecognized option found in config +    E0702,  # Raising something that is not an exception class or instance +    E1101,  # Module has no member (likely dynamic attr) +    E1136,  # Value is unsubscriptable (unsubscriptable-object) +    R0022,  # Useless option value in config +    R1710,  # Inconsistent return statements +    R1714,  # Consider using `in` with comparisons +    R1729,  # Use a generator +    R1732,  # Consider using 'with' for resource-allocating operations +    R1735,  # Consider using a dict literal instead of dict() +    W0237,  # Argument renamed in override +    W0613,  # Unused argument +    W0621,  # Redefining name from outer scope +    W0719,  # Raising too general exception +    W1404,  # Implicit string concatenation +    W1514,  # `open()` used without encoding [REPORTS] # Set the output format. Available formats are text, parseable, colorized, msvs @@ -436,4 +453,4 @@ analyse-fallback-blocks=no # Exceptions that will emit a warning when being caught. Defaults to # "Exception" -overgeneral-exceptions=Exception +overgeneral-exceptions=builtins.Exception diff --git a/CHANGELOG.md b/CHANGELOG.md index e59d964bd1..7db9aa6c8e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,12 +1,5 @@ # Changelog -## v2.243.2 (2025-04-16) - -### Bug Fixes and Other Changes - - * tgi image uri unit tests - * Fix deepdiff dependencies - ## v2.243.1 (2025-04-11) ### Bug Fixes and Other Changes diff --git a/VERSION b/VERSION index 4e55ec1ee4..f68f7b9691 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.243.3.dev0 +2.243.2.dev0 diff --git a/pyproject.toml b/pyproject.toml index dd803ba5b5..6e0bf54b0d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -7,7 +7,7 @@ name = "sagemaker" dynamic = ["version", "optional-dependencies"] description = "Open source library for training and deploying models on Amazon SageMaker."
readme = "README.rst" -requires-python = ">=3.8" +requires-python = ">=3.9" authors = [ { name = "Amazon Web Services" }, ] @@ -25,10 +25,10 @@ classifiers = [ "License :: OSI Approved :: Apache Software License", "Natural Language :: English", "Programming Language :: Python", - "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", ] dependencies = [ "attrs>=23.1.0,<24", @@ -41,7 +41,7 @@ dependencies = [ "jsonschema", "numpy>=2.0.0,<2.3.0", "omegaconf>=2.2,<=2.3", - "packaging>=20.0", + "packaging>=23.0,<25", "pandas==2.2.3", "pathos", "platformdirs", diff --git a/requirements/extras/local_requirements.txt b/requirements/extras/local_requirements.txt index 68b9a1bcb3..ea57b82e9a 100644 --- a/requirements/extras/local_requirements.txt +++ b/requirements/extras/local_requirements.txt @@ -1,3 +1,3 @@ urllib3>=1.26.8,<3.0.0 docker>=5.0.2,<8.0.0 -PyYAML>=5.4.1,<7 +PyYAML>=6.0.1,<7 diff --git a/requirements/extras/test_requirements.txt b/requirements/extras/test_requirements.txt index 3c0ab9e6b2..e15c53b8af 100644 --- a/requirements/extras/test_requirements.txt +++ b/requirements/extras/test_requirements.txt @@ -1,7 +1,7 @@ tox==3.24.5 numpy>=2.0.0, <2.3.0 build[virtualenv]==1.2.1 -flake8==4.0.1 +flake8==7.1.2 pytest==6.2.5 pytest-cov==3.0.0 pytest-rerunfailures==10.2 @@ -14,10 +14,10 @@ awslogs==0.14.0 black==24.3.0 stopit==1.1.2 # Update tox.ini to have correct version of airflow constraints file -apache-airflow==2.9.3 +apache-airflow==2.10.4 apache-airflow-providers-amazon==7.2.1 attrs>=23.1.0,<24 -fabric==2.6.0 +fabric==3.2.2 requests==2.32.2 sagemaker-experiments==0.1.35 Jinja2==3.1.6 @@ -26,13 +26,13 @@ pandas==2.2.3 scikit-learn==1.4.0 cloudpickle==2.2.1 jsonpickle<4.0.0 -PyYAML==6.0 +PyYAML>=6.0.1 # TODO find workaround xgboost>=1.6.2,<=1.7.6 pillow>=10.0.1,<=11 opentelemetry-proto==1.27.0 protobuf==4.25.5 -tensorboard>=2.9.0,<=2.15.2 +tensorboard>=2.16.2,<=2.18.0 transformers==4.48.0 sentencepiece==0.1.99 # https://github.com/triton-inference-server/server/issues/6246 @@ -42,7 +42,7 @@ onnx==1.17.0 nbformat>=5.9,<6 accelerate>=0.24.1,<=0.27.0 schema==0.7.5 -tensorflow>=2.9.0,<=2.15.1 +tensorflow>=2.16.2,<=2.18.0 mlflow>=2.16.1 huggingface_hub==0.26.2 uvicorn>=0.30.1 @@ -51,3 +51,4 @@ nest-asyncio sagemaker-mlflow>=0.1.0 deepdiff>=8.0.0 orderly-set<5.4.0 +lexicon diff --git a/requirements/tox/doc8_requirements.txt b/requirements/tox/doc8_requirements.txt index e4a040dd4d..8707c06621 100644 --- a/requirements/tox/doc8_requirements.txt +++ b/requirements/tox/doc8_requirements.txt @@ -1,2 +1,2 @@ -doc8==0.10.1 -Pygments==2.15.0 +doc8==1.1.2 +Pygments==2.18.0 diff --git a/requirements/tox/flake8_requirements.txt b/requirements/tox/flake8_requirements.txt index b3ccfca84f..63a79da444 100644 --- a/requirements/tox/flake8_requirements.txt +++ b/requirements/tox/flake8_requirements.txt @@ -1,2 +1,2 @@ -flake8==4.0.1 -flake8-future-import==0.4.6 +flake8==7.1.2 +flake8-future-import==0.4.7 diff --git a/requirements/tox/pylint_requirements.txt b/requirements/tox/pylint_requirements.txt index b307f21762..0e5db209fe 100644 --- a/requirements/tox/pylint_requirements.txt +++ b/requirements/tox/pylint_requirements.txt @@ -1,2 +1,2 @@ -pylint==2.6.2 -astroid==2.4.2 +pylint==3.0.3 +astroid==3.0.2 diff --git a/requirements/tox/spelling_requirements.txt b/requirements/tox/spelling_requirements.txt index 769415eb2c..94d6bc314e 100644 --- 
a/requirements/tox/spelling_requirements.txt +++ b/requirements/tox/spelling_requirements.txt @@ -1,2 +1,2 @@ pyenchant==3.2.2 -pylint==2.6.2 +pylint==3.0.3 diff --git a/src/sagemaker/config/config_schema.py b/src/sagemaker/config/config_schema.py index 34a98c0b8e..61da17e7cf 100644 --- a/src/sagemaker/config/config_schema.py +++ b/src/sagemaker/config/config_schema.py @@ -540,7 +540,8 @@ def _simple_path(*args: str): "minItems": 0, "maxItems": 50, }, - # Regex is taken from https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_CreateTrainingJob.html#sagemaker-CreateTrainingJob-request-Environment + # Regex is taken from https://docs.aws.amazon.com/sagemaker/latest/APIReference/ + # API_CreateTrainingJob.html#sagemaker-CreateTrainingJob-request-Environment "environmentVariables": { TYPE: OBJECT, ADDITIONAL_PROPERTIES: False, @@ -553,13 +554,15 @@ def _simple_path(*args: str): }, "maxProperties": 48, }, - # Regex is taken from https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_S3DataSource.html#sagemaker-Type-S3DataSource-S3Uri + # Regex is taken from https://docs.aws.amazon.com/sagemaker/latest/APIReference/ + # API_S3DataSource.html#sagemaker-Type-S3DataSource-S3Uri "s3Uri": { TYPE: "string", "pattern": "^(https|s3)://([^/]+)/?(.*)$", "maxLength": 1024, }, - # Regex is taken from https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_AlgorithmSpecification.html#sagemaker-Type-AlgorithmSpecification-ContainerEntrypoint + # Regex is taken from https://docs.aws.amazon.com/sagemaker/latest/APIReference/ + # API_AlgorithmSpecification.html#sagemaker-Type-AlgorithmSpecification-ContainerEntrypoint "preExecutionCommand": {TYPE: "string", "pattern": r".*"}, # Regex based on https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_PipelineDefinitionS3Location.html # except with an additional ^ and $ for the beginning and the end to closer align to @@ -570,7 +573,8 @@ def _simple_path(*args: str): "minLength": 3, "maxLength": 63, }, - # Regex is taken from https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_MonitoringJobDefinition.html#sagemaker-Type-MonitoringJobDefinition-Environment + # Regex is taken from https://docs.aws.amazon.com/sagemaker/latest/APIReference/ + # API_MonitoringJobDefinition.html#sagemaker-Type-MonitoringJobDefinition-Environment "environment-Length256-Properties50": { TYPE: OBJECT, ADDITIONAL_PROPERTIES: False, @@ -583,7 +587,8 @@ def _simple_path(*args: str): }, "maxProperties": 50, }, - # Regex is taken from https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_CreateTransformJob.html#sagemaker-CreateTransformJob-request-Environment + # Regex is taken from https://docs.aws.amazon.com/sagemaker/latest/APIReference/ + # API_CreateTransformJob.html#sagemaker-CreateTransformJob-request-Environment "environment-Length10240-Properties16": { TYPE: OBJECT, ADDITIONAL_PROPERTIES: False, @@ -596,7 +601,8 @@ def _simple_path(*args: str): }, "maxProperties": 16, }, - # Regex is taken from https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_ContainerDefinition.html#sagemaker-Type-ContainerDefinition-Environment + # Regex is taken from https://docs.aws.amazon.com/sagemaker/latest/APIReference/ + # API_ContainerDefinition.html#sagemaker-Type-ContainerDefinition-Environment "environment-Length1024-Properties16": { TYPE: OBJECT, ADDITIONAL_PROPERTIES: False, @@ -609,7 +615,8 @@ def _simple_path(*args: str): }, "maxProperties": 16, }, - # Regex is taken from 
https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_CreateProcessingJob.html#sagemaker-CreateProcessingJob-request-Environment + # Regex is taken from https://docs.aws.amazon.com/sagemaker/latest/APIReference/ + # API_CreateProcessingJob.html#sagemaker-CreateProcessingJob-request-Environment "environment-Length256-Properties100": { TYPE: OBJECT, ADDITIONAL_PROPERTIES: False, @@ -622,7 +629,8 @@ def _simple_path(*args: str): }, "maxProperties": 100, }, - # Regex is taken from https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_CreateTrainingJob.html#sagemaker-CreateTrainingJob-request-Environment + # Regex is taken from https://docs.aws.amazon.com/sagemaker/latest/APIReference/ + # API_CreateTrainingJob.html#sagemaker-CreateTrainingJob-request-Environment "environment-Length512-Properties48": { TYPE: OBJECT, ADDITIONAL_PROPERTIES: False, diff --git a/src/sagemaker/feature_store/dataset_builder.py b/src/sagemaker/feature_store/dataset_builder.py index 289fa1ee0c..fc9f9372b1 100644 --- a/src/sagemaker/feature_store/dataset_builder.py +++ b/src/sagemaker/feature_store/dataset_builder.py @@ -929,7 +929,7 @@ def _construct_query_string(self, base: FeatureGroupToBeMerged) -> str: selected_features += ", " selected_features += ", ".join( [ - f'fg_{i}."{feature_name}" as "{feature_name}.{(i+1)}"' + f'fg_{i}."{feature_name}" as "{feature_name}.{(i + 1)}"' for feature_name in feature_group.projected_feature_names ] ) diff --git a/src/sagemaker/image_uri_config/huggingface-llm-neuronx.json b/src/sagemaker/image_uri_config/huggingface-llm-neuronx.json index d79e7637ed..ed5c289377 100644 --- a/src/sagemaker/image_uri_config/huggingface-llm-neuronx.json +++ b/src/sagemaker/image_uri_config/huggingface-llm-neuronx.json @@ -14,7 +14,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -67,7 +66,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -120,7 +118,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -173,7 +170,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -226,7 +222,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -279,7 +274,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -332,7 +326,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -385,7 +378,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", 
"ap-northeast-3": "364406365360", @@ -438,7 +430,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -491,7 +482,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -544,7 +534,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", diff --git a/src/sagemaker/image_uri_config/huggingface-llm.json b/src/sagemaker/image_uri_config/huggingface-llm.json index ed85f0d2bf..27df32a073 100644 --- a/src/sagemaker/image_uri_config/huggingface-llm.json +++ b/src/sagemaker/image_uri_config/huggingface-llm.json @@ -14,9 +14,7 @@ "1.4": "1.4.5", "2.0": "2.4.0", "2.3": "2.3.1", - "3.0": "3.0.1", - "3.2": "3.2.3", - "3.1": "3.1.1" + "3.0": "3.0.1" }, "versions": { "0.6.0": { @@ -26,7 +24,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -79,7 +76,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -132,7 +128,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -185,7 +180,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -238,7 +232,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -291,7 +284,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -344,7 +336,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -397,7 +388,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -450,7 +440,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -503,7 +492,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -556,7 +544,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", 
"ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -609,7 +596,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -662,7 +648,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -715,7 +700,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -768,7 +752,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -821,7 +804,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -874,7 +856,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -927,7 +908,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -972,165 +952,6 @@ "container_version": { "gpu": "cu124-ubuntu22.04-v2.1" } - }, - "3.1.1": { - "py_versions": [ - "py311" - ], - "registries": { - "af-south-1": "626614931356", - "ap-east-1": "871362719292", - "ap-east-2": "975050140332", - "ap-northeast-1": "763104351884", - "ap-northeast-2": "763104351884", - "ap-northeast-3": "364406365360", - "ap-south-1": "763104351884", - "ap-south-2": "772153158452", - "ap-southeast-1": "763104351884", - "ap-southeast-2": "763104351884", - "ap-southeast-3": "907027046896", - "ap-southeast-4": "457447274322", - "ap-southeast-5": "550225433462", - "ap-southeast-7": "590183813437", - "ca-central-1": "763104351884", - "ca-west-1": "204538143572", - "cn-north-1": "727897471807", - "cn-northwest-1": "727897471807", - "eu-central-1": "763104351884", - "eu-central-2": "380420809688", - "eu-north-1": "763104351884", - "eu-south-1": "692866216735", - "eu-south-2": "503227376785", - "eu-west-1": "763104351884", - "eu-west-2": "763104351884", - "eu-west-3": "763104351884", - "il-central-1": "780543022126", - "me-central-1": "914824155844", - "me-south-1": "217643126080", - "mx-central-1": "637423239942", - "sa-east-1": "763104351884", - "us-east-1": "763104351884", - "us-east-2": "763104351884", - "us-gov-east-1": "446045086412", - "us-gov-west-1": "442386744353", - "us-iso-east-1": "886529160074", - "us-isob-east-1": "094389454867", - "us-isof-east-1": "303241398832", - "us-isof-south-1": "454834333376", - "us-west-1": "763104351884", - "us-west-2": "763104351884" - }, - "tag_prefix": "2.6.0-tgi3.1.1", - "repository": "huggingface-pytorch-tgi-inference", - "container_version": { - "gpu": "cu124-ubuntu22.04" - } - }, - "3.2.0": { - "py_versions": [ - "py311" - ], - "registries": { - "af-south-1": "626614931356", - "ap-east-1": "871362719292", - "ap-east-2": "975050140332", - "ap-northeast-1": 
"763104351884", - "ap-northeast-2": "763104351884", - "ap-northeast-3": "364406365360", - "ap-south-1": "763104351884", - "ap-south-2": "772153158452", - "ap-southeast-1": "763104351884", - "ap-southeast-2": "763104351884", - "ap-southeast-3": "907027046896", - "ap-southeast-4": "457447274322", - "ap-southeast-5": "550225433462", - "ap-southeast-7": "590183813437", - "ca-central-1": "763104351884", - "ca-west-1": "204538143572", - "cn-north-1": "727897471807", - "cn-northwest-1": "727897471807", - "eu-central-1": "763104351884", - "eu-central-2": "380420809688", - "eu-north-1": "763104351884", - "eu-south-1": "692866216735", - "eu-south-2": "503227376785", - "eu-west-1": "763104351884", - "eu-west-2": "763104351884", - "eu-west-3": "763104351884", - "il-central-1": "780543022126", - "me-central-1": "914824155844", - "me-south-1": "217643126080", - "mx-central-1": "637423239942", - "sa-east-1": "763104351884", - "us-east-1": "763104351884", - "us-east-2": "763104351884", - "us-gov-east-1": "446045086412", - "us-gov-west-1": "442386744353", - "us-iso-east-1": "886529160074", - "us-isob-east-1": "094389454867", - "us-isof-east-1": "303241398832", - "us-isof-south-1": "454834333376", - "us-west-1": "763104351884", - "us-west-2": "763104351884" - }, - "tag_prefix": "2.6.0-tgi3.2.0", - "repository": "huggingface-pytorch-tgi-inference", - "container_version": { - "gpu": "cu124-ubuntu22.04" - } - }, - "3.2.3": { - "py_versions": [ - "py311" - ], - "registries": { - "af-south-1": "626614931356", - "ap-east-1": "871362719292", - "ap-east-2": "975050140332", - "ap-northeast-1": "763104351884", - "ap-northeast-2": "763104351884", - "ap-northeast-3": "364406365360", - "ap-south-1": "763104351884", - "ap-south-2": "772153158452", - "ap-southeast-1": "763104351884", - "ap-southeast-2": "763104351884", - "ap-southeast-3": "907027046896", - "ap-southeast-4": "457447274322", - "ap-southeast-5": "550225433462", - "ap-southeast-7": "590183813437", - "ca-central-1": "763104351884", - "ca-west-1": "204538143572", - "cn-north-1": "727897471807", - "cn-northwest-1": "727897471807", - "eu-central-1": "763104351884", - "eu-central-2": "380420809688", - "eu-north-1": "763104351884", - "eu-south-1": "692866216735", - "eu-south-2": "503227376785", - "eu-west-1": "763104351884", - "eu-west-2": "763104351884", - "eu-west-3": "763104351884", - "il-central-1": "780543022126", - "me-central-1": "914824155844", - "me-south-1": "217643126080", - "mx-central-1": "637423239942", - "sa-east-1": "763104351884", - "us-east-1": "763104351884", - "us-east-2": "763104351884", - "us-gov-east-1": "446045086412", - "us-gov-west-1": "442386744353", - "us-iso-east-1": "886529160074", - "us-isob-east-1": "094389454867", - "us-isof-east-1": "303241398832", - "us-isof-south-1": "454834333376", - "us-west-1": "763104351884", - "us-west-2": "763104351884" - }, - "tag_prefix": "2.6.0-tgi3.2.3", - "repository": "huggingface-pytorch-tgi-inference", - "container_version": { - "gpu": "cu124-ubuntu22.04" - } } } } diff --git a/src/sagemaker/image_uri_config/pytorch.json b/src/sagemaker/image_uri_config/pytorch.json index dbff976442..01e0d65dc5 100644 --- a/src/sagemaker/image_uri_config/pytorch.json +++ b/src/sagemaker/image_uri_config/pytorch.json @@ -199,7 +199,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -247,7 +246,6 @@ "registries": { "af-south-1": "626614931356", 
"ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -295,7 +293,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -343,7 +340,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -391,7 +387,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -439,7 +434,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -487,7 +481,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -535,7 +528,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -582,7 +574,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -629,7 +620,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -676,7 +666,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -723,7 +712,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -770,7 +758,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -817,7 +804,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -864,7 +850,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -911,7 +896,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -958,7 +942,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", 
"ap-northeast-3": "364406365360", @@ -1005,7 +988,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -1052,7 +1034,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -1101,7 +1082,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -1150,7 +1130,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -1195,7 +1174,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -1240,7 +1218,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -1285,7 +1262,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -1348,7 +1324,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -1398,7 +1373,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -1446,7 +1420,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -1494,7 +1467,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -1542,7 +1514,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -1590,7 +1561,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -1638,7 +1608,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -1818,7 +1787,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -1866,7 +1834,6 @@ "registries": { "af-south-1": "626614931356", 
"ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -1915,7 +1882,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -1963,7 +1929,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -2011,7 +1976,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -2059,7 +2023,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -2107,7 +2070,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -2155,7 +2117,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -2202,7 +2163,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -2249,7 +2209,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -2296,7 +2255,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -2343,7 +2301,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -2390,7 +2347,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -2437,7 +2393,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -2484,7 +2439,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -2531,7 +2485,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -2578,7 +2531,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", 
"ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -2625,7 +2577,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -2672,7 +2623,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -2721,7 +2671,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -2770,7 +2719,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -2819,7 +2767,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -2864,7 +2811,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -2909,7 +2855,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", diff --git a/src/sagemaker/image_uri_config/tensorflow.json b/src/sagemaker/image_uri_config/tensorflow.json index 097baafa9b..37fa7ee46d 100644 --- a/src/sagemaker/image_uri_config/tensorflow.json +++ b/src/sagemaker/image_uri_config/tensorflow.json @@ -631,7 +631,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -675,7 +674,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -719,7 +717,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -763,7 +760,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -807,7 +803,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -851,7 +846,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -895,7 +889,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -939,7 +932,6 @@ 
"registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -983,7 +975,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -1027,7 +1018,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -1071,7 +1061,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -1115,7 +1104,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -1159,7 +1147,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -1203,7 +1190,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -1247,7 +1233,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -1291,7 +1276,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -1335,7 +1319,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -1379,7 +1362,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -1423,7 +1405,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -1467,7 +1448,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -1511,7 +1491,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -1555,7 +1534,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -1599,7 +1577,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", 
"ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -1643,7 +1620,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -1687,7 +1663,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -1731,7 +1706,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -1775,7 +1749,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -1819,7 +1792,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -1863,7 +1835,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -1907,7 +1878,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -1951,7 +1921,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -1995,7 +1964,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -2039,7 +2007,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -2083,7 +2050,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -2127,7 +2093,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -2171,7 +2136,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -2215,7 +2179,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -2259,7 +2222,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ 
-2305,7 +2267,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -2351,7 +2312,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -2393,7 +2353,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -2455,7 +2414,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -2505,7 +2463,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -2555,7 +2512,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -2605,7 +2561,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -2655,7 +2610,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -3111,7 +3065,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -3159,7 +3112,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -3208,7 +3160,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -3257,7 +3208,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -3306,7 +3256,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -3355,7 +3304,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -3403,7 +3351,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -3451,7 +3398,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - 
"ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -3499,7 +3445,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -3547,7 +3492,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -3595,7 +3539,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -3643,7 +3586,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -3691,7 +3633,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -3739,7 +3680,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -3787,7 +3727,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -3834,7 +3773,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -3881,7 +3819,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -3928,7 +3865,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -3975,7 +3911,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -4022,7 +3957,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -4069,7 +4003,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -4116,7 +4049,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -4163,7 +4095,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", 
"ap-northeast-3": "364406365360", @@ -4210,7 +4141,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -4257,7 +4187,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -4304,7 +4233,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -4351,7 +4279,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -4398,7 +4325,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -4445,7 +4371,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -4492,7 +4417,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -4539,7 +4463,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -4586,7 +4509,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -4633,7 +4555,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -4680,7 +4601,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -4725,7 +4645,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -4774,7 +4693,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -4823,7 +4741,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", @@ -4868,7 +4785,6 @@ "registries": { "af-south-1": "626614931356", "ap-east-1": "871362719292", - "ap-east-2": "975050140332", "ap-northeast-1": "763104351884", "ap-northeast-2": "763104351884", "ap-northeast-3": "364406365360", diff --git a/src/sagemaker/jumpstart/factory/model.py 
b/src/sagemaker/jumpstart/factory/model.py index 4245c5ac91..53ded3f275 100644 --- a/src/sagemaker/jumpstart/factory/model.py +++ b/src/sagemaker/jumpstart/factory/model.py @@ -104,7 +104,7 @@ def get_default_predictor( """ # if there's a non-default predictor, do not mutate -- return as is - if type(predictor) != Predictor: # pylint: disable=C0123 + if not isinstance(predictor, Predictor): raise RuntimeError( "Can only get default predictor from base Predictor class. " f"Using Predictor class '{type(predictor).__name__}'." diff --git a/src/sagemaker/local/entities.py b/src/sagemaker/local/entities.py index a21a375f54..0cf6c6d55a 100644 --- a/src/sagemaker/local/entities.py +++ b/src/sagemaker/local/entities.py @@ -845,10 +845,10 @@ def _initialize_and_validate_parameters(self, overridden_parameters): ) raise ClientError(error_msg, "start_pipeline_execution") parameter_type = default_parameters[param_name].parameter_type - if type(param_value) != parameter_type.python_type: # pylint: disable=C0123 + if not isinstance(param_value, parameter_type.python_type): error_msg = self._construct_validation_exception_message( - "Unexpected type for parameter '{}'. Expected {} but found " - "{}.".format(param_name, parameter_type.python_type, type(param_value)) + f"Unexpected type for parameter '{param_name}'. Expected \ + {parameter_type.python_type} but found {type(param_value)}." ) raise ClientError(error_msg, "start_pipeline_execution") if param_value == "": diff --git a/src/sagemaker/model_monitor/clarify_model_monitoring.py b/src/sagemaker/model_monitor/clarify_model_monitoring.py index 3edfabc747..2d9a4a69e4 100644 --- a/src/sagemaker/model_monitor/clarify_model_monitoring.py +++ b/src/sagemaker/model_monitor/clarify_model_monitoring.py @@ -86,11 +86,9 @@ def __init__( object that configures network isolation, encryption of inter-container traffic, security group IDs, and subnets. """ - if type(self) == __class__: # pylint: disable=unidiomatic-typecheck + if self.__class__ is __class__: raise TypeError( - "{} is abstract, please instantiate its subclasses instead.".format( - __class__.__name__ - ) + f"{__class__.__name__} is abstract, please instantiate its subclasses instead." ) session = sagemaker_session or Session() diff --git a/src/sagemaker/serve/model_server/multi_model_server/prepare.py b/src/sagemaker/serve/model_server/multi_model_server/prepare.py index 48cf5c878a..e3abc70dd6 100644 --- a/src/sagemaker/serve/model_server/multi_model_server/prepare.py +++ b/src/sagemaker/serve/model_server/multi_model_server/prepare.py @@ -84,7 +84,8 @@ def prepare_for_mms( image_uri: str, inference_spec: InferenceSpec = None, ) -> str: - """Prepares for InferenceSpec using model_path, writes inference.py, and captures dependencies to generate secret_key. + """Prepares for InferenceSpec using model_path, writes inference.py, \ + and captures dependencies to generate secret_key. 
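
For context on the `type(...) != Predictor` to `isinstance(...)` conversions above: the two checks are not equivalent. An exact `type()` comparison rejects subclasses, while `isinstance()` accepts them, so these edits slightly widen what passes each guard. A minimal, self-contained sketch (the subclass name is hypothetical):

    class Predictor:
        """Stand-in for sagemaker.predictor.Predictor."""


    class RetryingPredictor(Predictor):
        """Hypothetical subclass, used only to illustrate the difference."""


    p = RetryingPredictor()

    # Exact-type check: fails for subclasses, so the old guard raised for them.
    print(type(p) == Predictor)      # False

    # isinstance: succeeds for subclasses, so the new guard lets them through.
    print(isinstance(p, Predictor))  # True
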
Args:to model_path (str) : Argument diff --git a/src/sagemaker/serve/utils/conda_in_process.yml b/src/sagemaker/serve/utils/conda_in_process.yml index 9a5bad65c1..8233f80902 100644 --- a/src/sagemaker/serve/utils/conda_in_process.yml +++ b/src/sagemaker/serve/utils/conda_in_process.yml @@ -16,11 +16,11 @@ dependencies: - protobuf>=3.12,<5.0 - smdebug_rulesconfig==1.0.1 - importlib-metadata>=1.4.0,<7.0 - - packaging>=20.0 + - packaging>=23.0,<25 - pandas==2.2.3 - pathos - schema - - PyYAML~=6.0 + - PyYAML>=6.0.1 - jsonschema - platformdirs - tblib>=1.7.0,<4 @@ -43,7 +43,7 @@ dependencies: - colorama>=0.4.4 - contextlib2>=21.6.0 - decorator>=5.1.1 - - dill>=0.3.6 + - dill>=0.3.9 - docutils>=0.16 - entrypoints>=0.4 - filelock>=3.11.0 @@ -82,7 +82,7 @@ dependencies: - python-dateutil>=2.8.2 - pytz>=2023.3 - pytz-deprecation-shim>=0.1.0.post0 - - pyyaml>=5.4.1 + - pyyaml>=6.0.1 - regex>=2023.3.23 - requests>=2.28.2 - rich>=13.3.4 diff --git a/src/sagemaker/serve/utils/in_process_requirements.txt b/src/sagemaker/serve/utils/in_process_requirements.txt index a40738202e..c7659adb1f 100644 --- a/src/sagemaker/serve/utils/in_process_requirements.txt +++ b/src/sagemaker/serve/utils/in_process_requirements.txt @@ -11,7 +11,7 @@ cloudpickle==2.2.1 colorama>=0.4.4 contextlib2>=21.6.0 decorator>=5.1.1 -dill>=0.3.6 +dill>=0.3.9 docutils>=0.16 entrypoints>=0.4 filelock>=3.11.0 @@ -50,7 +50,7 @@ pyrsistent>=0.19.3 python-dateutil>=2.8.2 pytz>=2023.3 pytz-deprecation-shim>=0.1.0.post0 -pyyaml>=5.4.1 +pyyaml>=6.0.1 regex>=2023.3.23 requests>=2.28.2 rich>=13.3.4 diff --git a/tests/data/pipeline/model_step/pytorch_mnist/requirements.txt b/tests/data/pipeline/model_step/pytorch_mnist/requirements.txt index 56d09228be..c25fca7e9f 100644 --- a/tests/data/pipeline/model_step/pytorch_mnist/requirements.txt +++ b/tests/data/pipeline/model_step/pytorch_mnist/requirements.txt @@ -1 +1 @@ -scipy>=1.8.1 +scipy>=1.11.3 diff --git a/tests/data/remote_function/requirements.txt b/tests/data/remote_function/requirements.txt index 0e99587e6e..44ce1d9331 100644 --- a/tests/data/remote_function/requirements.txt +++ b/tests/data/remote_function/requirements.txt @@ -1 +1 @@ -scipy==1.10.1 +scipy==1.11.3 diff --git a/tests/data/serve_resources/mlflow/pytorch/conda.yaml b/tests/data/serve_resources/mlflow/pytorch/conda.yaml index 137e3f694b..e218c7577c 100644 --- a/tests/data/serve_resources/mlflow/pytorch/conda.yaml +++ b/tests/data/serve_resources/mlflow/pytorch/conda.yaml @@ -9,12 +9,12 @@ dependencies: - cffi==1.16.0 - cloudpickle==2.2.1 - defusedxml==0.7.1 - - dill==0.3.8 + - dill==0.3.9 - gmpy2==2.1.2 - - numpy==2.0 + - numpy>=2.0.0,<2.3.0 - opt-einsum==3.3.0 - packaging==24.0 - - pandas==2.2.3 + - pandas==2.2.1 - pyyaml==6.0.1 - requests==2.31.0 - torch>=2.6.0 diff --git a/tests/data/serve_resources/mlflow/pytorch/requirements.txt b/tests/data/serve_resources/mlflow/pytorch/requirements.txt index 969cef50f3..926353225e 100644 --- a/tests/data/serve_resources/mlflow/pytorch/requirements.txt +++ b/tests/data/serve_resources/mlflow/pytorch/requirements.txt @@ -3,11 +3,11 @@ astunparse==1.6.3 cffi==1.16.0 cloudpickle==2.2.1 defusedxml==0.7.1 -dill==0.3.8 +dill==0.3.9 gmpy2==2.1.2 numpy>=2.0.0,<2.3.0 opt-einsum==3.3.0 -packaging==21.3 +packaging>=23.0,<25 pandas==2.2.1 pyyaml==6.0.1 requests==2.32.2 diff --git a/tests/data/serve_resources/mlflow/xgboost/conda.yaml b/tests/data/serve_resources/mlflow/xgboost/conda.yaml index 7220d5cc39..c18c7341fc 100644 --- a/tests/data/serve_resources/mlflow/xgboost/conda.yaml +++ 
b/tests/data/serve_resources/mlflow/xgboost/conda.yaml @@ -7,7 +7,7 @@ dependencies: - mlflow==2.16.1 - lz4==4.3.2 - numpy>=2.0.0,<2.3.0 - - pandas==2.2.3 + - pandas==2.2.1 - psutil==5.9.8 - scikit-learn==1.3.2 - scipy==1.11.3 diff --git a/tests/data/serve_resources/mlflow/xgboost/requirements.txt b/tests/data/serve_resources/mlflow/xgboost/requirements.txt index f2ce3e3350..7050dc4e4c 100644 --- a/tests/data/serve_resources/mlflow/xgboost/requirements.txt +++ b/tests/data/serve_resources/mlflow/xgboost/requirements.txt @@ -1,8 +1,8 @@ mlflow==2.16.1 lz4==4.3.2 numpy>=2.0.0,<2.3.0 -pandas==2.2.3 +pandas==2.0.3 psutil==5.9.8 scikit-learn==1.3.2 -scipy==1.10.1 +scipy==1.11.3 xgboost==1.7.1 diff --git a/tests/data/workflow/requirements.txt b/tests/data/workflow/requirements.txt index 0e99587e6e..44ce1d9331 100644 --- a/tests/data/workflow/requirements.txt +++ b/tests/data/workflow/requirements.txt @@ -1 +1 @@ -scipy==1.10.1 +scipy==1.11.3 diff --git a/tests/integ/sagemaker/experiments/test_run.py b/tests/integ/sagemaker/experiments/test_run.py index 4f59d11c54..f00f53a5ad 100644 --- a/tests/integ/sagemaker/experiments/test_run.py +++ b/tests/integ/sagemaker/experiments/test_run.py @@ -720,8 +720,8 @@ def _generate_processor( ) return FrameworkProcessor( estimator_cls=PyTorch, - framework_version="1.10", - py_version="py38", + framework_version="1.13.1", + py_version="py39", instance_count=1, instance_type="ml.m5.xlarge", role=execution_role, diff --git a/tests/integ/sagemaker/jumpstart/private_hub/test_hub_content.py b/tests/integ/sagemaker/jumpstart/private_hub/test_hub_content.py index b25cff2d62..04b945a457 100644 --- a/tests/integ/sagemaker/jumpstart/private_hub/test_hub_content.py +++ b/tests/integ/sagemaker/jumpstart/private_hub/test_hub_content.py @@ -38,7 +38,7 @@ def test_hub_model_reference(setup): describe_model_response = hub_instance.describe_model(model_name=model_id) assert describe_model_response is not None - assert type(describe_model_response) == DescribeHubContentResponse + assert isinstance(describe_model_response, DescribeHubContentResponse) assert describe_model_response.hub_content_name == model_id assert describe_model_response.hub_content_type == "ModelReference" diff --git a/tests/integ/sagemaker/serve/test_serve_js_deep_unit_tests.py b/tests/integ/sagemaker/serve/test_serve_js_deep_unit_tests.py index e13e672bec..ea65f998c8 100644 --- a/tests/integ/sagemaker/serve/test_serve_js_deep_unit_tests.py +++ b/tests/integ/sagemaker/serve/test_serve_js_deep_unit_tests.py @@ -24,11 +24,12 @@ def test_js_model_with_optimize_speculative_decoding_config_gated_requests_are_expected( sagemaker_session, ): - with patch.object( - Session, "create_model", return_value="mock_model" - ) as mock_create_model, patch.object( - Session, "endpoint_from_production_variants" - ) as mock_endpoint_from_production_variants: + with ( + patch.object(Session, "create_model", return_value="mock_model") as mock_create_model, + patch.object( + Session, "endpoint_from_production_variants" + ) as mock_endpoint_from_production_variants, + ): iam_client = sagemaker_session.boto_session.client("iam") role_arn = iam_client.get_role(RoleName=ROLE_NAME)["Role"]["Arn"] @@ -100,17 +101,18 @@ def test_js_model_with_optimize_speculative_decoding_config_gated_requests_are_e def test_js_model_with_optimize_sharding_and_resource_requirements_requests_are_expected( sagemaker_session, ): - with patch.object( - Session, - "wait_for_optimization_job", - return_value={"OptimizationJobName": "mock_optimization_job"}, - 
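
The test rewrites above and below replace the old comma-chained `with` headers with the parenthesized multi-context form. Worth noting: parentheses around multiple context managers are only accepted by Python 3.10 and newer. A minimal sketch with a stand-in `Session` class:

    from unittest.mock import patch


    class Session:
        """Stand-in for sagemaker.session.Session; methods exist so patch.object works."""

        def create_model(self):
            return "real-model"

        def endpoint_from_production_variants(self):
            return "real-endpoint"


    # Python 3.10+: one manager per line, wrapped in parentheses, trailing comma allowed.
    with (
        patch.object(Session, "create_model", return_value="mock_model") as mock_create_model,
        patch.object(
            Session, "endpoint_from_production_variants", return_value="mock_endpoint"
        ) as mock_endpoint,
    ):
        session = Session()
        assert session.create_model() == "mock_model"
        assert session.endpoint_from_production_variants() == "mock_endpoint"

    mock_create_model.assert_called_once()
    mock_endpoint.assert_called_once()
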
), patch.object( - Session, "create_model", return_value="mock_model" - ) as mock_create_model, patch.object( - Session, "endpoint_from_production_variants", return_value="mock_endpoint_name" - ) as mock_endpoint_from_production_variants, patch.object( - Session, "create_inference_component" - ) as mock_create_inference_component: + with ( + patch.object( + Session, + "wait_for_optimization_job", + return_value={"OptimizationJobName": "mock_optimization_job"}, + ), + patch.object(Session, "create_model", return_value="mock_model") as mock_create_model, + patch.object( + Session, "endpoint_from_production_variants", return_value="mock_endpoint_name" + ) as mock_endpoint_from_production_variants, + patch.object(Session, "create_inference_component") as mock_create_inference_component, + ): iam_client = sagemaker_session.boto_session.client("iam") role_arn = iam_client.get_role(RoleName=ROLE_NAME)["Role"]["Arn"] @@ -185,15 +187,17 @@ def test_js_model_with_optimize_sharding_and_resource_requirements_requests_are_ def test_js_model_with_optimize_quantization_on_pre_optimized_model_requests_are_expected( sagemaker_session, ): - with patch.object( - Session, - "wait_for_optimization_job", - return_value={"OptimizationJobName": "mock_optimization_job"}, - ), patch.object( - Session, "create_model", return_value="mock_model" - ) as mock_create_model, patch.object( - Session, "endpoint_from_production_variants", return_value="mock_endpoint_name" - ) as mock_endpoint_from_production_variants: + with ( + patch.object( + Session, + "wait_for_optimization_job", + return_value={"OptimizationJobName": "mock_optimization_job"}, + ), + patch.object(Session, "create_model", return_value="mock_model") as mock_create_model, + patch.object( + Session, "endpoint_from_production_variants", return_value="mock_endpoint_name" + ) as mock_endpoint_from_production_variants, + ): iam_client = sagemaker_session.boto_session.client("iam") role_arn = iam_client.get_role(RoleName=ROLE_NAME)["Role"]["Arn"] diff --git a/tests/integ/sagemaker/workflow/helpers.py b/tests/integ/sagemaker/workflow/helpers.py index 20365ef169..9f0176c5c2 100644 --- a/tests/integ/sagemaker/workflow/helpers.py +++ b/tests/integ/sagemaker/workflow/helpers.py @@ -70,8 +70,8 @@ def create_and_execute_pipeline( assert execution_steps[0]["StepStatus"] == step_status if step_result_type: result = execution.result(execution_steps[0]["StepName"]) - assert ( - type(result) == step_result_type + assert isinstance( + result, step_result_type ), f"Expected {step_result_type}, instead found {type(result)}" if step_result_value: diff --git a/tests/integ/sagemaker/workflow/test_workflow.py b/tests/integ/sagemaker/workflow/test_workflow.py index 2643a3b88e..9ef0b14a04 100644 --- a/tests/integ/sagemaker/workflow/test_workflow.py +++ b/tests/integ/sagemaker/workflow/test_workflow.py @@ -1122,8 +1122,8 @@ def test_model_registration_with_tuning_model( entry_point=entry_point, source_dir=base_dir, role=role, - framework_version="1.10", - py_version="py38", + framework_version="1.13.1", + py_version="py39", instance_count=instance_count, instance_type=instance_type, sagemaker_session=pipeline_session, @@ -1159,8 +1159,8 @@ def test_model_registration_with_tuning_model( ), entry_point=entry_point, source_dir=base_dir, - framework_version="1.10", - py_version="py38", + framework_version="1.13.1", + py_version="py39", sagemaker_session=pipeline_session, ) step_model_regis_args = model.register( diff --git a/tests/integ/test_feature_store.py 
b/tests/integ/test_feature_store.py index 43db78527a..75f1807148 100644 --- a/tests/integ/test_feature_store.py +++ b/tests/integ/test_feature_store.py @@ -1645,9 +1645,11 @@ def test_create_dataset_with_feature_group_base( feature_store_session, feature_group, offline_store_s3_uri ) - with timeout(minutes=10) and cleanup_offline_store( - base, feature_store_session - ) and cleanup_offline_store(feature_group, feature_store_session): + with ( + timeout(minutes=10) + and cleanup_offline_store(base, feature_store_session) + and cleanup_offline_store(feature_group, feature_store_session) + ): feature_store = FeatureStore(sagemaker_session=feature_store_session) df, query_string = ( feature_store.create_dataset(base=base, output_path=offline_store_s3_uri) @@ -1832,9 +1834,11 @@ def test_create_dataset_with_feature_group_base_with_additional_params( feature_store_session, feature_group, offline_store_s3_uri ) - with timeout(minutes=10) and cleanup_offline_store( - base, feature_store_session - ) and cleanup_offline_store(feature_group, feature_store_session): + with ( + timeout(minutes=10) + and cleanup_offline_store(base, feature_store_session) + and cleanup_offline_store(feature_group, feature_store_session) + ): feature_store = FeatureStore(sagemaker_session=feature_store_session) df, query_string = ( feature_store.create_dataset(base=base, output_path=offline_store_s3_uri) diff --git a/tests/unit/sagemaker/feature_store/feature_processor/lineage/test_feature_processor_lineage.py b/tests/unit/sagemaker/feature_store/feature_processor/lineage/test_feature_processor_lineage.py index 118800dd0f..f149823b2f 100644 --- a/tests/unit/sagemaker/feature_store/feature_processor/lineage/test_feature_processor_lineage.py +++ b/tests/unit/sagemaker/feature_store/feature_processor/lineage/test_feature_processor_lineage.py @@ -113,69 +113,85 @@ def test_create_lineage_when_no_lineage_exists_with_fg_only(): transformation_code=TRANSFORMATION_CODE_INPUT_1, sagemaker_session=SAGEMAKER_SESSION_MOCK, ) - with patch.object( - FeatureGroupLineageEntityHandler, - "retrieve_feature_group_context_arns", - side_effect=[ - FEATURE_GROUP_INPUT[0], - FEATURE_GROUP_INPUT[1], - FEATURE_GROUP_INPUT[0], - ], - ) as retrieve_feature_group_context_arns_method, patch.object( - S3LineageEntityHandler, - "retrieve_raw_data_artifact", - side_effect=[ - RAW_DATA_INPUT_ARTIFACTS[0], - RAW_DATA_INPUT_ARTIFACTS[1], - RAW_DATA_INPUT_ARTIFACTS[2], - RAW_DATA_INPUT_ARTIFACTS[3], - ], - ) as retrieve_raw_data_artifact_method, patch.object( - S3LineageEntityHandler, - "create_transformation_code_artifact", - return_value=TRANSFORMATION_CODE_ARTIFACT_1, - ) as create_transformation_code_artifact_method, patch.object( - PipelineLineageEntityHandler, - "load_pipeline_context", - side_effect=RESOURCE_NOT_FOUND_EXCEPTION, - ) as load_pipeline_context_method, patch.object( - PipelineLineageEntityHandler, - "create_pipeline_context", - return_value=PIPELINE_CONTEXT, - ), patch.object( - PipelineVersionLineageEntityHandler, - "create_pipeline_version_context", - return_value=PIPELINE_VERSION_CONTEXT, - ), patch.object( - PipelineVersionLineageEntityHandler, - "load_pipeline_version_context", - return_value=PIPELINE_VERSION_CONTEXT, - ) as load_pipeline_version_context_method, patch.object( - LineageAssociationHandler, - "list_upstream_associations", - side_effect=[ - generate_pipeline_version_upstream_feature_group_list(), - [], - generate_pipeline_version_upstream_transformation_code(), - ], - ) as list_upstream_associations_method, 
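
A caution on the `test_feature_store.py` hunks above: wrapping the expression in parentheses keeps the pre-existing `and` chaining, and `and` between context managers is not the same as commas. The chain evaluates to its last truthy operand, so only that one manager is actually entered; the others are constructed but never started. A small demonstration:

    from contextlib import contextmanager


    @contextmanager
    def tag(name):
        print("enter", name)
        try:
            yield
        finally:
            print("exit", name)


    # `and` chaining: tag("a") is truthy, so the expression evaluates to tag("b")
    # and only that manager is entered. Prints: enter b / exit b.
    with tag("a") and tag("b"):
        pass

    # Comma form: both managers are entered, in order.
    # Prints: enter a / enter b / exit b / exit a.
    with tag("a"), tag("b"):
        pass
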
patch.object( - LineageAssociationHandler, - "list_downstream_associations", - return_value=generate_pipeline_version_downstream_feature_group(), - ) as list_downstream_associations_method, patch.object( - PipelineLineageEntityHandler, - "update_pipeline_context", - ) as update_pipeline_context_method, patch.object( - LineageAssociationHandler, "add_upstream_feature_group_data_associations" - ) as add_upstream_feature_group_data_associations_method, patch.object( - LineageAssociationHandler, "add_downstream_feature_group_data_associations" - ) as add_downstream_feature_group_data_associations_method, patch.object( - LineageAssociationHandler, "add_upstream_raw_data_associations" - ) as add_upstream_raw_data_associations_method, patch.object( - LineageAssociationHandler, "add_upstream_transformation_code_associations" - ) as add_upstream_transformation_code_associations_method, patch.object( - LineageAssociationHandler, "add_pipeline_and_pipeline_version_association" - ) as add_pipeline_and_pipeline_version_association_method: + with ( + patch.object( + FeatureGroupLineageEntityHandler, + "retrieve_feature_group_context_arns", + side_effect=[ + FEATURE_GROUP_INPUT[0], + FEATURE_GROUP_INPUT[1], + FEATURE_GROUP_INPUT[0], + ], + ) as retrieve_feature_group_context_arns_method, + patch.object( + S3LineageEntityHandler, + "retrieve_raw_data_artifact", + side_effect=[ + RAW_DATA_INPUT_ARTIFACTS[0], + RAW_DATA_INPUT_ARTIFACTS[1], + RAW_DATA_INPUT_ARTIFACTS[2], + RAW_DATA_INPUT_ARTIFACTS[3], + ], + ) as retrieve_raw_data_artifact_method, + patch.object( + S3LineageEntityHandler, + "create_transformation_code_artifact", + return_value=TRANSFORMATION_CODE_ARTIFACT_1, + ) as create_transformation_code_artifact_method, + patch.object( + PipelineLineageEntityHandler, + "load_pipeline_context", + side_effect=RESOURCE_NOT_FOUND_EXCEPTION, + ) as load_pipeline_context_method, + patch.object( + PipelineLineageEntityHandler, + "create_pipeline_context", + return_value=PIPELINE_CONTEXT, + ), + patch.object( + PipelineVersionLineageEntityHandler, + "create_pipeline_version_context", + return_value=PIPELINE_VERSION_CONTEXT, + ), + patch.object( + PipelineVersionLineageEntityHandler, + "load_pipeline_version_context", + return_value=PIPELINE_VERSION_CONTEXT, + ) as load_pipeline_version_context_method, + patch.object( + LineageAssociationHandler, + "list_upstream_associations", + side_effect=[ + generate_pipeline_version_upstream_feature_group_list(), + [], + generate_pipeline_version_upstream_transformation_code(), + ], + ) as list_upstream_associations_method, + patch.object( + LineageAssociationHandler, + "list_downstream_associations", + return_value=generate_pipeline_version_downstream_feature_group(), + ) as list_downstream_associations_method, + patch.object( + PipelineLineageEntityHandler, + "update_pipeline_context", + ) as update_pipeline_context_method, + patch.object( + LineageAssociationHandler, "add_upstream_feature_group_data_associations" + ) as add_upstream_feature_group_data_associations_method, + patch.object( + LineageAssociationHandler, "add_downstream_feature_group_data_associations" + ) as add_downstream_feature_group_data_associations_method, + patch.object( + LineageAssociationHandler, "add_upstream_raw_data_associations" + ) as add_upstream_raw_data_associations_method, + patch.object( + LineageAssociationHandler, "add_upstream_transformation_code_associations" + ) as add_upstream_transformation_code_associations_method, + patch.object( + LineageAssociationHandler, 
"add_pipeline_and_pipeline_version_association" + ) as add_pipeline_and_pipeline_version_association_method, + ): lineage_handler.create_lineage() retrieve_feature_group_context_arns_method.assert_has_calls( @@ -259,75 +275,92 @@ def test_create_lineage_when_no_lineage_exists_with_raw_data_only(): transformation_code=TRANSFORMATION_CODE_INPUT_1, sagemaker_session=SAGEMAKER_SESSION_MOCK, ) - with patch.object( - FeatureGroupLineageEntityHandler, - "retrieve_feature_group_context_arns", - side_effect=[ - FEATURE_GROUP_INPUT[0], - FEATURE_GROUP_INPUT[1], - FEATURE_GROUP_INPUT[0], - ], - ) as retrieve_feature_group_context_arns_method, patch.object( - S3LineageEntityHandler, - "retrieve_raw_data_artifact", - side_effect=[ - RAW_DATA_INPUT_ARTIFACTS[0], - RAW_DATA_INPUT_ARTIFACTS[1], - RAW_DATA_INPUT_ARTIFACTS[2], - RAW_DATA_INPUT_ARTIFACTS[3], - ], - ) as retrieve_raw_data_artifact_method, patch.object( - S3LineageEntityHandler, - "create_transformation_code_artifact", - return_value=TRANSFORMATION_CODE_ARTIFACT_1, - ) as create_transformation_code_artifact_method, patch.object( - PipelineLineageEntityHandler, - "load_pipeline_context", - side_effect=RESOURCE_NOT_FOUND_EXCEPTION, - ) as load_pipeline_context_method, patch.object( - PipelineLineageEntityHandler, - "create_pipeline_context", - return_value=PIPELINE_CONTEXT, - ), patch.object( - PipelineVersionLineageEntityHandler, - "create_pipeline_version_context", - return_value=PIPELINE_VERSION_CONTEXT, - ), patch.object( - PipelineVersionLineageEntityHandler, - "load_pipeline_version_context", - return_value=PIPELINE_VERSION_CONTEXT, - ) as load_pipeline_version_context_method, patch.object( - LineageAssociationHandler, - "list_upstream_associations", - side_effect=[ - generate_pipeline_version_upstream_feature_group_list(), - [], - generate_pipeline_version_upstream_transformation_code(), - ], - ) as list_upstream_associations_method, patch.object( - LineageAssociationHandler, - "list_downstream_associations", - return_value=generate_pipeline_version_downstream_feature_group(), - ) as list_downstream_associations_method, patch.object( - PipelineLineageEntityHandler, - "update_pipeline_context", - ) as update_pipeline_context_method, patch.object( - LineageAssociationHandler, "add_upstream_feature_group_data_associations" - ) as add_upstream_feature_group_data_associations_method, patch.object( - LineageAssociationHandler, "add_downstream_feature_group_data_associations" - ) as add_downstream_feature_group_data_associations_method, patch.object( - LineageAssociationHandler, "add_upstream_raw_data_associations" - ) as add_upstream_raw_data_associations_method, patch.object( - LineageAssociationHandler, "add_upstream_transformation_code_associations" - ) as add_upstream_transformation_code_associations_method, patch.object( - LineageAssociationHandler, "add_pipeline_and_pipeline_version_association" - ) as add_pipeline_and_pipeline_version_association_method, patch.object( - Artifact, - "set_tags", - return_value={ - "Tags": [dict(Key="key_1", Value="value_1"), dict(Key="key_2", Value="value_2")] - }, - ) as artifact_set_tags: + with ( + patch.object( + FeatureGroupLineageEntityHandler, + "retrieve_feature_group_context_arns", + side_effect=[ + FEATURE_GROUP_INPUT[0], + FEATURE_GROUP_INPUT[1], + FEATURE_GROUP_INPUT[0], + ], + ) as retrieve_feature_group_context_arns_method, + patch.object( + S3LineageEntityHandler, + "retrieve_raw_data_artifact", + side_effect=[ + RAW_DATA_INPUT_ARTIFACTS[0], + RAW_DATA_INPUT_ARTIFACTS[1], + 
RAW_DATA_INPUT_ARTIFACTS[2], + RAW_DATA_INPUT_ARTIFACTS[3], + ], + ) as retrieve_raw_data_artifact_method, + patch.object( + S3LineageEntityHandler, + "create_transformation_code_artifact", + return_value=TRANSFORMATION_CODE_ARTIFACT_1, + ) as create_transformation_code_artifact_method, + patch.object( + PipelineLineageEntityHandler, + "load_pipeline_context", + side_effect=RESOURCE_NOT_FOUND_EXCEPTION, + ) as load_pipeline_context_method, + patch.object( + PipelineLineageEntityHandler, + "create_pipeline_context", + return_value=PIPELINE_CONTEXT, + ), + patch.object( + PipelineVersionLineageEntityHandler, + "create_pipeline_version_context", + return_value=PIPELINE_VERSION_CONTEXT, + ), + patch.object( + PipelineVersionLineageEntityHandler, + "load_pipeline_version_context", + return_value=PIPELINE_VERSION_CONTEXT, + ) as load_pipeline_version_context_method, + patch.object( + LineageAssociationHandler, + "list_upstream_associations", + side_effect=[ + generate_pipeline_version_upstream_feature_group_list(), + [], + generate_pipeline_version_upstream_transformation_code(), + ], + ) as list_upstream_associations_method, + patch.object( + LineageAssociationHandler, + "list_downstream_associations", + return_value=generate_pipeline_version_downstream_feature_group(), + ) as list_downstream_associations_method, + patch.object( + PipelineLineageEntityHandler, + "update_pipeline_context", + ) as update_pipeline_context_method, + patch.object( + LineageAssociationHandler, "add_upstream_feature_group_data_associations" + ) as add_upstream_feature_group_data_associations_method, + patch.object( + LineageAssociationHandler, "add_downstream_feature_group_data_associations" + ) as add_downstream_feature_group_data_associations_method, + patch.object( + LineageAssociationHandler, "add_upstream_raw_data_associations" + ) as add_upstream_raw_data_associations_method, + patch.object( + LineageAssociationHandler, "add_upstream_transformation_code_associations" + ) as add_upstream_transformation_code_associations_method, + patch.object( + LineageAssociationHandler, "add_pipeline_and_pipeline_version_association" + ) as add_pipeline_and_pipeline_version_association_method, + patch.object( + Artifact, + "set_tags", + return_value={ + "Tags": [dict(Key="key_1", Value="value_1"), dict(Key="key_2", Value="value_2")] + }, + ) as artifact_set_tags, + ): lineage_handler.create_lineage(TAGS) retrieve_feature_group_context_arns_method.assert_called_once_with( @@ -408,75 +441,92 @@ def test_create_lineage_when_no_lineage_exists_with_fg_and_raw_data_with_tags(): transformation_code=TRANSFORMATION_CODE_INPUT_1, sagemaker_session=SAGEMAKER_SESSION_MOCK, ) - with patch.object( - FeatureGroupLineageEntityHandler, - "retrieve_feature_group_context_arns", - side_effect=[ - FEATURE_GROUP_INPUT[0], - FEATURE_GROUP_INPUT[1], - FEATURE_GROUP_INPUT[0], - ], - ) as retrieve_feature_group_context_arns_method, patch.object( - S3LineageEntityHandler, - "retrieve_raw_data_artifact", - side_effect=[ - RAW_DATA_INPUT_ARTIFACTS[0], - RAW_DATA_INPUT_ARTIFACTS[1], - RAW_DATA_INPUT_ARTIFACTS[2], - RAW_DATA_INPUT_ARTIFACTS[3], - ], - ) as retrieve_raw_data_artifact_method, patch.object( - S3LineageEntityHandler, - "create_transformation_code_artifact", - return_value=TRANSFORMATION_CODE_ARTIFACT_1, - ) as create_transformation_code_artifact_method, patch.object( - PipelineLineageEntityHandler, - "load_pipeline_context", - side_effect=RESOURCE_NOT_FOUND_EXCEPTION, - ) as load_pipeline_context_method, patch.object( - 
PipelineLineageEntityHandler, - "create_pipeline_context", - return_value=PIPELINE_CONTEXT, - ), patch.object( - PipelineVersionLineageEntityHandler, - "create_pipeline_version_context", - return_value=PIPELINE_VERSION_CONTEXT, - ), patch.object( - PipelineVersionLineageEntityHandler, - "load_pipeline_version_context", - return_value=PIPELINE_VERSION_CONTEXT, - ) as load_pipeline_version_context_method, patch.object( - LineageAssociationHandler, - "list_upstream_associations", - side_effect=[ - generate_pipeline_version_upstream_feature_group_list(), - [], - generate_pipeline_version_upstream_transformation_code(), - ], - ) as list_upstream_associations_method, patch.object( - LineageAssociationHandler, - "list_downstream_associations", - return_value=generate_pipeline_version_downstream_feature_group(), - ) as list_downstream_associations_method, patch.object( - PipelineLineageEntityHandler, - "update_pipeline_context", - ) as update_pipeline_context_method, patch.object( - LineageAssociationHandler, "add_upstream_feature_group_data_associations" - ) as add_upstream_feature_group_data_associations_method, patch.object( - LineageAssociationHandler, "add_downstream_feature_group_data_associations" - ) as add_downstream_feature_group_data_associations_method, patch.object( - LineageAssociationHandler, "add_upstream_raw_data_associations" - ) as add_upstream_raw_data_associations_method, patch.object( - LineageAssociationHandler, "add_upstream_transformation_code_associations" - ) as add_upstream_transformation_code_associations_method, patch.object( - LineageAssociationHandler, "add_pipeline_and_pipeline_version_association" - ) as add_pipeline_and_pipeline_version_association_method, patch.object( - Artifact, - "set_tags", - return_value={ - "Tags": [dict(Key="key_1", Value="value_1"), dict(Key="key_2", Value="value_2")] - }, - ) as artifact_set_tags: + with ( + patch.object( + FeatureGroupLineageEntityHandler, + "retrieve_feature_group_context_arns", + side_effect=[ + FEATURE_GROUP_INPUT[0], + FEATURE_GROUP_INPUT[1], + FEATURE_GROUP_INPUT[0], + ], + ) as retrieve_feature_group_context_arns_method, + patch.object( + S3LineageEntityHandler, + "retrieve_raw_data_artifact", + side_effect=[ + RAW_DATA_INPUT_ARTIFACTS[0], + RAW_DATA_INPUT_ARTIFACTS[1], + RAW_DATA_INPUT_ARTIFACTS[2], + RAW_DATA_INPUT_ARTIFACTS[3], + ], + ) as retrieve_raw_data_artifact_method, + patch.object( + S3LineageEntityHandler, + "create_transformation_code_artifact", + return_value=TRANSFORMATION_CODE_ARTIFACT_1, + ) as create_transformation_code_artifact_method, + patch.object( + PipelineLineageEntityHandler, + "load_pipeline_context", + side_effect=RESOURCE_NOT_FOUND_EXCEPTION, + ) as load_pipeline_context_method, + patch.object( + PipelineLineageEntityHandler, + "create_pipeline_context", + return_value=PIPELINE_CONTEXT, + ), + patch.object( + PipelineVersionLineageEntityHandler, + "create_pipeline_version_context", + return_value=PIPELINE_VERSION_CONTEXT, + ), + patch.object( + PipelineVersionLineageEntityHandler, + "load_pipeline_version_context", + return_value=PIPELINE_VERSION_CONTEXT, + ) as load_pipeline_version_context_method, + patch.object( + LineageAssociationHandler, + "list_upstream_associations", + side_effect=[ + generate_pipeline_version_upstream_feature_group_list(), + [], + generate_pipeline_version_upstream_transformation_code(), + ], + ) as list_upstream_associations_method, + patch.object( + LineageAssociationHandler, + "list_downstream_associations", + 
return_value=generate_pipeline_version_downstream_feature_group(), + ) as list_downstream_associations_method, + patch.object( + PipelineLineageEntityHandler, + "update_pipeline_context", + ) as update_pipeline_context_method, + patch.object( + LineageAssociationHandler, "add_upstream_feature_group_data_associations" + ) as add_upstream_feature_group_data_associations_method, + patch.object( + LineageAssociationHandler, "add_downstream_feature_group_data_associations" + ) as add_downstream_feature_group_data_associations_method, + patch.object( + LineageAssociationHandler, "add_upstream_raw_data_associations" + ) as add_upstream_raw_data_associations_method, + patch.object( + LineageAssociationHandler, "add_upstream_transformation_code_associations" + ) as add_upstream_transformation_code_associations_method, + patch.object( + LineageAssociationHandler, "add_pipeline_and_pipeline_version_association" + ) as add_pipeline_and_pipeline_version_association_method, + patch.object( + Artifact, + "set_tags", + return_value={ + "Tags": [dict(Key="key_1", Value="value_1"), dict(Key="key_2", Value="value_2")] + }, + ) as artifact_set_tags, + ): lineage_handler.create_lineage(TAGS) retrieve_feature_group_context_arns_method.assert_has_calls( @@ -569,75 +619,92 @@ def test_create_lineage_when_no_lineage_exists_with_no_transformation_code(): output=FEATURE_GROUP_DATA_SOURCE[0].name, sagemaker_session=SAGEMAKER_SESSION_MOCK, ) - with patch.object( - FeatureGroupLineageEntityHandler, - "retrieve_feature_group_context_arns", - side_effect=[ - FEATURE_GROUP_INPUT[0], - FEATURE_GROUP_INPUT[1], - FEATURE_GROUP_INPUT[0], - ], - ) as retrieve_feature_group_context_arns_method, patch.object( - S3LineageEntityHandler, - "retrieve_raw_data_artifact", - side_effect=[ - RAW_DATA_INPUT_ARTIFACTS[0], - RAW_DATA_INPUT_ARTIFACTS[1], - RAW_DATA_INPUT_ARTIFACTS[2], - RAW_DATA_INPUT_ARTIFACTS[3], - ], - ) as retrieve_raw_data_artifact_method, patch.object( - S3LineageEntityHandler, - "create_transformation_code_artifact", - return_value=None, - ) as create_transformation_code_artifact_method, patch.object( - PipelineLineageEntityHandler, - "load_pipeline_context", - side_effect=RESOURCE_NOT_FOUND_EXCEPTION, - ) as load_pipeline_context_method, patch.object( - PipelineLineageEntityHandler, - "create_pipeline_context", - return_value=PIPELINE_CONTEXT, - ), patch.object( - PipelineVersionLineageEntityHandler, - "create_pipeline_version_context", - return_value=PIPELINE_VERSION_CONTEXT, - ), patch.object( - PipelineVersionLineageEntityHandler, - "load_pipeline_version_context", - return_value=PIPELINE_VERSION_CONTEXT, - ) as load_pipeline_version_context_method, patch.object( - LineageAssociationHandler, - "list_upstream_associations", - side_effect=[ - generate_pipeline_version_upstream_feature_group_list(), - [], - generate_pipeline_version_upstream_transformation_code(), - ], - ) as list_upstream_associations_method, patch.object( - LineageAssociationHandler, - "list_downstream_associations", - return_value=generate_pipeline_version_downstream_feature_group(), - ) as list_downstream_associations_method, patch.object( - PipelineLineageEntityHandler, - "update_pipeline_context", - ) as update_pipeline_context_method, patch.object( - LineageAssociationHandler, "add_upstream_feature_group_data_associations" - ) as add_upstream_feature_group_data_associations_method, patch.object( - LineageAssociationHandler, "add_downstream_feature_group_data_associations" - ) as add_downstream_feature_group_data_associations_method, 
patch.object( - LineageAssociationHandler, "add_upstream_raw_data_associations" - ) as add_upstream_raw_data_associations_method, patch.object( - LineageAssociationHandler, "add_upstream_transformation_code_associations" - ) as add_upstream_transformation_code_associations_method, patch.object( - LineageAssociationHandler, "add_pipeline_and_pipeline_version_association" - ) as add_pipeline_and_pipeline_version_association_method, patch.object( - Artifact, - "set_tags", - return_value={ - "Tags": [dict(Key="key_1", Value="value_1"), dict(Key="key_2", Value="value_2")] - }, - ) as artifact_set_tags: + with ( + patch.object( + FeatureGroupLineageEntityHandler, + "retrieve_feature_group_context_arns", + side_effect=[ + FEATURE_GROUP_INPUT[0], + FEATURE_GROUP_INPUT[1], + FEATURE_GROUP_INPUT[0], + ], + ) as retrieve_feature_group_context_arns_method, + patch.object( + S3LineageEntityHandler, + "retrieve_raw_data_artifact", + side_effect=[ + RAW_DATA_INPUT_ARTIFACTS[0], + RAW_DATA_INPUT_ARTIFACTS[1], + RAW_DATA_INPUT_ARTIFACTS[2], + RAW_DATA_INPUT_ARTIFACTS[3], + ], + ) as retrieve_raw_data_artifact_method, + patch.object( + S3LineageEntityHandler, + "create_transformation_code_artifact", + return_value=None, + ) as create_transformation_code_artifact_method, + patch.object( + PipelineLineageEntityHandler, + "load_pipeline_context", + side_effect=RESOURCE_NOT_FOUND_EXCEPTION, + ) as load_pipeline_context_method, + patch.object( + PipelineLineageEntityHandler, + "create_pipeline_context", + return_value=PIPELINE_CONTEXT, + ), + patch.object( + PipelineVersionLineageEntityHandler, + "create_pipeline_version_context", + return_value=PIPELINE_VERSION_CONTEXT, + ), + patch.object( + PipelineVersionLineageEntityHandler, + "load_pipeline_version_context", + return_value=PIPELINE_VERSION_CONTEXT, + ) as load_pipeline_version_context_method, + patch.object( + LineageAssociationHandler, + "list_upstream_associations", + side_effect=[ + generate_pipeline_version_upstream_feature_group_list(), + [], + generate_pipeline_version_upstream_transformation_code(), + ], + ) as list_upstream_associations_method, + patch.object( + LineageAssociationHandler, + "list_downstream_associations", + return_value=generate_pipeline_version_downstream_feature_group(), + ) as list_downstream_associations_method, + patch.object( + PipelineLineageEntityHandler, + "update_pipeline_context", + ) as update_pipeline_context_method, + patch.object( + LineageAssociationHandler, "add_upstream_feature_group_data_associations" + ) as add_upstream_feature_group_data_associations_method, + patch.object( + LineageAssociationHandler, "add_downstream_feature_group_data_associations" + ) as add_downstream_feature_group_data_associations_method, + patch.object( + LineageAssociationHandler, "add_upstream_raw_data_associations" + ) as add_upstream_raw_data_associations_method, + patch.object( + LineageAssociationHandler, "add_upstream_transformation_code_associations" + ) as add_upstream_transformation_code_associations_method, + patch.object( + LineageAssociationHandler, "add_pipeline_and_pipeline_version_association" + ) as add_pipeline_and_pipeline_version_association_method, + patch.object( + Artifact, + "set_tags", + return_value={ + "Tags": [dict(Key="key_1", Value="value_1"), dict(Key="key_2", Value="value_2")] + }, + ) as artifact_set_tags, + ): lineage_handler.create_lineage(TAGS) retrieve_feature_group_context_arns_method.assert_has_calls( @@ -728,78 +795,96 @@ def test_create_lineage_when_already_exist_with_no_version_change(): 
transformation_code=TRANSFORMATION_CODE_INPUT_1, sagemaker_session=SAGEMAKER_SESSION_MOCK, ) - with patch.object( - FeatureGroupLineageEntityHandler, - "retrieve_feature_group_context_arns", - side_effect=[ - FEATURE_GROUP_INPUT[0], - FEATURE_GROUP_INPUT[1], - FEATURE_GROUP_INPUT[0], - ], - ) as retrieve_feature_group_context_arns_method, patch.object( - S3LineageEntityHandler, - "retrieve_raw_data_artifact", - side_effect=[ - RAW_DATA_INPUT_ARTIFACTS[0], - RAW_DATA_INPUT_ARTIFACTS[1], - RAW_DATA_INPUT_ARTIFACTS[2], - RAW_DATA_INPUT_ARTIFACTS[3], - ], - ) as retrieve_raw_data_artifact_method, patch.object( - S3LineageEntityHandler, - "create_transformation_code_artifact", - return_value=TRANSFORMATION_CODE_ARTIFACT_1, - ) as create_transformation_code_artifact_method, patch.object( - PipelineLineageEntityHandler, - "load_pipeline_context", - return_value=PIPELINE_CONTEXT, - ) as load_pipeline_context_method, patch.object( - PipelineVersionLineageEntityHandler, - "load_pipeline_version_context", - return_value=PIPELINE_VERSION_CONTEXT, - ) as load_pipeline_version_context_method, patch.object( - LineageAssociationHandler, - "list_upstream_associations", - side_effect=[ - generate_pipeline_version_upstream_feature_group_list(), - generate_pipeline_version_upstream_raw_data_list(), - generate_pipeline_version_upstream_transformation_code(), - ], - ) as list_upstream_associations_method, patch.object( - LineageAssociationHandler, - "list_downstream_associations", - return_value=generate_pipeline_version_downstream_feature_group(), - ) as list_downstream_associations_method, patch.object( - S3LineageEntityHandler, - "load_artifact_from_arn", - return_value=transformation_code_1, - ) as load_artifact_from_arn_method, patch.object( - S3LineageEntityHandler, - "update_transformation_code_artifact", - ) as update_transformation_code_artifact_method, patch.object( - PipelineLineageEntityHandler, - "update_pipeline_context", - ) as update_pipeline_context_method, patch.object( - PipelineVersionLineageEntityHandler, - "create_pipeline_version_context", - return_value=PIPELINE_VERSION_CONTEXT, - ) as create_pipeline_version_context_method, patch.object( - LineageAssociationHandler, "add_upstream_feature_group_data_associations" - ) as add_upstream_feature_group_data_associations_method, patch.object( - LineageAssociationHandler, "add_downstream_feature_group_data_associations" - ) as add_downstream_feature_group_data_associations_method, patch.object( - LineageAssociationHandler, "add_upstream_raw_data_associations" - ) as add_upstream_raw_data_associations_method, patch.object( - LineageAssociationHandler, "add_upstream_transformation_code_associations" - ) as add_upstream_transformation_code_associations_method, patch.object( - LineageAssociationHandler, "add_pipeline_and_pipeline_version_association" - ) as add_pipeline_and_pipeline_version_association_method, patch.object( - Artifact, - "set_tags", - return_value={ - "Tags": [dict(Key="key_1", Value="value_1"), dict(Key="key_2", Value="value_2")] - }, - ) as artifact_set_tags: + with ( + patch.object( + FeatureGroupLineageEntityHandler, + "retrieve_feature_group_context_arns", + side_effect=[ + FEATURE_GROUP_INPUT[0], + FEATURE_GROUP_INPUT[1], + FEATURE_GROUP_INPUT[0], + ], + ) as retrieve_feature_group_context_arns_method, + patch.object( + S3LineageEntityHandler, + "retrieve_raw_data_artifact", + side_effect=[ + RAW_DATA_INPUT_ARTIFACTS[0], + RAW_DATA_INPUT_ARTIFACTS[1], + RAW_DATA_INPUT_ARTIFACTS[2], + RAW_DATA_INPUT_ARTIFACTS[3], + ], + ) as 
retrieve_raw_data_artifact_method, + patch.object( + S3LineageEntityHandler, + "create_transformation_code_artifact", + return_value=TRANSFORMATION_CODE_ARTIFACT_1, + ) as create_transformation_code_artifact_method, + patch.object( + PipelineLineageEntityHandler, + "load_pipeline_context", + return_value=PIPELINE_CONTEXT, + ) as load_pipeline_context_method, + patch.object( + PipelineVersionLineageEntityHandler, + "load_pipeline_version_context", + return_value=PIPELINE_VERSION_CONTEXT, + ) as load_pipeline_version_context_method, + patch.object( + LineageAssociationHandler, + "list_upstream_associations", + side_effect=[ + generate_pipeline_version_upstream_feature_group_list(), + generate_pipeline_version_upstream_raw_data_list(), + generate_pipeline_version_upstream_transformation_code(), + ], + ) as list_upstream_associations_method, + patch.object( + LineageAssociationHandler, + "list_downstream_associations", + return_value=generate_pipeline_version_downstream_feature_group(), + ) as list_downstream_associations_method, + patch.object( + S3LineageEntityHandler, + "load_artifact_from_arn", + return_value=transformation_code_1, + ) as load_artifact_from_arn_method, + patch.object( + S3LineageEntityHandler, + "update_transformation_code_artifact", + ) as update_transformation_code_artifact_method, + patch.object( + PipelineLineageEntityHandler, + "update_pipeline_context", + ) as update_pipeline_context_method, + patch.object( + PipelineVersionLineageEntityHandler, + "create_pipeline_version_context", + return_value=PIPELINE_VERSION_CONTEXT, + ) as create_pipeline_version_context_method, + patch.object( + LineageAssociationHandler, "add_upstream_feature_group_data_associations" + ) as add_upstream_feature_group_data_associations_method, + patch.object( + LineageAssociationHandler, "add_downstream_feature_group_data_associations" + ) as add_downstream_feature_group_data_associations_method, + patch.object( + LineageAssociationHandler, "add_upstream_raw_data_associations" + ) as add_upstream_raw_data_associations_method, + patch.object( + LineageAssociationHandler, "add_upstream_transformation_code_associations" + ) as add_upstream_transformation_code_associations_method, + patch.object( + LineageAssociationHandler, "add_pipeline_and_pipeline_version_association" + ) as add_pipeline_and_pipeline_version_association_method, + patch.object( + Artifact, + "set_tags", + return_value={ + "Tags": [dict(Key="key_1", Value="value_1"), dict(Key="key_2", Value="value_2")] + }, + ) as artifact_set_tags, + ): lineage_handler.create_lineage(TAGS) retrieve_feature_group_context_arns_method.assert_has_calls( @@ -925,73 +1010,91 @@ def test_create_lineage_when_already_exist_with_changed_raw_data(): transformation_code=TRANSFORMATION_CODE_INPUT_1, sagemaker_session=SAGEMAKER_SESSION_MOCK, ) - with patch.object( - FeatureGroupLineageEntityHandler, - "retrieve_feature_group_context_arns", - side_effect=[ - FEATURE_GROUP_INPUT[0], - FEATURE_GROUP_INPUT[1], - FEATURE_GROUP_INPUT[0], - ], - ) as retrieve_feature_group_context_arns_method, patch.object( - S3LineageEntityHandler, - "retrieve_raw_data_artifact", - side_effect=[RAW_DATA_INPUT_ARTIFACTS[0], RAW_DATA_INPUT_ARTIFACTS[1]], - ) as retrieve_raw_data_artifact_method, patch.object( - S3LineageEntityHandler, - "create_transformation_code_artifact", - return_value=TRANSFORMATION_CODE_ARTIFACT_1, - ) as create_transformation_code_artifact_method, patch.object( - PipelineLineageEntityHandler, - "load_pipeline_context", - return_value=pipeline_context, - ) as 
load_pipeline_context_method, patch.object( - PipelineVersionLineageEntityHandler, - "load_pipeline_version_context", - return_value=PIPELINE_VERSION_CONTEXT, - ) as load_pipeline_version_context_method, patch.object( - LineageAssociationHandler, - "list_upstream_associations", - side_effect=[ - generate_pipeline_version_upstream_feature_group_list(), - generate_pipeline_version_upstream_raw_data_list(), - generate_pipeline_version_upstream_transformation_code(), - ], - ) as list_upstream_associations_method, patch.object( - LineageAssociationHandler, - "list_downstream_associations", - return_value=generate_pipeline_version_downstream_feature_group(), - ) as list_downstream_associations_method, patch.object( - S3LineageEntityHandler, - "load_artifact_from_arn", - return_value=transformation_code_1, - ) as load_artifact_from_arn_method, patch.object( - S3LineageEntityHandler, - "update_transformation_code_artifact", - ) as update_transformation_code_artifact_method, patch.object( - PipelineLineageEntityHandler, - "update_pipeline_context", - ) as update_pipeline_context_method, patch.object( - PipelineVersionLineageEntityHandler, - "create_pipeline_version_context", - return_value=PIPELINE_VERSION_CONTEXT, - ), patch.object( - LineageAssociationHandler, "add_upstream_feature_group_data_associations" - ) as add_upstream_feature_group_data_associations_method, patch.object( - LineageAssociationHandler, "add_downstream_feature_group_data_associations" - ) as add_downstream_feature_group_data_associations_method, patch.object( - LineageAssociationHandler, "add_upstream_raw_data_associations" - ) as add_upstream_raw_data_associations_method, patch.object( - LineageAssociationHandler, "add_upstream_transformation_code_associations" - ) as add_upstream_transformation_code_associations_method, patch.object( - LineageAssociationHandler, "add_pipeline_and_pipeline_version_association" - ) as add_pipeline_and_pipeline_version_association_method, patch.object( - Artifact, - "set_tags", - return_value={ - "Tags": [dict(Key="key_1", Value="value_1"), dict(Key="key_2", Value="value_2")] - }, - ) as artifact_set_tags: + with ( + patch.object( + FeatureGroupLineageEntityHandler, + "retrieve_feature_group_context_arns", + side_effect=[ + FEATURE_GROUP_INPUT[0], + FEATURE_GROUP_INPUT[1], + FEATURE_GROUP_INPUT[0], + ], + ) as retrieve_feature_group_context_arns_method, + patch.object( + S3LineageEntityHandler, + "retrieve_raw_data_artifact", + side_effect=[RAW_DATA_INPUT_ARTIFACTS[0], RAW_DATA_INPUT_ARTIFACTS[1]], + ) as retrieve_raw_data_artifact_method, + patch.object( + S3LineageEntityHandler, + "create_transformation_code_artifact", + return_value=TRANSFORMATION_CODE_ARTIFACT_1, + ) as create_transformation_code_artifact_method, + patch.object( + PipelineLineageEntityHandler, + "load_pipeline_context", + return_value=pipeline_context, + ) as load_pipeline_context_method, + patch.object( + PipelineVersionLineageEntityHandler, + "load_pipeline_version_context", + return_value=PIPELINE_VERSION_CONTEXT, + ) as load_pipeline_version_context_method, + patch.object( + LineageAssociationHandler, + "list_upstream_associations", + side_effect=[ + generate_pipeline_version_upstream_feature_group_list(), + generate_pipeline_version_upstream_raw_data_list(), + generate_pipeline_version_upstream_transformation_code(), + ], + ) as list_upstream_associations_method, + patch.object( + LineageAssociationHandler, + "list_downstream_associations", + return_value=generate_pipeline_version_downstream_feature_group(), + ) as 
list_downstream_associations_method, + patch.object( + S3LineageEntityHandler, + "load_artifact_from_arn", + return_value=transformation_code_1, + ) as load_artifact_from_arn_method, + patch.object( + S3LineageEntityHandler, + "update_transformation_code_artifact", + ) as update_transformation_code_artifact_method, + patch.object( + PipelineLineageEntityHandler, + "update_pipeline_context", + ) as update_pipeline_context_method, + patch.object( + PipelineVersionLineageEntityHandler, + "create_pipeline_version_context", + return_value=PIPELINE_VERSION_CONTEXT, + ), + patch.object( + LineageAssociationHandler, "add_upstream_feature_group_data_associations" + ) as add_upstream_feature_group_data_associations_method, + patch.object( + LineageAssociationHandler, "add_downstream_feature_group_data_associations" + ) as add_downstream_feature_group_data_associations_method, + patch.object( + LineageAssociationHandler, "add_upstream_raw_data_associations" + ) as add_upstream_raw_data_associations_method, + patch.object( + LineageAssociationHandler, "add_upstream_transformation_code_associations" + ) as add_upstream_transformation_code_associations_method, + patch.object( + LineageAssociationHandler, "add_pipeline_and_pipeline_version_association" + ) as add_pipeline_and_pipeline_version_association_method, + patch.object( + Artifact, + "set_tags", + return_value={ + "Tags": [dict(Key="key_1", Value="value_1"), dict(Key="key_2", Value="value_2")] + }, + ) as artifact_set_tags, + ): lineage_handler.create_lineage(TAGS) retrieve_feature_group_context_arns_method.assert_has_calls( @@ -1140,74 +1243,92 @@ def test_create_lineage_when_already_exist_with_changed_input_fg(): transformation_code=TRANSFORMATION_CODE_INPUT_1, sagemaker_session=SAGEMAKER_SESSION_MOCK, ) - with patch.object( - FeatureGroupLineageEntityHandler, - "retrieve_feature_group_context_arns", - side_effect=[FEATURE_GROUP_INPUT[0], FEATURE_GROUP_INPUT[0]], - ) as retrieve_feature_group_context_arns_method, patch.object( - S3LineageEntityHandler, - "retrieve_raw_data_artifact", - side_effect=[ - RAW_DATA_INPUT_ARTIFACTS[0], - RAW_DATA_INPUT_ARTIFACTS[1], - RAW_DATA_INPUT_ARTIFACTS[2], - RAW_DATA_INPUT_ARTIFACTS[3], - ], - ) as retrieve_raw_data_artifact_method, patch.object( - S3LineageEntityHandler, - "create_transformation_code_artifact", - return_value=TRANSFORMATION_CODE_ARTIFACT_1, - ) as create_transformation_code_artifact_method, patch.object( - PipelineLineageEntityHandler, - "load_pipeline_context", - return_value=pipeline_context, - ) as load_pipeline_context_method, patch.object( - PipelineVersionLineageEntityHandler, - "load_pipeline_version_context", - return_value=PIPELINE_VERSION_CONTEXT, - ) as load_pipeline_version_context_method, patch.object( - LineageAssociationHandler, - "list_upstream_associations", - side_effect=[ - generate_pipeline_version_upstream_feature_group_list(), - generate_pipeline_version_upstream_raw_data_list(), - generate_pipeline_version_upstream_transformation_code(), - ], - ) as list_upstream_associations_method, patch.object( - LineageAssociationHandler, - "list_downstream_associations", - return_value=generate_pipeline_version_downstream_feature_group(), - ) as list_downstream_associations_method, patch.object( - S3LineageEntityHandler, - "load_artifact_from_arn", - return_value=transformation_code_1, - ) as load_artifact_from_arn_method, patch.object( - S3LineageEntityHandler, - "update_transformation_code_artifact", - ) as update_transformation_code_artifact_method, patch.object( - 
PipelineLineageEntityHandler, - "update_pipeline_context", - ) as update_pipeline_context_method, patch.object( - PipelineVersionLineageEntityHandler, - "create_pipeline_version_context", - return_value=PIPELINE_VERSION_CONTEXT, - ), patch.object( - LineageAssociationHandler, "add_upstream_feature_group_data_associations" - ) as add_upstream_feature_group_data_associations_method, patch.object( - LineageAssociationHandler, "add_downstream_feature_group_data_associations" - ) as add_downstream_feature_group_data_associations_method, patch.object( - LineageAssociationHandler, "add_upstream_raw_data_associations" - ) as add_upstream_raw_data_associations_method, patch.object( - LineageAssociationHandler, "add_upstream_transformation_code_associations" - ) as add_upstream_transformation_code_associations_method, patch.object( - LineageAssociationHandler, "add_pipeline_and_pipeline_version_association" - ) as add_pipeline_and_pipeline_version_association_method, patch.object( - Artifact, - "set_tags", - return_value={ - "Tags": [dict(Key="key_1", Value="value_1"), dict(Key="key_2", Value="value_2")] - }, - ) as artifact_set_tags: + with ( + patch.object( + FeatureGroupLineageEntityHandler, + "retrieve_feature_group_context_arns", + side_effect=[FEATURE_GROUP_INPUT[0], FEATURE_GROUP_INPUT[0]], + ) as retrieve_feature_group_context_arns_method, + patch.object( + S3LineageEntityHandler, + "retrieve_raw_data_artifact", + side_effect=[ + RAW_DATA_INPUT_ARTIFACTS[0], + RAW_DATA_INPUT_ARTIFACTS[1], + RAW_DATA_INPUT_ARTIFACTS[2], + RAW_DATA_INPUT_ARTIFACTS[3], + ], + ) as retrieve_raw_data_artifact_method, + patch.object( + S3LineageEntityHandler, + "create_transformation_code_artifact", + return_value=TRANSFORMATION_CODE_ARTIFACT_1, + ) as create_transformation_code_artifact_method, + patch.object( + PipelineLineageEntityHandler, + "load_pipeline_context", + return_value=pipeline_context, + ) as load_pipeline_context_method, + patch.object( + PipelineVersionLineageEntityHandler, + "load_pipeline_version_context", + return_value=PIPELINE_VERSION_CONTEXT, + ) as load_pipeline_version_context_method, + patch.object( + LineageAssociationHandler, + "list_upstream_associations", + side_effect=[ + generate_pipeline_version_upstream_feature_group_list(), + generate_pipeline_version_upstream_raw_data_list(), + generate_pipeline_version_upstream_transformation_code(), + ], + ) as list_upstream_associations_method, + patch.object( + LineageAssociationHandler, + "list_downstream_associations", + return_value=generate_pipeline_version_downstream_feature_group(), + ) as list_downstream_associations_method, + patch.object( + S3LineageEntityHandler, + "load_artifact_from_arn", + return_value=transformation_code_1, + ) as load_artifact_from_arn_method, + patch.object( + S3LineageEntityHandler, + "update_transformation_code_artifact", + ) as update_transformation_code_artifact_method, + patch.object( + PipelineLineageEntityHandler, + "update_pipeline_context", + ) as update_pipeline_context_method, + patch.object( + PipelineVersionLineageEntityHandler, + "create_pipeline_version_context", + return_value=PIPELINE_VERSION_CONTEXT, + ), + patch.object( + LineageAssociationHandler, "add_upstream_feature_group_data_associations" + ) as add_upstream_feature_group_data_associations_method, + patch.object( + LineageAssociationHandler, "add_downstream_feature_group_data_associations" + ) as add_downstream_feature_group_data_associations_method, + patch.object( + LineageAssociationHandler, "add_upstream_raw_data_associations" + 
) as add_upstream_raw_data_associations_method, + patch.object( + LineageAssociationHandler, "add_upstream_transformation_code_associations" + ) as add_upstream_transformation_code_associations_method, + patch.object( + LineageAssociationHandler, "add_pipeline_and_pipeline_version_association" + ) as add_pipeline_and_pipeline_version_association_method, + patch.object( + Artifact, + "set_tags", + return_value={ + "Tags": [dict(Key="key_1", Value="value_1"), dict(Key="key_2", Value="value_2")] + }, + ) as artifact_set_tags, + ): lineage_handler.create_lineage(TAGS) retrieve_feature_group_context_arns_method.assert_has_calls( @@ -1354,78 +1475,96 @@ def test_create_lineage_when_already_exist_with_changed_output_fg(): transformation_code=TRANSFORMATION_CODE_INPUT_1, sagemaker_session=SAGEMAKER_SESSION_MOCK, ) - with patch.object( - FeatureGroupLineageEntityHandler, - "retrieve_feature_group_context_arns", - side_effect=[ - FEATURE_GROUP_INPUT[0], - FEATURE_GROUP_INPUT[1], - FEATURE_GROUP_INPUT[1], - ], - ) as retrieve_feature_group_context_arns_method, patch.object( - S3LineageEntityHandler, - "retrieve_raw_data_artifact", - side_effect=[ - RAW_DATA_INPUT_ARTIFACTS[0], - RAW_DATA_INPUT_ARTIFACTS[1], - RAW_DATA_INPUT_ARTIFACTS[2], - RAW_DATA_INPUT_ARTIFACTS[3], - ], - ) as retrieve_raw_data_artifact_method, patch.object( - S3LineageEntityHandler, - "create_transformation_code_artifact", - return_value=TRANSFORMATION_CODE_ARTIFACT_1, - ) as create_transformation_code_artifact_method, patch.object( - PipelineLineageEntityHandler, - "load_pipeline_context", - return_value=pipeline_context, - ) as load_pipeline_context_method, patch.object( - PipelineVersionLineageEntityHandler, - "load_pipeline_version_context", - return_value=PIPELINE_VERSION_CONTEXT, - ) as load_pipeline_version_context_method, patch.object( - LineageAssociationHandler, - "list_upstream_associations", - side_effect=[ - generate_pipeline_version_upstream_feature_group_list(), - generate_pipeline_version_upstream_raw_data_list(), - generate_pipeline_version_upstream_transformation_code(), - ], - ) as list_upstream_associations_method, patch.object( - LineageAssociationHandler, - "list_downstream_associations", - return_value=generate_pipeline_version_downstream_feature_group(), - ) as list_downstream_associations_method, patch.object( - S3LineageEntityHandler, - "load_artifact_from_arn", - return_value=transformation_code_1, - ) as load_artifact_from_arn_method, patch.object( - S3LineageEntityHandler, - "update_transformation_code_artifact", - ) as update_transformation_code_artifact_method, patch.object( - PipelineLineageEntityHandler, - "update_pipeline_context", - ) as update_pipeline_context_method, patch.object( - PipelineVersionLineageEntityHandler, - "create_pipeline_version_context", - return_value=PIPELINE_VERSION_CONTEXT, - ), patch.object( - LineageAssociationHandler, "add_upstream_feature_group_data_associations" - ) as add_upstream_feature_group_data_associations_method, patch.object( - LineageAssociationHandler, "add_downstream_feature_group_data_associations" - ) as add_downstream_feature_group_data_associations_method, patch.object( - LineageAssociationHandler, "add_upstream_raw_data_associations" - ) as add_upstream_raw_data_associations_method, patch.object( - LineageAssociationHandler, "add_upstream_transformation_code_associations" - ) as add_upstream_transformation_code_associations_method, patch.object( - LineageAssociationHandler, "add_pipeline_and_pipeline_version_association" - ) as 
add_pipeline_and_pipeline_version_association_method, patch.object( - Artifact, - "set_tags", - return_value={ - "Tags": [dict(Key="key_1", Value="value_1"), dict(Key="key_2", Value="value_2")] - }, - ) as artifact_set_tags: + with ( + patch.object( + FeatureGroupLineageEntityHandler, + "retrieve_feature_group_context_arns", + side_effect=[ + FEATURE_GROUP_INPUT[0], + FEATURE_GROUP_INPUT[1], + FEATURE_GROUP_INPUT[1], + ], + ) as retrieve_feature_group_context_arns_method, + patch.object( + S3LineageEntityHandler, + "retrieve_raw_data_artifact", + side_effect=[ + RAW_DATA_INPUT_ARTIFACTS[0], + RAW_DATA_INPUT_ARTIFACTS[1], + RAW_DATA_INPUT_ARTIFACTS[2], + RAW_DATA_INPUT_ARTIFACTS[3], + ], + ) as retrieve_raw_data_artifact_method, + patch.object( + S3LineageEntityHandler, + "create_transformation_code_artifact", + return_value=TRANSFORMATION_CODE_ARTIFACT_1, + ) as create_transformation_code_artifact_method, + patch.object( + PipelineLineageEntityHandler, + "load_pipeline_context", + return_value=pipeline_context, + ) as load_pipeline_context_method, + patch.object( + PipelineVersionLineageEntityHandler, + "load_pipeline_version_context", + return_value=PIPELINE_VERSION_CONTEXT, + ) as load_pipeline_version_context_method, + patch.object( + LineageAssociationHandler, + "list_upstream_associations", + side_effect=[ + generate_pipeline_version_upstream_feature_group_list(), + generate_pipeline_version_upstream_raw_data_list(), + generate_pipeline_version_upstream_transformation_code(), + ], + ) as list_upstream_associations_method, + patch.object( + LineageAssociationHandler, + "list_downstream_associations", + return_value=generate_pipeline_version_downstream_feature_group(), + ) as list_downstream_associations_method, + patch.object( + S3LineageEntityHandler, + "load_artifact_from_arn", + return_value=transformation_code_1, + ) as load_artifact_from_arn_method, + patch.object( + S3LineageEntityHandler, + "update_transformation_code_artifact", + ) as update_transformation_code_artifact_method, + patch.object( + PipelineLineageEntityHandler, + "update_pipeline_context", + ) as update_pipeline_context_method, + patch.object( + PipelineVersionLineageEntityHandler, + "create_pipeline_version_context", + return_value=PIPELINE_VERSION_CONTEXT, + ), + patch.object( + LineageAssociationHandler, "add_upstream_feature_group_data_associations" + ) as add_upstream_feature_group_data_associations_method, + patch.object( + LineageAssociationHandler, "add_downstream_feature_group_data_associations" + ) as add_downstream_feature_group_data_associations_method, + patch.object( + LineageAssociationHandler, "add_upstream_raw_data_associations" + ) as add_upstream_raw_data_associations_method, + patch.object( + LineageAssociationHandler, "add_upstream_transformation_code_associations" + ) as add_upstream_transformation_code_associations_method, + patch.object( + LineageAssociationHandler, "add_pipeline_and_pipeline_version_association" + ) as add_pipeline_and_pipeline_version_association_method, + patch.object( + Artifact, + "set_tags", + return_value={ + "Tags": [dict(Key="key_1", Value="value_1"), dict(Key="key_2", Value="value_2")] + }, + ) as artifact_set_tags, + ): lineage_handler.create_lineage(TAGS) retrieve_feature_group_context_arns_method.assert_has_calls( @@ -1576,78 +1715,96 @@ def test_create_lineage_when_already_exist_with_changed_transformation_code(): transformation_code=TRANSFORMATION_CODE_INPUT_2, sagemaker_session=SAGEMAKER_SESSION_MOCK, ) - with patch.object( - 
FeatureGroupLineageEntityHandler, - "retrieve_feature_group_context_arns", - side_effect=[ - FEATURE_GROUP_INPUT[0], - FEATURE_GROUP_INPUT[1], - FEATURE_GROUP_INPUT[0], - ], - ) as retrieve_feature_group_context_arns_method, patch.object( - S3LineageEntityHandler, - "retrieve_raw_data_artifact", - side_effect=[ - RAW_DATA_INPUT_ARTIFACTS[0], - RAW_DATA_INPUT_ARTIFACTS[1], - RAW_DATA_INPUT_ARTIFACTS[2], - RAW_DATA_INPUT_ARTIFACTS[3], - ], - ) as retrieve_raw_data_artifact_method, patch.object( - S3LineageEntityHandler, - "create_transformation_code_artifact", - return_value=TRANSFORMATION_CODE_ARTIFACT_2, - ) as create_transformation_code_artifact_method, patch.object( - PipelineLineageEntityHandler, - "load_pipeline_context", - return_value=pipeline_context, - ) as load_pipeline_context_method, patch.object( - PipelineVersionLineageEntityHandler, - "load_pipeline_version_context", - return_value=PIPELINE_VERSION_CONTEXT, - ) as load_pipeline_version_context_method, patch.object( - LineageAssociationHandler, - "list_upstream_associations", - side_effect=[ - generate_pipeline_version_upstream_feature_group_list(), - generate_pipeline_version_upstream_raw_data_list(), - generate_pipeline_version_upstream_transformation_code(), - ], - ) as list_upstream_associations_method, patch.object( - LineageAssociationHandler, - "list_downstream_associations", - return_value=generate_pipeline_version_downstream_feature_group(), - ) as list_downstream_associations_method, patch.object( - S3LineageEntityHandler, - "load_artifact_from_arn", - return_value=transformation_code_1, - ) as load_artifact_from_arn_method, patch.object( - S3LineageEntityHandler, - "update_transformation_code_artifact", - ) as update_transformation_code_artifact_method, patch.object( - PipelineLineageEntityHandler, - "update_pipeline_context", - ) as update_pipeline_context_method, patch.object( - PipelineVersionLineageEntityHandler, - "create_pipeline_version_context", - return_value=PIPELINE_VERSION_CONTEXT, - ), patch.object( - LineageAssociationHandler, "add_upstream_feature_group_data_associations" - ) as add_upstream_feature_group_data_associations_method, patch.object( - LineageAssociationHandler, "add_downstream_feature_group_data_associations" - ) as add_downstream_feature_group_data_associations_method, patch.object( - LineageAssociationHandler, "add_upstream_raw_data_associations" - ) as add_upstream_raw_data_associations_method, patch.object( - LineageAssociationHandler, "add_upstream_transformation_code_associations" - ) as add_upstream_transformation_code_associations_method, patch.object( - LineageAssociationHandler, "add_pipeline_and_pipeline_version_association" - ) as add_pipeline_and_pipeline_version_association_method, patch.object( - Artifact, - "set_tags", - return_value={ - "Tags": [dict(Key="key_1", Value="value_1"), dict(Key="key_2", Value="value_2")] - }, - ) as artifact_set_tags: + with ( + patch.object( + FeatureGroupLineageEntityHandler, + "retrieve_feature_group_context_arns", + side_effect=[ + FEATURE_GROUP_INPUT[0], + FEATURE_GROUP_INPUT[1], + FEATURE_GROUP_INPUT[0], + ], + ) as retrieve_feature_group_context_arns_method, + patch.object( + S3LineageEntityHandler, + "retrieve_raw_data_artifact", + side_effect=[ + RAW_DATA_INPUT_ARTIFACTS[0], + RAW_DATA_INPUT_ARTIFACTS[1], + RAW_DATA_INPUT_ARTIFACTS[2], + RAW_DATA_INPUT_ARTIFACTS[3], + ], + ) as retrieve_raw_data_artifact_method, + patch.object( + S3LineageEntityHandler, + "create_transformation_code_artifact", + 
return_value=TRANSFORMATION_CODE_ARTIFACT_2, + ) as create_transformation_code_artifact_method, + patch.object( + PipelineLineageEntityHandler, + "load_pipeline_context", + return_value=pipeline_context, + ) as load_pipeline_context_method, + patch.object( + PipelineVersionLineageEntityHandler, + "load_pipeline_version_context", + return_value=PIPELINE_VERSION_CONTEXT, + ) as load_pipeline_version_context_method, + patch.object( + LineageAssociationHandler, + "list_upstream_associations", + side_effect=[ + generate_pipeline_version_upstream_feature_group_list(), + generate_pipeline_version_upstream_raw_data_list(), + generate_pipeline_version_upstream_transformation_code(), + ], + ) as list_upstream_associations_method, + patch.object( + LineageAssociationHandler, + "list_downstream_associations", + return_value=generate_pipeline_version_downstream_feature_group(), + ) as list_downstream_associations_method, + patch.object( + S3LineageEntityHandler, + "load_artifact_from_arn", + return_value=transformation_code_1, + ) as load_artifact_from_arn_method, + patch.object( + S3LineageEntityHandler, + "update_transformation_code_artifact", + ) as update_transformation_code_artifact_method, + patch.object( + PipelineLineageEntityHandler, + "update_pipeline_context", + ) as update_pipeline_context_method, + patch.object( + PipelineVersionLineageEntityHandler, + "create_pipeline_version_context", + return_value=PIPELINE_VERSION_CONTEXT, + ), + patch.object( + LineageAssociationHandler, "add_upstream_feature_group_data_associations" + ) as add_upstream_feature_group_data_associations_method, + patch.object( + LineageAssociationHandler, "add_downstream_feature_group_data_associations" + ) as add_downstream_feature_group_data_associations_method, + patch.object( + LineageAssociationHandler, "add_upstream_raw_data_associations" + ) as add_upstream_raw_data_associations_method, + patch.object( + LineageAssociationHandler, "add_upstream_transformation_code_associations" + ) as add_upstream_transformation_code_associations_method, + patch.object( + LineageAssociationHandler, "add_pipeline_and_pipeline_version_association" + ) as add_pipeline_and_pipeline_version_association_method, + patch.object( + Artifact, + "set_tags", + return_value={ + "Tags": [dict(Key="key_1", Value="value_1"), dict(Key="key_2", Value="value_2")] + }, + ) as artifact_set_tags, + ): lineage_handler.create_lineage(TAGS) retrieve_feature_group_context_arns_method.assert_has_calls( @@ -1778,78 +1935,96 @@ def test_create_lineage_when_already_exist_with_last_transformation_code_as_none transformation_code=TRANSFORMATION_CODE_INPUT_2, sagemaker_session=SAGEMAKER_SESSION_MOCK, ) - with patch.object( - FeatureGroupLineageEntityHandler, - "retrieve_feature_group_context_arns", - side_effect=[ - FEATURE_GROUP_INPUT[0], - FEATURE_GROUP_INPUT[1], - FEATURE_GROUP_INPUT[0], - ], - ) as retrieve_feature_group_context_arns_method, patch.object( - S3LineageEntityHandler, - "retrieve_raw_data_artifact", - side_effect=[ - RAW_DATA_INPUT_ARTIFACTS[0], - RAW_DATA_INPUT_ARTIFACTS[1], - RAW_DATA_INPUT_ARTIFACTS[2], - RAW_DATA_INPUT_ARTIFACTS[3], - ], - ) as retrieve_raw_data_artifact_method, patch.object( - S3LineageEntityHandler, - "create_transformation_code_artifact", - return_value=TRANSFORMATION_CODE_ARTIFACT_2, - ) as create_transformation_code_artifact_method, patch.object( - PipelineLineageEntityHandler, - "load_pipeline_context", - return_value=pipeline_context, - ) as load_pipeline_context_method, patch.object( - 
PipelineVersionLineageEntityHandler, - "load_pipeline_version_context", - return_value=PIPELINE_VERSION_CONTEXT, - ) as load_pipeline_version_context_method, patch.object( - LineageAssociationHandler, - "list_upstream_associations", - side_effect=[ - generate_pipeline_version_upstream_feature_group_list(), - generate_pipeline_version_upstream_raw_data_list(), - generate_pipeline_version_upstream_transformation_code(), - ], - ) as list_upstream_associations_method, patch.object( - LineageAssociationHandler, - "list_downstream_associations", - return_value=generate_pipeline_version_downstream_feature_group(), - ) as list_downstream_associations_method, patch.object( - S3LineageEntityHandler, - "load_artifact_from_arn", - return_value=transformation_code_1, - ) as load_artifact_from_arn_method, patch.object( - S3LineageEntityHandler, - "update_transformation_code_artifact", - ) as update_transformation_code_artifact_method, patch.object( - PipelineLineageEntityHandler, - "update_pipeline_context", - ) as update_pipeline_context_method, patch.object( - PipelineVersionLineageEntityHandler, - "create_pipeline_version_context", - return_value=PIPELINE_VERSION_CONTEXT, - ), patch.object( - LineageAssociationHandler, "add_upstream_feature_group_data_associations" - ) as add_upstream_feature_group_data_associations_method, patch.object( - LineageAssociationHandler, "add_downstream_feature_group_data_associations" - ) as add_downstream_feature_group_data_associations_method, patch.object( - LineageAssociationHandler, "add_upstream_raw_data_associations" - ) as add_upstream_raw_data_associations_method, patch.object( - LineageAssociationHandler, "add_upstream_transformation_code_associations" - ) as add_upstream_transformation_code_associations_method, patch.object( - LineageAssociationHandler, "add_pipeline_and_pipeline_version_association" - ) as add_pipeline_and_pipeline_version_association_method, patch.object( - Artifact, - "set_tags", - return_value={ - "Tags": [dict(Key="key_1", Value="value_1"), dict(Key="key_2", Value="value_2")] - }, - ) as artifact_set_tags: + with ( + patch.object( + FeatureGroupLineageEntityHandler, + "retrieve_feature_group_context_arns", + side_effect=[ + FEATURE_GROUP_INPUT[0], + FEATURE_GROUP_INPUT[1], + FEATURE_GROUP_INPUT[0], + ], + ) as retrieve_feature_group_context_arns_method, + patch.object( + S3LineageEntityHandler, + "retrieve_raw_data_artifact", + side_effect=[ + RAW_DATA_INPUT_ARTIFACTS[0], + RAW_DATA_INPUT_ARTIFACTS[1], + RAW_DATA_INPUT_ARTIFACTS[2], + RAW_DATA_INPUT_ARTIFACTS[3], + ], + ) as retrieve_raw_data_artifact_method, + patch.object( + S3LineageEntityHandler, + "create_transformation_code_artifact", + return_value=TRANSFORMATION_CODE_ARTIFACT_2, + ) as create_transformation_code_artifact_method, + patch.object( + PipelineLineageEntityHandler, + "load_pipeline_context", + return_value=pipeline_context, + ) as load_pipeline_context_method, + patch.object( + PipelineVersionLineageEntityHandler, + "load_pipeline_version_context", + return_value=PIPELINE_VERSION_CONTEXT, + ) as load_pipeline_version_context_method, + patch.object( + LineageAssociationHandler, + "list_upstream_associations", + side_effect=[ + generate_pipeline_version_upstream_feature_group_list(), + generate_pipeline_version_upstream_raw_data_list(), + generate_pipeline_version_upstream_transformation_code(), + ], + ) as list_upstream_associations_method, + patch.object( + LineageAssociationHandler, + "list_downstream_associations", + 
return_value=generate_pipeline_version_downstream_feature_group(), + ) as list_downstream_associations_method, + patch.object( + S3LineageEntityHandler, + "load_artifact_from_arn", + return_value=transformation_code_1, + ) as load_artifact_from_arn_method, + patch.object( + S3LineageEntityHandler, + "update_transformation_code_artifact", + ) as update_transformation_code_artifact_method, + patch.object( + PipelineLineageEntityHandler, + "update_pipeline_context", + ) as update_pipeline_context_method, + patch.object( + PipelineVersionLineageEntityHandler, + "create_pipeline_version_context", + return_value=PIPELINE_VERSION_CONTEXT, + ), + patch.object( + LineageAssociationHandler, "add_upstream_feature_group_data_associations" + ) as add_upstream_feature_group_data_associations_method, + patch.object( + LineageAssociationHandler, "add_downstream_feature_group_data_associations" + ) as add_downstream_feature_group_data_associations_method, + patch.object( + LineageAssociationHandler, "add_upstream_raw_data_associations" + ) as add_upstream_raw_data_associations_method, + patch.object( + LineageAssociationHandler, "add_upstream_transformation_code_associations" + ) as add_upstream_transformation_code_associations_method, + patch.object( + LineageAssociationHandler, "add_pipeline_and_pipeline_version_association" + ) as add_pipeline_and_pipeline_version_association_method, + patch.object( + Artifact, + "set_tags", + return_value={ + "Tags": [dict(Key="key_1", Value="value_1"), dict(Key="key_2", Value="value_2")] + }, + ) as artifact_set_tags, + ): lineage_handler.create_lineage(TAGS) retrieve_feature_group_context_arns_method.assert_has_calls( @@ -1968,77 +2143,95 @@ def test_create_lineage_when_already_exist_with_all_previous_transformation_code transformation_code=TRANSFORMATION_CODE_INPUT_2, sagemaker_session=SAGEMAKER_SESSION_MOCK, ) - with patch.object( - FeatureGroupLineageEntityHandler, - "retrieve_feature_group_context_arns", - side_effect=[ - FEATURE_GROUP_INPUT[0], - FEATURE_GROUP_INPUT[1], - FEATURE_GROUP_INPUT[0], - ], - ) as retrieve_feature_group_context_arns_method, patch.object( - S3LineageEntityHandler, - "retrieve_raw_data_artifact", - side_effect=[ - RAW_DATA_INPUT_ARTIFACTS[0], - RAW_DATA_INPUT_ARTIFACTS[1], - RAW_DATA_INPUT_ARTIFACTS[2], - RAW_DATA_INPUT_ARTIFACTS[3], - ], - ) as retrieve_raw_data_artifact_method, patch.object( - S3LineageEntityHandler, - "create_transformation_code_artifact", - return_value=TRANSFORMATION_CODE_ARTIFACT_2, - ) as create_transformation_code_artifact_method, patch.object( - PipelineLineageEntityHandler, - "load_pipeline_context", - return_value=pipeline_context, - ) as load_pipeline_context_method, patch.object( - PipelineVersionLineageEntityHandler, - "load_pipeline_version_context", - return_value=PIPELINE_VERSION_CONTEXT, - ) as load_pipeline_version_context_method, patch.object( - LineageAssociationHandler, - "list_upstream_associations", - side_effect=[ - generate_pipeline_version_upstream_feature_group_list(), - generate_pipeline_version_upstream_raw_data_list(), - iter([]), - ], - ) as list_upstream_associations_method, patch.object( - LineageAssociationHandler, - "list_downstream_associations", - return_value=generate_pipeline_version_downstream_feature_group(), - ) as list_downstream_associations_method, patch.object( - S3LineageEntityHandler, - "load_artifact_from_arn", - ) as load_artifact_from_arn_method, patch.object( - S3LineageEntityHandler, - "update_transformation_code_artifact", - ) as 
update_transformation_code_artifact_method, patch.object( - PipelineLineageEntityHandler, - "update_pipeline_context", - ) as update_pipeline_context_method, patch.object( - PipelineVersionLineageEntityHandler, - "create_pipeline_version_context", - return_value=PIPELINE_VERSION_CONTEXT, - ), patch.object( - LineageAssociationHandler, "add_upstream_feature_group_data_associations" - ) as add_upstream_feature_group_data_associations_method, patch.object( - LineageAssociationHandler, "add_downstream_feature_group_data_associations" - ) as add_downstream_feature_group_data_associations_method, patch.object( - LineageAssociationHandler, "add_upstream_raw_data_associations" - ) as add_upstream_raw_data_associations_method, patch.object( - LineageAssociationHandler, "add_upstream_transformation_code_associations" - ) as add_upstream_transformation_code_associations_method, patch.object( - LineageAssociationHandler, "add_pipeline_and_pipeline_version_association" - ) as add_pipeline_and_pipeline_version_association_method, patch.object( - Artifact, - "set_tags", - return_value={ - "Tags": [dict(Key="key_1", Value="value_1"), dict(Key="key_2", Value="value_2")] - }, - ) as artifact_set_tags: + with ( + patch.object( + FeatureGroupLineageEntityHandler, + "retrieve_feature_group_context_arns", + side_effect=[ + FEATURE_GROUP_INPUT[0], + FEATURE_GROUP_INPUT[1], + FEATURE_GROUP_INPUT[0], + ], + ) as retrieve_feature_group_context_arns_method, + patch.object( + S3LineageEntityHandler, + "retrieve_raw_data_artifact", + side_effect=[ + RAW_DATA_INPUT_ARTIFACTS[0], + RAW_DATA_INPUT_ARTIFACTS[1], + RAW_DATA_INPUT_ARTIFACTS[2], + RAW_DATA_INPUT_ARTIFACTS[3], + ], + ) as retrieve_raw_data_artifact_method, + patch.object( + S3LineageEntityHandler, + "create_transformation_code_artifact", + return_value=TRANSFORMATION_CODE_ARTIFACT_2, + ) as create_transformation_code_artifact_method, + patch.object( + PipelineLineageEntityHandler, + "load_pipeline_context", + return_value=pipeline_context, + ) as load_pipeline_context_method, + patch.object( + PipelineVersionLineageEntityHandler, + "load_pipeline_version_context", + return_value=PIPELINE_VERSION_CONTEXT, + ) as load_pipeline_version_context_method, + patch.object( + LineageAssociationHandler, + "list_upstream_associations", + side_effect=[ + generate_pipeline_version_upstream_feature_group_list(), + generate_pipeline_version_upstream_raw_data_list(), + iter([]), + ], + ) as list_upstream_associations_method, + patch.object( + LineageAssociationHandler, + "list_downstream_associations", + return_value=generate_pipeline_version_downstream_feature_group(), + ) as list_downstream_associations_method, + patch.object( + S3LineageEntityHandler, + "load_artifact_from_arn", + ) as load_artifact_from_arn_method, + patch.object( + S3LineageEntityHandler, + "update_transformation_code_artifact", + ) as update_transformation_code_artifact_method, + patch.object( + PipelineLineageEntityHandler, + "update_pipeline_context", + ) as update_pipeline_context_method, + patch.object( + PipelineVersionLineageEntityHandler, + "create_pipeline_version_context", + return_value=PIPELINE_VERSION_CONTEXT, + ), + patch.object( + LineageAssociationHandler, "add_upstream_feature_group_data_associations" + ) as add_upstream_feature_group_data_associations_method, + patch.object( + LineageAssociationHandler, "add_downstream_feature_group_data_associations" + ) as add_downstream_feature_group_data_associations_method, + patch.object( + LineageAssociationHandler, 
"add_upstream_raw_data_associations" + ) as add_upstream_raw_data_associations_method, + patch.object( + LineageAssociationHandler, "add_upstream_transformation_code_associations" + ) as add_upstream_transformation_code_associations_method, + patch.object( + LineageAssociationHandler, "add_pipeline_and_pipeline_version_association" + ) as add_pipeline_and_pipeline_version_association_method, + patch.object( + Artifact, + "set_tags", + return_value={ + "Tags": [dict(Key="key_1", Value="value_1"), dict(Key="key_2", Value="value_2")] + }, + ) as artifact_set_tags, + ): lineage_handler.create_lineage(TAGS) retrieve_feature_group_context_arns_method.assert_has_calls( @@ -2154,78 +2347,96 @@ def test_create_lineage_when_already_exist_with_removed_transformation_code(): output=FEATURE_GROUP_DATA_SOURCE[0].name, sagemaker_session=SAGEMAKER_SESSION_MOCK, ) - with patch.object( - FeatureGroupLineageEntityHandler, - "retrieve_feature_group_context_arns", - side_effect=[ - FEATURE_GROUP_INPUT[0], - FEATURE_GROUP_INPUT[1], - FEATURE_GROUP_INPUT[0], - ], - ) as retrieve_feature_group_context_arns_method, patch.object( - S3LineageEntityHandler, - "retrieve_raw_data_artifact", - side_effect=[ - RAW_DATA_INPUT_ARTIFACTS[0], - RAW_DATA_INPUT_ARTIFACTS[1], - RAW_DATA_INPUT_ARTIFACTS[2], - RAW_DATA_INPUT_ARTIFACTS[3], - ], - ) as retrieve_raw_data_artifact_method, patch.object( - S3LineageEntityHandler, - "create_transformation_code_artifact", - return_value=None, - ) as create_transformation_code_artifact_method, patch.object( - PipelineLineageEntityHandler, - "load_pipeline_context", - return_value=pipeline_context, - ) as load_pipeline_context_method, patch.object( - PipelineVersionLineageEntityHandler, - "load_pipeline_version_context", - return_value=PIPELINE_VERSION_CONTEXT, - ) as load_pipeline_version_context_method, patch.object( - LineageAssociationHandler, - "list_upstream_associations", - side_effect=[ - generate_pipeline_version_upstream_feature_group_list(), - generate_pipeline_version_upstream_raw_data_list(), - generate_pipeline_version_upstream_transformation_code(), - ], - ) as list_upstream_associations_method, patch.object( - LineageAssociationHandler, - "list_downstream_associations", - return_value=generate_pipeline_version_downstream_feature_group(), - ) as list_downstream_associations_method, patch.object( - S3LineageEntityHandler, - "load_artifact_from_arn", - return_value=transformation_code_1, - ) as load_artifact_from_arn_method, patch.object( - S3LineageEntityHandler, - "update_transformation_code_artifact", - ) as update_transformation_code_artifact_method, patch.object( - PipelineLineageEntityHandler, - "update_pipeline_context", - ) as update_pipeline_context_method, patch.object( - PipelineVersionLineageEntityHandler, - "create_pipeline_version_context", - return_value=PIPELINE_VERSION_CONTEXT, - ), patch.object( - LineageAssociationHandler, "add_upstream_feature_group_data_associations" - ) as add_upstream_feature_group_data_associations_method, patch.object( - LineageAssociationHandler, "add_downstream_feature_group_data_associations" - ) as add_downstream_feature_group_data_associations_method, patch.object( - LineageAssociationHandler, "add_upstream_raw_data_associations" - ) as add_upstream_raw_data_associations_method, patch.object( - LineageAssociationHandler, "add_upstream_transformation_code_associations" - ) as add_upstream_transformation_code_associations_method, patch.object( - LineageAssociationHandler, "add_pipeline_and_pipeline_version_association" - ) as 
add_pipeline_and_pipeline_version_association_method, patch.object( - Artifact, - "set_tags", - return_value={ - "Tags": [dict(Key="key_1", Value="value_1"), dict(Key="key_2", Value="value_2")] - }, - ) as artifact_set_tags: + with ( + patch.object( + FeatureGroupLineageEntityHandler, + "retrieve_feature_group_context_arns", + side_effect=[ + FEATURE_GROUP_INPUT[0], + FEATURE_GROUP_INPUT[1], + FEATURE_GROUP_INPUT[0], + ], + ) as retrieve_feature_group_context_arns_method, + patch.object( + S3LineageEntityHandler, + "retrieve_raw_data_artifact", + side_effect=[ + RAW_DATA_INPUT_ARTIFACTS[0], + RAW_DATA_INPUT_ARTIFACTS[1], + RAW_DATA_INPUT_ARTIFACTS[2], + RAW_DATA_INPUT_ARTIFACTS[3], + ], + ) as retrieve_raw_data_artifact_method, + patch.object( + S3LineageEntityHandler, + "create_transformation_code_artifact", + return_value=None, + ) as create_transformation_code_artifact_method, + patch.object( + PipelineLineageEntityHandler, + "load_pipeline_context", + return_value=pipeline_context, + ) as load_pipeline_context_method, + patch.object( + PipelineVersionLineageEntityHandler, + "load_pipeline_version_context", + return_value=PIPELINE_VERSION_CONTEXT, + ) as load_pipeline_version_context_method, + patch.object( + LineageAssociationHandler, + "list_upstream_associations", + side_effect=[ + generate_pipeline_version_upstream_feature_group_list(), + generate_pipeline_version_upstream_raw_data_list(), + generate_pipeline_version_upstream_transformation_code(), + ], + ) as list_upstream_associations_method, + patch.object( + LineageAssociationHandler, + "list_downstream_associations", + return_value=generate_pipeline_version_downstream_feature_group(), + ) as list_downstream_associations_method, + patch.object( + S3LineageEntityHandler, + "load_artifact_from_arn", + return_value=transformation_code_1, + ) as load_artifact_from_arn_method, + patch.object( + S3LineageEntityHandler, + "update_transformation_code_artifact", + ) as update_transformation_code_artifact_method, + patch.object( + PipelineLineageEntityHandler, + "update_pipeline_context", + ) as update_pipeline_context_method, + patch.object( + PipelineVersionLineageEntityHandler, + "create_pipeline_version_context", + return_value=PIPELINE_VERSION_CONTEXT, + ), + patch.object( + LineageAssociationHandler, "add_upstream_feature_group_data_associations" + ) as add_upstream_feature_group_data_associations_method, + patch.object( + LineageAssociationHandler, "add_downstream_feature_group_data_associations" + ) as add_downstream_feature_group_data_associations_method, + patch.object( + LineageAssociationHandler, "add_upstream_raw_data_associations" + ) as add_upstream_raw_data_associations_method, + patch.object( + LineageAssociationHandler, "add_upstream_transformation_code_associations" + ) as add_upstream_transformation_code_associations_method, + patch.object( + LineageAssociationHandler, "add_pipeline_and_pipeline_version_association" + ) as add_pipeline_and_pipeline_version_association_method, + patch.object( + Artifact, + "set_tags", + return_value={ + "Tags": [dict(Key="key_1", Value="value_1"), dict(Key="key_2", Value="value_2")] + }, + ) as artifact_set_tags, + ): lineage_handler.create_lineage(TAGS) retrieve_feature_group_context_arns_method.assert_has_calls( @@ -2370,15 +2581,18 @@ def test_get_pipeline_lineage_names_when_lineage_exists(): transformation_code=TRANSFORMATION_CODE_INPUT_1, sagemaker_session=SAGEMAKER_SESSION_MOCK, ) - with patch.object( - PipelineLineageEntityHandler, - "load_pipeline_context", - 
return_value=PIPELINE_CONTEXT, - ) as load_pipeline_context_method, patch.object( - PipelineVersionLineageEntityHandler, - "load_pipeline_version_context", - return_value=PIPELINE_VERSION_CONTEXT, - ) as load_pipeline_version_context_method: + with ( + patch.object( + PipelineLineageEntityHandler, + "load_pipeline_context", + return_value=PIPELINE_CONTEXT, + ) as load_pipeline_context_method, + patch.object( + PipelineVersionLineageEntityHandler, + "load_pipeline_version_context", + return_value=PIPELINE_VERSION_CONTEXT, + ) as load_pipeline_version_context_method, + ): return_value = lineage_handler.get_pipeline_lineage_names() assert return_value == dict( @@ -2416,28 +2630,34 @@ def test_create_schedule_lineage(): pipeline=PIPELINE, sagemaker_session=SAGEMAKER_SESSION_MOCK, ) - with patch.object( - PipelineLineageEntityHandler, - "load_pipeline_context", - return_value=PIPELINE_CONTEXT, - ) as load_pipeline_context_method, patch.object( - PipelineVersionLineageEntityHandler, - "load_pipeline_version_context", - return_value=PIPELINE_VERSION_CONTEXT, - ) as load_pipeline_version_context_method, patch.object( - S3LineageEntityHandler, - "retrieve_pipeline_schedule_artifact", - return_value=SCHEDULE_ARTIFACT_RESULT, - ) as retrieve_pipeline_schedule_artifact_method, patch.object( - LineageAssociationHandler, - "add_upstream_schedule_associations", - ) as add_upstream_schedule_associations_method, patch.object( - Artifact, - "set_tags", - return_value={ - "Tags": [dict(Key="key_1", Value="value_1"), dict(Key="key_2", Value="value_2")] - }, - ) as artifact_set_tags: + with ( + patch.object( + PipelineLineageEntityHandler, + "load_pipeline_context", + return_value=PIPELINE_CONTEXT, + ) as load_pipeline_context_method, + patch.object( + PipelineVersionLineageEntityHandler, + "load_pipeline_version_context", + return_value=PIPELINE_VERSION_CONTEXT, + ) as load_pipeline_version_context_method, + patch.object( + S3LineageEntityHandler, + "retrieve_pipeline_schedule_artifact", + return_value=SCHEDULE_ARTIFACT_RESULT, + ) as retrieve_pipeline_schedule_artifact_method, + patch.object( + LineageAssociationHandler, + "add_upstream_schedule_associations", + ) as add_upstream_schedule_associations_method, + patch.object( + Artifact, + "set_tags", + return_value={ + "Tags": [dict(Key="key_1", Value="value_1"), dict(Key="key_2", Value="value_2")] + }, + ) as artifact_set_tags, + ): lineage_handler.create_schedule_lineage( pipeline_name=PIPELINE_NAME, schedule_arn=SCHEDULE_ARN, @@ -2487,28 +2707,34 @@ def test_create_trigger_lineage(): pipeline=PIPELINE, sagemaker_session=SAGEMAKER_SESSION_MOCK, ) - with patch.object( - PipelineLineageEntityHandler, - "load_pipeline_context", - return_value=PIPELINE_CONTEXT, - ) as load_pipeline_context_method, patch.object( - PipelineVersionLineageEntityHandler, - "load_pipeline_version_context", - return_value=PIPELINE_VERSION_CONTEXT, - ) as load_pipeline_version_context_method, patch.object( - S3LineageEntityHandler, - "retrieve_pipeline_trigger_artifact", - return_value=PIPELINE_TRIGGER_ARTIFACT, - ) as retrieve_pipeline_trigger_artifact_method, patch.object( - LineageAssociationHandler, - "_add_association", - ) as add_association_method, patch.object( - Artifact, - "set_tags", - return_value={ - "Tags": [dict(Key="key_1", Value="value_1"), dict(Key="key_2", Value="value_2")] - }, - ) as artifact_set_tags: + with ( + patch.object( + PipelineLineageEntityHandler, + "load_pipeline_context", + return_value=PIPELINE_CONTEXT, + ) as load_pipeline_context_method, + 
patch.object( + PipelineVersionLineageEntityHandler, + "load_pipeline_version_context", + return_value=PIPELINE_VERSION_CONTEXT, + ) as load_pipeline_version_context_method, + patch.object( + S3LineageEntityHandler, + "retrieve_pipeline_trigger_artifact", + return_value=PIPELINE_TRIGGER_ARTIFACT, + ) as retrieve_pipeline_trigger_artifact_method, + patch.object( + LineageAssociationHandler, + "_add_association", + ) as add_association_method, + patch.object( + Artifact, + "set_tags", + return_value={ + "Tags": [dict(Key="key_1", Value="value_1"), dict(Key="key_2", Value="value_2")] + }, + ) as artifact_set_tags, + ): lineage_handler.create_trigger_lineage( pipeline_name=PIPELINE_NAME, trigger_arn=TRIGGER_ARN, @@ -2564,56 +2790,68 @@ def test_upsert_tags_for_lineage_resources(): ) lineage_handler.sagemaker_session.boto_session = Mock() lineage_handler.sagemaker_session.sagemaker_client = Mock() - with patch.object( - S3LineageEntityHandler, - "retrieve_raw_data_artifact", - side_effect=[ - RAW_DATA_INPUT_ARTIFACTS[0], - RAW_DATA_INPUT_ARTIFACTS[1], - RAW_DATA_INPUT_ARTIFACTS[2], - RAW_DATA_INPUT_ARTIFACTS[3], - ], - ) as retrieve_raw_data_artifact_method, patch.object( - PipelineLineageEntityHandler, - "load_pipeline_context", - return_value=pipeline_context, - ) as load_pipeline_context_method, patch.object( - PipelineVersionLineageEntityHandler, - "load_pipeline_version_context", - return_value=PIPELINE_VERSION_CONTEXT, - ) as load_pipeline_version_context_method, patch.object( - LineageAssociationHandler, - "list_upstream_associations", - side_effect=[ - generate_pipeline_version_upstream_feature_group_list(), - generate_pipeline_version_upstream_raw_data_list(), - iter([]), - ], - ) as list_upstream_associations_method, patch.object( - LineageAssociationHandler, - "list_downstream_associations", - return_value=generate_pipeline_version_downstream_feature_group(), - ) as list_downstream_associations_method, patch.object( - S3LineageEntityHandler, "load_artifact_from_arn", return_value=ARTIFACT_RESULT - ) as load_artifact_from_arn_method, patch.object( - S3LineageEntityHandler, "_load_artifact_from_s3_uri", return_value=ARTIFACT_SUMMARY - ) as load_artifact_from_s3_uri_method, patch.object( - Artifact, - "set_tags", - return_value={ - "Tags": [dict(Key="key_1", Value="value_1"), dict(Key="key_2", Value="value_2")] - }, - ) as artifact_set_tags, patch.object( - Context, - "set_tags", - return_value={ - "Tags": [dict(Key="key_1", Value="value_1"), dict(Key="key_2", Value="value_2")] - }, - ) as context_set_tags, patch.object( - EventBridgeSchedulerHelper, "describe_schedule", return_value=dict(Arn="schedule_arn") - ) as get_event_bridge_schedule, patch.object( - EventBridgeRuleHelper, "describe_rule", return_value=dict(Arn="rule_arn") - ) as get_event_bridge_rule: + with ( + patch.object( + S3LineageEntityHandler, + "retrieve_raw_data_artifact", + side_effect=[ + RAW_DATA_INPUT_ARTIFACTS[0], + RAW_DATA_INPUT_ARTIFACTS[1], + RAW_DATA_INPUT_ARTIFACTS[2], + RAW_DATA_INPUT_ARTIFACTS[3], + ], + ) as retrieve_raw_data_artifact_method, + patch.object( + PipelineLineageEntityHandler, + "load_pipeline_context", + return_value=pipeline_context, + ) as load_pipeline_context_method, + patch.object( + PipelineVersionLineageEntityHandler, + "load_pipeline_version_context", + return_value=PIPELINE_VERSION_CONTEXT, + ) as load_pipeline_version_context_method, + patch.object( + LineageAssociationHandler, + "list_upstream_associations", + side_effect=[ + 
generate_pipeline_version_upstream_feature_group_list(), + generate_pipeline_version_upstream_raw_data_list(), + iter([]), + ], + ) as list_upstream_associations_method, + patch.object( + LineageAssociationHandler, + "list_downstream_associations", + return_value=generate_pipeline_version_downstream_feature_group(), + ) as list_downstream_associations_method, + patch.object( + S3LineageEntityHandler, "load_artifact_from_arn", return_value=ARTIFACT_RESULT + ) as load_artifact_from_arn_method, + patch.object( + S3LineageEntityHandler, "_load_artifact_from_s3_uri", return_value=ARTIFACT_SUMMARY + ) as load_artifact_from_s3_uri_method, + patch.object( + Artifact, + "set_tags", + return_value={ + "Tags": [dict(Key="key_1", Value="value_1"), dict(Key="key_2", Value="value_2")] + }, + ) as artifact_set_tags, + patch.object( + Context, + "set_tags", + return_value={ + "Tags": [dict(Key="key_1", Value="value_1"), dict(Key="key_2", Value="value_2")] + }, + ) as context_set_tags, + patch.object( + EventBridgeSchedulerHelper, "describe_schedule", return_value=dict(Arn="schedule_arn") + ) as get_event_bridge_schedule, + patch.object( + EventBridgeRuleHelper, "describe_rule", return_value=dict(Arn="rule_arn") + ) as get_event_bridge_rule, + ): lineage_handler.upsert_tags_for_lineage_resources(TAGS) retrieve_raw_data_artifact_method.assert_has_calls( diff --git a/tests/unit/sagemaker/huggingface/test_llm_utils.py b/tests/unit/sagemaker/huggingface/test_llm_utils.py index 675a6fd885..9bb1b451a1 100644 --- a/tests/unit/sagemaker/huggingface/test_llm_utils.py +++ b/tests/unit/sagemaker/huggingface/test_llm_utils.py @@ -65,7 +65,7 @@ def test_huggingface_model_metadata_unauthorized_exception(self, mock_urllib): "Trying to access a gated/private HuggingFace model without valid credentials. 
" "Please provide a HUGGING_FACE_HUB_TOKEN in env_vars" ) - self.assertEquals(expected_error_msg, str(context.exception)) + self.assertEqual(expected_error_msg, str(context.exception)) @patch("sagemaker.huggingface.llm_utils.urllib") def test_huggingface_model_metadata_general_exception(self, mock_urllib): @@ -76,7 +76,7 @@ def test_huggingface_model_metadata_general_exception(self, mock_urllib): expected_error_msg = ( f"Did not find model metadata for the following HuggingFace Model ID {MOCK_HF_ID}" ) - self.assertEquals(expected_error_msg, str(context.exception)) + self.assertEqual(expected_error_msg, str(context.exception)) @patch("huggingface_hub.snapshot_download") def test_download_huggingface_model_metadata(self, mock_snapshot_download): diff --git a/tests/unit/sagemaker/jumpstart/constants.py b/tests/unit/sagemaker/jumpstart/constants.py index 25cee7288e..ae02c597da 100644 --- a/tests/unit/sagemaker/jumpstart/constants.py +++ b/tests/unit/sagemaker/jumpstart/constants.py @@ -12095,7 +12095,7 @@ "inference_vulnerabilities": [], "training_vulnerable": False, "training_dependencies": [ - "numpy==2.0", + "numpy==1.23.1", "opencv_python==4.7.0.68", "sagemaker_jumpstart_prepack_script_utilities==1.0.0", ], @@ -14360,10 +14360,10 @@ "jmespath==1.0.1", "jsonschema==4.17.3", "multiprocess==0.70.14", - "numpy==2.0", + "numpy==1.26.4", "oscrypto==1.3.0", "packaging==23.1", - "pandas==2.2.3", + "pandas==2.0.2", "pathos==0.3.0", "pkgutil-resolve-name==1.3.10", "platformdirs==3.8.0", @@ -14884,10 +14884,10 @@ "jmespath==1.0.1", "jsonschema==4.17.3", "multiprocess==0.70.14", - "numpy>=2.0.0,<2.3.0", + "numpy==1.24.3", "oscrypto==1.3.0", "packaging==23.1", - "pandas==2.2.3", + "pandas==2.0.2", "pathos==0.3.0", "pkgutil-resolve-name==1.3.10", "platformdirs==3.8.0", diff --git a/tests/unit/sagemaker/jumpstart/estimator/test_sagemaker_config.py b/tests/unit/sagemaker/jumpstart/estimator/test_sagemaker_config.py index 073921d5ba..39eca166ee 100644 --- a/tests/unit/sagemaker/jumpstart/estimator/test_sagemaker_config.py +++ b/tests/unit/sagemaker/jumpstart/estimator/test_sagemaker_config.py @@ -123,16 +123,16 @@ def test_without_arg_overwrites_without_kwarg_collisions_with_config( mock_retrieve_model_init_kwargs.return_value = {} - self.assertEquals(mock_get_sagemaker_config_value.call_count, 1) - self.assertEquals(mock_estimator_init.call_args[1].get("role"), config_role) + self.assertEqual(mock_get_sagemaker_config_value.call_count, 1) + self.assertEqual(mock_estimator_init.call_args[1].get("role"), config_role) assert "enable_network_isolation" not in mock_estimator_init.call_args[1] assert "encrypt_inter_container_traffic" not in mock_estimator_init.call_args[1] estimator.deploy() - self.assertEquals(mock_get_sagemaker_config_value.call_count, 3) + self.assertEqual(mock_get_sagemaker_config_value.call_count, 3) - self.assertEquals(mock_estimator_deploy.call_args[1].get("role"), config_inference_role) + self.assertEqual(mock_estimator_deploy.call_args[1].get("role"), config_inference_role) assert "enable_network_isolation" not in mock_estimator_deploy.call_args[1] @@ -181,13 +181,13 @@ def test_without_arg_overwrites_with_kwarg_collisions_with_config( model_id=model_id, ) - self.assertEquals(mock_get_sagemaker_config_value.call_count, 3) - self.assertEquals(mock_estimator_init.call_args[1].get("role"), config_role) - self.assertEquals( + self.assertEqual(mock_get_sagemaker_config_value.call_count, 3) + self.assertEqual(mock_estimator_init.call_args[1].get("role"), config_role) + self.assertEqual( 
mock_estimator_init.call_args[1].get("enable_network_isolation"), config_enable_network_isolation, ) - self.assertEquals( + self.assertEqual( mock_estimator_init.call_args[1].get("encrypt_inter_container_traffic"), config_intercontainer_encryption, ) @@ -200,11 +200,11 @@ def test_without_arg_overwrites_with_kwarg_collisions_with_config( estimator.deploy() - self.assertEquals(mock_get_sagemaker_config_value.call_count, 6) + self.assertEqual(mock_get_sagemaker_config_value.call_count, 6) - self.assertEquals(mock_estimator_deploy.call_args[1].get("role"), config_inference_role) + self.assertEqual(mock_estimator_deploy.call_args[1].get("role"), config_inference_role) - self.assertEquals( + self.assertEqual( mock_estimator_deploy.call_args[1].get("enable_network_isolation"), config_inference_enable_network_isolation, ) @@ -257,13 +257,13 @@ def test_with_arg_overwrites_with_kwarg_collisions_with_config( encrypt_inter_container_traffic=override_encrypt_inter_container_traffic, ) - self.assertEquals(mock_get_sagemaker_config_value.call_count, 1) - self.assertEquals(mock_estimator_init.call_args[1].get("role"), override_role) - self.assertEquals( + self.assertEqual(mock_get_sagemaker_config_value.call_count, 1) + self.assertEqual(mock_estimator_init.call_args[1].get("role"), override_role) + self.assertEqual( mock_estimator_init.call_args[1].get("enable_network_isolation"), override_enable_network_isolation, ) - self.assertEquals( + self.assertEqual( mock_estimator_init.call_args[1].get("encrypt_inter_container_traffic"), override_encrypt_inter_container_traffic, ) @@ -280,13 +280,13 @@ def test_with_arg_overwrites_with_kwarg_collisions_with_config( enable_network_isolation=override_inference_enable_network_isolation, ) - self.assertEquals(mock_get_sagemaker_config_value.call_count, 3) + self.assertEqual(mock_get_sagemaker_config_value.call_count, 3) - self.assertEquals( + self.assertEqual( mock_estimator_deploy.call_args[1].get("role"), mock_inference_override_role ) - self.assertEquals( + self.assertEqual( mock_estimator_deploy.call_args[1].get("enable_network_isolation"), override_inference_enable_network_isolation, ) @@ -336,13 +336,13 @@ def test_with_arg_overwrites_without_kwarg_collisions_with_config( encrypt_inter_container_traffic=override_encrypt_inter_container_traffic, ) - self.assertEquals(mock_get_sagemaker_config_value.call_count, 1) - self.assertEquals(mock_estimator_init.call_args[1].get("role"), override_role) - self.assertEquals( + self.assertEqual(mock_get_sagemaker_config_value.call_count, 1) + self.assertEqual(mock_estimator_init.call_args[1].get("role"), override_role) + self.assertEqual( mock_estimator_init.call_args[1].get("enable_network_isolation"), override_enable_network_isolation, ) - self.assertEquals( + self.assertEqual( mock_estimator_init.call_args[1].get("encrypt_inter_container_traffic"), override_encrypt_inter_container_traffic, ) @@ -355,13 +355,13 @@ def test_with_arg_overwrites_without_kwarg_collisions_with_config( enable_network_isolation=override_inference_enable_network_isolation, ) - self.assertEquals(mock_get_sagemaker_config_value.call_count, 3) + self.assertEqual(mock_get_sagemaker_config_value.call_count, 3) - self.assertEquals( + self.assertEqual( mock_estimator_deploy.call_args[1].get("role"), mock_inference_override_role ) - self.assertEquals( + self.assertEqual( mock_estimator_deploy.call_args[1].get("enable_network_isolation"), override_inference_enable_network_isolation, ) @@ -412,8 +412,8 @@ def 
test_without_arg_overwrites_without_kwarg_collisions_without_config( model_id=model_id, ) - self.assertEquals(mock_get_sagemaker_config_value.call_count, 1) - self.assertEquals(mock_estimator_init.call_args[1].get("role"), execution_role) + self.assertEqual(mock_get_sagemaker_config_value.call_count, 1) + self.assertEqual(mock_estimator_init.call_args[1].get("role"), execution_role) assert "enable_network_isolation" not in mock_estimator_init.call_args[1] assert "encrypt_inter_container_traffic" not in mock_estimator_init.call_args[1] @@ -421,9 +421,9 @@ def test_without_arg_overwrites_without_kwarg_collisions_without_config( mock_retrieve_model_init_kwargs.return_value = {} - self.assertEquals(mock_get_sagemaker_config_value.call_count, 3) + self.assertEqual(mock_get_sagemaker_config_value.call_count, 3) - self.assertEquals(mock_estimator_deploy.call_args[1].get("role"), execution_role) + self.assertEqual(mock_estimator_deploy.call_args[1].get("role"), execution_role) assert "enable_network_isolation" not in mock_estimator_deploy.call_args[1] @@ -475,13 +475,13 @@ def test_without_arg_overwrites_with_kwarg_collisions_without_config( model_id=model_id, ) - self.assertEquals(mock_get_sagemaker_config_value.call_count, 3) - self.assertEquals(mock_estimator_init.call_args[1].get("role"), execution_role) - self.assertEquals( + self.assertEqual(mock_get_sagemaker_config_value.call_count, 3) + self.assertEqual(mock_estimator_init.call_args[1].get("role"), execution_role) + self.assertEqual( mock_estimator_init.call_args[1].get("enable_network_isolation"), metadata_enable_network_isolation, ) - self.assertEquals( + self.assertEqual( mock_estimator_init.call_args[1].get("encrypt_inter_container_traffic"), metadata_intercontainer_encryption, ) @@ -492,11 +492,11 @@ def test_without_arg_overwrites_with_kwarg_collisions_without_config( estimator.deploy() - self.assertEquals(mock_get_sagemaker_config_value.call_count, 6) + self.assertEqual(mock_get_sagemaker_config_value.call_count, 6) - self.assertEquals(mock_estimator_deploy.call_args[1].get("role"), execution_role) + self.assertEqual(mock_estimator_deploy.call_args[1].get("role"), execution_role) - self.assertEquals( + self.assertEqual( mock_estimator_deploy.call_args[1].get("enable_network_isolation"), metadata_inference_enable_network_isolation, ) @@ -548,13 +548,13 @@ def test_with_arg_overwrites_with_kwarg_collisions_without_config( encrypt_inter_container_traffic=override_encrypt_inter_container_traffic, ) - self.assertEquals(mock_get_sagemaker_config_value.call_count, 1) - self.assertEquals(mock_estimator_init.call_args[1].get("role"), override_role) - self.assertEquals( + self.assertEqual(mock_get_sagemaker_config_value.call_count, 1) + self.assertEqual(mock_estimator_init.call_args[1].get("role"), override_role) + self.assertEqual( mock_estimator_init.call_args[1].get("enable_network_isolation"), override_enable_network_isolation, ) - self.assertEquals( + self.assertEqual( mock_estimator_init.call_args[1].get("encrypt_inter_container_traffic"), override_encrypt_inter_container_traffic, ) @@ -568,11 +568,11 @@ def test_with_arg_overwrites_with_kwarg_collisions_without_config( enable_network_isolation=override_inference_enable_network_isolation, ) - self.assertEquals(mock_get_sagemaker_config_value.call_count, 3) + self.assertEqual(mock_get_sagemaker_config_value.call_count, 3) - self.assertEquals(mock_estimator_deploy.call_args[1].get("role"), override_inference_role) + self.assertEqual(mock_estimator_deploy.call_args[1].get("role"), 
override_inference_role) - self.assertEquals( + self.assertEqual( mock_estimator_deploy.call_args[1].get("enable_network_isolation"), override_inference_enable_network_isolation, ) @@ -618,13 +618,13 @@ def test_with_arg_overwrites_without_kwarg_collisions_without_config( enable_network_isolation=override_enable_network_isolation, encrypt_inter_container_traffic=override_encrypt_inter_container_traffic, ) - self.assertEquals(mock_get_sagemaker_config_value.call_count, 1) - self.assertEquals(mock_estimator_init.call_args[1].get("role"), override_role) - self.assertEquals( + self.assertEqual(mock_get_sagemaker_config_value.call_count, 1) + self.assertEqual(mock_estimator_init.call_args[1].get("role"), override_role) + self.assertEqual( mock_estimator_init.call_args[1].get("enable_network_isolation"), override_enable_network_isolation, ) - self.assertEquals( + self.assertEqual( mock_estimator_init.call_args[1].get("encrypt_inter_container_traffic"), override_encrypt_inter_container_traffic, ) @@ -634,11 +634,11 @@ def test_with_arg_overwrites_without_kwarg_collisions_without_config( enable_network_isolation=override_enable_network_isolation, ) - self.assertEquals(mock_get_sagemaker_config_value.call_count, 3) + self.assertEqual(mock_get_sagemaker_config_value.call_count, 3) - self.assertEquals(mock_estimator_deploy.call_args[1].get("role"), override_inference_role) + self.assertEqual(mock_estimator_deploy.call_args[1].get("role"), override_inference_role) - self.assertEquals( + self.assertEqual( mock_estimator_deploy.call_args[1].get("enable_network_isolation"), override_enable_network_isolation, ) diff --git a/tests/unit/sagemaker/jumpstart/model/test_sagemaker_config.py b/tests/unit/sagemaker/jumpstart/model/test_sagemaker_config.py index 2be4bde7e4..a0299ebb1a 100644 --- a/tests/unit/sagemaker/jumpstart/model/test_sagemaker_config.py +++ b/tests/unit/sagemaker/jumpstart/model/test_sagemaker_config.py @@ -99,9 +99,9 @@ def test_without_arg_overwrites_without_kwarg_collisions_with_config( model_id=model_id, ) - self.assertEquals(mock_get_sagemaker_config_value.call_count, 1) + self.assertEqual(mock_get_sagemaker_config_value.call_count, 1) - self.assertEquals(mock_model_init.call_args[1].get("role"), config_role) + self.assertEqual(mock_model_init.call_args[1].get("role"), config_role) assert "enable_network_isolation" not in mock_model_init.call_args[1] @@ -147,10 +147,10 @@ def test_all_arg_overwrites_without_kwarg_collisions_with_config( role=override_role, ) - self.assertEquals(mock_get_sagemaker_config_value.call_count, 1) + self.assertEqual(mock_get_sagemaker_config_value.call_count, 1) - self.assertEquals(mock_model_init.call_args[1].get("role"), override_role) - self.assertEquals( + self.assertEqual(mock_model_init.call_args[1].get("role"), override_role) + self.assertEqual( mock_model_init.call_args[1].get("enable_network_isolation"), override_enable_network_isolation, ) @@ -197,10 +197,10 @@ def test_without_arg_overwrites_all_kwarg_collisions_with_config( model_id=model_id, ) - self.assertEquals(mock_get_sagemaker_config_value.call_count, 2) + self.assertEqual(mock_get_sagemaker_config_value.call_count, 2) - self.assertEquals(mock_model_init.call_args[1].get("role"), config_role) - self.assertEquals( + self.assertEqual(mock_model_init.call_args[1].get("role"), config_role) + self.assertEqual( mock_model_init.call_args[1].get("enable_network_isolation"), config_enable_network_isolation, ) @@ -249,10 +249,10 @@ def test_with_arg_overwrites_all_kwarg_collisions_with_config( 
enable_network_isolation=override_enable_network_isolation, ) - self.assertEquals(mock_get_sagemaker_config_value.call_count, 1) + self.assertEqual(mock_get_sagemaker_config_value.call_count, 1) - self.assertEquals(mock_model_init.call_args[1].get("role"), override_role) - self.assertEquals( + self.assertEqual(mock_model_init.call_args[1].get("role"), override_role) + self.assertEqual( mock_model_init.call_args[1].get("enable_network_isolation"), override_enable_network_isolation, ) @@ -299,10 +299,10 @@ def test_without_arg_overwrites_all_kwarg_collisions_without_config( model_id=model_id, ) - self.assertEquals(mock_get_sagemaker_config_value.call_count, 2) + self.assertEqual(mock_get_sagemaker_config_value.call_count, 2) - self.assertEquals(mock_model_init.call_args[1].get("role"), execution_role) - self.assertEquals( + self.assertEqual(mock_model_init.call_args[1].get("role"), execution_role) + self.assertEqual( mock_model_init.call_args[1].get("enable_network_isolation"), metadata_enable_network_isolation, ) @@ -350,10 +350,10 @@ def test_with_arg_overwrites_all_kwarg_collisions_without_config( enable_network_isolation=override_enable_network_isolation, ) - self.assertEquals(mock_get_sagemaker_config_value.call_count, 1) + self.assertEqual(mock_get_sagemaker_config_value.call_count, 1) - self.assertEquals(mock_model_init.call_args[1].get("role"), override_role) - self.assertEquals( + self.assertEqual(mock_model_init.call_args[1].get("role"), override_role) + self.assertEqual( mock_model_init.call_args[1].get("enable_network_isolation"), override_enable_network_isolation, ) @@ -398,9 +398,9 @@ def test_without_arg_overwrites_without_kwarg_collisions_without_config( model_id=model_id, ) - self.assertEquals(mock_get_sagemaker_config_value.call_count, 1) + self.assertEqual(mock_get_sagemaker_config_value.call_count, 1) - self.assertEquals(mock_model_init.call_args[1].get("role"), execution_role) + self.assertEqual(mock_model_init.call_args[1].get("role"), execution_role) assert "enable_network_isolation" not in mock_model_init.call_args[1] @mock.patch( @@ -445,10 +445,10 @@ def test_with_arg_overwrites_without_kwarg_collisions_without_config( enable_network_isolation=override_enable_network_isolation, ) - self.assertEquals(mock_get_sagemaker_config_value.call_count, 1) + self.assertEqual(mock_get_sagemaker_config_value.call_count, 1) - self.assertEquals(mock_model_init.call_args[1].get("role"), override_role) - self.assertEquals( + self.assertEqual(mock_model_init.call_args[1].get("role"), override_role) + self.assertEqual( mock_model_init.call_args[1].get("enable_network_isolation"), override_enable_network_isolation, ) diff --git a/tests/unit/sagemaker/jumpstart/test_utils.py b/tests/unit/sagemaker/jumpstart/test_utils.py index e3e3110da8..de9be1d51d 100644 --- a/tests/unit/sagemaker/jumpstart/test_utils.py +++ b/tests/unit/sagemaker/jumpstart/test_utils.py @@ -1388,7 +1388,7 @@ def test_no_model_id_no_version_found(self): mock_sagemaker_session.list_tags = mock_list_tags mock_list_tags.return_value = [{"Key": "blah", "Value": "blah1"}] - self.assertEquals( + self.assertEqual( utils.get_jumpstart_model_info_from_resource_arn("some-arn", mock_sagemaker_session), (None, None, None, None), ) @@ -1403,7 +1403,7 @@ def test_model_id_no_version_found(self): {"Key": JumpStartTag.MODEL_ID, "Value": "model_id"}, ] - self.assertEquals( + self.assertEqual( utils.get_jumpstart_model_info_from_resource_arn("some-arn", mock_sagemaker_session), ("model_id", None, None, None), ) @@ -1418,7 +1418,7 @@ 
def test_no_model_id_version_found(self): {"Key": JumpStartTag.MODEL_VERSION, "Value": "model_version"}, ] - self.assertEquals( + self.assertEqual( utils.get_jumpstart_model_info_from_resource_arn("some-arn", mock_sagemaker_session), (None, "model_version", None, None), ) @@ -1430,7 +1430,7 @@ def test_no_config_name_found(self): mock_sagemaker_session.list_tags = mock_list_tags mock_list_tags.return_value = [{"Key": "blah", "Value": "blah1"}] - self.assertEquals( + self.assertEqual( utils.get_jumpstart_model_info_from_resource_arn("some-arn", mock_sagemaker_session), (None, None, None, None), ) @@ -1445,7 +1445,7 @@ def test_inference_config_name_found(self): {"Key": JumpStartTag.INFERENCE_CONFIG_NAME, "Value": "config_name"}, ] - self.assertEquals( + self.assertEqual( utils.get_jumpstart_model_info_from_resource_arn("some-arn", mock_sagemaker_session), (None, None, "config_name", None), ) @@ -1460,7 +1460,7 @@ def test_training_config_name_found(self): {"Key": JumpStartTag.TRAINING_CONFIG_NAME, "Value": "config_name"}, ] - self.assertEquals( + self.assertEqual( utils.get_jumpstart_model_info_from_resource_arn("some-arn", mock_sagemaker_session), (None, None, None, "config_name"), ) @@ -1476,7 +1476,7 @@ def test_both_config_name_found(self): {"Key": JumpStartTag.TRAINING_CONFIG_NAME, "Value": "training_config_name"}, ] - self.assertEquals( + self.assertEqual( utils.get_jumpstart_model_info_from_resource_arn("some-arn", mock_sagemaker_session), (None, None, "inference_config_name", "training_config_name"), ) @@ -1492,7 +1492,7 @@ def test_model_id_version_found(self): {"Key": JumpStartTag.MODEL_VERSION, "Value": "model_version"}, ] - self.assertEquals( + self.assertEqual( utils.get_jumpstart_model_info_from_resource_arn("some-arn", mock_sagemaker_session), ("model_id", "model_version", None, None), ) @@ -1510,7 +1510,7 @@ def test_multiple_model_id_versions_found(self): {"Key": JumpStartTag.MODEL_VERSION, "Value": "model_version_2"}, ] - self.assertEquals( + self.assertEqual( utils.get_jumpstart_model_info_from_resource_arn("some-arn", mock_sagemaker_session), (None, None, None, None), ) @@ -1528,7 +1528,7 @@ def test_multiple_model_id_versions_found_aliases_consistent(self): {"Key": random.choice(EXTRA_MODEL_VERSION_TAGS), "Value": "model_version_1"}, ] - self.assertEquals( + self.assertEqual( utils.get_jumpstart_model_info_from_resource_arn("some-arn", mock_sagemaker_session), ("model_id_1", "model_version_1", None, None), ) @@ -1546,7 +1546,7 @@ def test_multiple_model_id_versions_found_aliases_inconsistent(self): {"Key": random.choice(EXTRA_MODEL_VERSION_TAGS), "Value": "model_version_2"}, ] - self.assertEquals( + self.assertEqual( utils.get_jumpstart_model_info_from_resource_arn("some-arn", mock_sagemaker_session), (None, None, None, None), ) @@ -1564,7 +1564,7 @@ def test_multiple_config_names_found_aliases_inconsistent(self): {"Key": JumpStartTag.INFERENCE_CONFIG_NAME, "Value": "config_name_2"}, ] - self.assertEquals( + self.assertEqual( utils.get_jumpstart_model_info_from_resource_arn("some-arn", mock_sagemaker_session), ("model_id_1", "model_version_1", None, None), ) diff --git a/tests/unit/sagemaker/local/test_local_entities.py b/tests/unit/sagemaker/local/test_local_entities.py index 6a026c316b..74a361cf73 100644 --- a/tests/unit/sagemaker/local/test_local_entities.py +++ b/tests/unit/sagemaker/local/test_local_entities.py @@ -12,6 +12,7 @@ # language governing permissions and limitations under the License. 
 from __future__ import absolute_import
 
+import re
 import os
 
 import pytest
@@ -290,10 +291,10 @@ def test_start_local_pipeline_with_wrong_parameter_type(sagemaker_local_session)
     local_pipeline = sagemaker.local.entities._LocalPipeline(pipeline)
     with pytest.raises(ClientError) as error:
         local_pipeline.start(PipelineParameters={"MyStr": True})
-    assert (
-        f"Unexpected type for parameter '{parameter.name}'. Expected "
-        f"{parameter.parameter_type.python_type} but found {type(True)}." in str(error.value)
+    expected_error_pattern = (
+        r"Unexpected type for parameter 'MyStr'\. Expected .* but found <class 'bool'>\."
     )
+    assert re.search(expected_error_pattern, str(error.value))
 
 
 def test_start_local_pipeline_with_empty_parameter_string_value(
diff --git a/tests/unit/sagemaker/modules/train/test_model_trainer.py b/tests/unit/sagemaker/modules/train/test_model_trainer.py
index 770420c354..13530a3983 100644
--- a/tests/unit/sagemaker/modules/train/test_model_trainer.py
+++ b/tests/unit/sagemaker/modules/train/test_model_trainer.py
@@ -1049,15 +1049,16 @@ def mock_upload_data(path, bucket, key_prefix):
 
         model_trainer.train()
 
-        assert mock_local_container.train.called_once_with(
+        mock_local_container.assert_called_once_with(
             training_job_name=unique_name,
             instance_type=compute.instance_type,
             instance_count=compute.instance_count,
             image=training_image,
             container_root=local_container_root,
             sagemaker_session=modules_session,
-            container_entry_point=DEFAULT_ENTRYPOINT,
+            container_entrypoint=DEFAULT_ENTRYPOINT,
             container_arguments=DEFAULT_ARGUMENTS,
+            input_data_config=ANY,
             hyper_parameters=hyperparameters,
             environment=environment,
         )
diff --git a/tests/unit/sagemaker/serve/detector/test_dependency_manager.py b/tests/unit/sagemaker/serve/detector/test_dependency_manager.py
index 986cfbfe93..52e9822e57 100644
--- a/tests/unit/sagemaker/serve/detector/test_dependency_manager.py
+++ b/tests/unit/sagemaker/serve/detector/test_dependency_manager.py
@@ -21,8 +21,8 @@
 
 DEPENDENCY_LIST = [
     "requests==2.26.0",
-    "numpy>=2.0.0,<2.3.0",
-    "pandas==2.2.3",
+    "numpy==1.26.4",
+    "pandas<=1.3.3",
     "matplotlib<3.5.0",
     "scikit-learn>0.24.1",
     "Django!=4.0.0",
@@ -34,8 +34,8 @@
 
 EXPECTED_DEPENDENCY_MAP = {
     "requests": "==2.26.0",
-    "numpy": "==2.0.0",
-    "pandas": "==2.2.3",
+    "numpy": "==1.26.4",
+    "pandas": "<=1.3.3",
     "matplotlib": "<3.5.0",
     "scikit-learn": ">0.24.1",
     "Django": "!=4.0.0",
diff --git a/tests/unit/sagemaker/serve/detector/test_pickle_dependencies.py b/tests/unit/sagemaker/serve/detector/test_pickle_dependencies.py
index 34cab8a526..ced9555fc5 100644
--- a/tests/unit/sagemaker/serve/detector/test_pickle_dependencies.py
+++ b/tests/unit/sagemaker/serve/detector/test_pickle_dependencies.py
@@ -93,13 +93,14 @@ def create_mock_modules(name, doc, file):
 
 # happy case
 def test_generate_requirements_exact_match(monkeypatch):
-    with patch("cloudpickle.load"), patch("tqdm.tqdm"), patch(
-        "sagemaker.serve.detector.pickle_dependencies.subprocess.run"
-    ) as subprocess_run, patch(
-        "sagemaker.serve.detector.pickle_dependencies.subprocess.Popen"
-    ) as subprocess_popen, patch(
-        "builtins.open"
-    ) as mocked_open, monkeypatch.context() as m:
+    with (
+        patch("cloudpickle.load"),
+        patch("tqdm.tqdm"),
+        patch("sagemaker.serve.detector.pickle_dependencies.subprocess.run") as subprocess_run,
+        patch("sagemaker.serve.detector.pickle_dependencies.subprocess.Popen") as subprocess_popen,
+        patch("builtins.open") as mocked_open,
+        monkeypatch.context() as m,
+    ):
         mock_run_stdout = MagicMock()
         mock_run_stdout.stdout =
json.dumps(INSTALLED_PKG_JSON).encode("utf-8") subprocess_run.return_value = mock_run_stdout @@ -147,13 +148,14 @@ def test_generate_requirements_exact_match(monkeypatch): def test_generate_requirements_txt_pruning_unused_packages(monkeypatch): - with patch("cloudpickle.load"), patch("tqdm.tqdm"), patch( - "sagemaker.serve.detector.pickle_dependencies.subprocess.run" - ) as subprocess_run, patch( - "sagemaker.serve.detector.pickle_dependencies.subprocess.Popen" - ) as subprocess_popen, patch( - "builtins.open" - ) as mocked_open, monkeypatch.context() as m: + with ( + patch("cloudpickle.load"), + patch("tqdm.tqdm"), + patch("sagemaker.serve.detector.pickle_dependencies.subprocess.run") as subprocess_run, + patch("sagemaker.serve.detector.pickle_dependencies.subprocess.Popen") as subprocess_popen, + patch("builtins.open") as mocked_open, + monkeypatch.context() as m, + ): mock_run_stdout = MagicMock() mock_run_stdout.stdout = json.dumps(INSTALLED_PKG_JSON_UNUSED).encode("utf-8") subprocess_run.return_value = mock_run_stdout @@ -201,13 +203,14 @@ def test_generate_requirements_txt_pruning_unused_packages(monkeypatch): def test_generate_requirements_txt_no_currently_used_packages(monkeypatch): - with patch("cloudpickle.load"), patch("tqdm.tqdm"), patch( - "sagemaker.serve.detector.pickle_dependencies.subprocess.run" - ) as subprocess_run, patch( - "sagemaker.serve.detector.pickle_dependencies.subprocess.Popen" - ) as subprocess_popen, patch( - "builtins.open" - ) as mocked_open, monkeypatch.context() as m: + with ( + patch("cloudpickle.load"), + patch("tqdm.tqdm"), + patch("sagemaker.serve.detector.pickle_dependencies.subprocess.run") as subprocess_run, + patch("sagemaker.serve.detector.pickle_dependencies.subprocess.Popen") as subprocess_popen, + patch("builtins.open") as mocked_open, + monkeypatch.context() as m, + ): mock_run_stdout = MagicMock() mock_run_stdout.stdout = json.dumps([]).encode("utf-8") subprocess_run.return_value = mock_run_stdout diff --git a/tests/unit/sagemaker/serve/model_server/djl_serving/test_djl_prepare.py b/tests/unit/sagemaker/serve/model_server/djl_serving/test_djl_prepare.py index 183d15d13e..aa99e1971c 100644 --- a/tests/unit/sagemaker/serve/model_server/djl_serving/test_djl_prepare.py +++ b/tests/unit/sagemaker/serve/model_server/djl_serving/test_djl_prepare.py @@ -52,8 +52,8 @@ def test_create_dir_structure_from_new(self, mock_path, mock_disk_usage, mock_di mock_disk_space.assert_called_once_with(mock_model_path) mock_disk_usage.assert_called_once() - self.assertEquals(ret_model_path, mock_model_path) - self.assertEquals(ret_code_dir, mock_code_dir) + self.assertEqual(ret_model_path, mock_model_path) + self.assertEqual(ret_code_dir, mock_code_dir) @patch("sagemaker.serve.model_server.djl_serving.prepare.Path") def test_create_dir_structure_invalid_path(self, mock_path): @@ -65,7 +65,7 @@ def test_create_dir_structure_invalid_path(self, mock_path): with self.assertRaises(ValueError) as context: _create_dir_structure(mock_model_path) - self.assertEquals("model_dir is not a valid directory", str(context.exception)) + self.assertEqual("model_dir is not a valid directory", str(context.exception)) @patch("sagemaker.serve.model_server.djl_serving.prepare.S3Downloader") @patch("builtins.open", new_callable=mock_open, read_data="data") diff --git a/tests/unit/sagemaker/serve/model_server/multi_model_server/test_multi_model_server_prepare.py b/tests/unit/sagemaker/serve/model_server/multi_model_server/test_multi_model_server_prepare.py index e877c1e7e9..567a72182a 
100644 --- a/tests/unit/sagemaker/serve/model_server/multi_model_server/test_multi_model_server_prepare.py +++ b/tests/unit/sagemaker/serve/model_server/multi_model_server/test_multi_model_server_prepare.py @@ -91,8 +91,8 @@ def test_create_dir_structure_from_new(self, mock_path, mock_disk_usage, mock_di mock_disk_space.assert_called_once_with(mock_model_path) mock_disk_usage.assert_called_once() - self.assertEquals(ret_model_path, mock_model_path) - self.assertEquals(ret_code_dir, mock_code_dir) + self.assertEqual(ret_model_path, mock_model_path) + self.assertEqual(ret_code_dir, mock_code_dir) @patch("sagemaker.serve.model_server.multi_model_server.prepare.Path") def test_create_dir_structure_invalid_path(self, mock_path): @@ -104,4 +104,4 @@ def test_create_dir_structure_invalid_path(self, mock_path): with self.assertRaises(ValueError) as context: _create_dir_structure(mock_model_path) - self.assertEquals("model_dir is not a valid directory", str(context.exception)) + self.assertEqual("model_dir is not a valid directory", str(context.exception)) diff --git a/tests/unit/sagemaker/serve/model_server/tgi/test_tgi_prepare.py b/tests/unit/sagemaker/serve/model_server/tgi/test_tgi_prepare.py index 88d109831d..ed94f10ce9 100644 --- a/tests/unit/sagemaker/serve/model_server/tgi/test_tgi_prepare.py +++ b/tests/unit/sagemaker/serve/model_server/tgi/test_tgi_prepare.py @@ -50,8 +50,8 @@ def test_create_dir_structure_from_new(self, mock_path, mock_disk_usage, mock_di mock_disk_space.assert_called_once_with(mock_model_path) mock_disk_usage.assert_called_once() - self.assertEquals(ret_model_path, mock_model_path) - self.assertEquals(ret_code_dir, mock_code_dir) + self.assertEqual(ret_model_path, mock_model_path) + self.assertEqual(ret_code_dir, mock_code_dir) @patch("sagemaker.serve.model_server.tgi.prepare.Path") def test_create_dir_structure_invalid_path(self, mock_path): @@ -63,7 +63,7 @@ def test_create_dir_structure_invalid_path(self, mock_path): with self.assertRaises(ValueError) as context: _create_dir_structure(mock_model_path) - self.assertEquals("model_dir is not a valid directory", str(context.exception)) + self.assertEqual("model_dir is not a valid directory", str(context.exception)) @patch("sagemaker.serve.model_server.tgi.prepare.S3Downloader") @patch("builtins.open", read_data="data") diff --git a/tests/unit/sagemaker/workflow/test_pipeline.py b/tests/unit/sagemaker/workflow/test_pipeline.py index 14c2d442eb..523b981736 100644 --- a/tests/unit/sagemaker/workflow/test_pipeline.py +++ b/tests/unit/sagemaker/workflow/test_pipeline.py @@ -99,7 +99,7 @@ def test_pipeline_create_and_update_with_config_injection(sagemaker_session_mock RoleArn=pipeline_role_arn, ) pipeline.upsert() - assert sagemaker_session_mock.sagemaker_client.update_pipeline.called_with( + sagemaker_session_mock.sagemaker_client.update_pipeline.assert_called_with( PipelineName="MyPipeline", PipelineDefinition=pipeline.definition(), RoleArn=pipeline_role_arn, @@ -130,7 +130,7 @@ def test_pipeline_create_with_parallelism_config(sagemaker_session_mock, role_ar role_arn=role_arn, parallelism_config=dict(MaxParallelExecutionSteps=10), ) - assert sagemaker_session_mock.sagemaker_client.create_pipeline.called_with( + sagemaker_session_mock.sagemaker_client.create_pipeline.assert_called_with( PipelineName="MyPipeline", PipelineDefinition=pipeline.definition(), RoleArn=role_arn, @@ -149,7 +149,7 @@ def test_pipeline_create_and_start_with_parallelism_config(sagemaker_session_moc role_arn=role_arn, 
parallelism_config=dict(MaxParallelExecutionSteps=10), ) - assert sagemaker_session_mock.sagemaker_client.create_pipeline.called_with( + sagemaker_session_mock.sagemaker_client.create_pipeline.assert_called_with( PipelineName="MyPipeline", PipelineDefinition=pipeline.definition(), RoleArn=role_arn, @@ -168,7 +168,7 @@ def test_pipeline_create_and_start_with_parallelism_config(sagemaker_session_moc # Specify ParallelismConfiguration to another value which will be honored in backend pipeline.start(parallelism_config=dict(MaxParallelExecutionSteps=20)) - assert sagemaker_session_mock.sagemaker_client.start_pipeline_execution.called_with( + sagemaker_session_mock.sagemaker_client.start_pipeline_execution.assert_called_with( PipelineName="MyPipeline", ParallelismConfiguration={"MaxParallelExecutionSteps": 20}, ) @@ -209,7 +209,7 @@ def test_pipeline_update(sagemaker_session_mock, role_arn): assert not pipeline.steps pipeline.update(role_arn=role_arn) assert len(json.loads(pipeline.definition())["Steps"]) == 0 - assert sagemaker_session_mock.sagemaker_client.update_pipeline.called_with( + sagemaker_session_mock.sagemaker_client.update_pipeline.assert_called_with( PipelineName="MyPipeline", PipelineDefinition=pipeline.definition(), RoleArn=role_arn ) @@ -253,7 +253,7 @@ def test_pipeline_update(sagemaker_session_mock, role_arn): pipeline.update(role_arn=role_arn) assert len(json.loads(pipeline.definition())["Steps"]) == 3 - assert sagemaker_session_mock.sagemaker_client.update_pipeline.called_with( + sagemaker_session_mock.sagemaker_client.update_pipeline.assert_called_with( PipelineName="MyPipeline", PipelineDefinition=pipeline.definition(), RoleArn=role_arn ) @@ -345,7 +345,11 @@ def test_pipeline_update_with_parallelism_config(sagemaker_session_mock, role_ar role_arn=role_arn, parallelism_config=dict(MaxParallelExecutionSteps=10), ) - assert sagemaker_session_mock.sagemaker_client.update_pipeline.called_with( + pipeline.update( + role_arn=role_arn, + parallelism_config={"MaxParallelExecutionSteps": 10}, + ) + sagemaker_session_mock.sagemaker_client.update_pipeline.assert_called_with( PipelineName="MyPipeline", PipelineDefinition=pipeline.definition(), RoleArn=role_arn, @@ -418,13 +422,11 @@ def _raise_does_already_exists_client_error(**kwargs): sagemaker_session_mock.sagemaker_client.update_pipeline.assert_called_once_with( PipelineName="MyPipeline", PipelineDefinition=pipeline.definition(), RoleArn=role_arn ) - assert sagemaker_session_mock.sagemaker_client.list_tags.called_with( - ResourceArn="mock_pipeline_arn" - ) + sagemaker_session_mock.sagemaker_client.list_tags.assert_called_with(ResourceArn="pipeline-arn") tags.append({"Key": "dummy", "Value": "dummy_tag"}) - assert sagemaker_session_mock.sagemaker_client.add_tags.called_with( - ResourceArn="mock_pipeline_arn", Tags=tags + sagemaker_session_mock.sagemaker_client.add_tags.assert_called_with( + ResourceArn="pipeline-arn", Tags=tags ) @@ -523,7 +525,7 @@ def test_pipeline_delete(sagemaker_session_mock): sagemaker_session=sagemaker_session_mock, ) pipeline.delete() - assert sagemaker_session_mock.sagemaker_client.delete_pipeline.called_with( + sagemaker_session_mock.sagemaker_client.delete_pipeline.assert_called_with( PipelineName="MyPipeline", ) @@ -536,7 +538,7 @@ def test_pipeline_describe(sagemaker_session_mock): sagemaker_session=sagemaker_session_mock, ) pipeline.describe() - assert sagemaker_session_mock.sagemaker_client.describe_pipeline.called_with( + sagemaker_session_mock.sagemaker_client.describe_pipeline.assert_called_with( 
PipelineName="MyPipeline", ) @@ -552,17 +554,17 @@ def test_pipeline_start(sagemaker_session_mock): sagemaker_session=sagemaker_session_mock, ) pipeline.start() - assert sagemaker_session_mock.start_pipeline_execution.called_with( + sagemaker_session_mock.sagemaker_client.start_pipeline_execution.assert_called_with( PipelineName="MyPipeline", ) pipeline.start(execution_display_name="pipeline-execution") - assert sagemaker_session_mock.start_pipeline_execution.called_with( + sagemaker_session_mock.sagemaker_client.start_pipeline_execution.assert_called_with( PipelineName="MyPipeline", PipelineExecutionDisplayName="pipeline-execution" ) pipeline.start(parameters=dict(alpha="epsilon")) - assert sagemaker_session_mock.start_pipeline_execution.called_with( + sagemaker_session_mock.sagemaker_client.start_pipeline_execution.assert_called_with( PipelineName="MyPipeline", PipelineParameters=[{"Name": "alpha", "Value": "epsilon"}] ) @@ -821,10 +823,8 @@ def test_pipeline_build_parameters_from_execution(sagemaker_session_mock): pipeline_execution_arn=reference_execution_arn, parameter_value_overrides=parameter_value_overrides, ) - assert ( - sagemaker_session_mock.sagemaker_client.list_pipeline_parameters_for_execution.called_with( - PipelineExecutionArn=reference_execution_arn - ) + sagemaker_session_mock.sagemaker_client.list_pipeline_parameters_for_execution.assert_called_with( + PipelineExecutionArn=reference_execution_arn ) assert len(parameters) == 1 assert parameters["TestParameterName"] == "NewParameterValue" @@ -850,10 +850,8 @@ def test_pipeline_build_parameters_from_execution_with_invalid_overrides(sagemak + f"are not present in the pipeline execution: {reference_execution_arn}" in str(error) ) - assert ( - sagemaker_session_mock.sagemaker_client.list_pipeline_parameters_for_execution.called_with( - PipelineExecutionArn=reference_execution_arn - ) + sagemaker_session_mock.sagemaker_client.list_pipeline_parameters_for_execution.assert_called_with( + PipelineExecutionArn=reference_execution_arn ) @@ -908,24 +906,23 @@ def test_pipeline_execution_basics(sagemaker_session_mock): ) execution = pipeline.start() execution.stop() - assert sagemaker_session_mock.sagemaker_client.stop_pipeline_execution.called_with( + sagemaker_session_mock.sagemaker_client.stop_pipeline_execution.assert_called_with( PipelineExecutionArn="my:arn" ) execution.describe() - assert sagemaker_session_mock.sagemaker_client.describe_pipeline_execution.called_with( + sagemaker_session_mock.sagemaker_client.describe_pipeline_execution.assert_called_with( PipelineExecutionArn="my:arn" ) steps = execution.list_steps() - assert sagemaker_session_mock.sagemaker_client.describe_pipeline_execution_steps.called_with( + sagemaker_session_mock.sagemaker_client.list_pipeline_execution_steps.assert_called_with( PipelineExecutionArn="my:arn" ) assert len(steps) == 1 list_parameters_response = execution.list_parameters() - assert ( - sagemaker_session_mock.sagemaker_client.list_pipeline_parameters_for_execution.called_with( - PipelineExecutionArn="my:arn" - ) + sagemaker_session_mock.sagemaker_client.list_pipeline_parameters_for_execution.assert_called_with( + PipelineExecutionArn="my:arn" ) + parameter_list = list_parameters_response["PipelineParameters"] assert len(parameter_list) == 1 assert parameter_list[0]["Name"] == "TestParameterName" diff --git a/tests/unit/test_exception_on_bad_status.py b/tests/unit/test_exception_on_bad_status.py index 2ef017efd3..dc53c97799 100644 --- a/tests/unit/test_exception_on_bad_status.py +++ 
b/tests/unit/test_exception_on_bad_status.py @@ -52,7 +52,7 @@ def test_raise_when_failed_created_package(): False ), "sagemaker.exceptions.UnexpectedStatusException should have been raised but was not" except Exception as e: - assert type(e) == sagemaker.exceptions.UnexpectedStatusException + assert isinstance(e, sagemaker.exceptions.UnexpectedStatusException) assert e.actual_status == "EnRoute" assert "Completed" in e.allowed_statuses @@ -73,7 +73,7 @@ def test_does_raise_when_incorrect_job_status(): False ), "sagemaker.exceptions.UnexpectedStatusException should have been raised but was not" except Exception as e: - assert type(e) == sagemaker.exceptions.UnexpectedStatusException + assert isinstance(e, sagemaker.exceptions.UnexpectedStatusException) assert e.actual_status == "Failed" assert "Completed" in e.allowed_statuses assert "Stopped" in e.allowed_statuses @@ -92,7 +92,7 @@ def test_does_raise_capacity_error_when_incorrect_job_status(): ) assert False, "sagemaker.exceptions.CapacityError should have been raised but was not" except Exception as e: - assert type(e) == sagemaker.exceptions.CapacityError + assert isinstance(e, sagemaker.exceptions.CapacityError) assert e.actual_status == "Failed" assert "Completed" in e.allowed_statuses assert "Stopped" in e.allowed_statuses @@ -114,6 +114,6 @@ def test_raise_when_failed_to_deploy_endpoint(): False ), "sagemaker.exceptions.UnexpectedStatusException should have been raised but was not" except Exception as e: - assert type(e) == sagemaker.exceptions.UnexpectedStatusException + assert isinstance(e, sagemaker.exceptions.UnexpectedStatusException) assert e.actual_status == "Failed" assert "InService" in e.allowed_statuses diff --git a/tests/unit/test_hyperparameter.py b/tests/unit/test_hyperparameter.py index ba7a363c40..edb2de97ee 100644 --- a/tests/unit/test_hyperparameter.py +++ b/tests/unit/test_hyperparameter.py @@ -62,7 +62,7 @@ def test_validated(): def test_data_type(): x = Test() x.validated = 66 - assert type(x.validated) == Test.__dict__["validated"].data_type + assert isinstance(x.validated, Test.__dict__["validated"].data_type) def test_from_string(): diff --git a/tests/unit/test_predictor_async.py b/tests/unit/test_predictor_async.py index fa2d6da6c7..c9f12ff023 100644 --- a/tests/unit/test_predictor_async.py +++ b/tests/unit/test_predictor_async.py @@ -233,7 +233,7 @@ def test_async_predict_call_verify_exceptions(): with pytest.raises( PollingTimeoutError, match=f"No result at {ASYNC_OUTPUT_LOCATION} after polling for " - f"{DEFAULT_WAITER_CONFIG.delay*DEFAULT_WAITER_CONFIG.max_attempts}" + f"{DEFAULT_WAITER_CONFIG.delay * DEFAULT_WAITER_CONFIG.max_attempts}" f" seconds. Inference could still be running", ): predictor_async.predict(input_path=input_location, waiter_config=DEFAULT_WAITER_CONFIG) @@ -253,7 +253,7 @@ def test_async_predict_call_verify_exceptions_with_null_failure_path(): with pytest.raises( PollingTimeoutError, match=f"No result at {ASYNC_OUTPUT_LOCATION} after polling for " - f"{DEFAULT_WAITER_CONFIG.delay*DEFAULT_WAITER_CONFIG.max_attempts}" + f"{DEFAULT_WAITER_CONFIG.delay * DEFAULT_WAITER_CONFIG.max_attempts}" f" seconds. 
Inference could still be running", ): predictor_async.predict(input_path=input_location, waiter_config=DEFAULT_WAITER_CONFIG) diff --git a/tests/unit/test_tuner.py b/tests/unit/test_tuner.py index f0325b79e9..b4d21008b5 100644 --- a/tests/unit/test_tuner.py +++ b/tests/unit/test_tuner.py @@ -46,7 +46,54 @@ from sagemaker.workflow.parameters import ParameterString, ParameterInteger from src.sagemaker.tuner import InstanceConfig -from .tuner_test_utils import * # noqa: F403 +from .tuner_test_utils import ( + BASE_JOB_NAME, + BUCKET_NAME, + CategoricalParameter, + ContinuousParameter, + DATA_DIR, + EARLY_STOPPING_TYPE, + Estimator, + ESTIMATOR, + ESTIMATOR_NAME, + ESTIMATOR_NAME_TWO, + ESTIMATOR_TWO, + FRAMEWORK_VERSION, + HYPERPARAMETER_RANGES, + HYPERPARAMETER_RANGES_TWO, + IMAGE_NAME, + INPUTS, + INSTANCE_COUNT, + INSTANCE_TYPE, + IntegerParameter, + JOB_NAME, + LIST_TAGS_RESULT, + MAX_JOBS, + MAX_PARALLEL_JOBS, + METRIC_DEFINITIONS, + MODEL_DATA, + MULTI_ALGO_TUNING_JOB_DETAILS, + NUM_COMPONENTS, + OBJECTIVE_METRIC_NAME, + OBJECTIVE_METRIC_NAME_TWO, + OBJECTIVE_TYPE, + PCA, + PY_VERSION, + REGION, + ROLE, + SAGEMAKER_SESSION, + SCRIPT_NAME, + STRATEGY, + TAGS, + TRAINING_JOB_DESCRIPTION, + TRAINING_JOB_NAME, + TUNING_JOB_DETAILS, + WarmStartConfig, + WarmStartTypes, + WARM_START_CONFIG, + ENDPOINT_DESC, + ENDPOINT_CONFIG_DESC, +) @pytest.fixture() diff --git a/tox.ini b/tox.ini index 1664da221d..c47d206380 100644 --- a/tox.ini +++ b/tox.ini @@ -5,7 +5,7 @@ [tox] isolated_build = true -envlist = black-format,flake8,pylint,docstyle,sphinx,doc8,twine,py38,py39,py310,py311 +envlist = black-format,flake8,pylint,docstyle,sphinx,doc8,twine,py39,py310,py311,py312 skip_missing_interpreters = False @@ -21,13 +21,13 @@ exclude = tests/data/ venv/ env/ - tests/unit/test_tensorboard.py # excluding this file for time being + tests/unit/test_tensorboard.py max-complexity = 10 ignore = C901, - E203, # whitespace before ':': Black disagrees with and explicitly violates this. + E203, FI10, FI12, FI13, @@ -35,7 +35,7 @@ ignore = FI15, FI16, FI17, - FI18, # __future__ import "annotations" missing -> check only Python 3.7 compatible + FI18, FI50, FI51, FI52, @@ -90,7 +90,10 @@ commands = pytest {posargs} deps = .[test] depends = - {py38,py39,py310,p311}: clean + {py39,py310,py311,py312}: clean + +[testenv:py312] +basepython = python3.12 [testenv:runcoverage] description = run unit tests with coverage @@ -105,6 +108,7 @@ deps = -r requirements/tox/flake8_requirements.txt commands = flake8 +basepython = python3.12 [testenv:pylint] skipdist = true @@ -112,7 +116,7 @@ skip_install = true deps = -r requirements/tox/pylint_requirements.txt commands = - python -m pylint --rcfile=.pylintrc -j 0 src/sagemaker + python -m pylint --rcfile=.pylintrc -j 0 src/sagemaker --fail-under=9.9 [testenv:spelling] skipdist = true @@ -139,7 +143,7 @@ changedir = doc # https://github.com/pypa/pip/issues/988 commands = pip install --exists-action=w -r requirements.txt - sphinx-build -T -W -b html -d _build/doctrees-readthedocs -D language=en . _build/html + sphinx-build -T -b html -d _build/doctrees-readthedocs -D language=en . 
_build/html [testenv:doc8] deps = From 86292e58cb08fb3c0a0b2b5d27258dbd8b4d874a Mon Sep 17 00:00:00 2001 From: Roja Reddy Sareddy Date: Thu, 17 Apr 2025 15:21:43 -0700 Subject: [PATCH 23/27] resolve conflict dependency with numpy 2.0 --- requirements/extras/test_requirements.txt | 2 +- tests/data/serve_resources/mlflow/pytorch/conda.yaml | 2 +- tests/data/serve_resources/mlflow/xgboost/conda.yaml | 2 +- tests/data/serve_resources/mlflow/xgboost/requirements.txt | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/requirements/extras/test_requirements.txt b/requirements/extras/test_requirements.txt index e15c53b8af..59ae23ac5f 100644 --- a/requirements/extras/test_requirements.txt +++ b/requirements/extras/test_requirements.txt @@ -23,7 +23,7 @@ sagemaker-experiments==0.1.35 Jinja2==3.1.6 pyvis==0.2.1 pandas==2.2.3 -scikit-learn==1.4.0 +scikit-learn==1.6.1 cloudpickle==2.2.1 jsonpickle<4.0.0 PyYAML>=6.0.1 diff --git a/tests/data/serve_resources/mlflow/pytorch/conda.yaml b/tests/data/serve_resources/mlflow/pytorch/conda.yaml index e218c7577c..1b646dac0a 100644 --- a/tests/data/serve_resources/mlflow/pytorch/conda.yaml +++ b/tests/data/serve_resources/mlflow/pytorch/conda.yaml @@ -20,5 +20,5 @@ dependencies: - torch>=2.6.0 - torchvision>=0.17.0 - tqdm==4.66.2 - - scikit-learn==1.3.2 + - scikit-learn==1.6.1 name: mlflow-env diff --git a/tests/data/serve_resources/mlflow/xgboost/conda.yaml b/tests/data/serve_resources/mlflow/xgboost/conda.yaml index c18c7341fc..2b1b62d5d3 100644 --- a/tests/data/serve_resources/mlflow/xgboost/conda.yaml +++ b/tests/data/serve_resources/mlflow/xgboost/conda.yaml @@ -9,7 +9,7 @@ dependencies: - numpy>=2.0.0,<2.3.0 - pandas==2.2.1 - psutil==5.9.8 - - scikit-learn==1.3.2 + - scikit-learn==1.6.1 - scipy==1.11.3 - xgboost==1.7.1 name: mlflow-env diff --git a/tests/data/serve_resources/mlflow/xgboost/requirements.txt b/tests/data/serve_resources/mlflow/xgboost/requirements.txt index 7050dc4e4c..6d9bbf9987 100644 --- a/tests/data/serve_resources/mlflow/xgboost/requirements.txt +++ b/tests/data/serve_resources/mlflow/xgboost/requirements.txt @@ -3,6 +3,6 @@ lz4==4.3.2 numpy>=2.0.0,<2.3.0 pandas==2.0.3 psutil==5.9.8 -scikit-learn==1.3.2 +scikit-learn==1.6.1 scipy==1.11.3 xgboost==1.7.1 From 5817b2e51501efb601fb542343d8ae80636bb959 Mon Sep 17 00:00:00 2001 From: Roja Reddy Sareddy Date: Thu, 17 Apr 2025 23:33:53 -0700 Subject: [PATCH 24/27] resolve conflict dependency with numpy 2.0 --- .../remote_function/old_deps_requirements.txt | 2 +- tests/data/remote_function/requirements.txt | 2 +- .../serve_resources/mlflow/pytorch/conda.yaml | 2 +- .../mlflow/pytorch/requirements.txt | 2 +- .../mlflow/tensorflow/conda.yaml | 2 +- .../mlflow/tensorflow/requirements.txt | 2 +- .../serve_resources/mlflow/xgboost/conda.yaml | 4 ++-- .../mlflow/xgboost/requirements.txt | 4 ++-- tests/data/workflow/requirements.txt | 2 +- tests/unit/sagemaker/jumpstart/constants.py | 18 +++++++++--------- .../serve/detector/test_dependency_manager.py | 2 +- 11 files changed, 21 insertions(+), 21 deletions(-) diff --git a/tests/data/remote_function/old_deps_requirements.txt b/tests/data/remote_function/old_deps_requirements.txt index d3bddebad1..a1e6c0bf85 100644 --- a/tests/data/remote_function/old_deps_requirements.txt +++ b/tests/data/remote_function/old_deps_requirements.txt @@ -1 +1 @@ -pandas==1.3.4 +pandas==2.2.3 diff --git a/tests/data/remote_function/requirements.txt b/tests/data/remote_function/requirements.txt index 44ce1d9331..f89caf8c2b 100644 --- 
a/tests/data/remote_function/requirements.txt +++ b/tests/data/remote_function/requirements.txt @@ -1 +1 @@ -scipy==1.11.3 +scipy==1.13.0 diff --git a/tests/data/serve_resources/mlflow/pytorch/conda.yaml b/tests/data/serve_resources/mlflow/pytorch/conda.yaml index 1b646dac0a..b3f6ec147f 100644 --- a/tests/data/serve_resources/mlflow/pytorch/conda.yaml +++ b/tests/data/serve_resources/mlflow/pytorch/conda.yaml @@ -14,7 +14,7 @@ dependencies: - numpy>=2.0.0,<2.3.0 - opt-einsum==3.3.0 - packaging==24.0 - - pandas==2.2.1 + - pandas==2.2.3 - pyyaml==6.0.1 - requests==2.31.0 - torch>=2.6.0 diff --git a/tests/data/serve_resources/mlflow/pytorch/requirements.txt b/tests/data/serve_resources/mlflow/pytorch/requirements.txt index 926353225e..6af3e46cda 100644 --- a/tests/data/serve_resources/mlflow/pytorch/requirements.txt +++ b/tests/data/serve_resources/mlflow/pytorch/requirements.txt @@ -8,7 +8,7 @@ gmpy2==2.1.2 numpy>=2.0.0,<2.3.0 opt-einsum==3.3.0 packaging>=23.0,<25 -pandas==2.2.1 +pandas==2.2.3 pyyaml==6.0.1 requests==2.32.2 torch>=2.6.0 diff --git a/tests/data/serve_resources/mlflow/tensorflow/conda.yaml b/tests/data/serve_resources/mlflow/tensorflow/conda.yaml index 4d2a50079c..619e823706 100644 --- a/tests/data/serve_resources/mlflow/tensorflow/conda.yaml +++ b/tests/data/serve_resources/mlflow/tensorflow/conda.yaml @@ -7,5 +7,5 @@ dependencies: - mlflow==2.16.1 - cloudpickle==2.2.1 - numpy>=2.0.0,<2.3.0 - - tensorflow==2.16.1 + - tensorflow==2.17.0 name: mlflow-env diff --git a/tests/data/serve_resources/mlflow/tensorflow/requirements.txt b/tests/data/serve_resources/mlflow/tensorflow/requirements.txt index 12573c8994..fd6c0b8468 100644 --- a/tests/data/serve_resources/mlflow/tensorflow/requirements.txt +++ b/tests/data/serve_resources/mlflow/tensorflow/requirements.txt @@ -1,4 +1,4 @@ mlflow==2.16.1 cloudpickle==2.2.1 numpy>=2.0.0,<2.3.0 -tensorflow==2.16.1 +tensorflow==2.17.0 diff --git a/tests/data/serve_resources/mlflow/xgboost/conda.yaml b/tests/data/serve_resources/mlflow/xgboost/conda.yaml index 2b1b62d5d3..2e8f2761c7 100644 --- a/tests/data/serve_resources/mlflow/xgboost/conda.yaml +++ b/tests/data/serve_resources/mlflow/xgboost/conda.yaml @@ -7,9 +7,9 @@ dependencies: - mlflow==2.16.1 - lz4==4.3.2 - numpy>=2.0.0,<2.3.0 - - pandas==2.2.1 + - pandas==2.2.3 - psutil==5.9.8 - scikit-learn==1.6.1 - - scipy==1.11.3 + - scipy==1.13.0 - xgboost==1.7.1 name: mlflow-env diff --git a/tests/data/serve_resources/mlflow/xgboost/requirements.txt b/tests/data/serve_resources/mlflow/xgboost/requirements.txt index 6d9bbf9987..dcea3fe1ec 100644 --- a/tests/data/serve_resources/mlflow/xgboost/requirements.txt +++ b/tests/data/serve_resources/mlflow/xgboost/requirements.txt @@ -1,8 +1,8 @@ mlflow==2.16.1 lz4==4.3.2 numpy>=2.0.0,<2.3.0 -pandas==2.0.3 +pandas==2.2.3 psutil==5.9.8 scikit-learn==1.6.1 -scipy==1.11.3 +scipy==1.13.0 xgboost==1.7.1 diff --git a/tests/data/workflow/requirements.txt b/tests/data/workflow/requirements.txt index 44ce1d9331..f89caf8c2b 100644 --- a/tests/data/workflow/requirements.txt +++ b/tests/data/workflow/requirements.txt @@ -1 +1 @@ -scipy==1.11.3 +scipy==1.13.0 diff --git a/tests/unit/sagemaker/jumpstart/constants.py b/tests/unit/sagemaker/jumpstart/constants.py index ae02c597da..90d2e7717e 100644 --- a/tests/unit/sagemaker/jumpstart/constants.py +++ b/tests/unit/sagemaker/jumpstart/constants.py @@ -5361,7 +5361,7 @@ "safetensors==0.3.1", "sagemaker_jumpstart_huggingface_script_utilities==1.1.3", "sagemaker_jumpstart_script_utilities==1.1.9", - "scipy==1.11.1", + 
"scipy==1.13.0", "termcolor==2.3.0", "texttable==1.6.7", "tokenize-rt==5.1.0", @@ -7870,7 +7870,7 @@ "safetensors==0.3.1", "sagemaker_jumpstart_huggingface_script_utilities==1.1.3", "sagemaker_jumpstart_script_utilities==1.1.9", - "scipy==1.11.1", + "scipy==1.13.0", "termcolor==2.3.0", "texttable==1.6.7", "tokenize-rt==5.1.0", @@ -8346,7 +8346,7 @@ "safetensors==0.3.1", "sagemaker_jumpstart_huggingface_script_utilities==1.1.3", "sagemaker_jumpstart_script_utilities==1.1.9", - "scipy==1.11.1", + "scipy==1.13.0", "termcolor==2.3.0", "texttable==1.6.7", "tokenize-rt==5.1.0", @@ -12095,7 +12095,7 @@ "inference_vulnerabilities": [], "training_vulnerable": False, "training_dependencies": [ - "numpy==1.23.1", + "numpy>=2.0.0", "opencv_python==4.7.0.68", "sagemaker_jumpstart_prepack_script_utilities==1.0.0", ], @@ -14360,10 +14360,10 @@ "jmespath==1.0.1", "jsonschema==4.17.3", "multiprocess==0.70.14", - "numpy==1.26.4", + "numpy>=2.0.0", "oscrypto==1.3.0", "packaging==23.1", - "pandas==2.0.2", + "pandas==2.2.3", "pathos==0.3.0", "pkgutil-resolve-name==1.3.10", "platformdirs==3.8.0", @@ -14884,10 +14884,10 @@ "jmespath==1.0.1", "jsonschema==4.17.3", "multiprocess==0.70.14", - "numpy==1.24.3", + "numpy>=2.0.0", "oscrypto==1.3.0", "packaging==23.1", - "pandas==2.0.2", + "pandas==2.2.3", "pathos==0.3.0", "pkgutil-resolve-name==1.3.10", "platformdirs==3.8.0", @@ -17400,7 +17400,7 @@ "safetensors==0.3.1", "sagemaker_jumpstart_huggingface_script_utilities==1.1.4", "sagemaker_jumpstart_script_utilities==1.1.9", - "scipy==1.11.1", + "scipy==1.13.0", "termcolor==2.3.0", "texttable==1.6.7", "tokenize-rt==5.1.0", diff --git a/tests/unit/sagemaker/serve/detector/test_dependency_manager.py b/tests/unit/sagemaker/serve/detector/test_dependency_manager.py index 52e9822e57..6f88e67f8e 100644 --- a/tests/unit/sagemaker/serve/detector/test_dependency_manager.py +++ b/tests/unit/sagemaker/serve/detector/test_dependency_manager.py @@ -21,7 +21,7 @@ DEPENDENCY_LIST = [ "requests==2.26.0", - "numpy==1.26.4", + "numpy>=2.0.0", "pandas<=1.3.3", "matplotlib<3.5.0", "scikit-learn>0.24.1", From 8a68dbb2289ebaf61486009b362a3d5e2c775007 Mon Sep 17 00:00:00 2001 From: Roja Reddy Sareddy Date: Mon, 21 Apr 2025 16:03:20 -0700 Subject: [PATCH 25/27] resolve conflict dependency with numpy 2.0 --- tests/data/serve_resources/mlflow/pytorch/conda.yaml | 2 +- tests/data/serve_resources/mlflow/pytorch/requirements.txt | 2 +- tests/data/serve_resources/mlflow/tensorflow/conda.yaml | 6 +++--- .../data/serve_resources/mlflow/tensorflow/requirements.txt | 6 +++--- tests/data/serve_resources/mlflow/xgboost/conda.yaml | 4 ++-- tests/data/serve_resources/mlflow/xgboost/requirements.txt | 4 ++-- .../sagemaker/serve/detector/test_dependency_manager.py | 2 +- 7 files changed, 13 insertions(+), 13 deletions(-) diff --git a/tests/data/serve_resources/mlflow/pytorch/conda.yaml b/tests/data/serve_resources/mlflow/pytorch/conda.yaml index b3f6ec147f..015cb47850 100644 --- a/tests/data/serve_resources/mlflow/pytorch/conda.yaml +++ b/tests/data/serve_resources/mlflow/pytorch/conda.yaml @@ -4,7 +4,7 @@ dependencies: - python=3.10.13 - pip<=23.3.1 - pip: - - mlflow==2.16.1 + - mlflow>=2.16.1 - astunparse==1.6.3 - cffi==1.16.0 - cloudpickle==2.2.1 diff --git a/tests/data/serve_resources/mlflow/pytorch/requirements.txt b/tests/data/serve_resources/mlflow/pytorch/requirements.txt index 6af3e46cda..101aa2651b 100644 --- a/tests/data/serve_resources/mlflow/pytorch/requirements.txt +++ b/tests/data/serve_resources/mlflow/pytorch/requirements.txt @@ -1,4 +1,4 @@ 
-mlflow==2.16.1 +mlflow>=2.16.1 astunparse==1.6.3 cffi==1.16.0 cloudpickle==2.2.1 diff --git a/tests/data/serve_resources/mlflow/tensorflow/conda.yaml b/tests/data/serve_resources/mlflow/tensorflow/conda.yaml index 619e823706..bdcf8fb0c9 100644 --- a/tests/data/serve_resources/mlflow/tensorflow/conda.yaml +++ b/tests/data/serve_resources/mlflow/tensorflow/conda.yaml @@ -4,8 +4,8 @@ dependencies: - python=3.10.13 - pip<=23.3.1 - pip: - - mlflow==2.16.1 + - mlflow>=2.16.1 - cloudpickle==2.2.1 - - numpy>=2.0.0,<2.3.0 - - tensorflow==2.17.0 + - numpy==2.0.0 + - tensorflow==2.18.0 name: mlflow-env diff --git a/tests/data/serve_resources/mlflow/tensorflow/requirements.txt b/tests/data/serve_resources/mlflow/tensorflow/requirements.txt index fd6c0b8468..9405f52b7f 100644 --- a/tests/data/serve_resources/mlflow/tensorflow/requirements.txt +++ b/tests/data/serve_resources/mlflow/tensorflow/requirements.txt @@ -1,4 +1,4 @@ -mlflow==2.16.1 +mlflow>=2.16.1 cloudpickle==2.2.1 -numpy>=2.0.0,<2.3.0 -tensorflow==2.17.0 +numpy==2.0.0 +tensorflow==2.18.0 diff --git a/tests/data/serve_resources/mlflow/xgboost/conda.yaml b/tests/data/serve_resources/mlflow/xgboost/conda.yaml index 2e8f2761c7..3d899ee271 100644 --- a/tests/data/serve_resources/mlflow/xgboost/conda.yaml +++ b/tests/data/serve_resources/mlflow/xgboost/conda.yaml @@ -4,12 +4,12 @@ dependencies: - python=3.10.13 - pip<=23.3.1 - pip: - - mlflow==2.16.1 + - mlflow>=2.16.1 - lz4==4.3.2 - numpy>=2.0.0,<2.3.0 - pandas==2.2.3 - psutil==5.9.8 - scikit-learn==1.6.1 - scipy==1.13.0 - - xgboost==1.7.1 + - xgboost==2.1.4 name: mlflow-env diff --git a/tests/data/serve_resources/mlflow/xgboost/requirements.txt b/tests/data/serve_resources/mlflow/xgboost/requirements.txt index dcea3fe1ec..1e323aae82 100644 --- a/tests/data/serve_resources/mlflow/xgboost/requirements.txt +++ b/tests/data/serve_resources/mlflow/xgboost/requirements.txt @@ -1,8 +1,8 @@ -mlflow==2.16.1 +mlflow>=2.16.1 lz4==4.3.2 numpy>=2.0.0,<2.3.0 pandas==2.2.3 psutil==5.9.8 scikit-learn==1.6.1 scipy==1.13.0 -xgboost==1.7.1 +xgboost==2.1.4 diff --git a/tests/unit/sagemaker/serve/detector/test_dependency_manager.py b/tests/unit/sagemaker/serve/detector/test_dependency_manager.py index 6f88e67f8e..bf418b44c2 100644 --- a/tests/unit/sagemaker/serve/detector/test_dependency_manager.py +++ b/tests/unit/sagemaker/serve/detector/test_dependency_manager.py @@ -34,7 +34,7 @@ EXPECTED_DEPENDENCY_MAP = { "requests": "==2.26.0", - "numpy": "==1.26.4", + "numpy": ">=2.0.0", "pandas": "<=1.3.3", "matplotlib": "<3.5.0", "scikit-learn": ">0.24.1", From 1d9bd7fdf8d7e727a7075b8489ec66bba4c894e0 Mon Sep 17 00:00:00 2001 From: Roja Reddy Sareddy Date: Tue, 22 Apr 2025 15:02:32 -0700 Subject: [PATCH 26/27] resolve conflict dependency with numpy 2.0 --- requirements/extras/test_requirements.txt | 2 +- tests/data/remote_function/old_deps_requirements.txt | 2 +- tests/data/serve_resources/mlflow/tensorflow/conda.yaml | 4 ++-- tests/data/serve_resources/mlflow/tensorflow/requirements.txt | 2 +- tests/data/serve_resources/mlflow/xgboost/conda.yaml | 4 ++-- tests/data/serve_resources/mlflow/xgboost/requirements.txt | 2 +- tests/unit/sagemaker/mlflow/test_forward_sagemaker_metrics.py | 2 +- 7 files changed, 9 insertions(+), 9 deletions(-) diff --git a/requirements/extras/test_requirements.txt b/requirements/extras/test_requirements.txt index 59ae23ac5f..7d4385c14b 100644 --- a/requirements/extras/test_requirements.txt +++ b/requirements/extras/test_requirements.txt @@ -42,7 +42,7 @@ onnx==1.17.0 nbformat>=5.9,<6 
accelerate>=0.24.1,<=0.27.0 schema==0.7.5 -tensorflow>=2.16.2,<=2.18.0 +tensorflow==2.18.0 mlflow>=2.16.1 huggingface_hub==0.26.2 uvicorn>=0.30.1 diff --git a/tests/data/remote_function/old_deps_requirements.txt b/tests/data/remote_function/old_deps_requirements.txt index a1e6c0bf85..d3bddebad1 100644 --- a/tests/data/remote_function/old_deps_requirements.txt +++ b/tests/data/remote_function/old_deps_requirements.txt @@ -1 +1 @@ -pandas==2.2.3 +pandas==1.3.4 diff --git a/tests/data/serve_resources/mlflow/tensorflow/conda.yaml b/tests/data/serve_resources/mlflow/tensorflow/conda.yaml index bdcf8fb0c9..809565236b 100644 --- a/tests/data/serve_resources/mlflow/tensorflow/conda.yaml +++ b/tests/data/serve_resources/mlflow/tensorflow/conda.yaml @@ -2,10 +2,10 @@ channels: - conda-forge dependencies: - python=3.10.13 -- pip<=23.3.1 +- pip<=24.3 - pip: - mlflow>=2.16.1 - cloudpickle==2.2.1 - - numpy==2.0.0 + - numpy - tensorflow==2.18.0 name: mlflow-env diff --git a/tests/data/serve_resources/mlflow/tensorflow/requirements.txt b/tests/data/serve_resources/mlflow/tensorflow/requirements.txt index 9405f52b7f..80e4952071 100644 --- a/tests/data/serve_resources/mlflow/tensorflow/requirements.txt +++ b/tests/data/serve_resources/mlflow/tensorflow/requirements.txt @@ -1,4 +1,4 @@ mlflow>=2.16.1 cloudpickle==2.2.1 -numpy==2.0.0 +numpy tensorflow==2.18.0 diff --git a/tests/data/serve_resources/mlflow/xgboost/conda.yaml b/tests/data/serve_resources/mlflow/xgboost/conda.yaml index 3d899ee271..3d83c02727 100644 --- a/tests/data/serve_resources/mlflow/xgboost/conda.yaml +++ b/tests/data/serve_resources/mlflow/xgboost/conda.yaml @@ -2,11 +2,11 @@ channels: - conda-forge dependencies: - python=3.10.13 -- pip<=23.3.1 +- pip<=24.3 - pip: - mlflow>=2.16.1 - lz4==4.3.2 - - numpy>=2.0.0,<2.3.0 + - numpy - pandas==2.2.3 - psutil==5.9.8 - scikit-learn==1.6.1 diff --git a/tests/data/serve_resources/mlflow/xgboost/requirements.txt b/tests/data/serve_resources/mlflow/xgboost/requirements.txt index 1e323aae82..6586a64c1a 100644 --- a/tests/data/serve_resources/mlflow/xgboost/requirements.txt +++ b/tests/data/serve_resources/mlflow/xgboost/requirements.txt @@ -1,6 +1,6 @@ mlflow>=2.16.1 lz4==4.3.2 -numpy>=2.0.0,<2.3.0 +numpy pandas==2.2.3 psutil==5.9.8 scikit-learn==1.6.1 diff --git a/tests/unit/sagemaker/mlflow/test_forward_sagemaker_metrics.py b/tests/unit/sagemaker/mlflow/test_forward_sagemaker_metrics.py index 4b53c93ad4..099211bd0d 100644 --- a/tests/unit/sagemaker/mlflow/test_forward_sagemaker_metrics.py +++ b/tests/unit/sagemaker/mlflow/test_forward_sagemaker_metrics.py @@ -48,7 +48,7 @@ def mock_mlflow_client(): def test_encode(): existing_names = set() assert encode("test-name", existing_names) == "test-name" - assert encode("test:name", existing_names) == "test_3a_name" + assert encode("test:name", existing_names) == "test:name" assert encode("test-name", existing_names) == "test-name_1" From bc8ed28d28185eed2ae9416d0cca6c7e5e49ad83 Mon Sep 17 00:00:00 2001 From: Roja Reddy Sareddy Date: Sun, 27 Apr 2025 16:00:03 -0700 Subject: [PATCH 27/27] resolve conflict dependency with numpy 2.0 --- doc/requirements.txt | 2 +- tests/data/serve_resources/mlflow/tensorflow/conda.yaml | 2 +- .../serve_resources/mlflow/tensorflow/requirements.txt | 2 +- tests/data/serve_resources/mlflow/xgboost/conda.yaml | 2 +- .../data/serve_resources/mlflow/xgboost/requirements.txt | 2 +- .../sagemaker/mlflow/test_forward_sagemaker_metrics.py | 9 ++++++++- 6 files changed, 13 insertions(+), 6 deletions(-) diff --git a/doc/requirements.txt 
b/doc/requirements.txt index 71a95f7633..b4241e06a4 100644 --- a/doc/requirements.txt +++ b/doc/requirements.txt @@ -1,7 +1,7 @@ sphinx==5.1.1 sphinx-rtd-theme==0.5.0 docutils==0.15.2 -packaging==20.9 +packaging>=23.0,<25 jinja2==3.1.6 schema==0.7.5 accelerate>=0.24.1,<=0.27.0 diff --git a/tests/data/serve_resources/mlflow/tensorflow/conda.yaml b/tests/data/serve_resources/mlflow/tensorflow/conda.yaml index 809565236b..bed6a78250 100644 --- a/tests/data/serve_resources/mlflow/tensorflow/conda.yaml +++ b/tests/data/serve_resources/mlflow/tensorflow/conda.yaml @@ -6,6 +6,6 @@ dependencies: - pip: - mlflow>=2.16.1 - cloudpickle==2.2.1 - - numpy + - numpy>=1.26.4 - tensorflow==2.18.0 name: mlflow-env diff --git a/tests/data/serve_resources/mlflow/tensorflow/requirements.txt b/tests/data/serve_resources/mlflow/tensorflow/requirements.txt index 80e4952071..1e056aa3f2 100644 --- a/tests/data/serve_resources/mlflow/tensorflow/requirements.txt +++ b/tests/data/serve_resources/mlflow/tensorflow/requirements.txt @@ -1,4 +1,4 @@ mlflow>=2.16.1 cloudpickle==2.2.1 -numpy +numpy>=1.26.4 tensorflow==2.18.0 diff --git a/tests/data/serve_resources/mlflow/xgboost/conda.yaml b/tests/data/serve_resources/mlflow/xgboost/conda.yaml index 3d83c02727..02f96238b2 100644 --- a/tests/data/serve_resources/mlflow/xgboost/conda.yaml +++ b/tests/data/serve_resources/mlflow/xgboost/conda.yaml @@ -6,7 +6,7 @@ dependencies: - pip: - mlflow>=2.16.1 - lz4==4.3.2 - - numpy + - numpy>=1.26.4 - pandas==2.2.3 - psutil==5.9.8 - scikit-learn==1.6.1 diff --git a/tests/data/serve_resources/mlflow/xgboost/requirements.txt b/tests/data/serve_resources/mlflow/xgboost/requirements.txt index 6586a64c1a..49aeff45a1 100644 --- a/tests/data/serve_resources/mlflow/xgboost/requirements.txt +++ b/tests/data/serve_resources/mlflow/xgboost/requirements.txt @@ -1,6 +1,6 @@ mlflow>=2.16.1 lz4==4.3.2 -numpy +numpy>=1.26.4 pandas==2.2.3 psutil==5.9.8 scikit-learn==1.6.1 diff --git a/tests/unit/sagemaker/mlflow/test_forward_sagemaker_metrics.py b/tests/unit/sagemaker/mlflow/test_forward_sagemaker_metrics.py index 099211bd0d..c1c6d67a8b 100644 --- a/tests/unit/sagemaker/mlflow/test_forward_sagemaker_metrics.py +++ b/tests/unit/sagemaker/mlflow/test_forward_sagemaker_metrics.py @@ -188,6 +188,7 @@ def getenv_side_effect(arg, default=None): Mock(spec=requests.Response), Mock(spec=requests.Response), ], + "https://test.sagemaker.aws/api/2.0/mlflow/runs/update": Mock(spec=requests.Response), "https://test.sagemaker.aws/api/2.0/mlflow/runs/terminate": Mock(spec=requests.Response), } @@ -215,6 +216,11 @@ def getenv_side_effect(arg, default=None): mock_response.status_code = 200 mock_response.text = json.dumps({}) + mock_responses["https://test.sagemaker.aws/api/2.0/mlflow/runs/update"].status_code = 200 + mock_responses["https://test.sagemaker.aws/api/2.0/mlflow/runs/update"].text = json.dumps( + {"run_id": "test_run_id", "status": "FINISHED"} + ) + mock_responses["https://test.sagemaker.aws/api/2.0/mlflow/runs/terminate"].status_code = 200 mock_responses["https://test.sagemaker.aws/api/2.0/mlflow/runs/terminate"].text = json.dumps({}) @@ -222,6 +228,7 @@ def getenv_side_effect(arg, default=None): mock_responses["https://test.sagemaker.aws/api/2.0/mlflow/experiments/get-by-name"], mock_responses["https://test.sagemaker.aws/api/2.0/mlflow/runs/create"], *mock_responses["https://test.sagemaker.aws/api/2.0/mlflow/runs/log-batch"], + mock_responses["https://test.sagemaker.aws/api/2.0/mlflow/runs/update"], 
mock_responses["https://test.sagemaker.aws/api/2.0/mlflow/runs/terminate"], ] @@ -231,7 +238,7 @@ def getenv_side_effect(arg, default=None): log_to_mlflow(metrics, params, tags) - assert mock_request.call_count == 6 # Total number of API calls + assert mock_request.call_count == 7 # Total number of API calls @patch("sagemaker.mlflow.forward_sagemaker_metrics.get_training_job_details")
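
A note on the recurring test rewrites in this series, with a minimal sketch below (the `client` mock and pipeline names are illustrative, not taken from the SDK):

`unittest.mock.Mock` auto-creates attributes on first access, so the old pattern `assert mock.called_with(...)` can never fail: `called_with` is not part of the Mock assertion API, the attribute lookup simply returns a new, truthy child Mock (recent CPython releases reject some assert-like misspellings, but `called_with` historically slips through). The real methods, `assert_called_with` and `assert_called_once_with`, raise `AssertionError` on a mismatch, which is why every such call site is rewritten above. In the same spirit, `assertEquals` is a long-deprecated alias that Python 3.12 (newly added to the tox envlist) removes outright, and `type(e) == SomeError` rejects subclasses where `isinstance(e, SomeError)` accepts them.

```python
from unittest.mock import Mock

client = Mock()
client.create_pipeline(PipelineName="MyPipeline")

# Old pattern: 'called_with' is not a real Mock method. Attribute access on a
# Mock returns a new, truthy child Mock, so this "assertion" passes even
# though the recorded arguments do not match.
assert client.create_pipeline.called_with(PipelineName="WrongName")

# Fixed pattern: assert_called_with is the actual API; it raises
# AssertionError when the recorded call does not match.
client.create_pipeline.assert_called_with(PipelineName="MyPipeline")
```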