diff --git a/torchrec/distributed/mc_modules.py b/torchrec/distributed/mc_modules.py
index fc326d7d6..2634c3983 100644
--- a/torchrec/distributed/mc_modules.py
+++ b/torchrec/distributed/mc_modules.py
@@ -416,7 +416,7 @@ def _create_managed_collision_modules(
             ), f"Shared feature is not supported. {num_sharding_features=}, {self._sharding_per_table_feature_splits[-1]=}"

             if self._sharding_features[-1] != sharding.feature_names():
-                logger.warn(
+                logger.warning(
                     "The order of tables of this sharding is altered due to grouping: "
                     f"{self._sharding_features[-1]=} vs {sharding.feature_names()=}"
                 )
@@ -1122,7 +1122,7 @@ def _create_managed_collision_modules(
             ), f"Shared feature is not supported. {num_sharding_features=}, {self._sharding_per_table_feature_splits[-1]=}"

             if self._sharding_features[-1] != sharding.feature_names():
-                logger.warn(
+                logger.warning(
                     "The order of tables of this sharding is altered due to grouping: "
                     f"{self._sharding_features[-1]=} vs {sharding.feature_names()=}"
                 )
diff --git a/torchrec/distributed/planner/enumerators.py b/torchrec/distributed/planner/enumerators.py
index 472e3d6e6..9a4cbe4b9 100644
--- a/torchrec/distributed/planner/enumerators.py
+++ b/torchrec/distributed/planner/enumerators.py
@@ -255,7 +255,7 @@ def _filter_sharding_types(
             set(constrained_sharding_types) & set(allowed_sharding_types)
         )
         if not filtered_sharding_types:
-            logger.warn(
+            logger.warning(
                 "No available sharding types after applying user provided "
                 f"constraints for {name}. Constrained sharding types: "
                 f"{constrained_sharding_types}, allowed sharding types: "
@@ -302,7 +302,7 @@ def _filter_compute_kernels(
             filtered_compute_kernels.remove(EmbeddingComputeKernel.DENSE.value)

         if not filtered_compute_kernels:
-            logger.warn(
+            logger.warning(
                 "No available compute kernels after applying user provided "
                 f"constraints for {name}. Constrained compute kernels: "
                 f"{constrained_compute_kernels}, allowed compute kernels: "
diff --git a/torchrec/distributed/train_pipeline/utils.py b/torchrec/distributed/train_pipeline/utils.py
index a61609eae..de030ad46 100644
--- a/torchrec/distributed/train_pipeline/utils.py
+++ b/torchrec/distributed/train_pipeline/utils.py
@@ -402,7 +402,7 @@ def _rewrite_model(  # noqa C901
         input_model.module = graph_model

     if non_pipelined_sharded_modules:
-        logger.warn(
+        logger.warning(
             "Sharded modules were not pipelined: %s. "
             + "This should be fixed for pipelining to work to the full extent.",
             ", ".join(non_pipelined_sharded_modules),
diff --git a/torchrec/metrics/throughput.py b/torchrec/metrics/throughput.py
index 758250426..edbaec119 100644
--- a/torchrec/metrics/throughput.py
+++ b/torchrec/metrics/throughput.py
@@ -99,7 +99,7 @@ def __init__(
         )

         if window_seconds > MAX_WINDOW_TS:
-            logger.warn(
+            logger.warning(
                 f"window_seconds is greater than {MAX_WINDOW_TS}, capping to {MAX_WINDOW_TS} to make sure window_qps is not staled"
             )
             window_seconds = MAX_WINDOW_TS
diff --git a/torchrec/sparse/jagged_tensor.py b/torchrec/sparse/jagged_tensor.py
index c4d48ec5b..37a411836 100644
--- a/torchrec/sparse/jagged_tensor.py
+++ b/torchrec/sparse/jagged_tensor.py
@@ -2689,7 +2689,7 @@ def to_dict(self) -> Dict[str, JaggedTensor]:
             Dict[str, JaggedTensor]: dictionary of JaggedTensor for each key.
         """
         if not torch.jit.is_scripting() and is_non_strict_exporting():
-            logger.warn(
+            logger.warning(
                 "Trying to non-strict torch.export KJT to_dict, which is extremely slow and not recommended!"
             )
         _jt_dict = _maybe_compute_kjt_to_jt_dict(
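
Background on the API touched above: in the standard-library `logging` module, `Logger.warn` is an undocumented, deprecated alias of `Logger.warning` (it emits a `DeprecationWarning` on Python 3.4-3.12 and is removed in 3.13), so `logger.warning(...)` produces the same log record without the deprecation noise. A minimal sketch of the difference; the logger name `torchrec.example` and the message text are placeholders, not taken from the diff:

```python
import logging
import warnings

logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger("torchrec.example")  # hypothetical logger name

# Preferred spelling: emits a WARNING-level record.
logger.warning("window_seconds capped to MAX_WINDOW_TS")

# Deprecated alias (removed in Python 3.13), guarded so the sketch still runs there.
if hasattr(logger, "warn"):
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        # Same WARNING record as above, plus a DeprecationWarning.
        logger.warn("window_seconds capped to MAX_WINDOW_TS")
    print([type(w.message).__name__ for w in caught])  # e.g. ['DeprecationWarning']
```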