From 9a24f4d08790b5116274c29acb869820a4819082 Mon Sep 17 00:00:00 2001
From: Emmanuel Ferdman
Date: Fri, 23 May 2025 03:10:31 -0700
Subject: [PATCH] Migrate to correct logger interface

`Logger.warn` is a deprecated alias of `Logger.warning`. Replace the
remaining `logger.warn` call sites with `logger.warning`.

Signed-off-by: Emmanuel Ferdman
---
 torchrec/distributed/benchmark/benchmark_utils.py | 2 +-
 torchrec/distributed/mc_modules.py                | 4 ++--
 torchrec/distributed/planner/enumerators.py       | 4 ++--
 torchrec/distributed/train_pipeline/utils.py      | 2 +-
 torchrec/metrics/throughput.py                    | 2 +-
 torchrec/sparse/jagged_tensor.py                  | 2 +-
 6 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/torchrec/distributed/benchmark/benchmark_utils.py b/torchrec/distributed/benchmark/benchmark_utils.py
index c407c640f..1f4babdbe 100644
--- a/torchrec/distributed/benchmark/benchmark_utils.py
+++ b/torchrec/distributed/benchmark/benchmark_utils.py
@@ -498,7 +498,7 @@ def rtf(**kwargs):
         if field.name not in names:
             names.add(field.name)
         else:
-            logger.warn(f"WARNING: duplicate argument {field.name}")
+            logger.warning(f"WARNING: duplicate argument {field.name}")
             continue
         rtf = click.option(
             f"--{field.name}", type=field.type, default=field.default
diff --git a/torchrec/distributed/mc_modules.py b/torchrec/distributed/mc_modules.py
index 34e4ac672..738e0c931 100644
--- a/torchrec/distributed/mc_modules.py
+++ b/torchrec/distributed/mc_modules.py
@@ -413,7 +413,7 @@ def _create_managed_collision_modules(
             ), f"Shared feature is not supported. {num_sharding_features=}, {self._sharding_per_table_feature_splits[-1]=}"

             if self._sharding_features[-1] != sharding.feature_names():
-                logger.warn(
+                logger.warning(
                     "The order of tables of this sharding is altered due to grouping: "
                     f"{self._sharding_features[-1]=} vs {sharding.feature_names()=}"
                 )
@@ -1105,7 +1105,7 @@ def _create_managed_collision_modules(
             ), f"Shared feature is not supported. {num_sharding_features=}, {self._sharding_per_table_feature_splits[-1]=}"

             if self._sharding_features[-1] != sharding.feature_names():
-                logger.warn(
+                logger.warning(
                     "The order of tables of this sharding is altered due to grouping: "
                     f"{self._sharding_features[-1]=} vs {sharding.feature_names()=}"
                 )
diff --git a/torchrec/distributed/planner/enumerators.py b/torchrec/distributed/planner/enumerators.py
index 7676c5d45..2fba6dfe8 100644
--- a/torchrec/distributed/planner/enumerators.py
+++ b/torchrec/distributed/planner/enumerators.py
@@ -253,7 +253,7 @@ def _filter_sharding_types(
             set(constrained_sharding_types) & set(allowed_sharding_types)
         )
         if not filtered_sharding_types:
-            logger.warn(
+            logger.warning(
                 "No available sharding types after applying user provided "
                 f"constraints for {name}. Constrained sharding types: "
                 f"{constrained_sharding_types}, allowed sharding types: "
@@ -300,7 +300,7 @@ def _filter_compute_kernels(
             filtered_compute_kernels.remove(EmbeddingComputeKernel.DENSE.value)

         if not filtered_compute_kernels:
-            logger.warn(
+            logger.warning(
                 "No available compute kernels after applying user provided "
                 f"constraints for {name}. Constrained compute kernels: "
                 f"{constrained_compute_kernels}, allowed compute kernels: "
diff --git a/torchrec/distributed/train_pipeline/utils.py b/torchrec/distributed/train_pipeline/utils.py
index 74337514c..fb1477884 100644
--- a/torchrec/distributed/train_pipeline/utils.py
+++ b/torchrec/distributed/train_pipeline/utils.py
@@ -1583,7 +1583,7 @@ def _rewrite_model(  # noqa C901
         input_model.module = graph_model

     if non_pipelined_sharded_modules:
-        logger.warn(
+        logger.warning(
             "Sharded modules were not pipelined: %s. "
             + "This should be fixed for pipelining to work to the full extent.",
             ", ".join(non_pipelined_sharded_modules),
diff --git a/torchrec/metrics/throughput.py b/torchrec/metrics/throughput.py
index 758250426..edbaec119 100644
--- a/torchrec/metrics/throughput.py
+++ b/torchrec/metrics/throughput.py
@@ -99,7 +99,7 @@ def __init__(
             )

         if window_seconds > MAX_WINDOW_TS:
-            logger.warn(
+            logger.warning(
                 f"window_seconds is greater than {MAX_WINDOW_TS}, capping to {MAX_WINDOW_TS} to make sure window_qps is not staled"
             )
             window_seconds = MAX_WINDOW_TS
diff --git a/torchrec/sparse/jagged_tensor.py b/torchrec/sparse/jagged_tensor.py
index 2bbe09149..67117684e 100644
--- a/torchrec/sparse/jagged_tensor.py
+++ b/torchrec/sparse/jagged_tensor.py
@@ -2704,7 +2704,7 @@ def to_dict(self) -> Dict[str, JaggedTensor]:
             Dict[str, JaggedTensor]: dictionary of JaggedTensor for each key.
         """
         if not torch.jit.is_scripting() and is_non_strict_exporting():
-            logger.warn(
+            logger.warning(
                 "Trying to non-strict torch.export KJT to_dict, which is extremely slow and not recommended!"
            )
             _jt_dict = _maybe_compute_kjt_to_jt_dict(