
Commit 4b3f60c

Migrate to correct logger interface (#3123)

Summary:
Pull Request resolved: #3123

## PR Summary

This small PR resolves the annoying deprecation warnings from Python's standard `logging` module:

```python
DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead
```

See the [CI logs](https://github.com/pytorch/torchrec/actions/runs/15201361250/job/42755956905#step:15:5170) for those warnings.

Pull Request resolved: #2996

Reviewed By: spmex

Differential Revision: D77035942

Pulled By: TroyGarden

fbshipit-source-id: 36f4915ddde3bfc2204f95747434a0a55ed911a5
1 parent 341768d commit 4b3f60c
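
For context, `Logger.warn` is documented as a deprecated alias of `Logger.warning` in CPython's `logging` module since Python 3.3; calling it still logs the record but first raises the deprecation warning quoted above. A minimal sketch reproducing the warning and the fix (the logger name is illustrative, not from torchrec):

```python
import logging
import warnings

logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger("torchrec.example")  # illustrative name

# DeprecationWarning is hidden by default outside __main__; surface it here.
warnings.simplefilter("always", DeprecationWarning)

# Deprecated alias: triggers "DeprecationWarning: The 'warn' method is
# deprecated, use 'warning' instead" before logging the record.
logger.warn("old spelling")

# Correct interface: logs the same WARNING-level record, no deprecation noise.
logger.warning("new spelling")
```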

File tree

7 files changed: +12 −10


torchrec/distributed/mc_modules.py

Lines changed: 2 additions & 2 deletions

```diff
@@ -416,7 +416,7 @@ def _create_managed_collision_modules(
         ), f"Shared feature is not supported. {num_sharding_features=}, {self._sharding_per_table_feature_splits[-1]=}"

         if self._sharding_features[-1] != sharding.feature_names():
-            logger.warn(
+            logger.warning(
                 "The order of tables of this sharding is altered due to grouping: "
                 f"{self._sharding_features[-1]=} vs {sharding.feature_names()=}"
             )
@@ -1122,7 +1122,7 @@ def _create_managed_collision_modules(
         ), f"Shared feature is not supported. {num_sharding_features=}, {self._sharding_per_table_feature_splits[-1]=}"

         if self._sharding_features[-1] != sharding.feature_names():
-            logger.warn(
+            logger.warning(
                 "The order of tables of this sharding is altered due to grouping: "
                 f"{self._sharding_features[-1]=} vs {sharding.feature_names()=}"
             )
```

torchrec/distributed/model_tracker/model_delta_tracker.py

Lines changed: 3 additions & 1 deletion

```diff
@@ -100,7 +100,9 @@ def __init__(
         for fqn, feature_names in self._fqn_to_feature_map.items():
             for feature_name in feature_names:
                 if feature_name in self.feature_to_fqn:
-                    logger.warn(f"Duplicate feature name: {feature_name} in fqn {fqn}")
+                    logger.warning(
+                        f"Duplicate feature name: {feature_name} in fqn {fqn}"
+                    )
                     continue
                 self.feature_to_fqn[feature_name] = fqn
         logger.info(f"feature_to_fqn: {self.feature_to_fqn}")
```

torchrec/distributed/planner/enumerators.py

Lines changed: 2 additions & 2 deletions

```diff
@@ -279,7 +279,7 @@ def _filter_sharding_types(
             set(constrained_sharding_types) & set(allowed_sharding_types)
         )
         if not filtered_sharding_types:
-            logger.warn(
+            logger.warning(
                 "No available sharding types after applying user provided "
                 f"constraints for {name}. Constrained sharding types: "
                 f"{constrained_sharding_types}, allowed sharding types: "
@@ -326,7 +326,7 @@ def _filter_compute_kernels(
         filtered_compute_kernels.remove(EmbeddingComputeKernel.DENSE.value)

         if not filtered_compute_kernels:
-            logger.warn(
+            logger.warning(
                 "No available compute kernels after applying user provided "
                 f"constraints for {name}. Constrained compute kernels: "
                 f"{constrained_compute_kernels}, allowed compute kernels: "
```

torchrec/distributed/planner/tests/test_enumerators.py

Lines changed: 2 additions & 2 deletions

```diff
@@ -731,7 +731,7 @@ def test_filter_sharding_types_mch_ebc_no_available(self) -> None:
         )

         sharder = ManagedCollisionEmbeddingBagCollectionSharder()
-        with self.assertWarns(Warning):
+        with self.assertLogs(level="WARNING"):
             allowed_sharding_types = enumerator._filter_sharding_types(
                 "table_0", sharder.sharding_types("cuda")
             )
@@ -811,7 +811,7 @@ def test_filter_compute_kernels_mch_ebc_no_available(self) -> None:

         sharder = ManagedCollisionEmbeddingBagCollectionSharder()
         sharding_type = ShardingType.ROW_WISE.value
-        with self.assertWarns(Warning):
+        with self.assertLogs(level="WARNING"):
             allowed_compute_kernels = enumerator._filter_compute_kernels(
                 "table_0", sharder.compute_kernels(sharding_type, "cuda"), sharding_type
             )
```
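
Why the test change is needed: `logger.warn` happened to raise a `DeprecationWarning` through the `warnings` module, which is likely what `assertWarns(Warning)` was catching; `logger.warning` only emits a log record and raises no Python warning, so the tests now assert on log output with `assertLogs` instead. A minimal sketch of the distinction, with a hypothetical `Emitter` class standing in for the planner code:

```python
import logging
import unittest


class Emitter:
    """Hypothetical stand-in for code that reports a problem via logging."""

    logger = logging.getLogger("emitter")

    def run(self) -> None:
        # Emits a log record only: nothing goes through the warnings module,
        # so unittest's assertWarns would fail here.
        self.logger.warning("no available sharding types")


class TestEmitter(unittest.TestCase):
    def test_warning_is_logged(self) -> None:
        # assertLogs captures records at WARNING level or above; with no
        # logger argument it watches the root logger, to which records from
        # child loggers like "emitter" propagate.
        with self.assertLogs(level="WARNING") as captured:
            Emitter().run()
        self.assertIn("no available sharding types", captured.output[0])


if __name__ == "__main__":
    unittest.main()
```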

torchrec/distributed/train_pipeline/utils.py

Lines changed: 1 addition & 1 deletion

```diff
@@ -402,7 +402,7 @@ def _rewrite_model(  # noqa C901
     input_model.module = graph_model

     if non_pipelined_sharded_modules:
-        logger.warn(
+        logger.warning(
            "Sharded modules were not pipelined: %s. "
            + "This should be fixed for pipelining to work to the full extent.",
            ", ".join(non_pipelined_sharded_modules),
```

torchrec/metrics/throughput.py

Lines changed: 1 addition & 1 deletion

```diff
@@ -99,7 +99,7 @@ def __init__(
         )

         if window_seconds > MAX_WINDOW_TS:
-            logger.warn(
+            logger.warning(
                 f"window_seconds is greater than {MAX_WINDOW_TS}, capping to {MAX_WINDOW_TS} to make sure window_qps is not staled"
             )
             window_seconds = MAX_WINDOW_TS
```

torchrec/sparse/jagged_tensor.py

Lines changed: 1 addition & 1 deletion

```diff
@@ -2688,7 +2688,7 @@ def to_dict(self) -> Dict[str, JaggedTensor]:
             Dict[str, JaggedTensor]: dictionary of JaggedTensor for each key.
         """
         if not torch.jit.is_scripting() and is_non_strict_exporting():
-            logger.warn(
+            logger.warning(
                 "Trying to non-strict torch.export KJT to_dict, which is extremely slow and not recommended!"
             )
         _jt_dict = _maybe_compute_kjt_to_jt_dict(
```
