Commit ef04ad9

iden-kalemaj authored and facebook-github-bot committed
allow multi gpu tests to be run (#765)
Summary: Pull Request resolved: #765

Tests were always skipped since their name does not follow the `test_*.py` or `*_test.py` format. Instead, skip tests in CPU/single-GPU environments using the `unittest.skipIf` decorator.

Reviewed By: HuanyuZhang

Differential Revision: D76141869

fbshipit-source-id: b8009714e02e702f865012d5606c2de05265f27c
1 parent f3752c3 commit ef04ad9
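
The mechanism is the standard `unittest.skipIf` decorator: `torch.cuda.device_count()` is evaluated once when the module is imported, and on CPU or single-GPU machines the test is reported as skipped with a reason rather than silently dropped by file-name-based collection. A minimal sketch of the pattern (the class and method names below are illustrative, not from the repo):

import unittest

import torch

class MultiGPUSmokeTest(unittest.TestCase):
    # Hypothetical example of the gating pattern used in this commit.
    @unittest.skipIf(torch.cuda.device_count() < 2, "Need at least 2 GPUs")
    def test_runs_on_two_gpus(self) -> None:
        # Body executes only on hosts with 2+ visible CUDA devices;
        # elsewhere the runner records an explicit skip with the reason.
        self.assertGreaterEqual(torch.cuda.device_count(), 2)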

2 files changed: 5 additions & 3 deletions


opacus/tests/multigpu_adaptive_clipping.py renamed to opacus/tests/multigpu_adaptive_clipping_test.py

Lines changed: 3 additions & 2 deletions

@@ -67,7 +67,7 @@ def forward(self, x):
 
 def demo_basic(rank, weight, world_size, dp):
     torch.manual_seed(world_size)
-    batch_size = 32
+    batch_size = 2
     setup(rank, world_size)
 
     # create model and move it to GPU with id rank
@@ -129,7 +129,8 @@ def run_demo(demo_fn, weight, world_size, dp):
 
 
 class GradientComputationTestAdaptiveClipping(unittest.TestCase):
-    def test_gradient_correct_adaptive(self) -> None:
+    @unittest.skipIf(torch.cuda.device_count() < 2, "Need at least 2 GPUs")
+    def test_adaptive_gradient_correct(self) -> None:
         # Tests that gradient is the same with DP or without DP in the distributed setting
         n_gpus = torch.cuda.device_count()
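
Both test files call `setup(rank, world_size)` before building the model. That helper is not shown in this diff; a minimal sketch of the conventional torch.distributed bootstrap it presumably wraps (the rendezvous address and port below are illustrative assumptions, not taken from the repo):

import os

import torch.distributed as dist

def setup(rank: int, world_size: int) -> None:
    # Point every process at the same rendezvous address, then join
    # the process group under this process's rank. NCCL is the usual
    # backend for GPU collectives; gloo works for CPU-only runs.
    os.environ["MASTER_ADDR"] = "localhost"
    os.environ["MASTER_PORT"] = "12355"
    dist.init_process_group("nccl", rank=rank, world_size=world_size)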

opacus/tests/multigpu_gradcheck.py renamed to opacus/tests/multigpu_gradcheck_test.py

Lines changed: 2 additions & 1 deletion

@@ -112,7 +112,7 @@ def run_ghost_clipping_test(
 
 def demo_basic(rank, weight, world_size, dp, clipping, grad_sample_mode):
     torch.manual_seed(world_size)
-    batch_size = 32
+    batch_size = 2
     setup(rank, world_size)
 
     # create model and move it to GPU with id rank
@@ -188,6 +188,7 @@ def run_demo(demo_fn, weight, world_size, dp, clipping, grad_sample_mode):
 
 
 class GradientComputationTest(unittest.TestCase):
+    @unittest.skipIf(torch.cuda.device_count() < 2, "Need at least 2 GPUs")
     def test_gradient_correct(self) -> None:
        # Tests that gradient is the same with DP or without DDP
        n_gpus = torch.cuda.device_count()
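
The `run_demo` helpers referenced in the hunk headers launch `demo_basic` once per GPU. A sketch of the standard `torch.multiprocessing.spawn` pattern they presumably follow (argument list abbreviated to the adaptive-clipping variant):

import torch.multiprocessing as mp

def run_demo(demo_fn, weight, world_size: int, dp: bool) -> None:
    # Spawn world_size processes; each receives its rank as the first
    # positional argument followed by the args tuple, and join=True
    # blocks until every rank finishes (or one of them raises).
    mp.spawn(demo_fn, args=(weight, world_size, dp), nprocs=world_size, join=True)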
