Skip to content

Commit

Permalink
Merge pull request #189 from stanfordnlp/gemma
Browse files Browse the repository at this point in the history
[P1] Update the version requirement for transformers and its dependencies.
  • Loading branch information
frankaging authored Oct 8, 2024
2 parents c46e80c + 7446e37 commit 2242266
Show file tree
Hide file tree
Showing 4 changed files with 15 additions and 10 deletions.
9 changes: 5 additions & 4 deletions requirements.txt
Original file line number Diff line number Diff line change
@@ -1,12 +1,13 @@
torch>=2.0.0
transformers==4.40.2
datasets>=2.16.1
transformers==4.45.1
tokenizers>=0.20.0
datasets>=3.0.1
protobuf>=3.20.0
matplotlib>=3.7.4
ipywidgets>=8.1.1
plotnine>=0.12.4
huggingface-hub>=0.24.0
huggingface-hub>=0.25.1
numpy>=1.23.5
fsspec>=2023.6.0
accelerate>=0.29.1
accelerate>=0.34.2
sentencepiece>=0.1.96
Original file line number Diff line number Diff line change
Expand Up @@ -63,10 +63,11 @@ def test_clean_run_positive(self):
base = {"input_ids": torch.randint(0, 10, (10, 5)).to(self.device)}
golden_out = self.gpt2(**base).logits
our_output = intervenable(base, output_original_output=True)[0][0]
self.assertTrue(torch.allclose(golden_out, our_output))
self.assertTrue(torch.allclose(golden_out, our_output, rtol=1e-05, atol=1e-06))
# make sure the toolkit also works
self.assertTrue(
torch.allclose(GPT2_RUN(self.gpt2, base["input_ids"], {}, {}), golden_out)
torch.allclose(GPT2_RUN(self.gpt2, base["input_ids"], {}, {}), golden_out,
rtol=1e-05, atol=1e-06)
)

def _test_subspace_partition_in_forward(self, intervention_type):
Expand Down Expand Up @@ -134,7 +135,8 @@ def _test_subspace_partition_in_forward(self, intervention_type):
# make sure the toolkit also works
self.assertTrue(
torch.allclose(
with_partition_our_output[0], without_partition_our_output[0]
with_partition_our_output[0], without_partition_our_output[0],
rtol=1e-05, atol=1e-06
)
)

Expand Down
3 changes: 2 additions & 1 deletion tests/integration_tests/IntervenableBasicTestCase.py
Original file line number Diff line number Diff line change
Expand Up @@ -609,6 +609,7 @@ def test_customized_intervention_function_get(self):

_, tokenizer, gpt2 = pv.create_gpt2()

gpt2.config.output_attentions = True
pv_gpt2 = pv.IntervenableModel({
"layer": 10,
"component": "attention_weight",
Expand Down Expand Up @@ -668,4 +669,4 @@ def test_customized_intervention_function_zeroout(self):
def tearDownClass(self):
print(f"Removing testing dir {self._test_dir}")
if os.path.exists(self._test_dir) and os.path.isdir(self._test_dir):
shutil.rmtree(self._test_dir)
shutil.rmtree(self._test_dir)
5 changes: 3 additions & 2 deletions tests/integration_tests/InterventionWithGPT2TestCase.py
Original file line number Diff line number Diff line change
Expand Up @@ -132,8 +132,9 @@ def _test_with_head_position_intervention(
)
},
)

self.assertTrue(torch.allclose(out_output[0], golden_out))
# Relax the atol to 1e-6 to accommodate different Transformers versions.
# The max of the absolute diff is usually between 1e-8 and 1e-7.
self.assertTrue(torch.allclose(out_output[0], golden_out, rtol=1e-05, atol=1e-06))


def test_with_multiple_heads_positions_vanilla_intervention_positive(self):
Expand Down

0 comments on commit 2242266

Please sign in to comment.