
Commit be98696

feat: Standalone human eval reproduction script
1 parent 58eaa19 commit be98696

File tree

1 file changed: +314 -0 lines changed

eval/human_eval_test.ipynb

Lines changed: 314 additions & 0 deletions (new file)
Cell 1: imports and environment setup

```python
import os
import sys
import tempfile
sys.path.append('../')

import torch
from human_eval.data import stream_jsonl, write_jsonl, read_problems
from human_eval.evaluation import evaluate_functional_correctness
from transformers import AutoTokenizer, AutoModelForCausalLM

os.environ['TOKENIZERS_PARALLELISM'] = 'true'
```

stderr:
/home/azureuser/.conda/envs/llm_env/lib/python3.9/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html
  from .autonotebook import tqdm as notebook_tqdm

Cell 2: evaluation settings

```python
output_dir = tempfile.gettempdir()

n_samples_per_task = 1
batch_size = 32
n_workers = 8

max_gen_length = 512

use_instruct_model = True
model_size = '1.3b'
```

Cell 3: post-processing helper

````python
def cleanup_code(code: str, instruct_format: bool = False) -> str:
    """
    Clean up a generated completion: extract the fenced ```python block
    for instruct-style output, or truncate at the first top-level stop
    word for base-model output.
    """
    if instruct_format:
        code = code.replace("\r", "")
        if "```python" in code:
            code_start_idx = code.index("```python")
            code = code[code_start_idx:].replace("```python", "").strip()
            end_idx = code.find("```") if "```" in code else len(code)
            code = code[:end_idx].strip()

    else:
        stop_words = set(["\ndef", "\nclass", "\nif", "\n#", "\nprint"])
        min_stop_idx = len(code)
        for stop_word in stop_words:
            stop_index = code.find(stop_word)
            if 0 <= stop_index < min_stop_idx:
                min_stop_idx = stop_index
        code = code[:min_stop_idx]

    return code
````
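
A quick sanity check of the helper on both formats; the two completion strings below are made-up examples for illustration, not model output:

````python
# Hypothetical completions, for illustration only.
instruct_style = "Sure! Here is the solution:\n```python\ndef add(a, b):\n    return a + b\n```\nHope this helps."
base_style = "    return a + b\n\ndef unrelated():\n    pass\n"

print(cleanup_code(instruct_style, instruct_format=True))
# -> "def add(a, b):\n    return a + b"  (the fenced block, trailing chatter removed)
print(cleanup_code(base_style, instruct_format=False))
# -> "    return a + b\n"  (truncated at the first top-level "\ndef")
````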

Cell 4: load the tokenizer and model

```python
device = 'cuda'

model_type = 'instruct' if use_instruct_model else 'base'
model_name = f'deepseek-ai/deepseek-coder-{model_size}-{model_type}'

tokenizer = AutoTokenizer.from_pretrained(model_name)
tokenizer.padding_side = 'left'
# tokenizer.pad_token = tokenizer.eos_token # to avoid an error
model = AutoModelForCausalLM.from_pretrained(
    model_name, attn_implementation='flash_attention_2',
    torch_dtype=torch.bfloat16, device_map=device, trust_remote_code=True,
)
model = torch.compile(model)
model = model.eval()
```

stderr:
/home/azureuser/.conda/envs/llm_env/lib/python3.9/site-packages/huggingface_hub/file_download.py:1132: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
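
Note that `flash_attention_2` requires the flash-attn package and a supported GPU (Ampere or newer). If either is missing, a sketch of the same load using PyTorch's built-in SDPA attention instead (an assumption, not what the notebook ran):

```python
# Fallback load without flash-attn; 'sdpa' is supported in recent transformers.
model = AutoModelForCausalLM.from_pretrained(
    model_name, attn_implementation='sdpa',
    torch_dtype=torch.bfloat16, device_map=device, trust_remote_code=True,
)
```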

Cell 5: build and tokenize the prompts

```python
problems = read_problems()
print(f'# Problems: {len(problems)}')

problem_tuples = [(k, v['prompt']) for k, v in problems.items()]
task_ids, prompts = zip(*problem_tuples)

# Create lists of the input task ids and corresponding prompts,
# repeating each one n_samples_per_task times
input_tasks = [
    task_id
    for task_id in task_ids
    for _ in range(n_samples_per_task)
]
inputs = [
    prompt
    for prompt in prompts
    for _ in range(n_samples_per_task)
]

if use_instruct_model:
    instruct_template = \
        "Below is an instruction that describes a task, paired with an input that provides further context.\n" + \
        "Write a response that appropriately completes the request.\n\n### Instruction:\nWrite a program to " + \
        "perform the given task.\n\nInput:\n{}\n\n### Response:\n"
    # Wrap the replicated prompts (not `prompts`) so that len(inputs) stays
    # equal to len(input_tasks) when n_samples_per_task > 1
    inputs = [instruct_template.format(prompt) for prompt in inputs]

inputs = tokenizer(inputs, padding=True, return_tensors='pt').to(device)
```

stdout:
# Problems: 164
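
Since `padding_side` is `'left'` and the instruct template wraps the raw prompt, it can be worth decoding one batched row to verify the final model input; a small check, not part of the original notebook:

```python
# Decode the first row; skip_special_tokens drops the left padding tokens.
print(tokenizer.decode(inputs['input_ids'][0], skip_special_tokens=True))
```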

Cell 6: batched greedy generation

```python
completions = []

for i in range(0, len(inputs['input_ids']), batch_size):
    batch_inputs = {k: v[i:i+batch_size] for k, v in inputs.items()}

    with torch.no_grad():
        # Greedy decoding; passing pad_token_id=tokenizer.eos_token_id here
        # would silence the per-batch warning shown below
        generated_ids = model.generate(**batch_inputs, max_new_tokens=max_gen_length)

    # With left padding, the prompt fills the first input_ids.shape[1]
    # positions, so everything after that is newly generated text
    completion_ids = generated_ids[:, batch_inputs['input_ids'].shape[1]:]
    batch_completions = tokenizer.batch_decode(completion_ids, skip_special_tokens=True)
    completions.extend(batch_completions)

cleaned_completions = [cleanup_code(c, use_instruct_model) for c in completions]
```

stderr (once per batch; 164 prompts in batches of 32 gives 6 batches):
Setting `pad_token_id` to `eos_token_id`:32021 for open-end generation.
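
Greedy decoding makes every sample for a task identical, so `n_samples_per_task > 1` only adds information with sampling enabled. A hedged variant of the generate call for that case; the temperature and top_p values are illustrative, not from the source:

```python
generated_ids = model.generate(
    **batch_inputs,
    max_new_tokens=max_gen_length,
    do_sample=True,                        # sample instead of greedy decoding
    temperature=0.8,                       # illustrative, not tuned
    top_p=0.95,
    pad_token_id=tokenizer.eos_token_id,   # silences the repeated warning
)
```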

Cell 7: write the samples and score them

```python
samples = [
    dict(task_id=task_id, completion=completion)
    for task_id, completion in zip(input_tasks, cleaned_completions)
]

# Write the samples to a file
filepath = os.path.join(output_dir, 'human_eval_samples.jsonl')
os.makedirs(output_dir, exist_ok=True)
write_jsonl(filepath, samples)

print(evaluate_functional_correctness(filepath, k=[1], n_workers=n_workers, timeout=20))
```

Output:
Reading samples...
164it [00:00, 21024.72it/s]
Running test suites...
100%|██████████| 164/164 [00:25<00:00, 6.37it/s]
Writing results to /tmp/human_eval_samples.jsonl_results.jsonl...
100%|██████████| 164/164 [00:00<00:00, 59762.45it/s]
{'pass@1': 0.6524390243902439}
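
One caveat worth noting: the upstream human-eval package ships with the actual `exec(...)` call in `human_eval/execution.py` commented out for safety, so running this cell assumes it has been enabled locally (at the user's own risk).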

Cell 8: read back the per-problem results

```python
# Read the results
results = list(stream_jsonl(filepath + '_results.jsonl'))
passed = [r['passed'] for r in results]
passed_frac = sum(passed) / len(passed)
print(f'Passed: {passed_frac:.2f}')
```

Output:
Passed: 0.65
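
With `n_samples_per_task = 1` this pass fraction matches pass@1 exactly (0.65 vs. 0.6524 above). For n > 1, the harness instead uses the unbiased pass@k estimator from Chen et al. (2021); a minimal standalone version for reference, not part of the notebook:

```python
import numpy as np

def pass_at_k(n: int, c: int, k: int) -> float:
    """Unbiased pass@k: 1 - C(n-c, k) / C(n, k), computed in a stable product form."""
    if n - c < k:
        return 1.0  # every size-k draw contains at least one correct sample
    return float(1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1)))

# e.g. 10 samples with 4 correct: chance at least one of k=5 drawn passes
print(pass_at_k(10, 4, 5))  # ~0.976
```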

Notebook metadata: kernel llm_env, Python 3.9.19, nbformat 4.
